/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "ops_file.h"
#include "util.h"
#include "glops.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
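/*
 * Worked example (not from the original source): with a 4096-byte page
 * carved into four 1024-byte buffers, the loop visits the byte ranges
 * [0,1024), [1024,2048), [2048,3072) and [3072,4096). For a call with
 * from=512, to=2048 the first two ranges overlap [512,2048) and those
 * buffers are added to the transaction; the last two are skipped by the
 * "end <= from || start >= to" test.
 */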
/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, create, bh_result);
}
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, 0, bh_result);
	if (error)
		return error;
	if (bh_result->b_blocknr == 0)
		return -EIO;
	return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, 0, bh_result);
}
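/*
 * Note (added commentary): all three wrappers funnel into gfs2_block_map()
 * and differ only in allocation policy. gfs2_get_block() passes @create
 * through, so it may allocate; the noalloc variant never allocates and
 * treats an unmapped block as -EIO, since writepage should only see
 * blocks that prepare_write already allocated; the direct variant also
 * never allocates, which is what makes the shared glock in
 * gfs2_direct_IO() sufficient.
 */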
/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 * @wbc: Writeback control
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
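/*
 * Note (added commentary): for ordered or journaled data the page's
 * buffers are attached to the running transaction *before*
 * block_write_full_page() submits them, so the log can preserve its
 * data-before-metadata guarantees. The out_ignore path merely redirties
 * the page for a later retry; it is taken when a transaction is already
 * in progress on this task (current->journal_info is set), presumably
 * because starting a nested transaction here could deadlock.
 */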
static int zero_readpage(struct page *page)
{
	void *kaddr;

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);

	SetPageUptodate(page);
	return 0;
}
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/* Only the first page of a stuffed file might contain data */
	if (unlikely(page->index))
		return zero_readpage(page);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	brelse(dibh);

	SetPageUptodate(page);
	return 0;
}
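/*
 * Note (added commentary): a "stuffed" inode keeps its file data inline
 * in the dinode block, immediately after the on-disk header, so such a
 * file holds at most sb_bsize - sizeof(struct gfs2_dinode) bytes (see
 * the unstuff test in gfs2_prepare_write() below) and only page index 0
 * can ever contain data. Everything past i_di.di_size within the page
 * is zero-filled above.
 */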
/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Returns: errno
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct gfs2_file *gf = NULL;
	struct gfs2_holder gh;
	int error;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
		do_unlock = 1;
		error = gfs2_glock_nq_m_atime(1, &gh);
		if (unlikely(error))
			goto out_unlock;
	}

skip_lock:
	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else
		error = mpage_readpage(page, gfs2_get_block);

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		error = -EIO;

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	return error;
out_unlock:
	unlock_page(page);
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We have to handle stuffed files here too.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here; effectively we then have lock-ahead as
 *    well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	unsigned page_idx;
	int ret;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			struct gfs2_file *gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
				 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
		do_unlock = 1;
		ret = gfs2_glock_nq_m_atime(1, &gh);
		if (ret == GLR_TRYFAILED)
			goto out_noerror;
		if (unlikely(ret))
			goto out_unlock;
	}
skip_lock:
	if (gfs2_is_stuffed(ip)) {
		struct pagevec lru_pvec;
		pagevec_init(&lru_pvec, 0);
		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
			struct page *page = list_entry(pages->prev, struct page, lru);
			prefetchw(&page->flags);
			list_del(&page->lru);
			if (!add_to_page_cache(page, mapping,
					       page->index, GFP_KERNEL)) {
				ret = stuffed_readpage(ip, page);
				unlock_page(page);
				if (!pagevec_add(&lru_pvec, page))
					__pagevec_lru_add(&lru_pvec);
			} else {
				page_cache_release(page);
			}
		}
		pagevec_lru_add(&lru_pvec);
		ret = 0;
	} else {
		/* What we really want to do .... */
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
	}

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
out_noerror:
	ret = 0;
out_unlock:
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
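/*
 * Note (added commentary): because the holder is initialized with
 * LM_FLAG_TRY_1CB, gfs2_glock_nq_m_atime() fails with GLR_TRYFAILED
 * instead of blocking when another node holds the glock; the readahead
 * is then abandoned (out_noerror returns 0 having done no I/O), and any
 * pages we skipped are read one at a time through gfs2_readpage() when
 * they are actually needed, as note 1 above describes.
 */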
/**
 * gfs2_prepare_write - Prepare to write a page to a file
 * @file: The file to write to
 * @page: The page which is to be prepared for writing
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */
static int gfs2_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
	loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	struct gfs2_alloc *al;
	unsigned int write_len = to - from;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
	error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
	if (error)
		goto out_uninit;

	gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);

	error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required);
	if (error)
		goto out_unlock;

	ip->i_alloc.al_requested = 0;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks, 0);
	if (error)
		goto out;

	if (gfs2_is_stuffed(ip)) {
		if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page))
			error = stuffed_readpage(ip, page);
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_get_block);

out:
	if (error) {
		gfs2_trans_end(sdp);
		if (alloc_required) {
			gfs2_inplace_release(ip);
out_qunlock:
			gfs2_quota_unlock(ip);
out_alloc_put:
			gfs2_alloc_put(ip);
		}
out_unlock:
		gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
		gfs2_holder_uninit(&ip->i_gh);
	}

	return error;
}
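/*
 * Worked example (not from the original source): suppose
 * gfs2_write_calc_reserv() reports data_blocks = 1 and ind_blocks = 2
 * for this write. Then rblocks = RES_DINODE + 2, plus one more block if
 * the file is journaled data (the data block itself must be logged),
 * plus RES_STATFS + RES_QUOTA since blocks are being allocated; the
 * conditional terms only drop out when neither data nor indirect blocks
 * are needed.
 */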
/**
 * gfs2_commit_write - Commit write to a file
 * @file: The file to write to
 * @page: The page containing the data
 * @from: From (byte range within page)
 * @to: To (byte range within page)
 *
 * Returns: errno
 */
static int gfs2_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error = -EOPNOTSUPP;
	struct buffer_head *dibh;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_dinode *di;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
		goto fail_nounlock;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_endtrans;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;

	if (gfs2_is_stuffed(ip)) {
		u64 file_size;
		void *kaddr;

		file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;

		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
		       kaddr + from, to - from);
		kunmap_atomic(kaddr, KM_USER0);

		SetPageUptodate(page);

		if (inode->i_size < file_size)
			i_size_write(inode, file_size);
	} else {
		if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
		    gfs2_is_jdata(ip))
			gfs2_page_add_databufs(ip, page, from, to);
		error = generic_commit_write(file, page, from, to);
		if (error)
			goto fail;
	}

	if (ip->i_di.di_size < inode->i_size) {
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq_m(1, &ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return 0;

fail:
	brelse(dibh);
fail_endtrans:
	gfs2_trans_end(sdp);
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq_m(1, &ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
fail_nounlock:
	ClearPageUptodate(page);
	return error;
}
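/*
 * Note (added commentary): for a stuffed file the data is copied
 * straight from the page into the dinode buffer (already added to the
 * transaction above), so there is no per-block commit step;
 * generic_commit_write() is only used for the unstuffed case. The two
 * i_size updates keep the in-core inode, the GFS2-specific
 * i_di.di_size and the on-disk dinode in sync.
 */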
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

	gfs2_glock_dq_uninit(&i_gh);
	return dblock;
}
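/*
 * Note (added commentary): for a stuffed file dblock stays 0, which the
 * bmap interface (e.g. the FIBMAP ioctl) interprets as "no mapping";
 * inline data has no disk address of its own. Returning 0 on lock
 * failure overloads the same convention for errors, as the doc comment
 * above states.
 */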
static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd) {
		bd->bd_bh = NULL;
		bh->b_private = NULL;
	}
	gfs2_log_unlock(sdp);

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (offset <= curr_off)
			discard_buffer(sdp, bh);

		curr_off = next_off;
		bh = next;
	} while (bh != head);

	if (!offset)
		try_to_release_page(page, 0);
}
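/*
 * Note (added commentary): @offset is the start of the invalidated
 * range within the page, so "offset <= curr_off" discards exactly those
 * buffers which begin at or beyond it; a partial truncation
 * (offset != 0) keeps the leading buffers, and only a full-page
 * invalidation (offset == 0) goes on to drop the page's buffers
 * entirely via try_to_release_page().
 */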
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	ssize_t rv;

	if (rw == READ)
		mutex_lock(&inode->i_mutex);
	/*
	 * Shared lock, even if it's a write, since we do no allocation
	 * on this path. All we need to change is the atime.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_m_atime(1, &gh);
	if (rv)
		goto out;

	if (offset > i_size_read(inode))
		goto out;

	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a journaled file makes any sense. For now we'll silently fall
	 * back to buffered I/O, likewise we do the same for stuffed
	 * files since they are (a) small and (b) unaligned.
	 */
	if (gfs2_is_jdata(ip))
		goto out;

	if (gfs2_is_stuffed(ip))
		goto out;

	rv = blockdev_direct_IO_own_locking(rw, iocb, inode,
					    inode->i_sb->s_bdev,
					    iov, offset, nr_segs,
					    gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	if (rw == READ)
		mutex_unlock(&inode->i_mutex);

	return rv;
}
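/*
 * Note (added commentary): i_mutex is taken here for reads only; writes
 * normally arrive with it already held by the VFS write path, and the
 * "own_locking" variant of blockdev_direct_IO() leaves all inode
 * locking to the filesystem. The shared (rather than exclusive) glock
 * is safe because gfs2_get_block_direct() never allocates, so only the
 * atime changes, as the comment above explains.
 */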
/**
 * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
 * @bh: the buffer we're stuck on
 *
 */
static void stuck_releasepage(struct buffer_head *bh)
{
	struct inode *inode = bh->b_page->mapping->host;
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_glock *gl;
	static unsigned limit = 0;

	if (limit > 3)
		return;
	limit++;

	fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
	fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
		(unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
	fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
	fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

	if (!bd)
		return;

	gl = bd->bd_gl;

	fs_warn(sdp, "gl = (%u, %llu)\n",
		gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

	fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
		(list_empty(&bd->bd_list_tr)) ? "no" : "yes",
		(list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

	if (gl->gl_ops == &gfs2_inode_glops) {
		struct gfs2_inode *ip = gl->gl_object;
		unsigned int x;

		if (!ip)
			return;

		fs_warn(sdp, "ip = %llu %llu\n",
			(unsigned long long)ip->i_num.no_formal_ino,
			(unsigned long long)ip->i_num.no_addr);

		for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
			fs_warn(sdp, "ip->i_cache[%u] = %s\n",
				x, (ip->i_cache[x]) ? "!NULL" : "NULL");
	}
}
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, otherwise 0
 */
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;
	unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

	if (!page_has_buffers(page))
		goto out;

	head = bh = page_buffers(page);
	do {
		while (atomic_read(&bh->b_count)) {
			if (!atomic_read(&aspace->i_writecount))
				return 0;

			if (time_after_eq(jiffies, t)) {
				stuck_releasepage(bh);
				/* should we withdraw here? */
				return 0;
			}

			yield();
		}

		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));

		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			gfs2_assert_warn(sdp, !bd->bd_ail);
			bd->bd_bh = NULL;
			if (!list_empty(&bd->bd_le.le_list))
				bd = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

out:
	return try_to_free_buffers(page);
}
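/*
 * Note (added commentary): the yield() loop above busy-waits for the
 * buffer's reference count to drop, gives up after gt_stall_secs (a
 * tunable read via gfs2_tune_get()) and just logs diagnostics through
 * stuck_releasepage(). Returning 0 tells the VM the buffers could not
 * be freed this time, which is always a safe answer for ->releasepage.
 */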
const struct address_space_operations gfs2_file_aops = {
	.writepage = gfs2_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.prepare_write = gfs2_prepare_write,
	.commit_write = gfs2_commit_write,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
};