2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
16 #include <linux/pagemap.h>
17 #include <linux/writeback.h>
18 #include <linux/swap.h>
19 #include <linux/delay.h>
20 #include <linux/bio.h>
21 #include <linux/gfs2_ondisk.h>
34 #include "ops_address.h"
/*
 * gfs2_aspace_writepage - write back one dirty page of glock metadata
 * @page: the locked page to write (must have buffers attached)
 * @wbc: writeback control for this pass
 *
 * Walks the page's buffer ring, marks dirty mapped buffers for async
 * write, then submits each marked buffer with the metadata bio flag set.
 * For WB_SYNC_ALL writeback the buffers are submitted WRITE_SYNC_PLUG so
 * a waiting syncer is not starved; otherwise plain WRITE is used.
 *
 * NOTE(review): several structural lines of this function (the do-loop
 * opening, buffer locking calls, unlock/cleanup paths and the return)
 * are not visible in this chunk; comments cover only what is shown.
 */
36 static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
39 struct buffer_head *bh, *head;
/* Choose the block-layer op up front: always tagged as metadata I/O. */
41 int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
42 WRITE_SYNC_PLUG : WRITE));
/* Caller contract: ->writepage is handed a locked page with buffers. */
44 BUG_ON(!PageLocked(page));
45 BUG_ON(!page_has_buffers(page));
47 head = page_buffers(page);
/* Unmapped buffers have no disk location and cannot be written. */
51 if (!buffer_mapped(bh))
54 * If it's a fully non-blocking write attempt and we cannot
55 * lock the buffer then redirty the page. Note that this can
56 * potentially cause a busy-wait loop from pdflush and kswapd
57 * activity, but those code paths have their own higher-level
/* Blocking writeback: presumably lock_buffer() here — line not visible. */
60 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
62 } else if (!trylock_buffer(bh)) {
/* Could not get the buffer lock without blocking; try again later. */
63 redirty_page_for_writepage(wbc, page);
/* Only buffers that were actually dirty get queued for async write. */
66 if (test_clear_buffer_dirty(bh)) {
67 mark_buffer_async_write(bh);
71 } while ((bh = bh->b_this_page) != head);
74 * The page and its buffers are protected by PageWriteback(), so we can
75 * drop the bh refcounts early.
77 BUG_ON(PageWriteback(page));
78 set_page_writeback(page);
/* Second pass: submit every buffer marked async-write above. */
81 struct buffer_head *next = bh->b_this_page;
82 if (buffer_async_write(bh)) {
83 submit_bh(write_op, bh);
/* Reached when no buffer was submitted (error/nothing-to-do path). */
92 end_page_writeback(page);
/*
 * Address space operations for the per-glock metadata "aspace" inodes
 * created by gfs2_aspace_get() below.  Writeback goes through the
 * buffer-based gfs2_aspace_writepage(); block_sync_page unplugs the
 * device queue when a reader waits on one of these pages.
 * (Closing brace of the initializer is outside this chunk.)
 */
97 static const struct address_space_operations aspace_aops = {
98 .writepage = gfs2_aspace_writepage,
99 .releasepage = gfs2_releasepage,
100 .sync_page = block_sync_page,
104 * gfs2_aspace_get - Create and initialize a struct inode structure
105 * @sdp: the filesystem the aspace is in
107 * Right now a struct inode is just a struct inode. Maybe Linux
108 * will supply a more lightweight address space construct (that works)
111 * Make sure pages/buffers in this aspace aren't in high memory.
113 * Returns: the aspace
116 struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
118 struct inode *aspace;
119 struct gfs2_inode *ip;
/* Allocate a fresh VFS inode on this filesystem's superblock.
 * NOTE(review): the NULL check on this result is not visible here. */
121 aspace = new_inode(sdp->sd_vfs);
/* GFP_NOFS: page allocations for metadata must not recurse into fs
 * reclaim; huge i_size so any block number maps inside the mapping. */
123 mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
124 aspace->i_mapping->a_ops = &aspace_aops;
125 aspace->i_size = ~0ULL;
/* Mark this as an internal (non user-visible) GFS2 inode. */
127 clear_bit(GIF_USER, &ip->i_flags);
128 insert_inode_hash(aspace);
/*
 * gfs2_aspace_put - release an aspace inode obtained from gfs2_aspace_get
 * @aspace: the internal metadata inode to drop
 *
 * Unhashes the inode; the final reference drop (iput) is presumably on
 * the line following this chunk.
 */
133 void gfs2_aspace_put(struct inode *aspace)
135 remove_inode_hash(aspace);
140 * gfs2_meta_sync - Sync all buffers associated with a glock
/* @gl: the glock whose aspace mapping should be flushed to disk.
 * Starts writeback of every dirty page in the glock's metadata mapping,
 * waits for it to finish, and reports any I/O error against the sb. */
145 void gfs2_meta_sync(struct gfs2_glock *gl)
147 struct address_space *mapping = gl->gl_aspace->i_mapping;
150 filemap_fdatawrite(mapping);
151 error = filemap_fdatawait(mapping);
/* An error from the wait means metadata failed to reach disk. */
154 gfs2_io_error(gl->gl_sbd);
158 * gfs2_getbuf - Get a buffer with a given address space
160 * @blkno: the block number (filesystem scope)
161 * @create: 1 if the buffer should be created
163 * Returns: the buffer
166 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
168 struct address_space *mapping = gl->gl_aspace->i_mapping;
169 struct gfs2_sbd *sdp = gl->gl_sbd;
171 struct buffer_head *bh;
/* Several fs blocks fit in one page: derive the page index and the
 * buffer's position within that page from the block number. */
176 shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
177 index = blkno >> shift; /* convert block to page */
178 bufnum = blkno - (index << shift); /* block buf index within page */
/* create: allocate the page if absent (grab); otherwise only look it
 * up (find), returning NULL paths handled outside this chunk. */
182 page = grab_cache_page(mapping, index);
188 page = find_lock_page(mapping, index);
/* Attach one buffer_head per fs block to a freshly created page. */
193 if (!page_has_buffers(page))
194 create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
196 /* Locate header for our buffer within our page */
197 for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
/* Map the buffer to its on-disk block the first time it is seen. */
201 if (!buffer_mapped(bh))
202 map_bh(bh, sdp->sd_vfs, blkno);
/* Keep the page on the active list, then drop the page reference;
 * the caller holds the bh reference instead. */
205 mark_page_accessed(page);
206 page_cache_release(page);
/*
 * meta_prep_new - initialize a freshly allocated metadata buffer
 * @bh: the buffer to prepare
 *
 * Marks the buffer clean and uptodate (its contents will be written,
 * not read) and stamps the GFS2 metadata magic into the header.
 */
211 static void meta_prep_new(struct buffer_head *bh)
213 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
216 clear_buffer_dirty(bh);
217 set_buffer_uptodate(bh);
/* On-disk fields are big-endian. */
220 mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
224 * gfs2_meta_new - Get a block
225 * @gl: The glock associated with this block
226 * @blkno: The block number
228 * Returns: The buffer
231 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
233 struct buffer_head *bh;
/* Always create the buffer; it is presumably then locked and stamped
 * via meta_prep_new() on the lines following this chunk. */
234 bh = gfs2_getbuf(gl, blkno, CREATE);
240 * gfs2_meta_read - Read a block from disk
241 * @gl: The glock covering the block
242 * @blkno: The block number
244 * @bhp: the place where the buffer is returned (NULL on failure)
/* Returns 0 on success or a negative errno (error paths not all
 * visible in this chunk). */
249 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
250 struct buffer_head **bhp)
252 struct gfs2_sbd *sdp = gl->gl_sbd;
253 struct buffer_head *bh;
/* Refuse new metadata reads once the filesystem has shut down. */
255 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
258 *bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
/* Cache hit: buffer already holds valid data, no I/O needed. */
261 if (buffer_uptodate(bh)) {
265 bh->b_end_io = end_buffer_read_sync;
/* Synchronous, metadata-tagged read of the missing block. */
267 submit_bh(READ_SYNC | (1 << BIO_RW_META), bh);
/* DIO_WAIT absent: fire-and-forget, caller will wait later. */
268 if (!(flags & DIO_WAIT))
/* Read failed: if we are inside an active transaction, flag an
 * I/O error against this buffer for the journal machinery. */
272 if (unlikely(!buffer_uptodate(bh))) {
273 struct gfs2_trans *tr = current->journal_info;
274 if (tr && tr->tr_touched)
275 gfs2_io_error_bh(sdp, bh);
284 * gfs2_meta_wait - Reread a block from disk
285 * @sdp: the filesystem
286 * @bh: The block to wait for
/* Waits for in-flight I/O on @bh (the wait_on_buffer call itself is
 * not visible in this chunk) and returns an error if the buffer did
 * not come back uptodate or the fs shut down meanwhile. */
291 int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
/* Bail out early if the filesystem is already shut down. */
293 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
/* I/O completed but the data is bad: report against the buffer if
 * we are inside an active transaction. */
298 if (!buffer_uptodate(bh)) {
299 struct gfs2_trans *tr = current->journal_info;
300 if (tr && tr->tr_touched)
301 gfs2_io_error_bh(sdp, bh);
/* Re-check shutdown after the wait; it may have happened meanwhile. */
304 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
311 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
312 * @gl: the glock the buffer belongs to
313 * @bh: The buffer to be attached to
314 * @meta: Flag to indicate whether its metadata or not
317 void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
320 struct gfs2_bufdata *bd;
/* The page lock serializes against concurrent attach/detach of
 * bh->b_private; early-exit path (already attached, presumably)
 * unlocks and returns. */
323 lock_page(bh->b_page);
327 unlock_page(bh->b_page);
/* __GFP_NOFAIL: allocation cannot fail, so no NULL check is needed. */
331 bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
335 INIT_LIST_HEAD(&bd->bd_list_tr);
/* Pick log operations by buffer type: metadata vs (journaled) data. */
337 lops_init_le(&bd->bd_le, &gfs2_buf_lops);
339 lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
343 unlock_page(bh->b_page);
/*
 * gfs2_remove_from_journal - detach a buffer from the journal
 * @bh: the buffer to remove
 * @tr: the current transaction (used to account removals)
 * @meta: nonzero for metadata buffers, zero for journaled data
 *
 * Unpins the buffer if pinned, drops it from the per-type log counts,
 * detaches its bufdata and queues a revoke so the block is not replayed.
 * NOTE(review): locking context (log lock held?) is not visible here —
 * confirm against callers such as gfs2_meta_wipe().
 */
346 void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
348 struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
349 struct gfs2_bufdata *bd = bh->b_private;
/* A pinned buffer sits on a log list; take it off and fix counters. */
350 if (test_clear_buffer_pinned(bh)) {
351 list_del_init(&bd->bd_le.le_list);
353 gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
354 sdp->sd_log_num_buf--;
/* Data-buffer branch: mirror accounting for journaled data. */
357 gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
358 sdp->sd_log_num_databuf--;
359 tr->tr_num_databuf_rm++;
/* Detach from the AIL and convert the bufdata into a revoke entry
 * keyed by the block number. */
366 gfs2_remove_from_ail(bd);
367 bh->b_private = NULL;
369 bd->bd_blkno = bh->b_blocknr;
370 gfs2_trans_add_revoke(sdp, bd);
/* The buffer's cached contents are now meaningless. */
373 clear_buffer_dirty(bh);
374 clear_buffer_uptodate(bh);
378 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
379 * @ip: the inode who owns the buffers
380 * @bstart: the first buffer in the run
381 * @blen: the number of buffers in the run
385 void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
387 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
388 struct buffer_head *bh;
/* NO_CREATE: only touch buffers already in the cache; blocks with no
 * cached buffer need no wiping. */
391 bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
/* Detach from the journal under the log lock (taken on a line not
 * visible here, released just below). */
395 gfs2_remove_from_journal(bh, current->journal_info, 1);
396 gfs2_log_unlock(sdp);
407 * gfs2_meta_indirect_buffer - Get a metadata buffer
408 * @ip: The GFS2 inode
409 * @height: The level of this buf in the metadata (indir addr) tree (if any)
410 * @num: The block number (device relative) of the buffer
411 * @new: Non-zero if we may create a new buffer
412 * @bhp: the buffer is returned here
417 int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
418 int new, struct buffer_head **bhp)
420 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
421 struct gfs2_glock *gl = ip->i_gl;
422 struct buffer_head *bh;
/* New-block path: allocate, add to the transaction, stamp it as an
 * indirect block and zero everything past the header. */
427 bh = gfs2_meta_new(gl, num);
428 gfs2_trans_add_bh(ip->i_gl, bh, 1);
429 gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
430 gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
/* Existing-block path: height 0 is the dinode itself, anything
 * deeper is an indirect block; verify the on-disk type matches. */
432 u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
433 ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh);
434 if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
444 * gfs2_meta_ra - start readahead on an extent of a file
445 * @gl: the glock the blocks belong to
446 * @dblock: the starting disk block
447 * @extlen: the number of blocks in the extent
449 * returns: the first buffer in the extent
452 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
454 struct gfs2_sbd *sdp = gl->gl_sbd;
455 struct buffer_head *first_bh, *bh;
/* Cap the readahead window by the tunable, converted to fs blocks. */
456 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
457 sdp->sd_sb.sb_bsize_shift;
/* The first block is the one the caller actually needs: issue a
 * synchronous metadata read unless it is already cached/in-flight. */
466 first_bh = gfs2_getbuf(gl, dblock, CREATE);
468 if (buffer_uptodate(first_bh))
470 if (!buffer_locked(first_bh))
471 ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh);
/* Remaining blocks are speculative: queue them with READA, which the
 * block layer may drop under load. */
477 bh = gfs2_getbuf(gl, dblock, CREATE);
479 if (!buffer_uptodate(bh) && !buffer_locked(bh))
480 ll_rw_block(READA, 1, &bh);
/* Fast path: first block completed while we were queueing readahead. */
484 if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
/* Otherwise block until the first buffer's I/O completes. */
488 wait_on_buffer(first_bh);