/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <cluster/masklog.h>

#include "buffer_head_io.h"

int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
	mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
		   (unsigned long long)bh->b_blocknr, inode);
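
	/* Sanity checks: blocks below the superblock are never written
	 * through this path, and a buffer under jbd's control must be
	 * written via the journal rather than directly. */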
	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));
	/* No need to check for a soft readonly file system here. Non-journalled
	 * writes are only ever done on system files, which can get modified
	 * during recovery even if the file system is read-only. */
	if (ocfs2_is_hard_readonly(osb)) {

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);

	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
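
	/* Once the write completes, end_buffer_write_sync() will set or
	 * clear the buffer's uptodate bit to match the I/O result, unlock
	 * it and drop the reference taken above, so buffer_uptodate()
	 * below tells us whether the write succeeded. */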
	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(inode, bh);
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
	struct buffer_head *bh;

		mlog(ML_BH_IO, "No buffers will be read!\n");

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {

		if (buffer_jbd(bh)) {
			     "trying to sync read a jbd "
			     "managed bh (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);

		if (buffer_jbd(bh)) {
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);

		clear_buffer_uptodate(bh);
		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
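
	/* The reads complete asynchronously via end_buffer_read_sync();
	 * walk the array in reverse, waiting on each buffer and noting
	 * any that come back not uptodate. */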
	for (i = nr; i > 0; i--) {
		/* No need to wait on the buffer if it's managed by JBD. */

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to cleanup the other buffers. */

int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
		      struct buffer_head *bhs[], int flags)
	int i, ignore_cache = 0;
	struct buffer_head *bh;

	mlog_entry("(inode=%p, block=(%llu), nr=(%d), flags=%d)\n",
		   inode, (unsigned long long)block, nr, flags);
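
	/* Readahead exists only to warm the uptodate cache (see the
	 * read-ahead comment below), so combining it with
	 * OCFS2_BH_IGNORE_CACHE makes no sense. */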
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);

		mlog(ML_BH_IO, "No buffers will be read!\n");

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(inode->i_sb, block++);
			if (bhs[i] == NULL) {
				mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);
		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
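		 *
		 * Case 3 is short-circuited by the ocfs2_buffer_read_ahead()
		 * check below; cases 1 and 2 end up taking the buffer lock,
		 * where the ocfs2_buffer_uptodate() re-check avoids a
		 * needless re-submit. */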
		if (!ignore_cache && !ocfs2_buffer_uptodate(inode, bh)) {
			     "bh (%llu), inode %llu not uptodate\n",
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			/* We're using ignore_cache here to say

		if (buffer_jbd(bh)) {
			mlog(ML_BH_IO, "trying to sync read a jbd "
				       "managed bh (blocknr = %llu)\n",
			     (unsigned long long)bh->b_blocknr);

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_BH_IO, "asking me to sync read a dirty "
				       "buffer! (blocknr = %llu)\n",
			     (unsigned long long)bh->b_blocknr);
		/* A read-ahead request was made - if the
		 * buffer is already under read-ahead from a
		 * previously submitted request then we are
		if ((flags & OCFS2_BH_READAHEAD)
		    && ocfs2_buffer_read_ahead(inode, bh))

		if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
			mlog(ML_ERROR, "block %llu had the JBD bit set "
				       "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);

		/* Re-check ocfs2_buffer_uptodate() as a
		 * previously read-ahead buffer may have
		 * completed I/O while we were waiting for the
		if (!(flags & OCFS2_BH_IGNORE_CACHE)
		    && !(flags & OCFS2_BH_READAHEAD)
		    && ocfs2_buffer_uptodate(inode, bh)) {

		clear_buffer_uptodate(bh);
		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
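
	/* Collect the results: for anything other than readahead, wait
	 * on the I/O we may have just issued before inspecting the
	 * buffer state. */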
	for (i = (nr - 1); i >= 0; i--) {
		if (!(flags & OCFS2_BH_READAHEAD)) {
			/* We know this can't have changed as we hold the
			 * inode sem. Avoid doing any work on the bh if the

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to cleanup the other buffers. Don't need to
				 * remove the clustered uptodate information
				 * for this bh as it's not marked locally

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		ocfs2_set_buffer_uptodate(inode, bh);

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
	     (unsigned long long)block, nr,
	     ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)

/*
 * Writing the super block and its backups doesn't need to be
 * coordinated with the journal, so we don't need to lock ip_io_mutex
 * and an inode doesn't need to be passed into this function.
 */
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
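
	/* Unlike ocfs2_write_block(), this path also refuses to write on
	 * a soft readonly mount. */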
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {

	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	if (!buffer_uptodate(bh)) {
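		/* end_buffer_write_sync() cleared the uptodate bit, so the
		 * write itself failed. */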