/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, current->pid, inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is locked, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_LOCK)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty or s_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__mark_inode_dirty);

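/*
 * Illustrative usage (a sketch, not part of the original file): a filesystem
 * updating an inode's timestamps would typically follow the update with
 * mark_inode_dirty_sync(), which resolves to
 * __mark_inode_dirty(inode, I_DIRTY_SYNC):
 *
 *	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	mark_inode_dirty_sync(inode);
 *
 * Per the CAREFUL note above, hash the inode (e.g. via insert_inode_hash())
 * before the first such call.
 */
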
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_LOCK);

	/* Set I_LOCK, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_LOCK;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_LOCK;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode.  It is still on sb->s_io.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we leave the inode
				 * at the head of sb_dirty so it will get more
				 * writeout as soon as the queue becomes
				 * uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				list_move_tail(&inode->i_list, &sb->s_dirty);
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				inode->dirtied_when = jiffies;
				list_move(&inode->i_list, &sb->s_dirty);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			list_move(&inode->i_list, &sb->s_dirty);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	wake_up_inode(inode);
	return ret;
}

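/*
 * Summary of the above: when __sync_single_inode() returns, the inode is on
 * exactly one of three lists - back on sb->s_dirty if it is still (or again)
 * dirty, on inode_in_use if it is clean and referenced, or on inode_unused
 * if it is clean and unreferenced.
 */
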
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
		struct address_space *mapping = inode->i_mapping;
		int ret;

		list_move(&inode->i_list, &inode->i_sb->s_dirty);

		/*
		 * Even if we don't actually write the inode itself here,
		 * we can at least start some of the data writeout..
		 */
		spin_unlock(&inode_lock);
		ret = do_writepages(mapping, wbc);
		spin_lock(&inode_lock);
		return ret;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_LOCK) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);

		wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
					TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_LOCK);
	}
	return __sync_single_inode(inode, wbc);
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * Called under inode_lock.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list.)
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on __wait_on_inode.
 */
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		list_splice_init(&sb->s_dirty, &sb->s_io);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			list_move(&inode->i_list, &sb->s_dirty);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode.
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			list_move(&inode->i_list, &sb->s_dirty);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Was this inode dirtied too recently? */
		if (wbc->older_than_this && time_after(inode->dirtied_when,
						*wbc->older_than_this))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			list_move(&inode->i_list, &sb->s_dirty);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0)
			break;
	}
	return;		/* Leave any unwritten inodes on s_io */
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->s_dirty list, it hasn't been killed yet and kill_super() won't
 * proceed past sync_inodes_sb() until both the ->s_dirty and ->s_io lists
 * are empty.  Since __sync_single_inode() regains inode_lock before it
 * finally moves the inode off the superblock's lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev
 * superblock, sync_sb_inodes will seek out the blockdev which matches `bdi'.
 * Maybe not super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		if (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_io)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root) {
					spin_lock(&inode_lock);
					sync_sb_inodes(sb, wbc);
					spin_unlock(&inode_lock);
				}
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

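/*
 * Illustrative sketch (not part of the original file): a background flusher
 * such as pdflush's background_writeout() drives this function with a
 * writeback_control along these lines:
 *
 *	struct writeback_control wbc = {
 *		.bdi		= NULL,		-- all queues
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	= 1024,		-- example batch size
 *		.nonblocking	= 1,
 *	};
 *	writeback_inodes(&wbc);
 */
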
/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

	wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			nr_dirty + nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	spin_lock(&inode_lock);
	sync_sb_inodes(sb, &wbc);
	spin_unlock(&inode_lock);
}

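/*
 * Illustrative sketch (not part of the original file): the intended two-pass
 * usage pairs a non-waiting pass with a waiting one:
 *
 *	sync_inodes_sb(sb, 0);	-- write; WB_SYNC_HOLD parks inodes on s_dirty
 *	sync_inodes_sb(sb, 1);	-- wait on the writeout
 *
 * Callers such as fsync_super() follow this shape.
 */
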
/*
 * Rather lame livelock avoidance.
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.prev);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
		sb->s_syncing = val;
	}
	spin_unlock(&sb_lock);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	set_sb_syncing(0);
	__sync_inodes(0);

	if (wait) {
		set_sb_syncing(0);
		__sync_inodes(1);
	}
}

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		wait_on_inode(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

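/*
 * Illustrative sketch (not part of the original file): a caller holding a
 * reference, e.g. a knfsd-style commit path, that needs the inode durably
 * on disk would use the synchronous form:
 *
 *	err = write_inode_now(inode, 1);
 */
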
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

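/*
 * Illustrative sketch (not part of the original file): a caller wanting a
 * bounded, non-waiting flush of a single inode could supply its own control:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= 64,		-- example page budget
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	err = sync_inode(inode, &wbc);
 */
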
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon:
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		wait_on_inode(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);

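/*
 * Illustrative sketch (not part of the original file): an O_SYNC write path
 * would typically finish a successful write with something like:
 *
 *	if (status >= 0 && (file->f_flags & O_SYNC))
 *		status = generic_osync_inode(inode, mapping,
 *				OSYNC_METADATA | OSYNC_DATA);
 */
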
/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own.  Which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}

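/*
 * Illustrative sketch (not part of the original file): callers treat
 * writeback_acquire()/writeback_release() as a trylock pair, as
 * sync_sb_inodes() does above:
 *
 *	if (writeback_acquire(bdi)) {
 *		... issue writeback against bdi ...
 *		writeback_release(bdi);
 *	}
 */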