/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024
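
/*
 * Illustration: with the common 4 KiB page size, 1024 pages per pass is
 * 4 MiB of data written back between dirty-limit re-evaluations.
 */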
/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static int dirty_exceeded __cacheline_aligned_in_smp;	/* Dirty mem may be over limit */
/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}
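
/*
 * Illustration: once writeback_set_ratelimit() has raised ratelimit_pages to
 * its 4 MiB cap (1024 pages with 4 KiB pages), sync_writeback_pages() returns
 * 1536 pages, i.e. the "six megabyte chunks" mentioned above
 * writeback_set_ratelimit() further down in this file.
 */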
/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */
static void background_writeout(unsigned long _min_pages);
/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(long *pbackground, long *pdirty,
					struct address_space *mapping)
{
        int background_ratio;		/* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = vm_total_pages;
        struct task_struct *tsk;

#ifdef CONFIG_HIGHMEM
        /*
         * If this mapping can only allocate from low memory,
         * we exclude high memory from our count.
         */
        if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
                available_memory -= totalhigh_pages;
#endif

        unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
                                global_page_state(NR_ANON_PAGES)) * 100) /
                                        vm_total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}
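
/*
 * Worked example (illustrative numbers only): on a machine with 262144
 * available pages (1 GiB with 4 KiB pages) of which 40% are mapped,
 * unmapped_ratio is 60, so vm_dirty_ratio = 40 is clamped down to 30 while
 * dirty_background_ratio = 10 is left alone.  That yields a dirty limit of
 * about 78643 pages and a background limit of about 26214 pages; a
 * PF_LESS_THROTTLE or real-time task gets both limits boosted by 25%.
 */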
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi		= bdi,
                        .sync_mode	= WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write	= write_chunk,
                        .range_cyclic	= 1,
                };

                get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
                        dirty_thresh)
                                break;

                if (!dirty_exceeded)
                        dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (i.e. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&background_thresh,
                                        &dirty_thresh, mapping);
                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                        if (nr_reclaimable +
                                global_page_state(NR_WRITEBACK)
                                        <= dirty_thresh)
                                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;		/* We've done our duty */
                }
                blk_congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + global_page_state(NR_WRITEBACK)
                <= dirty_thresh && dirty_exceeded)
                        dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;		/* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}
void set_page_dirty_balance(struct page *page)
{
        if (set_page_dirty(page)) {
                struct address_space *mapping = page_mapping(page);

                if (mapping)
                        balance_dirty_pages_ratelimited(mapping);
        }
}
/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
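
/*
 * Typical caller pattern (sketch only): a buffered-write path dirties a page
 * and then calls the single-page wrapper once per page, e.g.
 *
 *	set_page_dirty(page);
 *	unlock_page(page);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * so the expensive dirty-state check runs at most once every ratelimit_pages
 * pages per CPU.
 */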
void throttle_vm_writeout(void)
{
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;	/* wheeee... */

                if (global_page_state(NR_UNSTABLE_NFS) +
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                blk_congestion_wait(WRITE, HZ/10);
        }
}
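
/*
 * Illustration: with the 78643-page dirty limit from the earlier example,
 * page allocators are only throttled here once NR_UNSTABLE_NFS + NR_WRITEBACK
 * exceeds roughly 86507 pages (the limit plus 10%).
 */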
/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi		= NULL,
                .sync_mode	= WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write	= 0,
                .nonblocking	= 1,
                .range_cyclic	= 1,
        };

        for ( ; ; ) {
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        blk_congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}
/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        return pdflush_operation(background_writeout, nr_pages);
}
static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_control wbc = {
                .bdi		= NULL,
                .sync_mode	= WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write	= 0,
                .nonblocking	= 1,
                .for_kupdate	= 1,
                .range_cyclic	= 1,
        };

        sync_supers();

        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                blk_congestion_wait(WRITE, HZ/10);
                        else
                                break;	/* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
                        jiffies + dirty_writeback_interval);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}
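
/*
 * Example: `echo 500 > /proc/sys/vm/dirty_writeback_centisecs' sets a
 * five-second kupdate interval (proc_dointvec_userhz_jiffies converts the
 * centisecond value to jiffies), while writing 0 stops the periodic timer
 * via del_timer() above.
 */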
static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}
/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}
/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */
void writeback_set_ratelimit(void)
{
        ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
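
/*
 * Illustration (4 KiB pages assumed): a 4-CPU box with 1 GiB of memory gets
 * vm_total_pages / (4 * 32) = 2048 pages, i.e. 8 MiB, which exceeds the
 * 4 MiB cap, so ratelimit_pages is clamped to 1024.  A small UP box with
 * 64 MiB (16384 pages) gets 16384 / 32 = 512 pages, i.e. 2 MiB.
 */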
static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        writeback_set_ratelimit();
        return 0;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call	= ratelimit_handler,
        .next		= NULL,
};
/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
        long buffer_pages = nr_free_buffer_pages();
        long correction;

        correction = (100 * 4 * buffer_pages) / vm_total_pages;

        if (correction < 100) {
                dirty_background_ratio *= correction;
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;

                if (dirty_background_ratio <= 0)
                        dirty_background_ratio = 1;
                if (vm_dirty_ratio <= 0)
                        vm_dirty_ratio = 1;
        }
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}
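
/*
 * Worked example (approximate, illustrative numbers only): a 4 GiB i386 box
 * with roughly 220000 lowmem buffer pages out of 1048576 total pages gets
 * correction = (100 * 4 * 220000) / 1048576, about 83, which scales
 * dirty_background_ratio from 10 down to 8 and vm_dirty_ratio from 40 to 33.
 */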
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}
/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);
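
/*
 * Usage sketch (not taken from a real caller): the caller must hold the page
 * lock and should not expect to still hold it afterwards, e.g.
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * Passing wait == 1 also waits for the writeout to complete and reports -EIO
 * if the page ends up with PageError() set.
 */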
/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (mapping) {
                        write_lock_irq(&mapping->tree_lock);
                        mapping2 = page_mapping(page);
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
                                        __inc_zone_page_state(page,
                                                NR_FILE_DIRTY);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
                        write_unlock_irq(&mapping->tree_lock);
                        if (mapping->host) {
                                /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
                        }
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
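
/*
 * Illustrative ->writepage pattern (sketch only; example_skip_this_page() is
 * a hypothetical predicate, not a real helper):
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		if (example_skip_this_page(page)) {
 *			redirty_page_for_writepage(wbc, page);
 *			unlock_page(page);
 *			return 0;
 *		}
 *		...
 *	}
 */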
/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                if (spd)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);
/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page_nosync(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);
/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        if (mapping) {
                write_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
                        /*
                         * We can continue to use `mapping' here because the
                         * page is locked, which pins the address_space
                         */
                        if (mapping_cap_account_dirty(mapping)) {
                                page_mkclean(page);
                                dec_zone_page_state(page, NR_FILE_DIRTY);
                        }
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);
/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping)) {
                                page_mkclean(page);
                                dec_zone_page_state(page, NR_FILE_DIRTY);
                        }
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}
int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);
/*
 * Wakes up tasks that are being throttled due to writeback congestion
 */
void writeback_congestion_end(void)
{
        blk_congestion_end(WRITE);
}
EXPORT_SYMBOL(writeback_congestion_end);
/*
 * Return true if any of the pages in the mapping are marked with the
 * "tag" tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);