1 /*
2  * mm/page-writeback.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  *
6  * Contains functions related to writing back dirty pages at the
7  * address_space level.
8  *
9  * 10Apr2002    akpm@zip.com.au
10  *              Initial version
11  */
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/spinlock.h>
16 #include <linux/fs.h>
17 #include <linux/mm.h>
18 #include <linux/swap.h>
19 #include <linux/slab.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/init.h>
23 #include <linux/backing-dev.h>
24 #include <linux/blkdev.h>
25 #include <linux/mpage.h>
26 #include <linux/rmap.h>
27 #include <linux/percpu.h>
28 #include <linux/notifier.h>
29 #include <linux/smp.h>
30 #include <linux/sysctl.h>
31 #include <linux/cpu.h>
32 #include <linux/syscalls.h>
33 #include <linux/buffer_head.h>
34 #include <linux/pagevec.h>
35
36 /*
37  * The maximum number of pages to writeout in a single bdflush/kupdate
38  * operation.  We do this so we don't hold I_LOCK against an inode for
39  * enormous amounts of time, which would block a userspace task which has
40  * been forced to throttle against that inode.  Also, the code reevaluates
41  * the dirty limits each time it has written this many pages.
42  */
43 #define MAX_WRITEBACK_PAGES     1024
44
45 /*
46  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
47  * will look to see if it needs to force writeback or throttling.
48  */
49 static long ratelimit_pages = 32;
50
51 static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */
52
53 /*
54  * When balance_dirty_pages decides that the caller needs to perform some
55  * non-background writeback, this is how many pages it will attempt to write.
56  * It should be somewhat larger than ratelimit_pages to ensure that reasonably
57  * large amounts of I/O are submitted.
58  */
59 static inline long sync_writeback_pages(void)
60 {
61         return ratelimit_pages + ratelimit_pages / 2;
62 }
63
64 /* The following parameters are exported via /proc/sys/vm */
65
66 /*
67  * Start background writeback (via pdflush) at this percentage
68  */
69 int dirty_background_ratio = 10;
70
71 /*
72  * The generator of dirty data starts writeback at this percentage
73  */
74 int vm_dirty_ratio = 40;
75
76 /*
77  * The interval between `kupdate'-style writebacks, in jiffies
78  */
79 int dirty_writeback_interval = 5 * HZ;
80
81 /*
82  * The longest number of jiffies for which data is allowed to remain dirty
83  */
84 int dirty_expire_interval = 30 * HZ;
85
86 /*
87  * Flag that makes the machine dump writes/reads and block dirtyings.
88  */
89 int block_dump;
90
91 /*
92  * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
93  * a full sync is triggered after this time elapses without any disk activity.
94  */
95 int laptop_mode;
96
97 EXPORT_SYMBOL(laptop_mode);
98
99 /* End of sysctl-exported parameters */
100
101
102 static void background_writeout(unsigned long _min_pages);
103
104 /*
105  * Work out the current dirty-memory clamping and background writeout
106  * thresholds.
107  *
108  * The main aim here is to lower them aggressively if there is a lot of mapped
109  * memory around, to avoid stressing page reclaim with lots of unreclaimable
110  * pages.  It is better to clamp down on writers than to start swapping and
111  * performing lots of scanning.
112  *
113  * We only allow 1/2 of the currently-unmapped memory to be dirtied.
114  *
115  * We don't permit the clamping level to fall below 5% - that is getting rather
116  * excessive.
117  *
118  * We make sure that the background writeout level is below the adjusted
119  * clamping level.
120  */
121 static void
122 get_dirty_limits(long *pbackground, long *pdirty,
123                                         struct address_space *mapping)
124 {
125         int background_ratio;           /* Percentages */
126         int dirty_ratio;
127         int unmapped_ratio;
128         long background;
129         long dirty;
130         unsigned long available_memory = vm_total_pages;
131         struct task_struct *tsk;
132
133 #ifdef CONFIG_HIGHMEM
134         /*
135          * If this mapping can only allocate from low memory,
136          * we exclude high memory from our count.
137          */
138         if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
139                 available_memory -= totalhigh_pages;
140 #endif
141
142
143         unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
144                                 global_page_state(NR_ANON_PAGES)) * 100) /
145                                         vm_total_pages;
146
147         dirty_ratio = vm_dirty_ratio;
148         if (dirty_ratio > unmapped_ratio / 2)
149                 dirty_ratio = unmapped_ratio / 2;
150
151         if (dirty_ratio < 5)
152                 dirty_ratio = 5;
153
154         background_ratio = dirty_background_ratio;
155         if (background_ratio >= dirty_ratio)
156                 background_ratio = dirty_ratio / 2;
157
158         background = (background_ratio * available_memory) / 100;
159         dirty = (dirty_ratio * available_memory) / 100;
160         tsk = current;
161         if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
162                 background += background / 4;
163                 dirty += dirty / 4;
164         }
165         *pbackground = background;
166         *pdirty = dirty;
167 }
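/*
 * Worked example (illustrative numbers only): on a machine with 262144
 * pages (1GB with 4k pages) of which half are file-mapped or anonymous,
 * unmapped_ratio comes out at 50, so vm_dirty_ratio (40) is clamped to 25
 * while dirty_background_ratio (10) is left alone.  That yields a dirty
 * threshold of 65536 pages (~256MB) and a background threshold of 26214
 * pages (~102MB); PF_LESS_THROTTLE and realtime tasks are then given an
 * extra 25% of headroom on both.
 */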
168
169 /*
170  * balance_dirty_pages() must be called by processes which are generating dirty
171  * data.  It looks at the number of dirty pages in the machine and will force
172  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
173  * If we're over `background_thresh' then pdflush is woken to perform some
174  * writeout.
175  */
176 static void balance_dirty_pages(struct address_space *mapping)
177 {
178         long nr_reclaimable;
179         long background_thresh;
180         long dirty_thresh;
181         unsigned long pages_written = 0;
182         unsigned long write_chunk = sync_writeback_pages();
183
184         struct backing_dev_info *bdi = mapping->backing_dev_info;
185
186         for (;;) {
187                 struct writeback_control wbc = {
188                         .bdi            = bdi,
189                         .sync_mode      = WB_SYNC_NONE,
190                         .older_than_this = NULL,
191                         .nr_to_write    = write_chunk,
192                         .range_cyclic   = 1,
193                 };
194
195                 get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
196                 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
197                                         global_page_state(NR_UNSTABLE_NFS);
198                 if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
199                         dirty_thresh)
200                                 break;
201
202                 if (!dirty_exceeded)
203                         dirty_exceeded = 1;
204
205                 /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
206                  * Unstable writes are a feature of certain networked
207                  * filesystems (i.e. NFS) in which data may have been
208                  * written to the server's write cache, but has not yet
209                  * been flushed to permanent storage.
210                  */
211                 if (nr_reclaimable) {
212                         writeback_inodes(&wbc);
213                         get_dirty_limits(&background_thresh,
214                                                 &dirty_thresh, mapping);
215                         nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
216                                         global_page_state(NR_UNSTABLE_NFS);
217                         if (nr_reclaimable +
218                                 global_page_state(NR_WRITEBACK)
219                                         <= dirty_thresh)
220                                                 break;
221                         pages_written += write_chunk - wbc.nr_to_write;
222                         if (pages_written >= write_chunk)
223                                 break;          /* We've done our duty */
224                 }
225                 congestion_wait(WRITE, HZ/10);
226         }
227
228         if (nr_reclaimable + global_page_state(NR_WRITEBACK)
229                 <= dirty_thresh && dirty_exceeded)
230                         dirty_exceeded = 0;
231
232         if (writeback_in_progress(bdi))
233                 return;         /* pdflush is already working this queue */
234
235         /*
236          * In laptop mode, we wait until hitting the higher threshold before
237          * starting background writeout, and then write out all the way down
238          * to the lower threshold.  So slow writers cause minimal disk activity.
239          *
240          * In normal mode, we start background writeout at the lower
241          * background_thresh, to keep the amount of dirty memory low.
242          */
243         if ((laptop_mode && pages_written) ||
244              (!laptop_mode && (nr_reclaimable > background_thresh)))
245                 pdflush_operation(background_writeout, 0);
246 }
247
248 void set_page_dirty_balance(struct page *page)
249 {
250         if (set_page_dirty(page)) {
251                 struct address_space *mapping = page_mapping(page);
252
253                 if (mapping)
254                         balance_dirty_pages_ratelimited(mapping);
255         }
256 }
257
258 /**
259  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
260  * @mapping: address_space which was dirtied
261  * @nr_pages_dirtied: number of pages which the caller has just dirtied
262  *
263  * Processes which are dirtying memory should call in here once for each page
264  * which was newly dirtied.  The function will periodically check the system's
265  * dirty state and will initiate writeback if needed.
266  *
267  * On really big machines, get_writeback_state is expensive, so try to avoid
268  * calling it too often (ratelimiting).  But once we're over the dirty memory
269  * limit we decrease the ratelimiting by a lot, to prevent individual processes
270  * from overshooting the limit by (ratelimit_pages) each.
271  */
272 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
273                                         unsigned long nr_pages_dirtied)
274 {
275         static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
276         unsigned long ratelimit;
277         unsigned long *p;
278
279         ratelimit = ratelimit_pages;
280         if (dirty_exceeded)
281                 ratelimit = 8;
282
283         /*
284          * Check the rate limiting. Also, we do not want to throttle real-time
285          * tasks in balance_dirty_pages(). Period.
286          */
287         preempt_disable();
288         p =  &__get_cpu_var(ratelimits);
289         *p += nr_pages_dirtied;
290         if (unlikely(*p >= ratelimit)) {
291                 *p = 0;
292                 preempt_enable();
293                 balance_dirty_pages(mapping);
294                 return;
295         }
296         preempt_enable();
297 }
298 EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
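/*
 * Illustrative sketch of a caller: the generic buffered write path
 * effectively does the following for every page it dirties, which is how
 * ordinary writers end up throttled in balance_dirty_pages() above.
 * balance_dirty_pages_ratelimited() is simply the nr_pages_dirtied == 1
 * wrapper around the function above:
 *
 *	set_page_dirty(page);
 *	balance_dirty_pages_ratelimited(mapping);
 */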
299
300 void throttle_vm_writeout(void)
301 {
302         long background_thresh;
303         long dirty_thresh;
304
305         for ( ; ; ) {
306                 get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
307
308                 /*
309                  * Boost the allowable dirty threshold a bit for page
310                  * allocators so they don't get DoS'ed by heavy writers
311                  */
312                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
313
314                 if (global_page_state(NR_UNSTABLE_NFS) +
315                         global_page_state(NR_WRITEBACK) <= dirty_thresh)
316                                 break;
317                 congestion_wait(WRITE, HZ/10);
318         }
319 }
320
321
322 /*
323  * writeback at least _min_pages, and keep writing until the amount of dirty
324  * memory is less than the background threshold, or until we're all clean.
325  */
326 static void background_writeout(unsigned long _min_pages)
327 {
328         long min_pages = _min_pages;
329         struct writeback_control wbc = {
330                 .bdi            = NULL,
331                 .sync_mode      = WB_SYNC_NONE,
332                 .older_than_this = NULL,
333                 .nr_to_write    = 0,
334                 .nonblocking    = 1,
335                 .range_cyclic   = 1,
336         };
337
338         for ( ; ; ) {
339                 long background_thresh;
340                 long dirty_thresh;
341
342                 get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
343                 if (global_page_state(NR_FILE_DIRTY) +
344                         global_page_state(NR_UNSTABLE_NFS) < background_thresh
345                                 && min_pages <= 0)
346                         break;
347                 wbc.encountered_congestion = 0;
348                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
349                 wbc.pages_skipped = 0;
350                 writeback_inodes(&wbc);
351                 min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
352                 if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
353                         /* Wrote less than expected */
354                         congestion_wait(WRITE, HZ/10);
355                         if (!wbc.encountered_congestion)
356                                 break;
357                 }
358         }
359 }
360
361 /*
362  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
363  * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
364  * -1 if all pdflush threads were busy.
365  */
366 int wakeup_pdflush(long nr_pages)
367 {
368         if (nr_pages == 0)
369                 nr_pages = global_page_state(NR_FILE_DIRTY) +
370                                 global_page_state(NR_UNSTABLE_NFS);
371         return pdflush_operation(background_writeout, nr_pages);
372 }
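/*
 * Illustrative sketch of a caller: when free memory runs low, reclaim-side
 * code can kick writeback of everything that is dirty.  A negative return
 * only means that every pdflush thread was already busy, which is normally
 * harmless to ignore:
 *
 *	if (wakeup_pdflush(0) < 0)
 *		printk(KERN_DEBUG "all pdflush threads are busy\n");
 */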
373
374 static void wb_timer_fn(unsigned long unused);
375 static void laptop_timer_fn(unsigned long unused);
376
377 static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
378 static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
379
380 /*
381  * Periodic writeback of "old" data.
382  *
383  * Define "old": the first time one of an inode's pages is dirtied, we mark the
384  * dirtying-time in the inode's address_space.  So this periodic writeback code
385  * just walks the superblock inode list, writing back any inodes which are
386  * older than a specific point in time.
387  *
388  * Try to run once per dirty_writeback_interval.  But if a writeback event
389  * takes longer than one dirty_writeback_interval, then leave a
390  * one-second gap.
391  *
392  * older_than_this takes precedence over nr_to_write.  So we'll only write back
393  * all dirty pages if they are all attached to "old" mappings.
394  */
395 static void wb_kupdate(unsigned long arg)
396 {
397         unsigned long oldest_jif;
398         unsigned long start_jif;
399         unsigned long next_jif;
400         long nr_to_write;
401         struct writeback_control wbc = {
402                 .bdi            = NULL,
403                 .sync_mode      = WB_SYNC_NONE,
404                 .older_than_this = &oldest_jif,
405                 .nr_to_write    = 0,
406                 .nonblocking    = 1,
407                 .for_kupdate    = 1,
408                 .range_cyclic   = 1,
409         };
410
411         sync_supers();
412
413         oldest_jif = jiffies - dirty_expire_interval;
414         start_jif = jiffies;
415         next_jif = start_jif + dirty_writeback_interval;
416         nr_to_write = global_page_state(NR_FILE_DIRTY) +
417                         global_page_state(NR_UNSTABLE_NFS) +
418                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
419         while (nr_to_write > 0) {
420                 wbc.encountered_congestion = 0;
421                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
422                 writeback_inodes(&wbc);
423                 if (wbc.nr_to_write > 0) {
424                         if (wbc.encountered_congestion)
425                                 congestion_wait(WRITE, HZ/10);
426                         else
427                                 break;  /* All the old data is written */
428                 }
429                 nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
430         }
431         if (time_before(next_jif, jiffies + HZ))
432                 next_jif = jiffies + HZ;
433         if (dirty_writeback_interval)
434                 mod_timer(&wb_timer, next_jif);
435 }
436
437 /*
438  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
439  */
440 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
441                 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
442 {
443         proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
444         if (dirty_writeback_interval) {
445                 mod_timer(&wb_timer,
446                         jiffies + dirty_writeback_interval);
447         } else {
448                 del_timer(&wb_timer);
449         }
450         return 0;
451 }
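/*
 * Worked example (HZ=250 assumed purely for illustration): the value
 * written to /proc/sys/vm/dirty_writeback_centisecs is in USER_HZ
 * (centisecond) units and proc_dointvec_userhz_jiffies() converts it to
 * jiffies.  Writing 500 therefore sets dirty_writeback_interval to
 * 5 * HZ = 1250 jiffies and re-arms the kupdate timer; writing 0 disables
 * periodic writeback altogether via del_timer().
 */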
452
453 static void wb_timer_fn(unsigned long unused)
454 {
455         if (pdflush_operation(wb_kupdate, 0) < 0)
456                 mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
457 }
458
459 static void laptop_flush(unsigned long unused)
460 {
461         sys_sync();
462 }
463
464 static void laptop_timer_fn(unsigned long unused)
465 {
466         pdflush_operation(laptop_flush, 0);
467 }
468
469 /*
470  * We've spun up the disk and we're in laptop mode: schedule writeback
471  * of all dirty data a few seconds from now.  If the flush is already scheduled
472  * then push it back - the user is still using the disk.
473  */
474 void laptop_io_completion(void)
475 {
476         mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
477 }
478
479 /*
480  * We're in laptop mode and we've just synced. The sync's writes will have
481  * caused another writeback to be scheduled by laptop_io_completion.
482  * Nothing needs to be written back anymore, so we unschedule the writeback.
483  */
484 void laptop_sync_completion(void)
485 {
486         del_timer(&laptop_mode_wb_timer);
487 }
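/*
 * Illustrative sketch: the "disk activity" that arms this timer is reported
 * by the block layer; in this era its request completion path does roughly
 *
 *	if (laptop_mode && blk_fs_request(req))
 *		laptop_io_completion();
 *
 * so every completed filesystem request pushes the laptop-mode flush a few
 * more seconds into the future while the disk is in use.
 */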
488
489 /*
490  * If ratelimit_pages is too high then we can get into dirty-data overload
491  * if a large number of processes all perform writes at the same time.
492  * If it is too low then SMP machines will call the (expensive)
493  * get_writeback_state too often.
494  *
495  * Here we set ratelimit_pages to a level which ensures that when all CPUs are
496  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
497  * thresholds before writeback cuts in.
498  *
499  * But the limit should not be set too high.  Because it also controls the
500  * amount of memory which the balance_dirty_pages() caller has to write back.
501  * If this is too large then the caller will block on the IO queue all the
502  * time.  So limit it to four megabytes - the balance_dirty_pages() caller
503  * will write six megabyte chunks, max.
504  */
505
506 void writeback_set_ratelimit(void)
507 {
508         ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
509         if (ratelimit_pages < 16)
510                 ratelimit_pages = 16;
511         if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
512                 ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
513 }
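/*
 * Worked example (illustrative numbers, 4k pages assumed): with 262144
 * total pages and 4 online CPUs, 262144 / (4 * 32) = 2048 pages, i.e. 8MB
 * per CPU.  That exceeds the 4MB cap, so ratelimit_pages is clamped to
 * 4MB / 4k = 1024 pages, and sync_writeback_pages() becomes 1536 pages -
 * the "six megabyte chunks, max" mentioned above.
 */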
514
515 static int __cpuinit
516 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
517 {
518         writeback_set_ratelimit();
519         return 0;
520 }
521
522 static struct notifier_block __cpuinitdata ratelimit_nb = {
523         .notifier_call  = ratelimit_handler,
524         .next           = NULL,
525 };
526
527 /*
528  * If the machine has a large highmem:lowmem ratio then scale back the default
529  * dirty memory thresholds: allowing too much dirty highmem pins an excessive
530  * number of buffer_heads.
531  */
532 void __init page_writeback_init(void)
533 {
534         long buffer_pages = nr_free_buffer_pages();
535         long correction;
536
537         correction = (100 * 4 * buffer_pages) / vm_total_pages;
538
539         if (correction < 100) {
540                 dirty_background_ratio *= correction;
541                 dirty_background_ratio /= 100;
542                 vm_dirty_ratio *= correction;
543                 vm_dirty_ratio /= 100;
544
545                 if (dirty_background_ratio <= 0)
546                         dirty_background_ratio = 1;
547                 if (vm_dirty_ratio <= 0)
548                         vm_dirty_ratio = 1;
549         }
550         mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
551         writeback_set_ratelimit();
552         register_cpu_notifier(&ratelimit_nb);
553 }
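/*
 * Worked example (illustrative numbers): on a highmem machine where only
 * 1/8 of memory is usable for buffer_heads, correction = (100 * 4) / 8
 * = 50, so the defaults become dirty_background_ratio = 5 and
 * vm_dirty_ratio = 20.  Were lowmem a mere 1% of memory, both ratios
 * would scale to zero and be clamped back up to 1.
 */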
554
555 /**
556  * generic_writepages - walk the list of dirty pages of the given
557  *                      address space and writepage() all of them.
558  *
559  * @mapping: address space structure to write
560  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
561  *
562  * This is a library function, which implements the writepages()
563  * address_space_operation.
564  *
565  * If a page is already under I/O, generic_writepages() skips it, even
566  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
567  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
568  * and msync() need to guarantee that all the data which was dirty at the time
569  * the call was made get new I/O started against them.  If wbc->sync_mode is
570  * WB_SYNC_ALL then we were called for data integrity and we must wait for
571  * existing IO to complete.
572  *
573  * Derived from mpage_writepages() - if you fix this you should check that
574  * also!
575  */
576 int generic_writepages(struct address_space *mapping,
577                        struct writeback_control *wbc)
578 {
579         struct backing_dev_info *bdi = mapping->backing_dev_info;
580         int ret = 0;
581         int done = 0;
582         int (*writepage)(struct page *page, struct writeback_control *wbc);
583         struct pagevec pvec;
584         int nr_pages;
585         pgoff_t index;
586         pgoff_t end;            /* Inclusive */
587         int scanned = 0;
588         int range_whole = 0;
589
590         if (wbc->nonblocking && bdi_write_congested(bdi)) {
591                 wbc->encountered_congestion = 1;
592                 return 0;
593         }
594
595         writepage = mapping->a_ops->writepage;
596
597         /* deal with chardevs and other special files */
598         if (!writepage)
599                 return 0;
600
601         pagevec_init(&pvec, 0);
602         if (wbc->range_cyclic) {
603                 index = mapping->writeback_index; /* Start from prev offset */
604                 end = -1;
605         } else {
606                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
607                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
608                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
609                         range_whole = 1;
610                 scanned = 1;
611         }
612 retry:
613         while (!done && (index <= end) &&
614                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
615                                               PAGECACHE_TAG_DIRTY,
616                                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
617                 unsigned i;
618
619                 scanned = 1;
620                 for (i = 0; i < nr_pages; i++) {
621                         struct page *page = pvec.pages[i];
622
623                         /*
624                          * At this point we hold neither mapping->tree_lock nor
625                          * lock on the page itself: the page may be truncated or
626                          * invalidated (changing page->mapping to NULL), or even
627                          * swizzled back from swapper_space to tmpfs file
628                          * mapping
629                          */
630                         lock_page(page);
631
632                         if (unlikely(page->mapping != mapping)) {
633                                 unlock_page(page);
634                                 continue;
635                         }
636
637                         if (!wbc->range_cyclic && page->index > end) {
638                                 done = 1;
639                                 unlock_page(page);
640                                 continue;
641                         }
642
643                         if (wbc->sync_mode != WB_SYNC_NONE)
644                                 wait_on_page_writeback(page);
645
646                         if (PageWriteback(page) ||
647                             !clear_page_dirty_for_io(page)) {
648                                 unlock_page(page);
649                                 continue;
650                         }
651
652                         ret = (*writepage)(page, wbc);
653                         if (ret) {
654                                 if (ret == -ENOSPC)
655                                         set_bit(AS_ENOSPC, &mapping->flags);
656                                 else
657                                         set_bit(AS_EIO, &mapping->flags);
658                         }
659
660                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
661                                 unlock_page(page);
662                         if (ret || (--(wbc->nr_to_write) <= 0))
663                                 done = 1;
664                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
665                                 wbc->encountered_congestion = 1;
666                                 done = 1;
667                         }
668                 }
669                 pagevec_release(&pvec);
670                 cond_resched();
671         }
672         if (!scanned && !done) {
673                 /*
674                  * We hit the last page and there is more work to be done: wrap
675                  * back to the start of the file
676                  */
677                 scanned = 1;
678                 index = 0;
679                 goto retry;
680         }
681         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
682                 mapping->writeback_index = index;
683         return ret;
684 }
685
686 EXPORT_SYMBOL(generic_writepages);
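/*
 * Illustrative sketch (the foo_* names are made up): a filesystem whose
 * ->writepage needs no batching of its own can either leave ->writepages
 * NULL, letting do_writepages() below fall back to generic_writepages(),
 * or wire the generic helper up explicitly:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.writepage	= foo_writepage,
 *		.writepages	= generic_writepages,
 *	};
 */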
687
688 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
689 {
690         int ret;
691
692         if (wbc->nr_to_write <= 0)
693                 return 0;
694         wbc->for_writepages = 1;
695         if (mapping->a_ops->writepages)
696                 ret = mapping->a_ops->writepages(mapping, wbc);
697         else
698                 ret = generic_writepages(mapping, wbc);
699         wbc->for_writepages = 0;
700         return ret;
701 }
702
703 /**
704  * write_one_page - write out a single page and optionally wait on I/O
705  *
706  * @page: the page to write
707  * @wait: if true, wait on writeout
708  *
709  * The page must be locked by the caller and will be unlocked upon return.
710  *
711  * write_one_page() returns a negative error code if I/O failed.
712  */
713 int write_one_page(struct page *page, int wait)
714 {
715         struct address_space *mapping = page->mapping;
716         int ret = 0;
717         struct writeback_control wbc = {
718                 .sync_mode = WB_SYNC_ALL,
719                 .nr_to_write = 1,
720         };
721
722         BUG_ON(!PageLocked(page));
723
724         if (wait)
725                 wait_on_page_writeback(page);
726
727         if (clear_page_dirty_for_io(page)) {
728                 page_cache_get(page);
729                 ret = mapping->a_ops->writepage(page, &wbc);
730                 if (ret == 0 && wait) {
731                         wait_on_page_writeback(page);
732                         if (PageError(page))
733                                 ret = -EIO;
734                 }
735                 page_cache_release(page);
736         } else {
737                 unlock_page(page);
738         }
739         return ret;
740 }
741 EXPORT_SYMBOL(write_one_page);
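/*
 * Illustrative sketch of a caller: a filesystem that must push a single
 * metadata page to disk synchronously locks the page, dirties it and hands
 * it to write_one_page(), which unlocks it again and reports I/O errors as
 * a negative return value:
 *
 *	lock_page(page);
 *	set_page_dirty(page);
 *	err = write_one_page(page, 1);
 */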
742
743 /*
744  * For address_spaces which do not use buffers.  Just tag the page as dirty in
745  * its radix tree.
746  *
747  * This is also used when a single buffer is being dirtied: we want to set the
748  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
749  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
750  *
751  * Most callers have locked the page, which pins the address_space in memory.
752  * But zap_pte_range() does not lock the page, however in that case the
753  * mapping is pinned by the vma's ->vm_file reference.
754  *
755  * We take care to handle the case where the page was truncated from the
756  * mapping by re-checking page_mapping() inside tree_lock.
757  */
758 int __set_page_dirty_nobuffers(struct page *page)
759 {
760         if (!TestSetPageDirty(page)) {
761                 struct address_space *mapping = page_mapping(page);
762                 struct address_space *mapping2;
763
764                 if (mapping) {
765                         write_lock_irq(&mapping->tree_lock);
766                         mapping2 = page_mapping(page);
767                         if (mapping2) { /* Race with truncate? */
768                                 BUG_ON(mapping2 != mapping);
769                                 if (mapping_cap_account_dirty(mapping))
770                                         __inc_zone_page_state(page,
771                                                                 NR_FILE_DIRTY);
772                                 radix_tree_tag_set(&mapping->page_tree,
773                                         page_index(page), PAGECACHE_TAG_DIRTY);
774                         }
775                         write_unlock_irq(&mapping->tree_lock);
776                         if (mapping->host) {
777                                 /* !PageAnon && !swapper_space */
778                                 __mark_inode_dirty(mapping->host,
779                                                         I_DIRTY_PAGES);
780                         }
781                 }
782                 return 1;
783         }
784         return 0;
785 }
786 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
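/*
 * Illustrative sketch (the foo_* names are made up): an address_space that
 * keeps no buffer_heads on its pages can use this helper directly as its
 * ->set_page_dirty operation:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.writepage	= foo_writepage,
 *		.set_page_dirty	= __set_page_dirty_nobuffers,
 *	};
 */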
787
788 /*
789  * When a writepage implementation decides that it doesn't want to write this
790  * page for some reason, it should redirty the locked page via
791  * redirty_page_for_writepage() and it should then unlock the page and return 0
792  */
793 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
794 {
795         wbc->pages_skipped++;
796         return __set_page_dirty_nobuffers(page);
797 }
798 EXPORT_SYMBOL(redirty_page_for_writepage);
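/*
 * Illustrative sketch (foo_* names are made up): a ->writepage that cannot
 * make progress right now backs out like this instead of silently dropping
 * the page's dirty state:
 *
 *	static int foo_writepage(struct page *page, struct writeback_control *wbc)
 *	{
 *		if (foo_cannot_write_now(page)) {
 *			redirty_page_for_writepage(wbc, page);
 *			unlock_page(page);
 *			return 0;
 *		}
 *		...
 *	}
 */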
799
800 /*
801  * If the mapping doesn't provide a set_page_dirty a_op, then
802  * just fall through and assume that it wants buffer_heads.
803  */
804 int fastcall set_page_dirty(struct page *page)
805 {
806         struct address_space *mapping = page_mapping(page);
807
808         if (likely(mapping)) {
809                 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
810 #ifdef CONFIG_BLOCK
811                 if (!spd)
812                         spd = __set_page_dirty_buffers;
813 #endif
814                 return (*spd)(page);
815         }
816         if (!PageDirty(page)) {
817                 if (!TestSetPageDirty(page))
818                         return 1;
819         }
820         return 0;
821 }
822 EXPORT_SYMBOL(set_page_dirty);
823
824 /*
825  * set_page_dirty() is racy if the caller has no reference against
826  * page->mapping->host, and if the page is unlocked.  This is because another
827  * CPU could truncate the page off the mapping and then free the mapping.
828  *
829  * Usually, the page _is_ locked, or the caller is a user-space process which
830  * holds a reference on the inode by having an open file.
831  *
832  * In other cases, the page should be locked before running set_page_dirty().
833  */
834 int set_page_dirty_lock(struct page *page)
835 {
836         int ret;
837
838         lock_page_nosync(page);
839         ret = set_page_dirty(page);
840         unlock_page(page);
841         return ret;
842 }
843 EXPORT_SYMBOL(set_page_dirty_lock);
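/*
 * Illustrative sketch of a caller: a driver or direct-I/O completion path
 * that has just read data into user pages obtained with get_user_pages()
 * holds neither the page lock nor an inode reference, so it dirties the
 * pages with the locked variant:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		page_cache_release(pages[i]);
 *	}
 */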
844
845 /*
846  * Clear a page's dirty flag, while caring for dirty memory accounting. 
847  * Returns true if the page was previously dirty.
848  */
849 int test_clear_page_dirty(struct page *page)
850 {
851         struct address_space *mapping = page_mapping(page);
852         unsigned long flags;
853
854         if (mapping) {
855                 write_lock_irqsave(&mapping->tree_lock, flags);
856                 if (TestClearPageDirty(page)) {
857                         radix_tree_tag_clear(&mapping->page_tree,
858                                                 page_index(page),
859                                                 PAGECACHE_TAG_DIRTY);
860                         write_unlock_irqrestore(&mapping->tree_lock, flags);
861                         /*
862                          * We can continue to use `mapping' here because the
863                          * page is locked, which pins the address_space
864                          */
865                         if (mapping_cap_account_dirty(mapping)) {
866                                 page_mkclean(page);
867                                 dec_zone_page_state(page, NR_FILE_DIRTY);
868                         }
869                         return 1;
870                 }
871                 write_unlock_irqrestore(&mapping->tree_lock, flags);
872                 return 0;
873         }
874         return TestClearPageDirty(page);
875 }
876 EXPORT_SYMBOL(test_clear_page_dirty);
877
878 /*
879  * Clear a page's dirty flag, while caring for dirty memory accounting.
880  * Returns true if the page was previously dirty.
881  *
882  * This is for preparing to put the page under writeout.  We leave the page
883  * tagged as dirty in the radix tree so that a concurrent write-for-sync
884  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
885  * implementation will run either set_page_writeback() or set_page_dirty(),
886  * at which stage we bring the page's dirty flag and radix-tree dirty tag
887  * back into sync.
888  *
889  * This incoherency between the page's dirty flag and radix-tree tag is
890  * unfortunate, but it only exists while the page is locked.
891  */
892 int clear_page_dirty_for_io(struct page *page)
893 {
894         struct address_space *mapping = page_mapping(page);
895
896         if (mapping) {
897                 if (TestClearPageDirty(page)) {
898                         if (mapping_cap_account_dirty(mapping)) {
899                                 page_mkclean(page);
900                                 dec_zone_page_state(page, NR_FILE_DIRTY);
901                         }
902                         return 1;
903                 }
904                 return 0;
905         }
906         return TestClearPageDirty(page);
907 }
908 EXPORT_SYMBOL(clear_page_dirty_for_io);
909
910 int test_clear_page_writeback(struct page *page)
911 {
912         struct address_space *mapping = page_mapping(page);
913         int ret;
914
915         if (mapping) {
916                 unsigned long flags;
917
918                 write_lock_irqsave(&mapping->tree_lock, flags);
919                 ret = TestClearPageWriteback(page);
920                 if (ret)
921                         radix_tree_tag_clear(&mapping->page_tree,
922                                                 page_index(page),
923                                                 PAGECACHE_TAG_WRITEBACK);
924                 write_unlock_irqrestore(&mapping->tree_lock, flags);
925         } else {
926                 ret = TestClearPageWriteback(page);
927         }
928         return ret;
929 }
930
931 int test_set_page_writeback(struct page *page)
932 {
933         struct address_space *mapping = page_mapping(page);
934         int ret;
935
936         if (mapping) {
937                 unsigned long flags;
938
939                 write_lock_irqsave(&mapping->tree_lock, flags);
940                 ret = TestSetPageWriteback(page);
941                 if (!ret)
942                         radix_tree_tag_set(&mapping->page_tree,
943                                                 page_index(page),
944                                                 PAGECACHE_TAG_WRITEBACK);
945                 if (!PageDirty(page))
946                         radix_tree_tag_clear(&mapping->page_tree,
947                                                 page_index(page),
948                                                 PAGECACHE_TAG_DIRTY);
949                 write_unlock_irqrestore(&mapping->tree_lock, flags);
950         } else {
951                 ret = TestSetPageWriteback(page);
952         }
953         return ret;
954
955 }
956 EXPORT_SYMBOL(test_set_page_writeback);
957
958 /*
959  * Return true if any of the pages in the mapping are marked with the
960  * passed tag.
961  */
962 int mapping_tagged(struct address_space *mapping, int tag)
963 {
964         unsigned long flags;
965         int ret;
966
967         read_lock_irqsave(&mapping->tree_lock, flags);
968         ret = radix_tree_tagged(&mapping->page_tree, tag);
969         read_unlock_irqrestore(&mapping->tree_lock, flags);
970         return ret;
971 }
972 EXPORT_SYMBOL(mapping_tagged);