/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002    akpm@zip.com.au
 *              Initial version
 * 27Jun2002    axboe@suse.de
 *              use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        if (bio->bi_size)
                return 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (uptodate) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        return 0;
}

static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        if (bio->bi_size)
                return 1;

        do {
                struct page *page = bvec->bv_page;

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                if (!uptodate) {
                        SetPageError(page);
                        if (page->mapping)
                                set_bit(AS_EIO, &page->mapping->flags);
                }
                end_page_writeback(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        return 0;
}

static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
        bio->bi_end_io = mpage_end_io_read;
        if (rw == WRITE)
                bio->bi_end_io = mpage_end_io_write;
        submit_bio(rw, bio);
        return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
                sector_t first_sector, int nr_vecs,
                gfp_t gfp_flags)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, nr_vecs);

        if (bio == NULL && (current->flags & PF_MEMALLOC)) {
                while (!bio && (nr_vecs /= 2))
                        bio = bio_alloc(gfp_flags, nr_vecs);
        }

        if (bio) {
                bio->bi_bdev = bdev;
                bio->bi_sector = first_sector;
        }
        return bio;
}

/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bh, *head;
        int block = 0;

        if (!page_has_buffers(page)) {
                /*
                 * don't make any buffers if there is only one buffer on
                 * the page and the page just needs to be set up to date
                 */
                if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
                    buffer_uptodate(bh)) {
                        SetPageUptodate(page);
                        return;
                }
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        }
        head = page_buffers(page);
        page_bh = head;
        do {
                if (block == page_block) {
                        page_bh->b_state = bh->b_state;
                        page_bh->b_bdev = bh->b_bdev;
                        page_bh->b_blocknr = bh->b_blocknr;
                        break;
                }
                page_bh = page_bh->b_this_page;
                block++;
        } while (page_bh != head);
}

static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                        sector_t *last_block_in_bio, get_block_t get_block)
{
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_hole = blocks_per_page;
        struct block_device *bdev = NULL;
        struct buffer_head bh;
        int length;
        int fully_mapped = 1;

        if (page_has_buffers(page))
                goto confused;

        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;

        bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page;
                                page_block++, block_in_file++) {
                bh.b_state = 0;
                if (block_in_file < last_block) {
                        if (get_block(inode, block_in_file, &bh, 0))
                                goto confused;
                }

                if (!buffer_mapped(&bh)) {
                        fully_mapped = 0;
                        if (first_hole == blocks_per_page)
                                first_hole = page_block;
                        continue;
                }

                /* some filesystems will copy data into the page during
                 * the get_block call, in which case we don't want to
                 * read it again.  map_buffer_to_page copies the data
                 * we just collected from get_block into the page's buffers
                 * so readpage doesn't have to repeat the get_block call
                 */
                if (buffer_uptodate(&bh)) {
                        map_buffer_to_page(page, &bh, page_block);
                        goto confused;
                }

                if (first_hole != blocks_per_page)
                        goto confused;          /* hole -> non-hole */

                /* Contiguous blocks? */
                if (page_block && blocks[page_block-1] != bh.b_blocknr-1)
                        goto confused;
                blocks[page_block] = bh.b_blocknr;
                bdev = bh.b_bdev;
        }

        if (first_hole != blocks_per_page) {
                char *kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + (first_hole << blkbits), 0,
                                PAGE_CACHE_SIZE - (first_hole << blkbits));
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
                if (first_hole == 0) {
                        SetPageUptodate(page);
                        unlock_page(page);
                        goto out;
                }
        } else if (fully_mapped) {
                SetPageMappedToDisk(page);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && (*last_block_in_bio != blocks[0] - 1))
                bio = mpage_bio_submit(READ, bio);

alloc_new:
        if (bio == NULL) {
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
                                GFP_KERNEL);
                if (bio == NULL)
                        goto confused;
        }

        length = first_hole << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(READ, bio);
                goto alloc_new;
        }

        if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
                bio = mpage_bio_submit(READ, bio);
        else
                *last_block_in_bio = blocks[blocks_per_page - 1];
out:
        return bio;

confused:
        if (bio)
                bio = mpage_bio_submit(READ, bio);
        if (!PageUptodate(page))
                block_read_full_page(page, get_block);
        else
                unlock_page(page);
        goto out;
}

/**
 * mpage_readpages - populate an address space with some pages, and
 *                       start reads against them.
 *
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *      12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
                                unsigned nr_pages, get_block_t get_block)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;
        struct pagevec lru_pvec;

        pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_entry(pages->prev, struct page, lru);

                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, get_block);
                        if (!pagevec_add(&lru_pvec, page))
                                __pagevec_lru_add(&lru_pvec);
                } else {
                        page_cache_release(page);
                }
        }
        pagevec_lru_add(&lru_pvec);
        BUG_ON(!list_empty(pages));
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpages);
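
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * which supplies a get_block_t can plug mpage_readpages() straight into
 * its ->readpages() address_space operation.  The examplefs_* names below
 * are hypothetical; ext2 uses the same pattern with ext2_get_block().
 * A get_block() implementation may also call set_buffer_boundary() on the
 * buffer_head it maps, to request that accumulated read I/O be pushed
 * (see the BH_Boundary explanation above).
 *
 *      static int examplefs_readpages(struct file *file,
 *                      struct address_space *mapping,
 *                      struct list_head *pages, unsigned nr_pages)
 *      {
 *              return mpage_readpages(mapping, pages, nr_pages,
 *                                      examplefs_get_block);
 *      }
 */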

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;

        bio = do_mpage_readpage(bio, page, 1,
                        &last_block_in_bio, get_block);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
}
EXPORT_SYMBOL(mpage_readpage);
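
/*
 * Likewise (illustrative, hypothetical names), mpage_readpage() slots
 * directly into ->readpage() for filesystems which have a get_block_t:
 *
 *      static int examplefs_readpage(struct file *file, struct page *page)
 *      {
 *              return mpage_readpage(page, examplefs_get_block);
 *      }
 */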

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */
static struct bio *
__mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
        sector_t *last_block_in_bio, int *ret, struct writeback_control *wbc,
        writepage_t writepage_fn)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        unsigned long end_index;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        sector_t last_block;
        sector_t block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_unmapped = blocks_per_page;
        struct block_device *bdev = NULL;
        int boundary = 0;
        sector_t boundary_block = 0;
        struct block_device *boundary_bdev = NULL;
        int length;
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);

        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                /* If they're all mapped and dirty, do it */
                page_block = 0;
                do {
                        BUG_ON(buffer_locked(bh));
                        if (!buffer_mapped(bh)) {
                                /*
                                 * unmapped dirty buffers are created by
                                 * __set_page_dirty_buffers -> mmapped data
                                 */
                                if (buffer_dirty(bh))
                                        goto confused;
                                if (first_unmapped == blocks_per_page)
                                        first_unmapped = page_block;
                                continue;
                        }

                        if (first_unmapped != blocks_per_page)
                                goto confused;  /* hole -> non-hole */

                        if (!buffer_dirty(bh) || !buffer_uptodate(bh))
                                goto confused;
                        if (page_block) {
                                if (bh->b_blocknr != blocks[page_block-1] + 1)
                                        goto confused;
                        }
                        blocks[page_block++] = bh->b_blocknr;
                        boundary = buffer_boundary(bh);
                        if (boundary) {
                                boundary_block = bh->b_blocknr;
                                boundary_bdev = bh->b_bdev;
                        }
                        bdev = bh->b_bdev;
                } while ((bh = bh->b_this_page) != head);

                if (first_unmapped)
                        goto page_is_mapped;

                /*
                 * Page has buffers, but they are all unmapped. The page was
                 * created by pagein or read over a hole which was handled by
                 * block_read_full_page().  If this address_space is also
                 * using mpage_readpages then this can rarely happen.
                 */
                goto confused;
        }

        /*
         * The page has no buffers: map it to disk
         */
        BUG_ON(!PageUptodate(page));
        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = (i_size - 1) >> blkbits;
        map_bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page; ) {

                map_bh.b_state = 0;
                if (get_block(inode, block_in_file, &map_bh, 1))
                        goto confused;
                if (buffer_new(&map_bh))
                        unmap_underlying_metadata(map_bh.b_bdev,
                                                map_bh.b_blocknr);
                if (buffer_boundary(&map_bh)) {
                        boundary_block = map_bh.b_blocknr;
                        boundary_bdev = map_bh.b_bdev;
                }
                if (page_block) {
                        if (map_bh.b_blocknr != blocks[page_block-1] + 1)
                                goto confused;
                }
                blocks[page_block++] = map_bh.b_blocknr;
                boundary = buffer_boundary(&map_bh);
                bdev = map_bh.b_bdev;
                if (block_in_file == last_block)
                        break;
                block_in_file++;
        }
        BUG_ON(page_block == 0);

        first_unmapped = page_block;

page_is_mapped:
        end_index = i_size >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                /*
                 * The page straddles i_size.  It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size.  For a file
                 * that is not a multiple of the page size, the remaining memory
                 * is zeroed when mapped, and writes to that region are not
                 * written out to the file."
                 */
                unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
                char *kaddr;

                if (page->index > end_index || !offset)
                        goto confused;
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
                flush_dcache_page(page);
                kunmap_atomic(kaddr, KM_USER0);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && *last_block_in_bio != blocks[0] - 1)
                bio = mpage_bio_submit(WRITE, bio);

alloc_new:
        if (bio == NULL) {
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
                if (bio == NULL)
                        goto confused;
        }

        /*
         * Must try to add the page before marking the buffer clean or
         * the confused fail path above (OOM) will be very confused when
         * it finds all bh marked clean (i.e. it will not write anything)
         */
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(WRITE, bio);
                goto alloc_new;
        }

        /*
         * OK, we have our BIO, so we can now mark the buffers clean.  Make
         * sure to only clean buffers which we know we'll be writing.
         */
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;
                unsigned buffer_counter = 0;

                do {
                        if (buffer_counter++ == first_unmapped)
                                break;
                        clear_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);

                /*
                 * we cannot drop the bh if the page is not uptodate
                 * or a concurrent readpage would fail to serialize with the bh
                 * and it would read from disk before we reach the platter.
                 */
                if (buffer_heads_over_limit && PageUptodate(page))
                        try_to_free_buffers(page);
        }

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
                bio = mpage_bio_submit(WRITE, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
                }
        } else {
                *last_block_in_bio = blocks[blocks_per_page - 1];
        }
        goto out;

confused:
        if (bio)
                bio = mpage_bio_submit(WRITE, bio);

        if (writepage_fn) {
                *ret = (*writepage_fn)(page, wbc);
        } else {
                *ret = -EAGAIN;
                goto out;
        }
        /*
         * The caller has a ref on the inode, so *mapping is stable
         */
        if (*ret) {
                if (*ret == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
out:
        return bio;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given
 * address space and writepage() all of them.
 *
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
                struct writeback_control *wbc, get_block_t get_block)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;
        int ret = 0;
        int done = 0;
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end = -1;               /* Inclusive */
        int scanned = 0;
        int is_range = 0;

        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                return 0;
        }

        writepage = NULL;
        if (get_block == NULL)
                writepage = mapping->a_ops->writepage;

        pagevec_init(&pvec, 0);
        if (wbc->sync_mode == WB_SYNC_NONE) {
                index = mapping->writeback_index; /* Start from prev offset */
        } else {
                index = 0;                        /* whole-file sweep */
                scanned = 1;
        }
        if (wbc->start || wbc->end) {
                index = wbc->start >> PAGE_CACHE_SHIFT;
                end = wbc->end >> PAGE_CACHE_SHIFT;
                is_range = 1;
                scanned = 1;
        }
retry:
        while (!done && (index <= end) &&
                        (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                        PAGECACHE_TAG_DIRTY,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                unsigned i;

                scanned = 1;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /*
                         * At this point we hold neither mapping->tree_lock nor
                         * lock on the page itself: the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or even
                         * swizzled back from swapper_space to tmpfs file
                         * mapping
                         */

                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (unlikely(is_range) && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                continue;
                        }

                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);

                        if (PageWriteback(page) ||
                                        !clear_page_dirty_for_io(page)) {
                                unlock_page(page);
                                continue;
                        }

                        if (writepage) {
                                ret = (*writepage)(page, wbc);
                                if (ret) {
                                        if (ret == -ENOSPC)
                                                set_bit(AS_ENOSPC,
                                                        &mapping->flags);
                                        else
                                                set_bit(AS_EIO,
                                                        &mapping->flags);
                                }
                        } else {
                                bio = __mpage_writepage(bio, page, get_block,
                                                &last_block_in_bio, &ret, wbc,
                                                page->mapping->a_ops->writepage);
                        }
                        if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
                                unlock_page(page);
                        if (ret || (--(wbc->nr_to_write) <= 0))
                                done = 1;
                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }
        if (!is_range)
                mapping->writeback_index = index;
        if (bio)
                mpage_bio_submit(WRITE, bio);
        return ret;
}
EXPORT_SYMBOL(mpage_writepages);
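
/*
 * Usage sketch (illustrative only): given a get_block_t, a filesystem's
 * ->writepages() can simply forward to mpage_writepages().  The examplefs_*
 * names are hypothetical; ext2 wires its writepages up the same way with
 * ext2_get_block().
 *
 *      static int examplefs_writepages(struct address_space *mapping,
 *                      struct writeback_control *wbc)
 *      {
 *              return mpage_writepages(mapping, wbc, examplefs_get_block);
 *      }
 */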

int mpage_writepage(struct page *page, get_block_t get_block,
        struct writeback_control *wbc)
{
        int ret = 0;
        struct bio *bio;
        sector_t last_block_in_bio = 0;

        bio = __mpage_writepage(NULL, page, get_block,
                        &last_block_in_bio, &ret, wbc, NULL);
        if (bio)
                mpage_bio_submit(WRITE, bio);

        return ret;
}
EXPORT_SYMBOL(mpage_writepage);
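
/*
 * Putting it together (illustrative only, hypothetical examplefs_* names):
 * a ->writepage() wrapper plus the read helpers sketched above give a set
 * of mpage-based address_space operations.  A real filesystem would add
 * its other methods (sync_page, prepare_write, etc.) as needed.
 *
 *      static int examplefs_writepage(struct page *page,
 *                      struct writeback_control *wbc)
 *      {
 *              return mpage_writepage(page, examplefs_get_block, wbc);
 *      }
 *
 *      static struct address_space_operations examplefs_aops = {
 *              .readpage       = examplefs_readpage,
 *              .readpages      = examplefs_readpages,
 *              .writepage      = examplefs_writepage,
 *              .writepages     = examplefs_writepages,
 *      };
 */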