1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52         bh->b_end_io = handler;
53         bh->b_private = private;
54 }
55
56 static int sync_buffer(void *word)
57 {
58         struct block_device *bd;
59         struct buffer_head *bh
60                 = container_of(word, struct buffer_head, b_state);
61
62         smp_mb();
63         bd = bh->b_bdev;
64         if (bd)
65                 blk_run_address_space(bd->bd_inode->i_mapping);
66         io_schedule();
67         return 0;
68 }
69
70 void __lock_buffer(struct buffer_head *bh)
71 {
72         wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73                                                         TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76
77 void unlock_buffer(struct buffer_head *bh)
78 {
79         clear_bit_unlock(BH_Lock, &bh->b_state);
80         smp_mb__after_clear_bit();
81         wake_up_bit(&bh->b_state, BH_Lock);
82 }
83
84 /*
85  * Block until a buffer comes unlocked.  This doesn't stop it
86  * from becoming locked again - you have to lock it yourself
87  * if you want to preserve its state.
88  */
89 void __wait_on_buffer(struct buffer_head * bh)
90 {
91         wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92 }
93
94 static void
95 __clear_page_buffers(struct page *page)
96 {
97         ClearPagePrivate(page);
98         set_page_private(page, 0);
99         page_cache_release(page);
100 }
101
102
103 static int quiet_error(struct buffer_head *bh)
104 {
105         if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106                 return 0;
107         return 1;
108 }
109
110
111 static void buffer_io_error(struct buffer_head *bh)
112 {
113         char b[BDEVNAME_SIZE];
114         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115                         bdevname(bh->b_bdev, b),
116                         (unsigned long long)bh->b_blocknr);
117 }
118
119 /*
120  * End-of-IO handler helper function which does not touch the bh after
121  * unlocking it.
122  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123  * a race there is benign: unlock_buffer() only uses the bh's address for
124  * hashing after unlocking the buffer, so it doesn't actually touch the bh
125  * itself.
126  */
127 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128 {
129         if (uptodate) {
130                 set_buffer_uptodate(bh);
131         } else {
132                 /* This happens, due to failed READA attempts. */
133                 clear_buffer_uptodate(bh);
134         }
135         unlock_buffer(bh);
136 }
137
138 /*
139  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
140  * unlock the buffer. This is what ll_rw_block uses too.
141  */
142 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143 {
144         __end_buffer_read_notouch(bh, uptodate);
145         put_bh(bh);
146 }
147
148 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149 {
150         char b[BDEVNAME_SIZE];
151
152         if (uptodate) {
153                 set_buffer_uptodate(bh);
154         } else {
155                 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156                         buffer_io_error(bh);
157                         printk(KERN_WARNING "lost page write due to "
158                                         "I/O error on %s\n",
159                                        bdevname(bh->b_bdev, b));
160                 }
161                 set_buffer_write_io_error(bh);
162                 clear_buffer_uptodate(bh);
163         }
164         unlock_buffer(bh);
165         put_bh(bh);
166 }
167
168 /*
169  * Various filesystems appear to want __find_get_block to be non-blocking.
170  * But it's the page lock which protects the buffers.  To get around this,
171  * we get exclusion from try_to_free_buffers with the blockdev mapping's
172  * private_lock.
173  *
174  * Hack idea: for the blockdev mapping, private_lock contention
175  * may be quite high.  This code could TryLock the page, and if that
176  * succeeds, there is no need to take private_lock. (But if
177  * private_lock is contended then so is mapping->tree_lock).
178  */
179 static struct buffer_head *
180 __find_get_block_slow(struct block_device *bdev, sector_t block)
181 {
182         struct inode *bd_inode = bdev->bd_inode;
183         struct address_space *bd_mapping = bd_inode->i_mapping;
184         struct buffer_head *ret = NULL;
185         pgoff_t index;
186         struct buffer_head *bh;
187         struct buffer_head *head;
188         struct page *page;
189         int all_mapped = 1;
190
191         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
192         page = find_get_page(bd_mapping, index);
193         if (!page)
194                 goto out;
195
196         spin_lock(&bd_mapping->private_lock);
197         if (!page_has_buffers(page))
198                 goto out_unlock;
199         head = page_buffers(page);
200         bh = head;
201         do {
202                 if (bh->b_blocknr == block) {
203                         ret = bh;
204                         get_bh(bh);
205                         goto out_unlock;
206                 }
207                 if (!buffer_mapped(bh))
208                         all_mapped = 0;
209                 bh = bh->b_this_page;
210         } while (bh != head);
211
212         /* we might be here because some of the buffers on this page are
213          * not mapped.  This is due to various races between
214          * file io on the block device and getblk.  It gets dealt with
215          * elsewhere, don't buffer_error if we had some unmapped buffers
216          */
217         if (all_mapped) {
218                 printk("__find_get_block_slow() failed. "
219                         "block=%llu, b_blocknr=%llu\n",
220                         (unsigned long long)block,
221                         (unsigned long long)bh->b_blocknr);
222                 printk("b_state=0x%08lx, b_size=%zu\n",
223                         bh->b_state, bh->b_size);
224                 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
225         }
226 out_unlock:
227         spin_unlock(&bd_mapping->private_lock);
228         page_cache_release(page);
229 out:
230         return ret;
231 }
232
233 /* If invalidate_buffers() trashes dirty buffers, it means some kind
234    of fs corruption is going on. Trashing dirty data always implies losing
235    information that was supposed to have been stored on the physical layer
236    by the user.
237
238    Thus, in general usage, invalidate_buffers is not allowed to trash
239    dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
240    be preserved.  Such buffers are simply skipped.
241
242    We also skip buffers which are still in use.  For example, this can
243    happen if a userspace program is reading the block device.
244
245    NOTE: If the user removed a removable-media disk while there was still
246    dirty data not synced to disk (due to a bug in the device driver or a
247    user error), then by not destroying the dirty buffers we could also
248    corrupt the next media inserted.  A parameter is therefore needed to
249    handle this case as safely as possible (trying not to corrupt the newly
250    inserted disk with data belonging to the old, now-corrupted one).  Also,
251    for a ramdisk, the natural way to release its memory is to destroy the
252    dirty buffers.
253
254    These are two special cases. Normal usage implies that the device driver
255    issues a sync on the device (without waiting for I/O completion) and
256    then an invalidate_buffers call that doesn't trash dirty buffers.
257
258    For handling cache coherency with the blkdev pagecache, the 'update' case
259    has been introduced. It is needed to re-read from disk any pinned
260    buffer. NOTE: re-reading from disk is destructive, so we can do it only
261    when we assume nobody is changing the buffercache under our I/O and when
262    we think the disk contains more recent information than the buffercache.
263    The update == 1 pass marks the buffers we need to update; the update == 2
264    pass does the actual I/O. */
265 void invalidate_bdev(struct block_device *bdev)
266 {
267         struct address_space *mapping = bdev->bd_inode->i_mapping;
268
269         if (mapping->nrpages == 0)
270                 return;
271
272         invalidate_bh_lrus();
273         invalidate_mapping_pages(mapping, 0, -1);
274 }
275
276 /*
277  * Kick pdflush then try to free up some ZONE_NORMAL memory.
278  */
279 static void free_more_memory(void)
280 {
281         struct zone *zone;
282         int nid;
283
284         wakeup_pdflush(1024);
285         yield();
286
287         for_each_online_node(nid) {
288                 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
289                                                 gfp_zone(GFP_NOFS), NULL,
290                                                 &zone);
291                 if (zone)
292                         try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
293                                                 GFP_NOFS, NULL);
294         }
295 }
296
297 /*
298  * I/O completion handler for block_read_full_page() - pages
299  * which come unlocked at the end of I/O.
300  */
301 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
302 {
303         unsigned long flags;
304         struct buffer_head *first;
305         struct buffer_head *tmp;
306         struct page *page;
307         int page_uptodate = 1;
308
309         BUG_ON(!buffer_async_read(bh));
310
311         page = bh->b_page;
312         if (uptodate) {
313                 set_buffer_uptodate(bh);
314         } else {
315                 clear_buffer_uptodate(bh);
316                 if (!quiet_error(bh))
317                         buffer_io_error(bh);
318                 SetPageError(page);
319         }
320
321         /*
322          * Be _very_ careful from here on. Bad things can happen if
323          * two buffer heads end IO at almost the same time and both
324          * decide that the page is now completely done.
325          */
326         first = page_buffers(page);
327         local_irq_save(flags);
328         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
329         clear_buffer_async_read(bh);
330         unlock_buffer(bh);
331         tmp = bh;
332         do {
333                 if (!buffer_uptodate(tmp))
334                         page_uptodate = 0;
335                 if (buffer_async_read(tmp)) {
336                         BUG_ON(!buffer_locked(tmp));
337                         goto still_busy;
338                 }
339                 tmp = tmp->b_this_page;
340         } while (tmp != bh);
341         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
342         local_irq_restore(flags);
343
344         /*
345          * If none of the buffers had errors and they are all
346          * uptodate then we can set the page uptodate.
347          */
348         if (page_uptodate && !PageError(page))
349                 SetPageUptodate(page);
350         unlock_page(page);
351         return;
352
353 still_busy:
354         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
355         local_irq_restore(flags);
356         return;
357 }
358
359 /*
360  * Completion handler for block_write_full_page() - pages which are unlocked
361  * during I/O, and which have PageWriteback cleared upon I/O completion.
362  */
363 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
364 {
365         char b[BDEVNAME_SIZE];
366         unsigned long flags;
367         struct buffer_head *first;
368         struct buffer_head *tmp;
369         struct page *page;
370
371         BUG_ON(!buffer_async_write(bh));
372
373         page = bh->b_page;
374         if (uptodate) {
375                 set_buffer_uptodate(bh);
376         } else {
377                 if (!quiet_error(bh)) {
378                         buffer_io_error(bh);
379                         printk(KERN_WARNING "lost page write due to "
380                                         "I/O error on %s\n",
381                                bdevname(bh->b_bdev, b));
382                 }
383                 set_bit(AS_EIO, &page->mapping->flags);
384                 set_buffer_write_io_error(bh);
385                 clear_buffer_uptodate(bh);
386                 SetPageError(page);
387         }
388
389         first = page_buffers(page);
390         local_irq_save(flags);
391         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
392
393         clear_buffer_async_write(bh);
394         unlock_buffer(bh);
395         tmp = bh->b_this_page;
396         while (tmp != bh) {
397                 if (buffer_async_write(tmp)) {
398                         BUG_ON(!buffer_locked(tmp));
399                         goto still_busy;
400                 }
401                 tmp = tmp->b_this_page;
402         }
403         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
404         local_irq_restore(flags);
405         end_page_writeback(page);
406         return;
407
408 still_busy:
409         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410         local_irq_restore(flags);
411         return;
412 }
413
414 /*
415  * If a page's buffers are under async read-in (end_buffer_async_read
416  * completion) then there is a possibility that another thread of
417  * control could lock one of the buffers after it has completed
418  * but while some of the other buffers have not completed.  This
419  * locked buffer would confuse end_buffer_async_read() into not unlocking
420  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
421  * that this buffer is not under async I/O.
422  *
423  * The page comes unlocked when it has no locked buffer_async buffers
424  * left.
425  *
426  * PageLocked prevents anyone starting new async I/O reads any of
427  * the buffers.
428  *
429  * PageWriteback is used to prevent simultaneous writeout of the same
430  * page.
431  *
432  * PageLocked prevents anyone from starting writeback of a page which is
433  * under read I/O (PageWriteback is only ever set against a locked page).
434  */
435 static void mark_buffer_async_read(struct buffer_head *bh)
436 {
437         bh->b_end_io = end_buffer_async_read;
438         set_buffer_async_read(bh);
439 }
440
441 void mark_buffer_async_write(struct buffer_head *bh)
442 {
443         bh->b_end_io = end_buffer_async_write;
444         set_buffer_async_write(bh);
445 }
446 EXPORT_SYMBOL(mark_buffer_async_write);
447
448
449 /*
450  * fs/buffer.c contains helper functions for buffer-backed address space's
451  * fsync functions.  A common requirement for buffer-based filesystems is
452  * that certain data from the backing blockdev needs to be written out for
453  * a successful fsync().  For example, ext2 indirect blocks need to be
454  * written back and waited upon before fsync() returns.
455  *
456  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
457  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
458  * management of a list of dependent buffers at ->i_mapping->private_list.
459  *
460  * Locking is a little subtle: try_to_free_buffers() will remove buffers
461  * from their controlling inode's queue when they are being freed.  But
462  * try_to_free_buffers() will be operating against the *blockdev* mapping
463  * at the time, not against the S_ISREG file which depends on those buffers.
464  * So the locking for private_list is via the private_lock in the address_space
465  * which backs the buffers.  Which is different from the address_space 
466  * against which the buffers are listed.  So for a particular address_space,
467  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
468  * mapping->private_list will always be protected by the backing blockdev's
469  * ->private_lock.
470  *
471  * Which introduces a requirement: all buffers on an address_space's
472  * ->private_list must be from the same address_space: the blockdev's.
473  *
474  * address_spaces which do not place buffers at ->private_list via these
475  * utility functions are free to use private_lock and private_list for
476  * whatever they want.  The only requirement is that list_empty(private_list)
477  * be true at clear_inode() time.
478  *
479  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
480  * filesystems should do that.  invalidate_inode_buffers() should just go
481  * BUG_ON(!list_empty).
482  *
483  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
484  * take an address_space, not an inode.  And it should be called
485  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
486  * queued up.
487  *
488  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
489  * list if it is already on a list.  Because if the buffer is on a list,
490  * it *must* already be on the right one.  If not, the filesystem is being
491  * silly.  This will save a ton of locking.  But first we have to ensure
492  * that buffers are taken *off* the old inode's list when they are freed
493  * (presumably in truncate).  That requires careful auditing of all
494  * filesystems (do it inside bforget()).  It could also be done by bringing
495  * b_inode back.
496  */
497
498 /*
499  * The buffer's backing address_space's private_lock must be held
500  */
501 static void __remove_assoc_queue(struct buffer_head *bh)
502 {
503         list_del_init(&bh->b_assoc_buffers);
504         WARN_ON(!bh->b_assoc_map);
505         if (buffer_write_io_error(bh))
506                 set_bit(AS_EIO, &bh->b_assoc_map->flags);
507         bh->b_assoc_map = NULL;
508 }
509
510 int inode_has_buffers(struct inode *inode)
511 {
512         return !list_empty(&inode->i_data.private_list);
513 }
514
515 /*
516  * osync is designed to support O_SYNC io.  It waits synchronously for
517  * all already-submitted IO to complete, but does not queue any new
518  * writes to the disk.
519  *
520  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
521  * you dirty the buffers, and then use osync_inode_buffers to wait for
522  * completion.  Any other dirty buffers which are not yet queued for
523  * write will not be flushed to disk by the osync.
524  */
525 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
526 {
527         struct buffer_head *bh;
528         struct list_head *p;
529         int err = 0;
530
531         spin_lock(lock);
532 repeat:
533         list_for_each_prev(p, list) {
534                 bh = BH_ENTRY(p);
535                 if (buffer_locked(bh)) {
536                         get_bh(bh);
537                         spin_unlock(lock);
538                         wait_on_buffer(bh);
539                         if (!buffer_uptodate(bh))
540                                 err = -EIO;
541                         brelse(bh);
542                         spin_lock(lock);
543                         goto repeat;
544                 }
545         }
546         spin_unlock(lock);
547         return err;
548 }
549
550 /**
551  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
552  * @mapping: the mapping which wants those buffers written
553  *
554  * Starts I/O against the buffers at mapping->private_list, and waits upon
555  * that I/O.
556  *
557  * Basically, this is a convenience function for fsync().
558  * @mapping is a file or directory which needs those buffers to be written for
559  * a successful fsync().
560  */
561 int sync_mapping_buffers(struct address_space *mapping)
562 {
563         struct address_space *buffer_mapping = mapping->assoc_mapping;
564
565         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
566                 return 0;
567
568         return fsync_buffers_list(&buffer_mapping->private_lock,
569                                         &mapping->private_list);
570 }
571 EXPORT_SYMBOL(sync_mapping_buffers);
572
573 /*
574  * Called when we've recently written block `bblock', and it is known that
575  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
576  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
577  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
578  */
579 void write_boundary_block(struct block_device *bdev,
580                         sector_t bblock, unsigned blocksize)
581 {
582         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
583         if (bh) {
584                 if (buffer_dirty(bh))
585                         ll_rw_block(WRITE, 1, &bh);
586                 put_bh(bh);
587         }
588 }
589
590 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
591 {
592         struct address_space *mapping = inode->i_mapping;
593         struct address_space *buffer_mapping = bh->b_page->mapping;
594
595         mark_buffer_dirty(bh);
596         if (!mapping->assoc_mapping) {
597                 mapping->assoc_mapping = buffer_mapping;
598         } else {
599                 BUG_ON(mapping->assoc_mapping != buffer_mapping);
600         }
601         if (!bh->b_assoc_map) {
602                 spin_lock(&buffer_mapping->private_lock);
603                 list_move_tail(&bh->b_assoc_buffers,
604                                 &mapping->private_list);
605                 bh->b_assoc_map = mapping;
606                 spin_unlock(&buffer_mapping->private_lock);
607         }
608 }
609 EXPORT_SYMBOL(mark_buffer_dirty_inode);
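
/*
 * Illustrative sketch, not part of this file: the intended usage pattern
 * for the helpers above.  A filesystem attaches metadata buffers that must
 * reach disk before fsync() returns with mark_buffer_dirty_inode(), and its
 * fsync method then flushes them with sync_mapping_buffers().  The names
 * "myfs_dirty_indirect" and "myfs_fsync" are hypothetical, and the fsync
 * prototype assumed here is the one used by this kernel version.
 */
#if 0	/* example only - not compiled */
static void myfs_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	/* ... modify the indirect block held in bh->b_data ... */
	mark_buffer_dirty_inode(bh, inode);	/* queue on ->private_list */
}

static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* Write out and wait upon the buffers queued above. */
	return sync_mapping_buffers(inode->i_mapping);
}
#endif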
610
611 /*
612  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
613  * dirty.
614  *
615  * If warn is true, then emit a warning if the page is not uptodate and has
616  * not been truncated.
617  */
618 static void __set_page_dirty(struct page *page,
619                 struct address_space *mapping, int warn)
620 {
621         spin_lock_irq(&mapping->tree_lock);
622         if (page->mapping) {    /* Race with truncate? */
623                 WARN_ON_ONCE(warn && !PageUptodate(page));
624                 account_page_dirtied(page, mapping);
625                 radix_tree_tag_set(&mapping->page_tree,
626                                 page_index(page), PAGECACHE_TAG_DIRTY);
627         }
628         spin_unlock_irq(&mapping->tree_lock);
629         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
630 }
631
632 /*
633  * Add a page to the dirty page list.
634  *
635  * It is a sad fact of life that this function is called from several places
636  * deeply under spinlocking.  It may not sleep.
637  *
638  * If the page has buffers, the uptodate buffers are set dirty, to preserve
639  * dirty-state coherency between the page and the buffers.  If the page does
640  * not have buffers then when they are later attached they will all be set
641  * dirty.
642  *
643  * The buffers are dirtied before the page is dirtied.  There's a small race
644  * window in which a writepage caller may see the page cleanness but not the
645  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
646  * before the buffers, a concurrent writepage caller could clear the page dirty
647  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
648  * page on the dirty page list.
649  *
650  * We use private_lock to lock against try_to_free_buffers while using the
651  * page's buffer list.  Also use this to protect against clean buffers being
652  * added to the page after it was set dirty.
653  *
654  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
655  * address_space though.
656  */
657 int __set_page_dirty_buffers(struct page *page)
658 {
659         int newly_dirty;
660         struct address_space *mapping = page_mapping(page);
661
662         if (unlikely(!mapping))
663                 return !TestSetPageDirty(page);
664
665         spin_lock(&mapping->private_lock);
666         if (page_has_buffers(page)) {
667                 struct buffer_head *head = page_buffers(page);
668                 struct buffer_head *bh = head;
669
670                 do {
671                         set_buffer_dirty(bh);
672                         bh = bh->b_this_page;
673                 } while (bh != head);
674         }
675         newly_dirty = !TestSetPageDirty(page);
676         spin_unlock(&mapping->private_lock);
677
678         if (newly_dirty)
679                 __set_page_dirty(page, mapping, 1);
680         return newly_dirty;
681 }
682 EXPORT_SYMBOL(__set_page_dirty_buffers);
683
684 /*
685  * Write out and wait upon a list of buffers.
686  *
687  * We have conflicting pressures: we want to make sure that all
688  * initially dirty buffers get waited on, but that any subsequently
689  * dirtied buffers don't.  After all, we don't want fsync to last
690  * forever if somebody is actively writing to the file.
691  *
692  * Do this in two main stages: first we copy dirty buffers to a
693  * temporary inode list, queueing the writes as we go.  Then we clean
694  * up, waiting for those writes to complete.
695  * 
696  * During this second stage, any subsequent updates to the file may end
697  * up refiling the buffer on the original inode's dirty list again, so
698  * there is a chance we will end up with a buffer queued for write but
699  * not yet completed on that list.  So, as a final cleanup we go through
700  * the osync code to catch these locked, dirty buffers without requeuing
701  * any newly dirty buffers for write.
702  */
703 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
704 {
705         struct buffer_head *bh;
706         struct list_head tmp;
707         struct address_space *mapping;
708         int err = 0, err2;
709
710         INIT_LIST_HEAD(&tmp);
711
712         spin_lock(lock);
713         while (!list_empty(list)) {
714                 bh = BH_ENTRY(list->next);
715                 mapping = bh->b_assoc_map;
716                 __remove_assoc_queue(bh);
717                 /* Avoid race with mark_buffer_dirty_inode() which does
718                  * a lockless check and we rely on seeing the dirty bit */
719                 smp_mb();
720                 if (buffer_dirty(bh) || buffer_locked(bh)) {
721                         list_add(&bh->b_assoc_buffers, &tmp);
722                         bh->b_assoc_map = mapping;
723                         if (buffer_dirty(bh)) {
724                                 get_bh(bh);
725                                 spin_unlock(lock);
726                                 /*
727                                  * Ensure any pending I/O completes so that
728                                  * ll_rw_block() actually writes the current
729                                  * contents - it is a noop if I/O is still in
730                                  * flight on potentially older contents.
731                                  */
732                                 ll_rw_block(SWRITE_SYNC, 1, &bh);
733                                 brelse(bh);
734                                 spin_lock(lock);
735                         }
736                 }
737         }
738
739         while (!list_empty(&tmp)) {
740                 bh = BH_ENTRY(tmp.prev);
741                 get_bh(bh);
742                 mapping = bh->b_assoc_map;
743                 __remove_assoc_queue(bh);
744                 /* Avoid race with mark_buffer_dirty_inode() which does
745                  * a lockless check and we rely on seeing the dirty bit */
746                 smp_mb();
747                 if (buffer_dirty(bh)) {
748                         list_add(&bh->b_assoc_buffers,
749                                  &mapping->private_list);
750                         bh->b_assoc_map = mapping;
751                 }
752                 spin_unlock(lock);
753                 wait_on_buffer(bh);
754                 if (!buffer_uptodate(bh))
755                         err = -EIO;
756                 brelse(bh);
757                 spin_lock(lock);
758         }
759         
760         spin_unlock(lock);
761         err2 = osync_buffers_list(lock, list);
762         if (err)
763                 return err;
764         else
765                 return err2;
766 }
767
768 /*
769  * Invalidate any and all dirty buffers on a given inode.  We are
770  * probably unmounting the fs, but that doesn't mean we have already
771  * done a sync().  Just drop the buffers from the inode list.
772  *
773  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
774  * assumes that all the buffers are against the blockdev.  Not true
775  * for reiserfs.
776  */
777 void invalidate_inode_buffers(struct inode *inode)
778 {
779         if (inode_has_buffers(inode)) {
780                 struct address_space *mapping = &inode->i_data;
781                 struct list_head *list = &mapping->private_list;
782                 struct address_space *buffer_mapping = mapping->assoc_mapping;
783
784                 spin_lock(&buffer_mapping->private_lock);
785                 while (!list_empty(list))
786                         __remove_assoc_queue(BH_ENTRY(list->next));
787                 spin_unlock(&buffer_mapping->private_lock);
788         }
789 }
790 EXPORT_SYMBOL(invalidate_inode_buffers);
791
792 /*
793  * Remove any clean buffers from the inode's buffer list.  This is called
794  * when we're trying to free the inode itself.  Those buffers can pin it.
795  *
796  * Returns true if all buffers were removed.
797  */
798 int remove_inode_buffers(struct inode *inode)
799 {
800         int ret = 1;
801
802         if (inode_has_buffers(inode)) {
803                 struct address_space *mapping = &inode->i_data;
804                 struct list_head *list = &mapping->private_list;
805                 struct address_space *buffer_mapping = mapping->assoc_mapping;
806
807                 spin_lock(&buffer_mapping->private_lock);
808                 while (!list_empty(list)) {
809                         struct buffer_head *bh = BH_ENTRY(list->next);
810                         if (buffer_dirty(bh)) {
811                                 ret = 0;
812                                 break;
813                         }
814                         __remove_assoc_queue(bh);
815                 }
816                 spin_unlock(&buffer_mapping->private_lock);
817         }
818         return ret;
819 }
820
821 /*
822  * Create the appropriate buffers when given a page for data area and
823  * the size of each buffer.. Use the bh->b_this_page linked list to
824  * follow the buffers created.  Return NULL if unable to create more
825  * buffers.
826  *
827  * The retry flag is used to differentiate async IO (paging, swapping)
828  * which may not fail from ordinary buffer allocations.
829  */
830 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
831                 int retry)
832 {
833         struct buffer_head *bh, *head;
834         long offset;
835
836 try_again:
837         head = NULL;
838         offset = PAGE_SIZE;
839         while ((offset -= size) >= 0) {
840                 bh = alloc_buffer_head(GFP_NOFS);
841                 if (!bh)
842                         goto no_grow;
843
844                 bh->b_bdev = NULL;
845                 bh->b_this_page = head;
846                 bh->b_blocknr = -1;
847                 head = bh;
848
849                 bh->b_state = 0;
850                 atomic_set(&bh->b_count, 0);
851                 bh->b_private = NULL;
852                 bh->b_size = size;
853
854                 /* Link the buffer to its page */
855                 set_bh_page(bh, page, offset);
856
857                 init_buffer(bh, NULL, NULL);
858         }
859         return head;
860 /*
861  * In case anything failed, we just free everything we got.
862  */
863 no_grow:
864         if (head) {
865                 do {
866                         bh = head;
867                         head = head->b_this_page;
868                         free_buffer_head(bh);
869                 } while (head);
870         }
871
872         /*
873          * Return failure for non-async IO requests.  Async IO requests
874          * are not allowed to fail, so we have to wait until buffer heads
875          * become available.  But we don't want tasks sleeping with 
876          * partially complete buffers, so all were released above.
877          */
878         if (!retry)
879                 return NULL;
880
881         /* We're _really_ low on memory. Now we just
882          * wait for old buffer heads to become free due to
883          * finishing IO.  Since this is an async request and
884          * the reserve list is empty, we're sure there are 
885          * async buffer heads in use.
886          */
887         free_more_memory();
888         goto try_again;
889 }
890 EXPORT_SYMBOL_GPL(alloc_page_buffers);
891
892 static inline void
893 link_dev_buffers(struct page *page, struct buffer_head *head)
894 {
895         struct buffer_head *bh, *tail;
896
897         bh = head;
898         do {
899                 tail = bh;
900                 bh = bh->b_this_page;
901         } while (bh);
902         tail->b_this_page = head;
903         attach_page_buffers(page, head);
904 }
905
906 /*
907  * Initialise the state of a blockdev page's buffers.
908  */ 
909 static void
910 init_page_buffers(struct page *page, struct block_device *bdev,
911                         sector_t block, int size)
912 {
913         struct buffer_head *head = page_buffers(page);
914         struct buffer_head *bh = head;
915         int uptodate = PageUptodate(page);
916
917         do {
918                 if (!buffer_mapped(bh)) {
919                         init_buffer(bh, NULL, NULL);
920                         bh->b_bdev = bdev;
921                         bh->b_blocknr = block;
922                         if (uptodate)
923                                 set_buffer_uptodate(bh);
924                         set_buffer_mapped(bh);
925                 }
926                 block++;
927                 bh = bh->b_this_page;
928         } while (bh != head);
929 }
930
931 /*
932  * Create the page-cache page that contains the requested block.
933  *
934  * This is used purely for blockdev mappings.
935  */
936 static struct page *
937 grow_dev_page(struct block_device *bdev, sector_t block,
938                 pgoff_t index, int size)
939 {
940         struct inode *inode = bdev->bd_inode;
941         struct page *page;
942         struct buffer_head *bh;
943
944         page = find_or_create_page(inode->i_mapping, index,
945                 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
946         if (!page)
947                 return NULL;
948
949         BUG_ON(!PageLocked(page));
950
951         if (page_has_buffers(page)) {
952                 bh = page_buffers(page);
953                 if (bh->b_size == size) {
954                         init_page_buffers(page, bdev, block, size);
955                         return page;
956                 }
957                 if (!try_to_free_buffers(page))
958                         goto failed;
959         }
960
961         /*
962          * Allocate some buffers for this page
963          */
964         bh = alloc_page_buffers(page, size, 0);
965         if (!bh)
966                 goto failed;
967
968         /*
969          * Link the page to the buffers and initialise them.  Take the
970          * lock to be atomic wrt __find_get_block(), which does not
971          * run under the page lock.
972          */
973         spin_lock(&inode->i_mapping->private_lock);
974         link_dev_buffers(page, bh);
975         init_page_buffers(page, bdev, block, size);
976         spin_unlock(&inode->i_mapping->private_lock);
977         return page;
978
979 failed:
980         BUG();
981         unlock_page(page);
982         page_cache_release(page);
983         return NULL;
984 }
985
986 /*
987  * Create buffers for the specified block device block's page.  If
988  * that page was dirty, the buffers are set dirty also.
989  */
990 static int
991 grow_buffers(struct block_device *bdev, sector_t block, int size)
992 {
993         struct page *page;
994         pgoff_t index;
995         int sizebits;
996
997         sizebits = -1;
998         do {
999                 sizebits++;
1000         } while ((size << sizebits) < PAGE_SIZE);
1001
1002         index = block >> sizebits;
1003
1004         /*
1005          * Check for a block which wants to lie outside our maximum possible
1006          * pagecache index.  (this comparison is done using sector_t types).
1007          */
1008         if (unlikely(index != block >> sizebits)) {
1009                 char b[BDEVNAME_SIZE];
1010
1011                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1012                         "device %s\n",
1013                         __func__, (unsigned long long)block,
1014                         bdevname(bdev, b));
1015                 return -EIO;
1016         }
1017         block = index << sizebits;
1018         /* Create a page with the proper size buffers.. */
1019         page = grow_dev_page(bdev, block, index, size);
1020         if (!page)
1021                 return 0;
1022         unlock_page(page);
1023         page_cache_release(page);
1024         return 1;
1025 }
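
/*
 * Worked example of the index arithmetic in grow_buffers(), illustrative
 * only: with 512-byte blocks and a 4096-byte PAGE_SIZE the loop above gives
 * sizebits = 3, so block 4100 lands in pagecache page index 4100 >> 3 = 512,
 * whose first block is 512 << 3 = 4096; the page then carries eight buffers
 * covering blocks 4096..4103.
 */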
1026
1027 static struct buffer_head *
1028 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1029 {
1030         /* Size must be multiple of hard sectorsize */
1031         if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1032                         (size < 512 || size > PAGE_SIZE))) {
1033                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1034                                         size);
1035                 printk(KERN_ERR "hardsect size: %d\n",
1036                                         bdev_hardsect_size(bdev));
1037
1038                 dump_stack();
1039                 return NULL;
1040         }
1041
1042         for (;;) {
1043                 struct buffer_head * bh;
1044                 int ret;
1045
1046                 bh = __find_get_block(bdev, block, size);
1047                 if (bh)
1048                         return bh;
1049
1050                 ret = grow_buffers(bdev, block, size);
1051                 if (ret < 0)
1052                         return NULL;
1053                 if (ret == 0)
1054                         free_more_memory();
1055         }
1056 }
1057
1058 /*
1059  * The relationship between dirty buffers and dirty pages:
1060  *
1061  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1062  * the page is tagged dirty in its radix tree.
1063  *
1064  * At all times, the dirtiness of the buffers represents the dirtiness of
1065  * subsections of the page.  If the page has buffers, the page dirty bit is
1066  * merely a hint about the true dirty state.
1067  *
1068  * When a page is set dirty in its entirety, all its buffers are marked dirty
1069  * (if the page has buffers).
1070  *
1071  * When a buffer is marked dirty, its page is dirtied, but the page's other
1072  * buffers are not.
1073  *
1074  * Also.  When blockdev buffers are explicitly read with bread(), they
1075  * individually become uptodate.  But their backing page remains not
1076  * uptodate - even if all of its buffers are uptodate.  A subsequent
1077  * block_read_full_page() against that page will discover all the uptodate
1078  * buffers, will set the page uptodate and will perform no I/O.
1079  */
1080
1081 /**
1082  * mark_buffer_dirty - mark a buffer_head as needing writeout
1083  * @bh: the buffer_head to mark dirty
1084  *
1085  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1086  * backing page dirty, then tag the page as dirty in its address_space's radix
1087  * tree and then attach the address_space's inode to its superblock's dirty
1088  * inode list.
1089  *
1090  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1091  * mapping->tree_lock and the global inode_lock.
1092  */
1093 void mark_buffer_dirty(struct buffer_head *bh)
1094 {
1095         WARN_ON_ONCE(!buffer_uptodate(bh));
1096
1097         /*
1098          * Very *carefully* optimize the it-is-already-dirty case.
1099          *
1100          * Don't let the final "is it dirty" escape to before we
1101          * perhaps modified the buffer.
1102          */
1103         if (buffer_dirty(bh)) {
1104                 smp_mb();
1105                 if (buffer_dirty(bh))
1106                         return;
1107         }
1108
1109         if (!test_set_buffer_dirty(bh)) {
1110                 struct page *page = bh->b_page;
1111                 if (!TestSetPageDirty(page))
1112                         __set_page_dirty(page, page_mapping(page), 0);
1113         }
1114 }
1115
1116 /*
1117  * Decrement a buffer_head's reference count.  If all buffers against a page
1118  * have zero reference count, are clean and unlocked, and if the page is clean
1119  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1120  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1121  * a page but it ends up not being freed, and buffers may later be reattached).
1122  */
1123 void __brelse(struct buffer_head * buf)
1124 {
1125         if (atomic_read(&buf->b_count)) {
1126                 put_bh(buf);
1127                 return;
1128         }
1129         WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1130 }
1131
1132 /*
1133  * bforget() is like brelse(), except it discards any
1134  * potentially dirty data.
1135  */
1136 void __bforget(struct buffer_head *bh)
1137 {
1138         clear_buffer_dirty(bh);
1139         if (bh->b_assoc_map) {
1140                 struct address_space *buffer_mapping = bh->b_page->mapping;
1141
1142                 spin_lock(&buffer_mapping->private_lock);
1143                 list_del_init(&bh->b_assoc_buffers);
1144                 bh->b_assoc_map = NULL;
1145                 spin_unlock(&buffer_mapping->private_lock);
1146         }
1147         __brelse(bh);
1148 }
1149
1150 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1151 {
1152         lock_buffer(bh);
1153         if (buffer_uptodate(bh)) {
1154                 unlock_buffer(bh);
1155                 return bh;
1156         } else {
1157                 get_bh(bh);
1158                 bh->b_end_io = end_buffer_read_sync;
1159                 submit_bh(READ, bh);
1160                 wait_on_buffer(bh);
1161                 if (buffer_uptodate(bh))
1162                         return bh;
1163         }
1164         brelse(bh);
1165         return NULL;
1166 }
1167
1168 /*
1169  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1170  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1171  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1172  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1173  * CPU's LRUs at the same time.
1174  *
1175  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1176  * sb_find_get_block().
1177  *
1178  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1179  * a local interrupt disable for that.
1180  */
1181
1182 #define BH_LRU_SIZE     8
1183
1184 struct bh_lru {
1185         struct buffer_head *bhs[BH_LRU_SIZE];
1186 };
1187
1188 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1189
1190 #ifdef CONFIG_SMP
1191 #define bh_lru_lock()   local_irq_disable()
1192 #define bh_lru_unlock() local_irq_enable()
1193 #else
1194 #define bh_lru_lock()   preempt_disable()
1195 #define bh_lru_unlock() preempt_enable()
1196 #endif
1197
1198 static inline void check_irqs_on(void)
1199 {
1200 #ifdef irqs_disabled
1201         BUG_ON(irqs_disabled());
1202 #endif
1203 }
1204
1205 /*
1206  * The LRU management algorithm is dopey-but-simple.  Sorry.
1207  */
1208 static void bh_lru_install(struct buffer_head *bh)
1209 {
1210         struct buffer_head *evictee = NULL;
1211         struct bh_lru *lru;
1212
1213         check_irqs_on();
1214         bh_lru_lock();
1215         lru = &__get_cpu_var(bh_lrus);
1216         if (lru->bhs[0] != bh) {
1217                 struct buffer_head *bhs[BH_LRU_SIZE];
1218                 int in;
1219                 int out = 0;
1220
1221                 get_bh(bh);
1222                 bhs[out++] = bh;
1223                 for (in = 0; in < BH_LRU_SIZE; in++) {
1224                         struct buffer_head *bh2 = lru->bhs[in];
1225
1226                         if (bh2 == bh) {
1227                                 __brelse(bh2);
1228                         } else {
1229                                 if (out >= BH_LRU_SIZE) {
1230                                         BUG_ON(evictee != NULL);
1231                                         evictee = bh2;
1232                                 } else {
1233                                         bhs[out++] = bh2;
1234                                 }
1235                         }
1236                 }
1237                 while (out < BH_LRU_SIZE)
1238                         bhs[out++] = NULL;
1239                 memcpy(lru->bhs, bhs, sizeof(bhs));
1240         }
1241         bh_lru_unlock();
1242
1243         if (evictee)
1244                 __brelse(evictee);
1245 }
1246
1247 /*
1248  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1249  */
1250 static struct buffer_head *
1251 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1252 {
1253         struct buffer_head *ret = NULL;
1254         struct bh_lru *lru;
1255         unsigned int i;
1256
1257         check_irqs_on();
1258         bh_lru_lock();
1259         lru = &__get_cpu_var(bh_lrus);
1260         for (i = 0; i < BH_LRU_SIZE; i++) {
1261                 struct buffer_head *bh = lru->bhs[i];
1262
1263                 if (bh && bh->b_bdev == bdev &&
1264                                 bh->b_blocknr == block && bh->b_size == size) {
1265                         if (i) {
1266                                 while (i) {
1267                                         lru->bhs[i] = lru->bhs[i - 1];
1268                                         i--;
1269                                 }
1270                                 lru->bhs[0] = bh;
1271                         }
1272                         get_bh(bh);
1273                         ret = bh;
1274                         break;
1275                 }
1276         }
1277         bh_lru_unlock();
1278         return ret;
1279 }
1280
1281 /*
1282  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1283  * it in the LRU and mark it as accessed.  If it is not present then return
1284  * NULL
1285  */
1286 struct buffer_head *
1287 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1288 {
1289         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1290
1291         if (bh == NULL) {
1292                 bh = __find_get_block_slow(bdev, block);
1293                 if (bh)
1294                         bh_lru_install(bh);
1295         }
1296         if (bh)
1297                 touch_buffer(bh);
1298         return bh;
1299 }
1300 EXPORT_SYMBOL(__find_get_block);
1301
1302 /*
1303  * __getblk will locate (and, if necessary, create) the buffer_head
1304  * which corresponds to the passed block_device, block and size. The
1305  * returned buffer has its reference count incremented.
1306  *
1307  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1308  * illegal block number, __getblk() will happily return a buffer_head
1309  * which represents the non-existent block.  Very weird.
1310  *
1311  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1312  * attempt keeps failing.  FIXME, perhaps?
1313  */
1314 struct buffer_head *
1315 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1316 {
1317         struct buffer_head *bh = __find_get_block(bdev, block, size);
1318
1319         might_sleep();
1320         if (bh == NULL)
1321                 bh = __getblk_slow(bdev, block, size);
1322         return bh;
1323 }
1324 EXPORT_SYMBOL(__getblk);
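
/*
 * Illustrative sketch, not part of this file: the usual pattern for
 * overwriting a block without reading it first.  __getblk() does not fail,
 * but the buffer it returns need not be uptodate; the caller fills it,
 * marks it uptodate and dirty, and drops its reference.  The function name
 * is hypothetical; bdev, block and blocksize come from the caller.
 */
#if 0	/* example only - not compiled */
static void example_zero_block(struct block_device *bdev, sector_t block,
				unsigned blocksize)
{
	struct buffer_head *bh = __getblk(bdev, block, blocksize);

	lock_buffer(bh);
	memset(bh->b_data, 0, blocksize);
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);		/* writeback will pick it up later */
	unlock_buffer(bh);
	brelse(bh);
}
#endif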
1325
1326 /*
1327  * Do async read-ahead on a buffer..
1328  */
1329 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1330 {
1331         struct buffer_head *bh = __getblk(bdev, block, size);
1332         if (likely(bh)) {
1333                 ll_rw_block(READA, 1, &bh);
1334                 brelse(bh);
1335         }
1336 }
1337 EXPORT_SYMBOL(__breadahead);
1338
1339 /**
1340  *  __bread() - reads a specified block and returns the bh
1341  *  @bdev: the block_device to read from
1342  *  @block: number of block
1343  *  @size: size (in bytes) to read
1344  * 
1345  *  Reads a specified block, and returns buffer head that contains it.
1346  *  It returns NULL if the block was unreadable.
1347  */
1348 struct buffer_head *
1349 __bread(struct block_device *bdev, sector_t block, unsigned size)
1350 {
1351         struct buffer_head *bh = __getblk(bdev, block, size);
1352
1353         if (likely(bh) && !buffer_uptodate(bh))
1354                 bh = __bread_slow(bh);
1355         return bh;
1356 }
1357 EXPORT_SYMBOL(__bread);
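
/*
 * Illustrative sketch, not part of this file: reading one block with
 * __bread() while hinting the following block with __breadahead().  The
 * function name is hypothetical; it returns 0 on success or -EIO if the
 * block was unreadable.
 */
#if 0	/* example only - not compiled */
static int example_read_block(struct block_device *bdev, sector_t block,
				unsigned blocksize, void *buf)
{
	struct buffer_head *bh;

	__breadahead(bdev, block + 1, blocksize);	/* async read-ahead */
	bh = __bread(bdev, block, blocksize);
	if (!bh)
		return -EIO;
	memcpy(buf, bh->b_data, blocksize);
	brelse(bh);
	return 0;
}
#endif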
1358
1359 /*
1360  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1361  * This doesn't race because it runs in each cpu either in irq
1362  * or with preempt disabled.
1363  */
1364 static void invalidate_bh_lru(void *arg)
1365 {
1366         struct bh_lru *b = &get_cpu_var(bh_lrus);
1367         int i;
1368
1369         for (i = 0; i < BH_LRU_SIZE; i++) {
1370                 brelse(b->bhs[i]);
1371                 b->bhs[i] = NULL;
1372         }
1373         put_cpu_var(bh_lrus);
1374 }
1375         
1376 void invalidate_bh_lrus(void)
1377 {
1378         on_each_cpu(invalidate_bh_lru, NULL, 1);
1379 }
1380 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1381
1382 void set_bh_page(struct buffer_head *bh,
1383                 struct page *page, unsigned long offset)
1384 {
1385         bh->b_page = page;
1386         BUG_ON(offset >= PAGE_SIZE);
1387         if (PageHighMem(page))
1388                 /*
1389                  * This catches illegal uses and preserves the offset:
1390                  */
1391                 bh->b_data = (char *)(0 + offset);
1392         else
1393                 bh->b_data = page_address(page) + offset;
1394 }
1395 EXPORT_SYMBOL(set_bh_page);
1396
1397 /*
1398  * Called when truncating a buffer on a page completely.
1399  */
1400 static void discard_buffer(struct buffer_head * bh)
1401 {
1402         lock_buffer(bh);
1403         clear_buffer_dirty(bh);
1404         bh->b_bdev = NULL;
1405         clear_buffer_mapped(bh);
1406         clear_buffer_req(bh);
1407         clear_buffer_new(bh);
1408         clear_buffer_delay(bh);
1409         clear_buffer_unwritten(bh);
1410         unlock_buffer(bh);
1411 }
1412
1413 /**
1414  * block_invalidatepage - invalidate part or all of a buffer-backed page
1415  *
1416  * @page: the page which is affected
1417  * @offset: the index of the truncation point
1418  *
1419  * block_invalidatepage() is called when all or part of the page has become
1420  * invalidated by a truncate operation.
1421  *
1422  * block_invalidatepage() does not have to release all buffers, but it must
1423  * ensure that no dirty buffer is left outside @offset and that no I/O
1424  * is underway against any of the blocks which are outside the truncation
1425  * point.  Because the caller is about to free (and possibly reuse) those
1426  * blocks on-disk.
1427  */
1428 void block_invalidatepage(struct page *page, unsigned long offset)
1429 {
1430         struct buffer_head *head, *bh, *next;
1431         unsigned int curr_off = 0;
1432
1433         BUG_ON(!PageLocked(page));
1434         if (!page_has_buffers(page))
1435                 goto out;
1436
1437         head = page_buffers(page);
1438         bh = head;
1439         do {
1440                 unsigned int next_off = curr_off + bh->b_size;
1441                 next = bh->b_this_page;
1442
1443                 /*
1444                  * is this block fully invalidated?
1445                  */
1446                 if (offset <= curr_off)
1447                         discard_buffer(bh);
1448                 curr_off = next_off;
1449                 bh = next;
1450         } while (bh != head);
1451
1452         /*
1453          * We release buffers only if the entire page is being invalidated.
1454          * The get_block cached value has been unconditionally invalidated,
1455          * so real IO is not possible anymore.
1456          */
1457         if (offset == 0)
1458                 try_to_release_page(page, 0);
1459 out:
1460         return;
1461 }
1462 EXPORT_SYMBOL(block_invalidatepage);
1463
1464 /*
1465  * We attach and possibly dirty the buffers atomically wrt
1466  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1467  * is already excluded via the page lock.
1468  */
1469 void create_empty_buffers(struct page *page,
1470                         unsigned long blocksize, unsigned long b_state)
1471 {
1472         struct buffer_head *bh, *head, *tail;
1473
1474         head = alloc_page_buffers(page, blocksize, 1);
1475         bh = head;
1476         do {
1477                 bh->b_state |= b_state;
1478                 tail = bh;
1479                 bh = bh->b_this_page;
1480         } while (bh);
1481         tail->b_this_page = head;
1482
1483         spin_lock(&page->mapping->private_lock);
1484         if (PageUptodate(page) || PageDirty(page)) {
1485                 bh = head;
1486                 do {
1487                         if (PageDirty(page))
1488                                 set_buffer_dirty(bh);
1489                         if (PageUptodate(page))
1490                                 set_buffer_uptodate(bh);
1491                         bh = bh->b_this_page;
1492                 } while (bh != head);
1493         }
1494         attach_page_buffers(page, head);
1495         spin_unlock(&page->mapping->private_lock);
1496 }
1497 EXPORT_SYMBOL(create_empty_buffers);
1498
1499 /*
1500  * We are taking a block for data and we don't want any output from any
1501  * buffer-cache aliases starting from return from that function and
1502  * until the moment when something will explicitly mark the buffer
1503  * dirty (hopefully that will not happen until we will free that block ;-)
1504  * We don't even need to mark it not-uptodate - nobody can expect
1505  * anything from a newly allocated buffer anyway. We used to use
1506  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1507  * don't want to mark the alias unmapped, for example - it would confuse
1508  * anyone who might pick it with bread() afterwards...
1509  *
1510  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1511  * be writeout I/O going on against recently-freed buffers.  We don't
1512  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1513  * only if we really need to.  That happens here.
1514  */
1515 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1516 {
1517         struct buffer_head *old_bh;
1518
1519         might_sleep();
1520
1521         old_bh = __find_get_block_slow(bdev, block);
1522         if (old_bh) {
1523                 clear_buffer_dirty(old_bh);
1524                 wait_on_buffer(old_bh);
1525                 clear_buffer_req(old_bh);
1526                 __brelse(old_bh);
1527         }
1528 }
1529 EXPORT_SYMBOL(unmap_underlying_metadata);
1530
1531 /*
1532  * NOTE! All mapped/uptodate combinations are valid:
1533  *
1534  *      Mapped  Uptodate        Meaning
1535  *
1536  *      No      No              "unknown" - must do get_block()
1537  *      No      Yes             "hole" - zero-filled
1538  *      Yes     No              "allocated" - allocated on disk, not read in
1539  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1540  *
1541  * "Dirty" is valid only with the last case (mapped+uptodate).
1542  */
1543
1544 /*
1545  * While block_write_full_page is writing back the dirty buffers under
1546  * the page lock, whoever dirtied the buffers may decide to clean them
1547  * again at any time.  We handle that by only looking at the buffer
1548  * state inside lock_buffer().
1549  *
1550  * If block_write_full_page() is called for regular writeback
1551  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1552  * locked buffer.   This can only happen if someone has written the buffer
1553  * directly, with submit_bh().  At the address_space level PageWriteback
1554  * prevents this contention from occurring.
1555  */
1556 static int __block_write_full_page(struct inode *inode, struct page *page,
1557                         get_block_t *get_block, struct writeback_control *wbc)
1558 {
1559         int err;
1560         sector_t block;
1561         sector_t last_block;
1562         struct buffer_head *bh, *head;
1563         const unsigned blocksize = 1 << inode->i_blkbits;
1564         int nr_underway = 0;
1565
1566         BUG_ON(!PageLocked(page));
1567
1568         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1569
1570         if (!page_has_buffers(page)) {
1571                 create_empty_buffers(page, blocksize,
1572                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1573         }
1574
1575         /*
1576          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1577          * here, and the (potentially unmapped) buffers may become dirty at
1578          * any time.  If a buffer becomes dirty here after we've inspected it
1579          * then we just miss that fact, and the page stays dirty.
1580          *
1581          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1582          * handle that here by just cleaning them.
1583          */
1584
1585         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1586         head = page_buffers(page);
1587         bh = head;
1588
1589         /*
1590          * Get all the dirty buffers mapped to disk addresses and
1591          * handle any aliases from the underlying blockdev's mapping.
1592          */
1593         do {
1594                 if (block > last_block) {
1595                         /*
1596                          * mapped buffers outside i_size will occur, because
1597                          * this page can be outside i_size when there is a
1598                          * truncate in progress.
1599                          */
1600                         /*
1601                          * The buffer was zeroed by block_write_full_page()
1602                          */
1603                         clear_buffer_dirty(bh);
1604                         set_buffer_uptodate(bh);
1605                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1606                            buffer_dirty(bh)) {
1607                         WARN_ON(bh->b_size != blocksize);
1608                         err = get_block(inode, block, bh, 1);
1609                         if (err)
1610                                 goto recover;
1611                         clear_buffer_delay(bh);
1612                         if (buffer_new(bh)) {
1613                                 /* blockdev mappings never come here */
1614                                 clear_buffer_new(bh);
1615                                 unmap_underlying_metadata(bh->b_bdev,
1616                                                         bh->b_blocknr);
1617                         }
1618                 }
1619                 bh = bh->b_this_page;
1620                 block++;
1621         } while (bh != head);
1622
1623         do {
1624                 if (!buffer_mapped(bh))
1625                         continue;
1626                 /*
1627                  * If it's a fully non-blocking write attempt and we cannot
1628                  * lock the buffer then redirty the page.  Note that this can
1629                  * potentially cause a busy-wait loop from pdflush and kswapd
1630                  * activity, but those code paths have their own higher-level
1631                  * throttling.
1632                  */
1633                 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1634                         lock_buffer(bh);
1635                 } else if (!trylock_buffer(bh)) {
1636                         redirty_page_for_writepage(wbc, page);
1637                         continue;
1638                 }
1639                 if (test_clear_buffer_dirty(bh)) {
1640                         mark_buffer_async_write(bh);
1641                 } else {
1642                         unlock_buffer(bh);
1643                 }
1644         } while ((bh = bh->b_this_page) != head);
1645
1646         /*
1647          * The page and its buffers are protected by PageWriteback(), so we can
1648          * drop the bh refcounts early.
1649          */
1650         BUG_ON(PageWriteback(page));
1651         set_page_writeback(page);
1652
1653         do {
1654                 struct buffer_head *next = bh->b_this_page;
1655                 if (buffer_async_write(bh)) {
1656                         submit_bh(WRITE, bh);
1657                         nr_underway++;
1658                 }
1659                 bh = next;
1660         } while (bh != head);
1661         unlock_page(page);
1662
1663         err = 0;
1664 done:
1665         if (nr_underway == 0) {
1666                 /*
1667                  * The page was marked dirty, but the buffers were
1668                  * clean.  Someone wrote them back by hand with
1669                  * ll_rw_block/submit_bh.  A rare case.
1670                  */
1671                 end_page_writeback(page);
1672
1673                 /*
1674                  * The page and buffer_heads can be released at any time from
1675                  * here on.
1676                  */
1677         }
1678         return err;
1679
1680 recover:
1681         /*
1682          * ENOSPC, or some other error.  We may already have added some
1683          * blocks to the file, so we need to write these out to avoid
1684          * exposing stale data.
1685          * The page is currently locked and not marked for writeback
1686          */
1687         bh = head;
1688         /* Recovery: lock and submit the mapped buffers */
1689         do {
1690                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1691                     !buffer_delay(bh)) {
1692                         lock_buffer(bh);
1693                         mark_buffer_async_write(bh);
1694                 } else {
1695                         /*
1696                          * The buffer may have been set dirty during
1697                          * attachment to a dirty page.
1698                          */
1699                         clear_buffer_dirty(bh);
1700                 }
1701         } while ((bh = bh->b_this_page) != head);
1702         SetPageError(page);
1703         BUG_ON(PageWriteback(page));
1704         mapping_set_error(page->mapping, err);
1705         set_page_writeback(page);
1706         do {
1707                 struct buffer_head *next = bh->b_this_page;
1708                 if (buffer_async_write(bh)) {
1709                         clear_buffer_dirty(bh);
1710                         submit_bh(WRITE, bh);
1711                         nr_underway++;
1712                 }
1713                 bh = next;
1714         } while (bh != head);
1715         unlock_page(page);
1716         goto done;
1717 }
1718
1719 /*
1720  * If a page has any new buffers, zero them out here, and mark them uptodate
1721  * and dirty so they'll be written out (in order to prevent uninitialised
1722  * block data from leaking). And clear the new bit.
1723  */
1724 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1725 {
1726         unsigned int block_start, block_end;
1727         struct buffer_head *head, *bh;
1728
1729         BUG_ON(!PageLocked(page));
1730         if (!page_has_buffers(page))
1731                 return;
1732
1733         bh = head = page_buffers(page);
1734         block_start = 0;
1735         do {
1736                 block_end = block_start + bh->b_size;
1737
1738                 if (buffer_new(bh)) {
1739                         if (block_end > from && block_start < to) {
1740                                 if (!PageUptodate(page)) {
1741                                         unsigned start, size;
1742
1743                                         start = max(from, block_start);
1744                                         size = min(to, block_end) - start;
1745
1746                                         zero_user(page, start, size);
1747                                         set_buffer_uptodate(bh);
1748                                 }
1749
1750                                 clear_buffer_new(bh);
1751                                 mark_buffer_dirty(bh);
1752                         }
1753                 }
1754
1755                 block_start = block_end;
1756                 bh = bh->b_this_page;
1757         } while (bh != head);
1758 }
1759 EXPORT_SYMBOL(page_zero_new_buffers);
1760
1761 static int __block_prepare_write(struct inode *inode, struct page *page,
1762                 unsigned from, unsigned to, get_block_t *get_block)
1763 {
1764         unsigned block_start, block_end;
1765         sector_t block;
1766         int err = 0;
1767         unsigned blocksize, bbits;
1768         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1769
1770         BUG_ON(!PageLocked(page));
1771         BUG_ON(from > PAGE_CACHE_SIZE);
1772         BUG_ON(to > PAGE_CACHE_SIZE);
1773         BUG_ON(from > to);
1774
1775         blocksize = 1 << inode->i_blkbits;
1776         if (!page_has_buffers(page))
1777                 create_empty_buffers(page, blocksize, 0);
1778         head = page_buffers(page);
1779
1780         bbits = inode->i_blkbits;
1781         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1782
1783         for(bh = head, block_start = 0; bh != head || !block_start;
1784             block++, block_start=block_end, bh = bh->b_this_page) {
1785                 block_end = block_start + blocksize;
1786                 if (block_end <= from || block_start >= to) {
1787                         if (PageUptodate(page)) {
1788                                 if (!buffer_uptodate(bh))
1789                                         set_buffer_uptodate(bh);
1790                         }
1791                         continue;
1792                 }
1793                 if (buffer_new(bh))
1794                         clear_buffer_new(bh);
1795                 if (!buffer_mapped(bh)) {
1796                         WARN_ON(bh->b_size != blocksize);
1797                         err = get_block(inode, block, bh, 1);
1798                         if (err)
1799                                 break;
1800                         if (buffer_new(bh)) {
1801                                 unmap_underlying_metadata(bh->b_bdev,
1802                                                         bh->b_blocknr);
1803                                 if (PageUptodate(page)) {
1804                                         clear_buffer_new(bh);
1805                                         set_buffer_uptodate(bh);
1806                                         mark_buffer_dirty(bh);
1807                                         continue;
1808                                 }
1809                                 if (block_end > to || block_start < from)
1810                                         zero_user_segments(page,
1811                                                 to, block_end,
1812                                                 block_start, from);
1813                                 continue;
1814                         }
1815                 }
1816                 if (PageUptodate(page)) {
1817                         if (!buffer_uptodate(bh))
1818                                 set_buffer_uptodate(bh);
1819                         continue; 
1820                 }
1821                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1822                     !buffer_unwritten(bh) &&
1823                      (block_start < from || block_end > to)) {
1824                         ll_rw_block(READ, 1, &bh);
1825                         *wait_bh++=bh;
1826                 }
1827         }
1828         /*
1829          * If we issued read requests - let them complete.
1830          */
1831         while(wait_bh > wait) {
1832                 wait_on_buffer(*--wait_bh);
1833                 if (!buffer_uptodate(*wait_bh))
1834                         err = -EIO;
1835         }
1836         if (unlikely(err))
1837                 page_zero_new_buffers(page, from, to);
1838         return err;
1839 }
1840
1841 static int __block_commit_write(struct inode *inode, struct page *page,
1842                 unsigned from, unsigned to)
1843 {
1844         unsigned block_start, block_end;
1845         int partial = 0;
1846         unsigned blocksize;
1847         struct buffer_head *bh, *head;
1848
1849         blocksize = 1 << inode->i_blkbits;
1850
1851         for(bh = head = page_buffers(page), block_start = 0;
1852             bh != head || !block_start;
1853             block_start=block_end, bh = bh->b_this_page) {
1854                 block_end = block_start + blocksize;
1855                 if (block_end <= from || block_start >= to) {
1856                         if (!buffer_uptodate(bh))
1857                                 partial = 1;
1858                 } else {
1859                         set_buffer_uptodate(bh);
1860                         mark_buffer_dirty(bh);
1861                 }
1862                 clear_buffer_new(bh);
1863         }
1864
1865         /*
1866          * If this is a partial write which happened to make all buffers
1867          * uptodate then we can optimize away a bogus readpage() for
1868          * the next read(). Here we 'discover' whether the page went
1869          * uptodate as a result of this (potentially partial) write.
1870          */
1871         if (!partial)
1872                 SetPageUptodate(page);
1873         return 0;
1874 }
1875
1876 /*
1877  * block_write_begin takes care of the basic task of block allocation and
1878  * bringing partial write blocks uptodate first.
1879  *
1880  * If *pagep is not NULL, then block_write_begin uses the locked page
1881  * at *pagep rather than allocating its own. In this case, the page will
1882  * not be unlocked or deallocated on failure.
1883  */
1884 int block_write_begin(struct file *file, struct address_space *mapping,
1885                         loff_t pos, unsigned len, unsigned flags,
1886                         struct page **pagep, void **fsdata,
1887                         get_block_t *get_block)
1888 {
1889         struct inode *inode = mapping->host;
1890         int status = 0;
1891         struct page *page;
1892         pgoff_t index;
1893         unsigned start, end;
1894         int ownpage = 0;
1895
1896         index = pos >> PAGE_CACHE_SHIFT;
1897         start = pos & (PAGE_CACHE_SIZE - 1);
1898         end = start + len;
1899
1900         page = *pagep;
1901         if (page == NULL) {
1902                 ownpage = 1;
1903                 page = grab_cache_page_write_begin(mapping, index, flags);
1904                 if (!page) {
1905                         status = -ENOMEM;
1906                         goto out;
1907                 }
1908                 *pagep = page;
1909         } else
1910                 BUG_ON(!PageLocked(page));
1911
1912         status = __block_prepare_write(inode, page, start, end, get_block);
1913         if (unlikely(status)) {
1914                 ClearPageUptodate(page);
1915
1916                 if (ownpage) {
1917                         unlock_page(page);
1918                         page_cache_release(page);
1919                         *pagep = NULL;
1920
1921                         /*
1922                          * prepare_write() may have instantiated a few blocks
1923                          * outside i_size.  Trim these off again. Don't need
1924                          * i_size_read because we hold i_mutex.
1925                          */
1926                         if (pos + len > inode->i_size)
1927                                 vmtruncate(inode, inode->i_size);
1928                 }
1929         }
1930
1931 out:
1932         return status;
1933 }
1934 EXPORT_SYMBOL(block_write_begin);
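As an illustration of how this is typically consumed (an editor-added sketch, not part of this file): a filesystem's ->write_begin address_space operation can usually be a thin wrapper around block_write_begin(), passing its own get_block callback. The names myfs_write_begin and myfs_get_block below are hypothetical; myfs_get_block is assumed to have the standard get_block_t prototype.

/* Illustrative sketch only -- not part of fs/buffer.c. */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        /* NULL asks block_write_begin() to allocate and lock the page itself */
        *pagep = NULL;
        return block_write_begin(file, mapping, pos, len, flags,
                                 pagep, fsdata, myfs_get_block);
}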
1935
1936 int block_write_end(struct file *file, struct address_space *mapping,
1937                         loff_t pos, unsigned len, unsigned copied,
1938                         struct page *page, void *fsdata)
1939 {
1940         struct inode *inode = mapping->host;
1941         unsigned start;
1942
1943         start = pos & (PAGE_CACHE_SIZE - 1);
1944
1945         if (unlikely(copied < len)) {
1946                 /*
1947                  * The buffers that were written will now be uptodate, so we
1948                  * don't have to worry about a readpage reading them and
1949                  * overwriting a partial write. However if we have encountered
1950                  * a short write and only partially written into a buffer, it
1951                  * will not be marked uptodate, so a readpage might come in and
1952                  * destroy our partial write.
1953                  *
1954                  * Do the simplest thing, and just treat any short write to a
1955                  * non uptodate page as a zero-length write, and force the
1956                  * caller to redo the whole thing.
1957                  */
1958                 if (!PageUptodate(page))
1959                         copied = 0;
1960
1961                 page_zero_new_buffers(page, start+copied, start+len);
1962         }
1963         flush_dcache_page(page);
1964
1965         /* This could be a short (even 0-length) commit */
1966         __block_commit_write(inode, page, start, start+copied);
1967
1968         return copied;
1969 }
1970 EXPORT_SYMBOL(block_write_end);
1971
1972 int generic_write_end(struct file *file, struct address_space *mapping,
1973                         loff_t pos, unsigned len, unsigned copied,
1974                         struct page *page, void *fsdata)
1975 {
1976         struct inode *inode = mapping->host;
1977         int i_size_changed = 0;
1978
1979         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1980
1981         /*
1982          * No need to use i_size_read() here, the i_size
1983          * cannot change under us because we hold i_mutex.
1984          *
1985          * But it's important to update i_size while still holding page lock:
1986          * page writeout could otherwise come in and zero beyond i_size.
1987          */
1988         if (pos+copied > inode->i_size) {
1989                 i_size_write(inode, pos+copied);
1990                 i_size_changed = 1;
1991         }
1992
1993         unlock_page(page);
1994         page_cache_release(page);
1995
1996         /*
1997          * Don't mark the inode dirty under page lock. First, it unnecessarily
1998          * makes the holding time of page lock longer. Second, it forces lock
1999          * ordering of page lock and transaction start for journaling
2000          * filesystems.
2001          */
2002         if (i_size_changed)
2003                 mark_inode_dirty(inode);
2004
2005         return copied;
2006 }
2007 EXPORT_SYMBOL(generic_write_end);
2008
2009 /*
2010  * block_is_partially_uptodate checks whether buffers within a page are
2011  * uptodate or not.
2012  *
2013  * Returns true if all buffers which correspond to a file portion
2014  * we want to read are uptodate.
2015  */
2016 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2017                                         unsigned long from)
2018 {
2019         struct inode *inode = page->mapping->host;
2020         unsigned block_start, block_end, blocksize;
2021         unsigned to;
2022         struct buffer_head *bh, *head;
2023         int ret = 1;
2024
2025         if (!page_has_buffers(page))
2026                 return 0;
2027
2028         blocksize = 1 << inode->i_blkbits;
2029         to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2030         to = from + to;
2031         if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2032                 return 0;
2033
2034         head = page_buffers(page);
2035         bh = head;
2036         block_start = 0;
2037         do {
2038                 block_end = block_start + blocksize;
2039                 if (block_end > from && block_start < to) {
2040                         if (!buffer_uptodate(bh)) {
2041                                 ret = 0;
2042                                 break;
2043                         }
2044                         if (block_end >= to)
2045                                 break;
2046                 }
2047                 block_start = block_end;
2048                 bh = bh->b_this_page;
2049         } while (bh != head);
2050
2051         return ret;
2052 }
2053 EXPORT_SYMBOL(block_is_partially_uptodate);
2054
2055 /*
2056  * Generic "read page" function for block devices that have the normal
2057  * get_block functionality. This covers most block device filesystems.
2058  * Reads the page asynchronously --- the unlock_buffer() and
2059  * set/clear_buffer_uptodate() functions propagate buffer state into the
2060  * page struct once IO has completed.
2061  */
2062 int block_read_full_page(struct page *page, get_block_t *get_block)
2063 {
2064         struct inode *inode = page->mapping->host;
2065         sector_t iblock, lblock;
2066         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2067         unsigned int blocksize;
2068         int nr, i;
2069         int fully_mapped = 1;
2070
2071         BUG_ON(!PageLocked(page));
2072         blocksize = 1 << inode->i_blkbits;
2073         if (!page_has_buffers(page))
2074                 create_empty_buffers(page, blocksize, 0);
2075         head = page_buffers(page);
2076
2077         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2078         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2079         bh = head;
2080         nr = 0;
2081         i = 0;
2082
2083         do {
2084                 if (buffer_uptodate(bh))
2085                         continue;
2086
2087                 if (!buffer_mapped(bh)) {
2088                         int err = 0;
2089
2090                         fully_mapped = 0;
2091                         if (iblock < lblock) {
2092                                 WARN_ON(bh->b_size != blocksize);
2093                                 err = get_block(inode, iblock, bh, 0);
2094                                 if (err)
2095                                         SetPageError(page);
2096                         }
2097                         if (!buffer_mapped(bh)) {
2098                                 zero_user(page, i * blocksize, blocksize);
2099                                 if (!err)
2100                                         set_buffer_uptodate(bh);
2101                                 continue;
2102                         }
2103                         /*
2104                          * get_block() might have updated the buffer
2105                          * synchronously
2106                          */
2107                         if (buffer_uptodate(bh))
2108                                 continue;
2109                 }
2110                 arr[nr++] = bh;
2111         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2112
2113         if (fully_mapped)
2114                 SetPageMappedToDisk(page);
2115
2116         if (!nr) {
2117                 /*
2118                  * All buffers are uptodate - we can set the page uptodate
2119                  * as well. But not if get_block() returned an error.
2120                  */
2121                 if (!PageError(page))
2122                         SetPageUptodate(page);
2123                 unlock_page(page);
2124                 return 0;
2125         }
2126
2127         /* Stage two: lock the buffers */
2128         for (i = 0; i < nr; i++) {
2129                 bh = arr[i];
2130                 lock_buffer(bh);
2131                 mark_buffer_async_read(bh);
2132         }
2133
2134         /*
2135          * Stage 3: start the IO.  Check for uptodateness
2136          * inside the buffer lock in case another process reading
2137          * the underlying blockdev brought it uptodate (the sct fix).
2138          */
2139         for (i = 0; i < nr; i++) {
2140                 bh = arr[i];
2141                 if (buffer_uptodate(bh))
2142                         end_buffer_async_read(bh, 1);
2143                 else
2144                         submit_bh(READ, bh);
2145         }
2146         return 0;
2147 }
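For context (a hedged, editor-added sketch): filesystems and block-device mappings commonly implement their ->readpage operation as a one-line wrapper around block_read_full_page(), passing their get_block callback. myfs_readpage and myfs_get_block are hypothetical names.

/* Illustrative sketch only -- not part of fs/buffer.c. */
static int myfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, myfs_get_block);
}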
2148
2149 /* utility function for filesystems that need to do work on expanding
2150  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2151  * deal with the hole.  
2152  */
2153 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2154 {
2155         struct address_space *mapping = inode->i_mapping;
2156         struct page *page;
2157         void *fsdata;
2158         unsigned long limit;
2159         int err;
2160
2161         err = -EFBIG;
2162         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2163         if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2164                 send_sig(SIGXFSZ, current, 0);
2165                 goto out;
2166         }
2167         if (size > inode->i_sb->s_maxbytes)
2168                 goto out;
2169
2170         err = pagecache_write_begin(NULL, mapping, size, 0,
2171                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2172                                 &page, &fsdata);
2173         if (err)
2174                 goto out;
2175
2176         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2177         BUG_ON(err > 0);
2178
2179 out:
2180         return err;
2181 }
2182
2183 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2184                             loff_t pos, loff_t *bytes)
2185 {
2186         struct inode *inode = mapping->host;
2187         unsigned blocksize = 1 << inode->i_blkbits;
2188         struct page *page;
2189         void *fsdata;
2190         pgoff_t index, curidx;
2191         loff_t curpos;
2192         unsigned zerofrom, offset, len;
2193         int err = 0;
2194
2195         index = pos >> PAGE_CACHE_SHIFT;
2196         offset = pos & ~PAGE_CACHE_MASK;
2197
2198         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2199                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2200                 if (zerofrom & (blocksize-1)) {
2201                         *bytes |= (blocksize-1);
2202                         (*bytes)++;
2203                 }
2204                 len = PAGE_CACHE_SIZE - zerofrom;
2205
2206                 err = pagecache_write_begin(file, mapping, curpos, len,
2207                                                 AOP_FLAG_UNINTERRUPTIBLE,
2208                                                 &page, &fsdata);
2209                 if (err)
2210                         goto out;
2211                 zero_user(page, zerofrom, len);
2212                 err = pagecache_write_end(file, mapping, curpos, len, len,
2213                                                 page, fsdata);
2214                 if (err < 0)
2215                         goto out;
2216                 BUG_ON(err != len);
2217                 err = 0;
2218
2219                 balance_dirty_pages_ratelimited(mapping);
2220         }
2221
2222         /* page covers the boundary, find the boundary offset */
2223         if (index == curidx) {
2224                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2225                 /* if we are expanding the file, the last block will be filled */
2226                 if (offset <= zerofrom) {
2227                         goto out;
2228                 }
2229                 if (zerofrom & (blocksize-1)) {
2230                         *bytes |= (blocksize-1);
2231                         (*bytes)++;
2232                 }
2233                 len = offset - zerofrom;
2234
2235                 err = pagecache_write_begin(file, mapping, curpos, len,
2236                                                 AOP_FLAG_UNINTERRUPTIBLE,
2237                                                 &page, &fsdata);
2238                 if (err)
2239                         goto out;
2240                 zero_user(page, zerofrom, len);
2241                 err = pagecache_write_end(file, mapping, curpos, len, len,
2242                                                 page, fsdata);
2243                 if (err < 0)
2244                         goto out;
2245                 BUG_ON(err != len);
2246                 err = 0;
2247         }
2248 out:
2249         return err;
2250 }
2251
2252 /*
2253  * For moronic filesystems that do not allow holes in files.
2254  * We may have to extend the file.
2255  */
2256 int cont_write_begin(struct file *file, struct address_space *mapping,
2257                         loff_t pos, unsigned len, unsigned flags,
2258                         struct page **pagep, void **fsdata,
2259                         get_block_t *get_block, loff_t *bytes)
2260 {
2261         struct inode *inode = mapping->host;
2262         unsigned blocksize = 1 << inode->i_blkbits;
2263         unsigned zerofrom;
2264         int err;
2265
2266         err = cont_expand_zero(file, mapping, pos, bytes);
2267         if (err)
2268                 goto out;
2269
2270         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2271         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2272                 *bytes |= (blocksize-1);
2273                 (*bytes)++;
2274         }
2275
2276         *pagep = NULL;
2277         err = block_write_begin(file, mapping, pos, len,
2278                                 flags, pagep, fsdata, get_block);
2279 out:
2280         return err;
2281 }
2282
2283 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2284                         get_block_t *get_block)
2285 {
2286         struct inode *inode = page->mapping->host;
2287         int err = __block_prepare_write(inode, page, from, to, get_block);
2288         if (err)
2289                 ClearPageUptodate(page);
2290         return err;
2291 }
2292
2293 int block_commit_write(struct page *page, unsigned from, unsigned to)
2294 {
2295         struct inode *inode = page->mapping->host;
2296         __block_commit_write(inode,page,from,to);
2297         return 0;
2298 }
2299
2300 /*
2301  * block_page_mkwrite() is not allowed to change the file size as it gets
2302  * called from a page fault handler when a page is first dirtied. Hence we must
2303  * be careful to check for EOF conditions here. We set the page up correctly
2304  * for a written page which means we get ENOSPC checking when writing into
2305  * holes and correct delalloc and unwritten extent mapping on filesystems that
2306  * support these features.
2307  *
2308  * We are not allowed to take the i_mutex here so we have to play games to
2309  * protect against truncate races as the page could now be beyond EOF.  Because
2310  * vmtruncate() writes the inode size before removing pages, once we have the
2311  * page lock we can determine safely if the page is beyond EOF. If it is not
2312  * beyond EOF, then the page is guaranteed safe against truncation until we
2313  * unlock the page.
2314  */
2315 int
2316 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2317                    get_block_t get_block)
2318 {
2319         struct page *page = vmf->page;
2320         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2321         unsigned long end;
2322         loff_t size;
2323         int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2324
2325         lock_page(page);
2326         size = i_size_read(inode);
2327         if ((page->mapping != inode->i_mapping) ||
2328             (page_offset(page) > size)) {
2329                 /* page got truncated out from underneath us */
2330                 goto out_unlock;
2331         }
2332
2333         /* page is wholly or partially inside EOF */
2334         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2335                 end = size & ~PAGE_CACHE_MASK;
2336         else
2337                 end = PAGE_CACHE_SIZE;
2338
2339         ret = block_prepare_write(page, 0, end, get_block);
2340         if (!ret)
2341                 ret = block_commit_write(page, 0, end);
2342
2343         if (unlikely(ret)) {
2344                 if (ret == -ENOMEM)
2345                         ret = VM_FAULT_OOM;
2346                 else /* -ENOSPC, -EIO, etc */
2347                         ret = VM_FAULT_SIGBUS;
2348         }
2349
2350 out_unlock:
2351         unlock_page(page);
2352         return ret;
2353 }
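A sketch of the usual caller (editor-added, not part of this file): a filesystem that wants shared writable mmap with proper block allocation wraps block_page_mkwrite() in its ->page_mkwrite handler and installs it in the file's vm_operations_struct. myfs_page_mkwrite, myfs_file_vm_ops and myfs_get_block are hypothetical names.

/* Illustrative sketch only -- not part of fs/buffer.c. */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,        /* generic read fault path */
        .page_mkwrite   = myfs_page_mkwrite,
};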
2354
2355 /*
2356  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2357  * immediately, while under the page lock.  So it needs a special end_io
2358  * handler which does not touch the bh after unlocking it.
2359  */
2360 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2361 {
2362         __end_buffer_read_notouch(bh, uptodate);
2363 }
2364
2365 /*
2366  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2367  * the page (converting it to circular linked list and taking care of page
2368  * dirty races).
2369  */
2370 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2371 {
2372         struct buffer_head *bh;
2373
2374         BUG_ON(!PageLocked(page));
2375
2376         spin_lock(&page->mapping->private_lock);
2377         bh = head;
2378         do {
2379                 if (PageDirty(page))
2380                         set_buffer_dirty(bh);
2381                 if (!bh->b_this_page)
2382                         bh->b_this_page = head;
2383                 bh = bh->b_this_page;
2384         } while (bh != head);
2385         attach_page_buffers(page, head);
2386         spin_unlock(&page->mapping->private_lock);
2387 }
2388
2389 /*
2390  * On entry, the page is not uptodate at all.
2391  * On exit, the page is fully uptodate in the areas outside (from,to).
2392  */
2393 int nobh_write_begin(struct file *file, struct address_space *mapping,
2394                         loff_t pos, unsigned len, unsigned flags,
2395                         struct page **pagep, void **fsdata,
2396                         get_block_t *get_block)
2397 {
2398         struct inode *inode = mapping->host;
2399         const unsigned blkbits = inode->i_blkbits;
2400         const unsigned blocksize = 1 << blkbits;
2401         struct buffer_head *head, *bh;
2402         struct page *page;
2403         pgoff_t index;
2404         unsigned from, to;
2405         unsigned block_in_page;
2406         unsigned block_start, block_end;
2407         sector_t block_in_file;
2408         int nr_reads = 0;
2409         int ret = 0;
2410         int is_mapped_to_disk = 1;
2411
2412         index = pos >> PAGE_CACHE_SHIFT;
2413         from = pos & (PAGE_CACHE_SIZE - 1);
2414         to = from + len;
2415
2416         page = grab_cache_page_write_begin(mapping, index, flags);
2417         if (!page)
2418                 return -ENOMEM;
2419         *pagep = page;
2420         *fsdata = NULL;
2421
2422         if (page_has_buffers(page)) {
2423                 unlock_page(page);
2424                 page_cache_release(page);
2425                 *pagep = NULL;
2426                 return block_write_begin(file, mapping, pos, len, flags, pagep,
2427                                         fsdata, get_block);
2428         }
2429
2430         if (PageMappedToDisk(page))
2431                 return 0;
2432
2433         /*
2434          * Allocate buffers so that we can keep track of state, and potentially
2435          * attach them to the page if an error occurs. In the common case of
2436          * no error, they will just be freed again without ever being attached
2437          * to the page (which is all OK, because we're under the page lock).
2438          *
2439          * Be careful: the buffer linked list is a NULL terminated one, rather
2440          * than the circular one we're used to.
2441          */
2442         head = alloc_page_buffers(page, blocksize, 0);
2443         if (!head) {
2444                 ret = -ENOMEM;
2445                 goto out_release;
2446         }
2447
2448         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2449
2450         /*
2451          * We loop across all blocks in the page, whether or not they are
2452          * part of the affected region.  This is so we can discover if the
2453          * page is fully mapped-to-disk.
2454          */
2455         for (block_start = 0, block_in_page = 0, bh = head;
2456                   block_start < PAGE_CACHE_SIZE;
2457                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2458                 int create;
2459
2460                 block_end = block_start + blocksize;
2461                 bh->b_state = 0;
2462                 create = 1;
2463                 if (block_start >= to)
2464                         create = 0;
2465                 ret = get_block(inode, block_in_file + block_in_page,
2466                                         bh, create);
2467                 if (ret)
2468                         goto failed;
2469                 if (!buffer_mapped(bh))
2470                         is_mapped_to_disk = 0;
2471                 if (buffer_new(bh))
2472                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2473                 if (PageUptodate(page)) {
2474                         set_buffer_uptodate(bh);
2475                         continue;
2476                 }
2477                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2478                         zero_user_segments(page, block_start, from,
2479                                                         to, block_end);
2480                         continue;
2481                 }
2482                 if (buffer_uptodate(bh))
2483                         continue;       /* reiserfs does this */
2484                 if (block_start < from || block_end > to) {
2485                         lock_buffer(bh);
2486                         bh->b_end_io = end_buffer_read_nobh;
2487                         submit_bh(READ, bh);
2488                         nr_reads++;
2489                 }
2490         }
2491
2492         if (nr_reads) {
2493                 /*
2494                  * The page is locked, so these buffers are protected from
2495                  * any VM or truncate activity.  Hence we don't need to care
2496                  * for the buffer_head refcounts.
2497                  */
2498                 for (bh = head; bh; bh = bh->b_this_page) {
2499                         wait_on_buffer(bh);
2500                         if (!buffer_uptodate(bh))
2501                                 ret = -EIO;
2502                 }
2503                 if (ret)
2504                         goto failed;
2505         }
2506
2507         if (is_mapped_to_disk)
2508                 SetPageMappedToDisk(page);
2509
2510         *fsdata = head; /* to be released by nobh_write_end */
2511
2512         return 0;
2513
2514 failed:
2515         BUG_ON(!ret);
2516         /*
2517          * Error recovery is a bit difficult. We need to zero out blocks that
2518          * were newly allocated, and dirty them to ensure they get written out.
2519          * Buffers need to be attached to the page at this point, otherwise
2520          * the handling of potential IO errors during writeout would be hard
2521          * (could try doing synchronous writeout, but what if that fails too?)
2522          */
2523         attach_nobh_buffers(page, head);
2524         page_zero_new_buffers(page, from, to);
2525
2526 out_release:
2527         unlock_page(page);
2528         page_cache_release(page);
2529         *pagep = NULL;
2530
2531         if (pos + len > inode->i_size)
2532                 vmtruncate(inode, inode->i_size);
2533
2534         return ret;
2535 }
2536 EXPORT_SYMBOL(nobh_write_begin);
2537
2538 int nobh_write_end(struct file *file, struct address_space *mapping,
2539                         loff_t pos, unsigned len, unsigned copied,
2540                         struct page *page, void *fsdata)
2541 {
2542         struct inode *inode = page->mapping->host;
2543         struct buffer_head *head = fsdata;
2544         struct buffer_head *bh;
2545         BUG_ON(fsdata != NULL && page_has_buffers(page));
2546
2547         if (unlikely(copied < len) && head)
2548                 attach_nobh_buffers(page, head);
2549         if (page_has_buffers(page))
2550                 return generic_write_end(file, mapping, pos, len,
2551                                         copied, page, fsdata);
2552
2553         SetPageUptodate(page);
2554         set_page_dirty(page);
2555         if (pos+copied > inode->i_size) {
2556                 i_size_write(inode, pos+copied);
2557                 mark_inode_dirty(inode);
2558         }
2559
2560         unlock_page(page);
2561         page_cache_release(page);
2562
2563         while (head) {
2564                 bh = head;
2565                 head = head->b_this_page;
2566                 free_buffer_head(bh);
2567         }
2568
2569         return copied;
2570 }
2571 EXPORT_SYMBOL(nobh_write_end);
2572
2573 /*
2574  * nobh_writepage() - based on block_write_full_page() except
2575  * that it tries to operate without attaching bufferheads to
2576  * the page.
2577  */
2578 int nobh_writepage(struct page *page, get_block_t *get_block,
2579                         struct writeback_control *wbc)
2580 {
2581         struct inode * const inode = page->mapping->host;
2582         loff_t i_size = i_size_read(inode);
2583         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2584         unsigned offset;
2585         int ret;
2586
2587         /* Is the page fully inside i_size? */
2588         if (page->index < end_index)
2589                 goto out;
2590
2591         /* Is the page fully outside i_size? (truncate in progress) */
2592         offset = i_size & (PAGE_CACHE_SIZE-1);
2593         if (page->index >= end_index+1 || !offset) {
2594                 /*
2595                  * The page may have dirty, unmapped buffers.  For example,
2596                  * they may have been added in ext3_writepage().  Make them
2597                  * freeable here, so the page does not leak.
2598                  */
2599 #if 0
2600                 /* Not really sure about this  - do we need this ? */
2601                 if (page->mapping->a_ops->invalidatepage)
2602                         page->mapping->a_ops->invalidatepage(page, offset);
2603 #endif
2604                 unlock_page(page);
2605                 return 0; /* don't care */
2606         }
2607
2608         /*
2609          * The page straddles i_size.  It must be zeroed out on each and every
2610          * writepage invocation because it may be mmapped.  "A file is mapped
2611          * in multiples of the page size.  For a file that is not a multiple of
2612          * the  page size, the remaining memory is zeroed when mapped, and
2613          * writes to that region are not written out to the file."
2614          */
2615         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2616 out:
2617         ret = mpage_writepage(page, get_block, wbc);
2618         if (ret == -EAGAIN)
2619                 ret = __block_write_full_page(inode, page, get_block, wbc);
2620         return ret;
2621 }
2622 EXPORT_SYMBOL(nobh_writepage);
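To show how the nobh_* helpers fit together (a sketch under the assumption of a hypothetical filesystem "myfs"; myfs_get_block is again a hypothetical get_block_t callback): ->write_begin wraps nobh_write_begin() and ->writepage wraps nobh_writepage().

/* Illustrative sketch only -- not part of fs/buffer.c. */
static int myfs_nobh_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;
        return nobh_write_begin(file, mapping, pos, len, flags,
                                pagep, fsdata, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
                        struct writeback_control *wbc)
{
        return nobh_writepage(page, myfs_get_block, wbc);
}

The corresponding ->write_end would be nobh_write_end directly, which frees the buffer_head list handed back through *fsdata.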
2623
2624 int nobh_truncate_page(struct address_space *mapping,
2625                         loff_t from, get_block_t *get_block)
2626 {
2627         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2628         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2629         unsigned blocksize;
2630         sector_t iblock;
2631         unsigned length, pos;
2632         struct inode *inode = mapping->host;
2633         struct page *page;
2634         struct buffer_head map_bh;
2635         int err;
2636
2637         blocksize = 1 << inode->i_blkbits;
2638         length = offset & (blocksize - 1);
2639
2640         /* Block boundary? Nothing to do */
2641         if (!length)
2642                 return 0;
2643
2644         length = blocksize - length;
2645         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2646
2647         page = grab_cache_page(mapping, index);
2648         err = -ENOMEM;
2649         if (!page)
2650                 goto out;
2651
2652         if (page_has_buffers(page)) {
2653 has_buffers:
2654                 unlock_page(page);
2655                 page_cache_release(page);
2656                 return block_truncate_page(mapping, from, get_block);
2657         }
2658
2659         /* Find the buffer that contains "offset" */
2660         pos = blocksize;
2661         while (offset >= pos) {
2662                 iblock++;
2663                 pos += blocksize;
2664         }
2665
2666         err = get_block(inode, iblock, &map_bh, 0);
2667         if (err)
2668                 goto unlock;
2669         /* unmapped? It's a hole - nothing to do */
2670         if (!buffer_mapped(&map_bh))
2671                 goto unlock;
2672
2673         /* Ok, it's mapped. Make sure it's up-to-date */
2674         if (!PageUptodate(page)) {
2675                 err = mapping->a_ops->readpage(NULL, page);
2676                 if (err) {
2677                         page_cache_release(page);
2678                         goto out;
2679                 }
2680                 lock_page(page);
2681                 if (!PageUptodate(page)) {
2682                         err = -EIO;
2683                         goto unlock;
2684                 }
2685                 if (page_has_buffers(page))
2686                         goto has_buffers;
2687         }
2688         zero_user(page, offset, length);
2689         set_page_dirty(page);
2690         err = 0;
2691
2692 unlock:
2693         unlock_page(page);
2694         page_cache_release(page);
2695 out:
2696         return err;
2697 }
2698 EXPORT_SYMBOL(nobh_truncate_page);
2699
2700 int block_truncate_page(struct address_space *mapping,
2701                         loff_t from, get_block_t *get_block)
2702 {
2703         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2704         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2705         unsigned blocksize;
2706         sector_t iblock;
2707         unsigned length, pos;
2708         struct inode *inode = mapping->host;
2709         struct page *page;
2710         struct buffer_head *bh;
2711         int err;
2712
2713         blocksize = 1 << inode->i_blkbits;
2714         length = offset & (blocksize - 1);
2715
2716         /* Block boundary? Nothing to do */
2717         if (!length)
2718                 return 0;
2719
2720         length = blocksize - length;
2721         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2722         
2723         page = grab_cache_page(mapping, index);
2724         err = -ENOMEM;
2725         if (!page)
2726                 goto out;
2727
2728         if (!page_has_buffers(page))
2729                 create_empty_buffers(page, blocksize, 0);
2730
2731         /* Find the buffer that contains "offset" */
2732         bh = page_buffers(page);
2733         pos = blocksize;
2734         while (offset >= pos) {
2735                 bh = bh->b_this_page;
2736                 iblock++;
2737                 pos += blocksize;
2738         }
2739
2740         err = 0;
2741         if (!buffer_mapped(bh)) {
2742                 WARN_ON(bh->b_size != blocksize);
2743                 err = get_block(inode, iblock, bh, 0);
2744                 if (err)
2745                         goto unlock;
2746                 /* unmapped? It's a hole - nothing to do */
2747                 if (!buffer_mapped(bh))
2748                         goto unlock;
2749         }
2750
2751         /* Ok, it's mapped. Make sure it's up-to-date */
2752         if (PageUptodate(page))
2753                 set_buffer_uptodate(bh);
2754
2755         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2756                 err = -EIO;
2757                 ll_rw_block(READ, 1, &bh);
2758                 wait_on_buffer(bh);
2759                 /* Uhhuh. Read error. Complain and punt. */
2760                 if (!buffer_uptodate(bh))
2761                         goto unlock;
2762         }
2763
2764         zero_user(page, offset, length);
2765         mark_buffer_dirty(bh);
2766         err = 0;
2767
2768 unlock:
2769         unlock_page(page);
2770         page_cache_release(page);
2771 out:
2772         return err;
2773 }
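For orientation (a hedged, editor-added sketch): block_truncate_page() is typically called from a filesystem's truncate path to zero the tail of the new last block before the filesystem frees blocks beyond the new size. myfs_truncate and myfs_get_block are hypothetical names.

/* Illustrative sketch only -- not part of fs/buffer.c. */
static void myfs_truncate(struct inode *inode)
{
        /* zero the partial block at the new EOF so stale data is not exposed */
        block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);

        /* ... filesystem-specific freeing of blocks beyond i_size ... */
}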
2774
2775 /*
2776  * The generic ->writepage function for buffer-backed address_spaces
2777  */
2778 int block_write_full_page(struct page *page, get_block_t *get_block,
2779                         struct writeback_control *wbc)
2780 {
2781         struct inode * const inode = page->mapping->host;
2782         loff_t i_size = i_size_read(inode);
2783         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2784         unsigned offset;
2785
2786         /* Is the page fully inside i_size? */
2787         if (page->index < end_index)
2788                 return __block_write_full_page(inode, page, get_block, wbc);
2789
2790         /* Is the page fully outside i_size? (truncate in progress) */
2791         offset = i_size & (PAGE_CACHE_SIZE-1);
2792         if (page->index >= end_index+1 || !offset) {
2793                 /*
2794                  * The page may have dirty, unmapped buffers.  For example,
2795                  * they may have been added in ext3_writepage().  Make them
2796                  * freeable here, so the page does not leak.
2797                  */
2798                 do_invalidatepage(page, 0);
2799                 unlock_page(page);
2800                 return 0; /* don't care */
2801         }
2802
2803         /*
2804          * The page straddles i_size.  It must be zeroed out on each and every
2805  * writepage invocation because it may be mmapped.  "A file is mapped
2806          * in multiples of the page size.  For a file that is not a multiple of
2807          * the  page size, the remaining memory is zeroed when mapped, and
2808          * writes to that region are not written out to the file."
2809          */
2810         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2811         return __block_write_full_page(inode, page, get_block, wbc);
2812 }
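Typical consumer, sketched for illustration (not part of this file): a buffer-backed filesystem's ->writepage is usually just a wrapper that passes its get_block callback; names are hypothetical.

/* Illustrative sketch only -- not part of fs/buffer.c. */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, myfs_get_block, wbc);
}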
2813
2814 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2815                             get_block_t *get_block)
2816 {
2817         struct buffer_head tmp;
2818         struct inode *inode = mapping->host;
2819         tmp.b_state = 0;
2820         tmp.b_blocknr = 0;
2821         tmp.b_size = 1 << inode->i_blkbits;
2822         get_block(inode, block, &tmp, 0);
2823         return tmp.b_blocknr;
2824 }
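Tying the helpers above together (an editor-added sketch with hypothetical myfs_* names): ->bmap is normally a one-line wrapper around generic_block_bmap(), and the wrappers sketched earlier plug into a single address_space_operations table, with generic_write_end used directly as ->write_end.

/* Illustrative sketch only -- not part of fs/buffer.c. */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
        .readpage       = myfs_readpage,
        .writepage      = myfs_writepage,
        .write_begin    = myfs_write_begin,
        .write_end      = generic_write_end,
        .bmap           = myfs_bmap,
};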
2825
2826 static void end_bio_bh_io_sync(struct bio *bio, int err)
2827 {
2828         struct buffer_head *bh = bio->bi_private;
2829
2830         if (err == -EOPNOTSUPP) {
2831                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2832                 set_bit(BH_Eopnotsupp, &bh->b_state);
2833         }
2834
2835         if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2836                 set_bit(BH_Quiet, &bh->b_state);
2837
2838         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2839         bio_put(bio);
2840 }
2841
2842 int submit_bh(int rw, struct buffer_head * bh)
2843 {
2844         struct bio *bio;
2845         int ret = 0;
2846
2847         BUG_ON(!buffer_locked(bh));
2848         BUG_ON(!buffer_mapped(bh));
2849         BUG_ON(!bh->b_end_io);
2850
2851         /*
2852          * Mask in barrier bit for a write (could be either a WRITE or a
2853  * WRITE_SYNC).
2854          */
2855         if (buffer_ordered(bh) && (rw & WRITE))
2856                 rw |= WRITE_BARRIER;
2857
2858         /*
2859          * Only clear out a write error when rewriting
2860          */
2861         if (test_set_buffer_req(bh) && (rw & WRITE))
2862                 clear_buffer_write_io_error(bh);
2863
2864         /*
2865          * from here on down, it's all bio -- do the initial mapping,
2866          * submit_bio -> generic_make_request may further map this bio around
2867          */
2868         bio = bio_alloc(GFP_NOIO, 1);
2869
2870         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2871         bio->bi_bdev = bh->b_bdev;
2872         bio->bi_io_vec[0].bv_page = bh->b_page;
2873         bio->bi_io_vec[0].bv_len = bh->b_size;
2874         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2875
2876         bio->bi_vcnt = 1;
2877         bio->bi_idx = 0;
2878         bio->bi_size = bh->b_size;
2879
2880         bio->bi_end_io = end_bio_bh_io_sync;
2881         bio->bi_private = bh;
2882
2883         bio_get(bio);
2884         submit_bio(rw, bio);
2885
2886         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2887                 ret = -EOPNOTSUPP;
2888
2889         bio_put(bio);
2890         return ret;
2891 }
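To make the submit_bh() contract concrete (a hedged sketch, not part of this file): the caller must hold the buffer lock, set a completion handler and hold a reference before submitting. The synchronous read below uses end_buffer_read_sync (defined earlier in this file); myfs_read_bh_sync and its arguments are hypothetical.

/* Illustrative sketch only -- not part of fs/buffer.c. */
static int myfs_read_bh_sync(struct super_block *sb, sector_t blocknr,
                             struct buffer_head **out)
{
        struct buffer_head *bh = sb_getblk(sb, blocknr);

        if (!bh)
                return -ENOMEM;
        if (!buffer_uptodate(bh)) {
                lock_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        get_bh(bh);     /* reference dropped by end_buffer_read_sync */
                        bh->b_end_io = end_buffer_read_sync;
                        submit_bh(READ, bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh)) {
                                brelse(bh);
                                return -EIO;
                        }
                } else
                        unlock_buffer(bh);
        }
        *out = bh;
        return 0;
}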
2892
2893 /**
2894  * ll_rw_block: low-level access to block devices (DEPRECATED)
2895  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2896  * @nr: number of &struct buffer_heads in the array
2897  * @bhs: array of pointers to &struct buffer_head
2898  *
2899  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2900  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2901  * option, %SWRITE, is like %WRITE except that it makes sure the *current*
2902  * data in the buffers is sent to disk. The fourth %READA option is described
2903  * in the documentation for generic_make_request(), which ll_rw_block() calls.
2904  *
2905  * This function drops any buffer that it cannot get a lock on (with the
2906  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2907  * clean when doing a write request, and any buffer that appears to be
2908  * up-to-date when doing a read request.  Further, it marks as clean buffers
2909  * that are processed for writing (the buffer cache won't assume that they are
2910  * actually clean until the buffer gets unlocked).
2911  *
2912  * ll_rw_block() sets b_end_io to a simple completion handler that marks
2913  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2914  * any waiters.
2915  *
2916  * All of the buffers must be for the same device, and must also be a
2917  * multiple of the current approved size for the device.
2918  */
2919 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2920 {
2921         int i;
2922
2923         for (i = 0; i < nr; i++) {
2924                 struct buffer_head *bh = bhs[i];
2925
2926                 if (rw == SWRITE || rw == SWRITE_SYNC)
2927                         lock_buffer(bh);
2928                 else if (!trylock_buffer(bh))
2929                         continue;
2930
2931                 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
2932                         if (test_clear_buffer_dirty(bh)) {
2933                                 bh->b_end_io = end_buffer_write_sync;
2934                                 get_bh(bh);
2935                                 if (rw == SWRITE_SYNC)
2936                                         submit_bh(WRITE_SYNC, bh);
2937                                 else
2938                                         submit_bh(WRITE, bh);
2939                                 continue;
2940                         }
2941                 } else {
2942                         if (!buffer_uptodate(bh)) {
2943                                 bh->b_end_io = end_buffer_read_sync;
2944                                 get_bh(bh);
2945                                 submit_bh(rw, bh);
2946                                 continue;
2947                         }
2948                 }
2949                 unlock_buffer(bh);
2950         }
2951 }
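/*
 * Illustrative sketch (an assumption, not taken from a real caller): a
 * filesystem reading ahead a batch of metadata buffers might use
 * ll_rw_block() like this and then wait for the buffers it actually needs:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			... handle the read error ...
 *	}
 *
 * Because ll_rw_block() silently skips buffers it cannot lock (except for
 * %SWRITE) or that are already up to date, callers needing data integrity
 * are better served by sync_dirty_buffer() or submit_bh() directly.
 */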
2952
2953 /*
2954  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2955  * and then start new I/O and then wait upon it.  The caller must have a ref on
2956  * the buffer_head.
2957  */
2958 int sync_dirty_buffer(struct buffer_head *bh)
2959 {
2960         int ret = 0;
2961
2962         WARN_ON(atomic_read(&bh->b_count) < 1);
2963         lock_buffer(bh);
2964         if (test_clear_buffer_dirty(bh)) {
2965                 get_bh(bh);
2966                 bh->b_end_io = end_buffer_write_sync;
2967                 ret = submit_bh(WRITE, bh);
2968                 wait_on_buffer(bh);
2969                 if (buffer_eopnotsupp(bh)) {
2970                         clear_buffer_eopnotsupp(bh);
2971                         ret = -EOPNOTSUPP;
2972                 }
2973                 if (!ret && !buffer_uptodate(bh))
2974                         ret = -EIO;
2975         } else {
2976                 unlock_buffer(bh);
2977         }
2978         return ret;
2979 }
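/*
 * Illustrative sketch (assumption): a typical data-integrity caller marks
 * the buffer dirty and then forces it out synchronously, holding its own
 * reference across the call:
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		... the write failed or the barrier was not supported ...
 *	brelse(bh);
 */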
2980
2981 /*
2982  * try_to_free_buffers() checks if all the buffers on this particular page
2983  * are unused, and releases them if so.
2984  *
2985  * Exclusion against try_to_free_buffers may be obtained by either
2986  * locking the page or by holding its mapping's private_lock.
2987  *
2988  * If the page is dirty but all the buffers are clean then we need to
2989  * be sure to mark the page clean as well.  This is because the page
2990  * may be against a block device, and a later reattachment of buffers
2991  * to a dirty page will set *all* buffers dirty.  Which would corrupt
2992  * filesystem data on the same device.
2993  *
2994  * The same applies to regular filesystem pages: if all the buffers are
2995  * clean then we set the page clean and proceed.  To do that, we require
2996  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2997  * private_lock.
2998  *
2999  * try_to_free_buffers() is non-blocking.
3000  */
3001 static inline int buffer_busy(struct buffer_head *bh)
3002 {
3003         return atomic_read(&bh->b_count) |
3004                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3005 }
3006
3007 static int
3008 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3009 {
3010         struct buffer_head *head = page_buffers(page);
3011         struct buffer_head *bh;
3012
3013         bh = head;
3014         do {
3015                 if (buffer_write_io_error(bh) && page->mapping)
3016                         set_bit(AS_EIO, &page->mapping->flags);
3017                 if (buffer_busy(bh))
3018                         goto failed;
3019                 bh = bh->b_this_page;
3020         } while (bh != head);
3021
3022         do {
3023                 struct buffer_head *next = bh->b_this_page;
3024
3025                 if (bh->b_assoc_map)
3026                         __remove_assoc_queue(bh);
3027                 bh = next;
3028         } while (bh != head);
3029         *buffers_to_free = head;
3030         __clear_page_buffers(page);
3031         return 1;
3032 failed:
3033         return 0;
3034 }
3035
3036 int try_to_free_buffers(struct page *page)
3037 {
3038         struct address_space * const mapping = page->mapping;
3039         struct buffer_head *buffers_to_free = NULL;
3040         int ret = 0;
3041
3042         BUG_ON(!PageLocked(page));
3043         if (PageWriteback(page))
3044                 return 0;
3045
3046         if (mapping == NULL) {          /* can this still happen? */
3047                 ret = drop_buffers(page, &buffers_to_free);
3048                 goto out;
3049         }
3050
3051         spin_lock(&mapping->private_lock);
3052         ret = drop_buffers(page, &buffers_to_free);
3053
3054         /*
3055          * If the filesystem writes its buffers by hand (eg ext3)
3056          * then we can have clean buffers against a dirty page.  We
3057          * clean the page here; otherwise the VM will never notice
3058          * that the filesystem did any IO at all.
3059          *
3060          * Also, during truncate, discard_buffer will have marked all
3061          * the page's buffers clean.  We discover that here and clean
3062          * the page also.
3063          *
3064          * private_lock must be held over this entire operation in order
3065          * to synchronise against __set_page_dirty_buffers and prevent the
3066          * dirty bit from being lost.
3067          */
3068         if (ret)
3069                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3070         spin_unlock(&mapping->private_lock);
3071 out:
3072         if (buffers_to_free) {
3073                 struct buffer_head *bh = buffers_to_free;
3074
3075                 do {
3076                         struct buffer_head *next = bh->b_this_page;
3077                         free_buffer_head(bh);
3078                         bh = next;
3079                 } while (bh != buffers_to_free);
3080         }
3081         return ret;
3082 }
3083 EXPORT_SYMBOL(try_to_free_buffers);
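/*
 * Illustrative sketch (assumption): callers must hold the page lock (or the
 * mapping's private_lock) for exclusion, as described above, e.g.:
 *
 *	lock_page(page);
 *	if (page_has_buffers(page) && try_to_free_buffers(page))
 *		... the buffers are gone and the page is now clean ...
 *	unlock_page(page);
 *
 * In practice most callers are believed to reach this through
 * try_to_release_page() with the page already locked.
 */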
3084
3085 void block_sync_page(struct page *page)
3086 {
3087         struct address_space *mapping;
3088
3089         smp_mb();
3090         mapping = page_mapping(page);
3091         if (mapping)
3092                 blk_run_backing_dev(mapping->backing_dev_info, page);
3093 }
3094
3095 /*
3096  * There are no bdflush tunables left.  But distributions are
3097  * still running obsolete flush daemons, so we terminate them here.
3098  *
3099  * Use of bdflush() is deprecated and will be removed in a future kernel.
3100  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3101  */
3102 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3103 {
3104         static int msg_count;
3105
3106         if (!capable(CAP_SYS_ADMIN))
3107                 return -EPERM;
3108
3109         if (msg_count < 5) {
3110                 msg_count++;
3111                 printk(KERN_INFO
3112                         "warning: process `%s' used the obsolete bdflush"
3113                         " system call\n", current->comm);
3114                 printk(KERN_INFO "Fix your initscripts?\n");
3115         }
3116
3117         if (func == 1)
3118                 do_exit(0);
3119         return 0;
3120 }
3121
3122 /*
3123  * Buffer-head allocation
3124  */
3125 static struct kmem_cache *bh_cachep;
3126
3127 /*
3128  * Once the number of bh's in the machine exceeds this level, we start
3129  * stripping them in writeback.
3130  */
3131 static int max_buffer_heads;
3132
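/*
 * Set by recalc_bh_state() below.  Memory reclaim and writeback are
 * understood to check this flag and start stripping buffer heads from
 * clean pages once the limit above is exceeded.
 */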
3133 int buffer_heads_over_limit;
3134
3135 struct bh_accounting {
3136         int nr;                 /* Number of live bh's */
3137         int ratelimit;          /* Limit cacheline bouncing */
3138 };
3139
3140 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3141
3142 static void recalc_bh_state(void)
3143 {
3144         int i;
3145         int tot = 0;
3146
3147         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3148                 return;
3149         __get_cpu_var(bh_accounting).ratelimit = 0;
3150         for_each_online_cpu(i)
3151                 tot += per_cpu(bh_accounting, i).nr;
3152         buffer_heads_over_limit = (tot > max_buffer_heads);
3153 }
3154
3155 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3156 {
3157         struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3158         if (ret) {
3159                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3160                 get_cpu_var(bh_accounting).nr++;
3161                 recalc_bh_state();
3162                 put_cpu_var(bh_accounting);
3163         }
3164         return ret;
3165 }
3166 EXPORT_SYMBOL(alloc_buffer_head);
3167
3168 void free_buffer_head(struct buffer_head *bh)
3169 {
3170         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3171         kmem_cache_free(bh_cachep, bh);
3172         get_cpu_var(bh_accounting).nr--;
3173         recalc_bh_state();
3174         put_cpu_var(bh_accounting);
3175 }
3176 EXPORT_SYMBOL(free_buffer_head);
3177
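/*
 * CPU hotplug: when a CPU goes away, drop its per-cpu bh LRU references and
 * fold its bh_accounting count into the current CPU's, so no buffer heads
 * disappear from the global total.
 */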
3178 static void buffer_exit_cpu(int cpu)
3179 {
3180         int i;
3181         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3182
3183         for (i = 0; i < BH_LRU_SIZE; i++) {
3184                 brelse(b->bhs[i]);
3185                 b->bhs[i] = NULL;
3186         }
3187         get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3188         per_cpu(bh_accounting, cpu).nr = 0;
3189         put_cpu_var(bh_accounting);
3190 }
3191
3192 static int buffer_cpu_notify(struct notifier_block *self,
3193                               unsigned long action, void *hcpu)
3194 {
3195         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3196                 buffer_exit_cpu((unsigned long)hcpu);
3197         return NOTIFY_OK;
3198 }
3199
3200 /**
3201  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3202  * @bh: struct buffer_head
3203  *
3204  * Return true if the buffer is up-to-date and false,
3205  * with the buffer locked, if not.
3206  */
3207 int bh_uptodate_or_lock(struct buffer_head *bh)
3208 {
3209         if (!buffer_uptodate(bh)) {
3210                 lock_buffer(bh);
3211                 if (!buffer_uptodate(bh))
3212                         return 0;
3213                 unlock_buffer(bh);
3214         }
3215         return 1;
3216 }
3217 EXPORT_SYMBOL(bh_uptodate_or_lock);
3218
3219 /**
3220  * bh_submit_read - Submit a locked buffer for reading
3221  * @bh: struct buffer_head
3222  *
3223  * Returns zero on success and -EIO on error.
3224  */
3225 int bh_submit_read(struct buffer_head *bh)
3226 {
3227         BUG_ON(!buffer_locked(bh));
3228
3229         if (buffer_uptodate(bh)) {
3230                 unlock_buffer(bh);
3231                 return 0;
3232         }
3233
3234         get_bh(bh);
3235         bh->b_end_io = end_buffer_read_sync;
3236         submit_bh(READ, bh);
3237         wait_on_buffer(bh);
3238         if (buffer_uptodate(bh))
3239                 return 0;
3240         return -EIO;
3241 }
3242 EXPORT_SYMBOL(bh_submit_read);
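/*
 * Illustrative sketch (assumption): bh_uptodate_or_lock() and
 * bh_submit_read() are meant to be used together, so a caller that wants a
 * buffer's contents can do:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		if (bh_submit_read(bh))
 *			... the read failed ...
 *	}
 *	... on the success paths, bh is up to date and unlocked here ...
 */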
3243
3244 static void
3245 init_buffer_head(void *data)
3246 {
3247         struct buffer_head *bh = data;
3248
3249         memset(bh, 0, sizeof(*bh));
3250         INIT_LIST_HEAD(&bh->b_assoc_buffers);
3251 }
3252
3253 void __init buffer_init(void)
3254 {
3255         int nrpages;
3256
3257         bh_cachep = kmem_cache_create("buffer_head",
3258                         sizeof(struct buffer_head), 0,
3259                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3260                                 SLAB_MEM_SPREAD),
3261                                 init_buffer_head);
3262
3263         /*
3264          * Limit the bh occupancy to 10% of ZONE_NORMAL
3265          */
3266         nrpages = (nr_free_buffer_pages() * 10) / 100;
3267         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
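	/*
	 * Worked example (assumed numbers): with 4K pages and a struct
	 * buffer_head of roughly 100 bytes, each page of bh storage holds
	 * about 40 buffer heads.  On a machine with ~1 GiB of ZONE_NORMAL
	 * (~262144 pages), nrpages is ~26214 and max_buffer_heads comes to
	 * roughly one million.  The exact figures depend on the
	 * architecture and configuration.
	 */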
3268         hotcpu_notifier(buffer_cpu_notify, 0);
3269 }
3270
3271 EXPORT_SYMBOL(__bforget);
3272 EXPORT_SYMBOL(__brelse);
3273 EXPORT_SYMBOL(__wait_on_buffer);
3274 EXPORT_SYMBOL(block_commit_write);
3275 EXPORT_SYMBOL(block_prepare_write);
3276 EXPORT_SYMBOL(block_page_mkwrite);
3277 EXPORT_SYMBOL(block_read_full_page);
3278 EXPORT_SYMBOL(block_sync_page);
3279 EXPORT_SYMBOL(block_truncate_page);
3280 EXPORT_SYMBOL(block_write_full_page);
3281 EXPORT_SYMBOL(cont_write_begin);
3282 EXPORT_SYMBOL(end_buffer_read_sync);
3283 EXPORT_SYMBOL(end_buffer_write_sync);
3284 EXPORT_SYMBOL(file_fsync);
3285 EXPORT_SYMBOL(fsync_bdev);
3286 EXPORT_SYMBOL(generic_block_bmap);
3287 EXPORT_SYMBOL(generic_cont_expand_simple);
3288 EXPORT_SYMBOL(init_buffer);
3289 EXPORT_SYMBOL(invalidate_bdev);
3290 EXPORT_SYMBOL(ll_rw_block);
3291 EXPORT_SYMBOL(mark_buffer_dirty);
3292 EXPORT_SYMBOL(submit_bh);
3293 EXPORT_SYMBOL(sync_dirty_buffer);
3294 EXPORT_SYMBOL(unlock_buffer);