/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52         bh->b_end_io = handler;
53         bh->b_private = private;
54 }
55
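/*
 * Action routine for the wait_on_bit*() calls below: kick any I/O queued
 * for the buffer's device, then let the caller sleep until whoever holds
 * BH_Lock wakes us via unlock_buffer().
 */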
static int sync_buffer(void *word)
{
        struct block_device *bd;
        struct buffer_head *bh
                = container_of(word, struct buffer_head, b_state);

        smp_mb();
        bd = bh->b_bdev;
        if (bd)
                blk_run_address_space(bd->bd_inode->i_mapping);
        io_schedule();
        return 0;
}

void __lock_buffer(struct buffer_head *bh)
{
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
        smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}

static void
__clear_page_buffers(struct page *page)
{
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                /* This happens due to failed READA attempts. */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
                }
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        int ret = 0;

        if (bdev)
                ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = fsync_super(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}
191 /**
192  * freeze_bdev  --  lock a filesystem and force it into a consistent state
193  * @bdev:       blockdevice to lock
194  *
195  * This takes the block device bd_mount_sem to make sure no new mounts
196  * happen on bdev until thaw_bdev() is called.
197  * If a superblock is found on this device, we take the s_umount semaphore
198  * on it to make sure nobody unmounts until the snapshot creation is done.
199  */
200 struct super_block *freeze_bdev(struct block_device *bdev)
201 {
202         struct super_block *sb;
203
204         down(&bdev->bd_mount_sem);
205         sb = get_super(bdev);
206         if (sb && !(sb->s_flags & MS_RDONLY)) {
207                 sb->s_frozen = SB_FREEZE_WRITE;
208                 smp_wmb();
209
210                 __fsync_super(sb);
211
212                 sb->s_frozen = SB_FREEZE_TRANS;
213                 smp_wmb();
214
215                 sync_blockdev(sb->s_bdev);
216
217                 if (sb->s_op->write_super_lockfs)
218                         sb->s_op->write_super_lockfs(sb);
219         }
220
221         sync_blockdev(bdev);
222         return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
223 }
224 EXPORT_SYMBOL(freeze_bdev);
225
226 /**
227  * thaw_bdev  -- unlock filesystem
228  * @bdev:       blockdevice to unlock
229  * @sb:         associated superblock
230  *
231  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
232  */
233 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
234 {
235         if (sb) {
236                 BUG_ON(sb->s_bdev != bdev);
237
238                 if (sb->s_op->unlockfs)
239                         sb->s_op->unlockfs(sb);
240                 sb->s_frozen = SB_UNFROZEN;
241                 smp_wmb();
242                 wake_up(&sb->s_wait_unfrozen);
243                 drop_super(sb);
244         }
245
246         up(&bdev->bd_mount_sem);
247 }
248 EXPORT_SYMBOL(thaw_bdev);
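
/*
 * Illustrative usage sketch (not part of this file): a snapshot driver
 * would bracket snapshot creation with these helpers; the device is
 * guaranteed quiescent between the two calls.  my_create_snapshot() is
 * a hypothetical callback.
 *
 *      struct super_block *sb = freeze_bdev(bdev);
 *      my_create_snapshot(bdev);
 *      thaw_bdev(bdev, sb);
 */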

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
        struct buffer_head *ret = NULL;
        pgoff_t index;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;

        index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page(bd_mapping, index);
        if (!page)
                goto out;

        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out_unlock;
        head = page_buffers(page);
        bh = head;
        do {
                if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                if (!buffer_mapped(bh))
                        all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);

        /* we might be here because some of the buffers on this page are
         * not mapped.  This is due to various races between
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
                        (unsigned long long)block,
                        (unsigned long long)bh->b_blocknr);
                printk("b_state=0x%08lx, b_size=%zu\n",
                        bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
out_unlock:
        spin_unlock(&bd_mapping->private_lock);
        page_cache_release(page);
out:
        return ret;
}

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example, ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even
   though there was still dirty data not yet synced to disk (due to a bug
   in the device driver or to a user error), by not destroying the dirty
   buffers we could generate corruption on the next medium inserted as
   well.  Thus a parameter is necessary to handle this case in the safest
   way possible (trying not to corrupt the newly inserted disk with data
   belonging to the old, now-corrupted one).  Also, for a ramdisk the
   natural way to release its memory is to destroy the dirty buffers.

   These are two special cases. Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update'
   case has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive, so we can do it only
   when we assume nobody is changing the buffercache under our I/O and
   when we think the disk contains more recent information than the
   buffercache.  The update == 1 pass marks the buffers we need to update;
   the update == 2 pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0)
                return;

        invalidate_bh_lrus();
        invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
        struct zone *zone;
        int nid;

        wakeup_pdflush(1024);
        yield();

        for_each_online_node(nid) {
                (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
                                                gfp_zone(GFP_NOFS), NULL,
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
                                                GFP_NOFS);
        }
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;

        BUG_ON(!buffer_async_read(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
                if (printk_ratelimit())
                        buffer_io_error(bh);
                SetPageError(page);
        }

        /*
         * Be _very_ careful from here on. Bad things can happen if
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
        do {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);

        /*
         * If none of the buffers had errors and they are all
         * uptodate then we can set the page uptodate.
         */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];
        unsigned long flags;
        struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;

        BUG_ON(!buffer_async_write(bh));

        page = bh->b_page;
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (printk_ratelimit()) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }

        first = page_buffers(page);
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (buffer_async_write(tmp)) {
                        BUG_ON(!buffer_locked(tmp));
                        goto still_busy;
                }
                tmp = tmp->b_this_page;
        }
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        end_page_writeback(page);
        return;

still_busy:
        bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
        local_irq_restore(flags);
        return;
}

/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_read;
        set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
        bh->b_end_io = end_buffer_async_write;
        set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
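
/*
 * Illustrative sketch (condensed; the full logic lives in the
 * block_write_full_page() path): a writeout routine walks a page's buffer
 * ring, marking each dirty buffer for async completion before submitting
 * it, so end_buffer_async_write() can tell when the whole page is done.
 *
 *      do {
 *              if (test_clear_buffer_dirty(bh))
 *                      mark_buffer_async_write(bh);
 *              else
 *                      unlock_buffer(bh);
 *      } while ((bh = bh->b_this_page) != head);
 */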

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
        if (buffer_write_io_error(bh))
                set_bit(AS_EIO, &bh->b_assoc_map->flags);
        bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
        return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head *p;
        int err = 0;

        spin_lock(lock);
repeat:
        list_for_each_prev(p, list) {
                bh = BH_ENTRY(p);
                if (buffer_locked(bh)) {
                        get_bh(bh);
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                err = -EIO;
                        brelse(bh);
                        spin_lock(lock);
                        goto repeat;
                }
        }
        spin_unlock(lock);
        return err;
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
        struct address_space *buffer_mapping = mapping->assoc_mapping;

        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;

        return fsync_buffers_list(&buffer_mapping->private_lock,
                                        &mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
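
/*
 * Illustrative sketch (not part of this file): a minimal ->fsync() for a
 * buffer-backed filesystem.  myfs_sync_inode() is hypothetical; the point
 * is that the dependent metadata buffers queued on ->private_list get
 * written and waited upon via sync_mapping_buffers().
 *
 *      int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 *      {
 *              struct inode *inode = dentry->d_inode;
 *              int err = myfs_sync_inode(inode);
 *
 *              return err ? err : sync_mapping_buffers(inode->i_mapping);
 *      }
 */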

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize)
{
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
                        ll_rw_block(WRITE, 1, &bh);
                put_bh(bh);
        }
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct address_space *buffer_mapping = bh->b_page->mapping;

        mark_buffer_dirty(bh);
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
                bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
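
/*
 * Illustrative sketch (not part of this file): this is how a filesystem
 * such as ext2 queues an indirect block against the file's inode, so that
 * a later sync_mapping_buffers() on that inode flushes it.  ind_bh is a
 * hypothetical buffer_head holding the indirect block.
 *
 *      mark_buffer_dirty_inode(ind_bh, inode);
 */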

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
{
        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        if (TestSetPageDirty(page))
                return 0;

        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));

                if (mapping_cap_account_dirty(mapping)) {
                        __inc_zone_page_state(page, NR_FILE_DIRTY);
                        __inc_bdi_stat(mapping->backing_dev_info,
                                        BDI_RECLAIMABLE);
                        task_io_account_write(PAGE_CACHE_SIZE);
                }
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        spin_unlock(&mapping->private_lock);

        return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
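
/*
 * Usage note (illustrative): __set_page_dirty_buffers() is the fallback
 * that set_page_dirty() uses when an address_space supplies no
 * ->set_page_dirty() of its own; a filesystem may also name it explicitly
 * in its address_space_operations, e.g.
 *
 *      static const struct address_space_operations myfs_aops = {
 *              .set_page_dirty = __set_page_dirty_buffers,
 *      };
 */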

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
        struct buffer_head *bh;
        struct list_head tmp;
        struct address_space *mapping;
        int err = 0, err2;

        INIT_LIST_HEAD(&tmp);

        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
                                /*
                                 * Ensure any pending I/O completes so that
                                 * ll_rw_block() actually writes the current
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
                                ll_rw_block(SWRITE_SYNC, 1, &bh);
                                brelse(bh);
                                spin_lock(lock);
                        }
                }
        }

        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
                get_bh(bh);
                mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
                /* Avoid race with mark_buffer_dirty_inode() which does
                 * a lockless check and we rely on seeing the dirty bit */
                smp_mb();
                if (buffer_dirty(bh)) {
                        list_add(&bh->b_assoc_buffers,
                                 &mapping->private_list);
                        bh->b_assoc_map = mapping;
                }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        err = -EIO;
                brelse(bh);
                spin_lock(lock);
        }

        spin_unlock(lock);
        err2 = osync_buffers_list(lock, list);
        if (err)
                return err;
        else
                return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
                        __remove_assoc_queue(BH_ENTRY(list->next));
                spin_unlock(&buffer_mapping->private_lock);
        }
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
        int ret = 1;

        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
                struct address_space *buffer_mapping = mapping->assoc_mapping;

                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
                        struct buffer_head *bh = BH_ENTRY(list->next);
                        if (buffer_dirty(bh)) {
                                ret = 0;
                                break;
                        }
                        __remove_assoc_queue(bh);
                }
                spin_unlock(&buffer_mapping->private_lock);
        }
        return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry)
{
        struct buffer_head *bh, *head;
        long offset;

try_again:
        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(GFP_NOFS);
                if (!bh)
                        goto no_grow;

                bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;

                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
                bh->b_private = NULL;
                bh->b_size = size;

                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);

                init_buffer(bh, NULL, NULL);
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        if (head) {
                do {
                        bh = head;
                        head = head->b_this_page;
                        free_buffer_head(bh);
                } while (head);
        }

        /*
         * Return failure for non-async IO requests.  Async IO requests
         * are not allowed to fail, so we have to wait until buffer heads
         * become available.  But we don't want tasks sleeping with
         * partially complete buffers, so all were released above.
         */
        if (!retry)
                return NULL;

        /* We're _really_ low on memory. Now we just
         * wait for old buffer heads to become free due to
         * finishing IO.  Since this is an async request and
         * the reserve list is empty, we're sure there are
         * async buffer heads in use.
         */
        free_more_memory();
        goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
        struct buffer_head *bh, *tail;

        bh = head;
        do {
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;
        attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
                        sector_t block, int size)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;
        int uptodate = PageUptodate(page);

        do {
                if (!buffer_mapped(bh)) {
                        init_buffer(bh, NULL, NULL);
                        bh->b_bdev = bdev;
                        bh->b_blocknr = block;
                        if (uptodate)
                                set_buffer_uptodate(bh);
                        set_buffer_mapped(bh);
                }
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
                pgoff_t index, int size)
{
        struct inode *inode = bdev->bd_inode;
        struct page *page;
        struct buffer_head *bh;

        page = find_or_create_page(inode->i_mapping, index,
                (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;

        BUG_ON(!PageLocked(page));

        if (page_has_buffers(page)) {
                bh = page_buffers(page);
                if (bh->b_size == size) {
                        init_page_buffers(page, bdev, block, size);
                        return page;
                }
                if (!try_to_free_buffers(page))
                        goto failed;
        }

        /*
         * Allocate some buffers for this page
         */
        bh = alloc_page_buffers(page, size, 0);
        if (!bh)
                goto failed;

        /*
         * Link the page to the buffers and initialise them.  Take the
         * lock to be atomic wrt __find_get_block(), which does not
         * run under the page lock.
         */
        spin_lock(&inode->i_mapping->private_lock);
        link_dev_buffers(page, bh);
        init_page_buffers(page, bdev, block, size);
        spin_unlock(&inode->i_mapping->private_lock);
        return page;

failed:
        BUG();
        unlock_page(page);
        page_cache_release(page);
        return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
        struct page *page;
        pgoff_t index;
        int sizebits;

        sizebits = -1;
        do {
                sizebits++;
        } while ((size << sizebits) < PAGE_SIZE);
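        /* e.g. 512-byte buffers on a 4K page: sizebits == 3, 8 buffers/page */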

        index = block >> sizebits;

        /*
         * Check for a block which wants to lie outside our maximum possible
         * pagecache index.  (this comparison is done using sector_t types).
         */
        if (unlikely(index != block >> sizebits)) {
                char b[BDEVNAME_SIZE];

                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
                        __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
        block = index << sizebits;
        /* Create a page with the proper size buffers. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
                return 0;
        unlock_page(page);
        page_cache_release(page);
        return 1;
}

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
        /* Size must be a multiple of the device's hard sector size */
        if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
                printk(KERN_ERR "hardsect size: %d\n",
                                        bdev_hardsect_size(bdev));

                dump_stack();
                return NULL;
        }

        for (;;) {
                struct buffer_head * bh;
                int ret;

                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;

                ret = grow_buffers(bdev, block, size);
                if (ret < 0)
                        return NULL;
                if (ret == 0)
                        free_more_memory();
        }
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
        WARN_ON_ONCE(!buffer_uptodate(bh));

        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
         * Don't let the final "is it dirty" escape to before we
         * perhaps modified the buffer.
         */
        if (buffer_dirty(bh)) {
                smp_mb();
                if (buffer_dirty(bh))
                        return;
        }

        if (!test_set_buffer_dirty(bh))
                __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
        if (atomic_read(&buf->b_count)) {
                put_bh(buf);
                return;
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
        clear_buffer_dirty(bh);
        if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;

                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
                bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return bh;
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
        }
        brelse(bh);
        return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE     8

struct bh_lru {
        struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()   local_irq_disable()
#define bh_lru_unlock() local_irq_enable()
#else
#define bh_lru_lock()   preempt_disable()
#define bh_lru_unlock() preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
        BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
        struct buffer_head *evictee = NULL;
        struct bh_lru *lru;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        if (lru->bhs[0] != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;

                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
                        struct buffer_head *bh2 = lru->bhs[in];

                        if (bh2 == bh) {
                                __brelse(bh2);
                        } else {
                                if (out >= BH_LRU_SIZE) {
                                        BUG_ON(evictee != NULL);
                                        evictee = bh2;
                                } else {
                                        bhs[out++] = bh2;
                                }
                        }
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
                memcpy(lru->bhs, bhs, sizeof(bhs));
        }
        bh_lru_unlock();

        if (evictee)
                __brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *ret = NULL;
        struct bh_lru *lru;
        unsigned int i;

        check_irqs_on();
        bh_lru_lock();
        lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
                struct buffer_head *bh = lru->bhs[i];

                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
                                        lru->bhs[i] = lru->bhs[i - 1];
                                        i--;
                                }
                                lru->bhs[0] = bh;
                        }
                        get_bh(bh);
                        ret = bh;
                        break;
                }
        }
        bh_lru_unlock();
        return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

        if (bh == NULL) {
                bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
        if (bh)
                touch_buffer(bh);
        return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);

        might_sleep();
        if (bh == NULL)
                bh = __getblk_slow(bdev, block, size);
        return bh;
}
EXPORT_SYMBOL(__getblk);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
                ll_rw_block(READA, 1, &bh);
                brelse(bh);
        }
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
}
EXPORT_SYMBOL(__bread);
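
/*
 * Illustrative sketch (not part of this file): reading on-disk metadata
 * with __bread().  MYFS_SB_BLOCK and my_sb are hypothetical; filesystems
 * normally go through the sb_bread() wrapper, which supplies the
 * superblock's bdev and block size.
 *
 *      struct buffer_head *bh = __bread(bdev, MYFS_SB_BLOCK, 1024);
 *      if (!bh)
 *              return -EIO;                    (the block was unreadable)
 *      memcpy(&my_sb, bh->b_data, sizeof(my_sb));
 *      brelse(bh);                             (drop the ref from __bread)
 */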

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
        struct bh_lru *b = &get_cpu_var(bh_lrus);
        int i;

        for (i = 0; i < BH_LRU_SIZE; i++) {
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
        put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
        on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
{
        bh->b_page = page;
        BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
                 */
                bh->b_data = (char *)(0 + offset);
        else
                bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
        unlock_buffer(bh);
}

1501 /**
1502  * block_invalidatepage - invalidate part of all of a buffer-backed page
1503  *
1504  * @page: the page which is affected
1505  * @offset: the index of the truncation point
1506  *
1507  * block_invalidatepage() is called when all or part of the page has become
1508  * invalidatedby a truncate operation.
1509  *
1510  * block_invalidatepage() does not have to release all buffers, but it must
1511  * ensure that no dirty buffer is left outside @offset and that no I/O
1512  * is underway against any of the blocks which are outside the truncation
1513  * point.  Because the caller is about to free (and possibly reuse) those
1514  * blocks on-disk.
1515  */
1516 void block_invalidatepage(struct page *page, unsigned long offset)
1517 {
1518         struct buffer_head *head, *bh, *next;
1519         unsigned int curr_off = 0;
1520
1521         BUG_ON(!PageLocked(page));
1522         if (!page_has_buffers(page))
1523                 goto out;
1524
1525         head = page_buffers(page);
1526         bh = head;
1527         do {
1528                 unsigned int next_off = curr_off + bh->b_size;
1529                 next = bh->b_this_page;
1530
1531                 /*
1532                  * is this block fully invalidated?
1533                  */
1534                 if (offset <= curr_off)
1535                         discard_buffer(bh);
1536                 curr_off = next_off;
1537                 bh = next;
1538         } while (bh != head);
1539
1540         /*
1541          * We release buffers only if the entire page is being invalidated.
1542          * The get_block cached value has been unconditionally invalidated,
1543          * so real IO is not possible anymore.
1544          */
1545         if (offset == 0)
1546                 try_to_release_page(page, 0);
1547 out:
1548         return;
1549 }
1550 EXPORT_SYMBOL(block_invalidatepage);
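
/*
 * Illustrative sketch: a simple buffer-backed filesystem can point its
 * ->invalidatepage at block_invalidatepage() directly from its
 * address_space_operations.  The "myfs_*" names are hypothetical.
 */
#if 0   /* example only */
static const struct address_space_operations myfs_aops = {
        .readpage       = myfs_readpage,        /* hypothetical */
        .writepage      = myfs_writepage,       /* hypothetical */
        .invalidatepage = block_invalidatepage,
};
#endif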
1551
1552 /*
1553  * We attach and possibly dirty the buffers atomically wrt
1554  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1555  * is already excluded via the page lock.
1556  */
1557 void create_empty_buffers(struct page *page,
1558                         unsigned long blocksize, unsigned long b_state)
1559 {
1560         struct buffer_head *bh, *head, *tail;
1561
1562         head = alloc_page_buffers(page, blocksize, 1);
1563         bh = head;
1564         do {
1565                 bh->b_state |= b_state;
1566                 tail = bh;
1567                 bh = bh->b_this_page;
1568         } while (bh);
1569         tail->b_this_page = head;
1570
1571         spin_lock(&page->mapping->private_lock);
1572         if (PageUptodate(page) || PageDirty(page)) {
1573                 bh = head;
1574                 do {
1575                         if (PageDirty(page))
1576                                 set_buffer_dirty(bh);
1577                         if (PageUptodate(page))
1578                                 set_buffer_uptodate(bh);
1579                         bh = bh->b_this_page;
1580                 } while (bh != head);
1581         }
1582         attach_page_buffers(page, head);
1583         spin_unlock(&page->mapping->private_lock);
1584 }
1585 EXPORT_SYMBOL(create_empty_buffers);
1586
1587 /*
1588  * We are taking a block for data and we don't want any output from any
1589  * buffer-cache aliases starting from the return from this function and
1590  * until the moment when something explicitly marks the buffer
1591  * dirty (hopefully that will not happen until we free that block ;-)
1592  * We don't even need to mark it not-uptodate - nobody can expect
1593  * anything from a newly allocated buffer anyway. We used to use
1594  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1595  * don't want to mark the alias unmapped, for example - it would confuse
1596  * anyone who might pick it up with bread() afterwards...
1597  *
1598  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1599  * be writeout I/O going on against recently-freed buffers.  We don't
1600  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1601  * only if we really need to.  That happens here.
1602  */
1603 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1604 {
1605         struct buffer_head *old_bh;
1606
1607         might_sleep();
1608
1609         old_bh = __find_get_block_slow(bdev, block);
1610         if (old_bh) {
1611                 clear_buffer_dirty(old_bh);
1612                 wait_on_buffer(old_bh);
1613                 clear_buffer_req(old_bh);
1614                 __brelse(old_bh);
1615         }
1616 }
1617 EXPORT_SYMBOL(unmap_underlying_metadata);
1618
1619 /*
1620  * NOTE! All mapped/uptodate combinations are valid:
1621  *
1622  *      Mapped  Uptodate        Meaning
1623  *
1624  *      No      No              "unknown" - must do get_block()
1625  *      No      Yes             "hole" - zero-filled
1626  *      Yes     No              "allocated" - allocated on disk, not read in
1627  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1628  *
1629  * "Dirty" is valid only with the last case (mapped+uptodate).
1630  */
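
/*
 * Illustrative sketch: the table above, restated as code.  Not used
 * anywhere - it only spells out how the two bits combine.
 */
#if 0   /* example only */
static const char *bh_state_meaning(struct buffer_head *bh)
{
        if (!buffer_mapped(bh))
                return buffer_uptodate(bh) ? "hole" : "unknown";
        return buffer_uptodate(bh) ? "valid" : "allocated";
}
#endif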
1631
1632 /*
1633  * While block_write_full_page is writing back the dirty buffers under
1634  * the page lock, whoever dirtied the buffers may decide to clean them
1635  * again at any time.  We handle that by only looking at the buffer
1636  * state inside lock_buffer().
1637  *
1638  * If block_write_full_page() is called for regular writeback
1639  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1640  * locked buffer.  This can only happen if someone has written the buffer
1641  * directly, with submit_bh().  At the address_space level PageWriteback
1642  * prevents this contention from occurring.
1643  */
1644 static int __block_write_full_page(struct inode *inode, struct page *page,
1645                         get_block_t *get_block, struct writeback_control *wbc)
1646 {
1647         int err;
1648         sector_t block;
1649         sector_t last_block;
1650         struct buffer_head *bh, *head;
1651         const unsigned blocksize = 1 << inode->i_blkbits;
1652         int nr_underway = 0;
1653
1654         BUG_ON(!PageLocked(page));
1655
1656         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1657
1658         if (!page_has_buffers(page)) {
1659                 create_empty_buffers(page, blocksize,
1660                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1661         }
1662
1663         /*
1664          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1665          * here, and the (potentially unmapped) buffers may become dirty at
1666          * any time.  If a buffer becomes dirty here after we've inspected it
1667          * then we just miss that fact, and the page stays dirty.
1668          *
1669          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1670          * handle that here by just cleaning them.
1671          */
1672
1673         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1674         head = page_buffers(page);
1675         bh = head;
1676
1677         /*
1678          * Get all the dirty buffers mapped to disk addresses and
1679          * handle any aliases from the underlying blockdev's mapping.
1680          */
1681         do {
1682                 if (block > last_block) {
1683                         /*
1684                          * mapped buffers outside i_size will occur, because
1685                          * this page can be outside i_size when there is a
1686                          * truncate in progress.
1687                          */
1688                         /*
1689                          * The buffer was zeroed by block_write_full_page()
1690                          */
1691                         clear_buffer_dirty(bh);
1692                         set_buffer_uptodate(bh);
1693                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1694                            buffer_dirty(bh)) {
1695                         WARN_ON(bh->b_size != blocksize);
1696                         err = get_block(inode, block, bh, 1);
1697                         if (err)
1698                                 goto recover;
1699                         clear_buffer_delay(bh);
1700                         if (buffer_new(bh)) {
1701                                 /* blockdev mappings never come here */
1702                                 clear_buffer_new(bh);
1703                                 unmap_underlying_metadata(bh->b_bdev,
1704                                                         bh->b_blocknr);
1705                         }
1706                 }
1707                 bh = bh->b_this_page;
1708                 block++;
1709         } while (bh != head);
1710
1711         do {
1712                 if (!buffer_mapped(bh))
1713                         continue;
1714                 /*
1715                  * If it's a fully non-blocking write attempt and we cannot
1716                  * lock the buffer then redirty the page.  Note that this can
1717                  * potentially cause a busy-wait loop from pdflush and kswapd
1718                  * activity, but those code paths have their own higher-level
1719                  * throttling.
1720                  */
1721                 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1722                         lock_buffer(bh);
1723                 } else if (test_set_buffer_locked(bh)) {
1724                         redirty_page_for_writepage(wbc, page);
1725                         continue;
1726                 }
1727                 if (test_clear_buffer_dirty(bh)) {
1728                         mark_buffer_async_write(bh);
1729                 } else {
1730                         unlock_buffer(bh);
1731                 }
1732         } while ((bh = bh->b_this_page) != head);
1733
1734         /*
1735          * The page and its buffers are protected by PageWriteback(), so we can
1736          * drop the bh refcounts early.
1737          */
1738         BUG_ON(PageWriteback(page));
1739         set_page_writeback(page);
1740
1741         do {
1742                 struct buffer_head *next = bh->b_this_page;
1743                 if (buffer_async_write(bh)) {
1744                         submit_bh(WRITE, bh);
1745                         nr_underway++;
1746                 }
1747                 bh = next;
1748         } while (bh != head);
1749         unlock_page(page);
1750
1751         err = 0;
1752 done:
1753         if (nr_underway == 0) {
1754                 /*
1755                  * The page was marked dirty, but the buffers were
1756                  * clean.  Someone wrote them back by hand with
1757                  * ll_rw_block/submit_bh.  A rare case.
1758                  */
1759                 end_page_writeback(page);
1760
1761                 /*
1762                  * The page and buffer_heads can be released at any time from
1763                  * here on.
1764                  */
1765         }
1766         return err;
1767
1768 recover:
1769         /*
1770          * ENOSPC, or some other error.  We may already have added some
1771          * blocks to the file, so we need to write these out to avoid
1772          * exposing stale data.
1773          * The page is currently locked and not marked for writeback
1774          */
1775         bh = head;
1776         /* Recovery: lock and submit the mapped buffers */
1777         do {
1778                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1779                     !buffer_delay(bh)) {
1780                         lock_buffer(bh);
1781                         mark_buffer_async_write(bh);
1782                 } else {
1783                         /*
1784                          * The buffer may have been set dirty during
1785                          * attachment to a dirty page.
1786                          */
1787                         clear_buffer_dirty(bh);
1788                 }
1789         } while ((bh = bh->b_this_page) != head);
1790         SetPageError(page);
1791         BUG_ON(PageWriteback(page));
1792         mapping_set_error(page->mapping, err);
1793         set_page_writeback(page);
1794         do {
1795                 struct buffer_head *next = bh->b_this_page;
1796                 if (buffer_async_write(bh)) {
1797                         clear_buffer_dirty(bh);
1798                         submit_bh(WRITE, bh);
1799                         nr_underway++;
1800                 }
1801                 bh = next;
1802         } while (bh != head);
1803         unlock_page(page);
1804         goto done;
1805 }
1806
1807 /*
1808  * If a page has any new buffers, zero them out here, and mark them uptodate
1809  * and dirty so they'll be written out (in order to prevent uninitialised
1810  * block data from leaking). And clear the new bit.
1811  */
1812 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1813 {
1814         unsigned int block_start, block_end;
1815         struct buffer_head *head, *bh;
1816
1817         BUG_ON(!PageLocked(page));
1818         if (!page_has_buffers(page))
1819                 return;
1820
1821         bh = head = page_buffers(page);
1822         block_start = 0;
1823         do {
1824                 block_end = block_start + bh->b_size;
1825
1826                 if (buffer_new(bh)) {
1827                         if (block_end > from && block_start < to) {
1828                                 if (!PageUptodate(page)) {
1829                                         unsigned start, size;
1830
1831                                         start = max(from, block_start);
1832                                         size = min(to, block_end) - start;
1833
1834                                         zero_user(page, start, size);
1835                                         set_buffer_uptodate(bh);
1836                                 }
1837
1838                                 clear_buffer_new(bh);
1839                                 mark_buffer_dirty(bh);
1840                         }
1841                 }
1842
1843                 block_start = block_end;
1844                 bh = bh->b_this_page;
1845         } while (bh != head);
1846 }
1847 EXPORT_SYMBOL(page_zero_new_buffers);
1848
1849 static int __block_prepare_write(struct inode *inode, struct page *page,
1850                 unsigned from, unsigned to, get_block_t *get_block)
1851 {
1852         unsigned block_start, block_end;
1853         sector_t block;
1854         int err = 0;
1855         unsigned blocksize, bbits;
1856         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1857
1858         BUG_ON(!PageLocked(page));
1859         BUG_ON(from > PAGE_CACHE_SIZE);
1860         BUG_ON(to > PAGE_CACHE_SIZE);
1861         BUG_ON(from > to);
1862
1863         blocksize = 1 << inode->i_blkbits;
1864         if (!page_has_buffers(page))
1865                 create_empty_buffers(page, blocksize, 0);
1866         head = page_buffers(page);
1867
1868         bbits = inode->i_blkbits;
1869         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1870
1871         for(bh = head, block_start = 0; bh != head || !block_start;
1872             block++, block_start=block_end, bh = bh->b_this_page) {
1873                 block_end = block_start + blocksize;
1874                 if (block_end <= from || block_start >= to) {
1875                         if (PageUptodate(page)) {
1876                                 if (!buffer_uptodate(bh))
1877                                         set_buffer_uptodate(bh);
1878                         }
1879                         continue;
1880                 }
1881                 if (buffer_new(bh))
1882                         clear_buffer_new(bh);
1883                 if (!buffer_mapped(bh)) {
1884                         WARN_ON(bh->b_size != blocksize);
1885                         err = get_block(inode, block, bh, 1);
1886                         if (err)
1887                                 break;
1888                         if (buffer_new(bh)) {
1889                                 unmap_underlying_metadata(bh->b_bdev,
1890                                                         bh->b_blocknr);
1891                                 if (PageUptodate(page)) {
1892                                         clear_buffer_new(bh);
1893                                         set_buffer_uptodate(bh);
1894                                         mark_buffer_dirty(bh);
1895                                         continue;
1896                                 }
1897                                 if (block_end > to || block_start < from)
1898                                         zero_user_segments(page,
1899                                                 to, block_end,
1900                                                 block_start, from);
1901                                 continue;
1902                         }
1903                 }
1904                 if (PageUptodate(page)) {
1905                         if (!buffer_uptodate(bh))
1906                                 set_buffer_uptodate(bh);
1907                         continue; 
1908                 }
1909                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1910                     !buffer_unwritten(bh) &&
1911                      (block_start < from || block_end > to)) {
1912                         ll_rw_block(READ, 1, &bh);
1913                         *wait_bh++=bh;
1914                 }
1915         }
1916         /*
1917          * If we issued read requests - let them complete.
1918          */
1919         while(wait_bh > wait) {
1920                 wait_on_buffer(*--wait_bh);
1921                 if (!buffer_uptodate(*wait_bh))
1922                         err = -EIO;
1923         }
1924         if (unlikely(err))
1925                 page_zero_new_buffers(page, from, to);
1926         return err;
1927 }
1928
1929 static int __block_commit_write(struct inode *inode, struct page *page,
1930                 unsigned from, unsigned to)
1931 {
1932         unsigned block_start, block_end;
1933         int partial = 0;
1934         unsigned blocksize;
1935         struct buffer_head *bh, *head;
1936
1937         blocksize = 1 << inode->i_blkbits;
1938
1939         for(bh = head = page_buffers(page), block_start = 0;
1940             bh != head || !block_start;
1941             block_start=block_end, bh = bh->b_this_page) {
1942                 block_end = block_start + blocksize;
1943                 if (block_end <= from || block_start >= to) {
1944                         if (!buffer_uptodate(bh))
1945                                 partial = 1;
1946                 } else {
1947                         set_buffer_uptodate(bh);
1948                         mark_buffer_dirty(bh);
1949                 }
1950                 clear_buffer_new(bh);
1951         }
1952
1953         /*
1954          * If this is a partial write which happened to make all buffers
1955          * uptodate then we can optimize away a bogus readpage() for
1956          * the next read(). Here we 'discover' whether the page went
1957          * uptodate as a result of this (potentially partial) write.
1958          */
1959         if (!partial)
1960                 SetPageUptodate(page);
1961         return 0;
1962 }
1963
1964 /*
1965  * block_write_begin takes care of the basic task of block allocation and
1966  * bringing partial write blocks uptodate first.
1967  *
1968  * If *pagep is not NULL, then block_write_begin uses the locked page
1969  * at *pagep rather than allocating its own. In this case, the page will
1970  * not be unlocked or deallocated on failure.
1971  */
1972 int block_write_begin(struct file *file, struct address_space *mapping,
1973                         loff_t pos, unsigned len, unsigned flags,
1974                         struct page **pagep, void **fsdata,
1975                         get_block_t *get_block)
1976 {
1977         struct inode *inode = mapping->host;
1978         int status = 0;
1979         struct page *page;
1980         pgoff_t index;
1981         unsigned start, end;
1982         int ownpage = 0;
1983
1984         index = pos >> PAGE_CACHE_SHIFT;
1985         start = pos & (PAGE_CACHE_SIZE - 1);
1986         end = start + len;
1987
1988         page = *pagep;
1989         if (page == NULL) {
1990                 ownpage = 1;
1991                 page = __grab_cache_page(mapping, index);
1992                 if (!page) {
1993                         status = -ENOMEM;
1994                         goto out;
1995                 }
1996                 *pagep = page;
1997         } else
1998                 BUG_ON(!PageLocked(page));
1999
2000         status = __block_prepare_write(inode, page, start, end, get_block);
2001         if (unlikely(status)) {
2002                 ClearPageUptodate(page);
2003
2004                 if (ownpage) {
2005                         unlock_page(page);
2006                         page_cache_release(page);
2007                         *pagep = NULL;
2008
2009                         /*
2010                          * prepare_write() may have instantiated a few blocks
2011                          * outside i_size.  Trim these off again. Don't need
2012                          * i_size_read because we hold i_mutex.
2013                          */
2014                         if (pos + len > inode->i_size)
2015                                 vmtruncate(inode, inode->i_size);
2016                 }
2017                 goto out;
2018         }
2019
2020 out:
2021         return status;
2022 }
2023 EXPORT_SYMBOL(block_write_begin);
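
/*
 * Illustrative sketch: a typical filesystem ->write_begin just forwards to
 * block_write_begin() with its own get_block callback.  "myfs_get_block"
 * is hypothetical.
 */
#if 0   /* example only */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;          /* let block_write_begin allocate the page */
        return block_write_begin(file, mapping, pos, len, flags,
                                        pagep, fsdata, myfs_get_block);
}
#endif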
2024
2025 int block_write_end(struct file *file, struct address_space *mapping,
2026                         loff_t pos, unsigned len, unsigned copied,
2027                         struct page *page, void *fsdata)
2028 {
2029         struct inode *inode = mapping->host;
2030         unsigned start;
2031
2032         start = pos & (PAGE_CACHE_SIZE - 1);
2033
2034         if (unlikely(copied < len)) {
2035                 /*
2036                  * The buffers that were written will now be uptodate, so we
2037                  * don't have to worry about a readpage reading them and
2038                  * overwriting a partial write. However if we have encountered
2039                  * a short write and only partially written into a buffer, it
2040                  * will not be marked uptodate, so a readpage might come in and
2041                  * destroy our partial write.
2042                  *
2043                  * Do the simplest thing, and just treat any short write to a
2044                  * non uptodate page as a zero-length write, and force the
2045                  * caller to redo the whole thing.
2046                  */
2047                 if (!PageUptodate(page))
2048                         copied = 0;
2049
2050                 page_zero_new_buffers(page, start+copied, start+len);
2051         }
2052         flush_dcache_page(page);
2053
2054         /* This could be a short (even 0-length) commit */
2055         __block_commit_write(inode, page, start, start+copied);
2056
2057         return copied;
2058 }
2059 EXPORT_SYMBOL(block_write_end);
2060
2061 int generic_write_end(struct file *file, struct address_space *mapping,
2062                         loff_t pos, unsigned len, unsigned copied,
2063                         struct page *page, void *fsdata)
2064 {
2065         struct inode *inode = mapping->host;
2066         int i_size_changed = 0;
2067
2068         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2069
2070         /*
2071          * No need to use i_size_read() here, the i_size
2072          * cannot change under us because we hold i_mutex.
2073          *
2074          * But it's important to update i_size while still holding page lock:
2075          * page writeout could otherwise come in and zero beyond i_size.
2076          */
2077         if (pos+copied > inode->i_size) {
2078                 i_size_write(inode, pos+copied);
2079                 i_size_changed = 1;
2080         }
2081
2082         unlock_page(page);
2083         page_cache_release(page);
2084
2085         /*
2086          * Don't mark the inode dirty under page lock. First, it unnecessarily
2087          * makes the holding time of page lock longer. Second, it forces lock
2088          * ordering of page lock and transaction start for journaling
2089          * filesystems.
2090          */
2091         if (i_size_changed)
2092                 mark_inode_dirty(inode);
2093
2094         return copied;
2095 }
2096 EXPORT_SYMBOL(generic_write_end);
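
/*
 * Illustrative sketch: the ->write_end that pairs with the write_begin
 * sketch above is usually generic_write_end itself.
 */
#if 0   /* example only */
static const struct address_space_operations myfs_file_aops = {
        .write_begin    = myfs_write_begin,     /* hypothetical, see above */
        .write_end      = generic_write_end,
};
#endif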
2097
2098 /*
2099  * Generic "read page" function for block devices that have the normal
2100  * get_block functionality. This is most of the block device filesystems.
2101  * Reads the page asynchronously --- the unlock_buffer() and
2102  * set/clear_buffer_uptodate() functions propagate buffer state into the
2103  * page struct once IO has completed.
2104  */
2105 int block_read_full_page(struct page *page, get_block_t *get_block)
2106 {
2107         struct inode *inode = page->mapping->host;
2108         sector_t iblock, lblock;
2109         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2110         unsigned int blocksize;
2111         int nr, i;
2112         int fully_mapped = 1;
2113
2114         BUG_ON(!PageLocked(page));
2115         blocksize = 1 << inode->i_blkbits;
2116         if (!page_has_buffers(page))
2117                 create_empty_buffers(page, blocksize, 0);
2118         head = page_buffers(page);
2119
2120         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2121         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2122         bh = head;
2123         nr = 0;
2124         i = 0;
2125
2126         do {
2127                 if (buffer_uptodate(bh))
2128                         continue;
2129
2130                 if (!buffer_mapped(bh)) {
2131                         int err = 0;
2132
2133                         fully_mapped = 0;
2134                         if (iblock < lblock) {
2135                                 WARN_ON(bh->b_size != blocksize);
2136                                 err = get_block(inode, iblock, bh, 0);
2137                                 if (err)
2138                                         SetPageError(page);
2139                         }
2140                         if (!buffer_mapped(bh)) {
2141                                 zero_user(page, i * blocksize, blocksize);
2142                                 if (!err)
2143                                         set_buffer_uptodate(bh);
2144                                 continue;
2145                         }
2146                         /*
2147                          * get_block() might have updated the buffer
2148                          * synchronously
2149                          */
2150                         if (buffer_uptodate(bh))
2151                                 continue;
2152                 }
2153                 arr[nr++] = bh;
2154         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2155
2156         if (fully_mapped)
2157                 SetPageMappedToDisk(page);
2158
2159         if (!nr) {
2160                 /*
2161                  * All buffers are uptodate - we can set the page uptodate
2162                  * as well. But not if get_block() returned an error.
2163                  */
2164                 if (!PageError(page))
2165                         SetPageUptodate(page);
2166                 unlock_page(page);
2167                 return 0;
2168         }
2169
2170         /* Stage two: lock the buffers */
2171         for (i = 0; i < nr; i++) {
2172                 bh = arr[i];
2173                 lock_buffer(bh);
2174                 mark_buffer_async_read(bh);
2175         }
2176
2177         /*
2178          * Stage 3: start the IO.  Check for uptodateness
2179          * inside the buffer lock in case another process reading
2180          * the underlying blockdev brought it uptodate (the sct fix).
2181          */
2182         for (i = 0; i < nr; i++) {
2183                 bh = arr[i];
2184                 if (buffer_uptodate(bh))
2185                         end_buffer_async_read(bh, 1);
2186                 else
2187                         submit_bh(READ, bh);
2188         }
2189         return 0;
2190 }
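
/*
 * Illustrative sketch: filesystems normally expose block_read_full_page()
 * through a one-line ->readpage wrapper.  "myfs_get_block" is hypothetical.
 */
#if 0   /* example only */
static int myfs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, myfs_get_block);
}
#endif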
2191
2192 /* utility function for filesystems that need to do work on expanding
2193  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2194  * deal with the hole.  
2195  */
2196 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2197 {
2198         struct address_space *mapping = inode->i_mapping;
2199         struct page *page;
2200         void *fsdata;
2201         unsigned long limit;
2202         int err;
2203
2204         err = -EFBIG;
2205         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2206         if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2207                 send_sig(SIGXFSZ, current, 0);
2208                 goto out;
2209         }
2210         if (size > inode->i_sb->s_maxbytes)
2211                 goto out;
2212
2213         err = pagecache_write_begin(NULL, mapping, size, 0,
2214                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2215                                 &page, &fsdata);
2216         if (err)
2217                 goto out;
2218
2219         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2220         BUG_ON(err > 0);
2221
2222 out:
2223         return err;
2224 }
2225
2226 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2227                             loff_t pos, loff_t *bytes)
2228 {
2229         struct inode *inode = mapping->host;
2230         unsigned blocksize = 1 << inode->i_blkbits;
2231         struct page *page;
2232         void *fsdata;
2233         pgoff_t index, curidx;
2234         loff_t curpos;
2235         unsigned zerofrom, offset, len;
2236         int err = 0;
2237
2238         index = pos >> PAGE_CACHE_SHIFT;
2239         offset = pos & ~PAGE_CACHE_MASK;
2240
2241         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2242                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2243                 if (zerofrom & (blocksize-1)) {
2244                         *bytes |= (blocksize-1);
2245                         (*bytes)++;
2246                 }
2247                 len = PAGE_CACHE_SIZE - zerofrom;
2248
2249                 err = pagecache_write_begin(file, mapping, curpos, len,
2250                                                 AOP_FLAG_UNINTERRUPTIBLE,
2251                                                 &page, &fsdata);
2252                 if (err)
2253                         goto out;
2254                 zero_user(page, zerofrom, len);
2255                 err = pagecache_write_end(file, mapping, curpos, len, len,
2256                                                 page, fsdata);
2257                 if (err < 0)
2258                         goto out;
2259                 BUG_ON(err != len);
2260                 err = 0;
2261
2262                 balance_dirty_pages_ratelimited(mapping);
2263         }
2264
2265         /* page covers the boundary, find the boundary offset */
2266         if (index == curidx) {
2267                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2268                 /* if we are expanding the file, the last block will be filled */
2269                 if (offset <= zerofrom) {
2270                         goto out;
2271                 }
2272                 if (zerofrom & (blocksize-1)) {
2273                         *bytes |= (blocksize-1);
2274                         (*bytes)++;
2275                 }
2276                 len = offset - zerofrom;
2277
2278                 err = pagecache_write_begin(file, mapping, curpos, len,
2279                                                 AOP_FLAG_UNINTERRUPTIBLE,
2280                                                 &page, &fsdata);
2281                 if (err)
2282                         goto out;
2283                 zero_user(page, zerofrom, len);
2284                 err = pagecache_write_end(file, mapping, curpos, len, len,
2285                                                 page, fsdata);
2286                 if (err < 0)
2287                         goto out;
2288                 BUG_ON(err != len);
2289                 err = 0;
2290         }
2291 out:
2292         return err;
2293 }
2294
2295 /*
2296  * For moronic filesystems that do not allow holes in files.
2297  * We may have to extend the file.
2298  */
2299 int cont_write_begin(struct file *file, struct address_space *mapping,
2300                         loff_t pos, unsigned len, unsigned flags,
2301                         struct page **pagep, void **fsdata,
2302                         get_block_t *get_block, loff_t *bytes)
2303 {
2304         struct inode *inode = mapping->host;
2305         unsigned blocksize = 1 << inode->i_blkbits;
2306         unsigned zerofrom;
2307         int err;
2308
2309         err = cont_expand_zero(file, mapping, pos, bytes);
2310         if (err)
2311                 goto out;
2312
2313         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2314         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2315                 *bytes |= (blocksize-1);
2316                 (*bytes)++;
2317         }
2318
2319         *pagep = NULL;
2320         err = block_write_begin(file, mapping, pos, len,
2321                                 flags, pagep, fsdata, get_block);
2322 out:
2323         return err;
2324 }
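
/*
 * Illustrative sketch: a hole-less filesystem forwards its ->write_begin
 * to cont_write_begin() together with a per-inode "zeroed up to here"
 * watermark.  "contfs_get_block" and "contfs_i()->mmu_private" are
 * hypothetical.
 */
#if 0   /* example only */
static int contfs_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                        contfs_get_block, &contfs_i(mapping->host)->mmu_private);
}
#endif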
2325
2326 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2327                         get_block_t *get_block)
2328 {
2329         struct inode *inode = page->mapping->host;
2330         int err = __block_prepare_write(inode, page, from, to, get_block);
2331         if (err)
2332                 ClearPageUptodate(page);
2333         return err;
2334 }
2335
2336 int block_commit_write(struct page *page, unsigned from, unsigned to)
2337 {
2338         struct inode *inode = page->mapping->host;
2339         __block_commit_write(inode,page,from,to);
2340         return 0;
2341 }
2342
2343 /*
2344  * block_page_mkwrite() is not allowed to change the file size as it gets
2345  * called from a page fault handler when a page is first dirtied. Hence we must
2346  * be careful to check for EOF conditions here. We set the page up correctly
2347  * for a written page which means we get ENOSPC checking when writing into
2348  * holes and correct delalloc and unwritten extent mapping on filesystems that
2349  * support these features.
2350  *
2351  * We are not allowed to take the i_mutex here so we have to play games to
2352  * protect against truncate races as the page could now be beyond EOF.  Because
2353  * vmtruncate() writes the inode size before removing pages, once we have the
2354  * page lock we can determine safely if the page is beyond EOF. If it is not
2355  * beyond EOF, then the page is guaranteed safe against truncation until we
2356  * unlock the page.
2357  */
2358 int
2359 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2360                    get_block_t get_block)
2361 {
2362         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2363         unsigned long end;
2364         loff_t size;
2365         int ret = -EINVAL;
2366
2367         lock_page(page);
2368         size = i_size_read(inode);
2369         if ((page->mapping != inode->i_mapping) ||
2370             (page_offset(page) > size)) {
2371                 /* page got truncated out from underneath us */
2372                 goto out_unlock;
2373         }
2374
2375         /* page is wholly or partially inside EOF */
2376         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2377                 end = size & ~PAGE_CACHE_MASK;
2378         else
2379                 end = PAGE_CACHE_SIZE;
2380
2381         ret = block_prepare_write(page, 0, end, get_block);
2382         if (!ret)
2383                 ret = block_commit_write(page, 0, end);
2384
2385 out_unlock:
2386         unlock_page(page);
2387         return ret;
2388 }
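
/*
 * Illustrative sketch: block_page_mkwrite() is meant to be called from a
 * filesystem's ->page_mkwrite handler.  "myfs_get_block" is hypothetical.
 */
#if 0   /* example only */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = myfs_page_mkwrite,
};
#endif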
2389
2390 /*
2391  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2392  * immediately, while under the page lock.  So it needs a special end_io
2393  * handler which does not touch the bh after unlocking it.
2394  */
2395 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2396 {
2397         __end_buffer_read_notouch(bh, uptodate);
2398 }
2399
2400 /*
2401  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2402  * the page (converting it to circular linked list and taking care of page
2403  * dirty races).
2404  */
2405 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2406 {
2407         struct buffer_head *bh;
2408
2409         BUG_ON(!PageLocked(page));
2410
2411         spin_lock(&page->mapping->private_lock);
2412         bh = head;
2413         do {
2414                 if (PageDirty(page))
2415                         set_buffer_dirty(bh);
2416                 if (!bh->b_this_page)
2417                         bh->b_this_page = head;
2418                 bh = bh->b_this_page;
2419         } while (bh != head);
2420         attach_page_buffers(page, head);
2421         spin_unlock(&page->mapping->private_lock);
2422 }
2423
2424 /*
2425  * On entry, the page is not uptodate anywhere.
2426  * On exit, the page is fully uptodate in the areas outside (from,to).
2427  */
2428 int nobh_write_begin(struct file *file, struct address_space *mapping,
2429                         loff_t pos, unsigned len, unsigned flags,
2430                         struct page **pagep, void **fsdata,
2431                         get_block_t *get_block)
2432 {
2433         struct inode *inode = mapping->host;
2434         const unsigned blkbits = inode->i_blkbits;
2435         const unsigned blocksize = 1 << blkbits;
2436         struct buffer_head *head, *bh;
2437         struct page *page;
2438         pgoff_t index;
2439         unsigned from, to;
2440         unsigned block_in_page;
2441         unsigned block_start, block_end;
2442         sector_t block_in_file;
2443         int nr_reads = 0;
2444         int ret = 0;
2445         int is_mapped_to_disk = 1;
2446
2447         index = pos >> PAGE_CACHE_SHIFT;
2448         from = pos & (PAGE_CACHE_SIZE - 1);
2449         to = from + len;
2450
2451         page = __grab_cache_page(mapping, index);
2452         if (!page)
2453                 return -ENOMEM;
2454         *pagep = page;
2455         *fsdata = NULL;
2456
2457         if (page_has_buffers(page)) {
2458                 unlock_page(page);
2459                 page_cache_release(page);
2460                 *pagep = NULL;
2461                 return block_write_begin(file, mapping, pos, len, flags, pagep,
2462                                         fsdata, get_block);
2463         }
2464
2465         if (PageMappedToDisk(page))
2466                 return 0;
2467
2468         /*
2469          * Allocate buffers so that we can keep track of state, and potentially
2470          * attach them to the page if an error occurs. In the common case of
2471          * no error, they will just be freed again without ever being attached
2472          * to the page (which is all OK, because we're under the page lock).
2473          *
2474          * Be careful: the buffer linked list is a NULL terminated one, rather
2475          * than the circular one we're used to.
2476          */
2477         head = alloc_page_buffers(page, blocksize, 0);
2478         if (!head) {
2479                 ret = -ENOMEM;
2480                 goto out_release;
2481         }
2482
2483         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2484
2485         /*
2486          * We loop across all blocks in the page, whether or not they are
2487          * part of the affected region.  This is so we can discover if the
2488          * page is fully mapped-to-disk.
2489          */
2490         for (block_start = 0, block_in_page = 0, bh = head;
2491                   block_start < PAGE_CACHE_SIZE;
2492                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2493                 int create;
2494
2495                 block_end = block_start + blocksize;
2496                 bh->b_state = 0;
2497                 create = 1;
2498                 if (block_start >= to)
2499                         create = 0;
2500                 ret = get_block(inode, block_in_file + block_in_page,
2501                                         bh, create);
2502                 if (ret)
2503                         goto failed;
2504                 if (!buffer_mapped(bh))
2505                         is_mapped_to_disk = 0;
2506                 if (buffer_new(bh))
2507                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2508                 if (PageUptodate(page)) {
2509                         set_buffer_uptodate(bh);
2510                         continue;
2511                 }
2512                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2513                         zero_user_segments(page, block_start, from,
2514                                                         to, block_end);
2515                         continue;
2516                 }
2517                 if (buffer_uptodate(bh))
2518                         continue;       /* reiserfs does this */
2519                 if (block_start < from || block_end > to) {
2520                         lock_buffer(bh);
2521                         bh->b_end_io = end_buffer_read_nobh;
2522                         submit_bh(READ, bh);
2523                         nr_reads++;
2524                 }
2525         }
2526
2527         if (nr_reads) {
2528                 /*
2529                  * The page is locked, so these buffers are protected from
2530                  * any VM or truncate activity.  Hence we don't need to care
2531                  * for the buffer_head refcounts.
2532                  */
2533                 for (bh = head; bh; bh = bh->b_this_page) {
2534                         wait_on_buffer(bh);
2535                         if (!buffer_uptodate(bh))
2536                                 ret = -EIO;
2537                 }
2538                 if (ret)
2539                         goto failed;
2540         }
2541
2542         if (is_mapped_to_disk)
2543                 SetPageMappedToDisk(page);
2544
2545         *fsdata = head; /* to be released by nobh_write_end */
2546
2547         return 0;
2548
2549 failed:
2550         BUG_ON(!ret);
2551         /*
2552          * Error recovery is a bit difficult. We need to zero out blocks that
2553          * were newly allocated, and dirty them to ensure they get written out.
2554          * Buffers need to be attached to the page at this point, otherwise
2555          * the handling of potential IO errors during writeout would be hard
2556          * (could try doing synchronous writeout, but what if that fails too?)
2557          */
2558         attach_nobh_buffers(page, head);
2559         page_zero_new_buffers(page, from, to);
2560
2561 out_release:
2562         unlock_page(page);
2563         page_cache_release(page);
2564         *pagep = NULL;
2565
2566         if (pos + len > inode->i_size)
2567                 vmtruncate(inode, inode->i_size);
2568
2569         return ret;
2570 }
2571 EXPORT_SYMBOL(nobh_write_begin);
2572
2573 int nobh_write_end(struct file *file, struct address_space *mapping,
2574                         loff_t pos, unsigned len, unsigned copied,
2575                         struct page *page, void *fsdata)
2576 {
2577         struct inode *inode = page->mapping->host;
2578         struct buffer_head *head = fsdata;
2579         struct buffer_head *bh;
2580         BUG_ON(fsdata != NULL && page_has_buffers(page));
2581
2582         if (unlikely(copied < len) && !page_has_buffers(page))
2583                 attach_nobh_buffers(page, head);
2584         if (page_has_buffers(page))
2585                 return generic_write_end(file, mapping, pos, len,
2586                                         copied, page, fsdata);
2587
2588         SetPageUptodate(page);
2589         set_page_dirty(page);
2590         if (pos+copied > inode->i_size) {
2591                 i_size_write(inode, pos+copied);
2592                 mark_inode_dirty(inode);
2593         }
2594
2595         unlock_page(page);
2596         page_cache_release(page);
2597
2598         while (head) {
2599                 bh = head;
2600                 head = head->b_this_page;
2601                 free_buffer_head(bh);
2602         }
2603
2604         return copied;
2605 }
2606 EXPORT_SYMBOL(nobh_write_end);
2607
2608 /*
2609  * nobh_writepage() - based on block_write_full_page() except
2610  * that it tries to operate without attaching bufferheads to
2611  * the page.
2612  */
2613 int nobh_writepage(struct page *page, get_block_t *get_block,
2614                         struct writeback_control *wbc)
2615 {
2616         struct inode * const inode = page->mapping->host;
2617         loff_t i_size = i_size_read(inode);
2618         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2619         unsigned offset;
2620         int ret;
2621
2622         /* Is the page fully inside i_size? */
2623         if (page->index < end_index)
2624                 goto out;
2625
2626         /* Is the page fully outside i_size? (truncate in progress) */
2627         offset = i_size & (PAGE_CACHE_SIZE-1);
2628         if (page->index >= end_index+1 || !offset) {
2629                 /*
2630                  * The page may have dirty, unmapped buffers.  For example,
2631                  * they may have been added in ext3_writepage().  Make them
2632                  * freeable here, so the page does not leak.
2633                  */
2634 #if 0
2635                 /* Not really sure about this - do we need this? */
2636                 if (page->mapping->a_ops->invalidatepage)
2637                         page->mapping->a_ops->invalidatepage(page, offset);
2638 #endif
2639                 unlock_page(page);
2640                 return 0; /* don't care */
2641         }
2642
2643         /*
2644          * The page straddles i_size.  It must be zeroed out on each and every
2645          * writepage invocation because it may be mmapped.  "A file is mapped
2646          * in multiples of the page size.  For a file that is not a multiple of
2647          * the  page size, the remaining memory is zeroed when mapped, and
2648          * writes to that region are not written out to the file."
2649          */
2650         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2651 out:
2652         ret = mpage_writepage(page, get_block, wbc);
2653         if (ret == -EAGAIN)
2654                 ret = __block_write_full_page(inode, page, get_block, wbc);
2655         return ret;
2656 }
2657 EXPORT_SYMBOL(nobh_writepage);
2658
2659 int nobh_truncate_page(struct address_space *mapping,
2660                         loff_t from, get_block_t *get_block)
2661 {
2662         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2663         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2664         unsigned blocksize;
2665         sector_t iblock;
2666         unsigned length, pos;
2667         struct inode *inode = mapping->host;
2668         struct page *page;
2669         struct buffer_head map_bh;
2670         int err;
2671
2672         blocksize = 1 << inode->i_blkbits;
2673         length = offset & (blocksize - 1);
2674
2675         /* Block boundary? Nothing to do */
2676         if (!length)
2677                 return 0;
2678
2679         length = blocksize - length;
2680         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2681
2682         page = grab_cache_page(mapping, index);
2683         err = -ENOMEM;
2684         if (!page)
2685                 goto out;
2686
2687         if (page_has_buffers(page)) {
2688 has_buffers:
2689                 unlock_page(page);
2690                 page_cache_release(page);
2691                 return block_truncate_page(mapping, from, get_block);
2692         }
2693
2694         /* Find the buffer that contains "offset" */
2695         pos = blocksize;
2696         while (offset >= pos) {
2697                 iblock++;
2698                 pos += blocksize;
2699         }
2700
2701         err = get_block(inode, iblock, &map_bh, 0);
2702         if (err)
2703                 goto unlock;
2704         /* unmapped? It's a hole - nothing to do */
2705         if (!buffer_mapped(&map_bh))
2706                 goto unlock;
2707
2708         /* Ok, it's mapped. Make sure it's up-to-date */
2709         if (!PageUptodate(page)) {
2710                 err = mapping->a_ops->readpage(NULL, page);
2711                 if (err) {
2712                         page_cache_release(page);
2713                         goto out;
2714                 }
2715                 lock_page(page);
2716                 if (!PageUptodate(page)) {
2717                         err = -EIO;
2718                         goto unlock;
2719                 }
2720                 if (page_has_buffers(page))
2721                         goto has_buffers;
2722         }
2723         zero_user(page, offset, length);
2724         set_page_dirty(page);
2725         err = 0;
2726
2727 unlock:
2728         unlock_page(page);
2729         page_cache_release(page);
2730 out:
2731         return err;
2732 }
2733 EXPORT_SYMBOL(nobh_truncate_page);
2734
2735 int block_truncate_page(struct address_space *mapping,
2736                         loff_t from, get_block_t *get_block)
2737 {
2738         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2739         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2740         unsigned blocksize;
2741         sector_t iblock;
2742         unsigned length, pos;
2743         struct inode *inode = mapping->host;
2744         struct page *page;
2745         struct buffer_head *bh;
2746         int err;
2747
2748         blocksize = 1 << inode->i_blkbits;
2749         length = offset & (blocksize - 1);
2750
2751         /* Block boundary? Nothing to do */
2752         if (!length)
2753                 return 0;
2754
2755         length = blocksize - length;
2756         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2757
2758         page = grab_cache_page(mapping, index);
2759         err = -ENOMEM;
2760         if (!page)
2761                 goto out;
2762
2763         if (!page_has_buffers(page))
2764                 create_empty_buffers(page, blocksize, 0);
2765
2766         /* Find the buffer that contains "offset" */
2767         bh = page_buffers(page);
2768         pos = blocksize;
2769         while (offset >= pos) {
2770                 bh = bh->b_this_page;
2771                 iblock++;
2772                 pos += blocksize;
2773         }
2774
2775         err = 0;
2776         if (!buffer_mapped(bh)) {
2777                 WARN_ON(bh->b_size != blocksize);
2778                 err = get_block(inode, iblock, bh, 0);
2779                 if (err)
2780                         goto unlock;
2781                 /* unmapped? It's a hole - nothing to do */
2782                 if (!buffer_mapped(bh))
2783                         goto unlock;
2784         }
2785
2786         /* Ok, it's mapped. Make sure it's up-to-date */
2787         if (PageUptodate(page))
2788                 set_buffer_uptodate(bh);
2789
2790         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2791                 err = -EIO;
2792                 ll_rw_block(READ, 1, &bh);
2793                 wait_on_buffer(bh);
2794                 /* Uhhuh. Read error. Complain and punt. */
2795                 if (!buffer_uptodate(bh))
2796                         goto unlock;
2797         }
2798
2799         zero_user(page, offset, length);
2800         mark_buffer_dirty(bh);
2801         err = 0;
2802
2803 unlock:
2804         unlock_page(page);
2805         page_cache_release(page);
2806 out:
2807         return err;
2808 }
2809
2810 /*
2811  * The generic ->writepage function for buffer-backed address_spaces
2812  */
2813 int block_write_full_page(struct page *page, get_block_t *get_block,
2814                         struct writeback_control *wbc)
2815 {
2816         struct inode * const inode = page->mapping->host;
2817         loff_t i_size = i_size_read(inode);
2818         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2819         unsigned offset;
2820
2821         /* Is the page fully inside i_size? */
2822         if (page->index < end_index)
2823                 return __block_write_full_page(inode, page, get_block, wbc);
2824
2825         /* Is the page fully outside i_size? (truncate in progress) */
2826         offset = i_size & (PAGE_CACHE_SIZE-1);
2827         if (page->index >= end_index+1 || !offset) {
2828                 /*
2829                  * The page may have dirty, unmapped buffers.  For example,
2830                  * they may have been added in ext3_writepage().  Make them
2831                  * freeable here, so the page does not leak.
2832                  */
2833                 do_invalidatepage(page, 0);
2834                 unlock_page(page);
2835                 return 0; /* don't care */
2836         }
2837
2838         /*
2839          * The page straddles i_size.  It must be zeroed out on each and every
2840  * writepage invocation because it may be mmapped.  "A file is mapped
2841          * in multiples of the page size.  For a file that is not a multiple of
2842          * the  page size, the remaining memory is zeroed when mapped, and
2843          * writes to that region are not written out to the file."
2844          */
2845         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2846         return __block_write_full_page(inode, page, get_block, wbc);
2847 }
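
/*
 * Illustrative sketch: the usual ->writepage wrapper around
 * block_write_full_page().  "myfs_get_block" is hypothetical.
 */
#if 0   /* example only */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, myfs_get_block, wbc);
}
#endif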
2848
2849 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2850                             get_block_t *get_block)
2851 {
2852         struct buffer_head tmp;
2853         struct inode *inode = mapping->host;
2854         tmp.b_state = 0;
2855         tmp.b_blocknr = 0;
2856         tmp.b_size = 1 << inode->i_blkbits;
2857         get_block(inode, block, &tmp, 0);
2858         return tmp.b_blocknr;
2859 }
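
/*
 * Illustrative sketch: the usual ->bmap implementation just forwards to
 * generic_block_bmap().  "myfs_get_block" is hypothetical.
 */
#if 0   /* example only */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif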
2860
2861 static void end_bio_bh_io_sync(struct bio *bio, int err)
2862 {
2863         struct buffer_head *bh = bio->bi_private;
2864
2865         if (err == -EOPNOTSUPP) {
2866                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2867                 set_bit(BH_Eopnotsupp, &bh->b_state);
2868         }
2869
2870         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2871         bio_put(bio);
2872 }
2873
2874 int submit_bh(int rw, struct buffer_head * bh)
2875 {
2876         struct bio *bio;
2877         int ret = 0;
2878
2879         BUG_ON(!buffer_locked(bh));
2880         BUG_ON(!buffer_mapped(bh));
2881         BUG_ON(!bh->b_end_io);
2882
2883         if (buffer_ordered(bh) && (rw == WRITE))
2884                 rw = WRITE_BARRIER;
2885
2886         /*
2887          * Only clear out a write error when rewriting. Should this
2888          * include WRITE_SYNC as well?
2889          */
2890         if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2891                 clear_buffer_write_io_error(bh);
2892
2893         /*
2894          * from here on down, it's all bio -- do the initial mapping,
2895          * submit_bio -> generic_make_request may further map this bio around
2896          */
2897         bio = bio_alloc(GFP_NOIO, 1);
2898
2899         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2900         bio->bi_bdev = bh->b_bdev;
2901         bio->bi_io_vec[0].bv_page = bh->b_page;
2902         bio->bi_io_vec[0].bv_len = bh->b_size;
2903         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2904
2905         bio->bi_vcnt = 1;
2906         bio->bi_idx = 0;
2907         bio->bi_size = bh->b_size;
2908
2909         bio->bi_end_io = end_bio_bh_io_sync;
2910         bio->bi_private = bh;
2911
2912         bio_get(bio);
2913         submit_bio(rw, bio);
2914
2915         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2916                 ret = -EOPNOTSUPP;
2917
2918         bio_put(bio);
2919         return ret;
2920 }
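
/*
 * Illustrative sketch: submitting one mapped buffer synchronously by hand -
 * lock it, set a completion handler, submit, then wait.  This assumes the
 * caller already holds a reference to the bh.
 */
#if 0   /* example only */
static int read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        get_bh(bh);                     /* end_buffer_read_sync drops it */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif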
2921
2922 /**
2923  * ll_rw_block: low-level access to block devices (DEPRECATED)
2924  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2925  * @nr: number of &struct buffer_heads in the array
2926  * @bhs: array of pointers to &struct buffer_head
2927  *
2928  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2929  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2930  * kind, %SWRITE, is like %WRITE except that we make sure the *current*
2931  * data in the buffers is sent to disk. The fourth %READA option is described
2932  * in the documentation for generic_make_request(), which ll_rw_block() calls.
2933  *
2934  * This function drops any buffer that it cannot get a lock on (with the
2935  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2936  * clean when doing a write request, and any buffer that appears to be
2937  * up-to-date when doing a read request.  Further, it marks as clean buffers that
2938  * are processed for writing (the buffer cache won't assume that they are
2939  * actually clean until the buffer gets unlocked).
2940  *
2941  * ll_rw_block() sets b_end_io to a simple completion handler that marks
2942  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2943  * any waiters.
2944  *
2945  * All of the buffers must be for the same device, and their size must be
2946  * a multiple of the device's currently approved block size.
2947  */
2948 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2949 {
2950         int i;
2951
2952         for (i = 0; i < nr; i++) {
2953                 struct buffer_head *bh = bhs[i];
2954
2955                 if (rw == SWRITE || rw == SWRITE_SYNC)
2956                         lock_buffer(bh);
2957                 else if (test_set_buffer_locked(bh))
2958                         continue;
2959
2960                 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
2961                         if (test_clear_buffer_dirty(bh)) {
2962                                 bh->b_end_io = end_buffer_write_sync;
2963                                 get_bh(bh);
2964                                 if (rw == SWRITE_SYNC)
2965                                         submit_bh(WRITE_SYNC, bh);
2966                                 else
2967                                         submit_bh(WRITE, bh);
2968                                 continue;
2969                         }
2970                 } else {
2971                         if (!buffer_uptodate(bh)) {
2972                                 bh->b_end_io = end_buffer_read_sync;
2973                                 get_bh(bh);
2974                                 submit_bh(rw, bh);
2975                                 continue;
2976                         }
2977                 }
2978                 unlock_buffer(bh);
2979         }
2980 }
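
/*
 * Example (illustrative): submitting a batch of reads and waiting for
 * them afterwards.  Buffers that were already up-to-date are skipped
 * by ll_rw_block(), but waiting on an unlocked buffer returns
 * immediately, so the loop below remains safe:
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++)
 *		wait_on_buffer(bhs[i]);
 */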
2981
2982 /*
2983  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2984  * and then start new I/O and then wait upon it.  The caller must have a ref on
2985  * the buffer_head.
2986  */
2987 int sync_dirty_buffer(struct buffer_head *bh)
2988 {
2989         int ret = 0;
2990
2991         WARN_ON(atomic_read(&bh->b_count) < 1);
2992         lock_buffer(bh);
2993         if (test_clear_buffer_dirty(bh)) {
2994                 get_bh(bh);
2995                 bh->b_end_io = end_buffer_write_sync;
2996                 ret = submit_bh(WRITE_SYNC, bh);
2997                 wait_on_buffer(bh);
2998                 if (buffer_eopnotsupp(bh)) {
2999                         clear_buffer_eopnotsupp(bh);
3000                         ret = -EOPNOTSUPP;
3001                 }
3002                 if (!ret && !buffer_uptodate(bh))
3003                         ret = -EIO;
3004         } else {
3005                 unlock_buffer(bh);
3006         }
3007         return ret;
3008 }
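
/*
 * Typical caller pattern (sketch): dirty the buffer, then force it to
 * disk with full data integrity.  The caller must hold its own
 * reference on the bh, as the WARN_ON above checks:
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		handle the write error (-EIO or -EOPNOTSUPP);
 */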
3009
3010 /*
3011  * try_to_free_buffers() checks if all the buffers on this particular page
3012  * are unused, and releases them if so.
3013  *
3014  * Exclusion against try_to_free_buffers may be obtained by either
3015  * locking the page or by holding its mapping's private_lock.
3016  *
3017  * If the page is dirty but all the buffers are clean then we need to
3018  * be sure to mark the page clean as well.  This is because the page
3019  * may be against a block device, and a later reattachment of buffers
3020  * to a dirty page will set *all* buffers dirty, which would corrupt
3021  * filesystem data on the same device.
3022  *
3023  * The same applies to regular filesystem pages: if all the buffers are
3024  * clean then we set the page clean and proceed.  To do that, we require
3025  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3026  * private_lock.
3027  *
3028  * try_to_free_buffers() is non-blocking.
3029  */
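/*
 * A buffer is "busy" if someone still holds a reference to it, or if
 * it is dirty or locked; busy buffers must not be freed.
 */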
3030 static inline int buffer_busy(struct buffer_head *bh)
3031 {
3032         return atomic_read(&bh->b_count) |
3033                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3034 }
3035
3036 static int
3037 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3038 {
3039         struct buffer_head *head = page_buffers(page);
3040         struct buffer_head *bh;
3041
3042         bh = head;
3043         do {
3044                 if (buffer_write_io_error(bh) && page->mapping)
3045                         set_bit(AS_EIO, &page->mapping->flags);
3046                 if (buffer_busy(bh))
3047                         goto failed;
3048                 bh = bh->b_this_page;
3049         } while (bh != head);
3050
3051         do {
3052                 struct buffer_head *next = bh->b_this_page;
3053
3054                 if (bh->b_assoc_map)
3055                         __remove_assoc_queue(bh);
3056                 bh = next;
3057         } while (bh != head);
3058         *buffers_to_free = head;
3059         __clear_page_buffers(page);
3060         return 1;
3061 failed:
3062         return 0;
3063 }
3064
3065 int try_to_free_buffers(struct page *page)
3066 {
3067         struct address_space * const mapping = page->mapping;
3068         struct buffer_head *buffers_to_free = NULL;
3069         int ret = 0;
3070
3071         BUG_ON(!PageLocked(page));
3072         if (PageWriteback(page))
3073                 return 0;
3074
3075         if (mapping == NULL) {          /* can this still happen? */
3076                 ret = drop_buffers(page, &buffers_to_free);
3077                 goto out;
3078         }
3079
3080         spin_lock(&mapping->private_lock);
3081         ret = drop_buffers(page, &buffers_to_free);
3082
3083         /*
3084          * If the filesystem writes its buffers by hand (eg ext3)
3085          * then we can have clean buffers against a dirty page.  We
3086          * clean the page here; otherwise the VM will never notice
3087          * that the filesystem did any IO at all.
3088          *
3089          * Also, during truncate, discard_buffer will have marked all
3090          * the page's buffers clean.  We discover that here and clean
3091          * the page also.
3092          *
3093          * private_lock must be held over this entire operation in order
3094          * to synchronise against __set_page_dirty_buffers and prevent the
3095          * dirty bit from being lost.
3096          */
3097         if (ret)
3098                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3099         spin_unlock(&mapping->private_lock);
3100 out:
3101         if (buffers_to_free) {
3102                 struct buffer_head *bh = buffers_to_free;
3103
3104                 do {
3105                         struct buffer_head *next = bh->b_this_page;
3106                         free_buffer_head(bh);
3107                         bh = next;
3108                 } while (bh != buffers_to_free);
3109         }
3110         return ret;
3111 }
3112 EXPORT_SYMBOL(try_to_free_buffers);
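
/*
 * Usage sketch (hypothetical): a simple buffer-backed filesystem can
 * point its address_space_operations ->releasepage at a thin wrapper
 * like this one; "myfs_releasepage" is named for illustration only:
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */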
3113
3114 void block_sync_page(struct page *page)
3115 {
3116         struct address_space *mapping;
3117
3118         smp_mb();
3119         mapping = page_mapping(page);
3120         if (mapping)
3121                 blk_run_backing_dev(mapping->backing_dev_info, page);
3122 }
3123
3124 /*
3125  * There are no bdflush tunables left.  But distributions are
3126  * still running obsolete flush daemons, so we terminate them here.
3127  *
3128  * Use of bdflush() is deprecated and will be removed in a future kernel.
3129  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3130  */
3131 asmlinkage long sys_bdflush(int func, long data)
3132 {
3133         static int msg_count;
3134
3135         if (!capable(CAP_SYS_ADMIN))
3136                 return -EPERM;
3137
3138         if (msg_count < 5) {
3139                 msg_count++;
3140                 printk(KERN_INFO
3141                         "warning: process `%s' used the obsolete bdflush"
3142                         " system call\n", current->comm);
3143                 printk(KERN_INFO "Fix your initscripts?\n");
3144         }
3145
3146         if (func == 1)
3147                 do_exit(0);
3148         return 0;
3149 }
3150
3151 /*
3152  * Buffer-head allocation
3153  */
3154 static struct kmem_cache *bh_cachep;
3155
3156 /*
3157  * Once the number of bh's in the machine exceeds this level, we start
3158  * stripping them in writeback.
3159  */
3160 static int max_buffer_heads;
3161
3162 int buffer_heads_over_limit;
3163
3164 struct bh_accounting {
3165         int nr;                 /* Number of live bh's */
3166         int ratelimit;          /* Limit cacheline bouncing */
3167 };
3168
3169 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3170
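/*
 * Recompute buffer_heads_over_limit from the per-CPU counters.  Each
 * CPU only performs the full summation once per 4096 calls, so the
 * shared flag (and the other CPUs' counters) are not touched often
 * enough to cause cacheline bouncing.
 */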
3171 static void recalc_bh_state(void)
3172 {
3173         int i;
3174         int tot = 0;
3175
3176         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3177                 return;
3178         __get_cpu_var(bh_accounting).ratelimit = 0;
3179         for_each_online_cpu(i)
3180                 tot += per_cpu(bh_accounting, i).nr;
3181         buffer_heads_over_limit = (tot > max_buffer_heads);
3182 }
3183
3184 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3185 {
3186         struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3187         if (ret) {
3188                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3189                 get_cpu_var(bh_accounting).nr++;
3190                 recalc_bh_state();
3191                 put_cpu_var(bh_accounting);
3192         }
3193         return ret;
3194 }
3195 EXPORT_SYMBOL(alloc_buffer_head);
3196
3197 void free_buffer_head(struct buffer_head *bh)
3198 {
3199         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3200         kmem_cache_free(bh_cachep, bh);
3201         get_cpu_var(bh_accounting).nr--;
3202         recalc_bh_state();
3203         put_cpu_var(bh_accounting);
3204 }
3205 EXPORT_SYMBOL(free_buffer_head);
3206
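/*
 * CPU hotplug teardown: release the dead CPU's buffer-head LRU
 * references and fold its bh_accounting count into the current CPU's
 * counter so that no buffer heads vanish from the global total.
 */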
3207 static void buffer_exit_cpu(int cpu)
3208 {
3209         int i;
3210         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3211
3212         for (i = 0; i < BH_LRU_SIZE; i++) {
3213                 brelse(b->bhs[i]);
3214                 b->bhs[i] = NULL;
3215         }
3216         get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3217         per_cpu(bh_accounting, cpu).nr = 0;
3218         put_cpu_var(bh_accounting);
3219 }
3220
3221 static int buffer_cpu_notify(struct notifier_block *self,
3222                               unsigned long action, void *hcpu)
3223 {
3224         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3225                 buffer_exit_cpu((unsigned long)hcpu);
3226         return NOTIFY_OK;
3227 }
3228
3229 /**
3230  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3231  * @bh: struct buffer_head
3232  *
3233  * Returns 1 if the buffer is up-to-date.  Otherwise returns 0 and
3234  * leaves the buffer locked.
3235  */
3236 int bh_uptodate_or_lock(struct buffer_head *bh)
3237 {
3238         if (!buffer_uptodate(bh)) {
3239                 lock_buffer(bh);
3240                 if (!buffer_uptodate(bh))
3241                         return 0;
3242                 unlock_buffer(bh);
3243         }
3244         return 1;
3245 }
3246 EXPORT_SYMBOL(bh_uptodate_or_lock);
3247
3248 /**
3249  * bh_submit_read - Submit a locked buffer for reading
3250  * @bh: struct buffer_head
3251  *
3252  * Returns zero on success and -EIO on error.  The buffer must be locked.
3253  */
3254 int bh_submit_read(struct buffer_head *bh)
3255 {
3256         BUG_ON(!buffer_locked(bh));
3257
3258         if (buffer_uptodate(bh)) {
3259                 unlock_buffer(bh);
3260                 return 0;
3261         }
3262
3263         get_bh(bh);
3264         bh->b_end_io = end_buffer_read_sync;
3265         submit_bh(READ, bh);
3266         wait_on_buffer(bh);
3267         if (buffer_uptodate(bh))
3268                 return 0;
3269         return -EIO;
3270 }
3271 EXPORT_SYMBOL(bh_submit_read);
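
/*
 * The two helpers above combine into a common read pattern (sketch):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		(bh is now locked and not up-to-date)
 *		if (bh_submit_read(bh))
 *			return -EIO;
 *	}
 *	(bh is up-to-date and unlocked here)
 */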
3272
3273 static void
3274 init_buffer_head(void *data)
3275 {
3276         struct buffer_head *bh = data;
3277
3278         memset(bh, 0, sizeof(*bh));
3279         INIT_LIST_HEAD(&bh->b_assoc_buffers);
3280 }
3281
3282 void __init buffer_init(void)
3283 {
3284         int nrpages;
3285
3286         bh_cachep = kmem_cache_create("buffer_head",
3287                         sizeof(struct buffer_head), 0,
3288                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3289                                 SLAB_MEM_SPREAD),
3290                                 init_buffer_head);
3291
3292         /*
3293          * Limit the bh occupancy to 10% of ZONE_NORMAL
3294          */
3295         nrpages = (nr_free_buffer_pages() * 10) / 100;
3296         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3297         hotcpu_notifier(buffer_cpu_notify, 0);
3298 }
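
/*
 * Worked example of the limit above (numbers purely illustrative):
 * with 4KiB pages and nr_free_buffer_pages() returning 250000 (about
 * 1GiB of ZONE_NORMAL), nrpages is 25000 and max_buffer_heads becomes
 * 25000 * (4096 / sizeof(struct buffer_head)).
 */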
3299
3300 EXPORT_SYMBOL(__bforget);
3301 EXPORT_SYMBOL(__brelse);
3302 EXPORT_SYMBOL(__wait_on_buffer);
3303 EXPORT_SYMBOL(block_commit_write);
3304 EXPORT_SYMBOL(block_prepare_write);
3305 EXPORT_SYMBOL(block_page_mkwrite);
3306 EXPORT_SYMBOL(block_read_full_page);
3307 EXPORT_SYMBOL(block_sync_page);
3308 EXPORT_SYMBOL(block_truncate_page);
3309 EXPORT_SYMBOL(block_write_full_page);
3310 EXPORT_SYMBOL(cont_write_begin);
3311 EXPORT_SYMBOL(end_buffer_read_sync);
3312 EXPORT_SYMBOL(end_buffer_write_sync);
3313 EXPORT_SYMBOL(file_fsync);
3314 EXPORT_SYMBOL(fsync_bdev);
3315 EXPORT_SYMBOL(generic_block_bmap);
3316 EXPORT_SYMBOL(generic_cont_expand_simple);
3317 EXPORT_SYMBOL(init_buffer);
3318 EXPORT_SYMBOL(invalidate_bdev);
3319 EXPORT_SYMBOL(ll_rw_block);
3320 EXPORT_SYMBOL(mark_buffer_dirty);
3321 EXPORT_SYMBOL(submit_bh);
3322 EXPORT_SYMBOL(sync_dirty_buffer);
3323 EXPORT_SYMBOL(unlock_buffer);