4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
56 static int sync_buffer(void *word)
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
65 blk_run_address_space(bd->bd_inode->i_mapping);
70 void __lock_buffer(struct buffer_head *bh)
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
75 EXPORT_SYMBOL(__lock_buffer);
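/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * usual pattern for modifying a metadata buffer is lock, update, dirty,
 * unlock.  lock_buffer() is the <linux/buffer_head.h> inline that falls
 * back to __lock_buffer() above when its trylock fails.
 */
static void example_update_byte(struct buffer_head *bh, unsigned offset, u8 value)
{
	lock_buffer(bh);			/* serialize against I/O and other writers */
	((u8 *)bh->b_data)[offset] = value;	/* hypothetical in-block edit */
	mark_buffer_dirty(bh);			/* buffer (and its page) now need writeout */
	unlock_buffer(bh);
}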
77 void unlock_buffer(struct buffer_head *bh)
79 smp_mb__before_clear_bit();
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
90 void __wait_on_buffer(struct buffer_head * bh)
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
96 __clear_page_buffers(struct page *page)
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
103 static void buffer_io_error(struct buffer_head *bh)
105 char b[BDEVNAME_SIZE];
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
113 * End-of-IO handler helper function which does not touch the bh after
115 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116 * a race there is benign: unlock_buffer() only uses the bh's address for
117 * hashing after unlocking the buffer, so it doesn't actually touch the bh
120 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
123 set_buffer_uptodate(bh);
125 /* This happens due to failed READA attempts. */
126 clear_buffer_uptodate(bh);
132 * Default synchronous end-of-IO handler. Just mark it up-to-date and
133 * unlock the buffer. This is what ll_rw_block uses too.
135 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
137 __end_buffer_read_notouch(bh, uptodate);
141 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
143 char b[BDEVNAME_SIZE];
146 set_buffer_uptodate(bh);
148 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
150 printk(KERN_WARNING "lost page write due to "
152 bdevname(bh->b_bdev, b));
154 set_buffer_write_io_error(bh);
155 clear_buffer_uptodate(bh);
162 * Write out and wait upon all the dirty data associated with a block
163 * device via its mapping. Does not take the superblock lock.
165 int sync_blockdev(struct block_device *bdev)
170 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
173 EXPORT_SYMBOL(sync_blockdev);
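/*
 * Illustrative sketch (hypothetical caller): flushing and then dropping
 * the pagecache view of a device that is about to be released.
 */
static void example_quiesce_bdev(struct block_device *bdev)
{
	sync_blockdev(bdev);		/* write out and wait upon dirty pages */
	invalidate_bdev(bdev);		/* then discard the now-clean pagecache */
}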
176 * Write out and wait upon all dirty data associated with this
177 * device. Filesystem data as well as the underlying block
178 * device. Takes the superblock lock.
180 int fsync_bdev(struct block_device *bdev)
182 struct super_block *sb = get_super(bdev);
184 int res = fsync_super(sb);
188 return sync_blockdev(bdev);
192 * freeze_bdev -- lock a filesystem and force it into a consistent state
193 * @bdev: blockdevice to lock
195 * This takes the block device bd_mount_sem to make sure no new mounts
196 * happen on bdev until thaw_bdev() is called.
197 * If a superblock is found on this device, we take the s_umount semaphore
198 * on it to make sure nobody unmounts until the snapshot creation is done.
200 struct super_block *freeze_bdev(struct block_device *bdev)
202 struct super_block *sb;
204 down(&bdev->bd_mount_sem);
205 sb = get_super(bdev);
206 if (sb && !(sb->s_flags & MS_RDONLY)) {
207 sb->s_frozen = SB_FREEZE_WRITE;
212 sb->s_frozen = SB_FREEZE_TRANS;
215 sync_blockdev(sb->s_bdev);
217 if (sb->s_op->write_super_lockfs)
218 sb->s_op->write_super_lockfs(sb);
222 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
224 EXPORT_SYMBOL(freeze_bdev);
227 * thaw_bdev -- unlock filesystem
228 * @bdev: blockdevice to unlock
229 * @sb: associated superblock
231 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
233 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
236 BUG_ON(sb->s_bdev != bdev);
238 if (sb->s_op->unlockfs)
239 sb->s_op->unlockfs(sb);
240 sb->s_frozen = SB_UNFROZEN;
242 wake_up(&sb->s_wait_unfrozen);
246 up(&bdev->bd_mount_sem);
248 EXPORT_SYMBOL(thaw_bdev);
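/*
 * Illustrative sketch (hypothetical snapshot-style caller): bracketing
 * work on a consistent device image with freeze_bdev()/thaw_bdev().
 */
static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* sync the fs and block new writes; may be NULL */
	/* ... copy or snapshot the now-consistent device here ... */
	thaw_bdev(bdev, sb);		/* unfreeze (if frozen) and release bd_mount_sem */
	return 0;
}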
251 * Various filesystems appear to want __find_get_block to be non-blocking.
252 * But it's the page lock which protects the buffers. To get around this,
253 * we get exclusion from try_to_free_buffers with the blockdev mapping's
256 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257 * may be quite high. This code could TryLock the page, and if that
258 * succeeds, there is no need to take private_lock. (But if
259 * private_lock is contended then so is mapping->tree_lock).
261 static struct buffer_head *
262 __find_get_block_slow(struct block_device *bdev, sector_t block)
264 struct inode *bd_inode = bdev->bd_inode;
265 struct address_space *bd_mapping = bd_inode->i_mapping;
266 struct buffer_head *ret = NULL;
268 struct buffer_head *bh;
269 struct buffer_head *head;
273 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 page = find_get_page(bd_mapping, index);
278 spin_lock(&bd_mapping->private_lock);
279 if (!page_has_buffers(page))
281 head = page_buffers(page);
284 if (bh->b_blocknr == block) {
289 if (!buffer_mapped(bh))
291 bh = bh->b_this_page;
292 } while (bh != head);
294 /* we might be here because some of the buffers on this page are
295 * not mapped. This is due to various races between
296 * file I/O on the block device and getblk. It gets dealt with
297 * elsewhere; don't buffer_error if we had some unmapped buffers
300 printk("__find_get_block_slow() failed. "
301 "block=%llu, b_blocknr=%llu\n",
302 (unsigned long long)block,
303 (unsigned long long)bh->b_blocknr);
304 printk("b_state=0x%08lx, b_size=%zu\n",
305 bh->b_state, bh->b_size);
306 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
309 spin_unlock(&bd_mapping->private_lock);
310 page_cache_release(page);
315 /* If invalidate_buffers() will trash dirty buffers, it means some kind
316 of fs corruption is going on. Trashing dirty data always implies losing
317 information that was supposed to be just stored on the physical layer
320 Thus invalidate_buffers in general usage is not allowed to trash
321 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
322 be preserved. These buffers are simply skipped.
324 We also skip buffers which are still in use. For example this can
325 happen if a userspace program is reading the block device.
327 NOTE: if the user removed a removable-media disk while there was still
328 dirty data not yet synced to disk (due to a bug in the device driver or
329 to a user error), then by not destroying the dirty buffers we could also
330 corrupt the next medium inserted; thus a parameter is necessary to
331 handle this case as safely as possible (trying not to corrupt the newly
332 inserted disk with data belonging to the old, now-corrupted one). Also,
333 for a ramdisk the natural way to release the ramdisk memory is to
334 destroy its dirty buffers.
336 These are two special cases. Normal usage implies that the device driver
337 issues a sync on the device (without waiting for I/O completion) and
338 then an invalidate_buffers call that doesn't trash dirty buffers.
340 For handling cache coherency with the blkdev pagecache, the 'update' case
341 has been introduced. It is needed to re-read from disk any pinned
342 buffer. NOTE: re-reading from disk is destructive so we can do it only
343 when we assume nobody is changing the buffercache under our I/O and when
344 we think the disk contains more recent information than the buffercache.
345 The update == 1 pass marks the buffers we need to update, the update == 2
346 pass does the actual I/O. */
347 void invalidate_bdev(struct block_device *bdev)
349 struct address_space *mapping = bdev->bd_inode->i_mapping;
351 if (mapping->nrpages == 0)
354 invalidate_bh_lrus();
355 invalidate_mapping_pages(mapping, 0, -1);
359 * Kick pdflush then try to free up some ZONE_NORMAL memory.
361 static void free_more_memory(void)
366 wakeup_pdflush(1024);
369 for_each_online_node(nid) {
370 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
371 gfp_zone(GFP_NOFS), NULL,
374 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
380 * I/O completion handler for block_read_full_page() - pages
381 * which come unlocked at the end of I/O.
383 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
386 struct buffer_head *first;
387 struct buffer_head *tmp;
389 int page_uptodate = 1;
391 BUG_ON(!buffer_async_read(bh));
395 set_buffer_uptodate(bh);
397 clear_buffer_uptodate(bh);
398 if (printk_ratelimit())
404 * Be _very_ careful from here on. Bad things can happen if
405 * two buffer heads end IO at almost the same time and both
406 * decide that the page is now completely done.
408 first = page_buffers(page);
409 local_irq_save(flags);
410 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
411 clear_buffer_async_read(bh);
415 if (!buffer_uptodate(tmp))
417 if (buffer_async_read(tmp)) {
418 BUG_ON(!buffer_locked(tmp));
421 tmp = tmp->b_this_page;
423 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
424 local_irq_restore(flags);
427 * If none of the buffers had errors and they are all
428 * uptodate then we can set the page uptodate.
430 if (page_uptodate && !PageError(page))
431 SetPageUptodate(page);
436 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
437 local_irq_restore(flags);
442 * Completion handler for block_write_full_page() - pages which are unlocked
443 * during I/O, and which have PageWriteback cleared upon I/O completion.
445 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
447 char b[BDEVNAME_SIZE];
449 struct buffer_head *first;
450 struct buffer_head *tmp;
453 BUG_ON(!buffer_async_write(bh));
457 set_buffer_uptodate(bh);
459 if (printk_ratelimit()) {
461 printk(KERN_WARNING "lost page write due to "
463 bdevname(bh->b_bdev, b));
465 set_bit(AS_EIO, &page->mapping->flags);
466 set_buffer_write_io_error(bh);
467 clear_buffer_uptodate(bh);
471 first = page_buffers(page);
472 local_irq_save(flags);
473 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
475 clear_buffer_async_write(bh);
477 tmp = bh->b_this_page;
479 if (buffer_async_write(tmp)) {
480 BUG_ON(!buffer_locked(tmp));
483 tmp = tmp->b_this_page;
485 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
486 local_irq_restore(flags);
487 end_page_writeback(page);
491 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
492 local_irq_restore(flags);
497 * If a page's buffers are under async readin (end_buffer_async_read
498 * completion) then there is a possibility that another thread of
499 * control could lock one of the buffers after it has completed
500 * but while some of the other buffers have not completed. This
501 * locked buffer would confuse end_buffer_async_read() into not unlocking
502 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
503 * that this buffer is not under async I/O.
505 * The page comes unlocked when it has no locked buffer_async buffers
508 * PageLocked prevents anyone starting new async I/O reads any of
511 * PageWriteback is used to prevent simultaneous writeout of the same
514 * PageLocked prevents anyone from starting writeback of a page which is
515 * under read I/O (PageWriteback is only ever set against a locked page).
517 static void mark_buffer_async_read(struct buffer_head *bh)
519 bh->b_end_io = end_buffer_async_read;
520 set_buffer_async_read(bh);
523 void mark_buffer_async_write(struct buffer_head *bh)
525 bh->b_end_io = end_buffer_async_write;
526 set_buffer_async_write(bh);
528 EXPORT_SYMBOL(mark_buffer_async_write);
532 * fs/buffer.c contains helper functions for buffer-backed address space's
533 * fsync functions. A common requirement for buffer-based filesystems is
534 * that certain data from the backing blockdev needs to be written out for
535 * a successful fsync(). For example, ext2 indirect blocks need to be
536 * written back and waited upon before fsync() returns.
538 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
539 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
540 * management of a list of dependent buffers at ->i_mapping->private_list.
542 * Locking is a little subtle: try_to_free_buffers() will remove buffers
543 * from their controlling inode's queue when they are being freed. But
544 * try_to_free_buffers() will be operating against the *blockdev* mapping
545 * at the time, not against the S_ISREG file which depends on those buffers.
546 * So the locking for private_list is via the private_lock in the address_space
547 * which backs the buffers. Which is different from the address_space
548 * against which the buffers are listed. So for a particular address_space,
549 * mapping->private_lock does *not* protect mapping->private_list! In fact,
550 * mapping->private_list will always be protected by the backing blockdev's
553 * Which introduces a requirement: all buffers on an address_space's
554 * ->private_list must be from the same address_space: the blockdev's.
556 * address_spaces which do not place buffers at ->private_list via these
557 * utility functions are free to use private_lock and private_list for
558 * whatever they want. The only requirement is that list_empty(private_list)
559 * be true at clear_inode() time.
561 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
562 * filesystems should do that. invalidate_inode_buffers() should just go
563 * BUG_ON(!list_empty).
565 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
566 * take an address_space, not an inode. And it should be called
567 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
570 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
571 * list if it is already on a list. Because if the buffer is on a list,
572 * it *must* already be on the right one. If not, the filesystem is being
573 * silly. This will save a ton of locking. But first we have to ensure
574 * that buffers are taken *off* the old inode's list when they are freed
575 * (presumably in truncate). That requires careful auditing of all
576 * filesystems (do it inside bforget()). It could also be done by bringing
581 * The buffer's backing address_space's private_lock must be held
583 static inline void __remove_assoc_queue(struct buffer_head *bh)
585 list_del_init(&bh->b_assoc_buffers);
586 WARN_ON(!bh->b_assoc_map);
587 if (buffer_write_io_error(bh))
588 set_bit(AS_EIO, &bh->b_assoc_map->flags);
589 bh->b_assoc_map = NULL;
592 int inode_has_buffers(struct inode *inode)
594 return !list_empty(&inode->i_data.private_list);
598 * osync is designed to support O_SYNC I/O. It waits synchronously for
599 * all already-submitted IO to complete, but does not queue any new
600 * writes to the disk.
602 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
603 * you dirty the buffers, and then use osync_inode_buffers to wait for
604 * completion. Any other dirty buffers which are not yet queued for
605 * write will not be flushed to disk by the osync.
607 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
609 struct buffer_head *bh;
615 list_for_each_prev(p, list) {
617 if (buffer_locked(bh)) {
621 if (!buffer_uptodate(bh))
633 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
634 * @mapping: the mapping which wants those buffers written
636 * Starts I/O against the buffers at mapping->private_list, and waits upon
639 * Basically, this is a convenience function for fsync().
640 * @mapping is a file or directory which needs those buffers to be written for
641 * a successful fsync().
643 int sync_mapping_buffers(struct address_space *mapping)
645 struct address_space *buffer_mapping = mapping->assoc_mapping;
647 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
650 return fsync_buffers_list(&buffer_mapping->private_lock,
651 &mapping->private_list);
653 EXPORT_SYMBOL(sync_mapping_buffers);
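/*
 * Illustrative sketch: a simple buffer-backed filesystem can build its
 * ->fsync on sync_mapping_buffers(), much as ext2 does.  The function
 * name is hypothetical; the signature is this kernel's file_operations
 * ->fsync signature.
 */
static int example_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	int err, ret;

	ret = sync_mapping_buffers(inode->i_mapping);	/* associated metadata buffers */
	if (!(inode->i_state & I_DIRTY))
		return ret;
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return ret;

	err = write_inode_now(inode, 0);		/* then the inode itself */
	if (ret == 0)
		ret = err;
	return ret;
}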
656 * Called when we've recently written block `bblock', and it is known that
657 * `bblock' was for a buffer_boundary() buffer. This means that the block at
658 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
659 * dirty, schedule it for IO. So that indirects merge nicely with their data.
661 void write_boundary_block(struct block_device *bdev,
662 sector_t bblock, unsigned blocksize)
664 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
666 if (buffer_dirty(bh))
667 ll_rw_block(WRITE, 1, &bh);
672 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
674 struct address_space *mapping = inode->i_mapping;
675 struct address_space *buffer_mapping = bh->b_page->mapping;
677 mark_buffer_dirty(bh);
678 if (!mapping->assoc_mapping) {
679 mapping->assoc_mapping = buffer_mapping;
681 BUG_ON(mapping->assoc_mapping != buffer_mapping);
683 if (!bh->b_assoc_map) {
684 spin_lock(&buffer_mapping->private_lock);
685 list_move_tail(&bh->b_assoc_buffers,
686 &mapping->private_list);
687 bh->b_assoc_map = mapping;
688 spin_unlock(&buffer_mapping->private_lock);
691 EXPORT_SYMBOL(mark_buffer_dirty_inode);
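/*
 * Illustrative sketch (hypothetical helper): updating an indirect or
 * other metadata block that belongs to a file, and queueing it on the
 * inode's private_list so a later fsync() of the file writes it too.
 */
static int example_update_indirect(struct inode *inode, sector_t metablock,
				   unsigned offset, __le32 value)
{
	struct buffer_head *bh = sb_bread(inode->i_sb, metablock);

	if (!bh)
		return -EIO;
	*(__le32 *)(bh->b_data + offset) = value;	/* hypothetical in-block edit */
	mark_buffer_dirty_inode(bh, inode);		/* associate with this inode's fsync */
	brelse(bh);
	return 0;
}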
694 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
697 * If warn is true, then emit a warning if the page is not uptodate and has
698 * not been truncated.
700 static int __set_page_dirty(struct page *page,
701 struct address_space *mapping, int warn)
703 if (unlikely(!mapping))
704 return !TestSetPageDirty(page);
706 if (TestSetPageDirty(page))
709 spin_lock_irq(&mapping->tree_lock);
710 if (page->mapping) { /* Race with truncate? */
711 WARN_ON_ONCE(warn && !PageUptodate(page));
713 if (mapping_cap_account_dirty(mapping)) {
714 __inc_zone_page_state(page, NR_FILE_DIRTY);
715 __inc_bdi_stat(mapping->backing_dev_info,
717 task_io_account_write(PAGE_CACHE_SIZE);
719 radix_tree_tag_set(&mapping->page_tree,
720 page_index(page), PAGECACHE_TAG_DIRTY);
722 spin_unlock_irq(&mapping->tree_lock);
723 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
729 * Add a page to the dirty page list.
731 * It is a sad fact of life that this function is called from several places
732 * deeply under spinlocking. It may not sleep.
734 * If the page has buffers, the uptodate buffers are set dirty, to preserve
735 * dirty-state coherency between the page and the buffers. If the page does
736 * not have buffers then when they are later attached they will all be set
739 * The buffers are dirtied before the page is dirtied. There's a small race
740 * window in which a writepage caller may see the page cleanness but not the
741 * buffer dirtiness. That's fine. If this code were to set the page dirty
742 * before the buffers, a concurrent writepage caller could clear the page dirty
743 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
744 * page on the dirty page list.
746 * We use private_lock to lock against try_to_free_buffers while using the
747 * page's buffer list. Also use this to protect against clean buffers being
748 * added to the page after it was set dirty.
750 * FIXME: may need to call ->reservepage here as well. That's rather up to the
751 * address_space though.
753 int __set_page_dirty_buffers(struct page *page)
755 struct address_space *mapping = page_mapping(page);
757 if (unlikely(!mapping))
758 return !TestSetPageDirty(page);
760 spin_lock(&mapping->private_lock);
761 if (page_has_buffers(page)) {
762 struct buffer_head *head = page_buffers(page);
763 struct buffer_head *bh = head;
766 set_buffer_dirty(bh);
767 bh = bh->b_this_page;
768 } while (bh != head);
770 spin_unlock(&mapping->private_lock);
772 return __set_page_dirty(page, mapping, 1);
774 EXPORT_SYMBOL(__set_page_dirty_buffers);
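/*
 * Illustrative sketch: this helper is also what set_page_dirty() falls
 * back to when an address_space has no ->set_page_dirty method; a
 * filesystem may wire it up explicitly.  example_aops is hypothetical.
 */
static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
};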
777 * Write out and wait upon a list of buffers.
779 * We have conflicting pressures: we want to make sure that all
780 * initially dirty buffers get waited on, but that any subsequently
781 * dirtied buffers don't. After all, we don't want fsync to last
782 * forever if somebody is actively writing to the file.
784 * Do this in two main stages: first we copy dirty buffers to a
785 * temporary inode list, queueing the writes as we go. Then we clean
786 * up, waiting for those writes to complete.
788 * During this second stage, any subsequent updates to the file may end
789 * up refiling the buffer on the original inode's dirty list again, so
790 * there is a chance we will end up with a buffer queued for write but
791 * not yet completed on that list. So, as a final cleanup we go through
792 * the osync code to catch these locked, dirty buffers without requeuing
793 * any newly dirty buffers for write.
795 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
797 struct buffer_head *bh;
798 struct list_head tmp;
799 struct address_space *mapping;
802 INIT_LIST_HEAD(&tmp);
805 while (!list_empty(list)) {
806 bh = BH_ENTRY(list->next);
807 mapping = bh->b_assoc_map;
808 __remove_assoc_queue(bh);
809 /* Avoid race with mark_buffer_dirty_inode() which does
810 * a lockless check and we rely on seeing the dirty bit */
812 if (buffer_dirty(bh) || buffer_locked(bh)) {
813 list_add(&bh->b_assoc_buffers, &tmp);
814 bh->b_assoc_map = mapping;
815 if (buffer_dirty(bh)) {
819 * Ensure any pending I/O completes so that
820 * ll_rw_block() actually writes the current
821 * contents - it is a noop if I/O is still in
822 * flight on potentially older contents.
824 ll_rw_block(SWRITE_SYNC, 1, &bh);
831 while (!list_empty(&tmp)) {
832 bh = BH_ENTRY(tmp.prev);
834 mapping = bh->b_assoc_map;
835 __remove_assoc_queue(bh);
836 /* Avoid race with mark_buffer_dirty_inode() which does
837 * a lockless check and we rely on seeing the dirty bit */
839 if (buffer_dirty(bh)) {
840 list_add(&bh->b_assoc_buffers,
841 &mapping->private_list);
842 bh->b_assoc_map = mapping;
846 if (!buffer_uptodate(bh))
853 err2 = osync_buffers_list(lock, list);
861 * Invalidate any and all dirty buffers on a given inode. We are
862 * probably unmounting the fs, but that doesn't mean we have already
863 * done a sync(). Just drop the buffers from the inode list.
865 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
866 * assumes that all the buffers are against the blockdev. Not true
869 void invalidate_inode_buffers(struct inode *inode)
871 if (inode_has_buffers(inode)) {
872 struct address_space *mapping = &inode->i_data;
873 struct list_head *list = &mapping->private_list;
874 struct address_space *buffer_mapping = mapping->assoc_mapping;
876 spin_lock(&buffer_mapping->private_lock);
877 while (!list_empty(list))
878 __remove_assoc_queue(BH_ENTRY(list->next));
879 spin_unlock(&buffer_mapping->private_lock);
884 * Remove any clean buffers from the inode's buffer list. This is called
885 * when we're trying to free the inode itself. Those buffers can pin it.
887 * Returns true if all buffers were removed.
889 int remove_inode_buffers(struct inode *inode)
893 if (inode_has_buffers(inode)) {
894 struct address_space *mapping = &inode->i_data;
895 struct list_head *list = &mapping->private_list;
896 struct address_space *buffer_mapping = mapping->assoc_mapping;
898 spin_lock(&buffer_mapping->private_lock);
899 while (!list_empty(list)) {
900 struct buffer_head *bh = BH_ENTRY(list->next);
901 if (buffer_dirty(bh)) {
905 __remove_assoc_queue(bh);
907 spin_unlock(&buffer_mapping->private_lock);
913 * Create the appropriate buffers when given a page for the data area and
914 * the size of each buffer. Use the bh->b_this_page linked list to
915 * follow the buffers created. Return NULL if unable to create more
918 * The retry flag is used to differentiate async I/O (paging, swapping),
919 * which may not fail, from ordinary buffer allocations.
921 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
924 struct buffer_head *bh, *head;
930 while ((offset -= size) >= 0) {
931 bh = alloc_buffer_head(GFP_NOFS);
936 bh->b_this_page = head;
941 atomic_set(&bh->b_count, 0);
942 bh->b_private = NULL;
945 /* Link the buffer to its page */
946 set_bh_page(bh, page, offset);
948 init_buffer(bh, NULL, NULL);
952 * In case anything failed, we just free everything we got.
958 head = head->b_this_page;
959 free_buffer_head(bh);
964 * Return failure for non-async IO requests. Async IO requests
965 * are not allowed to fail, so we have to wait until buffer heads
966 * become available. But we don't want tasks sleeping with
967 * partially complete buffers, so all were released above.
972 /* We're _really_ low on memory. Now we just
973 * wait for old buffer heads to become free due to
974 * finishing IO. Since this is an async request and
975 * the reserve list is empty, we're sure there are
976 * async buffer heads in use.
981 EXPORT_SYMBOL_GPL(alloc_page_buffers);
984 link_dev_buffers(struct page *page, struct buffer_head *head)
986 struct buffer_head *bh, *tail;
991 bh = bh->b_this_page;
993 tail->b_this_page = head;
994 attach_page_buffers(page, head);
998 * Initialise the state of a blockdev page's buffers.
1001 init_page_buffers(struct page *page, struct block_device *bdev,
1002 sector_t block, int size)
1004 struct buffer_head *head = page_buffers(page);
1005 struct buffer_head *bh = head;
1006 int uptodate = PageUptodate(page);
1009 if (!buffer_mapped(bh)) {
1010 init_buffer(bh, NULL, NULL);
1012 bh->b_blocknr = block;
1014 set_buffer_uptodate(bh);
1015 set_buffer_mapped(bh);
1018 bh = bh->b_this_page;
1019 } while (bh != head);
1023 * Create the page-cache page that contains the requested block.
1025 * This is used purely for blockdev mappings.
1027 static struct page *
1028 grow_dev_page(struct block_device *bdev, sector_t block,
1029 pgoff_t index, int size)
1031 struct inode *inode = bdev->bd_inode;
1033 struct buffer_head *bh;
1035 page = find_or_create_page(inode->i_mapping, index,
1036 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1040 BUG_ON(!PageLocked(page));
1042 if (page_has_buffers(page)) {
1043 bh = page_buffers(page);
1044 if (bh->b_size == size) {
1045 init_page_buffers(page, bdev, block, size);
1048 if (!try_to_free_buffers(page))
1053 * Allocate some buffers for this page
1055 bh = alloc_page_buffers(page, size, 0);
1060 * Link the page to the buffers and initialise them. Take the
1061 * lock to be atomic wrt __find_get_block(), which does not
1062 * run under the page lock.
1064 spin_lock(&inode->i_mapping->private_lock);
1065 link_dev_buffers(page, bh);
1066 init_page_buffers(page, bdev, block, size);
1067 spin_unlock(&inode->i_mapping->private_lock);
1073 page_cache_release(page);
1078 * Create buffers for the specified block device block's page. If
1079 * that page was dirty, the buffers are set dirty also.
1082 grow_buffers(struct block_device *bdev, sector_t block, int size)
1091 } while ((size << sizebits) < PAGE_SIZE);
1093 index = block >> sizebits;
1096 * Check for a block which wants to lie outside our maximum possible
1097 * pagecache index. (this comparison is done using sector_t types).
1099 if (unlikely(index != block >> sizebits)) {
1100 char b[BDEVNAME_SIZE];
1102 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1104 __func__, (unsigned long long)block,
1108 block = index << sizebits;
1109 /* Create a page with the proper size buffers. */
1110 page = grow_dev_page(bdev, block, index, size);
1114 page_cache_release(page);
1118 static struct buffer_head *
1119 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1121 /* Size must be a multiple of the hard sector size */
1122 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1123 (size < 512 || size > PAGE_SIZE))) {
1124 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1126 printk(KERN_ERR "hardsect size: %d\n",
1127 bdev_hardsect_size(bdev));
1134 struct buffer_head * bh;
1137 bh = __find_get_block(bdev, block, size);
1141 ret = grow_buffers(bdev, block, size);
1150 * The relationship between dirty buffers and dirty pages:
1152 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1153 * the page is tagged dirty in its radix tree.
1155 * At all times, the dirtiness of the buffers represents the dirtiness of
1156 * subsections of the page. If the page has buffers, the page dirty bit is
1157 * merely a hint about the true dirty state.
1159 * When a page is set dirty in its entirety, all its buffers are marked dirty
1160 * (if the page has buffers).
1162 * When a buffer is marked dirty, its page is dirtied, but the page's other
1165 * Also. When blockdev buffers are explicitly read with bread(), they
1166 * individually become uptodate. But their backing page remains not
1167 * uptodate - even if all of its buffers are uptodate. A subsequent
1168 * block_read_full_page() against that page will discover all the uptodate
1169 * buffers, will set the page uptodate and will perform no I/O.
1173 * mark_buffer_dirty - mark a buffer_head as needing writeout
1174 * @bh: the buffer_head to mark dirty
1176 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1177 * backing page dirty, then tag the page as dirty in its address_space's radix
1178 * tree and then attach the address_space's inode to its superblock's dirty
1181 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1182 * mapping->tree_lock and the global inode_lock.
1184 void mark_buffer_dirty(struct buffer_head *bh)
1186 WARN_ON_ONCE(!buffer_uptodate(bh));
1189 * Very *carefully* optimize the it-is-already-dirty case.
1191 * Don't let the final "is it dirty" escape to before we
1192 * perhaps modified the buffer.
1194 if (buffer_dirty(bh)) {
1196 if (buffer_dirty(bh))
1200 if (!test_set_buffer_dirty(bh))
1201 __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1205 * Decrement a buffer_head's reference count. If all buffers against a page
1206 * have zero reference count, are clean and unlocked, and if the page is clean
1207 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1208 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1209 * a page but it ends up not being freed, and buffers may later be reattached).
1211 void __brelse(struct buffer_head * buf)
1213 if (atomic_read(&buf->b_count)) {
1217 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1221 * bforget() is like brelse(), except it discards any
1222 * potentially dirty data.
1224 void __bforget(struct buffer_head *bh)
1226 clear_buffer_dirty(bh);
1227 if (bh->b_assoc_map) {
1228 struct address_space *buffer_mapping = bh->b_page->mapping;
1230 spin_lock(&buffer_mapping->private_lock);
1231 list_del_init(&bh->b_assoc_buffers);
1232 bh->b_assoc_map = NULL;
1233 spin_unlock(&buffer_mapping->private_lock);
1238 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1241 if (buffer_uptodate(bh)) {
1246 bh->b_end_io = end_buffer_read_sync;
1247 submit_bh(READ, bh);
1249 if (buffer_uptodate(bh))
1257 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1258 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1259 * refcount elevated by one when they're in an LRU. A buffer can only appear
1260 * once in a particular CPU's LRU. A single buffer can be present in multiple
1261 * CPU's LRUs at the same time.
1263 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1264 * sb_find_get_block().
1266 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1267 * a local interrupt disable for that.
1270 #define BH_LRU_SIZE 8
1273 struct buffer_head *bhs[BH_LRU_SIZE];
1276 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1279 #define bh_lru_lock() local_irq_disable()
1280 #define bh_lru_unlock() local_irq_enable()
1282 #define bh_lru_lock() preempt_disable()
1283 #define bh_lru_unlock() preempt_enable()
1286 static inline void check_irqs_on(void)
1288 #ifdef irqs_disabled
1289 BUG_ON(irqs_disabled());
1294 * The LRU management algorithm is dopey-but-simple. Sorry.
1296 static void bh_lru_install(struct buffer_head *bh)
1298 struct buffer_head *evictee = NULL;
1303 lru = &__get_cpu_var(bh_lrus);
1304 if (lru->bhs[0] != bh) {
1305 struct buffer_head *bhs[BH_LRU_SIZE];
1311 for (in = 0; in < BH_LRU_SIZE; in++) {
1312 struct buffer_head *bh2 = lru->bhs[in];
1317 if (out >= BH_LRU_SIZE) {
1318 BUG_ON(evictee != NULL);
1325 while (out < BH_LRU_SIZE)
1327 memcpy(lru->bhs, bhs, sizeof(bhs));
1336 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1338 static struct buffer_head *
1339 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1341 struct buffer_head *ret = NULL;
1347 lru = &__get_cpu_var(bh_lrus);
1348 for (i = 0; i < BH_LRU_SIZE; i++) {
1349 struct buffer_head *bh = lru->bhs[i];
1351 if (bh && bh->b_bdev == bdev &&
1352 bh->b_blocknr == block && bh->b_size == size) {
1355 lru->bhs[i] = lru->bhs[i - 1];
1370 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1371 * it in the LRU and mark it as accessed. If it is not present then return
1374 struct buffer_head *
1375 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1377 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1380 bh = __find_get_block_slow(bdev, block);
1388 EXPORT_SYMBOL(__find_get_block);
1391 * __getblk will locate (and, if necessary, create) the buffer_head
1392 * which corresponds to the passed block_device, block and size. The
1393 * returned buffer has its reference count incremented.
1395 * __getblk() cannot fail - it just keeps trying. If you pass it an
1396 * illegal block number, __getblk() will happily return a buffer_head
1397 * which represents the non-existent block. Very weird.
1399 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1400 * attempt is failing. FIXME, perhaps?
1402 struct buffer_head *
1403 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1405 struct buffer_head *bh = __find_get_block(bdev, block, size);
1409 bh = __getblk_slow(bdev, block, size);
1412 EXPORT_SYMBOL(__getblk);
1415 * Do async read-ahead on a buffer.
1417 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1419 struct buffer_head *bh = __getblk(bdev, block, size);
1421 ll_rw_block(READA, 1, &bh);
1425 EXPORT_SYMBOL(__breadahead);
1428 * __bread() - reads a specified block and returns the bh
1429 * @bdev: the block_device to read from
1430 * @block: number of block
1431 * @size: size (in bytes) to read
1433 * Reads a specified block, and returns buffer head that contains it.
1434 * It returns NULL if the block was unreadable.
1436 struct buffer_head *
1437 __bread(struct block_device *bdev, sector_t block, unsigned size)
1439 struct buffer_head *bh = __getblk(bdev, block, size);
1441 if (likely(bh) && !buffer_uptodate(bh))
1442 bh = __bread_slow(bh);
1445 EXPORT_SYMBOL(__bread);
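/*
 * Illustrative sketch (hypothetical filesystem): reading an on-disk
 * superblock with sb_bread(), a thin wrapper around __bread().  The
 * block number is an assumption, not a real layout.
 */
static struct buffer_head *example_read_super(struct super_block *sb)
{
	struct buffer_head *bh = sb_bread(sb, 1);	/* hypothetical superblock location */

	if (!bh)
		return NULL;		/* block was unreadable: __bread() returned NULL */
	/* interpret bh->b_data here, e.g. cast it to the fs's superblock struct */
	return bh;			/* caller must brelse() when finished */
}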
1448 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1449 * This doesn't race because it runs in each cpu either in irq
1450 * or with preempt disabled.
1452 static void invalidate_bh_lru(void *arg)
1454 struct bh_lru *b = &get_cpu_var(bh_lrus);
1457 for (i = 0; i < BH_LRU_SIZE; i++) {
1461 put_cpu_var(bh_lrus);
1464 void invalidate_bh_lrus(void)
1466 on_each_cpu(invalidate_bh_lru, NULL, 1);
1468 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1470 void set_bh_page(struct buffer_head *bh,
1471 struct page *page, unsigned long offset)
1474 BUG_ON(offset >= PAGE_SIZE);
1475 if (PageHighMem(page))
1477 * This catches illegal uses and preserves the offset:
1479 bh->b_data = (char *)(0 + offset);
1481 bh->b_data = page_address(page) + offset;
1483 EXPORT_SYMBOL(set_bh_page);
1486 * Called when truncating a buffer on a page completely.
1488 static void discard_buffer(struct buffer_head * bh)
1491 clear_buffer_dirty(bh);
1493 clear_buffer_mapped(bh);
1494 clear_buffer_req(bh);
1495 clear_buffer_new(bh);
1496 clear_buffer_delay(bh);
1497 clear_buffer_unwritten(bh);
1502 * block_invalidatepage - invalidate part or all of a buffer-backed page
1504 * @page: the page which is affected
1505 * @offset: the index of the truncation point
1507 * block_invalidatepage() is called when all or part of the page has become
1508 * invalidated by a truncate operation.
1510 * block_invalidatepage() does not have to release all buffers, but it must
1511 * ensure that no dirty buffer is left outside @offset and that no I/O
1512 * is underway against any of the blocks which are outside the truncation
1513 * point. Because the caller is about to free (and possibly reuse) those
1516 void block_invalidatepage(struct page *page, unsigned long offset)
1518 struct buffer_head *head, *bh, *next;
1519 unsigned int curr_off = 0;
1521 BUG_ON(!PageLocked(page));
1522 if (!page_has_buffers(page))
1525 head = page_buffers(page);
1528 unsigned int next_off = curr_off + bh->b_size;
1529 next = bh->b_this_page;
1532 * is this block fully invalidated?
1534 if (offset <= curr_off)
1536 curr_off = next_off;
1538 } while (bh != head);
1541 * We release buffers only if the entire page is being invalidated.
1542 * The get_block cached value has been unconditionally invalidated,
1543 * so real IO is not possible anymore.
1546 try_to_release_page(page, 0);
1550 EXPORT_SYMBOL(block_invalidatepage);
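/*
 * Illustrative sketch: a filesystem with no private per-page state can
 * point ->invalidatepage at block_invalidatepage(), or wrap it as
 * below.  example_invalidatepage is a hypothetical name.
 */
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	/* nothing filesystem-specific to undo; just discard the buffers */
	block_invalidatepage(page, offset);
}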
1553 * We attach and possibly dirty the buffers atomically wrt
1554 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1555 * is already excluded via the page lock.
1557 void create_empty_buffers(struct page *page,
1558 unsigned long blocksize, unsigned long b_state)
1560 struct buffer_head *bh, *head, *tail;
1562 head = alloc_page_buffers(page, blocksize, 1);
1565 bh->b_state |= b_state;
1567 bh = bh->b_this_page;
1569 tail->b_this_page = head;
1571 spin_lock(&page->mapping->private_lock);
1572 if (PageUptodate(page) || PageDirty(page)) {
1575 if (PageDirty(page))
1576 set_buffer_dirty(bh);
1577 if (PageUptodate(page))
1578 set_buffer_uptodate(bh);
1579 bh = bh->b_this_page;
1580 } while (bh != head);
1582 attach_page_buffers(page, head);
1583 spin_unlock(&page->mapping->private_lock);
1585 EXPORT_SYMBOL(create_empty_buffers);
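/*
 * Illustrative sketch (hypothetical helper): the common pattern in
 * ->readpage/->writepage implementations - attach buffers first if the
 * (locked) page arrived without any.
 */
static void example_ensure_buffers(struct page *page, struct inode *inode)
{
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
}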
1588 * We are taking a block for data and we don't want any output from any
1589 * buffer-cache aliases starting from return from that function and
1590 * until the moment when something will explicitly mark the buffer
1591 * dirty (hopefully that will not happen until we free that block ;-)
1592 * We don't even need to mark it not-uptodate - nobody can expect
1593 * anything from a newly allocated buffer anyway. We used to use
1594 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1595 * don't want to mark the alias unmapped, for example - it would confuse
1596 * anyone who might pick it with bread() afterwards...
1598 * Also.. Note that bforget() doesn't lock the buffer. So there can
1599 * be writeout I/O going on against recently-freed buffers. We don't
1600 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1601 * only if we really need to. That happens here.
1603 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1605 struct buffer_head *old_bh;
1609 old_bh = __find_get_block_slow(bdev, block);
1611 clear_buffer_dirty(old_bh);
1612 wait_on_buffer(old_bh);
1613 clear_buffer_req(old_bh);
1617 EXPORT_SYMBOL(unmap_underlying_metadata);
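/*
 * Illustrative sketch (hypothetical helper): after allocating a fresh
 * data block, mark the buffer new and kill any stale alias the blockdev
 * pagecache may still hold for that block.  The generic write paths
 * below do this automatically for buffer_new() buffers.
 */
static void example_claim_block(struct inode *inode, struct buffer_head *bh_result,
				sector_t phys_block)
{
	map_bh(bh_result, inode->i_sb, phys_block);	/* sets b_bdev, b_blocknr, mapped */
	set_buffer_new(bh_result);
	unmap_underlying_metadata(bh_result->b_bdev, bh_result->b_blocknr);
}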
1620 * NOTE! All mapped/uptodate combinations are valid:
1622 *  Mapped  Uptodate        Meaning
1624 *  No      No              "unknown" - must do get_block()
1625 *  No      Yes             "hole" - zero-filled
1626 *  Yes     No              "allocated" - allocated on disk, not read in
1627 *  Yes     Yes             "valid" - allocated and up-to-date in memory.
1629 * "Dirty" is valid only with the last case (mapped+uptodate).
1633 * While block_write_full_page is writing back the dirty buffers under
1634 * the page lock, whoever dirtied the buffers may decide to clean them
1635 * again at any time. We handle that by only looking at the buffer
1636 * state inside lock_buffer().
1638 * If block_write_full_page() is called for regular writeback
1639 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1640 * locked buffer. This only can happen if someone has written the buffer
1641 * directly, with submit_bh(). At the address_space level PageWriteback
1642 * prevents this contention from occurring.
1644 static int __block_write_full_page(struct inode *inode, struct page *page,
1645 get_block_t *get_block, struct writeback_control *wbc)
1649 sector_t last_block;
1650 struct buffer_head *bh, *head;
1651 const unsigned blocksize = 1 << inode->i_blkbits;
1652 int nr_underway = 0;
1654 BUG_ON(!PageLocked(page));
1656 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1658 if (!page_has_buffers(page)) {
1659 create_empty_buffers(page, blocksize,
1660 (1 << BH_Dirty)|(1 << BH_Uptodate));
1664 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1665 * here, and the (potentially unmapped) buffers may become dirty at
1666 * any time. If a buffer becomes dirty here after we've inspected it
1667 * then we just miss that fact, and the page stays dirty.
1669 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1670 * handle that here by just cleaning them.
1673 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1674 head = page_buffers(page);
1678 * Get all the dirty buffers mapped to disk addresses and
1679 * handle any aliases from the underlying blockdev's mapping.
1682 if (block > last_block) {
1684 * mapped buffers outside i_size will occur, because
1685 * this page can be outside i_size when there is a
1686 * truncate in progress.
1689 * The buffer was zeroed by block_write_full_page()
1691 clear_buffer_dirty(bh);
1692 set_buffer_uptodate(bh);
1693 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1695 WARN_ON(bh->b_size != blocksize);
1696 err = get_block(inode, block, bh, 1);
1699 clear_buffer_delay(bh);
1700 if (buffer_new(bh)) {
1701 /* blockdev mappings never come here */
1702 clear_buffer_new(bh);
1703 unmap_underlying_metadata(bh->b_bdev,
1707 bh = bh->b_this_page;
1709 } while (bh != head);
1712 if (!buffer_mapped(bh))
1715 * If it's a fully non-blocking write attempt and we cannot
1716 * lock the buffer then redirty the page. Note that this can
1717 * potentially cause a busy-wait loop from pdflush and kswapd
1718 * activity, but those code paths have their own higher-level
1721 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1723 } else if (test_set_buffer_locked(bh)) {
1724 redirty_page_for_writepage(wbc, page);
1727 if (test_clear_buffer_dirty(bh)) {
1728 mark_buffer_async_write(bh);
1732 } while ((bh = bh->b_this_page) != head);
1735 * The page and its buffers are protected by PageWriteback(), so we can
1736 * drop the bh refcounts early.
1738 BUG_ON(PageWriteback(page));
1739 set_page_writeback(page);
1742 struct buffer_head *next = bh->b_this_page;
1743 if (buffer_async_write(bh)) {
1744 submit_bh(WRITE, bh);
1748 } while (bh != head);
1753 if (nr_underway == 0) {
1755 * The page was marked dirty, but the buffers were
1756 * clean. Someone wrote them back by hand with
1757 * ll_rw_block/submit_bh. A rare case.
1759 end_page_writeback(page);
1762 * The page and buffer_heads can be released at any time from
1770 * ENOSPC, or some other error. We may already have added some
1771 * blocks to the file, so we need to write these out to avoid
1772 * exposing stale data.
1773 * The page is currently locked and not marked for writeback
1776 /* Recovery: lock and submit the mapped buffers */
1778 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1779 !buffer_delay(bh)) {
1781 mark_buffer_async_write(bh);
1784 * The buffer may have been set dirty during
1785 * attachment to a dirty page.
1787 clear_buffer_dirty(bh);
1789 } while ((bh = bh->b_this_page) != head);
1791 BUG_ON(PageWriteback(page));
1792 mapping_set_error(page->mapping, err);
1793 set_page_writeback(page);
1795 struct buffer_head *next = bh->b_this_page;
1796 if (buffer_async_write(bh)) {
1797 clear_buffer_dirty(bh);
1798 submit_bh(WRITE, bh);
1802 } while (bh != head);
1808 * If a page has any new buffers, zero them out here, and mark them uptodate
1809 * and dirty so they'll be written out (in order to prevent uninitialised
1810 * block data from leaking). And clear the new bit.
1812 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1814 unsigned int block_start, block_end;
1815 struct buffer_head *head, *bh;
1817 BUG_ON(!PageLocked(page));
1818 if (!page_has_buffers(page))
1821 bh = head = page_buffers(page);
1824 block_end = block_start + bh->b_size;
1826 if (buffer_new(bh)) {
1827 if (block_end > from && block_start < to) {
1828 if (!PageUptodate(page)) {
1829 unsigned start, size;
1831 start = max(from, block_start);
1832 size = min(to, block_end) - start;
1834 zero_user(page, start, size);
1835 set_buffer_uptodate(bh);
1838 clear_buffer_new(bh);
1839 mark_buffer_dirty(bh);
1843 block_start = block_end;
1844 bh = bh->b_this_page;
1845 } while (bh != head);
1847 EXPORT_SYMBOL(page_zero_new_buffers);
1849 static int __block_prepare_write(struct inode *inode, struct page *page,
1850 unsigned from, unsigned to, get_block_t *get_block)
1852 unsigned block_start, block_end;
1855 unsigned blocksize, bbits;
1856 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1858 BUG_ON(!PageLocked(page));
1859 BUG_ON(from > PAGE_CACHE_SIZE);
1860 BUG_ON(to > PAGE_CACHE_SIZE);
1863 blocksize = 1 << inode->i_blkbits;
1864 if (!page_has_buffers(page))
1865 create_empty_buffers(page, blocksize, 0);
1866 head = page_buffers(page);
1868 bbits = inode->i_blkbits;
1869 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1871 for(bh = head, block_start = 0; bh != head || !block_start;
1872 block++, block_start=block_end, bh = bh->b_this_page) {
1873 block_end = block_start + blocksize;
1874 if (block_end <= from || block_start >= to) {
1875 if (PageUptodate(page)) {
1876 if (!buffer_uptodate(bh))
1877 set_buffer_uptodate(bh);
1882 clear_buffer_new(bh);
1883 if (!buffer_mapped(bh)) {
1884 WARN_ON(bh->b_size != blocksize);
1885 err = get_block(inode, block, bh, 1);
1888 if (buffer_new(bh)) {
1889 unmap_underlying_metadata(bh->b_bdev,
1891 if (PageUptodate(page)) {
1892 clear_buffer_new(bh);
1893 set_buffer_uptodate(bh);
1894 mark_buffer_dirty(bh);
1897 if (block_end > to || block_start < from)
1898 zero_user_segments(page,
1904 if (PageUptodate(page)) {
1905 if (!buffer_uptodate(bh))
1906 set_buffer_uptodate(bh);
1909 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1910 !buffer_unwritten(bh) &&
1911 (block_start < from || block_end > to)) {
1912 ll_rw_block(READ, 1, &bh);
1917 * If we issued read requests - let them complete.
1919 while(wait_bh > wait) {
1920 wait_on_buffer(*--wait_bh);
1921 if (!buffer_uptodate(*wait_bh))
1925 page_zero_new_buffers(page, from, to);
1929 static int __block_commit_write(struct inode *inode, struct page *page,
1930 unsigned from, unsigned to)
1932 unsigned block_start, block_end;
1935 struct buffer_head *bh, *head;
1937 blocksize = 1 << inode->i_blkbits;
1939 for(bh = head = page_buffers(page), block_start = 0;
1940 bh != head || !block_start;
1941 block_start=block_end, bh = bh->b_this_page) {
1942 block_end = block_start + blocksize;
1943 if (block_end <= from || block_start >= to) {
1944 if (!buffer_uptodate(bh))
1947 set_buffer_uptodate(bh);
1948 mark_buffer_dirty(bh);
1950 clear_buffer_new(bh);
1954 * If this is a partial write which happened to make all buffers
1955 * uptodate then we can optimize away a bogus readpage() for
1956 * the next read(). Here we 'discover' whether the page went
1957 * uptodate as a result of this (potentially partial) write.
1960 SetPageUptodate(page);
1965 * block_write_begin takes care of the basic task of block allocation and
1966 * bringing partial write blocks uptodate first.
1968 * If *pagep is not NULL, then block_write_begin uses the locked page
1969 * at *pagep rather than allocating its own. In this case, the page will
1970 * not be unlocked or deallocated on failure.
1972 int block_write_begin(struct file *file, struct address_space *mapping,
1973 loff_t pos, unsigned len, unsigned flags,
1974 struct page **pagep, void **fsdata,
1975 get_block_t *get_block)
1977 struct inode *inode = mapping->host;
1981 unsigned start, end;
1984 index = pos >> PAGE_CACHE_SHIFT;
1985 start = pos & (PAGE_CACHE_SIZE - 1);
1991 page = __grab_cache_page(mapping, index);
1998 BUG_ON(!PageLocked(page));
2000 status = __block_prepare_write(inode, page, start, end, get_block);
2001 if (unlikely(status)) {
2002 ClearPageUptodate(page);
2006 page_cache_release(page);
2010 * prepare_write() may have instantiated a few blocks
2011 * outside i_size. Trim these off again. Don't need
2012 * i_size_read because we hold i_mutex.
2014 if (pos + len > inode->i_size)
2015 vmtruncate(inode, inode->i_size);
2023 EXPORT_SYMBOL(block_write_begin);
2025 int block_write_end(struct file *file, struct address_space *mapping,
2026 loff_t pos, unsigned len, unsigned copied,
2027 struct page *page, void *fsdata)
2029 struct inode *inode = mapping->host;
2032 start = pos & (PAGE_CACHE_SIZE - 1);
2034 if (unlikely(copied < len)) {
2036 * The buffers that were written will now be uptodate, so we
2037 * don't have to worry about a readpage reading them and
2038 * overwriting a partial write. However if we have encountered
2039 * a short write and only partially written into a buffer, it
2040 * will not be marked uptodate, so a readpage might come in and
2041 * destroy our partial write.
2043 * Do the simplest thing, and just treat any short write to a
2044 * non uptodate page as a zero-length write, and force the
2045 * caller to redo the whole thing.
2047 if (!PageUptodate(page))
2050 page_zero_new_buffers(page, start+copied, start+len);
2052 flush_dcache_page(page);
2054 /* This could be a short (even 0-length) commit */
2055 __block_commit_write(inode, page, start, start+copied);
2059 EXPORT_SYMBOL(block_write_end);
2061 int generic_write_end(struct file *file, struct address_space *mapping,
2062 loff_t pos, unsigned len, unsigned copied,
2063 struct page *page, void *fsdata)
2065 struct inode *inode = mapping->host;
2066 int i_size_changed = 0;
2068 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2071 * No need to use i_size_read() here, the i_size
2072 * cannot change under us because we hold i_mutex.
2074 * But it's important to update i_size while still holding page lock:
2075 * page writeout could otherwise come in and zero beyond i_size.
2077 if (pos+copied > inode->i_size) {
2078 i_size_write(inode, pos+copied);
2083 page_cache_release(page);
2086 * Don't mark the inode dirty under page lock. First, it unnecessarily
2087 * makes the holding time of page lock longer. Second, it forces lock
2088 * ordering of page lock and transaction start for journaling
2092 mark_inode_dirty(inode);
2096 EXPORT_SYMBOL(generic_write_end);
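/*
 * Illustrative sketch: wiring the generic helpers above into a simple
 * filesystem's address_space_operations.  The trivial block mapping,
 * EXAMPLE_DATA_START and every example_* name are hypothetical
 * stand-ins, not a real on-disk format.
 */
#define EXAMPLE_DATA_START	64	/* hypothetical first data block on disk */

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	/*
	 * Stand-in mapping: file block i lives at disk block
	 * EXAMPLE_DATA_START + i.  A real get_block() would consult the
	 * filesystem's allocation metadata, honour 'create' and set
	 * buffer_new on freshly allocated blocks.
	 */
	map_bh(bh_result, inode->i_sb, EXAMPLE_DATA_START + iblock);
	return 0;
}

static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}

static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	*pagep = NULL;			/* let block_write_begin find and lock the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, example_get_block);
}

static const struct address_space_operations example_file_aops = {
	.readpage	= example_readpage,
	.writepage	= example_writepage,
	.write_begin	= example_write_begin,
	.write_end	= generic_write_end,
};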
2099 * Generic "read page" function for block devices that have the normal
2100 * get_block functionality. This is most of the block device filesystems.
2101 * Reads the page asynchronously --- the unlock_buffer() and
2102 * set/clear_buffer_uptodate() functions propagate buffer state into the
2103 * page struct once IO has completed.
2105 int block_read_full_page(struct page *page, get_block_t *get_block)
2107 struct inode *inode = page->mapping->host;
2108 sector_t iblock, lblock;
2109 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2110 unsigned int blocksize;
2112 int fully_mapped = 1;
2114 BUG_ON(!PageLocked(page));
2115 blocksize = 1 << inode->i_blkbits;
2116 if (!page_has_buffers(page))
2117 create_empty_buffers(page, blocksize, 0);
2118 head = page_buffers(page);
2120 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2121 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2127 if (buffer_uptodate(bh))
2130 if (!buffer_mapped(bh)) {
2134 if (iblock < lblock) {
2135 WARN_ON(bh->b_size != blocksize);
2136 err = get_block(inode, iblock, bh, 0);
2140 if (!buffer_mapped(bh)) {
2141 zero_user(page, i * blocksize, blocksize);
2143 set_buffer_uptodate(bh);
2147 * get_block() might have updated the buffer
2150 if (buffer_uptodate(bh))
2154 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2157 SetPageMappedToDisk(page);
2161 * All buffers are uptodate - we can set the page uptodate
2162 * as well. But not if get_block() returned an error.
2164 if (!PageError(page))
2165 SetPageUptodate(page);
2170 /* Stage two: lock the buffers */
2171 for (i = 0; i < nr; i++) {
2174 mark_buffer_async_read(bh);
2178 * Stage 3: start the IO. Check for uptodateness
2179 * inside the buffer lock in case another process reading
2180 * the underlying blockdev brought it uptodate (the sct fix).
2182 for (i = 0; i < nr; i++) {
2184 if (buffer_uptodate(bh))
2185 end_buffer_async_read(bh, 1);
2187 submit_bh(READ, bh);
2192 /* utility function for filesystems that need to do work on expanding
2193 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2194 * deal with the hole.
2196 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2198 struct address_space *mapping = inode->i_mapping;
2201 unsigned long limit;
2205 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2206 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2207 send_sig(SIGXFSZ, current, 0);
2210 if (size > inode->i_sb->s_maxbytes)
2213 err = pagecache_write_begin(NULL, mapping, size, 0,
2214 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2219 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2226 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2227 loff_t pos, loff_t *bytes)
2229 struct inode *inode = mapping->host;
2230 unsigned blocksize = 1 << inode->i_blkbits;
2233 pgoff_t index, curidx;
2235 unsigned zerofrom, offset, len;
2238 index = pos >> PAGE_CACHE_SHIFT;
2239 offset = pos & ~PAGE_CACHE_MASK;
2241 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2242 zerofrom = curpos & ~PAGE_CACHE_MASK;
2243 if (zerofrom & (blocksize-1)) {
2244 *bytes |= (blocksize-1);
2247 len = PAGE_CACHE_SIZE - zerofrom;
2249 err = pagecache_write_begin(file, mapping, curpos, len,
2250 AOP_FLAG_UNINTERRUPTIBLE,
2254 zero_user(page, zerofrom, len);
2255 err = pagecache_write_end(file, mapping, curpos, len, len,
2262 balance_dirty_pages_ratelimited(mapping);
2265 /* page covers the boundary, find the boundary offset */
2266 if (index == curidx) {
2267 zerofrom = curpos & ~PAGE_CACHE_MASK;
2268 /* if we will expand the file, the last block will be filled */
2269 if (offset <= zerofrom) {
2272 if (zerofrom & (blocksize-1)) {
2273 *bytes |= (blocksize-1);
2276 len = offset - zerofrom;
2278 err = pagecache_write_begin(file, mapping, curpos, len,
2279 AOP_FLAG_UNINTERRUPTIBLE,
2283 zero_user(page, zerofrom, len);
2284 err = pagecache_write_end(file, mapping, curpos, len, len,
2296 * For moronic filesystems that do not allow holes in files.
2297 * We may have to extend the file.
2299 int cont_write_begin(struct file *file, struct address_space *mapping,
2300 loff_t pos, unsigned len, unsigned flags,
2301 struct page **pagep, void **fsdata,
2302 get_block_t *get_block, loff_t *bytes)
2304 struct inode *inode = mapping->host;
2305 unsigned blocksize = 1 << inode->i_blkbits;
2309 err = cont_expand_zero(file, mapping, pos, bytes);
2313 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2314 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2315 *bytes |= (blocksize-1);
2320 err = block_write_begin(file, mapping, pos, len,
2321 flags, pagep, fsdata, get_block);
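/*
 * Illustrative sketch (not part of this file): a hole-less filesystem wires
 * ->write_begin straight to cont_write_begin(), passing its own get_block
 * callback and a per-inode counter of how far the file has been initialised
 * on disk.  myfs_get_block() and MYFS_I(inode)->mmu_private are hypothetical
 * names for that callback and counter.
 */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}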
2326 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2327 get_block_t *get_block)
2329 struct inode *inode = page->mapping->host;
2330 int err = __block_prepare_write(inode, page, from, to, get_block);
2332 ClearPageUptodate(page);
2336 int block_commit_write(struct page *page, unsigned from, unsigned to)
2338 struct inode *inode = page->mapping->host;
2339 __block_commit_write(inode, page, from, to);
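/*
 * Illustrative sketch (not part of this file): the prepare/commit pair is
 * meant for callers that fill part of a locked page themselves rather than
 * copying from user space.  myfs_get_block() and myfs_fill_data() are
 * hypothetical; the caller is assumed to hold the page lock.
 */
static int myfs_write_into_page(struct page *page, unsigned from, unsigned to)
{
	int err;

	err = block_prepare_write(page, from, to, myfs_get_block);
	if (err)
		return err;
	myfs_fill_data(page, from, to);		/* place the new bytes in the page */
	return block_commit_write(page, from, to);
}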
2344 * block_page_mkwrite() is not allowed to change the file size as it gets
2345 * called from a page fault handler when a page is first dirtied. Hence we must
2346 * be careful to check for EOF conditions here. We set the page up correctly
2347 * for a written page which means we get ENOSPC checking when writing into
2348 * holes and correct delalloc and unwritten extent mapping on filesystems that
2349 * support these features.
2351 * We are not allowed to take the i_mutex here so we have to play games to
2352 * protect against truncate races as the page could now be beyond EOF. Because
2353 * vmtruncate() writes the inode size before removing pages, once we have the
2354 * page lock we can determine safely if the page is beyond EOF. If it is not
2355 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
2359 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2360 get_block_t get_block)
2362 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2368 size = i_size_read(inode);
2369 if ((page->mapping != inode->i_mapping) ||
2370 (page_offset(page) > size)) {
2371 /* page got truncated out from underneath us */
2375 /* page is wholly or partially inside EOF */
2376 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2377 end = size & ~PAGE_CACHE_MASK;
2379 end = PAGE_CACHE_SIZE;
2381 ret = block_prepare_write(page, 0, end, get_block);
2383 ret = block_commit_write(page, 0, end);
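/*
 * Illustrative sketch (not part of this file): a filesystem exposes
 * block_page_mkwrite() through its vm_operations so that writes through
 * mmap allocate blocks, and get ENOSPC checking, at fault time.
 * myfs_get_block() is a hypothetical get_block callback.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};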
2391 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2392 * immediately, while under the page lock. So it needs a special end_io
2393 * handler which does not touch the bh after unlocking it.
2395 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2397 __end_buffer_read_notouch(bh, uptodate);
2401 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2402 * the page (converting it to a circular linked list and taking care of page dirty races).
2405 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2407 struct buffer_head *bh;
2409 BUG_ON(!PageLocked(page));
2411 spin_lock(&page->mapping->private_lock);
2414 if (PageDirty(page))
2415 set_buffer_dirty(bh);
2416 if (!bh->b_this_page)
2417 bh->b_this_page = head;
2418 bh = bh->b_this_page;
2419 } while (bh != head);
2420 attach_page_buffers(page, head);
2421 spin_unlock(&page->mapping->private_lock);
2425 * On entry, the page is not uptodate at all.
2426 * On exit the page is fully uptodate in the areas outside (from,to)
2428 int nobh_write_begin(struct file *file, struct address_space *mapping,
2429 loff_t pos, unsigned len, unsigned flags,
2430 struct page **pagep, void **fsdata,
2431 get_block_t *get_block)
2433 struct inode *inode = mapping->host;
2434 const unsigned blkbits = inode->i_blkbits;
2435 const unsigned blocksize = 1 << blkbits;
2436 struct buffer_head *head, *bh;
2440 unsigned block_in_page;
2441 unsigned block_start, block_end;
2442 sector_t block_in_file;
2445 int is_mapped_to_disk = 1;
2447 index = pos >> PAGE_CACHE_SHIFT;
2448 from = pos & (PAGE_CACHE_SIZE - 1);
2451 page = __grab_cache_page(mapping, index);
2457 if (page_has_buffers(page)) {
2459 page_cache_release(page);
2461 return block_write_begin(file, mapping, pos, len, flags, pagep,
2465 if (PageMappedToDisk(page))
2469 * Allocate buffers so that we can keep track of state, and potentially
2470 * attach them to the page if an error occurs. In the common case of
2471 * no error, they will just be freed again without ever being attached
2472 * to the page (which is all OK, because we're under the page lock).
2474 * Be careful: the buffer linked list is a NULL terminated one, rather
2475 * than the circular one we're used to.
2477 head = alloc_page_buffers(page, blocksize, 0);
2483 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2486 * We loop across all blocks in the page, whether or not they are
2487 * part of the affected region. This is so we can discover if the
2488 * page is fully mapped-to-disk.
2490 for (block_start = 0, block_in_page = 0, bh = head;
2491 block_start < PAGE_CACHE_SIZE;
2492 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2495 block_end = block_start + blocksize;
2498 if (block_start >= to)
2500 ret = get_block(inode, block_in_file + block_in_page,
2504 if (!buffer_mapped(bh))
2505 is_mapped_to_disk = 0;
2507 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2508 if (PageUptodate(page)) {
2509 set_buffer_uptodate(bh);
2512 if (buffer_new(bh) || !buffer_mapped(bh)) {
2513 zero_user_segments(page, block_start, from,
2517 if (buffer_uptodate(bh))
2518 continue; /* reiserfs does this */
2519 if (block_start < from || block_end > to) {
2521 bh->b_end_io = end_buffer_read_nobh;
2522 submit_bh(READ, bh);
2529 * The page is locked, so these buffers are protected from
2530 * any VM or truncate activity. Hence we don't need to care
2531 * for the buffer_head refcounts.
2533 for (bh = head; bh; bh = bh->b_this_page) {
2535 if (!buffer_uptodate(bh))
2542 if (is_mapped_to_disk)
2543 SetPageMappedToDisk(page);
2545 *fsdata = head; /* to be released by nobh_write_end */
2552 * Error recovery is a bit difficult. We need to zero out blocks that
2553 * were newly allocated, and dirty them to ensure they get written out.
2554 * Buffers need to be attached to the page at this point, otherwise
2555 * the handling of potential IO errors during writeout would be hard
2556 * (could try doing synchronous writeout, but what if that fails too?)
2558 attach_nobh_buffers(page, head);
2559 page_zero_new_buffers(page, from, to);
2563 page_cache_release(page);
2566 if (pos + len > inode->i_size)
2567 vmtruncate(inode, inode->i_size);
2571 EXPORT_SYMBOL(nobh_write_begin);
2573 int nobh_write_end(struct file *file, struct address_space *mapping,
2574 loff_t pos, unsigned len, unsigned copied,
2575 struct page *page, void *fsdata)
2577 struct inode *inode = page->mapping->host;
2578 struct buffer_head *head = fsdata;
2579 struct buffer_head *bh;
2580 BUG_ON(fsdata != NULL && page_has_buffers(page));
2582 if (unlikely(copied < len) && !page_has_buffers(page))
2583 attach_nobh_buffers(page, head);
2584 if (page_has_buffers(page))
2585 return generic_write_end(file, mapping, pos, len,
2586 copied, page, fsdata);
2588 SetPageUptodate(page);
2589 set_page_dirty(page);
2590 if (pos+copied > inode->i_size) {
2591 i_size_write(inode, pos+copied);
2592 mark_inode_dirty(inode);
2596 page_cache_release(page);
2600 head = head->b_this_page;
2601 free_buffer_head(bh);
2606 EXPORT_SYMBOL(nobh_write_end);
2609 * nobh_writepage() - based on block_write_full_page() except
2610 * that it tries to operate without attaching bufferheads to
2613 int nobh_writepage(struct page *page, get_block_t *get_block,
2614 struct writeback_control *wbc)
2616 struct inode * const inode = page->mapping->host;
2617 loff_t i_size = i_size_read(inode);
2618 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2622 /* Is the page fully inside i_size? */
2623 if (page->index < end_index)
2626 /* Is the page fully outside i_size? (truncate in progress) */
2627 offset = i_size & (PAGE_CACHE_SIZE-1);
2628 if (page->index >= end_index+1 || !offset) {
2630 * The page may have dirty, unmapped buffers. For example,
2631 * they may have been added in ext3_writepage(). Make them
2632 * freeable here, so the page does not leak.
2635 /* Not really sure about this - do we need this? */
2636 if (page->mapping->a_ops->invalidatepage)
2637 page->mapping->a_ops->invalidatepage(page, offset);
2640 return 0; /* don't care */
2644 * The page straddles i_size. It must be zeroed out on each and every
2645 * writepage invocation because it may be mmapped. "A file is mapped
2646 * in multiples of the page size. For a file that is not a multiple of
2647 * the page size, the remaining memory is zeroed when mapped, and
2648 * writes to that region are not written out to the file."
2650 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2652 ret = mpage_writepage(page, get_block, wbc);
2654 ret = __block_write_full_page(inode, page, get_block, wbc);
2657 EXPORT_SYMBOL(nobh_writepage);
2659 int nobh_truncate_page(struct address_space *mapping,
2660 loff_t from, get_block_t *get_block)
2662 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2663 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2666 unsigned length, pos;
2667 struct inode *inode = mapping->host;
2669 struct buffer_head map_bh;
2672 blocksize = 1 << inode->i_blkbits;
2673 length = offset & (blocksize - 1);
2675 /* Block boundary? Nothing to do */
2679 length = blocksize - length;
2680 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2682 page = grab_cache_page(mapping, index);
2687 if (page_has_buffers(page)) {
2690 page_cache_release(page);
2691 return block_truncate_page(mapping, from, get_block);
2694 /* Find the buffer that contains "offset" */
2696 while (offset >= pos) {
2701 err = get_block(inode, iblock, &map_bh, 0);
2704 /* unmapped? It's a hole - nothing to do */
2705 if (!buffer_mapped(&map_bh))
2708 /* Ok, it's mapped. Make sure it's up-to-date */
2709 if (!PageUptodate(page)) {
2710 err = mapping->a_ops->readpage(NULL, page);
2712 page_cache_release(page);
2716 if (!PageUptodate(page)) {
2720 if (page_has_buffers(page))
2723 zero_user(page, offset, length);
2724 set_page_dirty(page);
2729 page_cache_release(page);
2733 EXPORT_SYMBOL(nobh_truncate_page);
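/*
 * Illustrative sketch (not part of this file): the nobh_* helpers are meant
 * to be wired together in a filesystem's address_space_operations, much as
 * ext2's "nobh" mode does; nobh_truncate_page() is then called from the
 * filesystem's truncate path.  All myfs_* names are hypothetical.
 */
static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping, loff_t pos,
			unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.sync_page	= block_sync_page,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
	.writepage	= myfs_nobh_writepage,
	.bmap		= myfs_bmap,
};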
2735 int block_truncate_page(struct address_space *mapping,
2736 loff_t from, get_block_t *get_block)
2738 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2739 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2742 unsigned length, pos;
2743 struct inode *inode = mapping->host;
2745 struct buffer_head *bh;
2748 blocksize = 1 << inode->i_blkbits;
2749 length = offset & (blocksize - 1);
2751 /* Block boundary? Nothing to do */
2755 length = blocksize - length;
2756 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2758 page = grab_cache_page(mapping, index);
2763 if (!page_has_buffers(page))
2764 create_empty_buffers(page, blocksize, 0);
2766 /* Find the buffer that contains "offset" */
2767 bh = page_buffers(page);
2769 while (offset >= pos) {
2770 bh = bh->b_this_page;
2776 if (!buffer_mapped(bh)) {
2777 WARN_ON(bh->b_size != blocksize);
2778 err = get_block(inode, iblock, bh, 0);
2781 /* unmapped? It's a hole - nothing to do */
2782 if (!buffer_mapped(bh))
2786 /* Ok, it's mapped. Make sure it's up-to-date */
2787 if (PageUptodate(page))
2788 set_buffer_uptodate(bh);
2790 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2792 ll_rw_block(READ, 1, &bh);
2794 /* Uhhuh. Read error. Complain and punt. */
2795 if (!buffer_uptodate(bh))
2799 zero_user(page, offset, length);
2800 mark_buffer_dirty(bh);
2805 page_cache_release(page);
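/*
 * Illustrative sketch (not part of this file): block_truncate_page() is
 * typically called from a filesystem's truncate path to zero the partial
 * block at the new end of file before the following blocks are released.
 * myfs_get_block() and myfs_free_blocks() are hypothetical.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);
	myfs_free_blocks(inode);	/* filesystem-specific block freeing */
}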
2811 * The generic ->writepage function for buffer-backed address_spaces
2813 int block_write_full_page(struct page *page, get_block_t *get_block,
2814 struct writeback_control *wbc)
2816 struct inode * const inode = page->mapping->host;
2817 loff_t i_size = i_size_read(inode);
2818 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2821 /* Is the page fully inside i_size? */
2822 if (page->index < end_index)
2823 return __block_write_full_page(inode, page, get_block, wbc);
2825 /* Is the page fully outside i_size? (truncate in progress) */
2826 offset = i_size & (PAGE_CACHE_SIZE-1);
2827 if (page->index >= end_index+1 || !offset) {
2829 * The page may have dirty, unmapped buffers. For example,
2830 * they may have been added in ext3_writepage(). Make them
2831 * freeable here, so the page does not leak.
2833 do_invalidatepage(page, 0);
2835 return 0; /* don't care */
2839 * The page straddles i_size. It must be zeroed out on each and every
2840 * writepage invocation because it may be mmapped. "A file is mapped
2841 * in multiples of the page size. For a file that is not a multiple of
2842 * the page size, the remaining memory is zeroed when mapped, and
2843 * writes to that region are not written out to the file."
2845 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2846 return __block_write_full_page(inode, page, get_block, wbc);
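/*
 * Illustrative sketch (not part of this file): most buffer-backed
 * filesystems implement ->writepage as a thin wrapper around
 * block_write_full_page().  myfs_get_block() is hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}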
2849 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2850 get_block_t *get_block)
2852 struct buffer_head tmp;
2853 struct inode *inode = mapping->host;
2856 tmp.b_size = 1 << inode->i_blkbits;
2857 get_block(inode, block, &tmp, 0);
2858 return tmp.b_blocknr;
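/*
 * Illustrative sketch (not part of this file): ->bmap is usually just a
 * forward to generic_block_bmap() with the filesystem's get_block callback.
 * myfs_get_block() is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}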
2861 static void end_bio_bh_io_sync(struct bio *bio, int err)
2863 struct buffer_head *bh = bio->bi_private;
2865 if (err == -EOPNOTSUPP) {
2866 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2867 set_bit(BH_Eopnotsupp, &bh->b_state);
2870 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2874 int submit_bh(int rw, struct buffer_head * bh)
2879 BUG_ON(!buffer_locked(bh));
2880 BUG_ON(!buffer_mapped(bh));
2881 BUG_ON(!bh->b_end_io);
2883 if (buffer_ordered(bh) && (rw == WRITE))
2887 * Only clear out a write error when rewriting; should this
2888 * include WRITE_SYNC as well?
2890 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2891 clear_buffer_write_io_error(bh);
2894 * from here on down, it's all bio -- do the initial mapping,
2895 * submit_bio -> generic_make_request may further map this bio around
2897 bio = bio_alloc(GFP_NOIO, 1);
2899 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2900 bio->bi_bdev = bh->b_bdev;
2901 bio->bi_io_vec[0].bv_page = bh->b_page;
2902 bio->bi_io_vec[0].bv_len = bh->b_size;
2903 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2907 bio->bi_size = bh->b_size;
2909 bio->bi_end_io = end_bio_bh_io_sync;
2910 bio->bi_private = bh;
2913 submit_bio(rw, bio);
2915 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2923 * ll_rw_block: low-level access to block devices (DEPRECATED)
2924 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2925 * @nr: number of &struct buffer_heads in the array
2926 * @bhs: array of pointers to &struct buffer_head
2928 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2929 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2930 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2931 * are sent to disk. The fourth %READA option is described in the documentation
2932 * for generic_make_request() which ll_rw_block() calls.
2934 * This function drops any buffer that it cannot get a lock on (with the
2935 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2936 * clean when doing a write request, and any buffer that appears to be
2937 up-to-date when doing a read request. Further it marks as clean buffers that
2938 * are processed for writing (the buffer cache won't assume that they are
2939 * actually clean until the buffer gets unlocked).
2941 * ll_rw_block sets b_end_io to a simple completion handler that marks
2942 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes any waiters.
2945 * All of the buffers must be for the same device, and must also be a
2946 * multiple of the current approved size for the device.
2948 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2952 for (i = 0; i < nr; i++) {
2953 struct buffer_head *bh = bhs[i];
2955 if (rw == SWRITE || rw == SWRITE_SYNC)
2957 else if (test_set_buffer_locked(bh))
2960 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
2961 if (test_clear_buffer_dirty(bh)) {
2962 bh->b_end_io = end_buffer_write_sync;
2964 if (rw == SWRITE_SYNC)
2965 submit_bh(WRITE_SYNC, bh);
2967 submit_bh(WRITE, bh);
2971 if (!buffer_uptodate(bh)) {
2972 bh->b_end_io = end_buffer_read_sync;
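/*
 * Illustrative sketch (not part of this file): the classic read-and-wait
 * pattern built on ll_rw_block(), still common for metadata in older
 * filesystems.  The caller is assumed to hold a reference on a mapped bh.
 */
static int myfs_read_buffer(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;			/* read failed or was dropped */
	return 0;
}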
2983 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2984 * and then start new I/O and then wait upon it. The caller must have a ref on
2987 int sync_dirty_buffer(struct buffer_head *bh)
2991 WARN_ON(atomic_read(&bh->b_count) < 1);
2993 if (test_clear_buffer_dirty(bh)) {
2995 bh->b_end_io = end_buffer_write_sync;
2996 ret = submit_bh(WRITE_SYNC, bh);
2998 if (buffer_eopnotsupp(bh)) {
2999 clear_buffer_eopnotsupp(bh);
3002 if (!ret && !buffer_uptodate(bh))
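/*
 * Illustrative sketch (not part of this file): synchronously committing a
 * metadata block such as a superblock.  "bh" is assumed to map the block and
 * to be referenced by the caller; myfs_update_super() is hypothetical.
 */
static int myfs_commit_super(struct buffer_head *bh)
{
	myfs_update_super(bh->b_data);		/* modify the in-memory copy */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);		/* write it out and wait */
}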
3011 * try_to_free_buffers() checks if all the buffers on this particular page
3012 * are unused, and releases them if so.
3014 * Exclusion against try_to_free_buffers may be obtained by either
3015 * locking the page or by holding its mapping's private_lock.
3017 * If the page is dirty but all the buffers are clean then we need to
3018 * be sure to mark the page clean as well. This is because the page
3019 * may be against a block device, and a later reattachment of buffers
3020 * to a dirty page will set *all* buffers dirty. Which would corrupt
3021 * filesystem data on the same device.
3023 * The same applies to regular filesystem pages: if all the buffers are
3024 * clean then we set the page clean and proceed. To do that, we require
3025 * total exclusion from __set_page_dirty_buffers(). That is obtained with the mapping's private_lock.
3028 * try_to_free_buffers() is non-blocking.
3030 static inline int buffer_busy(struct buffer_head *bh)
3032 return atomic_read(&bh->b_count) |
3033 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3037 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3039 struct buffer_head *head = page_buffers(page);
3040 struct buffer_head *bh;
3044 if (buffer_write_io_error(bh) && page->mapping)
3045 set_bit(AS_EIO, &page->mapping->flags);
3046 if (buffer_busy(bh))
3048 bh = bh->b_this_page;
3049 } while (bh != head);
3052 struct buffer_head *next = bh->b_this_page;
3054 if (bh->b_assoc_map)
3055 __remove_assoc_queue(bh);
3057 } while (bh != head);
3058 *buffers_to_free = head;
3059 __clear_page_buffers(page);
3065 int try_to_free_buffers(struct page *page)
3067 struct address_space * const mapping = page->mapping;
3068 struct buffer_head *buffers_to_free = NULL;
3071 BUG_ON(!PageLocked(page));
3072 if (PageWriteback(page))
3075 if (mapping == NULL) { /* can this still happen? */
3076 ret = drop_buffers(page, &buffers_to_free);
3080 spin_lock(&mapping->private_lock);
3081 ret = drop_buffers(page, &buffers_to_free);
3084 * If the filesystem writes its buffers by hand (eg ext3)
3085 * then we can have clean buffers against a dirty page. We
3086 * clean the page here; otherwise the VM will never notice
3087 * that the filesystem did any IO at all.
3089 * Also, during truncate, discard_buffer will have marked all
3090 * the page's buffers clean. We discover that here and clean the page too.
3093 * private_lock must be held over this entire operation in order
3094 * to synchronise against __set_page_dirty_buffers and prevent the
3095 * dirty bit from being lost.
3098 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3099 spin_unlock(&mapping->private_lock);
3101 if (buffers_to_free) {
3102 struct buffer_head *bh = buffers_to_free;
3105 struct buffer_head *next = bh->b_this_page;
3106 free_buffer_head(bh);
3108 } while (bh != buffers_to_free);
3112 EXPORT_SYMBOL(try_to_free_buffers);
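/*
 * Illustrative sketch (not part of this file): a filesystem's ->releasepage
 * usually drops whatever private references it still holds on the page's
 * buffers and then lets try_to_free_buffers() do the real work.
 * myfs_release_private() is hypothetical.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (!myfs_release_private(page))	/* e.g. a journal still owns a bh */
		return 0;
	return try_to_free_buffers(page);
}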
3114 void block_sync_page(struct page *page)
3116 struct address_space *mapping;
3119 mapping = page_mapping(page);
3121 blk_run_backing_dev(mapping->backing_dev_info, page);
3125 * There are no bdflush tunables left. But distributions are
3126 * still running obsolete flush daemons, so we terminate them here.
3128 * Use of bdflush() is deprecated and will be removed in a future kernel.
3129 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3131 asmlinkage long sys_bdflush(int func, long data)
3133 static int msg_count;
3135 if (!capable(CAP_SYS_ADMIN))
3138 if (msg_count < 5) {
3141 "warning: process `%s' used the obsolete bdflush"
3142 " system call\n", current->comm);
3143 printk(KERN_INFO "Fix your initscripts?\n");
3152 * Buffer-head allocation
3154 static struct kmem_cache *bh_cachep;
3157 * Once the number of bh's in the machine exceeds this level, we start
3158 * stripping them in writeback.
3160 static int max_buffer_heads;
3162 int buffer_heads_over_limit;
3164 struct bh_accounting {
3165 int nr; /* Number of live bh's */
3166 int ratelimit; /* Limit cacheline bouncing */
3169 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3171 static void recalc_bh_state(void)
3176 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3178 __get_cpu_var(bh_accounting).ratelimit = 0;
3179 for_each_online_cpu(i)
3180 tot += per_cpu(bh_accounting, i).nr;
3181 buffer_heads_over_limit = (tot > max_buffer_heads);
3184 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3186 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3188 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3189 get_cpu_var(bh_accounting).nr++;
3191 put_cpu_var(bh_accounting);
3195 EXPORT_SYMBOL(alloc_buffer_head);
3197 void free_buffer_head(struct buffer_head *bh)
3199 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3200 kmem_cache_free(bh_cachep, bh);
3201 get_cpu_var(bh_accounting).nr--;
3203 put_cpu_var(bh_accounting);
3205 EXPORT_SYMBOL(free_buffer_head);
3207 static void buffer_exit_cpu(int cpu)
3210 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3212 for (i = 0; i < BH_LRU_SIZE; i++) {
3216 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3217 per_cpu(bh_accounting, cpu).nr = 0;
3218 put_cpu_var(bh_accounting);
3221 static int buffer_cpu_notify(struct notifier_block *self,
3222 unsigned long action, void *hcpu)
3224 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3225 buffer_exit_cpu((unsigned long)hcpu);
3230 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3231 * @bh: struct buffer_head
3233 * Return true if the buffer is up-to-date; otherwise return false
3234 * with the buffer locked.
3236 int bh_uptodate_or_lock(struct buffer_head *bh)
3238 if (!buffer_uptodate(bh)) {
3240 if (!buffer_uptodate(bh))
3246 EXPORT_SYMBOL(bh_uptodate_or_lock);
3249 * bh_submit_read - Submit a locked buffer for reading
3250 * @bh: struct buffer_head
3252 * Returns zero on success and -EIO on error.
3254 int bh_submit_read(struct buffer_head *bh)
3256 BUG_ON(!buffer_locked(bh));
3258 if (buffer_uptodate(bh)) {
3264 bh->b_end_io = end_buffer_read_sync;
3265 submit_bh(READ, bh);
3267 if (buffer_uptodate(bh))
3271 EXPORT_SYMBOL(bh_submit_read);
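/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * bh_uptodate_or_lock() and bh_submit_read() for reading a metadata block
 * only when it is not already cached up to date.
 */
static int myfs_read_block(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer left unlocked */
	return bh_submit_read(bh);	/* bh is locked for us; submits and waits */
}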
3274 init_buffer_head(void *data)
3276 struct buffer_head *bh = data;
3278 memset(bh, 0, sizeof(*bh));
3279 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3282 void __init buffer_init(void)
3286 bh_cachep = kmem_cache_create("buffer_head",
3287 sizeof(struct buffer_head), 0,
3288 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3293 * Limit the bh occupancy to 10% of ZONE_NORMAL
3295 nrpages = (nr_free_buffer_pages() * 10) / 100;
3296 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3297 hotcpu_notifier(buffer_cpu_notify, 0);
3300 EXPORT_SYMBOL(__bforget);
3301 EXPORT_SYMBOL(__brelse);
3302 EXPORT_SYMBOL(__wait_on_buffer);
3303 EXPORT_SYMBOL(block_commit_write);
3304 EXPORT_SYMBOL(block_prepare_write);
3305 EXPORT_SYMBOL(block_page_mkwrite);
3306 EXPORT_SYMBOL(block_read_full_page);
3307 EXPORT_SYMBOL(block_sync_page);
3308 EXPORT_SYMBOL(block_truncate_page);
3309 EXPORT_SYMBOL(block_write_full_page);
3310 EXPORT_SYMBOL(cont_write_begin);
3311 EXPORT_SYMBOL(end_buffer_read_sync);
3312 EXPORT_SYMBOL(end_buffer_write_sync);
3313 EXPORT_SYMBOL(file_fsync);
3314 EXPORT_SYMBOL(fsync_bdev);
3315 EXPORT_SYMBOL(generic_block_bmap);
3316 EXPORT_SYMBOL(generic_cont_expand_simple);
3317 EXPORT_SYMBOL(init_buffer);
3318 EXPORT_SYMBOL(invalidate_bdev);
3319 EXPORT_SYMBOL(ll_rw_block);
3320 EXPORT_SYMBOL(mark_buffer_dirty);
3321 EXPORT_SYMBOL(submit_bh);
3322 EXPORT_SYMBOL(sync_dirty_buffer);
3323 EXPORT_SYMBOL(unlock_buffer);