4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
56 static int sync_buffer(void *word)
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
65 blk_run_address_space(bd->bd_inode->i_mapping);
70 void __lock_buffer(struct buffer_head *bh)
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
75 EXPORT_SYMBOL(__lock_buffer);
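/*
 * Illustrative sketch (not part of the original file): lock_buffer(), which
 * is built on __lock_buffer() above, is how callers serialise against I/O
 * and against each other before inspecting or changing a buffer:
 *
 *     lock_buffer(bh);
 *     if (!buffer_uptodate(bh))
 *         err = myfs_refill_buffer(bh);    (myfs_refill_buffer is hypothetical)
 *     unlock_buffer(bh);
 *
 * A caller that only needs to wait for in-flight I/O, without taking the
 * lock itself, uses wait_on_buffer(bh) instead.
 */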
77 void unlock_buffer(struct buffer_head *bh)
79 smp_mb__before_clear_bit();
80 clear_buffer_locked(bh);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
86 * Block until a buffer comes unlocked. This doesn't stop it
87 * from becoming locked again - you have to lock it yourself
88 * if you want to preserve its state.
90 void __wait_on_buffer(struct buffer_head * bh)
92 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
96 __clear_page_buffers(struct page *page)
98 ClearPagePrivate(page);
99 set_page_private(page, 0);
100 page_cache_release(page);
103 static void buffer_io_error(struct buffer_head *bh)
105 char b[BDEVNAME_SIZE];
107 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 bdevname(bh->b_bdev, b),
109 (unsigned long long)bh->b_blocknr);
113 * End-of-IO handler helper function which does not touch the bh after
115 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116 * a race there is benign: unlock_buffer() only uses the bh's address for
117 * hashing after unlocking the buffer, so it doesn't actually touch the bh
120 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
123 set_buffer_uptodate(bh);
125 /* This happens, due to failed READA attempts. */
126 clear_buffer_uptodate(bh);
132 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
133 * unlock the buffer. This is what ll_rw_block uses too.
135 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
137 __end_buffer_read_notouch(bh, uptodate);
141 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
143 char b[BDEVNAME_SIZE];
146 set_buffer_uptodate(bh);
148 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
150 printk(KERN_WARNING "lost page write due to "
152 bdevname(bh->b_bdev, b));
154 set_buffer_write_io_error(bh);
155 clear_buffer_uptodate(bh);
162 * Write out and wait upon all the dirty data associated with a block
163 * device via its mapping. Does not take the superblock lock.
165 int sync_blockdev(struct block_device *bdev)
170 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
173 EXPORT_SYMBOL(sync_blockdev);
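/*
 * Illustrative sketch (hypothetical caller): a driver or ioctl path that is
 * about to invalidate a device's pagecache typically syncs it first, which
 * matches the "sync, then invalidate" usage described for invalidate_bdev()
 * below:
 *
 *     sync_blockdev(bdev);
 *     invalidate_bdev(bdev);
 */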
176 * Write out and wait upon all dirty data associated with this
177 * device. Filesystem data as well as the underlying block
178 * device. Takes the superblock lock.
180 int fsync_bdev(struct block_device *bdev)
182 struct super_block *sb = get_super(bdev);
184 int res = fsync_super(sb);
188 return sync_blockdev(bdev);
192 * freeze_bdev -- lock a filesystem and force it into a consistent state
193 * @bdev: blockdevice to lock
195 * This takes the block device bd_mount_sem to make sure no new mounts
196 * happen on bdev until thaw_bdev() is called.
197 * If a superblock is found on this device, we take the s_umount semaphore
198 * on it to make sure nobody unmounts until the snapshot creation is done.
200 struct super_block *freeze_bdev(struct block_device *bdev)
202 struct super_block *sb;
204 down(&bdev->bd_mount_sem);
205 sb = get_super(bdev);
206 if (sb && !(sb->s_flags & MS_RDONLY)) {
207 sb->s_frozen = SB_FREEZE_WRITE;
212 sb->s_frozen = SB_FREEZE_TRANS;
215 sync_blockdev(sb->s_bdev);
217 if (sb->s_op->write_super_lockfs)
218 sb->s_op->write_super_lockfs(sb);
222 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
224 EXPORT_SYMBOL(freeze_bdev);
227 * thaw_bdev -- unlock filesystem
228 * @bdev: blockdevice to unlock
229 * @sb: associated superblock
231 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
233 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
236 BUG_ON(sb->s_bdev != bdev);
238 if (sb->s_op->unlockfs)
239 sb->s_op->unlockfs(sb);
240 sb->s_frozen = SB_UNFROZEN;
242 wake_up(&sb->s_wait_unfrozen);
246 up(&bdev->bd_mount_sem);
248 EXPORT_SYMBOL(thaw_bdev);
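/*
 * Illustrative sketch (hypothetical snapshot code): freeze_bdev()/thaw_bdev()
 * bracket the window in which a consistent on-disk image may be taken, for
 * example by a device-mapper snapshot target.  take_device_snapshot() is a
 * made-up placeholder:
 *
 *     struct super_block *sb;
 *
 *     sb = freeze_bdev(bdev);
 *     take_device_snapshot(bdev);
 *     thaw_bdev(bdev, sb);
 *
 * freeze_bdev() may return NULL when no superblock is found on bdev;
 * thaw_bdev() still releases bd_mount_sem in that case.
 */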
251 * Various filesystems appear to want __find_get_block to be non-blocking.
252 * But it's the page lock which protects the buffers. To get around this,
253 * we get exclusion from try_to_free_buffers with the blockdev mapping's
256 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257 * may be quite high. This code could TryLock the page, and if that
258 * succeeds, there is no need to take private_lock. (But if
259 * private_lock is contended then so is mapping->tree_lock).
261 static struct buffer_head *
262 __find_get_block_slow(struct block_device *bdev, sector_t block)
264 struct inode *bd_inode = bdev->bd_inode;
265 struct address_space *bd_mapping = bd_inode->i_mapping;
266 struct buffer_head *ret = NULL;
268 struct buffer_head *bh;
269 struct buffer_head *head;
273 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 page = find_get_page(bd_mapping, index);
278 spin_lock(&bd_mapping->private_lock);
279 if (!page_has_buffers(page))
281 head = page_buffers(page);
284 if (bh->b_blocknr == block) {
289 if (!buffer_mapped(bh))
291 bh = bh->b_this_page;
292 } while (bh != head);
294 /* we might be here because some of the buffers on this page are
295 * not mapped. This is due to various races between
296 * file I/O on the block device and getblk. It gets dealt with
297 * elsewhere; don't buffer_error if we had some unmapped buffers
300 printk("__find_get_block_slow() failed. "
301 "block=%llu, b_blocknr=%llu\n",
302 (unsigned long long)block,
303 (unsigned long long)bh->b_blocknr);
304 printk("b_state=0x%08lx, b_size=%zu\n",
305 bh->b_state, bh->b_size);
306 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
309 spin_unlock(&bd_mapping->private_lock);
310 page_cache_release(page);
315 /* If invalidate_buffers() will trash dirty buffers, it means some kind
316 of fs corruption is going on. Trashing dirty data always implies losing
317 information that was supposed to be just stored on the physical layer
320 Thus invalidate_buffers in general usage is not allowed to trash
321 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
322 be preserved. These buffers are simply skipped.
324 We also skip buffers which are still in use. For example this can
325 happen if a userspace program is reading the block device.
327 NOTE: In the case where the user removed a removable-media disk, even if
328 there is still dirty data not synced to disk (due to a bug in the device driver
329 or to an error by the user), by not destroying the dirty buffers we could
330 generate corruption also on the next media inserted; thus a parameter is
331 necessary to handle this case in the safest way possible (trying
332 not to corrupt the newly inserted disk with the data belonging to
333 the old, now-corrupted disk). Also, for a ramdisk the natural way
334 to release the ramdisk memory is to destroy its dirty buffers.
336 These are two special cases. Normal usage implies that the device driver
337 issues a sync on the device (without waiting for I/O completion) and
338 then an invalidate_buffers call that doesn't trash dirty buffers.
340 For handling cache coherency with the blkdev pagecache the 'update' case
341 has been introduced. It is needed to re-read from disk any pinned
342 buffer. NOTE: re-reading from disk is destructive, so we can do it only
343 when we assume nobody is changing the buffercache under our I/O and when
344 we think the disk contains more recent information than the buffercache.
345 The update == 1 pass marks the buffers we need to update; the update == 2
346 pass does the actual I/O. */
347 void invalidate_bdev(struct block_device *bdev)
349 struct address_space *mapping = bdev->bd_inode->i_mapping;
351 if (mapping->nrpages == 0)
354 invalidate_bh_lrus();
355 invalidate_mapping_pages(mapping, 0, -1);
359 * Kick pdflush then try to free up some ZONE_NORMAL memory.
361 static void free_more_memory(void)
366 wakeup_pdflush(1024);
369 for_each_online_pgdat(pgdat) {
370 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
372 try_to_free_pages(zones, 0, GFP_NOFS);
377 * I/O completion handler for block_read_full_page() - pages
378 * which come unlocked at the end of I/O.
380 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
383 struct buffer_head *first;
384 struct buffer_head *tmp;
386 int page_uptodate = 1;
388 BUG_ON(!buffer_async_read(bh));
392 set_buffer_uptodate(bh);
394 clear_buffer_uptodate(bh);
395 if (printk_ratelimit())
401 * Be _very_ careful from here on. Bad things can happen if
402 * two buffer heads end IO at almost the same time and both
403 * decide that the page is now completely done.
405 first = page_buffers(page);
406 local_irq_save(flags);
407 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
408 clear_buffer_async_read(bh);
412 if (!buffer_uptodate(tmp))
414 if (buffer_async_read(tmp)) {
415 BUG_ON(!buffer_locked(tmp));
418 tmp = tmp->b_this_page;
420 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 local_irq_restore(flags);
424 * If none of the buffers had errors and they are all
425 * uptodate then we can set the page uptodate.
427 if (page_uptodate && !PageError(page))
428 SetPageUptodate(page);
433 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
434 local_irq_restore(flags);
439 * Completion handler for block_write_full_page() - pages which are unlocked
440 * during I/O, and which have PageWriteback cleared upon I/O completion.
442 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
444 char b[BDEVNAME_SIZE];
446 struct buffer_head *first;
447 struct buffer_head *tmp;
450 BUG_ON(!buffer_async_write(bh));
454 set_buffer_uptodate(bh);
456 if (printk_ratelimit()) {
458 printk(KERN_WARNING "lost page write due to "
460 bdevname(bh->b_bdev, b));
462 set_bit(AS_EIO, &page->mapping->flags);
463 set_buffer_write_io_error(bh);
464 clear_buffer_uptodate(bh);
468 first = page_buffers(page);
469 local_irq_save(flags);
470 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
472 clear_buffer_async_write(bh);
474 tmp = bh->b_this_page;
476 if (buffer_async_write(tmp)) {
477 BUG_ON(!buffer_locked(tmp));
480 tmp = tmp->b_this_page;
482 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
483 local_irq_restore(flags);
484 end_page_writeback(page);
488 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
489 local_irq_restore(flags);
494 * If a page's buffers are under async read-in (end_buffer_async_read
495 * completion) then there is a possibility that another thread of
496 * control could lock one of the buffers after it has completed
497 * but while some of the other buffers have not completed. This
498 * locked buffer would confuse end_buffer_async_read() into not unlocking
499 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
500 * that this buffer is not under async I/O.
502 * The page comes unlocked when it has no locked buffer_async buffers
505 * PageLocked prevents anyone from starting new async I/O reads against any of the buffers.
508 * PageWriteback is used to prevent simultaneous writeout of the same page.
511 * PageLocked prevents anyone from starting writeback of a page which is
512 * under read I/O (PageWriteback is only ever set against a locked page).
514 static void mark_buffer_async_read(struct buffer_head *bh)
516 bh->b_end_io = end_buffer_async_read;
517 set_buffer_async_read(bh);
520 void mark_buffer_async_write(struct buffer_head *bh)
522 bh->b_end_io = end_buffer_async_write;
523 set_buffer_async_write(bh);
525 EXPORT_SYMBOL(mark_buffer_async_write);
529 * fs/buffer.c contains helper functions for buffer-backed address space's
530 * fsync functions. A common requirement for buffer-based filesystems is
531 * that certain data from the backing blockdev needs to be written out for
532 * a successful fsync(). For example, ext2 indirect blocks need to be
533 * written back and waited upon before fsync() returns.
535 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
536 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
537 * management of a list of dependent buffers at ->i_mapping->private_list.
539 * Locking is a little subtle: try_to_free_buffers() will remove buffers
540 * from their controlling inode's queue when they are being freed. But
541 * try_to_free_buffers() will be operating against the *blockdev* mapping
542 * at the time, not against the S_ISREG file which depends on those buffers.
543 * So the locking for private_list is via the private_lock in the address_space
544 * which backs the buffers. Which is different from the address_space
545 * against which the buffers are listed. So for a particular address_space,
546 * mapping->private_lock does *not* protect mapping->private_list! In fact,
547 * mapping->private_list will always be protected by the backing blockdev's
550 * Which introduces a requirement: all buffers on an address_space's
551 * ->private_list must be from the same address_space: the blockdev's.
553 * address_spaces which do not place buffers at ->private_list via these
554 * utility functions are free to use private_lock and private_list for
555 * whatever they want. The only requirement is that list_empty(private_list)
556 * be true at clear_inode() time.
558 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
559 * filesystems should do that. invalidate_inode_buffers() should just go
560 * BUG_ON(!list_empty).
562 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
563 * take an address_space, not an inode. And it should be called
564 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
567 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
568 * list if it is already on a list. Because if the buffer is on a list,
569 * it *must* already be on the right one. If not, the filesystem is being
570 * silly. This will save a ton of locking. But first we have to ensure
571 * that buffers are taken *off* the old inode's list when they are freed
572 * (presumably in truncate). That requires careful auditing of all
573 * filesystems (do it inside bforget()). It could also be done by bringing
578 * The buffer's backing address_space's private_lock must be held
580 static inline void __remove_assoc_queue(struct buffer_head *bh)
582 list_del_init(&bh->b_assoc_buffers);
583 WARN_ON(!bh->b_assoc_map);
584 if (buffer_write_io_error(bh))
585 set_bit(AS_EIO, &bh->b_assoc_map->flags);
586 bh->b_assoc_map = NULL;
589 int inode_has_buffers(struct inode *inode)
591 return !list_empty(&inode->i_data.private_list);
595 * osync is designed to support O_SYNC io. It waits synchronously for
596 * all already-submitted IO to complete, but does not queue any new
597 * writes to the disk.
599 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
600 * you dirty the buffers, and then use osync_inode_buffers to wait for
601 * completion. Any other dirty buffers which are not yet queued for
602 * write will not be flushed to disk by the osync.
604 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
606 struct buffer_head *bh;
612 list_for_each_prev(p, list) {
614 if (buffer_locked(bh)) {
618 if (!buffer_uptodate(bh))
630 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
632 * @mapping: the mapping which wants those buffers written
634 * Starts I/O against the buffers at mapping->private_list, and waits upon
637 * Basically, this is a convenience function for fsync().
638 * @mapping is a file or directory which needs those buffers to be written for
639 * a successful fsync().
641 int sync_mapping_buffers(struct address_space *mapping)
643 struct address_space *buffer_mapping = mapping->assoc_mapping;
645 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
648 return fsync_buffers_list(&buffer_mapping->private_lock,
649 &mapping->private_list);
651 EXPORT_SYMBOL(sync_mapping_buffers);
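/*
 * Illustrative sketch (hypothetical myfs): the usual shape of a ->fsync
 * method for a buffer-backed filesystem.  Metadata buffers queued with
 * mark_buffer_dirty_inode() (below) are written out and waited upon here;
 * real filesystems additionally flush the inode itself.
 *
 *     static int myfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 *     {
 *         struct inode *inode = dentry->d_inode;
 *
 *         return sync_mapping_buffers(inode->i_mapping);
 *     }
 */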
654 * Called when we've recently written block `bblock', and it is known that
655 * `bblock' was for a buffer_boundary() buffer. This means that the block at
656 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
657 * dirty, schedule it for IO. So that indirects merge nicely with their data.
659 void write_boundary_block(struct block_device *bdev,
660 sector_t bblock, unsigned blocksize)
662 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
664 if (buffer_dirty(bh))
665 ll_rw_block(WRITE, 1, &bh);
670 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
672 struct address_space *mapping = inode->i_mapping;
673 struct address_space *buffer_mapping = bh->b_page->mapping;
675 mark_buffer_dirty(bh);
676 if (!mapping->assoc_mapping) {
677 mapping->assoc_mapping = buffer_mapping;
679 BUG_ON(mapping->assoc_mapping != buffer_mapping);
681 if (!bh->b_assoc_map) {
682 spin_lock(&buffer_mapping->private_lock);
683 list_move_tail(&bh->b_assoc_buffers,
684 &mapping->private_list);
685 bh->b_assoc_map = mapping;
686 spin_unlock(&buffer_mapping->private_lock);
689 EXPORT_SYMBOL(mark_buffer_dirty_inode);
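/*
 * Illustrative sketch (hypothetical myfs): after modifying an indirect block
 * read with sb_bread(), a filesystem associates it with the regular file's
 * mapping so that sync_mapping_buffers() above picks it up at fsync time.
 * indirect_blocknr, slot and new_block are made-up names:
 *
 *     struct buffer_head *bh = sb_bread(inode->i_sb, indirect_blocknr);
 *
 *     if (bh) {
 *         lock_buffer(bh);
 *         ((__le32 *)bh->b_data)[slot] = cpu_to_le32(new_block);
 *         unlock_buffer(bh);
 *         mark_buffer_dirty_inode(bh, inode);
 *         brelse(bh);
 *     }
 */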
692 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
695 * If warn is true, then emit a warning if the page is not uptodate and has
696 * not been truncated.
698 static int __set_page_dirty(struct page *page,
699 struct address_space *mapping, int warn)
701 if (unlikely(!mapping))
702 return !TestSetPageDirty(page);
704 if (TestSetPageDirty(page))
707 write_lock_irq(&mapping->tree_lock);
708 if (page->mapping) { /* Race with truncate? */
709 WARN_ON_ONCE(warn && !PageUptodate(page));
711 if (mapping_cap_account_dirty(mapping)) {
712 __inc_zone_page_state(page, NR_FILE_DIRTY);
713 __inc_bdi_stat(mapping->backing_dev_info,
715 task_io_account_write(PAGE_CACHE_SIZE);
717 radix_tree_tag_set(&mapping->page_tree,
718 page_index(page), PAGECACHE_TAG_DIRTY);
720 write_unlock_irq(&mapping->tree_lock);
721 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
727 * Add a page to the dirty page list.
729 * It is a sad fact of life that this function is called from several places
730 * deeply under spinlocking. It may not sleep.
732 * If the page has buffers, the uptodate buffers are set dirty, to preserve
733 * dirty-state coherency between the page and the buffers. If the page does
734 * not have buffers then when they are later attached they will all be set
737 * The buffers are dirtied before the page is dirtied. There's a small race
738 * window in which a writepage caller may see the page cleanness but not the
739 * buffer dirtiness. That's fine. If this code were to set the page dirty
740 * before the buffers, a concurrent writepage caller could clear the page dirty
741 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
742 * page on the dirty page list.
744 * We use private_lock to lock against try_to_free_buffers while using the
745 * page's buffer list. Also use this to protect against clean buffers being
746 * added to the page after it was set dirty.
748 * FIXME: may need to call ->reservepage here as well. That's rather up to the
749 * address_space though.
751 int __set_page_dirty_buffers(struct page *page)
753 struct address_space *mapping = page_mapping(page);
755 if (unlikely(!mapping))
756 return !TestSetPageDirty(page);
758 spin_lock(&mapping->private_lock);
759 if (page_has_buffers(page)) {
760 struct buffer_head *head = page_buffers(page);
761 struct buffer_head *bh = head;
764 set_buffer_dirty(bh);
765 bh = bh->b_this_page;
766 } while (bh != head);
768 spin_unlock(&mapping->private_lock);
770 return __set_page_dirty(page, mapping, 1);
772 EXPORT_SYMBOL(__set_page_dirty_buffers);
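/*
 * Illustrative sketch: a buffer-backed filesystem may wire this helper
 * directly into its address_space_operations (myfs_aops is hypothetical and
 * shows only the one method; many filesystems instead rely on it as the
 * default set_page_dirty behaviour):
 *
 *     static const struct address_space_operations myfs_aops = {
 *         .set_page_dirty = __set_page_dirty_buffers,
 *     };
 */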
775 * Write out and wait upon a list of buffers.
777 * We have conflicting pressures: we want to make sure that all
778 * initially dirty buffers get waited on, but that any subsequently
779 * dirtied buffers don't. After all, we don't want fsync to last
780 * forever if somebody is actively writing to the file.
782 * Do this in two main stages: first we copy dirty buffers to a
783 * temporary inode list, queueing the writes as we go. Then we clean
784 * up, waiting for those writes to complete.
786 * During this second stage, any subsequent updates to the file may end
787 * up refiling the buffer on the original inode's dirty list again, so
788 * there is a chance we will end up with a buffer queued for write but
789 * not yet completed on that list. So, as a final cleanup we go through
790 * the osync code to catch these locked, dirty buffers without requeuing
791 * any newly dirty buffers for write.
793 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
795 struct buffer_head *bh;
796 struct list_head tmp;
797 struct address_space *mapping;
800 INIT_LIST_HEAD(&tmp);
803 while (!list_empty(list)) {
804 bh = BH_ENTRY(list->next);
805 mapping = bh->b_assoc_map;
806 __remove_assoc_queue(bh);
807 /* Avoid race with mark_buffer_dirty_inode() which does
808 * a lockless check and we rely on seeing the dirty bit */
810 if (buffer_dirty(bh) || buffer_locked(bh)) {
811 list_add(&bh->b_assoc_buffers, &tmp);
812 bh->b_assoc_map = mapping;
813 if (buffer_dirty(bh)) {
817 * Ensure any pending I/O completes so that
818 * ll_rw_block() actually writes the current
819 * contents - it is a noop if I/O is still in
820 * flight on potentially older contents.
822 ll_rw_block(SWRITE, 1, &bh);
829 while (!list_empty(&tmp)) {
830 bh = BH_ENTRY(tmp.prev);
832 mapping = bh->b_assoc_map;
833 __remove_assoc_queue(bh);
834 /* Avoid race with mark_buffer_dirty_inode() which does
835 * a lockless check and we rely on seeing the dirty bit */
837 if (buffer_dirty(bh)) {
838 list_add(&bh->b_assoc_buffers,
839 &bh->b_assoc_map->private_list);
840 bh->b_assoc_map = mapping;
844 if (!buffer_uptodate(bh))
851 err2 = osync_buffers_list(lock, list);
859 * Invalidate any and all dirty buffers on a given inode. We are
860 * probably unmounting the fs, but that doesn't mean we have already
861 * done a sync(). Just drop the buffers from the inode list.
863 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
864 * assumes that all the buffers are against the blockdev. Not true
867 void invalidate_inode_buffers(struct inode *inode)
869 if (inode_has_buffers(inode)) {
870 struct address_space *mapping = &inode->i_data;
871 struct list_head *list = &mapping->private_list;
872 struct address_space *buffer_mapping = mapping->assoc_mapping;
874 spin_lock(&buffer_mapping->private_lock);
875 while (!list_empty(list))
876 __remove_assoc_queue(BH_ENTRY(list->next));
877 spin_unlock(&buffer_mapping->private_lock);
882 * Remove any clean buffers from the inode's buffer list. This is called
883 * when we're trying to free the inode itself. Those buffers can pin it.
885 * Returns true if all buffers were removed.
887 int remove_inode_buffers(struct inode *inode)
891 if (inode_has_buffers(inode)) {
892 struct address_space *mapping = &inode->i_data;
893 struct list_head *list = &mapping->private_list;
894 struct address_space *buffer_mapping = mapping->assoc_mapping;
896 spin_lock(&buffer_mapping->private_lock);
897 while (!list_empty(list)) {
898 struct buffer_head *bh = BH_ENTRY(list->next);
899 if (buffer_dirty(bh)) {
903 __remove_assoc_queue(bh);
905 spin_unlock(&buffer_mapping->private_lock);
911 * Create the appropriate buffers when given a page for data area and
912 * the size of each buffer.. Use the bh->b_this_page linked list to
913 * follow the buffers created. Return NULL if unable to create more
916 * The retry flag is used to differentiate async IO (paging, swapping)
917 * which may not fail from ordinary buffer allocations.
919 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
922 struct buffer_head *bh, *head;
928 while ((offset -= size) >= 0) {
929 bh = alloc_buffer_head(GFP_NOFS);
934 bh->b_this_page = head;
939 atomic_set(&bh->b_count, 0);
940 bh->b_private = NULL;
943 /* Link the buffer to its page */
944 set_bh_page(bh, page, offset);
946 init_buffer(bh, NULL, NULL);
950 * In case anything failed, we just free everything we got.
956 head = head->b_this_page;
957 free_buffer_head(bh);
962 * Return failure for non-async IO requests. Async IO requests
963 * are not allowed to fail, so we have to wait until buffer heads
964 * become available. But we don't want tasks sleeping with
965 * partially complete buffers, so all were released above.
970 /* We're _really_ low on memory. Now we just
971 * wait for old buffer heads to become free due to
972 * finishing IO. Since this is an async request and
973 * the reserve list is empty, we're sure there are
974 * async buffer heads in use.
979 EXPORT_SYMBOL_GPL(alloc_page_buffers);
982 link_dev_buffers(struct page *page, struct buffer_head *head)
984 struct buffer_head *bh, *tail;
989 bh = bh->b_this_page;
991 tail->b_this_page = head;
992 attach_page_buffers(page, head);
996 * Initialise the state of a blockdev page's buffers.
999 init_page_buffers(struct page *page, struct block_device *bdev,
1000 sector_t block, int size)
1002 struct buffer_head *head = page_buffers(page);
1003 struct buffer_head *bh = head;
1004 int uptodate = PageUptodate(page);
1007 if (!buffer_mapped(bh)) {
1008 init_buffer(bh, NULL, NULL);
1010 bh->b_blocknr = block;
1012 set_buffer_uptodate(bh);
1013 set_buffer_mapped(bh);
1016 bh = bh->b_this_page;
1017 } while (bh != head);
1021 * Create the page-cache page that contains the requested block.
1023 * This is used purely for blockdev mappings.
1025 static struct page *
1026 grow_dev_page(struct block_device *bdev, sector_t block,
1027 pgoff_t index, int size)
1029 struct inode *inode = bdev->bd_inode;
1031 struct buffer_head *bh;
1033 page = find_or_create_page(inode->i_mapping, index,
1034 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1038 BUG_ON(!PageLocked(page));
1040 if (page_has_buffers(page)) {
1041 bh = page_buffers(page);
1042 if (bh->b_size == size) {
1043 init_page_buffers(page, bdev, block, size);
1046 if (!try_to_free_buffers(page))
1051 * Allocate some buffers for this page
1053 bh = alloc_page_buffers(page, size, 0);
1058 * Link the page to the buffers and initialise them. Take the
1059 * lock to be atomic wrt __find_get_block(), which does not
1060 * run under the page lock.
1062 spin_lock(&inode->i_mapping->private_lock);
1063 link_dev_buffers(page, bh);
1064 init_page_buffers(page, bdev, block, size);
1065 spin_unlock(&inode->i_mapping->private_lock);
1071 page_cache_release(page);
1076 * Create buffers for the specified block device block's page. If
1077 * that page was dirty, the buffers are set dirty also.
1080 grow_buffers(struct block_device *bdev, sector_t block, int size)
1089 } while ((size << sizebits) < PAGE_SIZE);
1091 index = block >> sizebits;
1094 * Check for a block which wants to lie outside our maximum possible
1095 * pagecache index. (this comparison is done using sector_t types).
1097 if (unlikely(index != block >> sizebits)) {
1098 char b[BDEVNAME_SIZE];
1100 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1102 __FUNCTION__, (unsigned long long)block,
1106 block = index << sizebits;
1107 /* Create a page with the proper size buffers.. */
1108 page = grow_dev_page(bdev, block, index, size);
1112 page_cache_release(page);
1116 static struct buffer_head *
1117 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1119 /* Size must be a multiple of the hard sector size */
1120 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1121 (size < 512 || size > PAGE_SIZE))) {
1122 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1124 printk(KERN_ERR "hardsect size: %d\n",
1125 bdev_hardsect_size(bdev));
1132 struct buffer_head * bh;
1135 bh = __find_get_block(bdev, block, size);
1139 ret = grow_buffers(bdev, block, size);
1148 * The relationship between dirty buffers and dirty pages:
1150 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1151 * the page is tagged dirty in its radix tree.
1153 * At all times, the dirtiness of the buffers represents the dirtiness of
1154 * subsections of the page. If the page has buffers, the page dirty bit is
1155 * merely a hint about the true dirty state.
1157 * When a page is set dirty in its entirety, all its buffers are marked dirty
1158 * (if the page has buffers).
1160 * When a buffer is marked dirty, its page is dirtied, but the page's other
1163 * Also. When blockdev buffers are explicitly read with bread(), they
1164 * individually become uptodate. But their backing page remains not
1165 * uptodate - even if all of its buffers are uptodate. A subsequent
1166 * block_read_full_page() against that page will discover all the uptodate
1167 * buffers, will set the page uptodate and will perform no I/O.
1171 * mark_buffer_dirty - mark a buffer_head as needing writeout
1172 * @bh: the buffer_head to mark dirty
1174 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1175 * backing page dirty, then tag the page as dirty in its address_space's radix
1176 * tree and then attach the address_space's inode to its superblock's dirty
1179 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1180 * mapping->tree_lock and the global inode_lock.
1182 void mark_buffer_dirty(struct buffer_head *bh)
1184 WARN_ON_ONCE(!buffer_uptodate(bh));
1185 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1186 __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1190 * Decrement a buffer_head's reference count. If all buffers against a page
1191 * have zero reference count, are clean and unlocked, and if the page is clean
1192 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1193 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1194 * a page but it ends up not being freed, and buffers may later be reattached).
1196 void __brelse(struct buffer_head * buf)
1198 if (atomic_read(&buf->b_count)) {
1202 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1207 * bforget() is like brelse(), except it discards any
1208 * potentially dirty data.
1210 void __bforget(struct buffer_head *bh)
1212 clear_buffer_dirty(bh);
1213 if (bh->b_assoc_map) {
1214 struct address_space *buffer_mapping = bh->b_page->mapping;
1216 spin_lock(&buffer_mapping->private_lock);
1217 list_del_init(&bh->b_assoc_buffers);
1218 bh->b_assoc_map = NULL;
1219 spin_unlock(&buffer_mapping->private_lock);
1224 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1227 if (buffer_uptodate(bh)) {
1232 bh->b_end_io = end_buffer_read_sync;
1233 submit_bh(READ, bh);
1235 if (buffer_uptodate(bh))
1243 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1244 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1245 * refcount elevated by one when they're in an LRU. A buffer can only appear
1246 * once in a particular CPU's LRU. A single buffer can be present in multiple
1247 * CPU's LRUs at the same time.
1249 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1250 * sb_find_get_block().
1252 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1253 * a local interrupt disable for that.
1256 #define BH_LRU_SIZE 8
1259 struct buffer_head *bhs[BH_LRU_SIZE];
1262 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1265 #define bh_lru_lock() local_irq_disable()
1266 #define bh_lru_unlock() local_irq_enable()
1268 #define bh_lru_lock() preempt_disable()
1269 #define bh_lru_unlock() preempt_enable()
1272 static inline void check_irqs_on(void)
1274 #ifdef irqs_disabled
1275 BUG_ON(irqs_disabled());
1280 * The LRU management algorithm is dopey-but-simple. Sorry.
1282 static void bh_lru_install(struct buffer_head *bh)
1284 struct buffer_head *evictee = NULL;
1289 lru = &__get_cpu_var(bh_lrus);
1290 if (lru->bhs[0] != bh) {
1291 struct buffer_head *bhs[BH_LRU_SIZE];
1297 for (in = 0; in < BH_LRU_SIZE; in++) {
1298 struct buffer_head *bh2 = lru->bhs[in];
1303 if (out >= BH_LRU_SIZE) {
1304 BUG_ON(evictee != NULL);
1311 while (out < BH_LRU_SIZE)
1313 memcpy(lru->bhs, bhs, sizeof(bhs));
1322 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1324 static struct buffer_head *
1325 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1327 struct buffer_head *ret = NULL;
1333 lru = &__get_cpu_var(bh_lrus);
1334 for (i = 0; i < BH_LRU_SIZE; i++) {
1335 struct buffer_head *bh = lru->bhs[i];
1337 if (bh && bh->b_bdev == bdev &&
1338 bh->b_blocknr == block && bh->b_size == size) {
1341 lru->bhs[i] = lru->bhs[i - 1];
1356 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1357 * it in the LRU and mark it as accessed. If it is not present then return
1360 struct buffer_head *
1361 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1363 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1366 bh = __find_get_block_slow(bdev, block);
1374 EXPORT_SYMBOL(__find_get_block);
1377 * __getblk will locate (and, if necessary, create) the buffer_head
1378 * which corresponds to the passed block_device, block and size. The
1379 * returned buffer has its reference count incremented.
1381 * __getblk() cannot fail - it just keeps trying. If you pass it an
1382 * illegal block number, __getblk() will happily return a buffer_head
1383 * which represents the non-existent block. Very weird.
1385 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1386 * attempt is failing. FIXME, perhaps?
1388 struct buffer_head *
1389 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1391 struct buffer_head *bh = __find_get_block(bdev, block, size);
1395 bh = __getblk_slow(bdev, block, size);
1398 EXPORT_SYMBOL(__getblk);
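/*
 * Illustrative sketch: the common pattern for creating or overwriting a
 * metadata block without reading it first.  sb_getblk() is the
 * superblock-based wrapper around __getblk(); blocknr is a made-up variable:
 *
 *     struct buffer_head *bh = sb_getblk(sb, blocknr);
 *
 *     lock_buffer(bh);
 *     memset(bh->b_data, 0, bh->b_size);
 *     set_buffer_uptodate(bh);
 *     unlock_buffer(bh);
 *     mark_buffer_dirty(bh);
 *     brelse(bh);
 */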
1401 * Do async read-ahead on a buffer..
1403 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1405 struct buffer_head *bh = __getblk(bdev, block, size);
1407 ll_rw_block(READA, 1, &bh);
1411 EXPORT_SYMBOL(__breadahead);
1414 * __bread() - reads a specified block and returns the bh
1415 * @bdev: the block_device to read from
1416 * @block: number of block
1417 * @size: size (in bytes) to read
1419 * Reads a specified block, and returns buffer head that contains it.
1420 * It returns NULL if the block was unreadable.
1422 struct buffer_head *
1423 __bread(struct block_device *bdev, sector_t block, unsigned size)
1425 struct buffer_head *bh = __getblk(bdev, block, size);
1427 if (likely(bh) && !buffer_uptodate(bh))
1428 bh = __bread_slow(bh);
1431 EXPORT_SYMBOL(__bread);
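/*
 * Illustrative sketch: reading an on-disk structure through the buffer
 * cache.  sb_bread() is the superblock-based wrapper around __bread();
 * blocknr and ondisk are made-up names:
 *
 *     struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *     if (!bh)
 *         return -EIO;
 *     memcpy(&ondisk, bh->b_data, sizeof(ondisk));
 *     brelse(bh);
 */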
1434 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1435 * This doesn't race because it runs in each cpu either in irq
1436 * or with preempt disabled.
1438 static void invalidate_bh_lru(void *arg)
1440 struct bh_lru *b = &get_cpu_var(bh_lrus);
1443 for (i = 0; i < BH_LRU_SIZE; i++) {
1447 put_cpu_var(bh_lrus);
1450 void invalidate_bh_lrus(void)
1452 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1454 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1456 void set_bh_page(struct buffer_head *bh,
1457 struct page *page, unsigned long offset)
1460 BUG_ON(offset >= PAGE_SIZE);
1461 if (PageHighMem(page))
1463 * This catches illegal uses and preserves the offset:
1465 bh->b_data = (char *)(0 + offset);
1467 bh->b_data = page_address(page) + offset;
1469 EXPORT_SYMBOL(set_bh_page);
1472 * Called when truncating a buffer on a page completely.
1474 static void discard_buffer(struct buffer_head * bh)
1477 clear_buffer_dirty(bh);
1479 clear_buffer_mapped(bh);
1480 clear_buffer_req(bh);
1481 clear_buffer_new(bh);
1482 clear_buffer_delay(bh);
1483 clear_buffer_unwritten(bh);
1488 * block_invalidatepage - invalidate part or all of a buffer-backed page
1490 * @page: the page which is affected
1491 * @offset: the index of the truncation point
1493 * block_invalidatepage() is called when all or part of the page has become
1494 * invalidated by a truncate operation.
1496 * block_invalidatepage() does not have to release all buffers, but it must
1497 * ensure that no dirty buffer is left outside @offset and that no I/O
1498 * is underway against any of the blocks which are outside the truncation
1499 * point. Because the caller is about to free (and possibly reuse) those
1502 void block_invalidatepage(struct page *page, unsigned long offset)
1504 struct buffer_head *head, *bh, *next;
1505 unsigned int curr_off = 0;
1507 BUG_ON(!PageLocked(page));
1508 if (!page_has_buffers(page))
1511 head = page_buffers(page);
1514 unsigned int next_off = curr_off + bh->b_size;
1515 next = bh->b_this_page;
1518 * is this block fully invalidated?
1520 if (offset <= curr_off)
1522 curr_off = next_off;
1524 } while (bh != head);
1527 * We release buffers only if the entire page is being invalidated.
1528 * The get_block cached value has been unconditionally invalidated,
1529 * so real IO is not possible anymore.
1532 try_to_release_page(page, 0);
1536 EXPORT_SYMBOL(block_invalidatepage);
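/*
 * Illustrative sketch: filesystems that keep no private per-page state can
 * plug this helper straight into their address_space_operations (myfs_aops
 * is hypothetical; other methods are omitted):
 *
 *     static const struct address_space_operations myfs_aops = {
 *         .invalidatepage = block_invalidatepage,
 *     };
 */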
1539 * We attach and possibly dirty the buffers atomically wrt
1540 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1541 * is already excluded via the page lock.
1543 void create_empty_buffers(struct page *page,
1544 unsigned long blocksize, unsigned long b_state)
1546 struct buffer_head *bh, *head, *tail;
1548 head = alloc_page_buffers(page, blocksize, 1);
1551 bh->b_state |= b_state;
1553 bh = bh->b_this_page;
1555 tail->b_this_page = head;
1557 spin_lock(&page->mapping->private_lock);
1558 if (PageUptodate(page) || PageDirty(page)) {
1561 if (PageDirty(page))
1562 set_buffer_dirty(bh);
1563 if (PageUptodate(page))
1564 set_buffer_uptodate(bh);
1565 bh = bh->b_this_page;
1566 } while (bh != head);
1568 attach_page_buffers(page, head);
1569 spin_unlock(&page->mapping->private_lock);
1571 EXPORT_SYMBOL(create_empty_buffers);
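/*
 * Illustrative sketch: callers that need per-block state on a page which may
 * not have buffers yet (as __block_write_full_page() and
 * block_read_full_page() below do) create them first; inode and head are
 * assumed to be in scope in the caller:
 *
 *     if (!page_has_buffers(page))
 *         create_empty_buffers(page, 1 << inode->i_blkbits, 0);
 *     head = page_buffers(page);
 */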
1574 * We are taking a block for data and we don't want any output from any
1575 * buffer-cache aliases starting from return from that function and
1576 * until the moment when something will explicitly mark the buffer
1577 * dirty (hopefully that will not happen until we will free that block ;-)
1578 * We don't even need to mark it not-uptodate - nobody can expect
1579 * anything from a newly allocated buffer anyway. We used to use
1580 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1581 * don't want to mark the alias unmapped, for example - it would confuse
1582 * anyone who might pick it with bread() afterwards...
1584 * Also.. Note that bforget() doesn't lock the buffer. So there can
1585 * be writeout I/O going on against recently-freed buffers. We don't
1586 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1587 * only if we really need to. That happens here.
1589 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1591 struct buffer_head *old_bh;
1595 old_bh = __find_get_block_slow(bdev, block);
1597 clear_buffer_dirty(old_bh);
1598 wait_on_buffer(old_bh);
1599 clear_buffer_req(old_bh);
1603 EXPORT_SYMBOL(unmap_underlying_metadata);
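/*
 * Illustrative sketch (hypothetical myfs_get_block): when a get_block
 * implementation allocates a fresh block it sets BH_New on the result; the
 * generic callers in this file then run unmap_underlying_metadata() on the
 * new block number to kill any stale blockdev alias before it is written.
 * myfs_map_or_alloc() is a made-up mapping helper, and treating block 0 as
 * "hole" is purely for brevity:
 *
 *     static int myfs_get_block(struct inode *inode, sector_t iblock,
 *                               struct buffer_head *bh_result, int create)
 *     {
 *         int new = 0;
 *         sector_t phys = myfs_map_or_alloc(inode, iblock, create, &new);
 *
 *         if (!phys)
 *             return create ? -ENOSPC : 0;
 *         map_bh(bh_result, inode->i_sb, phys);
 *         if (new)
 *             set_buffer_new(bh_result);
 *         return 0;
 *     }
 */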
1606 * NOTE! All mapped/uptodate combinations are valid:
1608 * Mapped Uptodate Meaning
1610 * No No "unknown" - must do get_block()
1611 * No Yes "hole" - zero-filled
1612 * Yes No "allocated" - allocated on disk, not read in
1613 * Yes Yes "valid" - allocated and up-to-date in memory.
1615 * "Dirty" is valid only with the last case (mapped+uptodate).
1619 * While block_write_full_page is writing back the dirty buffers under
1620 * the page lock, whoever dirtied the buffers may decide to clean them
1621 * again at any time. We handle that by only looking at the buffer
1622 * state inside lock_buffer().
1624 * If block_write_full_page() is called for regular writeback
1625 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1626 * locked buffer. This only can happen if someone has written the buffer
1627 * directly, with submit_bh(). At the address_space level PageWriteback
1628 * prevents this contention from occurring.
1630 static int __block_write_full_page(struct inode *inode, struct page *page,
1631 get_block_t *get_block, struct writeback_control *wbc)
1635 sector_t last_block;
1636 struct buffer_head *bh, *head;
1637 const unsigned blocksize = 1 << inode->i_blkbits;
1638 int nr_underway = 0;
1640 BUG_ON(!PageLocked(page));
1642 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1644 if (!page_has_buffers(page)) {
1645 create_empty_buffers(page, blocksize,
1646 (1 << BH_Dirty)|(1 << BH_Uptodate));
1650 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1651 * here, and the (potentially unmapped) buffers may become dirty at
1652 * any time. If a buffer becomes dirty here after we've inspected it
1653 * then we just miss that fact, and the page stays dirty.
1655 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1656 * handle that here by just cleaning them.
1659 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1660 head = page_buffers(page);
1664 * Get all the dirty buffers mapped to disk addresses and
1665 * handle any aliases from the underlying blockdev's mapping.
1668 if (block > last_block) {
1670 * mapped buffers outside i_size will occur, because
1671 * this page can be outside i_size when there is a
1672 * truncate in progress.
1675 * The buffer was zeroed by block_write_full_page()
1677 clear_buffer_dirty(bh);
1678 set_buffer_uptodate(bh);
1679 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1680 WARN_ON(bh->b_size != blocksize);
1681 err = get_block(inode, block, bh, 1);
1684 if (buffer_new(bh)) {
1685 /* blockdev mappings never come here */
1686 clear_buffer_new(bh);
1687 unmap_underlying_metadata(bh->b_bdev,
1691 bh = bh->b_this_page;
1693 } while (bh != head);
1696 if (!buffer_mapped(bh))
1699 * If it's a fully non-blocking write attempt and we cannot
1700 * lock the buffer then redirty the page. Note that this can
1701 * potentially cause a busy-wait loop from pdflush and kswapd
1702 * activity, but those code paths have their own higher-level
1705 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1707 } else if (test_set_buffer_locked(bh)) {
1708 redirty_page_for_writepage(wbc, page);
1711 if (test_clear_buffer_dirty(bh)) {
1712 mark_buffer_async_write(bh);
1716 } while ((bh = bh->b_this_page) != head);
1719 * The page and its buffers are protected by PageWriteback(), so we can
1720 * drop the bh refcounts early.
1722 BUG_ON(PageWriteback(page));
1723 set_page_writeback(page);
1726 struct buffer_head *next = bh->b_this_page;
1727 if (buffer_async_write(bh)) {
1728 submit_bh(WRITE, bh);
1732 } while (bh != head);
1737 if (nr_underway == 0) {
1739 * The page was marked dirty, but the buffers were
1740 * clean. Someone wrote them back by hand with
1741 * ll_rw_block/submit_bh. A rare case.
1743 end_page_writeback(page);
1746 * The page and buffer_heads can be released at any time from
1754 * ENOSPC, or some other error. We may already have added some
1755 * blocks to the file, so we need to write these out to avoid
1756 * exposing stale data.
1757 * The page is currently locked and not marked for writeback
1760 /* Recovery: lock and submit the mapped buffers */
1762 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1764 mark_buffer_async_write(bh);
1767 * The buffer may have been set dirty during
1768 * attachment to a dirty page.
1770 clear_buffer_dirty(bh);
1772 } while ((bh = bh->b_this_page) != head);
1774 BUG_ON(PageWriteback(page));
1775 mapping_set_error(page->mapping, err);
1776 set_page_writeback(page);
1778 struct buffer_head *next = bh->b_this_page;
1779 if (buffer_async_write(bh)) {
1780 clear_buffer_dirty(bh);
1781 submit_bh(WRITE, bh);
1785 } while (bh != head);
1791 * If a page has any new buffers, zero them out here, and mark them uptodate
1792 * and dirty so they'll be written out (in order to prevent uninitialised
1793 * block data from leaking). And clear the new bit.
1795 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1797 unsigned int block_start, block_end;
1798 struct buffer_head *head, *bh;
1800 BUG_ON(!PageLocked(page));
1801 if (!page_has_buffers(page))
1804 bh = head = page_buffers(page);
1807 block_end = block_start + bh->b_size;
1809 if (buffer_new(bh)) {
1810 if (block_end > from && block_start < to) {
1811 if (!PageUptodate(page)) {
1812 unsigned start, size;
1814 start = max(from, block_start);
1815 size = min(to, block_end) - start;
1817 zero_user(page, start, size);
1818 set_buffer_uptodate(bh);
1821 clear_buffer_new(bh);
1822 mark_buffer_dirty(bh);
1826 block_start = block_end;
1827 bh = bh->b_this_page;
1828 } while (bh != head);
1830 EXPORT_SYMBOL(page_zero_new_buffers);
1832 static int __block_prepare_write(struct inode *inode, struct page *page,
1833 unsigned from, unsigned to, get_block_t *get_block)
1835 unsigned block_start, block_end;
1838 unsigned blocksize, bbits;
1839 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1841 BUG_ON(!PageLocked(page));
1842 BUG_ON(from > PAGE_CACHE_SIZE);
1843 BUG_ON(to > PAGE_CACHE_SIZE);
1846 blocksize = 1 << inode->i_blkbits;
1847 if (!page_has_buffers(page))
1848 create_empty_buffers(page, blocksize, 0);
1849 head = page_buffers(page);
1851 bbits = inode->i_blkbits;
1852 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1854 for(bh = head, block_start = 0; bh != head || !block_start;
1855 block++, block_start=block_end, bh = bh->b_this_page) {
1856 block_end = block_start + blocksize;
1857 if (block_end <= from || block_start >= to) {
1858 if (PageUptodate(page)) {
1859 if (!buffer_uptodate(bh))
1860 set_buffer_uptodate(bh);
1865 clear_buffer_new(bh);
1866 if (!buffer_mapped(bh)) {
1867 WARN_ON(bh->b_size != blocksize);
1868 err = get_block(inode, block, bh, 1);
1871 if (buffer_new(bh)) {
1872 unmap_underlying_metadata(bh->b_bdev,
1874 if (PageUptodate(page)) {
1875 clear_buffer_new(bh);
1876 set_buffer_uptodate(bh);
1877 mark_buffer_dirty(bh);
1880 if (block_end > to || block_start < from)
1881 zero_user_segments(page,
1887 if (PageUptodate(page)) {
1888 if (!buffer_uptodate(bh))
1889 set_buffer_uptodate(bh);
1892 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1893 !buffer_unwritten(bh) &&
1894 (block_start < from || block_end > to)) {
1895 ll_rw_block(READ, 1, &bh);
1900 * If we issued read requests - let them complete.
1902 while(wait_bh > wait) {
1903 wait_on_buffer(*--wait_bh);
1904 if (!buffer_uptodate(*wait_bh))
1908 page_zero_new_buffers(page, from, to);
1912 static int __block_commit_write(struct inode *inode, struct page *page,
1913 unsigned from, unsigned to)
1915 unsigned block_start, block_end;
1918 struct buffer_head *bh, *head;
1920 blocksize = 1 << inode->i_blkbits;
1922 for(bh = head = page_buffers(page), block_start = 0;
1923 bh != head || !block_start;
1924 block_start=block_end, bh = bh->b_this_page) {
1925 block_end = block_start + blocksize;
1926 if (block_end <= from || block_start >= to) {
1927 if (!buffer_uptodate(bh))
1930 set_buffer_uptodate(bh);
1931 mark_buffer_dirty(bh);
1933 clear_buffer_new(bh);
1937 * If this is a partial write which happened to make all buffers
1938 * uptodate then we can optimize away a bogus readpage() for
1939 * the next read(). Here we 'discover' whether the page went
1940 * uptodate as a result of this (potentially partial) write.
1943 SetPageUptodate(page);
1948 * block_write_begin takes care of the basic task of block allocation and
1949 * bringing partial write blocks uptodate first.
1951 * If *pagep is not NULL, then block_write_begin uses the locked page
1952 * at *pagep rather than allocating its own. In this case, the page will
1953 * not be unlocked or deallocated on failure.
1955 int block_write_begin(struct file *file, struct address_space *mapping,
1956 loff_t pos, unsigned len, unsigned flags,
1957 struct page **pagep, void **fsdata,
1958 get_block_t *get_block)
1960 struct inode *inode = mapping->host;
1964 unsigned start, end;
1967 index = pos >> PAGE_CACHE_SHIFT;
1968 start = pos & (PAGE_CACHE_SIZE - 1);
1974 page = __grab_cache_page(mapping, index);
1981 BUG_ON(!PageLocked(page));
1983 status = __block_prepare_write(inode, page, start, end, get_block);
1984 if (unlikely(status)) {
1985 ClearPageUptodate(page);
1989 page_cache_release(page);
1993 * prepare_write() may have instantiated a few blocks
1994 * outside i_size. Trim these off again. Don't need
1995 * i_size_read because we hold i_mutex.
1997 if (pos + len > inode->i_size)
1998 vmtruncate(inode, inode->i_size);
2006 EXPORT_SYMBOL(block_write_begin);
2008 int block_write_end(struct file *file, struct address_space *mapping,
2009 loff_t pos, unsigned len, unsigned copied,
2010 struct page *page, void *fsdata)
2012 struct inode *inode = mapping->host;
2015 start = pos & (PAGE_CACHE_SIZE - 1);
2017 if (unlikely(copied < len)) {
2019 * The buffers that were written will now be uptodate, so we
2020 * don't have to worry about a readpage reading them and
2021 * overwriting a partial write. However if we have encountered
2022 * a short write and only partially written into a buffer, it
2023 * will not be marked uptodate, so a readpage might come in and
2024 * destroy our partial write.
2026 * Do the simplest thing, and just treat any short write to a
2027 * non uptodate page as a zero-length write, and force the
2028 * caller to redo the whole thing.
2030 if (!PageUptodate(page))
2033 page_zero_new_buffers(page, start+copied, start+len);
2035 flush_dcache_page(page);
2037 /* This could be a short (even 0-length) commit */
2038 __block_commit_write(inode, page, start, start+copied);
2042 EXPORT_SYMBOL(block_write_end);
2044 int generic_write_end(struct file *file, struct address_space *mapping,
2045 loff_t pos, unsigned len, unsigned copied,
2046 struct page *page, void *fsdata)
2048 struct inode *inode = mapping->host;
2050 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2053 * No need to use i_size_read() here, the i_size
2054 * cannot change under us because we hold i_mutex.
2056 * But it's important to update i_size while still holding page lock:
2057 * page writeout could otherwise come in and zero beyond i_size.
2059 if (pos+copied > inode->i_size) {
2060 i_size_write(inode, pos+copied);
2061 mark_inode_dirty(inode);
2065 page_cache_release(page);
2069 EXPORT_SYMBOL(generic_write_end);
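/*
 * Illustrative sketch (hypothetical myfs): how a filesystem built on these
 * helpers typically wires up its buffered write path, reusing the
 * myfs_get_block sketch shown earlier:
 *
 *     static int myfs_write_begin(struct file *file, struct address_space *mapping,
 *                                 loff_t pos, unsigned len, unsigned flags,
 *                                 struct page **pagep, void **fsdata)
 *     {
 *         *pagep = NULL;
 *         return block_write_begin(file, mapping, pos, len, flags,
 *                                  pagep, fsdata, myfs_get_block);
 *     }
 *
 *     static const struct address_space_operations myfs_aops = {
 *         .write_begin = myfs_write_begin,
 *         .write_end   = generic_write_end,
 *     };
 */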
2072 * Generic "read page" function for block devices that have the normal
2073 * get_block functionality. This is most of the block device filesystems.
2074 * Reads the page asynchronously --- the unlock_buffer() and
2075 * set/clear_buffer_uptodate() functions propagate buffer state into the
2076 * page struct once IO has completed.
2078 int block_read_full_page(struct page *page, get_block_t *get_block)
2080 struct inode *inode = page->mapping->host;
2081 sector_t iblock, lblock;
2082 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2083 unsigned int blocksize;
2085 int fully_mapped = 1;
2087 BUG_ON(!PageLocked(page));
2088 blocksize = 1 << inode->i_blkbits;
2089 if (!page_has_buffers(page))
2090 create_empty_buffers(page, blocksize, 0);
2091 head = page_buffers(page);
2093 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2094 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2100 if (buffer_uptodate(bh))
2103 if (!buffer_mapped(bh)) {
2107 if (iblock < lblock) {
2108 WARN_ON(bh->b_size != blocksize);
2109 err = get_block(inode, iblock, bh, 0);
2113 if (!buffer_mapped(bh)) {
2114 zero_user(page, i * blocksize, blocksize);
2116 set_buffer_uptodate(bh);
2120 * get_block() might have updated the buffer
2123 if (buffer_uptodate(bh))
2127 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2130 SetPageMappedToDisk(page);
2134 * All buffers are uptodate - we can set the page uptodate
2135 * as well. But not if get_block() returned an error.
2137 if (!PageError(page))
2138 SetPageUptodate(page);
2143 /* Stage two: lock the buffers */
2144 for (i = 0; i < nr; i++) {
2147 mark_buffer_async_read(bh);
2151 * Stage 3: start the IO. Check for uptodateness
2152 * inside the buffer lock in case another process reading
2153 * the underlying blockdev brought it uptodate (the sct fix).
2155 for (i = 0; i < nr; i++) {
2157 if (buffer_uptodate(bh))
2158 end_buffer_async_read(bh, 1);
2160 submit_bh(READ, bh);
2165 /* utility function for filesystems that need to do work on expanding
2166 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2167 * deal with the hole.
2169 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2171 struct address_space *mapping = inode->i_mapping;
2174 unsigned long limit;
2178 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2179 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2180 send_sig(SIGXFSZ, current, 0);
2183 if (size > inode->i_sb->s_maxbytes)
2186 err = pagecache_write_begin(NULL, mapping, size, 0,
2187 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2192 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2199 int cont_expand_zero(struct file *file, struct address_space *mapping,
2200 loff_t pos, loff_t *bytes)
2202 struct inode *inode = mapping->host;
2203 unsigned blocksize = 1 << inode->i_blkbits;
2206 pgoff_t index, curidx;
2208 unsigned zerofrom, offset, len;
2211 index = pos >> PAGE_CACHE_SHIFT;
2212 offset = pos & ~PAGE_CACHE_MASK;
2214 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2215 zerofrom = curpos & ~PAGE_CACHE_MASK;
2216 if (zerofrom & (blocksize-1)) {
2217 *bytes |= (blocksize-1);
2220 len = PAGE_CACHE_SIZE - zerofrom;
2222 err = pagecache_write_begin(file, mapping, curpos, len,
2223 AOP_FLAG_UNINTERRUPTIBLE,
2227 zero_user(page, zerofrom, len);
2228 err = pagecache_write_end(file, mapping, curpos, len, len,
2236 /* page covers the boundary, find the boundary offset */
2237 if (index == curidx) {
2238 zerofrom = curpos & ~PAGE_CACHE_MASK;
2239 /* if we are going to expand the file, the last block will be filled */
2240 if (offset <= zerofrom) {
2243 if (zerofrom & (blocksize-1)) {
2244 *bytes |= (blocksize-1);
2247 len = offset - zerofrom;
2249 err = pagecache_write_begin(file, mapping, curpos, len,
2250 AOP_FLAG_UNINTERRUPTIBLE,
2254 zero_user(page, zerofrom, len);
2255 err = pagecache_write_end(file, mapping, curpos, len, len,
2267 * For moronic filesystems that do not allow holes in files.
2268 * We may have to extend the file.
2270 int cont_write_begin(struct file *file, struct address_space *mapping,
2271 loff_t pos, unsigned len, unsigned flags,
2272 struct page **pagep, void **fsdata,
2273 get_block_t *get_block, loff_t *bytes)
2275 struct inode *inode = mapping->host;
2276 unsigned blocksize = 1 << inode->i_blkbits;
2280 err = cont_expand_zero(file, mapping, pos, bytes);
2284 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2285 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2286 *bytes |= (blocksize-1);
2291 err = block_write_begin(file, mapping, pos, len,
2292 flags, pagep, fsdata, get_block);
2297 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2298 get_block_t *get_block)
2300 struct inode *inode = page->mapping->host;
2301 int err = __block_prepare_write(inode, page, from, to, get_block);
2303 ClearPageUptodate(page);
2307 int block_commit_write(struct page *page, unsigned from, unsigned to)
2309 struct inode *inode = page->mapping->host;
2310 __block_commit_write(inode,page,from,to);
2314 int generic_commit_write(struct file *file, struct page *page,
2315 unsigned from, unsigned to)
2317 struct inode *inode = page->mapping->host;
2318 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2319 __block_commit_write(inode,page,from,to);
2321 * No need to use i_size_read() here, the i_size
2322 * cannot change under us because we hold i_mutex.
2324 if (pos > inode->i_size) {
2325 i_size_write(inode, pos);
2326 mark_inode_dirty(inode);
2332 * block_page_mkwrite() is not allowed to change the file size as it gets
2333 * called from a page fault handler when a page is first dirtied. Hence we must
2334 * be careful to check for EOF conditions here. We set the page up correctly
2335 * for a written page which means we get ENOSPC checking when writing into
2336 * holes and correct delalloc and unwritten extent mapping on filesystems that
2337 * support these features.
2339 * We are not allowed to take the i_mutex here so we have to play games to
2340 * protect against truncate races as the page could now be beyond EOF. Because
2341 * vmtruncate() writes the inode size before removing pages, once we have the
2342 * page lock we can determine safely if the page is beyond EOF. If it is not
2343 * beyond EOF, then the page is guaranteed safe against truncation until we
2347 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2348 get_block_t get_block)
2350 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2356 size = i_size_read(inode);
2357 if ((page->mapping != inode->i_mapping) ||
2358 (page_offset(page) > size)) {
2359 /* page got truncated out from underneath us */
2363 /* page is wholly or partially inside EOF */
2364 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2365 end = size & ~PAGE_CACHE_MASK;
2367 end = PAGE_CACHE_SIZE;
2369 ret = block_prepare_write(page, 0, end, get_block);
2371 ret = block_commit_write(page, 0, end);
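/*
 * A hedged sketch of exposing block_page_mkwrite() through a filesystem's
 * vm_operations_struct.  "example_get_block" is an assumption; filesystems
 * with journalling may also need to open a transaction around the call.
 */
#if 0	/* illustrative sketch, not built */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, example_get_block);
}

static struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};
#endif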
2379 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2380 * immediately, while under the page lock. So it needs a special end_io
2381 * handler which does not touch the bh after unlocking it.
2383 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2385 __end_buffer_read_notouch(bh, uptodate);
2389 * Attach the singly-linked list of buffers created by nobh_write_begin to
2390 * the page (converting it to a circular linked list and taking care of the page
2393 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2395 struct buffer_head *bh;
2397 BUG_ON(!PageLocked(page));
2399 spin_lock(&page->mapping->private_lock);
2402 if (PageDirty(page))
2403 set_buffer_dirty(bh);
2404 if (!bh->b_this_page)
2405 bh->b_this_page = head;
2406 bh = bh->b_this_page;
2407 } while (bh != head);
2408 attach_page_buffers(page, head);
2409 spin_unlock(&page->mapping->private_lock);
2413 * On entry, the page is fully not uptodate.
2414 * On exit the page is fully uptodate in the areas outside (from,to)
2416 int nobh_write_begin(struct file *file, struct address_space *mapping,
2417 loff_t pos, unsigned len, unsigned flags,
2418 struct page **pagep, void **fsdata,
2419 get_block_t *get_block)
2421 struct inode *inode = mapping->host;
2422 const unsigned blkbits = inode->i_blkbits;
2423 const unsigned blocksize = 1 << blkbits;
2424 struct buffer_head *head, *bh;
2428 unsigned block_in_page;
2429 unsigned block_start, block_end;
2430 sector_t block_in_file;
2433 int is_mapped_to_disk = 1;
2435 index = pos >> PAGE_CACHE_SHIFT;
2436 from = pos & (PAGE_CACHE_SIZE - 1);
2439 page = __grab_cache_page(mapping, index);
2445 if (page_has_buffers(page)) {
2447 page_cache_release(page);
2449 return block_write_begin(file, mapping, pos, len, flags, pagep,
2453 if (PageMappedToDisk(page))
2457 * Allocate buffers so that we can keep track of state, and potentially
2458 * attach them to the page if an error occurs. In the common case of
2459 * no error, they will just be freed again without ever being attached
2460 * to the page (which is all OK, because we're under the page lock).
2462 * Be careful: the buffer linked list is a NULL terminated one, rather
2463 * than the circular one we're used to.
2465 head = alloc_page_buffers(page, blocksize, 0);
2471 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2474 * We loop across all blocks in the page, whether or not they are
2475 * part of the affected region. This is so we can discover if the
2476 * page is fully mapped-to-disk.
2478 for (block_start = 0, block_in_page = 0, bh = head;
2479 block_start < PAGE_CACHE_SIZE;
2480 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2483 block_end = block_start + blocksize;
2486 if (block_start >= to)
2488 ret = get_block(inode, block_in_file + block_in_page,
2492 if (!buffer_mapped(bh))
2493 is_mapped_to_disk = 0;
2495 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2496 if (PageUptodate(page)) {
2497 set_buffer_uptodate(bh);
2500 if (buffer_new(bh) || !buffer_mapped(bh)) {
2501 zero_user_segments(page, block_start, from,
2505 if (buffer_uptodate(bh))
2506 continue; /* reiserfs does this */
2507 if (block_start < from || block_end > to) {
2509 bh->b_end_io = end_buffer_read_nobh;
2510 submit_bh(READ, bh);
2517 * The page is locked, so these buffers are protected from
2518 * any VM or truncate activity. Hence we don't need to care
2519 * for the buffer_head refcounts.
2521 for (bh = head; bh; bh = bh->b_this_page) {
2523 if (!buffer_uptodate(bh))
2530 if (is_mapped_to_disk)
2531 SetPageMappedToDisk(page);
2533 *fsdata = head; /* to be released by nobh_write_end */
2540 * Error recovery is a bit difficult. We need to zero out blocks that
2541 * were newly allocated, and dirty them to ensure they get written out.
2542 * Buffers need to be attached to the page at this point, otherwise
2543 * the handling of potential IO errors during writeout would be hard
2544 * (could try doing synchronous writeout, but what if that fails too?)
2546 attach_nobh_buffers(page, head);
2547 page_zero_new_buffers(page, from, to);
2551 page_cache_release(page);
2554 if (pos + len > inode->i_size)
2555 vmtruncate(inode, inode->i_size);
2559 EXPORT_SYMBOL(nobh_write_begin);
2561 int nobh_write_end(struct file *file, struct address_space *mapping,
2562 loff_t pos, unsigned len, unsigned copied,
2563 struct page *page, void *fsdata)
2565 struct inode *inode = page->mapping->host;
2566 struct buffer_head *head = fsdata;
2567 struct buffer_head *bh;
2569 if (!PageMappedToDisk(page)) {
2570 if (unlikely(copied < len) && !page_has_buffers(page))
2571 attach_nobh_buffers(page, head);
2572 if (page_has_buffers(page))
2573 return generic_write_end(file, mapping, pos, len,
2574 copied, page, fsdata);
2577 SetPageUptodate(page);
2578 set_page_dirty(page);
2579 if (pos+copied > inode->i_size) {
2580 i_size_write(inode, pos+copied);
2581 mark_inode_dirty(inode);
2585 page_cache_release(page);
2589 head = head->b_this_page;
2590 free_buffer_head(bh);
2595 EXPORT_SYMBOL(nobh_write_end);
2598 * nobh_writepage() - based on block_write_full_page() except
2599 * that it tries to operate without attaching bufferheads to
2602 int nobh_writepage(struct page *page, get_block_t *get_block,
2603 struct writeback_control *wbc)
2605 struct inode * const inode = page->mapping->host;
2606 loff_t i_size = i_size_read(inode);
2607 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2611 /* Is the page fully inside i_size? */
2612 if (page->index < end_index)
2615 /* Is the page fully outside i_size? (truncate in progress) */
2616 offset = i_size & (PAGE_CACHE_SIZE-1);
2617 if (page->index >= end_index+1 || !offset) {
2619 * The page may have dirty, unmapped buffers. For example,
2620 * they may have been added in ext3_writepage(). Make them
2621 * freeable here, so the page does not leak.
2624 /* Not really sure about this - do we need this ? */
2625 if (page->mapping->a_ops->invalidatepage)
2626 page->mapping->a_ops->invalidatepage(page, offset);
2629 return 0; /* don't care */
2633 * The page straddles i_size. It must be zeroed out on each and every
2634 * writepage invocation because it may be mmapped. "A file is mapped
2635 * in multiples of the page size. For a file that is not a multiple of
2636 * the page size, the remaining memory is zeroed when mapped, and
2637 * writes to that region are not written out to the file."
2639 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2641 ret = mpage_writepage(page, get_block, wbc);
2643 ret = __block_write_full_page(inode, page, get_block, wbc);
2646 EXPORT_SYMBOL(nobh_writepage);
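/*
 * A hedged sketch of how a filesystem avoiding buffer_heads might wire the
 * nobh helpers into its address_space operations.  "example_get_block" and
 * the example_ wrappers are assumptions; nobh_write_end() can be used
 * directly because its signature matches ->write_end.
 */
#if 0	/* illustrative sketch, not built */
static int example_nobh_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				example_get_block);
}

static int example_nobh_writepage(struct page *page,
		struct writeback_control *wbc)
{
	return nobh_writepage(page, example_get_block, wbc);
}

static const struct address_space_operations example_nobh_aops = {
	.readpage	= example_readpage,	/* see the readpage sketch above */
	.writepage	= example_nobh_writepage,
	.write_begin	= example_nobh_write_begin,
	.write_end	= nobh_write_end,
};
#endif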
2648 int nobh_truncate_page(struct address_space *mapping,
2649 loff_t from, get_block_t *get_block)
2651 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2652 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2655 unsigned length, pos;
2656 struct inode *inode = mapping->host;
2658 struct buffer_head map_bh;
2661 blocksize = 1 << inode->i_blkbits;
2662 length = offset & (blocksize - 1);
2664 /* Block boundary? Nothing to do */
2668 length = blocksize - length;
2669 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2671 page = grab_cache_page(mapping, index);
2676 if (page_has_buffers(page)) {
2679 page_cache_release(page);
2680 return block_truncate_page(mapping, from, get_block);
2683 /* Find the buffer that contains "offset" */
2685 while (offset >= pos) {
2690 err = get_block(inode, iblock, &map_bh, 0);
2693 /* unmapped? It's a hole - nothing to do */
2694 if (!buffer_mapped(&map_bh))
2697 /* Ok, it's mapped. Make sure it's up-to-date */
2698 if (!PageUptodate(page)) {
2699 err = mapping->a_ops->readpage(NULL, page);
2701 page_cache_release(page);
2705 if (!PageUptodate(page)) {
2709 if (page_has_buffers(page))
2712 zero_user(page, offset, length);
2713 set_page_dirty(page);
2718 page_cache_release(page);
2722 EXPORT_SYMBOL(nobh_truncate_page);
2724 int block_truncate_page(struct address_space *mapping,
2725 loff_t from, get_block_t *get_block)
2727 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2728 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2731 unsigned length, pos;
2732 struct inode *inode = mapping->host;
2734 struct buffer_head *bh;
2737 blocksize = 1 << inode->i_blkbits;
2738 length = offset & (blocksize - 1);
2740 /* Block boundary? Nothing to do */
2744 length = blocksize - length;
2745 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2747 page = grab_cache_page(mapping, index);
2752 if (!page_has_buffers(page))
2753 create_empty_buffers(page, blocksize, 0);
2755 /* Find the buffer that contains "offset" */
2756 bh = page_buffers(page);
2758 while (offset >= pos) {
2759 bh = bh->b_this_page;
2765 if (!buffer_mapped(bh)) {
2766 WARN_ON(bh->b_size != blocksize);
2767 err = get_block(inode, iblock, bh, 0);
2770 /* unmapped? It's a hole - nothing to do */
2771 if (!buffer_mapped(bh))
2775 /* Ok, it's mapped. Make sure it's up-to-date */
2776 if (PageUptodate(page))
2777 set_buffer_uptodate(bh);
2779 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2781 ll_rw_block(READ, 1, &bh);
2783 /* Uhhuh. Read error. Complain and punt. */
2784 if (!buffer_uptodate(bh))
2788 zero_user(page, offset, length);
2789 mark_buffer_dirty(bh);
2794 page_cache_release(page);
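/*
 * A hedged sketch of the usual block_truncate_page() caller: a truncate
 * path zeroing the tail of the final, partial block at the new end of file.
 * "example_get_block" is an assumption and the block-freeing work that a
 * real truncate would do afterwards is omitted.
 */
#if 0	/* illustrative sketch, not built */
static void example_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
			    example_get_block);
	/* ... then release the data blocks beyond the new i_size ... */
}
#endif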
2800 * The generic ->writepage function for buffer-backed address_spaces
2802 int block_write_full_page(struct page *page, get_block_t *get_block,
2803 struct writeback_control *wbc)
2805 struct inode * const inode = page->mapping->host;
2806 loff_t i_size = i_size_read(inode);
2807 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2810 /* Is the page fully inside i_size? */
2811 if (page->index < end_index)
2812 return __block_write_full_page(inode, page, get_block, wbc);
2814 /* Is the page fully outside i_size? (truncate in progress) */
2815 offset = i_size & (PAGE_CACHE_SIZE-1);
2816 if (page->index >= end_index+1 || !offset) {
2818 * The page may have dirty, unmapped buffers. For example,
2819 * they may have been added in ext3_writepage(). Make them
2820 * freeable here, so the page does not leak.
2822 do_invalidatepage(page, 0);
2824 return 0; /* don't care */
2828 * The page straddles i_size. It must be zeroed out on each and every
2829 * writepage invocation because it may be mmapped. "A file is mapped
2830 * in multiples of the page size. For a file that is not a multiple of
2831 * the page size, the remaining memory is zeroed when mapped, and
2832 * writes to that region are not written out to the file."
2834 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2835 return __block_write_full_page(inode, page, get_block, wbc);
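/*
 * A hedged sketch of the common ->writepage wrapper around
 * block_write_full_page(); "example_get_block" is an assumption.
 */
#if 0	/* illustrative sketch, not built */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}
#endif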
2838 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2839 get_block_t *get_block)
2841 struct buffer_head tmp;
2842 struct inode *inode = mapping->host;
2845 tmp.b_size = 1 << inode->i_blkbits;
2846 get_block(inode, block, &tmp, 0);
2847 return tmp.b_blocknr;
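/*
 * A hedged sketch of the typical ->bmap wrapper around generic_block_bmap();
 * "example_get_block" is an assumption.
 */
#if 0	/* illustrative sketch, not built */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}
#endif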
2850 static void end_bio_bh_io_sync(struct bio *bio, int err)
2852 struct buffer_head *bh = bio->bi_private;
2854 if (err == -EOPNOTSUPP) {
2855 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2856 set_bit(BH_Eopnotsupp, &bh->b_state);
2859 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2863 int submit_bh(int rw, struct buffer_head * bh)
2868 BUG_ON(!buffer_locked(bh));
2869 BUG_ON(!buffer_mapped(bh));
2870 BUG_ON(!bh->b_end_io);
2872 if (buffer_ordered(bh) && (rw == WRITE))
2876 * Only clear out a write error when rewriting, should this
2877 * include WRITE_SYNC as well?
2879 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2880 clear_buffer_write_io_error(bh);
2883 * from here on down, it's all bio -- do the initial mapping,
2884 * submit_bio -> generic_make_request may further map this bio around
2886 bio = bio_alloc(GFP_NOIO, 1);
2888 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2889 bio->bi_bdev = bh->b_bdev;
2890 bio->bi_io_vec[0].bv_page = bh->b_page;
2891 bio->bi_io_vec[0].bv_len = bh->b_size;
2892 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2896 bio->bi_size = bh->b_size;
2898 bio->bi_end_io = end_bio_bh_io_sync;
2899 bio->bi_private = bh;
2902 submit_bio(rw, bio);
2904 if (bio_flagged(bio, BIO_EOPNOTSUPP))
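/*
 * A hedged sketch of driving submit_bh() directly for one synchronous read.
 * The buffer is assumed to be mapped already (b_bdev, b_blocknr and b_size
 * set); the extra reference taken here is dropped by end_buffer_read_sync().
 * "example_read_bh_sync" is an assumption, not an existing helper.
 */
#if 0	/* illustrative sketch, not built */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);			/* dropped by end_buffer_read_sync */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif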
2912 * ll_rw_block: low-level access to block devices (DEPRECATED)
2913 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2914 * @nr: number of &struct buffer_heads in the array
2915 * @bhs: array of pointers to &struct buffer_head
2917 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2918 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2919 * option, %SWRITE, is like %WRITE except that we make sure the *current* data
2920 * in the buffers is sent to disk. The fourth option, %READA, is described in
2921 * the documentation for generic_make_request(), which ll_rw_block() calls.
2923 * This function drops any buffer that it cannot get a lock on (with the
2924 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2925 * clean when doing a write request, and any buffer that appears to be
2926 * up-to-date when doing a read request. Further, it marks as clean the buffers that
2927 * are processed for writing (the buffer cache won't assume that they are
2928 * actually clean until the buffer gets unlocked).
2930 * ll_rw_block sets b_end_io to a simple completion handler that marks
2931 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2934 * All of the buffers must be for the same device, and their size must be a
2935 * multiple of the current approved size for the device.
2937 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2941 for (i = 0; i < nr; i++) {
2942 struct buffer_head *bh = bhs[i];
2946 else if (test_set_buffer_locked(bh))
2949 if (rw == WRITE || rw == SWRITE) {
2950 if (test_clear_buffer_dirty(bh)) {
2951 bh->b_end_io = end_buffer_write_sync;
2953 submit_bh(WRITE, bh);
2957 if (!buffer_uptodate(bh)) {
2958 bh->b_end_io = end_buffer_read_sync;
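/*
 * A hedged sketch of a typical ll_rw_block() caller: start reads on several
 * buffers, then wait for each and check the result.  "example_read_many"
 * and the caller-supplied array are assumptions.
 */
#if 0	/* illustrative sketch, not built */
static int example_read_many(struct buffer_head *bhs[], int nr)
{
	int i;

	ll_rw_block(READ, nr, bhs);
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			return -EIO;
	}
	return 0;
}
#endif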
2969 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2970 * and then start new I/O and then wait upon it. The caller must have a ref on
2973 int sync_dirty_buffer(struct buffer_head *bh)
2977 WARN_ON(atomic_read(&bh->b_count) < 1);
2979 if (test_clear_buffer_dirty(bh)) {
2981 bh->b_end_io = end_buffer_write_sync;
2982 ret = submit_bh(WRITE, bh);
2984 if (buffer_eopnotsupp(bh)) {
2985 clear_buffer_eopnotsupp(bh);
2988 if (!ret && !buffer_uptodate(bh))
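/*
 * A hedged sketch of the common "dirty it, then force it out" pattern that
 * sync_dirty_buffer() exists for, e.g. when updating on-disk metadata such
 * as a superblock.  "example_write_metadata_bh" is an assumption.
 */
#if 0	/* illustrative sketch, not built */
static int example_write_metadata_bh(struct buffer_head *bh)
{
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* submits and waits for the write */
}
#endif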
2997 * try_to_free_buffers() checks if all the buffers on this particular page
2998 * are unused, and releases them if so.
3000 * Exclusion against try_to_free_buffers may be obtained by either
3001 * locking the page or by holding its mapping's private_lock.
3003 * If the page is dirty but all the buffers are clean then we need to
3004 * be sure to mark the page clean as well. This is because the page
3005 * may be against a block device, and a later reattachment of buffers
3006 * to a dirty page will set *all* buffers dirty. Which would corrupt
3007 * filesystem data on the same device.
3009 * The same applies to regular filesystem pages: if all the buffers are
3010 * clean then we set the page clean and proceed. To do that, we require
3011 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3014 * try_to_free_buffers() is non-blocking.
3016 static inline int buffer_busy(struct buffer_head *bh)
3018 return atomic_read(&bh->b_count) |
3019 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3023 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3025 struct buffer_head *head = page_buffers(page);
3026 struct buffer_head *bh;
3030 if (buffer_write_io_error(bh) && page->mapping)
3031 set_bit(AS_EIO, &page->mapping->flags);
3032 if (buffer_busy(bh))
3034 bh = bh->b_this_page;
3035 } while (bh != head);
3038 struct buffer_head *next = bh->b_this_page;
3040 if (bh->b_assoc_map)
3041 __remove_assoc_queue(bh);
3043 } while (bh != head);
3044 *buffers_to_free = head;
3045 __clear_page_buffers(page);
3051 int try_to_free_buffers(struct page *page)
3053 struct address_space * const mapping = page->mapping;
3054 struct buffer_head *buffers_to_free = NULL;
3057 BUG_ON(!PageLocked(page));
3058 if (PageWriteback(page))
3061 if (mapping == NULL) { /* can this still happen? */
3062 ret = drop_buffers(page, &buffers_to_free);
3066 spin_lock(&mapping->private_lock);
3067 ret = drop_buffers(page, &buffers_to_free);
3070 * If the filesystem writes its buffers by hand (eg ext3)
3071 * then we can have clean buffers against a dirty page. We
3072 * clean the page here; otherwise the VM will never notice
3073 * that the filesystem did any IO at all.
3075 * Also, during truncate, discard_buffer will have marked all
3076 * the page's buffers clean. We discover that here and clean
3079 * private_lock must be held over this entire operation in order
3080 * to synchronise against __set_page_dirty_buffers and prevent the
3081 * dirty bit from being lost.
3084 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3085 spin_unlock(&mapping->private_lock);
3087 if (buffers_to_free) {
3088 struct buffer_head *bh = buffers_to_free;
3091 struct buffer_head *next = bh->b_this_page;
3092 free_buffer_head(bh);
3094 } while (bh != buffers_to_free);
3098 EXPORT_SYMBOL(try_to_free_buffers);
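/*
 * A hedged sketch of a trivial ->releasepage: a filesystem with no private
 * per-page state of its own can simply forward to try_to_free_buffers(),
 * which is also what the VM falls back to when no ->releasepage is defined.
 * "example_releasepage" is an assumption.
 */
#if 0	/* illustrative sketch, not built */
static int example_releasepage(struct page *page, gfp_t gfp)
{
	return try_to_free_buffers(page);
}
#endif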
3100 void block_sync_page(struct page *page)
3102 struct address_space *mapping;
3105 mapping = page_mapping(page);
3107 blk_run_backing_dev(mapping->backing_dev_info, page);
3111 * There are no bdflush tunables left. But distributions are
3112 * still running obsolete flush daemons, so we terminate them here.
3114 * Use of bdflush() is deprecated and will be removed in a future kernel.
3115 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3117 asmlinkage long sys_bdflush(int func, long data)
3119 static int msg_count;
3121 if (!capable(CAP_SYS_ADMIN))
3124 if (msg_count < 5) {
3127 "warning: process `%s' used the obsolete bdflush"
3128 " system call\n", current->comm);
3129 printk(KERN_INFO "Fix your initscripts?\n");
3138 * Buffer-head allocation
3140 static struct kmem_cache *bh_cachep;
3143 * Once the number of bh's in the machine exceeds this level, we start
3144 * stripping them in writeback.
3146 static int max_buffer_heads;
3148 int buffer_heads_over_limit;
3150 struct bh_accounting {
3151 int nr; /* Number of live bh's */
3152 int ratelimit; /* Limit cacheline bouncing */
3155 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3157 static void recalc_bh_state(void)
3162 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3164 __get_cpu_var(bh_accounting).ratelimit = 0;
3165 for_each_online_cpu(i)
3166 tot += per_cpu(bh_accounting, i).nr;
3167 buffer_heads_over_limit = (tot > max_buffer_heads);
3170 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3172 struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
3173 set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
3175 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3176 get_cpu_var(bh_accounting).nr++;
3178 put_cpu_var(bh_accounting);
3182 EXPORT_SYMBOL(alloc_buffer_head);
3184 void free_buffer_head(struct buffer_head *bh)
3186 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3187 kmem_cache_free(bh_cachep, bh);
3188 get_cpu_var(bh_accounting).nr--;
3190 put_cpu_var(bh_accounting);
3192 EXPORT_SYMBOL(free_buffer_head);
3194 static void buffer_exit_cpu(int cpu)
3197 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3199 for (i = 0; i < BH_LRU_SIZE; i++) {
3203 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3204 per_cpu(bh_accounting, cpu).nr = 0;
3205 put_cpu_var(bh_accounting);
3208 static int buffer_cpu_notify(struct notifier_block *self,
3209 unsigned long action, void *hcpu)
3211 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3212 buffer_exit_cpu((unsigned long)hcpu);
3217 * bh_uptodate_or_lock: Test whether the buffer is uptodate
3218 * @bh: struct buffer_head
3220 * Return true if the buffer is up-to-date; otherwise return false
3221 * with the buffer locked.
3223 int bh_uptodate_or_lock(struct buffer_head *bh)
3225 if (!buffer_uptodate(bh)) {
3227 if (!buffer_uptodate(bh))
3233 EXPORT_SYMBOL(bh_uptodate_or_lock);
3236 * bh_submit_read: Submit a locked buffer for reading
3237 * @bh: struct buffer_head
3239 * Returns zero on success and -EIO on error.
3241 int bh_submit_read(struct buffer_head *bh)
3243 BUG_ON(!buffer_locked(bh));
3245 if (buffer_uptodate(bh)) {
3251 bh->b_end_io = end_buffer_read_sync;
3252 submit_bh(READ, bh);
3254 if (buffer_uptodate(bh))
3258 EXPORT_SYMBOL(bh_submit_read);
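/*
 * A hedged sketch of the intended pairing of bh_uptodate_or_lock() and
 * bh_submit_read(): skip the I/O when the buffer is already valid, otherwise
 * read it in with the buffer already locked.  "example_read_bh" is an
 * assumption.
 */
#if 0	/* illustrative sketch, not built */
static int example_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate */
	return bh_submit_read(bh);	/* bh is locked here; 0 or -EIO */
}
#endif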
3261 init_buffer_head(struct kmem_cache *cachep, void *data)
3263 struct buffer_head *bh = data;
3265 memset(bh, 0, sizeof(*bh));
3266 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3269 void __init buffer_init(void)
3273 bh_cachep = kmem_cache_create("buffer_head",
3274 sizeof(struct buffer_head), 0,
3275 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3280 * Limit the bh occupancy to 10% of ZONE_NORMAL
3282 nrpages = (nr_free_buffer_pages() * 10) / 100;
3283 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3284 hotcpu_notifier(buffer_cpu_notify, 0);
3287 EXPORT_SYMBOL(__bforget);
3288 EXPORT_SYMBOL(__brelse);
3289 EXPORT_SYMBOL(__wait_on_buffer);
3290 EXPORT_SYMBOL(block_commit_write);
3291 EXPORT_SYMBOL(block_prepare_write);
3292 EXPORT_SYMBOL(block_page_mkwrite);
3293 EXPORT_SYMBOL(block_read_full_page);
3294 EXPORT_SYMBOL(block_sync_page);
3295 EXPORT_SYMBOL(block_truncate_page);
3296 EXPORT_SYMBOL(block_write_full_page);
3297 EXPORT_SYMBOL(cont_write_begin);
3298 EXPORT_SYMBOL(end_buffer_read_sync);
3299 EXPORT_SYMBOL(end_buffer_write_sync);
3300 EXPORT_SYMBOL(file_fsync);
3301 EXPORT_SYMBOL(fsync_bdev);
3302 EXPORT_SYMBOL(generic_block_bmap);
3303 EXPORT_SYMBOL(generic_commit_write);
3304 EXPORT_SYMBOL(generic_cont_expand_simple);
3305 EXPORT_SYMBOL(init_buffer);
3306 EXPORT_SYMBOL(invalidate_bdev);
3307 EXPORT_SYMBOL(ll_rw_block);
3308 EXPORT_SYMBOL(mark_buffer_dirty);
3309 EXPORT_SYMBOL(submit_bh);
3310 EXPORT_SYMBOL(sync_dirty_buffer);
3311 EXPORT_SYMBOL(unlock_buffer);