4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
54 bh->b_end_io = handler;
55 bh->b_private = private;
58 static int sync_buffer(void *word)
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
67 blk_run_address_space(bd->bd_inode->i_mapping);
72 void fastcall __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void fastcall unlock_buffer(struct buffer_head *bh)
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
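
/*
 * Usage sketch (editorial example, not part of buffer.c): the typical way to
 * read a buffer synchronously is to lock it, submit the I/O with a completion
 * handler that unlocks it, and then wait for the lock to drop again.  This is
 * essentially what __bread_slow() later in this file does; the function name
 * here is made up for illustration.
 */
static int __attribute__((unused)) example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);				/* end_buffer_read_sync() drops this ref */
	bh->b_end_io = end_buffer_read_sync;	/* unlocks the buffer on completion */
	submit_bh(READ, bh);
	wait_on_buffer(bh);			/* block until the buffer is unlocked */
	return buffer_uptodate(bh) ? 0 : -EIO;
}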
97 __clear_page_buffers(struct page *page)
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
104 static void buffer_io_error(struct buffer_head *bh)
106 char b[BDEVNAME_SIZE];
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
114 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
120 set_buffer_uptodate(bh);
122 /* This happens, due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
131 char b[BDEVNAME_SIZE];
134 set_buffer_uptodate(bh);
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
138 printk(KERN_WARNING "lost page write due to "
140 bdevname(bh->b_bdev, b));
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
153 int sync_blockdev(struct block_device *bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
161 EXPORT_SYMBOL(sync_blockdev);
164 * Write out and wait upon all dirty data associated with this
165 * superblock. Filesystem data as well as the underlying block
166 * device. Takes the superblock lock.
168 int fsync_super(struct super_block *sb)
170 sync_inodes_sb(sb, 0);
173 if (sb->s_dirt && sb->s_op->write_super)
174 sb->s_op->write_super(sb);
176 if (sb->s_op->sync_fs)
177 sb->s_op->sync_fs(sb, 1);
178 sync_blockdev(sb->s_bdev);
179 sync_inodes_sb(sb, 1);
181 return sync_blockdev(sb->s_bdev);
185 * Write out and wait upon all dirty data associated with this
186 * device. Filesystem data as well as the underlying block
187 * device. Takes the superblock lock.
189 int fsync_bdev(struct block_device *bdev)
191 struct super_block *sb = get_super(bdev);
193 int res = fsync_super(sb);
197 return sync_blockdev(bdev);
201 * freeze_bdev -- lock a filesystem and force it into a consistent state
202 * @bdev: blockdevice to lock
204 * This takes the block device bd_mount_sem to make sure no new mounts
205 * happen on bdev until thaw_bdev() is called.
206 * If a superblock is found on this device, we take the s_umount semaphore
207 * on it to make sure nobody unmounts until the snapshot creation is done.
209 struct super_block *freeze_bdev(struct block_device *bdev)
211 struct super_block *sb;
213 down(&bdev->bd_mount_sem);
214 sb = get_super(bdev);
215 if (sb && !(sb->s_flags & MS_RDONLY)) {
216 sb->s_frozen = SB_FREEZE_WRITE;
219 sync_inodes_sb(sb, 0);
223 if (sb->s_dirt && sb->s_op->write_super)
224 sb->s_op->write_super(sb);
227 if (sb->s_op->sync_fs)
228 sb->s_op->sync_fs(sb, 1);
230 sync_blockdev(sb->s_bdev);
231 sync_inodes_sb(sb, 1);
233 sb->s_frozen = SB_FREEZE_TRANS;
236 sync_blockdev(sb->s_bdev);
238 if (sb->s_op->write_super_lockfs)
239 sb->s_op->write_super_lockfs(sb);
243 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
245 EXPORT_SYMBOL(freeze_bdev);
248 * thaw_bdev -- unlock filesystem
249 * @bdev: blockdevice to unlock
250 * @sb: associated superblock
252 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
254 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
257 BUG_ON(sb->s_bdev != bdev);
259 if (sb->s_op->unlockfs)
260 sb->s_op->unlockfs(sb);
261 sb->s_frozen = SB_UNFROZEN;
263 wake_up(&sb->s_wait_unfrozen);
267 up(&bdev->bd_mount_sem);
269 EXPORT_SYMBOL(thaw_bdev);
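
/*
 * Usage sketch (editorial example): a snapshot or device-mapper style caller
 * brackets its work with freeze_bdev()/thaw_bdev().  The block device is
 * assumed to be already opened by the caller; the function name is made up.
 */
static void __attribute__((unused)) example_snapshot_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);		/* filesystem quiesced and flushed */
	/* ... take the block-level snapshot here ... */
	thaw_bdev(bdev, sb);		/* sb may be NULL if nothing was mounted */
}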
272 * sync everything. Start out by waking pdflush, because that writes back
273 * all queues in parallel.
275 static void do_sync(unsigned long wait)
278 sync_inodes(0); /* All mappings, inodes and their blockdevs */
280 sync_supers(); /* Write the superblocks */
281 sync_filesystems(0); /* Start syncing the filesystems */
282 sync_filesystems(wait); /* Waitingly sync the filesystems */
283 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
285 printk("Emergency Sync complete\n");
286 if (unlikely(laptop_mode))
287 laptop_sync_completion();
290 asmlinkage long sys_sync(void)
296 void emergency_sync(void)
298 pdflush_operation(do_sync, 0);
302 * Generic function to fsync a file.
304 * filp may be NULL if called via the msync of a vma.
307 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
309 struct inode * inode = dentry->d_inode;
310 struct super_block * sb;
313 /* sync the inode to buffers */
314 ret = write_inode_now(inode, 0);
316 /* sync the superblock to buffers */
319 if (sb->s_op->write_super)
320 sb->s_op->write_super(sb);
323 /* .. finally sync the buffers to disk */
324 err = sync_blockdev(sb->s_bdev);
330 static long do_fsync(unsigned int fd, int datasync)
333 struct address_space *mapping;
342 if (!file->f_op || !file->f_op->fsync) {
343 /* Why? We can still call filemap_fdatawrite */
347 mapping = file->f_mapping;
349 current->flags |= PF_SYNCWRITE;
350 ret = filemap_fdatawrite(mapping);
353 * We need to protect against concurrent writers,
354 * which could cause livelocks in fsync_buffers_list
356 mutex_lock(&mapping->host->i_mutex);
357 err = file->f_op->fsync(file, file->f_dentry, datasync);
360 mutex_unlock(&mapping->host->i_mutex);
361 err = filemap_fdatawait(mapping);
364 current->flags &= ~PF_SYNCWRITE;
372 asmlinkage long sys_fsync(unsigned int fd)
374 return do_fsync(fd, 0);
377 asmlinkage long sys_fdatasync(unsigned int fd)
379 return do_fsync(fd, 1);
383 * Various filesystems appear to want __find_get_block to be non-blocking.
384 * But it's the page lock which protects the buffers. To get around this,
385 * we get exclusion from try_to_free_buffers with the blockdev mapping's
388 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
389 * may be quite high. This code could TryLock the page, and if that
390 * succeeds, there is no need to take private_lock. (But if
391 * private_lock is contended then so is mapping->tree_lock).
393 static struct buffer_head *
394 __find_get_block_slow(struct block_device *bdev, sector_t block)
396 struct inode *bd_inode = bdev->bd_inode;
397 struct address_space *bd_mapping = bd_inode->i_mapping;
398 struct buffer_head *ret = NULL;
400 struct buffer_head *bh;
401 struct buffer_head *head;
405 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
406 page = find_get_page(bd_mapping, index);
410 spin_lock(&bd_mapping->private_lock);
411 if (!page_has_buffers(page))
413 head = page_buffers(page);
416 if (bh->b_blocknr == block) {
421 if (!buffer_mapped(bh))
423 bh = bh->b_this_page;
424 } while (bh != head);
426 /* we might be here because some of the buffers on this page are
427 * not mapped. This is due to various races between
428 * file io on the block device and getblk. It gets dealt with
429 * elsewhere, don't buffer_error if we had some unmapped buffers
432 printk("__find_get_block_slow() failed. "
433 "block=%llu, b_blocknr=%llu\n",
434 (unsigned long long)block, (unsigned long long)bh->b_blocknr);
435 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
436 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
439 spin_unlock(&bd_mapping->private_lock);
440 page_cache_release(page);
445 /* If invalidate_buffers() will trash dirty buffers, it means some kind
446 of fs corruption is going on. Trashing dirty data always implies losing
447 information that was supposed to be just stored on the physical layer
450 Thus invalidate_buffers in general usage is not allowed to trash
451 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
452 be preserved. These buffers are simply skipped.
454 We also skip buffers which are still in use. For example this can
455 happen if a userspace program is reading the block device.
457 NOTE: In the case where the user removes a removable-media disk even though
458 there's still dirty data not synced on disk (due to a bug in the device driver
459 or due to an error of the user), by not destroying the dirty buffers we could
460 generate corruption also on the next media inserted, thus a parameter is
461 necessary to handle this case in the most safe way possible (trying
462 to not corrupt also the new disk inserted with the data belonging to
463 the old now corrupted disk). Also for the ramdisk the natural thing
464 to do in order to release the ramdisk memory is to destroy dirty buffers.
466 These are two special cases. Normal usage implies that the device driver
467 issues a sync on the device (without waiting for I/O completion) and
468 then an invalidate_buffers call that doesn't trash dirty buffers.
470 For handling cache coherency with the blkdev pagecache the 'update' case
471 has been introduced. It is needed to re-read from disk any pinned
472 buffer. NOTE: re-reading from disk is destructive so we can do it only
473 when we assume nobody is changing the buffercache under our I/O and when
474 we think the disk contains more recent information than the buffercache.
475 The update == 1 pass marks the buffers we need to update, the update == 2
476 pass does the actual I/O. */
477 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
479 invalidate_bh_lrus();
481 * FIXME: what about destroy_dirty_buffers?
482 * We really want to use invalidate_inode_pages2() for
483 * that, but not until that's cleaned up.
485 invalidate_inode_pages(bdev->bd_inode->i_mapping);
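
/*
 * Usage sketch (editorial example): a driver that has detected a media change
 * drops any cached data for the device before the new medium is used, without
 * trashing dirty buffers, per the rules described above.
 */
static void __attribute__((unused)) example_media_changed(struct block_device *bdev)
{
	invalidate_bdev(bdev, 0);	/* 0: do not destroy dirty buffers */
}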
489 * Kick pdflush then try to free up some ZONE_NORMAL memory.
491 static void free_more_memory(void)
496 wakeup_pdflush(1024);
499 for_each_pgdat(pgdat) {
500 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
502 try_to_free_pages(zones, GFP_NOFS);
507 * I/O completion handler for block_read_full_page() - pages
508 * which come unlocked at the end of I/O.
510 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
513 struct buffer_head *first;
514 struct buffer_head *tmp;
516 int page_uptodate = 1;
518 BUG_ON(!buffer_async_read(bh));
522 set_buffer_uptodate(bh);
524 clear_buffer_uptodate(bh);
525 if (printk_ratelimit())
531 * Be _very_ careful from here on. Bad things can happen if
532 * two buffer heads end IO at almost the same time and both
533 * decide that the page is now completely done.
535 first = page_buffers(page);
536 local_irq_save(flags);
537 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
538 clear_buffer_async_read(bh);
542 if (!buffer_uptodate(tmp))
544 if (buffer_async_read(tmp)) {
545 BUG_ON(!buffer_locked(tmp));
548 tmp = tmp->b_this_page;
550 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
551 local_irq_restore(flags);
554 * If none of the buffers had errors and they are all
555 * uptodate then we can set the page uptodate.
557 if (page_uptodate && !PageError(page))
558 SetPageUptodate(page);
563 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
564 local_irq_restore(flags);
569 * Completion handler for block_write_full_page() - pages which are unlocked
570 * during I/O, and which have PageWriteback cleared upon I/O completion.
572 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
574 char b[BDEVNAME_SIZE];
576 struct buffer_head *first;
577 struct buffer_head *tmp;
580 BUG_ON(!buffer_async_write(bh));
584 set_buffer_uptodate(bh);
586 if (printk_ratelimit()) {
588 printk(KERN_WARNING "lost page write due to "
590 bdevname(bh->b_bdev, b));
592 set_bit(AS_EIO, &page->mapping->flags);
593 clear_buffer_uptodate(bh);
597 first = page_buffers(page);
598 local_irq_save(flags);
599 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
601 clear_buffer_async_write(bh);
603 tmp = bh->b_this_page;
605 if (buffer_async_write(tmp)) {
606 BUG_ON(!buffer_locked(tmp));
609 tmp = tmp->b_this_page;
611 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
612 local_irq_restore(flags);
613 end_page_writeback(page);
617 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
618 local_irq_restore(flags);
623 * If a page's buffers are under async read-in (end_buffer_async_read
624 * completion) then there is a possibility that another thread of
625 * control could lock one of the buffers after it has completed
626 * but while some of the other buffers have not completed. This
627 * locked buffer would confuse end_buffer_async_read() into not unlocking
628 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
629 * that this buffer is not under async I/O.
631 * The page comes unlocked when it has no locked buffer_async buffers
634 * PageLocked prevents anyone starting new async I/O reads any of
637 * PageWriteback is used to prevent simultaneous writeout of the same
640 * PageLocked prevents anyone from starting writeback of a page which is
641 * under read I/O (PageWriteback is only ever set against a locked page).
643 static void mark_buffer_async_read(struct buffer_head *bh)
645 bh->b_end_io = end_buffer_async_read;
646 set_buffer_async_read(bh);
649 void mark_buffer_async_write(struct buffer_head *bh)
651 bh->b_end_io = end_buffer_async_write;
652 set_buffer_async_write(bh);
654 EXPORT_SYMBOL(mark_buffer_async_write);
658 * fs/buffer.c contains helper functions for buffer-backed address space's
659 * fsync functions. A common requirement for buffer-based filesystems is
660 * that certain data from the backing blockdev needs to be written out for
661 * a successful fsync(). For example, ext2 indirect blocks need to be
662 * written back and waited upon before fsync() returns.
664 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
665 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
666 * management of a list of dependent buffers at ->i_mapping->private_list.
668 * Locking is a little subtle: try_to_free_buffers() will remove buffers
669 * from their controlling inode's queue when they are being freed. But
670 * try_to_free_buffers() will be operating against the *blockdev* mapping
671 * at the time, not against the S_ISREG file which depends on those buffers.
672 * So the locking for private_list is via the private_lock in the address_space
673 * which backs the buffers. Which is different from the address_space
674 * against which the buffers are listed. So for a particular address_space,
675 * mapping->private_lock does *not* protect mapping->private_list! In fact,
676 * mapping->private_list will always be protected by the backing blockdev's
679 * Which introduces a requirement: all buffers on an address_space's
680 * ->private_list must be from the same address_space: the blockdev's.
682 * address_spaces which do not place buffers at ->private_list via these
683 * utility functions are free to use private_lock and private_list for
684 * whatever they want. The only requirement is that list_empty(private_list)
685 * be true at clear_inode() time.
687 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
688 * filesystems should do that. invalidate_inode_buffers() should just go
689 * BUG_ON(!list_empty).
691 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
692 * take an address_space, not an inode. And it should be called
693 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
696 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
697 * list if it is already on a list. Because if the buffer is on a list,
698 * it *must* already be on the right one. If not, the filesystem is being
699 * silly. This will save a ton of locking. But first we have to ensure
700 * that buffers are taken *off* the old inode's list when they are freed
701 * (presumably in truncate). That requires careful auditing of all
702 * filesystems (do it inside bforget()). It could also be done by bringing
707 * The buffer's backing address_space's private_lock must be held
709 static inline void __remove_assoc_queue(struct buffer_head *bh)
711 list_del_init(&bh->b_assoc_buffers);
714 int inode_has_buffers(struct inode *inode)
716 return !list_empty(&inode->i_data.private_list);
720 * osync is designed to support O_SYNC io. It waits synchronously for
721 * all already-submitted IO to complete, but does not queue any new
722 * writes to the disk.
724 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
725 * you dirty the buffers, and then use osync_inode_buffers to wait for
726 * completion. Any other dirty buffers which are not yet queued for
727 * write will not be flushed to disk by the osync.
729 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
731 struct buffer_head *bh;
737 list_for_each_prev(p, list) {
739 if (buffer_locked(bh)) {
743 if (!buffer_uptodate(bh))
755 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
757 * @mapping: the mapping which wants those buffers written
759 * Starts I/O against the buffers at mapping->private_list, and waits upon
762 * Basically, this is a convenience function for fsync().
763 * @mapping is a file or directory which needs those buffers to be written for
764 * a successful fsync().
766 int sync_mapping_buffers(struct address_space *mapping)
768 struct address_space *buffer_mapping = mapping->assoc_mapping;
770 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
773 return fsync_buffers_list(&buffer_mapping->private_lock,
774 &mapping->private_list);
776 EXPORT_SYMBOL(sync_mapping_buffers);
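
/*
 * Usage sketch (editorial example): a simple buffer-backed filesystem can
 * build its ->fsync largely out of sync_mapping_buffers(), which writes and
 * waits upon the metadata buffers parked on ->private_list.  A real
 * implementation (compare ext2's fsync) would also write the inode itself;
 * the function name below is made up.
 */
static int __attribute__((unused)) example_fsync(struct file *file,
			struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* write out and wait upon the "associated" metadata buffers */
	return sync_mapping_buffers(inode->i_mapping);
}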
779 * Called when we've recently written block `bblock', and it is known that
780 * `bblock' was for a buffer_boundary() buffer. This means that the block at
781 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
782 * dirty, schedule it for IO. So that indirects merge nicely with their data.
784 void write_boundary_block(struct block_device *bdev,
785 sector_t bblock, unsigned blocksize)
787 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
789 if (buffer_dirty(bh))
790 ll_rw_block(WRITE, 1, &bh);
795 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
797 struct address_space *mapping = inode->i_mapping;
798 struct address_space *buffer_mapping = bh->b_page->mapping;
800 mark_buffer_dirty(bh);
801 if (!mapping->assoc_mapping) {
802 mapping->assoc_mapping = buffer_mapping;
804 if (mapping->assoc_mapping != buffer_mapping)
807 if (list_empty(&bh->b_assoc_buffers)) {
808 spin_lock(&buffer_mapping->private_lock);
809 list_move_tail(&bh->b_assoc_buffers,
810 &mapping->private_list);
811 spin_unlock(&buffer_mapping->private_lock);
814 EXPORT_SYMBOL(mark_buffer_dirty_inode);
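
/*
 * Usage sketch (editorial example): after modifying an indirect or other
 * metadata block that belongs to a regular file, a filesystem dirties it
 * against that file's inode so a later fsync() of the file (via
 * sync_mapping_buffers()) also writes this block out.
 */
static void __attribute__((unused))
example_dirty_indirect(struct buffer_head *bh, struct inode *inode)
{
	/* ... update the block pointers inside bh->b_data here ... */
	mark_buffer_dirty_inode(bh, inode);
}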
817 * Add a page to the dirty page list.
819 * It is a sad fact of life that this function is called from several places
820 * deeply under spinlocking. It may not sleep.
822 * If the page has buffers, the uptodate buffers are set dirty, to preserve
823 * dirty-state coherency between the page and the buffers. If the page does
824 * not have buffers then when they are later attached they will all be set
827 * The buffers are dirtied before the page is dirtied. There's a small race
828 * window in which a writepage caller may see the page cleanness but not the
829 * buffer dirtiness. That's fine. If this code were to set the page dirty
830 * before the buffers, a concurrent writepage caller could clear the page dirty
831 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
832 * page on the dirty page list.
834 * We use private_lock to lock against try_to_free_buffers while using the
835 * page's buffer list. Also use this to protect against clean buffers being
836 * added to the page after it was set dirty.
838 * FIXME: may need to call ->reservepage here as well. That's rather up to the
839 * address_space though.
841 int __set_page_dirty_buffers(struct page *page)
843 struct address_space * const mapping = page->mapping;
845 spin_lock(&mapping->private_lock);
846 if (page_has_buffers(page)) {
847 struct buffer_head *head = page_buffers(page);
848 struct buffer_head *bh = head;
851 set_buffer_dirty(bh);
852 bh = bh->b_this_page;
853 } while (bh != head);
855 spin_unlock(&mapping->private_lock);
857 if (!TestSetPageDirty(page)) {
858 write_lock_irq(&mapping->tree_lock);
859 if (page->mapping) { /* Race with truncate? */
860 if (mapping_cap_account_dirty(mapping))
861 inc_page_state(nr_dirty);
862 radix_tree_tag_set(&mapping->page_tree,
864 PAGECACHE_TAG_DIRTY);
866 write_unlock_irq(&mapping->tree_lock);
867 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
872 EXPORT_SYMBOL(__set_page_dirty_buffers);
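
/*
 * Usage sketch (editorial example): buffer-backed filesystems either leave
 * ->set_page_dirty unset (the VM then falls back to this helper) or point at
 * it explicitly in their address_space_operations, as sketched here.
 */
static struct address_space_operations example_aops __attribute__((unused)) = {
	.set_page_dirty	= __set_page_dirty_buffers,
};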
875 * Write out and wait upon a list of buffers.
877 * We have conflicting pressures: we want to make sure that all
878 * initially dirty buffers get waited on, but that any subsequently
879 * dirtied buffers don't. After all, we don't want fsync to last
880 * forever if somebody is actively writing to the file.
882 * Do this in two main stages: first we copy dirty buffers to a
883 * temporary inode list, queueing the writes as we go. Then we clean
884 * up, waiting for those writes to complete.
886 * During this second stage, any subsequent updates to the file may end
887 * up refiling the buffer on the original inode's dirty list again, so
888 * there is a chance we will end up with a buffer queued for write but
889 * not yet completed on that list. So, as a final cleanup we go through
890 * the osync code to catch these locked, dirty buffers without requeuing
891 * any newly dirty buffers for write.
893 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
895 struct buffer_head *bh;
896 struct list_head tmp;
899 INIT_LIST_HEAD(&tmp);
902 while (!list_empty(list)) {
903 bh = BH_ENTRY(list->next);
904 list_del_init(&bh->b_assoc_buffers);
905 if (buffer_dirty(bh) || buffer_locked(bh)) {
906 list_add(&bh->b_assoc_buffers, &tmp);
907 if (buffer_dirty(bh)) {
911 * Ensure any pending I/O completes so that
912 * ll_rw_block() actually writes the current
913 * contents - it is a noop if I/O is still in
914 * flight on potentially older contents.
916 ll_rw_block(SWRITE, 1, &bh);
923 while (!list_empty(&tmp)) {
924 bh = BH_ENTRY(tmp.prev);
925 __remove_assoc_queue(bh);
929 if (!buffer_uptodate(bh))
936 err2 = osync_buffers_list(lock, list);
944 * Invalidate any and all dirty buffers on a given inode. We are
945 * probably unmounting the fs, but that doesn't mean we have already
946 * done a sync(). Just drop the buffers from the inode list.
948 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
949 * assumes that all the buffers are against the blockdev. Not true
952 void invalidate_inode_buffers(struct inode *inode)
954 if (inode_has_buffers(inode)) {
955 struct address_space *mapping = &inode->i_data;
956 struct list_head *list = &mapping->private_list;
957 struct address_space *buffer_mapping = mapping->assoc_mapping;
959 spin_lock(&buffer_mapping->private_lock);
960 while (!list_empty(list))
961 __remove_assoc_queue(BH_ENTRY(list->next));
962 spin_unlock(&buffer_mapping->private_lock);
967 * Remove any clean buffers from the inode's buffer list. This is called
968 * when we're trying to free the inode itself. Those buffers can pin it.
970 * Returns true if all buffers were removed.
972 int remove_inode_buffers(struct inode *inode)
976 if (inode_has_buffers(inode)) {
977 struct address_space *mapping = &inode->i_data;
978 struct list_head *list = &mapping->private_list;
979 struct address_space *buffer_mapping = mapping->assoc_mapping;
981 spin_lock(&buffer_mapping->private_lock);
982 while (!list_empty(list)) {
983 struct buffer_head *bh = BH_ENTRY(list->next);
984 if (buffer_dirty(bh)) {
988 __remove_assoc_queue(bh);
990 spin_unlock(&buffer_mapping->private_lock);
996 * Create the appropriate buffers when given a page for data area and
997 * the size of each buffer.. Use the bh->b_this_page linked list to
998 * follow the buffers created. Return NULL if unable to create more
1001 * The retry flag is used to differentiate async IO (paging, swapping)
1002 * which may not fail from ordinary buffer allocations.
1004 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1007 struct buffer_head *bh, *head;
1013 while ((offset -= size) >= 0) {
1014 bh = alloc_buffer_head(GFP_NOFS);
1019 bh->b_this_page = head;
1024 atomic_set(&bh->b_count, 0);
1025 bh->b_private = NULL;
1028 /* Link the buffer to its page */
1029 set_bh_page(bh, page, offset);
1031 init_buffer(bh, NULL, NULL);
1035 * In case anything failed, we just free everything we got.
1041 head = head->b_this_page;
1042 free_buffer_head(bh);
1047 * Return failure for non-async IO requests. Async IO requests
1048 * are not allowed to fail, so we have to wait until buffer heads
1049 * become available. But we don't want tasks sleeping with
1050 * partially complete buffers, so all were released above.
1055 /* We're _really_ low on memory. Now we just
1056 * wait for old buffer heads to become free due to
1057 * finishing IO. Since this is an async request and
1058 * the reserve list is empty, we're sure there are
1059 * async buffer heads in use.
1064 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1067 link_dev_buffers(struct page *page, struct buffer_head *head)
1069 struct buffer_head *bh, *tail;
1074 bh = bh->b_this_page;
1076 tail->b_this_page = head;
1077 attach_page_buffers(page, head);
1081 * Initialise the state of a blockdev page's buffers.
1084 init_page_buffers(struct page *page, struct block_device *bdev,
1085 sector_t block, int size)
1087 struct buffer_head *head = page_buffers(page);
1088 struct buffer_head *bh = head;
1089 int uptodate = PageUptodate(page);
1092 if (!buffer_mapped(bh)) {
1093 init_buffer(bh, NULL, NULL);
1095 bh->b_blocknr = block;
1097 set_buffer_uptodate(bh);
1098 set_buffer_mapped(bh);
1101 bh = bh->b_this_page;
1102 } while (bh != head);
1106 * Create the page-cache page that contains the requested block.
1108 * This is used purely for blockdev mappings.
1110 static struct page *
1111 grow_dev_page(struct block_device *bdev, sector_t block,
1112 pgoff_t index, int size)
1114 struct inode *inode = bdev->bd_inode;
1116 struct buffer_head *bh;
1118 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1122 if (!PageLocked(page))
1125 if (page_has_buffers(page)) {
1126 bh = page_buffers(page);
1127 if (bh->b_size == size) {
1128 init_page_buffers(page, bdev, block, size);
1131 if (!try_to_free_buffers(page))
1136 * Allocate some buffers for this page
1138 bh = alloc_page_buffers(page, size, 0);
1143 * Link the page to the buffers and initialise them. Take the
1144 * lock to be atomic wrt __find_get_block(), which does not
1145 * run under the page lock.
1147 spin_lock(&inode->i_mapping->private_lock);
1148 link_dev_buffers(page, bh);
1149 init_page_buffers(page, bdev, block, size);
1150 spin_unlock(&inode->i_mapping->private_lock);
1156 page_cache_release(page);
1161 * Create buffers for the specified block device block's page. If
1162 * that page was dirty, the buffers are set dirty also.
1164 * Except that's a bug. Attaching dirty buffers to a dirty
1165 * blockdev's page can result in filesystem corruption, because
1166 * some of those buffers may be aliases of filesystem data.
1167 * grow_dev_page() will go BUG() if this happens.
1170 grow_buffers(struct block_device *bdev, sector_t block, int size)
1179 } while ((size << sizebits) < PAGE_SIZE);
1181 index = block >> sizebits;
1182 block = index << sizebits;
1184 /* Create a page with the proper size buffers.. */
1185 page = grow_dev_page(bdev, block, index, size);
1189 page_cache_release(page);
1193 static struct buffer_head *
1194 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1196 /* Size must be multiple of hard sectorsize */
1197 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1198 (size < 512 || size > PAGE_SIZE))) {
1199 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1201 printk(KERN_ERR "hardsect size: %d\n",
1202 bdev_hardsect_size(bdev));
1209 struct buffer_head * bh;
1211 bh = __find_get_block(bdev, block, size);
1215 if (!grow_buffers(bdev, block, size))
1221 * The relationship between dirty buffers and dirty pages:
1223 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1224 * the page is tagged dirty in its radix tree.
1226 * At all times, the dirtiness of the buffers represents the dirtiness of
1227 * subsections of the page. If the page has buffers, the page dirty bit is
1228 * merely a hint about the true dirty state.
1230 * When a page is set dirty in its entirety, all its buffers are marked dirty
1231 * (if the page has buffers).
1233 * When a buffer is marked dirty, its page is dirtied, but the page's other
1236 * Also. When blockdev buffers are explicitly read with bread(), they
1237 * individually become uptodate. But their backing page remains not
1238 * uptodate - even if all of its buffers are uptodate. A subsequent
1239 * block_read_full_page() against that page will discover all the uptodate
1240 * buffers, will set the page uptodate and will perform no I/O.
1244 * mark_buffer_dirty - mark a buffer_head as needing writeout
1245 * @bh: the buffer_head to mark dirty
1247 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1248 * backing page dirty, then tag the page as dirty in its address_space's radix
1249 * tree and then attach the address_space's inode to its superblock's dirty
1252 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1253 * mapping->tree_lock and the global inode_lock.
1255 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1257 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1258 __set_page_dirty_nobuffers(bh->b_page);
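
/*
 * Usage sketch (editorial example): the common pattern for updating blockdev
 * metadata is read, modify, mark dirty, release; writeback then happens
 * asynchronously.  sb_bread() and the memset() stand in for a real update.
 */
static int __attribute__((unused))
example_update_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;
	memset(bh->b_data, 0, bh->b_size);	/* stand-in for a real change */
	mark_buffer_dirty(bh);			/* schedule it for writeback */
	brelse(bh);
	return 0;
}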
1262 * Decrement a buffer_head's reference count. If all buffers against a page
1263 * have zero reference count, are clean and unlocked, and if the page is clean
1264 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1265 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1266 * a page but it ends up not being freed, and buffers may later be reattached).
1268 void __brelse(struct buffer_head * buf)
1270 if (atomic_read(&buf->b_count)) {
1274 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1279 * bforget() is like brelse(), except it discards any
1280 * potentially dirty data.
1282 void __bforget(struct buffer_head *bh)
1284 clear_buffer_dirty(bh);
1285 if (!list_empty(&bh->b_assoc_buffers)) {
1286 struct address_space *buffer_mapping = bh->b_page->mapping;
1288 spin_lock(&buffer_mapping->private_lock);
1289 list_del_init(&bh->b_assoc_buffers);
1290 spin_unlock(&buffer_mapping->private_lock);
1295 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1298 if (buffer_uptodate(bh)) {
1303 bh->b_end_io = end_buffer_read_sync;
1304 submit_bh(READ, bh);
1306 if (buffer_uptodate(bh))
1314 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1315 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1316 * refcount elevated by one when they're in an LRU. A buffer can only appear
1317 * once in a particular CPU's LRU. A single buffer can be present in multiple
1318 * CPU's LRUs at the same time.
1320 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1321 * sb_find_get_block().
1323 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1324 * a local interrupt disable for that.
1327 #define BH_LRU_SIZE 8
1330 struct buffer_head *bhs[BH_LRU_SIZE];
1333 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1336 #define bh_lru_lock() local_irq_disable()
1337 #define bh_lru_unlock() local_irq_enable()
1339 #define bh_lru_lock() preempt_disable()
1340 #define bh_lru_unlock() preempt_enable()
1343 static inline void check_irqs_on(void)
1345 #ifdef irqs_disabled
1346 BUG_ON(irqs_disabled());
1351 * The LRU management algorithm is dopey-but-simple. Sorry.
1353 static void bh_lru_install(struct buffer_head *bh)
1355 struct buffer_head *evictee = NULL;
1360 lru = &__get_cpu_var(bh_lrus);
1361 if (lru->bhs[0] != bh) {
1362 struct buffer_head *bhs[BH_LRU_SIZE];
1368 for (in = 0; in < BH_LRU_SIZE; in++) {
1369 struct buffer_head *bh2 = lru->bhs[in];
1374 if (out >= BH_LRU_SIZE) {
1375 BUG_ON(evictee != NULL);
1382 while (out < BH_LRU_SIZE)
1384 memcpy(lru->bhs, bhs, sizeof(bhs));
1393 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1395 static struct buffer_head *
1396 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1398 struct buffer_head *ret = NULL;
1404 lru = &__get_cpu_var(bh_lrus);
1405 for (i = 0; i < BH_LRU_SIZE; i++) {
1406 struct buffer_head *bh = lru->bhs[i];
1408 if (bh && bh->b_bdev == bdev &&
1409 bh->b_blocknr == block && bh->b_size == size) {
1412 lru->bhs[i] = lru->bhs[i - 1];
1427 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1428 * it in the LRU and mark it as accessed. If it is not present then return
1431 struct buffer_head *
1432 __find_get_block(struct block_device *bdev, sector_t block, int size)
1434 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1437 bh = __find_get_block_slow(bdev, block);
1445 EXPORT_SYMBOL(__find_get_block);
1448 * __getblk will locate (and, if necessary, create) the buffer_head
1449 * which corresponds to the passed block_device, block and size. The
1450 * returned buffer has its reference count incremented.
1452 * __getblk() cannot fail - it just keeps trying. If you pass it an
1453 * illegal block number, __getblk() will happily return a buffer_head
1454 * which represents the non-existent block. Very weird.
1456 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1457 * attempt is failing. FIXME, perhaps?
1459 struct buffer_head *
1460 __getblk(struct block_device *bdev, sector_t block, int size)
1462 struct buffer_head *bh = __find_get_block(bdev, block, size);
1466 bh = __getblk_slow(bdev, block, size);
1469 EXPORT_SYMBOL(__getblk);
1472 * Do async read-ahead on a buffer..
1474 void __breadahead(struct block_device *bdev, sector_t block, int size)
1476 struct buffer_head *bh = __getblk(bdev, block, size);
1478 ll_rw_block(READA, 1, &bh);
1482 EXPORT_SYMBOL(__breadahead);
1485 * __bread() - reads a specified block and returns the bh
1486 * @bdev: the block_device to read from
1487 * @block: number of block
1488 * @size: size (in bytes) to read
1490 * Reads a specified block, and returns buffer head that contains it.
1491 * It returns NULL if the block was unreadable.
1493 struct buffer_head *
1494 __bread(struct block_device *bdev, sector_t block, int size)
1496 struct buffer_head *bh = __getblk(bdev, block, size);
1498 if (likely(bh) && !buffer_uptodate(bh))
1499 bh = __bread_slow(bh);
1502 EXPORT_SYMBOL(__bread);
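
/*
 * Usage sketch (editorial example): reading a single block straight from a
 * block device.  A NULL return means the block could not be read; the caller
 * must brelse() the buffer when done with it.
 */
static void __attribute__((unused))
example_read_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return;			/* I/O error or out of memory */
	/* ... inspect bh->b_data ... */
	brelse(bh);
}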
1505 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1506 * This doesn't race because it runs in each cpu either in irq
1507 * or with preempt disabled.
1509 static void invalidate_bh_lru(void *arg)
1511 struct bh_lru *b = &get_cpu_var(bh_lrus);
1514 for (i = 0; i < BH_LRU_SIZE; i++) {
1518 put_cpu_var(bh_lrus);
1521 static void invalidate_bh_lrus(void)
1523 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1526 void set_bh_page(struct buffer_head *bh,
1527 struct page *page, unsigned long offset)
1530 if (offset >= PAGE_SIZE)
1532 if (PageHighMem(page))
1534 * This catches illegal uses and preserves the offset:
1536 bh->b_data = (char *)(0 + offset);
1538 bh->b_data = page_address(page) + offset;
1540 EXPORT_SYMBOL(set_bh_page);
1543 * Called when truncating a buffer on a page completely.
1545 static void discard_buffer(struct buffer_head * bh)
1548 clear_buffer_dirty(bh);
1550 clear_buffer_mapped(bh);
1551 clear_buffer_req(bh);
1552 clear_buffer_new(bh);
1553 clear_buffer_delay(bh);
1558 * try_to_release_page() - release old fs-specific metadata on a page
1560 * @page: the page which the kernel is trying to free
1561 * @gfp_mask: memory allocation flags (and I/O mode)
1563 * The address_space is to try to release any data against the page
1564 * (presumably at page->private). If the release was successful, return `1'.
1565 * Otherwise return zero.
1567 * The @gfp_mask argument specifies whether I/O may be performed to release
1568 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1570 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1572 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1574 struct address_space * const mapping = page->mapping;
1576 BUG_ON(!PageLocked(page));
1577 if (PageWriteback(page))
1580 if (mapping && mapping->a_ops->releasepage)
1581 return mapping->a_ops->releasepage(page, gfp_mask);
1582 return try_to_free_buffers(page);
1584 EXPORT_SYMBOL(try_to_release_page);
1587 * block_invalidatepage - invalidate part or all of a buffer-backed page
1589 * @page: the page which is affected
1590 * @offset: the index of the truncation point
1592 * block_invalidatepage() is called when all or part of the page has become
1593 * invalidated by a truncate operation.
1595 * block_invalidatepage() does not have to release all buffers, but it must
1596 * ensure that no dirty buffer is left outside @offset and that no I/O
1597 * is underway against any of the blocks which are outside the truncation
1598 * point. Because the caller is about to free (and possibly reuse) those
1601 int block_invalidatepage(struct page *page, unsigned long offset)
1603 struct buffer_head *head, *bh, *next;
1604 unsigned int curr_off = 0;
1607 BUG_ON(!PageLocked(page));
1608 if (!page_has_buffers(page))
1611 head = page_buffers(page);
1614 unsigned int next_off = curr_off + bh->b_size;
1615 next = bh->b_this_page;
1618 * is this block fully invalidated?
1620 if (offset <= curr_off)
1622 curr_off = next_off;
1624 } while (bh != head);
1627 * We release buffers only if the entire page is being invalidated.
1628 * The get_block cached value has been unconditionally invalidated,
1629 * so real IO is not possible anymore.
1632 ret = try_to_release_page(page, 0);
1636 EXPORT_SYMBOL(block_invalidatepage);
1638 int do_invalidatepage(struct page *page, unsigned long offset)
1640 int (*invalidatepage)(struct page *, unsigned long);
1641 invalidatepage = page->mapping->a_ops->invalidatepage;
1642 if (invalidatepage == NULL)
1643 invalidatepage = block_invalidatepage;
1644 return (*invalidatepage)(page, offset);
1648 * We attach and possibly dirty the buffers atomically wrt
1649 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1650 * is already excluded via the page lock.
1652 void create_empty_buffers(struct page *page,
1653 unsigned long blocksize, unsigned long b_state)
1655 struct buffer_head *bh, *head, *tail;
1657 head = alloc_page_buffers(page, blocksize, 1);
1660 bh->b_state |= b_state;
1662 bh = bh->b_this_page;
1664 tail->b_this_page = head;
1666 spin_lock(&page->mapping->private_lock);
1667 if (PageUptodate(page) || PageDirty(page)) {
1670 if (PageDirty(page))
1671 set_buffer_dirty(bh);
1672 if (PageUptodate(page))
1673 set_buffer_uptodate(bh);
1674 bh = bh->b_this_page;
1675 } while (bh != head);
1677 attach_page_buffers(page, head);
1678 spin_unlock(&page->mapping->private_lock);
1680 EXPORT_SYMBOL(create_empty_buffers);
1683 * We are taking a block for data and we don't want any output from any
1684 * buffer-cache aliases starting from return from that function and
1685 * until the moment when something will explicitly mark the buffer
1686 * dirty (hopefully that will not happen until we will free that block ;-)
1687 * We don't even need to mark it not-uptodate - nobody can expect
1688 * anything from a newly allocated buffer anyway. We used to use
1689 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1690 * don't want to mark the alias unmapped, for example - it would confuse
1691 * anyone who might pick it with bread() afterwards...
1693 * Also.. Note that bforget() doesn't lock the buffer. So there can
1694 * be writeout I/O going on against recently-freed buffers. We don't
1695 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1696 * only if we really need to. That happens here.
1698 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1700 struct buffer_head *old_bh;
1704 old_bh = __find_get_block_slow(bdev, block);
1706 clear_buffer_dirty(old_bh);
1707 wait_on_buffer(old_bh);
1708 clear_buffer_req(old_bh);
1712 EXPORT_SYMBOL(unmap_underlying_metadata);
1715 * NOTE! All mapped/uptodate combinations are valid:
1717 * Mapped Uptodate Meaning
1719 * No No "unknown" - must do get_block()
1720 * No Yes "hole" - zero-filled
1721 * Yes No "allocated" - allocated on disk, not read in
1722 * Yes Yes "valid" - allocated and up-to-date in memory.
1724 * "Dirty" is valid only with the last case (mapped+uptodate).
1728 * While block_write_full_page is writing back the dirty buffers under
1729 * the page lock, whoever dirtied the buffers may decide to clean them
1730 * again at any time. We handle that by only looking at the buffer
1731 * state inside lock_buffer().
1733 * If block_write_full_page() is called for regular writeback
1734 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1735 * locked buffer. This only can happen if someone has written the buffer
1736 * directly, with submit_bh(). At the address_space level PageWriteback
1737 * prevents this contention from occurring.
1739 static int __block_write_full_page(struct inode *inode, struct page *page,
1740 get_block_t *get_block, struct writeback_control *wbc)
1744 sector_t last_block;
1745 struct buffer_head *bh, *head;
1746 int nr_underway = 0;
1748 BUG_ON(!PageLocked(page));
1750 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1752 if (!page_has_buffers(page)) {
1753 create_empty_buffers(page, 1 << inode->i_blkbits,
1754 (1 << BH_Dirty)|(1 << BH_Uptodate));
1758 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1759 * here, and the (potentially unmapped) buffers may become dirty at
1760 * any time. If a buffer becomes dirty here after we've inspected it
1761 * then we just miss that fact, and the page stays dirty.
1763 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1764 * handle that here by just cleaning them.
1767 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1768 head = page_buffers(page);
1772 * Get all the dirty buffers mapped to disk addresses and
1773 * handle any aliases from the underlying blockdev's mapping.
1776 if (block > last_block) {
1778 * mapped buffers outside i_size will occur, because
1779 * this page can be outside i_size when there is a
1780 * truncate in progress.
1783 * The buffer was zeroed by block_write_full_page()
1785 clear_buffer_dirty(bh);
1786 set_buffer_uptodate(bh);
1787 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1788 err = get_block(inode, block, bh, 1);
1791 if (buffer_new(bh)) {
1792 /* blockdev mappings never come here */
1793 clear_buffer_new(bh);
1794 unmap_underlying_metadata(bh->b_bdev,
1798 bh = bh->b_this_page;
1800 } while (bh != head);
1803 if (!buffer_mapped(bh))
1806 * If it's a fully non-blocking write attempt and we cannot
1807 * lock the buffer then redirty the page. Note that this can
1808 * potentially cause a busy-wait loop from pdflush and kswapd
1809 * activity, but those code paths have their own higher-level
1812 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1814 } else if (test_set_buffer_locked(bh)) {
1815 redirty_page_for_writepage(wbc, page);
1818 if (test_clear_buffer_dirty(bh)) {
1819 mark_buffer_async_write(bh);
1823 } while ((bh = bh->b_this_page) != head);
1826 * The page and its buffers are protected by PageWriteback(), so we can
1827 * drop the bh refcounts early.
1829 BUG_ON(PageWriteback(page));
1830 set_page_writeback(page);
1833 struct buffer_head *next = bh->b_this_page;
1834 if (buffer_async_write(bh)) {
1835 submit_bh(WRITE, bh);
1839 } while (bh != head);
1844 if (nr_underway == 0) {
1846 * The page was marked dirty, but the buffers were
1847 * clean. Someone wrote them back by hand with
1848 * ll_rw_block/submit_bh. A rare case.
1852 if (!buffer_uptodate(bh)) {
1856 bh = bh->b_this_page;
1857 } while (bh != head);
1859 SetPageUptodate(page);
1860 end_page_writeback(page);
1862 * The page and buffer_heads can be released at any time from
1865 wbc->pages_skipped++; /* We didn't write this page */
1871 * ENOSPC, or some other error. We may already have added some
1872 * blocks to the file, so we need to write these out to avoid
1873 * exposing stale data.
1874 * The page is currently locked and not marked for writeback
1877 /* Recovery: lock and submit the mapped buffers */
1879 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1881 mark_buffer_async_write(bh);
1884 * The buffer may have been set dirty during
1885 * attachment to a dirty page.
1887 clear_buffer_dirty(bh);
1889 } while ((bh = bh->b_this_page) != head);
1891 BUG_ON(PageWriteback(page));
1892 set_page_writeback(page);
1895 struct buffer_head *next = bh->b_this_page;
1896 if (buffer_async_write(bh)) {
1897 clear_buffer_dirty(bh);
1898 submit_bh(WRITE, bh);
1902 } while (bh != head);
1906 static int __block_prepare_write(struct inode *inode, struct page *page,
1907 unsigned from, unsigned to, get_block_t *get_block)
1909 unsigned block_start, block_end;
1912 unsigned blocksize, bbits;
1913 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1915 BUG_ON(!PageLocked(page));
1916 BUG_ON(from > PAGE_CACHE_SIZE);
1917 BUG_ON(to > PAGE_CACHE_SIZE);
1920 blocksize = 1 << inode->i_blkbits;
1921 if (!page_has_buffers(page))
1922 create_empty_buffers(page, blocksize, 0);
1923 head = page_buffers(page);
1925 bbits = inode->i_blkbits;
1926 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1928 for(bh = head, block_start = 0; bh != head || !block_start;
1929 block++, block_start=block_end, bh = bh->b_this_page) {
1930 block_end = block_start + blocksize;
1931 if (block_end <= from || block_start >= to) {
1932 if (PageUptodate(page)) {
1933 if (!buffer_uptodate(bh))
1934 set_buffer_uptodate(bh);
1939 clear_buffer_new(bh);
1940 if (!buffer_mapped(bh)) {
1941 err = get_block(inode, block, bh, 1);
1944 if (buffer_new(bh)) {
1945 unmap_underlying_metadata(bh->b_bdev,
1947 if (PageUptodate(page)) {
1948 set_buffer_uptodate(bh);
1951 if (block_end > to || block_start < from) {
1954 kaddr = kmap_atomic(page, KM_USER0);
1958 if (block_start < from)
1959 memset(kaddr+block_start,
1960 0, from-block_start);
1961 flush_dcache_page(page);
1962 kunmap_atomic(kaddr, KM_USER0);
1967 if (PageUptodate(page)) {
1968 if (!buffer_uptodate(bh))
1969 set_buffer_uptodate(bh);
1972 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1973 (block_start < from || block_end > to)) {
1974 ll_rw_block(READ, 1, &bh);
1979 * If we issued read requests - let them complete.
1981 while(wait_bh > wait) {
1982 wait_on_buffer(*--wait_bh);
1983 if (!buffer_uptodate(*wait_bh))
1990 clear_buffer_new(bh);
1991 } while ((bh = bh->b_this_page) != head);
1996 * Zero out any newly allocated blocks to avoid exposing stale
1997 * data. If BH_New is set, we know that the block was newly
1998 * allocated in the above loop.
2003 block_end = block_start+blocksize;
2004 if (block_end <= from)
2006 if (block_start >= to)
2008 if (buffer_new(bh)) {
2011 clear_buffer_new(bh);
2012 kaddr = kmap_atomic(page, KM_USER0);
2013 memset(kaddr+block_start, 0, bh->b_size);
2014 kunmap_atomic(kaddr, KM_USER0);
2015 set_buffer_uptodate(bh);
2016 mark_buffer_dirty(bh);
2019 block_start = block_end;
2020 bh = bh->b_this_page;
2021 } while (bh != head);
2025 static int __block_commit_write(struct inode *inode, struct page *page,
2026 unsigned from, unsigned to)
2028 unsigned block_start, block_end;
2031 struct buffer_head *bh, *head;
2033 blocksize = 1 << inode->i_blkbits;
2035 for(bh = head = page_buffers(page), block_start = 0;
2036 bh != head || !block_start;
2037 block_start=block_end, bh = bh->b_this_page) {
2038 block_end = block_start + blocksize;
2039 if (block_end <= from || block_start >= to) {
2040 if (!buffer_uptodate(bh))
2043 set_buffer_uptodate(bh);
2044 mark_buffer_dirty(bh);
2049 * If this is a partial write which happened to make all buffers
2050 * uptodate then we can optimize away a bogus readpage() for
2051 * the next read(). Here we 'discover' whether the page went
2052 * uptodate as a result of this (potentially partial) write.
2055 SetPageUptodate(page);
2060 * Generic "read page" function for block devices that have the normal
2061 * get_block functionality. This is most of the block device filesystems.
2062 * Reads the page asynchronously --- the unlock_buffer() and
2063 * set/clear_buffer_uptodate() functions propagate buffer state into the
2064 * page struct once IO has completed.
2066 int block_read_full_page(struct page *page, get_block_t *get_block)
2068 struct inode *inode = page->mapping->host;
2069 sector_t iblock, lblock;
2070 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2071 unsigned int blocksize;
2073 int fully_mapped = 1;
2075 BUG_ON(!PageLocked(page));
2076 blocksize = 1 << inode->i_blkbits;
2077 if (!page_has_buffers(page))
2078 create_empty_buffers(page, blocksize, 0);
2079 head = page_buffers(page);
2081 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2082 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2088 if (buffer_uptodate(bh))
2091 if (!buffer_mapped(bh)) {
2095 if (iblock < lblock) {
2096 err = get_block(inode, iblock, bh, 0);
2100 if (!buffer_mapped(bh)) {
2101 void *kaddr = kmap_atomic(page, KM_USER0);
2102 memset(kaddr + i * blocksize, 0, blocksize);
2103 flush_dcache_page(page);
2104 kunmap_atomic(kaddr, KM_USER0);
2106 set_buffer_uptodate(bh);
2110 * get_block() might have updated the buffer
2113 if (buffer_uptodate(bh))
2117 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2120 SetPageMappedToDisk(page);
2124 * All buffers are uptodate - we can set the page uptodate
2125 * as well. But not if get_block() returned an error.
2127 if (!PageError(page))
2128 SetPageUptodate(page);
2133 /* Stage two: lock the buffers */
2134 for (i = 0; i < nr; i++) {
2137 mark_buffer_async_read(bh);
2141 * Stage 3: start the IO. Check for uptodateness
2142 * inside the buffer lock in case another process reading
2143 * the underlying blockdev brought it uptodate (the sct fix).
2145 for (i = 0; i < nr; i++) {
2147 if (buffer_uptodate(bh))
2148 end_buffer_async_read(bh, 1);
2150 submit_bh(READ, bh);
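
/*
 * Usage sketch (editorial example): a filesystem exposes this routine through
 * its ->readpage address_space operation, passing its own get_block mapper.
 * The identity-style get_block below is only illustrative (it is roughly what
 * a blockdev mapping would do); both names are made up.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = iblock;		/* 1:1 file-block to disk-block */
	set_buffer_mapped(bh_result);
	return 0;
}

static int __attribute__((unused))
example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}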
2155 /* utility function for filesystems that need to do work on expanding
2156 * truncates. Uses prepare/commit_write to allow the filesystem to
2157 * deal with the hole.
2159 static int __generic_cont_expand(struct inode *inode, loff_t size,
2160 pgoff_t index, unsigned int offset)
2162 struct address_space *mapping = inode->i_mapping;
2164 unsigned long limit;
2168 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2169 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2170 send_sig(SIGXFSZ, current, 0);
2173 if (size > inode->i_sb->s_maxbytes)
2177 page = grab_cache_page(mapping, index);
2180 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2183 * ->prepare_write() may have instantiated a few blocks
2184 * outside i_size. Trim these off again.
2187 page_cache_release(page);
2188 vmtruncate(inode, inode->i_size);
2192 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2195 page_cache_release(page);
2202 int generic_cont_expand(struct inode *inode, loff_t size)
2205 unsigned int offset;
2207 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2209 /* ugh. in prepare/commit_write, if from==to==start of block, we
2210 ** skip the prepare. make sure we never send an offset for the start
2213 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2214 /* caller must handle this extra byte. */
2217 index = size >> PAGE_CACHE_SHIFT;
2219 return __generic_cont_expand(inode, size, index, offset);
2222 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2224 loff_t pos = size - 1;
2225 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2226 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2228 /* prepare/commit_write can handle even if from==to==start of block. */
2229 return __generic_cont_expand(inode, size, index, offset);
2233 * For moronic filesystems that do not allow holes in files.
2234 * We may have to extend the file.
2237 int cont_prepare_write(struct page *page, unsigned offset,
2238 unsigned to, get_block_t *get_block, loff_t *bytes)
2240 struct address_space *mapping = page->mapping;
2241 struct inode *inode = mapping->host;
2242 struct page *new_page;
2246 unsigned blocksize = 1 << inode->i_blkbits;
2249 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2251 new_page = grab_cache_page(mapping, pgpos);
2254 /* we might sleep */
2255 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2256 unlock_page(new_page);
2257 page_cache_release(new_page);
2260 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2261 if (zerofrom & (blocksize-1)) {
2262 *bytes |= (blocksize-1);
2265 status = __block_prepare_write(inode, new_page, zerofrom,
2266 PAGE_CACHE_SIZE, get_block);
2269 kaddr = kmap_atomic(new_page, KM_USER0);
2270 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2271 flush_dcache_page(new_page);
2272 kunmap_atomic(kaddr, KM_USER0);
2273 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2274 unlock_page(new_page);
2275 page_cache_release(new_page);
2278 if (page->index < pgpos) {
2279 /* completely inside the area */
2282 /* page covers the boundary, find the boundary offset */
2283 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2285 /* if we will expand the thing last block will be filled */
2286 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2287 *bytes |= (blocksize-1);
2291 /* starting below the boundary? Nothing to zero out */
2292 if (offset <= zerofrom)
2295 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2298 if (zerofrom < offset) {
2299 kaddr = kmap_atomic(page, KM_USER0);
2300 memset(kaddr+zerofrom, 0, offset-zerofrom);
2301 flush_dcache_page(page);
2302 kunmap_atomic(kaddr, KM_USER0);
2303 __block_commit_write(inode, page, zerofrom, offset);
2307 ClearPageUptodate(page);
2311 ClearPageUptodate(new_page);
2312 unlock_page(new_page);
2313 page_cache_release(new_page);
2318 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2319 get_block_t *get_block)
2321 struct inode *inode = page->mapping->host;
2322 int err = __block_prepare_write(inode, page, from, to, get_block);
2324 ClearPageUptodate(page);
2328 int block_commit_write(struct page *page, unsigned from, unsigned to)
2330 struct inode *inode = page->mapping->host;
2331 __block_commit_write(inode,page,from,to);
2335 int generic_commit_write(struct file *file, struct page *page,
2336 unsigned from, unsigned to)
2338 struct inode *inode = page->mapping->host;
2339 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2340 __block_commit_write(inode,page,from,to);
2342 * No need to use i_size_read() here: the i_size
2343 * cannot change under us because we hold i_mutex.
2345 if (pos > inode->i_size) {
2346 i_size_write(inode, pos);
2347 mark_inode_dirty(inode);
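/*
 * Illustrative sketch (not part of the original file): a typical
 * buffer_head-backed filesystem wires the helpers above into its
 * address_space_operations along these lines.  All "myfs_*" names are
 * hypothetical.
 */
#if 0	/* example only */
static int myfs_prepare_write(struct file *file, struct page *page,
				unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,	/* wraps block_read_full_page() */
	.writepage	= myfs_writepage,	/* wraps block_write_full_page() */
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= myfs_bmap,
};
#endif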
2354 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2355 * immediately, while under the page lock. So it needs a special end_io
2356 * handler which does not touch the bh after unlocking it.
2358 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2359 * a race there is benign: unlock_buffer() only uses the bh's address for
2360 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2363 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2366 set_buffer_uptodate(bh);
2368 /* This happens, due to failed READA attempts. */
2369 clear_buffer_uptodate(bh);
2375 * On entry, the page is fully not uptodate.
2376 * On exit the page is fully uptodate in the areas outside (from,to)
2378 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2379 get_block_t *get_block)
2381 struct inode *inode = page->mapping->host;
2382 const unsigned blkbits = inode->i_blkbits;
2383 const unsigned blocksize = 1 << blkbits;
2384 struct buffer_head map_bh;
2385 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2386 unsigned block_in_page;
2387 unsigned block_start;
2388 sector_t block_in_file;
2393 int is_mapped_to_disk = 1;
2396 if (PageMappedToDisk(page))
2399 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2400 map_bh.b_page = page;
2403 * We loop across all blocks in the page, whether or not they are
2404 * part of the affected region. This is so we can discover if the
2405 * page is fully mapped-to-disk.
2407 for (block_start = 0, block_in_page = 0;
2408 block_start < PAGE_CACHE_SIZE;
2409 block_in_page++, block_start += blocksize) {
2410 unsigned block_end = block_start + blocksize;
2415 if (block_start >= to)
2417 ret = get_block(inode, block_in_file + block_in_page,
2421 if (!buffer_mapped(&map_bh))
2422 is_mapped_to_disk = 0;
2423 if (buffer_new(&map_bh))
2424 unmap_underlying_metadata(map_bh.b_bdev,
2426 if (PageUptodate(page))
2428 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2429 kaddr = kmap_atomic(page, KM_USER0);
2430 if (block_start < from) {
2431 memset(kaddr+block_start, 0, from-block_start);
2434 if (block_end > to) {
2435 memset(kaddr + to, 0, block_end - to);
2438 flush_dcache_page(page);
2439 kunmap_atomic(kaddr, KM_USER0);
2442 if (buffer_uptodate(&map_bh))
2443 continue; /* reiserfs does this */
2444 if (block_start < from || block_end > to) {
2445 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2451 bh->b_state = map_bh.b_state;
2452 atomic_set(&bh->b_count, 0);
2453 bh->b_this_page = NULL;
2455 bh->b_blocknr = map_bh.b_blocknr;
2456 bh->b_size = blocksize;
2457 bh->b_data = (char *)(long)block_start;
2458 bh->b_bdev = map_bh.b_bdev;
2459 bh->b_private = NULL;
2460 read_bh[nr_reads++] = bh;
2465 struct buffer_head *bh;
2468 * The page is locked, so these buffers are protected from
2469 * any VM or truncate activity. Hence we don't need to care
2470 * for the buffer_head refcounts.
2472 for (i = 0; i < nr_reads; i++) {
2475 bh->b_end_io = end_buffer_read_nobh;
2476 submit_bh(READ, bh);
2478 for (i = 0; i < nr_reads; i++) {
2481 if (!buffer_uptodate(bh))
2483 free_buffer_head(bh);
2490 if (is_mapped_to_disk)
2491 SetPageMappedToDisk(page);
2492 SetPageUptodate(page);
2495 * Setting the page dirty here isn't necessary for the prepare_write
2496 * function - commit_write will do that. But if/when this function is
2497 * used within the pagefault handler to ensure that all mmapped pages
2498 * have backing space in the filesystem, we will need to dirty the page
2499 * if its contents were altered.
2502 set_page_dirty(page);
2507 for (i = 0; i < nr_reads; i++) {
2509 free_buffer_head(read_bh[i]);
2513 * Error recovery is pretty slack. Clear the page and mark it dirty
2514 * so we'll later zero out any blocks which _were_ allocated.
2516 kaddr = kmap_atomic(page, KM_USER0);
2517 memset(kaddr, 0, PAGE_CACHE_SIZE);
2518 kunmap_atomic(kaddr, KM_USER0);
2519 SetPageUptodate(page);
2520 set_page_dirty(page);
2523 EXPORT_SYMBOL(nobh_prepare_write);
2525 int nobh_commit_write(struct file *file, struct page *page,
2526 unsigned from, unsigned to)
2528 struct inode *inode = page->mapping->host;
2529 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2531 set_page_dirty(page);
2532 if (pos > inode->i_size) {
2533 i_size_write(inode, pos);
2534 mark_inode_dirty(inode);
2538 EXPORT_SYMBOL(nobh_commit_write);
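/*
 * Illustrative sketch (not part of the original file): the nobh helpers are
 * designed to be used together as a ->prepare_write/->commit_write pair,
 * in the style of ext2's nobh mode.  The "myfs" names are hypothetical.
 */
#if 0	/* example only */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
					unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_nobh_aops = {
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
	.writepage	= myfs_nobh_writepage,	/* wraps nobh_writepage() */
};
#endif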
2541 * nobh_writepage() - based on block_write_full_page() except
2542 * that it tries to operate without attaching bufferheads to
2545 int nobh_writepage(struct page *page, get_block_t *get_block,
2546 struct writeback_control *wbc)
2548 struct inode * const inode = page->mapping->host;
2549 loff_t i_size = i_size_read(inode);
2550 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2555 /* Is the page fully inside i_size? */
2556 if (page->index < end_index)
2559 /* Is the page fully outside i_size? (truncate in progress) */
2560 offset = i_size & (PAGE_CACHE_SIZE-1);
2561 if (page->index >= end_index+1 || !offset) {
2563 * The page may have dirty, unmapped buffers. For example,
2564 * they may have been added in ext3_writepage(). Make them
2565 * freeable here, so the page does not leak.
2568 /* Not really sure about this - do we need this ? */
2569 if (page->mapping->a_ops->invalidatepage)
2570 page->mapping->a_ops->invalidatepage(page, offset);
2573 return 0; /* don't care */
2577 * The page straddles i_size. It must be zeroed out on each and every
2578 * writepage invocation because it may be mmapped. "A file is mapped
2579 * in multiples of the page size. For a file that is not a multiple of
2580 * the page size, the remaining memory is zeroed when mapped, and
2581 * writes to that region are not written out to the file."
2583 kaddr = kmap_atomic(page, KM_USER0);
2584 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2585 flush_dcache_page(page);
2586 kunmap_atomic(kaddr, KM_USER0);
2588 ret = mpage_writepage(page, get_block, wbc);
2590 ret = __block_write_full_page(inode, page, get_block, wbc);
2593 EXPORT_SYMBOL(nobh_writepage);
2596 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2598 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2600 struct inode *inode = mapping->host;
2601 unsigned blocksize = 1 << inode->i_blkbits;
2602 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2603 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2606 struct address_space_operations *a_ops = mapping->a_ops;
2610 if ((offset & (blocksize - 1)) == 0)
2614 page = grab_cache_page(mapping, index);
2618 to = (offset + blocksize) & ~(blocksize - 1);
2619 ret = a_ops->prepare_write(NULL, page, offset, to);
2621 kaddr = kmap_atomic(page, KM_USER0);
2622 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2623 flush_dcache_page(page);
2624 kunmap_atomic(kaddr, KM_USER0);
2625 set_page_dirty(page);
2628 page_cache_release(page);
2632 EXPORT_SYMBOL(nobh_truncate_page);
2634 int block_truncate_page(struct address_space *mapping,
2635 loff_t from, get_block_t *get_block)
2637 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2638 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2641 unsigned length, pos;
2642 struct inode *inode = mapping->host;
2644 struct buffer_head *bh;
2648 blocksize = 1 << inode->i_blkbits;
2649 length = offset & (blocksize - 1);
2651 /* Block boundary? Nothing to do */
2655 length = blocksize - length;
2656 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2658 page = grab_cache_page(mapping, index);
2663 if (!page_has_buffers(page))
2664 create_empty_buffers(page, blocksize, 0);
2666 /* Find the buffer that contains "offset" */
2667 bh = page_buffers(page);
2669 while (offset >= pos) {
2670 bh = bh->b_this_page;
2676 if (!buffer_mapped(bh)) {
2677 err = get_block(inode, iblock, bh, 0);
2680 /* unmapped? It's a hole - nothing to do */
2681 if (!buffer_mapped(bh))
2685 /* Ok, it's mapped. Make sure it's up-to-date */
2686 if (PageUptodate(page))
2687 set_buffer_uptodate(bh);
2689 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2691 ll_rw_block(READ, 1, &bh);
2693 /* Uhhuh. Read error. Complain and punt. */
2694 if (!buffer_uptodate(bh))
2698 kaddr = kmap_atomic(page, KM_USER0);
2699 memset(kaddr + offset, 0, length);
2700 flush_dcache_page(page);
2701 kunmap_atomic(kaddr, KM_USER0);
2703 mark_buffer_dirty(bh);
2708 page_cache_release(page);
2714 * The generic ->writepage function for buffer-backed address_spaces
2716 int block_write_full_page(struct page *page, get_block_t *get_block,
2717 struct writeback_control *wbc)
2719 struct inode * const inode = page->mapping->host;
2720 loff_t i_size = i_size_read(inode);
2721 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2725 /* Is the page fully inside i_size? */
2726 if (page->index < end_index)
2727 return __block_write_full_page(inode, page, get_block, wbc);
2729 /* Is the page fully outside i_size? (truncate in progress) */
2730 offset = i_size & (PAGE_CACHE_SIZE-1);
2731 if (page->index >= end_index+1 || !offset) {
2733 * The page may have dirty, unmapped buffers. For example,
2734 * they may have been added in ext3_writepage(). Make them
2735 * freeable here, so the page does not leak.
2737 do_invalidatepage(page, 0);
2739 return 0; /* don't care */
2743 * The page straddles i_size. It must be zeroed out on each and every
2744 * writepage invocation because it may be mmapped. "A file is mapped
2745 * in multiples of the page size. For a file that is not a multiple of
2746 * the page size, the remaining memory is zeroed when mapped, and
2747 * writes to that region are not written out to the file."
2749 kaddr = kmap_atomic(page, KM_USER0);
2750 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2751 flush_dcache_page(page);
2752 kunmap_atomic(kaddr, KM_USER0);
2753 return __block_write_full_page(inode, page, get_block, wbc);
2756 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2757 get_block_t *get_block)
2759 struct buffer_head tmp;
2760 struct inode *inode = mapping->host;
2763 get_block(inode, block, &tmp, 0);
2764 return tmp.b_blocknr;
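/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->bmap method is usually a thin wrapper around generic_block_bmap(),
 * supplying its own get_block routine ("myfs_get_block" is hypothetical).
 */
#if 0	/* example only */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}
#endif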
2767 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2769 struct buffer_head *bh = bio->bi_private;
2774 if (err == -EOPNOTSUPP) {
2775 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2776 set_bit(BH_Eopnotsupp, &bh->b_state);
2779 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2784 int submit_bh(int rw, struct buffer_head * bh)
2789 BUG_ON(!buffer_locked(bh));
2790 BUG_ON(!buffer_mapped(bh));
2791 BUG_ON(!bh->b_end_io);
2793 if (buffer_ordered(bh) && (rw == WRITE))
2797 * Only clear out a write error when rewriting; should this
2798 * include WRITE_SYNC as well?
2800 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2801 clear_buffer_write_io_error(bh);
2804 * from here on down, it's all bio -- do the initial mapping,
2805 * submit_bio -> generic_make_request may further map this bio around
2807 bio = bio_alloc(GFP_NOIO, 1);
2809 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2810 bio->bi_bdev = bh->b_bdev;
2811 bio->bi_io_vec[0].bv_page = bh->b_page;
2812 bio->bi_io_vec[0].bv_len = bh->b_size;
2813 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2817 bio->bi_size = bh->b_size;
2819 bio->bi_end_io = end_bio_bh_io_sync;
2820 bio->bi_private = bh;
2823 submit_bio(rw, bio);
2825 if (bio_flagged(bio, BIO_EOPNOTSUPP))
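/*
 * Illustrative sketch (not part of the original file): the usual way to
 * read one buffer synchronously with submit_bh(), mirroring what the
 * __bread()-style helpers do.  The caller is assumed to already hold a
 * reference on the buffer_head.
 */
#if 0	/* example only */
static int read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);		/* already valid, nothing to read */
		return 0;
	}
	get_bh(bh);			/* reference dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif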
2833 * ll_rw_block: low-level access to block devices (DEPRECATED)
2834 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2835 * @nr: number of &struct buffer_heads in the array
2836 * @bhs: array of pointers to &struct buffer_head
2838 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2839 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2840 * %SWRITE is like %WRITE, except that we make sure the *current* data in buffers
2841 * is sent to disk. The fourth %READA option is described in the documentation
2842 * for generic_make_request(), which ll_rw_block() calls.
2844 * This function drops any buffer that it cannot get a lock on (with the
2845 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2846 * clean when doing a write request, and any buffer that appears to be
2847 * up-to-date when doing a read request. Further, it marks as clean the buffers that
2848 * are processed for writing (the buffer cache won't assume that they are
2849 * actually clean until the buffer gets unlocked).
2851 * ll_rw_block sets b_end_io to a simple completion handler that marks
2852 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2855 * All of the buffers must be for the same device, and their size must be a
2856 * multiple of the current approved size for the device.
2858 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2862 for (i = 0; i < nr; i++) {
2863 struct buffer_head *bh = bhs[i];
2867 else if (test_set_buffer_locked(bh))
2870 if (rw == WRITE || rw == SWRITE) {
2871 if (test_clear_buffer_dirty(bh)) {
2872 bh->b_end_io = end_buffer_write_sync;
2874 submit_bh(WRITE, bh);
2878 if (!buffer_uptodate(bh)) {
2879 bh->b_end_io = end_buffer_read_sync;
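/*
 * Illustrative sketch (not part of the original file): a typical
 * ll_rw_block() caller starts reads on a batch of buffers and then waits
 * on each one, checking the uptodate bit afterwards.  All buffers are
 * assumed to belong to the same device.
 */
#if 0	/* example only */
static int read_buffers_sync(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);	/* skips locked and already-uptodate buffers */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
#endif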
2890 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2891 * and then start new I/O and then wait upon it. The caller must have a ref on
2894 int sync_dirty_buffer(struct buffer_head *bh)
2898 WARN_ON(atomic_read(&bh->b_count) < 1);
2900 if (test_clear_buffer_dirty(bh)) {
2902 bh->b_end_io = end_buffer_write_sync;
2903 ret = submit_bh(WRITE, bh);
2905 if (buffer_eopnotsupp(bh)) {
2906 clear_buffer_eopnotsupp(bh);
2909 if (!ret && !buffer_uptodate(bh))
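/*
 * Illustrative sketch (not part of the original file): the usual
 * data-integrity pattern for a single metadata buffer -- modify it, mark
 * it dirty, then force it to disk and check the result.  The "myfs" name
 * is hypothetical.
 */
#if 0	/* example only */
static int myfs_update_super(struct buffer_head *bh)
{
	/* ... modify bh->b_data here ... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* waits for any old I/O, then the new write */
}
#endif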
2918 * try_to_free_buffers() checks if all the buffers on this particular page
2919 * are unused, and releases them if so.
2921 * Exclusion against try_to_free_buffers may be obtained by either
2922 * locking the page or by holding its mapping's private_lock.
2924 * If the page is dirty but all the buffers are clean then we need to
2925 * be sure to mark the page clean as well. This is because the page
2926 * may be against a block device, and a later reattachment of buffers
2927 * to a dirty page will set *all* buffers dirty, which would corrupt
2928 * filesystem data on the same device.
2930 * The same applies to regular filesystem pages: if all the buffers are
2931 * clean then we set the page clean and proceed. To do that, we require
2932 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2935 * try_to_free_buffers() is non-blocking.
2937 static inline int buffer_busy(struct buffer_head *bh)
2939 return atomic_read(&bh->b_count) |
2940 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2944 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2946 struct buffer_head *head = page_buffers(page);
2947 struct buffer_head *bh;
2951 if (buffer_write_io_error(bh) && page->mapping)
2952 set_bit(AS_EIO, &page->mapping->flags);
2953 if (buffer_busy(bh))
2955 bh = bh->b_this_page;
2956 } while (bh != head);
2959 struct buffer_head *next = bh->b_this_page;
2961 if (!list_empty(&bh->b_assoc_buffers))
2962 __remove_assoc_queue(bh);
2964 } while (bh != head);
2965 *buffers_to_free = head;
2966 __clear_page_buffers(page);
2972 int try_to_free_buffers(struct page *page)
2974 struct address_space * const mapping = page->mapping;
2975 struct buffer_head *buffers_to_free = NULL;
2978 BUG_ON(!PageLocked(page));
2979 if (PageWriteback(page))
2982 if (mapping == NULL) { /* can this still happen? */
2983 ret = drop_buffers(page, &buffers_to_free);
2987 spin_lock(&mapping->private_lock);
2988 ret = drop_buffers(page, &buffers_to_free);
2991 * If the filesystem writes its buffers by hand (eg ext3)
2992 * then we can have clean buffers against a dirty page. We
2993 * clean the page here; otherwise later reattachment of buffers
2994 * could encounter a non-uptodate page, which is unresolvable.
2995 * This only applies in the rare case where try_to_free_buffers
2996 * succeeds but the page is not freed.
2998 clear_page_dirty(page);
3000 spin_unlock(&mapping->private_lock);
3002 if (buffers_to_free) {
3003 struct buffer_head *bh = buffers_to_free;
3006 struct buffer_head *next = bh->b_this_page;
3007 free_buffer_head(bh);
3009 } while (bh != buffers_to_free);
3013 EXPORT_SYMBOL(try_to_free_buffers);
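/*
 * Illustrative sketch (not part of the original file): per the exclusion
 * rules above, a filesystem ->releasepage() method calls
 * try_to_free_buffers() with the page locked.  A filesystem that needs no
 * extra bookkeeping ("myfs" is hypothetical) could simply do:
 */
#if 0	/* example only */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* The page lock held by the caller provides the required exclusion. */
	return try_to_free_buffers(page);
}
#endif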
3015 int block_sync_page(struct page *page)
3017 struct address_space *mapping;
3020 mapping = page_mapping(page);
3022 blk_run_backing_dev(mapping->backing_dev_info, page);
3027 * There are no bdflush tunables left. But distributions are
3028 * still running obsolete flush daemons, so we terminate them here.
3030 * Use of bdflush() is deprecated and will be removed in a future kernel.
3031 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3033 asmlinkage long sys_bdflush(int func, long data)
3035 static int msg_count;
3037 if (!capable(CAP_SYS_ADMIN))
3040 if (msg_count < 5) {
3043 "warning: process `%s' used the obsolete bdflush"
3044 " system call\n", current->comm);
3045 printk(KERN_INFO "Fix your initscripts?\n");
3054 * Migration function for pages with buffers. This function can only be used
3055 * if the underlying filesystem guarantees that no other references to "page"
3058 #ifdef CONFIG_MIGRATION
3059 int buffer_migrate_page(struct page *newpage, struct page *page)
3061 struct address_space *mapping = page->mapping;
3062 struct buffer_head *bh, *head;
3067 if (!page_has_buffers(page))
3068 return migrate_page(newpage, page);
3070 head = page_buffers(page);
3072 if (migrate_page_remove_references(newpage, page, 3))
3079 bh = bh->b_this_page;
3081 } while (bh != head);
3083 ClearPagePrivate(page);
3084 set_page_private(newpage, page_private(page));
3085 set_page_private(page, 0);
3091 set_bh_page(bh, newpage, bh_offset(bh));
3092 bh = bh->b_this_page;
3094 } while (bh != head);
3096 SetPagePrivate(newpage);
3098 migrate_page_copy(newpage, page);
3104 bh = bh->b_this_page;
3106 } while (bh != head);
3110 EXPORT_SYMBOL(buffer_migrate_page);
3114 * Buffer-head allocation
3116 static kmem_cache_t *bh_cachep;
3119 * Once the number of bh's in the machine exceeds this level, we start
3120 * stripping them in writeback.
3122 static int max_buffer_heads;
3124 int buffer_heads_over_limit;
3126 struct bh_accounting {
3127 int nr; /* Number of live bh's */
3128 int ratelimit; /* Limit cacheline bouncing */
3131 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3133 static void recalc_bh_state(void)
3138 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3140 __get_cpu_var(bh_accounting).ratelimit = 0;
3142 tot += per_cpu(bh_accounting, i).nr;
3143 buffer_heads_over_limit = (tot > max_buffer_heads);
3146 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3148 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3150 get_cpu_var(bh_accounting).nr++;
3152 put_cpu_var(bh_accounting);
3156 EXPORT_SYMBOL(alloc_buffer_head);
3158 void free_buffer_head(struct buffer_head *bh)
3160 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3161 kmem_cache_free(bh_cachep, bh);
3162 get_cpu_var(bh_accounting).nr--;
3164 put_cpu_var(bh_accounting);
3166 EXPORT_SYMBOL(free_buffer_head);
3169 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3171 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3172 SLAB_CTOR_CONSTRUCTOR) {
3173 struct buffer_head * bh = (struct buffer_head *)data;
3175 memset(bh, 0, sizeof(*bh));
3176 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3180 #ifdef CONFIG_HOTPLUG_CPU
3181 static void buffer_exit_cpu(int cpu)
3184 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3186 for (i = 0; i < BH_LRU_SIZE; i++) {
3192 static int buffer_cpu_notify(struct notifier_block *self,
3193 unsigned long action, void *hcpu)
3195 if (action == CPU_DEAD)
3196 buffer_exit_cpu((unsigned long)hcpu);
3199 #endif /* CONFIG_HOTPLUG_CPU */
3201 void __init buffer_init(void)
3205 bh_cachep = kmem_cache_create("buffer_head",
3206 sizeof(struct buffer_head), 0,
3207 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
3210 * Limit the bh occupancy to 10% of ZONE_NORMAL
3212 nrpages = (nr_free_buffer_pages() * 10) / 100;
3213 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3214 hotcpu_notifier(buffer_cpu_notify, 0);
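/*
 * Illustrative worked example (not in the original source) of the limit
 * computed above, assuming PAGE_SIZE == 4096, sizeof(struct buffer_head)
 * of roughly 96 bytes, and nr_free_buffer_pages() returning 200000:
 *
 *	nrpages          = 200000 * 10 / 100   = 20000
 *	max_buffer_heads = 20000 * (4096 / 96) = 20000 * 42 = 840000
 *
 * So once about 840k buffer_heads are live, buffer_heads_over_limit is set
 * and writeback starts stripping buffers from pages.  The exact struct size
 * varies by architecture and config, so the figures are only indicative.
 */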
3217 EXPORT_SYMBOL(__bforget);
3218 EXPORT_SYMBOL(__brelse);
3219 EXPORT_SYMBOL(__wait_on_buffer);
3220 EXPORT_SYMBOL(block_commit_write);
3221 EXPORT_SYMBOL(block_prepare_write);
3222 EXPORT_SYMBOL(block_read_full_page);
3223 EXPORT_SYMBOL(block_sync_page);
3224 EXPORT_SYMBOL(block_truncate_page);
3225 EXPORT_SYMBOL(block_write_full_page);
3226 EXPORT_SYMBOL(cont_prepare_write);
3227 EXPORT_SYMBOL(end_buffer_async_write);
3228 EXPORT_SYMBOL(end_buffer_read_sync);
3229 EXPORT_SYMBOL(end_buffer_write_sync);
3230 EXPORT_SYMBOL(file_fsync);
3231 EXPORT_SYMBOL(fsync_bdev);
3232 EXPORT_SYMBOL(generic_block_bmap);
3233 EXPORT_SYMBOL(generic_commit_write);
3234 EXPORT_SYMBOL(generic_cont_expand);
3235 EXPORT_SYMBOL(generic_cont_expand_simple);
3236 EXPORT_SYMBOL(init_buffer);
3237 EXPORT_SYMBOL(invalidate_bdev);
3238 EXPORT_SYMBOL(ll_rw_block);
3239 EXPORT_SYMBOL(mark_buffer_dirty);
3240 EXPORT_SYMBOL(submit_bh);
3241 EXPORT_SYMBOL(sync_dirty_buffer);
3242 EXPORT_SYMBOL(unlock_buffer);