4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/capability.h>
30 #include <linux/blkdev.h>
31 #include <linux/file.h>
32 #include <linux/quotaops.h>
33 #include <linux/highmem.h>
34 #include <linux/module.h>
35 #include <linux/writeback.h>
36 #include <linux/hash.h>
37 #include <linux/suspend.h>
38 #include <linux/buffer_head.h>
39 #include <linux/bio.h>
40 #include <linux/notifier.h>
41 #include <linux/cpu.h>
42 #include <linux/bitops.h>
43 #include <linux/mpage.h>
44 #include <linux/bit_spinlock.h>
46 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 static void invalidate_bh_lrus(void);
49 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
52 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
54 bh->b_end_io = handler;
55 bh->b_private = private;
58 static int sync_buffer(void *word)
60 struct block_device *bd;
61 struct buffer_head *bh
62 = container_of(word, struct buffer_head, b_state);
67 blk_run_address_space(bd->bd_inode->i_mapping);
72 void fastcall __lock_buffer(struct buffer_head *bh)
74 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
75 TASK_UNINTERRUPTIBLE);
77 EXPORT_SYMBOL(__lock_buffer);
79 void fastcall unlock_buffer(struct buffer_head *bh)
81 clear_buffer_locked(bh);
82 smp_mb__after_clear_bit();
83 wake_up_bit(&bh->b_state, BH_Lock);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
97 __clear_page_buffers(struct page *page)
99 ClearPagePrivate(page);
100 set_page_private(page, 0);
101 page_cache_release(page);
104 static void buffer_io_error(struct buffer_head *bh)
106 char b[BDEVNAME_SIZE];
108 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
109 bdevname(bh->b_bdev, b),
110 (unsigned long long)bh->b_blocknr);
114 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
115 * unlock the buffer. This is what ll_rw_block uses too.
117 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
120 set_buffer_uptodate(bh);
122 /* This happens, due to failed READA attempts. */
123 clear_buffer_uptodate(bh);
129 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
131 char b[BDEVNAME_SIZE];
134 set_buffer_uptodate(bh);
136 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
138 printk(KERN_WARNING "lost page write due to "
140 bdevname(bh->b_bdev, b));
142 set_buffer_write_io_error(bh);
143 clear_buffer_uptodate(bh);
150 * Write out and wait upon all the dirty data associated with a block
151 * device via its mapping. Does not take the superblock lock.
153 int sync_blockdev(struct block_device *bdev)
158 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
161 EXPORT_SYMBOL(sync_blockdev);
163 static void __fsync_super(struct super_block *sb)
165 sync_inodes_sb(sb, 0);
168 if (sb->s_dirt && sb->s_op->write_super)
169 sb->s_op->write_super(sb);
171 if (sb->s_op->sync_fs)
172 sb->s_op->sync_fs(sb, 1);
173 sync_blockdev(sb->s_bdev);
174 sync_inodes_sb(sb, 1);
178 * Write out and wait upon all dirty data associated with this
179 * superblock. Filesystem data as well as the underlying block
180 * device. Takes the superblock lock.
182 int fsync_super(struct super_block *sb)
185 return sync_blockdev(sb->s_bdev);
189 * Write out and wait upon all dirty data associated with this
190 * device. Filesystem data as well as the underlying block
191 * device. Takes the superblock lock.
193 int fsync_bdev(struct block_device *bdev)
195 struct super_block *sb = get_super(bdev);
197 int res = fsync_super(sb);
201 return sync_blockdev(bdev);
205 * freeze_bdev -- lock a filesystem and force it into a consistent state
206 * @bdev: blockdevice to lock
208 * This takes the block device bd_mount_mutex to make sure no new mounts
209 * happen on bdev until thaw_bdev() is called.
210 * If a superblock is found on this device, we take the s_umount semaphore
211 * on it to make sure nobody unmounts until the snapshot creation is done.
213 struct super_block *freeze_bdev(struct block_device *bdev)
215 struct super_block *sb;
217 mutex_lock(&bdev->bd_mount_mutex);
218 sb = get_super(bdev);
219 if (sb && !(sb->s_flags & MS_RDONLY)) {
220 sb->s_frozen = SB_FREEZE_WRITE;
225 sb->s_frozen = SB_FREEZE_TRANS;
228 sync_blockdev(sb->s_bdev);
230 if (sb->s_op->write_super_lockfs)
231 sb->s_op->write_super_lockfs(sb);
235 return sb; /* thaw_bdev releases s->s_umount and bd_mount_mutex */
237 EXPORT_SYMBOL(freeze_bdev);
240 * thaw_bdev -- unlock filesystem
241 * @bdev: blockdevice to unlock
242 * @sb: associated superblock
244 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
246 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
249 BUG_ON(sb->s_bdev != bdev);
251 if (sb->s_op->unlockfs)
252 sb->s_op->unlockfs(sb);
253 sb->s_frozen = SB_UNFROZEN;
255 wake_up(&sb->s_wait_unfrozen);
259 mutex_unlock(&bdev->bd_mount_mutex);
261 EXPORT_SYMBOL(thaw_bdev);
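/*
 * Illustrative sketch, not part of this file: how a snapshot/backup driver
 * might bracket its work with freeze_bdev()/thaw_bdev() above.
 * example_snapshot() is a hypothetical name; error handling and the actual
 * snapshot step are omitted.
 */
static int example_snapshot(struct block_device *bdev)
{
	struct super_block *sb;

	sb = freeze_bdev(bdev);	/* may return NULL if nothing is mounted */

	/* ... copy the now-consistent device contents here ... */

	thaw_bdev(bdev, sb);	/* releases bd_mount_mutex taken by freeze_bdev() */
	return 0;
}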
264 * sync everything. Start out by waking pdflush, because that writes back
265 * all queues in parallel.
267 static void do_sync(unsigned long wait)
270 sync_inodes(0); /* All mappings, inodes and their blockdevs */
272 sync_supers(); /* Write the superblocks */
273 sync_filesystems(0); /* Start syncing the filesystems */
274 sync_filesystems(wait); /* Waitingly sync the filesystems */
275 sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
277 printk("Emergency Sync complete\n");
278 if (unlikely(laptop_mode))
279 laptop_sync_completion();
282 asmlinkage long sys_sync(void)
288 void emergency_sync(void)
290 pdflush_operation(do_sync, 0);
294 * Generic function to fsync a file.
296 * filp may be NULL if called via the msync of a vma.
299 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
301 struct inode * inode = dentry->d_inode;
302 struct super_block * sb;
305 /* sync the inode to buffers */
306 ret = write_inode_now(inode, 0);
308 /* sync the superblock to buffers */
311 if (sb->s_op->write_super)
312 sb->s_op->write_super(sb);
315 /* .. finally sync the buffers to disk */
316 err = sync_blockdev(sb->s_bdev);
322 long do_fsync(struct file *file, int datasync)
326 struct address_space *mapping = file->f_mapping;
328 if (!file->f_op || !file->f_op->fsync) {
329 /* Why? We can still call filemap_fdatawrite */
334 ret = filemap_fdatawrite(mapping);
337 * We need to protect against concurrent writers, which could cause
338 * livelocks in fsync_buffers_list().
340 mutex_lock(&mapping->host->i_mutex);
341 err = file->f_op->fsync(file, file->f_dentry, datasync);
344 mutex_unlock(&mapping->host->i_mutex);
345 err = filemap_fdatawait(mapping);
352 static long __do_fsync(unsigned int fd, int datasync)
359 ret = do_fsync(file, datasync);
365 asmlinkage long sys_fsync(unsigned int fd)
367 return __do_fsync(fd, 0);
370 asmlinkage long sys_fdatasync(unsigned int fd)
372 return __do_fsync(fd, 1);
376 * Various filesystems appear to want __find_get_block to be non-blocking.
377 * But it's the page lock which protects the buffers. To get around this,
378 * we get exclusion from try_to_free_buffers with the blockdev mapping's
381 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
382 * may be quite high. This code could TryLock the page, and if that
383 * succeeds, there is no need to take private_lock. (But if
384 * private_lock is contended then so is mapping->tree_lock).
386 static struct buffer_head *
387 __find_get_block_slow(struct block_device *bdev, sector_t block)
389 struct inode *bd_inode = bdev->bd_inode;
390 struct address_space *bd_mapping = bd_inode->i_mapping;
391 struct buffer_head *ret = NULL;
393 struct buffer_head *bh;
394 struct buffer_head *head;
398 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
399 page = find_get_page(bd_mapping, index);
403 spin_lock(&bd_mapping->private_lock);
404 if (!page_has_buffers(page))
406 head = page_buffers(page);
409 if (bh->b_blocknr == block) {
414 if (!buffer_mapped(bh))
416 bh = bh->b_this_page;
417 } while (bh != head);
419 /* we might be here because some of the buffers on this page are
420 * not mapped. This is due to various races between
421 * file io on the block device and getblk. It gets dealt with
422 * elsewhere, don't buffer_error if we had some unmapped buffers
425 printk("__find_get_block_slow() failed. "
426 "block=%llu, b_blocknr=%llu\n",
427 (unsigned long long)block,
428 (unsigned long long)bh->b_blocknr);
429 printk("b_state=0x%08lx, b_size=%zu\n",
430 bh->b_state, bh->b_size);
431 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
434 spin_unlock(&bd_mapping->private_lock);
435 page_cache_release(page);
440 /* If invalidate_buffers() will trash dirty buffers, it means some kind
441 of fs corruption is going on. Trashing dirty data always implies losing
442 information that was supposed to be just stored on the physical layer
445 Thus invalidate_buffers in general usage is not allowed to trash
446 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
447 be preserved. These buffers are simply skipped.
449 We also skip buffers which are still in use. For example this can
450 happen if a userspace program is reading the block device.
452 NOTE: In the case where the user removed a removable-media disk even if
453 there's still dirty data not synced on disk (due to a bug in the device driver
454 or to an error of the user), by not destroying the dirty buffers we could
455 generate corruption also on the next media inserted; thus a parameter is
456 necessary to handle this case in the safest way possible (trying
457 not to corrupt the newly inserted disk with data belonging to
458 the old, now corrupted disk). Also, for a ramdisk the natural way
459 to release the ramdisk memory is to destroy its dirty buffers.
461 These are two special cases. Normal usage implies that the device driver
462 issues a sync on the device (without waiting for I/O completion) and
463 then an invalidate_buffers call that doesn't trash dirty buffers.
465 For handling cache coherency with the blkdev pagecache the 'update' case
466 has been introduced. It is needed to re-read from disk any pinned
467 buffer. NOTE: re-reading from disk is destructive, so we can do it only
468 when we assume nobody is changing the buffercache under our I/O and when
469 we think the disk contains more recent information than the buffercache.
470 The update == 1 pass marks the buffers we need to update; the update == 2
471 pass does the actual I/O. */
472 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
474 invalidate_bh_lrus();
476 * FIXME: what about destroy_dirty_buffers?
477 * We really want to use invalidate_inode_pages2() for
478 * that, but not until that's cleaned up.
480 invalidate_inode_pages(bdev->bd_inode->i_mapping);
484 * Kick pdflush then try to free up some ZONE_NORMAL memory.
486 static void free_more_memory(void)
491 wakeup_pdflush(1024);
494 for_each_online_pgdat(pgdat) {
495 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
497 try_to_free_pages(zones, GFP_NOFS);
502 * I/O completion handler for block_read_full_page() - pages
503 * which come unlocked at the end of I/O.
505 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
508 struct buffer_head *first;
509 struct buffer_head *tmp;
511 int page_uptodate = 1;
513 BUG_ON(!buffer_async_read(bh));
517 set_buffer_uptodate(bh);
519 clear_buffer_uptodate(bh);
520 if (printk_ratelimit())
526 * Be _very_ careful from here on. Bad things can happen if
527 * two buffer heads end IO at almost the same time and both
528 * decide that the page is now completely done.
530 first = page_buffers(page);
531 local_irq_save(flags);
532 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
533 clear_buffer_async_read(bh);
537 if (!buffer_uptodate(tmp))
539 if (buffer_async_read(tmp)) {
540 BUG_ON(!buffer_locked(tmp));
543 tmp = tmp->b_this_page;
545 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
546 local_irq_restore(flags);
549 * If none of the buffers had errors and they are all
550 * uptodate then we can set the page uptodate.
552 if (page_uptodate && !PageError(page))
553 SetPageUptodate(page);
558 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
559 local_irq_restore(flags);
564 * Completion handler for block_write_full_page() - pages which are unlocked
565 * during I/O, and which have PageWriteback cleared upon I/O completion.
567 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
569 char b[BDEVNAME_SIZE];
571 struct buffer_head *first;
572 struct buffer_head *tmp;
575 BUG_ON(!buffer_async_write(bh));
579 set_buffer_uptodate(bh);
581 if (printk_ratelimit()) {
583 printk(KERN_WARNING "lost page write due to "
585 bdevname(bh->b_bdev, b));
587 set_bit(AS_EIO, &page->mapping->flags);
588 clear_buffer_uptodate(bh);
592 first = page_buffers(page);
593 local_irq_save(flags);
594 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
596 clear_buffer_async_write(bh);
598 tmp = bh->b_this_page;
600 if (buffer_async_write(tmp)) {
601 BUG_ON(!buffer_locked(tmp));
604 tmp = tmp->b_this_page;
606 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
607 local_irq_restore(flags);
608 end_page_writeback(page);
612 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
613 local_irq_restore(flags);
618 * If a page's buffers are under async read-in (end_buffer_async_read
619 * completion) then there is a possibility that another thread of
620 * control could lock one of the buffers after it has completed
621 * but while some of the other buffers have not completed. This
622 * locked buffer would confuse end_buffer_async_read() into not unlocking
623 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
624 * that this buffer is not under async I/O.
626 * The page comes unlocked when it has no locked buffer_async buffers
629 * PageLocked prevents anyone from starting new async I/O reads against any of
632 * PageWriteback is used to prevent simultaneous writeout of the same
635 * PageLocked prevents anyone from starting writeback of a page which is
636 * under read I/O (PageWriteback is only ever set against a locked page).
638 static void mark_buffer_async_read(struct buffer_head *bh)
640 bh->b_end_io = end_buffer_async_read;
641 set_buffer_async_read(bh);
644 void mark_buffer_async_write(struct buffer_head *bh)
646 bh->b_end_io = end_buffer_async_write;
647 set_buffer_async_write(bh);
649 EXPORT_SYMBOL(mark_buffer_async_write);
653 * fs/buffer.c contains helper functions for buffer-backed address space's
654 * fsync functions. A common requirement for buffer-based filesystems is
655 * that certain data from the backing blockdev needs to be written out for
656 * a successful fsync(). For example, ext2 indirect blocks need to be
657 * written back and waited upon before fsync() returns.
659 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
660 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
661 * management of a list of dependent buffers at ->i_mapping->private_list.
663 * Locking is a little subtle: try_to_free_buffers() will remove buffers
664 * from their controlling inode's queue when they are being freed. But
665 * try_to_free_buffers() will be operating against the *blockdev* mapping
666 * at the time, not against the S_ISREG file which depends on those buffers.
667 * So the locking for private_list is via the private_lock in the address_space
668 * which backs the buffers. Which is different from the address_space
669 * against which the buffers are listed. So for a particular address_space,
670 * mapping->private_lock does *not* protect mapping->private_list! In fact,
671 * mapping->private_list will always be protected by the backing blockdev's
674 * Which introduces a requirement: all buffers on an address_space's
675 * ->private_list must be from the same address_space: the blockdev's.
677 * address_spaces which do not place buffers at ->private_list via these
678 * utility functions are free to use private_lock and private_list for
679 * whatever they want. The only requirement is that list_empty(private_list)
680 * be true at clear_inode() time.
682 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
683 * filesystems should do that. invalidate_inode_buffers() should just go
684 * BUG_ON(!list_empty).
686 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
687 * take an address_space, not an inode. And it should be called
688 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
691 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
692 * list if it is already on a list. Because if the buffer is on a list,
693 * it *must* already be on the right one. If not, the filesystem is being
694 * silly. This will save a ton of locking. But first we have to ensure
695 * that buffers are taken *off* the old inode's list when they are freed
696 * (presumably in truncate). That requires careful auditing of all
697 * filesystems (do it inside bforget()). It could also be done by bringing
702 * The buffer's backing address_space's private_lock must be held
704 static inline void __remove_assoc_queue(struct buffer_head *bh)
706 list_del_init(&bh->b_assoc_buffers);
709 int inode_has_buffers(struct inode *inode)
711 return !list_empty(&inode->i_data.private_list);
715 * osync is designed to support O_SYNC io. It waits synchronously for
716 * all already-submitted IO to complete, but does not queue any new
717 * writes to the disk.
719 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
720 * you dirty the buffers, and then use osync_inode_buffers to wait for
721 * completion. Any other dirty buffers which are not yet queued for
722 * write will not be flushed to disk by the osync.
724 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
726 struct buffer_head *bh;
732 list_for_each_prev(p, list) {
734 if (buffer_locked(bh)) {
738 if (!buffer_uptodate(bh))
750 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
752 * @mapping: the mapping which wants those buffers written
754 * Starts I/O against the buffers at mapping->private_list, and waits upon
757 * Basically, this is a convenience function for fsync().
758 * @mapping is a file or directory which needs those buffers to be written for
759 * a successful fsync().
761 int sync_mapping_buffers(struct address_space *mapping)
763 struct address_space *buffer_mapping = mapping->assoc_mapping;
765 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
768 return fsync_buffers_list(&buffer_mapping->private_lock,
769 &mapping->private_list);
771 EXPORT_SYMBOL(sync_mapping_buffers);
774 * Called when we've recently written block `bblock', and it is known that
775 * `bblock' was for a buffer_boundary() buffer. This means that the block at
776 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
777 * dirty, schedule it for IO. So that indirects merge nicely with their data.
779 void write_boundary_block(struct block_device *bdev,
780 sector_t bblock, unsigned blocksize)
782 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
784 if (buffer_dirty(bh))
785 ll_rw_block(WRITE, 1, &bh);
790 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
792 struct address_space *mapping = inode->i_mapping;
793 struct address_space *buffer_mapping = bh->b_page->mapping;
795 mark_buffer_dirty(bh);
796 if (!mapping->assoc_mapping) {
797 mapping->assoc_mapping = buffer_mapping;
799 BUG_ON(mapping->assoc_mapping != buffer_mapping);
801 if (list_empty(&bh->b_assoc_buffers)) {
802 spin_lock(&buffer_mapping->private_lock);
803 list_move_tail(&bh->b_assoc_buffers,
804 &mapping->private_list);
805 spin_unlock(&buffer_mapping->private_lock);
808 EXPORT_SYMBOL(mark_buffer_dirty_inode);
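/*
 * Illustrative sketch, not part of this file: the intended pairing of
 * mark_buffer_dirty_inode() with sync_mapping_buffers().  A filesystem
 * dirties dependent metadata (e.g. an indirect block) against the owning
 * inode; its ->fsync then writes out and waits upon that private_list.
 * The example_* names and the __le32 block-pointer layout are assumptions
 * for the example only.
 */
static void example_set_block_pointer(struct inode *inode,
		struct buffer_head *bh, int slot, __le32 blocknr)
{
	((__le32 *)bh->b_data)[slot] = blocknr;	/* modify the metadata block */
	mark_buffer_dirty_inode(bh, inode);	/* queue on the inode's list */
}

static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;

	/* write out and wait upon the buffers queued above */
	return sync_mapping_buffers(inode->i_mapping);
}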
811 * Add a page to the dirty page list.
813 * It is a sad fact of life that this function is called from several places
814 * deeply under spinlocking. It may not sleep.
816 * If the page has buffers, the uptodate buffers are set dirty, to preserve
817 * dirty-state coherency between the page and the buffers. If the page does
818 * not have buffers then when they are later attached they will all be set
821 * The buffers are dirtied before the page is dirtied. There's a small race
822 * window in which a writepage caller may see the page cleanness but not the
823 * buffer dirtiness. That's fine. If this code were to set the page dirty
824 * before the buffers, a concurrent writepage caller could clear the page dirty
825 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
826 * page on the dirty page list.
828 * We use private_lock to lock against try_to_free_buffers while using the
829 * page's buffer list. Also use this to protect against clean buffers being
830 * added to the page after it was set dirty.
832 * FIXME: may need to call ->reservepage here as well. That's rather up to the
833 * address_space though.
835 int __set_page_dirty_buffers(struct page *page)
837 struct address_space * const mapping = page->mapping;
839 spin_lock(&mapping->private_lock);
840 if (page_has_buffers(page)) {
841 struct buffer_head *head = page_buffers(page);
842 struct buffer_head *bh = head;
845 set_buffer_dirty(bh);
846 bh = bh->b_this_page;
847 } while (bh != head);
849 spin_unlock(&mapping->private_lock);
851 if (!TestSetPageDirty(page)) {
852 write_lock_irq(&mapping->tree_lock);
853 if (page->mapping) { /* Race with truncate? */
854 if (mapping_cap_account_dirty(mapping))
855 inc_page_state(nr_dirty);
856 radix_tree_tag_set(&mapping->page_tree,
858 PAGECACHE_TAG_DIRTY);
860 write_unlock_irq(&mapping->tree_lock);
861 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
866 EXPORT_SYMBOL(__set_page_dirty_buffers);
869 * Write out and wait upon a list of buffers.
871 * We have conflicting pressures: we want to make sure that all
872 * initially dirty buffers get waited on, but that any subsequently
873 * dirtied buffers don't. After all, we don't want fsync to last
874 * forever if somebody is actively writing to the file.
876 * Do this in two main stages: first we copy dirty buffers to a
877 * temporary inode list, queueing the writes as we go. Then we clean
878 * up, waiting for those writes to complete.
880 * During this second stage, any subsequent updates to the file may end
881 * up refiling the buffer on the original inode's dirty list again, so
882 * there is a chance we will end up with a buffer queued for write but
883 * not yet completed on that list. So, as a final cleanup we go through
884 * the osync code to catch these locked, dirty buffers without requeuing
885 * any newly dirty buffers for write.
887 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
889 struct buffer_head *bh;
890 struct list_head tmp;
893 INIT_LIST_HEAD(&tmp);
896 while (!list_empty(list)) {
897 bh = BH_ENTRY(list->next);
898 list_del_init(&bh->b_assoc_buffers);
899 if (buffer_dirty(bh) || buffer_locked(bh)) {
900 list_add(&bh->b_assoc_buffers, &tmp);
901 if (buffer_dirty(bh)) {
905 * Ensure any pending I/O completes so that
906 * ll_rw_block() actually writes the current
907 * contents - it is a noop if I/O is still in
908 * flight on potentially older contents.
910 ll_rw_block(SWRITE, 1, &bh);
917 while (!list_empty(&tmp)) {
918 bh = BH_ENTRY(tmp.prev);
919 __remove_assoc_queue(bh);
923 if (!buffer_uptodate(bh))
930 err2 = osync_buffers_list(lock, list);
938 * Invalidate any and all dirty buffers on a given inode. We are
939 * probably unmounting the fs, but that doesn't mean we have already
940 * done a sync(). Just drop the buffers from the inode list.
942 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
943 * assumes that all the buffers are against the blockdev. Not true
946 void invalidate_inode_buffers(struct inode *inode)
948 if (inode_has_buffers(inode)) {
949 struct address_space *mapping = &inode->i_data;
950 struct list_head *list = &mapping->private_list;
951 struct address_space *buffer_mapping = mapping->assoc_mapping;
953 spin_lock(&buffer_mapping->private_lock);
954 while (!list_empty(list))
955 __remove_assoc_queue(BH_ENTRY(list->next));
956 spin_unlock(&buffer_mapping->private_lock);
961 * Remove any clean buffers from the inode's buffer list. This is called
962 * when we're trying to free the inode itself. Those buffers can pin it.
964 * Returns true if all buffers were removed.
966 int remove_inode_buffers(struct inode *inode)
970 if (inode_has_buffers(inode)) {
971 struct address_space *mapping = &inode->i_data;
972 struct list_head *list = &mapping->private_list;
973 struct address_space *buffer_mapping = mapping->assoc_mapping;
975 spin_lock(&buffer_mapping->private_lock);
976 while (!list_empty(list)) {
977 struct buffer_head *bh = BH_ENTRY(list->next);
978 if (buffer_dirty(bh)) {
982 __remove_assoc_queue(bh);
984 spin_unlock(&buffer_mapping->private_lock);
990 * Create the appropriate buffers when given a page for the data area and
991 * the size of each buffer.. Use the bh->b_this_page linked list to
992 * follow the buffers created. Return NULL if unable to create more
995 * The retry flag is used to differentiate async IO (paging, swapping)
996 * which may not fail from ordinary buffer allocations.
998 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1001 struct buffer_head *bh, *head;
1007 while ((offset -= size) >= 0) {
1008 bh = alloc_buffer_head(GFP_NOFS);
1013 bh->b_this_page = head;
1018 atomic_set(&bh->b_count, 0);
1019 bh->b_private = NULL;
1022 /* Link the buffer to its page */
1023 set_bh_page(bh, page, offset);
1025 init_buffer(bh, NULL, NULL);
1029 * In case anything failed, we just free everything we got.
1035 head = head->b_this_page;
1036 free_buffer_head(bh);
1041 * Return failure for non-async IO requests. Async IO requests
1042 * are not allowed to fail, so we have to wait until buffer heads
1043 * become available. But we don't want tasks sleeping with
1044 * partially complete buffers, so all were released above.
1049 /* We're _really_ low on memory. Now we just
1050 * wait for old buffer heads to become free due to
1051 * finishing IO. Since this is an async request and
1052 * the reserve list is empty, we're sure there are
1053 * async buffer heads in use.
1058 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1061 link_dev_buffers(struct page *page, struct buffer_head *head)
1063 struct buffer_head *bh, *tail;
1068 bh = bh->b_this_page;
1070 tail->b_this_page = head;
1071 attach_page_buffers(page, head);
1075 * Initialise the state of a blockdev page's buffers.
1078 init_page_buffers(struct page *page, struct block_device *bdev,
1079 sector_t block, int size)
1081 struct buffer_head *head = page_buffers(page);
1082 struct buffer_head *bh = head;
1083 int uptodate = PageUptodate(page);
1086 if (!buffer_mapped(bh)) {
1087 init_buffer(bh, NULL, NULL);
1089 bh->b_blocknr = block;
1091 set_buffer_uptodate(bh);
1092 set_buffer_mapped(bh);
1095 bh = bh->b_this_page;
1096 } while (bh != head);
1100 * Create the page-cache page that contains the requested block.
1102 * This is used purely for blockdev mappings.
1104 static struct page *
1105 grow_dev_page(struct block_device *bdev, sector_t block,
1106 pgoff_t index, int size)
1108 struct inode *inode = bdev->bd_inode;
1110 struct buffer_head *bh;
1112 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1116 BUG_ON(!PageLocked(page));
1118 if (page_has_buffers(page)) {
1119 bh = page_buffers(page);
1120 if (bh->b_size == size) {
1121 init_page_buffers(page, bdev, block, size);
1124 if (!try_to_free_buffers(page))
1129 * Allocate some buffers for this page
1131 bh = alloc_page_buffers(page, size, 0);
1136 * Link the page to the buffers and initialise them. Take the
1137 * lock to be atomic wrt __find_get_block(), which does not
1138 * run under the page lock.
1140 spin_lock(&inode->i_mapping->private_lock);
1141 link_dev_buffers(page, bh);
1142 init_page_buffers(page, bdev, block, size);
1143 spin_unlock(&inode->i_mapping->private_lock);
1149 page_cache_release(page);
1154 * Create buffers for the specified block device block's page. If
1155 * that page was dirty, the buffers are set dirty also.
1157 * Except that's a bug. Attaching dirty buffers to a dirty
1158 * blockdev's page can result in filesystem corruption, because
1159 * some of those buffers may be aliases of filesystem data.
1160 * grow_dev_page() will go BUG() if this happens.
1163 grow_buffers(struct block_device *bdev, sector_t block, int size)
1172 } while ((size << sizebits) < PAGE_SIZE);
1174 index = block >> sizebits;
1175 block = index << sizebits;
1177 /* Create a page with the proper size buffers.. */
1178 page = grow_dev_page(bdev, block, index, size);
1182 page_cache_release(page);
1186 static struct buffer_head *
1187 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1189 /* Size must be multiple of hard sectorsize */
1190 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1191 (size < 512 || size > PAGE_SIZE))) {
1192 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1194 printk(KERN_ERR "hardsect size: %d\n",
1195 bdev_hardsect_size(bdev));
1202 struct buffer_head * bh;
1204 bh = __find_get_block(bdev, block, size);
1208 if (!grow_buffers(bdev, block, size))
1214 * The relationship between dirty buffers and dirty pages:
1216 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1217 * the page is tagged dirty in its radix tree.
1219 * At all times, the dirtiness of the buffers represents the dirtiness of
1220 * subsections of the page. If the page has buffers, the page dirty bit is
1221 * merely a hint about the true dirty state.
1223 * When a page is set dirty in its entirety, all its buffers are marked dirty
1224 * (if the page has buffers).
1226 * When a buffer is marked dirty, its page is dirtied, but the page's other
1229 * Also. When blockdev buffers are explicitly read with bread(), they
1230 * individually become uptodate. But their backing page remains not
1231 * uptodate - even if all of its buffers are uptodate. A subsequent
1232 * block_read_full_page() against that page will discover all the uptodate
1233 * buffers, will set the page uptodate and will perform no I/O.
1237 * mark_buffer_dirty - mark a buffer_head as needing writeout
1238 * @bh: the buffer_head to mark dirty
1240 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1241 * backing page dirty, then tag the page as dirty in its address_space's radix
1242 * tree and then attach the address_space's inode to its superblock's dirty
1245 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1246 * mapping->tree_lock and the global inode_lock.
1248 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1250 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1251 __set_page_dirty_nobuffers(bh->b_page);
1255 * Decrement a buffer_head's reference count. If all buffers against a page
1256 * have zero reference count, are clean and unlocked, and if the page is clean
1257 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1258 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1259 * a page but it ends up not being freed, and buffers may later be reattached).
1261 void __brelse(struct buffer_head * buf)
1263 if (atomic_read(&buf->b_count)) {
1267 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1272 * bforget() is like brelse(), except it discards any
1273 * potentially dirty data.
1275 void __bforget(struct buffer_head *bh)
1277 clear_buffer_dirty(bh);
1278 if (!list_empty(&bh->b_assoc_buffers)) {
1279 struct address_space *buffer_mapping = bh->b_page->mapping;
1281 spin_lock(&buffer_mapping->private_lock);
1282 list_del_init(&bh->b_assoc_buffers);
1283 spin_unlock(&buffer_mapping->private_lock);
1288 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1291 if (buffer_uptodate(bh)) {
1296 bh->b_end_io = end_buffer_read_sync;
1297 submit_bh(READ, bh);
1299 if (buffer_uptodate(bh))
1307 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1308 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1309 * refcount elevated by one when they're in an LRU. A buffer can only appear
1310 * once in a particular CPU's LRU. A single buffer can be present in multiple
1311 * CPU's LRUs at the same time.
1313 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1314 * sb_find_get_block().
1316 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1317 * a local interrupt disable for that.
1320 #define BH_LRU_SIZE 8
1323 struct buffer_head *bhs[BH_LRU_SIZE];
1326 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1329 #define bh_lru_lock() local_irq_disable()
1330 #define bh_lru_unlock() local_irq_enable()
1332 #define bh_lru_lock() preempt_disable()
1333 #define bh_lru_unlock() preempt_enable()
1336 static inline void check_irqs_on(void)
1338 #ifdef irqs_disabled
1339 BUG_ON(irqs_disabled());
1344 * The LRU management algorithm is dopey-but-simple. Sorry.
1346 static void bh_lru_install(struct buffer_head *bh)
1348 struct buffer_head *evictee = NULL;
1353 lru = &__get_cpu_var(bh_lrus);
1354 if (lru->bhs[0] != bh) {
1355 struct buffer_head *bhs[BH_LRU_SIZE];
1361 for (in = 0; in < BH_LRU_SIZE; in++) {
1362 struct buffer_head *bh2 = lru->bhs[in];
1367 if (out >= BH_LRU_SIZE) {
1368 BUG_ON(evictee != NULL);
1375 while (out < BH_LRU_SIZE)
1377 memcpy(lru->bhs, bhs, sizeof(bhs));
1386 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1388 static struct buffer_head *
1389 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1391 struct buffer_head *ret = NULL;
1397 lru = &__get_cpu_var(bh_lrus);
1398 for (i = 0; i < BH_LRU_SIZE; i++) {
1399 struct buffer_head *bh = lru->bhs[i];
1401 if (bh && bh->b_bdev == bdev &&
1402 bh->b_blocknr == block && bh->b_size == size) {
1405 lru->bhs[i] = lru->bhs[i - 1];
1420 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1421 * it in the LRU and mark it as accessed. If it is not present then return
1424 struct buffer_head *
1425 __find_get_block(struct block_device *bdev, sector_t block, int size)
1427 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1430 bh = __find_get_block_slow(bdev, block);
1438 EXPORT_SYMBOL(__find_get_block);
1441 * __getblk will locate (and, if necessary, create) the buffer_head
1442 * which corresponds to the passed block_device, block and size. The
1443 * returned buffer has its reference count incremented.
1445 * __getblk() cannot fail - it just keeps trying. If you pass it an
1446 * illegal block number, __getblk() will happily return a buffer_head
1447 * which represents the non-existent block. Very weird.
1449 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1450 * attempt is failing. FIXME, perhaps?
1452 struct buffer_head *
1453 __getblk(struct block_device *bdev, sector_t block, int size)
1455 struct buffer_head *bh = __find_get_block(bdev, block, size);
1459 bh = __getblk_slow(bdev, block, size);
1462 EXPORT_SYMBOL(__getblk);
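/*
 * Illustrative sketch, not part of this file: a typical write path built
 * on __getblk().  A metadata block is obtained without reading it, filled
 * in memory, marked uptodate and dirty, and released; writeback submits
 * the I/O later.  example_init_block() is a hypothetical helper.
 */
static void example_init_block(struct block_device *bdev, sector_t block, int size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	lock_buffer(bh);
	memset(bh->b_data, 0, size);	/* overwrite the whole block in memory */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* flushed by writeback or fsync later */
	brelse(bh);
}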
1465 * Do async read-ahead on a buffer..
1467 void __breadahead(struct block_device *bdev, sector_t block, int size)
1469 struct buffer_head *bh = __getblk(bdev, block, size);
1471 ll_rw_block(READA, 1, &bh);
1475 EXPORT_SYMBOL(__breadahead);
1478 * __bread() - reads a specified block and returns the bh
1479 * @bdev: the block_device to read from
1480 * @block: number of block
1481 * @size: size (in bytes) to read
1483 * Reads a specified block, and returns buffer head that contains it.
1484 * It returns NULL if the block was unreadable.
1486 struct buffer_head *
1487 __bread(struct block_device *bdev, sector_t block, int size)
1489 struct buffer_head *bh = __getblk(bdev, block, size);
1491 if (likely(bh) && !buffer_uptodate(bh))
1492 bh = __bread_slow(bh);
1495 EXPORT_SYMBOL(__bread);
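/*
 * Illustrative sketch, not part of this file: a synchronous read of one
 * metadata block via __bread().  example_read_block() is a hypothetical
 * helper; callers of sb_bread() follow the same pattern.
 */
static int example_read_block(struct block_device *bdev, sector_t block,
		int size, void *buf)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)		/* the block was unreadable */
		return -EIO;
	memcpy(buf, bh->b_data, size);
	brelse(bh);		/* drop the reference taken by __bread() */
	return 0;
}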
1498 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1499 * This doesn't race because it runs in each cpu either in irq
1500 * or with preempt disabled.
1502 static void invalidate_bh_lru(void *arg)
1504 struct bh_lru *b = &get_cpu_var(bh_lrus);
1507 for (i = 0; i < BH_LRU_SIZE; i++) {
1511 put_cpu_var(bh_lrus);
1514 static void invalidate_bh_lrus(void)
1516 on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1519 void set_bh_page(struct buffer_head *bh,
1520 struct page *page, unsigned long offset)
1523 BUG_ON(offset >= PAGE_SIZE);
1524 if (PageHighMem(page))
1526 * This catches illegal uses and preserves the offset:
1528 bh->b_data = (char *)(0 + offset);
1530 bh->b_data = page_address(page) + offset;
1532 EXPORT_SYMBOL(set_bh_page);
1535 * Called when truncating a buffer on a page completely.
1537 static void discard_buffer(struct buffer_head * bh)
1540 clear_buffer_dirty(bh);
1542 clear_buffer_mapped(bh);
1543 clear_buffer_req(bh);
1544 clear_buffer_new(bh);
1545 clear_buffer_delay(bh);
1550 * try_to_release_page() - release old fs-specific metadata on a page
1552 * @page: the page which the kernel is trying to free
1553 * @gfp_mask: memory allocation flags (and I/O mode)
1555 * The address_space is to try to release any data against the page
1556 * (presumably at page->private). If the release was successful, return `1'.
1557 * Otherwise return zero.
1559 * The @gfp_mask argument specifies whether I/O may be performed to release
1560 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1562 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1564 int try_to_release_page(struct page *page, gfp_t gfp_mask)
1566 struct address_space * const mapping = page->mapping;
1568 BUG_ON(!PageLocked(page));
1569 if (PageWriteback(page))
1572 if (mapping && mapping->a_ops->releasepage)
1573 return mapping->a_ops->releasepage(page, gfp_mask);
1574 return try_to_free_buffers(page);
1576 EXPORT_SYMBOL(try_to_release_page);
1579 * block_invalidatepage - invalidate part or all of a buffer-backed page
1581 * @page: the page which is affected
1582 * @offset: the index of the truncation point
1584 * block_invalidatepage() is called when all or part of the page has become
1585 * invalidated by a truncate operation.
1587 * block_invalidatepage() does not have to release all buffers, but it must
1588 * ensure that no dirty buffer is left outside @offset and that no I/O
1589 * is underway against any of the blocks which are outside the truncation
1590 * point. Because the caller is about to free (and possibly reuse) those
1593 void block_invalidatepage(struct page *page, unsigned long offset)
1595 struct buffer_head *head, *bh, *next;
1596 unsigned int curr_off = 0;
1598 BUG_ON(!PageLocked(page));
1599 if (!page_has_buffers(page))
1602 head = page_buffers(page);
1605 unsigned int next_off = curr_off + bh->b_size;
1606 next = bh->b_this_page;
1609 * is this block fully invalidated?
1611 if (offset <= curr_off)
1613 curr_off = next_off;
1615 } while (bh != head);
1618 * We release buffers only if the entire page is being invalidated.
1619 * The get_block cached value has been unconditionally invalidated,
1620 * so real IO is not possible anymore.
1623 try_to_release_page(page, 0);
1627 EXPORT_SYMBOL(block_invalidatepage);
1629 void do_invalidatepage(struct page *page, unsigned long offset)
1631 void (*invalidatepage)(struct page *, unsigned long);
1632 invalidatepage = page->mapping->a_ops->invalidatepage ? :
1633 block_invalidatepage;
1634 (*invalidatepage)(page, offset);
1638 * We attach and possibly dirty the buffers atomically wrt
1639 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1640 * is already excluded via the page lock.
1642 void create_empty_buffers(struct page *page,
1643 unsigned long blocksize, unsigned long b_state)
1645 struct buffer_head *bh, *head, *tail;
1647 head = alloc_page_buffers(page, blocksize, 1);
1650 bh->b_state |= b_state;
1652 bh = bh->b_this_page;
1654 tail->b_this_page = head;
1656 spin_lock(&page->mapping->private_lock);
1657 if (PageUptodate(page) || PageDirty(page)) {
1660 if (PageDirty(page))
1661 set_buffer_dirty(bh);
1662 if (PageUptodate(page))
1663 set_buffer_uptodate(bh);
1664 bh = bh->b_this_page;
1665 } while (bh != head);
1667 attach_page_buffers(page, head);
1668 spin_unlock(&page->mapping->private_lock);
1670 EXPORT_SYMBOL(create_empty_buffers);
1673 * We are taking a block for data and we don't want any output from any
1674 * buffer-cache aliases starting from return from that function and
1675 * until the moment when something will explicitly mark the buffer
1676 * dirty (hopefully that will not happen until we free that block ;-)
1677 * We don't even need to mark it not-uptodate - nobody can expect
1678 * anything from a newly allocated buffer anyway. We used to use
1679 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1680 * don't want to mark the alias unmapped, for example - it would confuse
1681 * anyone who might pick it with bread() afterwards...
1683 * Also.. Note that bforget() doesn't lock the buffer. So there can
1684 * be writeout I/O going on against recently-freed buffers. We don't
1685 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1686 * only if we really need to. That happens here.
1688 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1690 struct buffer_head *old_bh;
1694 old_bh = __find_get_block_slow(bdev, block);
1696 clear_buffer_dirty(old_bh);
1697 wait_on_buffer(old_bh);
1698 clear_buffer_req(old_bh);
1702 EXPORT_SYMBOL(unmap_underlying_metadata);
1705 * NOTE! All mapped/uptodate combinations are valid:
1707 *   Mapped   Uptodate   Meaning
1709 *   No       No         "unknown" - must do get_block()
1710 *   No       Yes        "hole" - zero-filled
1711 *   Yes      No         "allocated" - allocated on disk, not read in
1712 *   Yes      Yes        "valid" - allocated and up-to-date in memory.
1714 * "Dirty" is valid only with the last case (mapped+uptodate).
1718 * While block_write_full_page is writing back the dirty buffers under
1719 * the page lock, whoever dirtied the buffers may decide to clean them
1720 * again at any time. We handle that by only looking at the buffer
1721 * state inside lock_buffer().
1723 * If block_write_full_page() is called for regular writeback
1724 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1725 * locked buffer. This only can happen if someone has written the buffer
1726 * directly, with submit_bh(). At the address_space level PageWriteback
1727 * prevents this contention from occurring.
1729 static int __block_write_full_page(struct inode *inode, struct page *page,
1730 get_block_t *get_block, struct writeback_control *wbc)
1734 sector_t last_block;
1735 struct buffer_head *bh, *head;
1736 const unsigned blocksize = 1 << inode->i_blkbits;
1737 int nr_underway = 0;
1739 BUG_ON(!PageLocked(page));
1741 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1743 if (!page_has_buffers(page)) {
1744 create_empty_buffers(page, blocksize,
1745 (1 << BH_Dirty)|(1 << BH_Uptodate));
1749 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1750 * here, and the (potentially unmapped) buffers may become dirty at
1751 * any time. If a buffer becomes dirty here after we've inspected it
1752 * then we just miss that fact, and the page stays dirty.
1754 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1755 * handle that here by just cleaning them.
1758 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1759 head = page_buffers(page);
1763 * Get all the dirty buffers mapped to disk addresses and
1764 * handle any aliases from the underlying blockdev's mapping.
1767 if (block > last_block) {
1769 * mapped buffers outside i_size will occur, because
1770 * this page can be outside i_size when there is a
1771 * truncate in progress.
1774 * The buffer was zeroed by block_write_full_page()
1776 clear_buffer_dirty(bh);
1777 set_buffer_uptodate(bh);
1778 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1779 WARN_ON(bh->b_size != blocksize);
1780 err = get_block(inode, block, bh, 1);
1783 if (buffer_new(bh)) {
1784 /* blockdev mappings never come here */
1785 clear_buffer_new(bh);
1786 unmap_underlying_metadata(bh->b_bdev,
1790 bh = bh->b_this_page;
1792 } while (bh != head);
1795 if (!buffer_mapped(bh))
1798 * If it's a fully non-blocking write attempt and we cannot
1799 * lock the buffer then redirty the page. Note that this can
1800 * potentially cause a busy-wait loop from pdflush and kswapd
1801 * activity, but those code paths have their own higher-level
1804 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1806 } else if (test_set_buffer_locked(bh)) {
1807 redirty_page_for_writepage(wbc, page);
1810 if (test_clear_buffer_dirty(bh)) {
1811 mark_buffer_async_write(bh);
1815 } while ((bh = bh->b_this_page) != head);
1818 * The page and its buffers are protected by PageWriteback(), so we can
1819 * drop the bh refcounts early.
1821 BUG_ON(PageWriteback(page));
1822 set_page_writeback(page);
1825 struct buffer_head *next = bh->b_this_page;
1826 if (buffer_async_write(bh)) {
1827 submit_bh(WRITE, bh);
1831 } while (bh != head);
1836 if (nr_underway == 0) {
1838 * The page was marked dirty, but the buffers were
1839 * clean. Someone wrote them back by hand with
1840 * ll_rw_block/submit_bh. A rare case.
1844 if (!buffer_uptodate(bh)) {
1848 bh = bh->b_this_page;
1849 } while (bh != head);
1851 SetPageUptodate(page);
1852 end_page_writeback(page);
1854 * The page and buffer_heads can be released at any time from
1857 wbc->pages_skipped++; /* We didn't write this page */
1863 * ENOSPC, or some other error. We may already have added some
1864 * blocks to the file, so we need to write these out to avoid
1865 * exposing stale data.
1866 * The page is currently locked and not marked for writeback
1869 /* Recovery: lock and submit the mapped buffers */
1871 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1873 mark_buffer_async_write(bh);
1876 * The buffer may have been set dirty during
1877 * attachment to a dirty page.
1879 clear_buffer_dirty(bh);
1881 } while ((bh = bh->b_this_page) != head);
1883 BUG_ON(PageWriteback(page));
1884 set_page_writeback(page);
1887 struct buffer_head *next = bh->b_this_page;
1888 if (buffer_async_write(bh)) {
1889 clear_buffer_dirty(bh);
1890 submit_bh(WRITE, bh);
1894 } while (bh != head);
1898 static int __block_prepare_write(struct inode *inode, struct page *page,
1899 unsigned from, unsigned to, get_block_t *get_block)
1901 unsigned block_start, block_end;
1904 unsigned blocksize, bbits;
1905 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1907 BUG_ON(!PageLocked(page));
1908 BUG_ON(from > PAGE_CACHE_SIZE);
1909 BUG_ON(to > PAGE_CACHE_SIZE);
1912 blocksize = 1 << inode->i_blkbits;
1913 if (!page_has_buffers(page))
1914 create_empty_buffers(page, blocksize, 0);
1915 head = page_buffers(page);
1917 bbits = inode->i_blkbits;
1918 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1920 for(bh = head, block_start = 0; bh != head || !block_start;
1921 block++, block_start=block_end, bh = bh->b_this_page) {
1922 block_end = block_start + blocksize;
1923 if (block_end <= from || block_start >= to) {
1924 if (PageUptodate(page)) {
1925 if (!buffer_uptodate(bh))
1926 set_buffer_uptodate(bh);
1931 clear_buffer_new(bh);
1932 if (!buffer_mapped(bh)) {
1933 WARN_ON(bh->b_size != blocksize);
1934 err = get_block(inode, block, bh, 1);
1937 if (buffer_new(bh)) {
1938 unmap_underlying_metadata(bh->b_bdev,
1940 if (PageUptodate(page)) {
1941 set_buffer_uptodate(bh);
1944 if (block_end > to || block_start < from) {
1947 kaddr = kmap_atomic(page, KM_USER0);
1951 if (block_start < from)
1952 memset(kaddr+block_start,
1953 0, from-block_start);
1954 flush_dcache_page(page);
1955 kunmap_atomic(kaddr, KM_USER0);
1960 if (PageUptodate(page)) {
1961 if (!buffer_uptodate(bh))
1962 set_buffer_uptodate(bh);
1965 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1966 (block_start < from || block_end > to)) {
1967 ll_rw_block(READ, 1, &bh);
1972 * If we issued read requests - let them complete.
1974 while(wait_bh > wait) {
1975 wait_on_buffer(*--wait_bh);
1976 if (!buffer_uptodate(*wait_bh))
1983 clear_buffer_new(bh);
1984 } while ((bh = bh->b_this_page) != head);
1989 * Zero out any newly allocated blocks to avoid exposing stale
1990 * data. If BH_New is set, we know that the block was newly
1991 * allocated in the above loop.
1996 block_end = block_start+blocksize;
1997 if (block_end <= from)
1999 if (block_start >= to)
2001 if (buffer_new(bh)) {
2004 clear_buffer_new(bh);
2005 kaddr = kmap_atomic(page, KM_USER0);
2006 memset(kaddr+block_start, 0, bh->b_size);
2007 kunmap_atomic(kaddr, KM_USER0);
2008 set_buffer_uptodate(bh);
2009 mark_buffer_dirty(bh);
2012 block_start = block_end;
2013 bh = bh->b_this_page;
2014 } while (bh != head);
2018 static int __block_commit_write(struct inode *inode, struct page *page,
2019 unsigned from, unsigned to)
2021 unsigned block_start, block_end;
2024 struct buffer_head *bh, *head;
2026 blocksize = 1 << inode->i_blkbits;
2028 for(bh = head = page_buffers(page), block_start = 0;
2029 bh != head || !block_start;
2030 block_start=block_end, bh = bh->b_this_page) {
2031 block_end = block_start + blocksize;
2032 if (block_end <= from || block_start >= to) {
2033 if (!buffer_uptodate(bh))
2036 set_buffer_uptodate(bh);
2037 mark_buffer_dirty(bh);
2042 * If this is a partial write which happened to make all buffers
2043 * uptodate then we can optimize away a bogus readpage() for
2044 * the next read(). Here we 'discover' whether the page went
2045 * uptodate as a result of this (potentially partial) write.
2048 SetPageUptodate(page);
2053 * Generic "read page" function for block devices that have the normal
2054 * get_block functionality. This is most of the block device filesystems.
2055 * Reads the page asynchronously --- the unlock_buffer() and
2056 * set/clear_buffer_uptodate() functions propagate buffer state into the
2057 * page struct once IO has completed.
2059 int block_read_full_page(struct page *page, get_block_t *get_block)
2061 struct inode *inode = page->mapping->host;
2062 sector_t iblock, lblock;
2063 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2064 unsigned int blocksize;
2066 int fully_mapped = 1;
2068 BUG_ON(!PageLocked(page));
2069 blocksize = 1 << inode->i_blkbits;
2070 if (!page_has_buffers(page))
2071 create_empty_buffers(page, blocksize, 0);
2072 head = page_buffers(page);
2074 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2075 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2081 if (buffer_uptodate(bh))
2084 if (!buffer_mapped(bh)) {
2088 if (iblock < lblock) {
2089 WARN_ON(bh->b_size != blocksize);
2090 err = get_block(inode, iblock, bh, 0);
2094 if (!buffer_mapped(bh)) {
2095 void *kaddr = kmap_atomic(page, KM_USER0);
2096 memset(kaddr + i * blocksize, 0, blocksize);
2097 flush_dcache_page(page);
2098 kunmap_atomic(kaddr, KM_USER0);
2100 set_buffer_uptodate(bh);
2104 * get_block() might have updated the buffer
2107 if (buffer_uptodate(bh))
2111 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2114 SetPageMappedToDisk(page);
2118 * All buffers are uptodate - we can set the page uptodate
2119 * as well. But not if get_block() returned an error.
2121 if (!PageError(page))
2122 SetPageUptodate(page);
2127 /* Stage two: lock the buffers */
2128 for (i = 0; i < nr; i++) {
2131 mark_buffer_async_read(bh);
2135 * Stage 3: start the IO. Check for uptodateness
2136 * inside the buffer lock in case another process reading
2137 * the underlying blockdev brought it uptodate (the sct fix).
2139 for (i = 0; i < nr; i++) {
2141 if (buffer_uptodate(bh))
2142 end_buffer_async_read(bh, 1);
2144 submit_bh(READ, bh);
2149 /* utility function for filesystems that need to do work on expanding
2150 * truncates. Uses prepare/commit_write to allow the filesystem to
2151 * deal with the hole.
2153 static int __generic_cont_expand(struct inode *inode, loff_t size,
2154 pgoff_t index, unsigned int offset)
2156 struct address_space *mapping = inode->i_mapping;
2158 unsigned long limit;
2162 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2163 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2164 send_sig(SIGXFSZ, current, 0);
2167 if (size > inode->i_sb->s_maxbytes)
2171 page = grab_cache_page(mapping, index);
2174 err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2177 * ->prepare_write() may have instantiated a few blocks
2178 * outside i_size. Trim these off again.
2181 page_cache_release(page);
2182 vmtruncate(inode, inode->i_size);
2186 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2189 page_cache_release(page);
2196 int generic_cont_expand(struct inode *inode, loff_t size)
2199 unsigned int offset;
2201 offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
2203 /* ugh. in prepare/commit_write, if from==to==start of block, we
2204 ** skip the prepare. make sure we never send an offset for the start
2207 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2208 /* caller must handle this extra byte. */
2211 index = size >> PAGE_CACHE_SHIFT;
2213 return __generic_cont_expand(inode, size, index, offset);
2216 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2218 loff_t pos = size - 1;
2219 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2220 unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
2222 /* prepare/commit_write can handle even if from==to==start of block. */
2223 return __generic_cont_expand(inode, size, index, offset);
2227 * For moronic filesystems that do not allow holes in files.
2228 * We may have to extend the file.
2231 int cont_prepare_write(struct page *page, unsigned offset,
2232 unsigned to, get_block_t *get_block, loff_t *bytes)
2234 struct address_space *mapping = page->mapping;
2235 struct inode *inode = mapping->host;
2236 struct page *new_page;
2240 unsigned blocksize = 1 << inode->i_blkbits;
2243 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2245 new_page = grab_cache_page(mapping, pgpos);
2248 /* we might sleep */
2249 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2250 unlock_page(new_page);
2251 page_cache_release(new_page);
2254 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2255 if (zerofrom & (blocksize-1)) {
2256 *bytes |= (blocksize-1);
2259 status = __block_prepare_write(inode, new_page, zerofrom,
2260 PAGE_CACHE_SIZE, get_block);
2263 kaddr = kmap_atomic(new_page, KM_USER0);
2264 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2265 flush_dcache_page(new_page);
2266 kunmap_atomic(kaddr, KM_USER0);
2267 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2268 unlock_page(new_page);
2269 page_cache_release(new_page);
2272 if (page->index < pgpos) {
2273 /* completely inside the area */
2276 /* page covers the boundary, find the boundary offset */
2277 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2279 /* if we will expand the thing last block will be filled */
2280 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2281 *bytes |= (blocksize-1);
2285 /* starting below the boundary? Nothing to zero out */
2286 if (offset <= zerofrom)
2289 status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2292 if (zerofrom < offset) {
2293 kaddr = kmap_atomic(page, KM_USER0);
2294 memset(kaddr+zerofrom, 0, offset-zerofrom);
2295 flush_dcache_page(page);
2296 kunmap_atomic(kaddr, KM_USER0);
2297 __block_commit_write(inode, page, zerofrom, offset);
2301 ClearPageUptodate(page);
2305 ClearPageUptodate(new_page);
2306 unlock_page(new_page);
2307 page_cache_release(new_page);
2312 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2313 get_block_t *get_block)
2315 struct inode *inode = page->mapping->host;
2316 int err = __block_prepare_write(inode, page, from, to, get_block);
2318 ClearPageUptodate(page);
2322 int block_commit_write(struct page *page, unsigned from, unsigned to)
2324 struct inode *inode = page->mapping->host;
2325 __block_commit_write(inode,page,from,to);
2329 int generic_commit_write(struct file *file, struct page *page,
2330 unsigned from, unsigned to)
2332 struct inode *inode = page->mapping->host;
2333 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2334 __block_commit_write(inode,page,from,to);
2336 * No need to use i_size_read() here, the i_size
2337 * cannot change under us because we hold i_mutex.
2339 if (pos > inode->i_size) {
2340 i_size_write(inode, pos);
2341 mark_inode_dirty(inode);
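/*
 * Illustrative sketch (not from this file): the usual way a simple
 * buffer-backed filesystem plugs the generic helpers into its
 * address_space_operations.  All "myfs_*" names are hypothetical; the
 * pattern follows ext2-style filesystems.
 */
static int myfs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,	/* usually block_read_full_page() based */
	.writepage	= myfs_writepage,	/* see block_write_full_page() below */
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= myfs_bmap,		/* see generic_block_bmap() below */
};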
2348 * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2349 * immediately, while under the page lock. So it needs a special end_io
2350 * handler which does not touch the bh after unlocking it.
2352 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2353 * a race there is benign: unlock_buffer() only uses the bh's address for
2354 * hashing after unlocking the buffer, so it doesn't actually touch the bh
2357 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2360 set_buffer_uptodate(bh);
2362 /* This happens, due to failed READA attempts. */
2363 clear_buffer_uptodate(bh);
2369 * On entry, the page is fully not uptodate.
2370 * On exit the page is fully uptodate in the areas outside (from,to)
2372 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2373 get_block_t *get_block)
2375 struct inode *inode = page->mapping->host;
2376 const unsigned blkbits = inode->i_blkbits;
2377 const unsigned blocksize = 1 << blkbits;
2378 struct buffer_head map_bh;
2379 struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2380 unsigned block_in_page;
2381 unsigned block_start;
2382 sector_t block_in_file;
2387 int is_mapped_to_disk = 1;
2390 if (PageMappedToDisk(page))
2393 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2394 map_bh.b_page = page;
2397 * We loop across all blocks in the page, whether or not they are
2398 * part of the affected region. This is so we can discover if the
2399 * page is fully mapped-to-disk.
2401 for (block_start = 0, block_in_page = 0;
2402 block_start < PAGE_CACHE_SIZE;
2403 block_in_page++, block_start += blocksize) {
2404 unsigned block_end = block_start + blocksize;
2409 if (block_start >= to)
2411 map_bh.b_size = blocksize;
2412 ret = get_block(inode, block_in_file + block_in_page,
2416 if (!buffer_mapped(&map_bh))
2417 is_mapped_to_disk = 0;
2418 if (buffer_new(&map_bh))
2419 unmap_underlying_metadata(map_bh.b_bdev,
2421 if (PageUptodate(page))
2423 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2424 kaddr = kmap_atomic(page, KM_USER0);
2425 if (block_start < from) {
2426 memset(kaddr+block_start, 0, from-block_start);
2429 if (block_end > to) {
2430 memset(kaddr + to, 0, block_end - to);
2433 flush_dcache_page(page);
2434 kunmap_atomic(kaddr, KM_USER0);
2437 if (buffer_uptodate(&map_bh))
2438 continue; /* reiserfs does this */
2439 if (block_start < from || block_end > to) {
2440 struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2446 bh->b_state = map_bh.b_state;
2447 atomic_set(&bh->b_count, 0);
2448 bh->b_this_page = NULL;
2450 bh->b_blocknr = map_bh.b_blocknr;
2451 bh->b_size = blocksize;
2452 bh->b_data = (char *)(long)block_start;
2453 bh->b_bdev = map_bh.b_bdev;
2454 bh->b_private = NULL;
2455 read_bh[nr_reads++] = bh;
2460 struct buffer_head *bh;
2463 * The page is locked, so these buffers are protected from
2464 * any VM or truncate activity. Hence we don't need to care
2465 * for the buffer_head refcounts.
2467 for (i = 0; i < nr_reads; i++) {
2470 bh->b_end_io = end_buffer_read_nobh;
2471 submit_bh(READ, bh);
2473 for (i = 0; i < nr_reads; i++) {
2476 if (!buffer_uptodate(bh))
2478 free_buffer_head(bh);
2485 if (is_mapped_to_disk)
2486 SetPageMappedToDisk(page);
2487 SetPageUptodate(page);
2490 * Setting the page dirty here isn't necessary for the prepare_write
2491 * function - commit_write will do that. But if/when this function is
2492 * used within the pagefault handler to ensure that all mmapped pages
2493 * have backing space in the filesystem, we will need to dirty the page
2494 * if its contents were altered.
2497 set_page_dirty(page);
2502 for (i = 0; i < nr_reads; i++) {
2504 free_buffer_head(read_bh[i]);
2508 * Error recovery is pretty slack. Clear the page and mark it dirty
2509 * so we'll later zero out any blocks which _were_ allocated.
2511 kaddr = kmap_atomic(page, KM_USER0);
2512 memset(kaddr, 0, PAGE_CACHE_SIZE);
2513 kunmap_atomic(kaddr, KM_USER0);
2514 SetPageUptodate(page);
2515 set_page_dirty(page);
2518 EXPORT_SYMBOL(nobh_prepare_write);
2520 int nobh_commit_write(struct file *file, struct page *page,
2521 unsigned from, unsigned to)
2523 struct inode *inode = page->mapping->host;
2524 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2526 set_page_dirty(page);
2527 if (pos > inode->i_size) {
2528 i_size_write(inode, pos);
2529 mark_inode_dirty(inode);
2533 EXPORT_SYMBOL(nobh_commit_write);
2536 * nobh_writepage() - based on block_write_full_page() except
2537 * that it tries to operate without attaching bufferheads to
2540 int nobh_writepage(struct page *page, get_block_t *get_block,
2541 struct writeback_control *wbc)
2543 struct inode * const inode = page->mapping->host;
2544 loff_t i_size = i_size_read(inode);
2545 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2550 /* Is the page fully inside i_size? */
2551 if (page->index < end_index)
2554 /* Is the page fully outside i_size? (truncate in progress) */
2555 offset = i_size & (PAGE_CACHE_SIZE-1);
2556 if (page->index >= end_index+1 || !offset) {
2558 * The page may have dirty, unmapped buffers. For example,
2559 * they may have been added in ext3_writepage(). Make them
2560 * freeable here, so the page does not leak.
2563 /* Not really sure about this - do we need this ? */
2564 if (page->mapping->a_ops->invalidatepage)
2565 page->mapping->a_ops->invalidatepage(page, offset);
2568 return 0; /* don't care */
2572 * The page straddles i_size. It must be zeroed out on each and every
2573 * writepage invocation because it may be mmapped. "A file is mapped
2574 * in multiples of the page size. For a file that is not a multiple of
2575 * the page size, the remaining memory is zeroed when mapped, and
2576 * writes to that region are not written out to the file."
2578 kaddr = kmap_atomic(page, KM_USER0);
2579 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2580 flush_dcache_page(page);
2581 kunmap_atomic(kaddr, KM_USER0);
2583 ret = mpage_writepage(page, get_block, wbc);
2585 ret = __block_write_full_page(inode, page, get_block, wbc);
2588 EXPORT_SYMBOL(nobh_writepage);
2591 * This function assumes that ->prepare_write() uses nobh_prepare_write().
2593 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2595 struct inode *inode = mapping->host;
2596 unsigned blocksize = 1 << inode->i_blkbits;
2597 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2598 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2601 const struct address_space_operations *a_ops = mapping->a_ops;
2605 if ((offset & (blocksize - 1)) == 0)
2609 page = grab_cache_page(mapping, index);
2613 to = (offset + blocksize) & ~(blocksize - 1);
2614 ret = a_ops->prepare_write(NULL, page, offset, to);
2616 kaddr = kmap_atomic(page, KM_USER0);
2617 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2618 flush_dcache_page(page);
2619 kunmap_atomic(kaddr, KM_USER0);
2620 set_page_dirty(page);
2623 page_cache_release(page);
2627 EXPORT_SYMBOL(nobh_truncate_page);
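/*
 * Illustrative sketch (not from this file): how a filesystem opts into
 * the nobh_* helpers above so that pages normally stay free of
 * buffer_heads.  Modelled on the ext2 "nobh" mount option; all
 * "myfs_*" names are hypothetical.  The truncate path would then use
 * nobh_truncate_page() in place of block_truncate_page().
 */
static int myfs_nobh_prepare_write(struct file *file, struct page *page,
				   unsigned from, unsigned to)
{
	return nobh_prepare_write(page, from, to, myfs_get_block);
}

static int myfs_nobh_writepage(struct page *page,
			       struct writeback_control *wbc)
{
	return nobh_writepage(page, myfs_get_block, wbc);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= myfs_nobh_prepare_write,
	.commit_write	= nobh_commit_write,
	.bmap		= myfs_bmap,
};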
2629 int block_truncate_page(struct address_space *mapping,
2630 loff_t from, get_block_t *get_block)
2632 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2633 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2636 unsigned length, pos;
2637 struct inode *inode = mapping->host;
2639 struct buffer_head *bh;
2643 blocksize = 1 << inode->i_blkbits;
2644 length = offset & (blocksize - 1);
2646 /* Block boundary? Nothing to do */
2650 length = blocksize - length;
2651 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2653 page = grab_cache_page(mapping, index);
2658 if (!page_has_buffers(page))
2659 create_empty_buffers(page, blocksize, 0);
2661 /* Find the buffer that contains "offset" */
2662 bh = page_buffers(page);
2664 while (offset >= pos) {
2665 bh = bh->b_this_page;
2671 if (!buffer_mapped(bh)) {
2672 WARN_ON(bh->b_size != blocksize);
2673 err = get_block(inode, iblock, bh, 0);
2676 /* unmapped? It's a hole - nothing to do */
2677 if (!buffer_mapped(bh))
2681 /* Ok, it's mapped. Make sure it's up-to-date */
2682 if (PageUptodate(page))
2683 set_buffer_uptodate(bh);
2685 if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2687 ll_rw_block(READ, 1, &bh);
2689 /* Uhhuh. Read error. Complain and punt. */
2690 if (!buffer_uptodate(bh))
2694 kaddr = kmap_atomic(page, KM_USER0);
2695 memset(kaddr + offset, 0, length);
2696 flush_dcache_page(page);
2697 kunmap_atomic(kaddr, KM_USER0);
2699 mark_buffer_dirty(bh);
2704 page_cache_release(page);
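/*
 * Illustrative sketch (not from this file): the typical caller of
 * block_truncate_page() is the filesystem's truncate path, which must
 * zero the tail of the new last block so that a later expansion does
 * not expose stale data.  myfs_truncate()/myfs_get_block() are
 * hypothetical; ext2-style filesystems do essentially this.
 */
static void myfs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size, myfs_get_block);

	/* ... then free the on-disk blocks beyond i_size ... */
}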
2710 * The generic ->writepage function for buffer-backed address_spaces
2712 int block_write_full_page(struct page *page, get_block_t *get_block,
2713 struct writeback_control *wbc)
2715 struct inode * const inode = page->mapping->host;
2716 loff_t i_size = i_size_read(inode);
2717 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2721 /* Is the page fully inside i_size? */
2722 if (page->index < end_index)
2723 return __block_write_full_page(inode, page, get_block, wbc);
2725 /* Is the page fully outside i_size? (truncate in progress) */
2726 offset = i_size & (PAGE_CACHE_SIZE-1);
2727 if (page->index >= end_index+1 || !offset) {
2729 * The page may have dirty, unmapped buffers. For example,
2730 * they may have been added in ext3_writepage(). Make them
2731 * freeable here, so the page does not leak.
2733 do_invalidatepage(page, 0);
2735 return 0; /* don't care */
2739 * The page straddles i_size. It must be zeroed out on each and every
2740 * writepage invocation because it may be mmapped. "A file is mapped
2741 * in multiples of the page size. For a file that is not a multiple of
2742 * the page size, the remaining memory is zeroed when mapped, and
2743 * writes to that region are not written out to the file."
2745 kaddr = kmap_atomic(page, KM_USER0);
2746 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2747 flush_dcache_page(page);
2748 kunmap_atomic(kaddr, KM_USER0);
2749 return __block_write_full_page(inode, page, get_block, wbc);
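/*
 * Illustrative sketch (not from this file): for most buffer-backed
 * filesystems ->writepage is a one-line wrapper around
 * block_write_full_page().  myfs_get_block is hypothetical.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}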
2752 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2753 get_block_t *get_block)
2755 struct buffer_head tmp;
2756 struct inode *inode = mapping->host;
2759 tmp.b_size = 1 << inode->i_blkbits;
2760 get_block(inode, block, &tmp, 0);
2761 return tmp.b_blocknr;
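/*
 * Illustrative sketch (not from this file): ->bmap is usually just a
 * wrapper that passes the filesystem's get_block routine to
 * generic_block_bmap().  myfs_get_block is hypothetical.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}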
2764 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2766 struct buffer_head *bh = bio->bi_private;
2771 if (err == -EOPNOTSUPP) {
2772 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2773 set_bit(BH_Eopnotsupp, &bh->b_state);
2776 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2781 int submit_bh(int rw, struct buffer_head * bh)
2786 BUG_ON(!buffer_locked(bh));
2787 BUG_ON(!buffer_mapped(bh));
2788 BUG_ON(!bh->b_end_io);
2790 if (buffer_ordered(bh) && (rw == WRITE))
2794 * Only clear out a write error when rewriting, should this
2795 * include WRITE_SYNC as well?
2797 if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2798 clear_buffer_write_io_error(bh);
2801 * from here on down, it's all bio -- do the initial mapping,
2802 * submit_bio -> generic_make_request may further map this bio around
2804 bio = bio_alloc(GFP_NOIO, 1);
2806 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2807 bio->bi_bdev = bh->b_bdev;
2808 bio->bi_io_vec[0].bv_page = bh->b_page;
2809 bio->bi_io_vec[0].bv_len = bh->b_size;
2810 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2814 bio->bi_size = bh->b_size;
2816 bio->bi_end_io = end_bio_bh_io_sync;
2817 bio->bi_private = bh;
2820 submit_bio(rw, bio);
2822 if (bio_flagged(bio, BIO_EOPNOTSUPP))
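/*
 * Illustrative sketch (not from this file): roughly the synchronous
 * read handshake around submit_bh() used by __bread()-style helpers.
 * The extra reference taken here is dropped by end_buffer_read_sync()
 * when the I/O completes.
 */
static int example_read_bh(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}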
2830 * ll_rw_block: low-level access to block devices (DEPRECATED)
2831 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2832 * @nr: number of &struct buffer_heads in the array
2833 * @bhs: array of pointers to &struct buffer_head
2835 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2836 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2837 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2838 * are sent to disk. The fourth %READA option is described in the documentation
2839 * for generic_make_request() which ll_rw_block() calls.
2841 * This function drops any buffer that it cannot get a lock on (with the
2842 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2843 * clean when doing a write request, and any buffer that appears to be
2844 * up-to-date when doing a read request. Further it marks as clean buffers that
2845 * are processed for writing (the buffer cache won't assume that they are
2846 * actually clean until the buffer gets unlocked).
2848 * ll_rw_block sets b_end_io to a simple completion handler that marks
2849 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2852 * All of the buffers must be for the same device, and must also be a
2853 * multiple of the current approved size for the device.
2855 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2859 for (i = 0; i < nr; i++) {
2860 struct buffer_head *bh = bhs[i];
2864 else if (test_set_buffer_locked(bh))
2867 if (rw == WRITE || rw == SWRITE) {
2868 if (test_clear_buffer_dirty(bh)) {
2869 bh->b_end_io = end_buffer_write_sync;
2871 submit_bh(WRITE, bh);
2875 if (!buffer_uptodate(bh)) {
2876 bh->b_end_io = end_buffer_read_sync;
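/*
 * Illustrative sketch (not from this file): a common ll_rw_block()
 * pattern - kick off a read and only wait when the data is actually
 * needed.  Because the helper drops buffers it cannot lock, callers
 * must re-check buffer_uptodate() after waiting.
 */
static int example_read_block(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			return -EIO;
	}
	return 0;
}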
2887 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2888 * and then start new I/O and then wait upon it. The caller must have a ref on
2891 int sync_dirty_buffer(struct buffer_head *bh)
2895 WARN_ON(atomic_read(&bh->b_count) < 1);
2897 if (test_clear_buffer_dirty(bh)) {
2899 bh->b_end_io = end_buffer_write_sync;
2900 ret = submit_bh(WRITE, bh);
2902 if (buffer_eopnotsupp(bh)) {
2903 clear_buffer_eopnotsupp(bh);
2906 if (!ret && !buffer_uptodate(bh))
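/*
 * Illustrative sketch (not from this file): how a filesystem typically
 * forces a metadata block (e.g. its superblock) to disk with
 * sync_dirty_buffer().  "sbh" is a hypothetical buffer_head holding the
 * on-disk superblock; the caller holds a reference on it as required.
 */
static int example_write_super(struct buffer_head *sbh)
{
	mark_buffer_dirty(sbh);
	return sync_dirty_buffer(sbh);	/* waits for old and new I/O */
}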
2915 * try_to_free_buffers() checks if all the buffers on this particular page
2916 * are unused, and releases them if so.
2918 * Exclusion against try_to_free_buffers may be obtained by either
2919 * locking the page or by holding its mapping's private_lock.
2921 * If the page is dirty but all the buffers are clean then we need to
2922 * be sure to mark the page clean as well. This is because the page
2923 * may be against a block device, and a later reattachment of buffers
2924 * to a dirty page will set *all* buffers dirty. Which would corrupt
2925 * filesystem data on the same device.
2927 * The same applies to regular filesystem pages: if all the buffers are
2928 * clean then we set the page clean and proceed. To do that, we require
2929 * total exclusion from __set_page_dirty_buffers(). That is obtained with
2932 * try_to_free_buffers() is non-blocking.
2934 static inline int buffer_busy(struct buffer_head *bh)
2936 return atomic_read(&bh->b_count) |
2937 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2941 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2943 struct buffer_head *head = page_buffers(page);
2944 struct buffer_head *bh;
2948 if (buffer_write_io_error(bh) && page->mapping)
2949 set_bit(AS_EIO, &page->mapping->flags);
2950 if (buffer_busy(bh))
2952 bh = bh->b_this_page;
2953 } while (bh != head);
2956 struct buffer_head *next = bh->b_this_page;
2958 if (!list_empty(&bh->b_assoc_buffers))
2959 __remove_assoc_queue(bh);
2961 } while (bh != head);
2962 *buffers_to_free = head;
2963 __clear_page_buffers(page);
2969 int try_to_free_buffers(struct page *page)
2971 struct address_space * const mapping = page->mapping;
2972 struct buffer_head *buffers_to_free = NULL;
2975 BUG_ON(!PageLocked(page));
2976 if (PageWriteback(page))
2979 if (mapping == NULL) { /* can this still happen? */
2980 ret = drop_buffers(page, &buffers_to_free);
2984 spin_lock(&mapping->private_lock);
2985 ret = drop_buffers(page, &buffers_to_free);
2988 * If the filesystem writes its buffers by hand (eg ext3)
2989 * then we can have clean buffers against a dirty page. We
2990 * clean the page here; otherwise later reattachment of buffers
2991 * could encounter a non-uptodate page, which is unresolvable.
2992 * This only applies in the rare case where try_to_free_buffers
2993 * succeeds but the page is not freed.
2995 clear_page_dirty(page);
2997 spin_unlock(&mapping->private_lock);
2999 if (buffers_to_free) {
3000 struct buffer_head *bh = buffers_to_free;
3003 struct buffer_head *next = bh->b_this_page;
3004 free_buffer_head(bh);
3006 } while (bh != buffers_to_free);
3010 EXPORT_SYMBOL(try_to_free_buffers);
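/*
 * Illustrative sketch (not from this file): a filesystem that needs a
 * veto over buffer freeing (for instance while a transaction still
 * references the buffers) can provide ->releasepage and fall back to
 * try_to_free_buffers().  myfs_buffers_busy() is hypothetical; without
 * a ->releasepage, the VM calls try_to_free_buffers() directly.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	if (myfs_buffers_busy(page))
		return 0;	/* refuse: the page cannot be freed yet */
	return try_to_free_buffers(page);
}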
3012 void block_sync_page(struct page *page)
3014 struct address_space *mapping;
3017 mapping = page_mapping(page);
3019 blk_run_backing_dev(mapping->backing_dev_info, page);
3023 * There are no bdflush tunables left. But distributions are
3024 * still running obsolete flush daemons, so we terminate them here.
3026 * Use of bdflush() is deprecated and will be removed in a future kernel.
3027 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3029 asmlinkage long sys_bdflush(int func, long data)
3031 static int msg_count;
3033 if (!capable(CAP_SYS_ADMIN))
3036 if (msg_count < 5) {
3039 "warning: process `%s' used the obsolete bdflush"
3040 " system call\n", current->comm);
3041 printk(KERN_INFO "Fix your initscripts?\n");
3050 * Buffer-head allocation
3052 static kmem_cache_t *bh_cachep;
3055 * Once the number of bh's in the machine exceeds this level, we start
3056 * stripping them in writeback.
3058 static int max_buffer_heads;
3060 int buffer_heads_over_limit;
3062 struct bh_accounting {
3063 int nr; /* Number of live bh's */
3064 int ratelimit; /* Limit cacheline bouncing */
3067 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3069 static void recalc_bh_state(void)
3074 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3076 __get_cpu_var(bh_accounting).ratelimit = 0;
3077 for_each_online_cpu(i)
3078 tot += per_cpu(bh_accounting, i).nr;
3079 buffer_heads_over_limit = (tot > max_buffer_heads);
3082 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3084 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3086 get_cpu_var(bh_accounting).nr++;
3088 put_cpu_var(bh_accounting);
3092 EXPORT_SYMBOL(alloc_buffer_head);
3094 void free_buffer_head(struct buffer_head *bh)
3096 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3097 kmem_cache_free(bh_cachep, bh);
3098 get_cpu_var(bh_accounting).nr--;
3100 put_cpu_var(bh_accounting);
3102 EXPORT_SYMBOL(free_buffer_head);
3105 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3107 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3108 SLAB_CTOR_CONSTRUCTOR) {
3109 struct buffer_head * bh = (struct buffer_head *)data;
3111 memset(bh, 0, sizeof(*bh));
3112 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3116 #ifdef CONFIG_HOTPLUG_CPU
3117 static void buffer_exit_cpu(int cpu)
3120 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3122 for (i = 0; i < BH_LRU_SIZE; i++) {
3126 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3127 per_cpu(bh_accounting, cpu).nr = 0;
3128 put_cpu_var(bh_accounting);
3131 static int buffer_cpu_notify(struct notifier_block *self,
3132 unsigned long action, void *hcpu)
3134 if (action == CPU_DEAD)
3135 buffer_exit_cpu((unsigned long)hcpu);
3138 #endif /* CONFIG_HOTPLUG_CPU */
3140 void __init buffer_init(void)
3144 bh_cachep = kmem_cache_create("buffer_head",
3145 sizeof(struct buffer_head), 0,
3146 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3152 * Limit the bh occupancy to 10% of ZONE_NORMAL
3154 nrpages = (nr_free_buffer_pages() * 10) / 100;
3155 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3156 hotcpu_notifier(buffer_cpu_notify, 0);
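/*
 * Worked example (illustrative numbers, not from this file): with a
 * 4096-byte PAGE_SIZE and a buffer_head of roughly 56 bytes, about 73
 * buffer_heads fit in one page.  On a machine where
 * nr_free_buffer_pages() reports 100,000 pages, nrpages above becomes
 * 10,000, so max_buffer_heads ends up around 730,000 before writeback
 * starts stripping buffers.  sizeof(struct buffer_head) is architecture
 * and config dependent, so the real limit varies.
 */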
3159 EXPORT_SYMBOL(__bforget);
3160 EXPORT_SYMBOL(__brelse);
3161 EXPORT_SYMBOL(__wait_on_buffer);
3162 EXPORT_SYMBOL(block_commit_write);
3163 EXPORT_SYMBOL(block_prepare_write);
3164 EXPORT_SYMBOL(block_read_full_page);
3165 EXPORT_SYMBOL(block_sync_page);
3166 EXPORT_SYMBOL(block_truncate_page);
3167 EXPORT_SYMBOL(block_write_full_page);
3168 EXPORT_SYMBOL(cont_prepare_write);
3169 EXPORT_SYMBOL(end_buffer_read_sync);
3170 EXPORT_SYMBOL(end_buffer_write_sync);
3171 EXPORT_SYMBOL(file_fsync);
3172 EXPORT_SYMBOL(fsync_bdev);
3173 EXPORT_SYMBOL(generic_block_bmap);
3174 EXPORT_SYMBOL(generic_commit_write);
3175 EXPORT_SYMBOL(generic_cont_expand);
3176 EXPORT_SYMBOL(generic_cont_expand_simple);
3177 EXPORT_SYMBOL(init_buffer);
3178 EXPORT_SYMBOL(invalidate_bdev);
3179 EXPORT_SYMBOL(ll_rw_block);
3180 EXPORT_SYMBOL(mark_buffer_dirty);
3181 EXPORT_SYMBOL(submit_bh);
3182 EXPORT_SYMBOL(sync_dirty_buffer);
3183 EXPORT_SYMBOL(unlock_buffer);