4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
56 static int sync_buffer(void *word)
58 struct block_device *bd;
59 struct buffer_head *bh
60 = container_of(word, struct buffer_head, b_state);
65 blk_run_address_space(bd->bd_inode->i_mapping);
70 void __lock_buffer(struct buffer_head *bh)
72 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 TASK_UNINTERRUPTIBLE);
75 EXPORT_SYMBOL(__lock_buffer);
77 void unlock_buffer(struct buffer_head *bh)
79 clear_bit_unlock(BH_Lock, &bh->b_state);
80 smp_mb__after_clear_bit();
81 wake_up_bit(&bh->b_state, BH_Lock);
85 * Block until a buffer comes unlocked. This doesn't stop it
86 * from becoming locked again - you have to lock it yourself
87 * if you want to preserve its state.
89 void __wait_on_buffer(struct buffer_head * bh)
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
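/*
 * Illustrative sketch (hypothetical function name): as the comment above
 * notes, waiting does not keep the buffer unlocked, so a caller that needs
 * the buffer's state preserved must take the lock itself afterwards.
 */
static void example_wait_then_lock(struct buffer_head *bh)
{
	wait_on_buffer(bh);	/* wait for any in-flight I/O */
	lock_buffer(bh);	/* now hold BH_Lock ourselves */
	/* ... inspect or modify buffer state here ... */
	unlock_buffer(bh);
}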
95 __clear_page_buffers(struct page *page)
97 ClearPagePrivate(page);
98 set_page_private(page, 0);
99 page_cache_release(page);
103 static int quiet_error(struct buffer_head *bh)
105 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
111 static void buffer_io_error(struct buffer_head *bh)
113 char b[BDEVNAME_SIZE];
114 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 bdevname(bh->b_bdev, b),
116 (unsigned long long)bh->b_blocknr);
120 * End-of-IO handler helper function which does not touch the bh after
122 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123 * a race there is benign: unlock_buffer() only uses the bh's address for
124 * hashing after unlocking the buffer, so it doesn't actually touch the bh
127 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
130 set_buffer_uptodate(bh);
132 /* This happens, due to failed READA attempts. */
133 clear_buffer_uptodate(bh);
139 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
140 * unlock the buffer. This is what ll_rw_block uses too.
142 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
144 __end_buffer_read_notouch(bh, uptodate);
148 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
150 char b[BDEVNAME_SIZE];
153 set_buffer_uptodate(bh);
155 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
157 printk(KERN_WARNING "lost page write due to "
159 bdevname(bh->b_bdev, b));
161 set_buffer_write_io_error(bh);
162 clear_buffer_uptodate(bh);
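/*
 * Illustrative sketch (hypothetical function name): submitting a single
 * synchronous read by hand with the default completion handler above, much
 * as __bread_slow() does later in this file.
 */
static void example_sync_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);				/* submit_bh() consumes a reference */
	bh->b_end_io = end_buffer_read_sync;	/* unlocks bh when the I/O ends */
	submit_bh(READ, bh);
	wait_on_buffer(bh);			/* block until completion */
}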
169 * Various filesystems appear to want __find_get_block to be non-blocking.
170 * But it's the page lock which protects the buffers. To get around this,
171 * we get exclusion from try_to_free_buffers with the blockdev mapping's
174 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
175 * may be quite high. This code could TryLock the page, and if that
176 * succeeds, there is no need to take private_lock. (But if
177 * private_lock is contended then so is mapping->tree_lock).
179 static struct buffer_head *
180 __find_get_block_slow(struct block_device *bdev, sector_t block)
182 struct inode *bd_inode = bdev->bd_inode;
183 struct address_space *bd_mapping = bd_inode->i_mapping;
184 struct buffer_head *ret = NULL;
186 struct buffer_head *bh;
187 struct buffer_head *head;
191 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
192 page = find_get_page(bd_mapping, index);
196 spin_lock(&bd_mapping->private_lock);
197 if (!page_has_buffers(page))
199 head = page_buffers(page);
202 if (!buffer_mapped(bh))
204 else if (bh->b_blocknr == block) {
209 bh = bh->b_this_page;
210 } while (bh != head);
212 /* we might be here because some of the buffers on this page are
213 * not mapped. This is due to various races between
214 * file io on the block device and getblk. It gets dealt with
215 * elsewhere, don't buffer_error if we had some unmapped buffers
218 printk("__find_get_block_slow() failed. "
219 "block=%llu, b_blocknr=%llu\n",
220 (unsigned long long)block,
221 (unsigned long long)bh->b_blocknr);
222 printk("b_state=0x%08lx, b_size=%zu\n",
223 bh->b_state, bh->b_size);
224 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
227 spin_unlock(&bd_mapping->private_lock);
228 page_cache_release(page);
233 /* If invalidate_buffers() will trash dirty buffers, it means some kind
234 of fs corruption is going on. Trashing dirty data always implies losing
235 information that was supposed to be just stored on the physical layer
238 Thus invalidate_buffers in general usage is not allowed to trash
239 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
240 be preserved. These buffers are simply skipped.
242 We also skip buffers which are still in use. For example this can
243 happen if a userspace program is reading the block device.
245 NOTE: In the case where the user removed a removable-media disk while
246 there was still dirty data not synced to disk (due to a bug in the device
247 driver or to a user error), by not destroying the dirty buffers we could
248 generate corruption also on the next media inserted; thus a parameter is
249 necessary to handle this case in the safest way possible (trying
250 not to also corrupt the newly inserted disk with data belonging to
251 the old, now corrupted disk). Also, for the ramdisk the natural thing
252 to do in order to release the ramdisk memory is to destroy its dirty buffers.
254 These are two special cases. Normal usage implies that the device driver
255 issues a sync on the device (without waiting for I/O completion) and
256 then an invalidate_buffers call that doesn't trash dirty buffers.
258 For handling cache coherency with the blkdev pagecache the 'update' case
259 has been introduced. It is needed to re-read from disk any pinned
260 buffer. NOTE: re-reading from disk is destructive so we can do it only
261 when we assume nobody is changing the buffercache under our I/O and when
262 we think the disk contains more recent information than the buffercache.
263 The update == 1 pass marks the buffers we need to update, the update == 2
264 pass does the actual I/O. */
265 void invalidate_bdev(struct block_device *bdev)
267 struct address_space *mapping = bdev->bd_inode->i_mapping;
269 if (mapping->nrpages == 0)
272 invalidate_bh_lrus();
273 invalidate_mapping_pages(mapping, 0, -1);
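/*
 * Illustrative sketch (hypothetical function name) of the "normal usage"
 * described in the comment above: on a media change a driver would start
 * writeback without waiting, then invalidate the clean cached pages.
 */
static void example_media_change(struct block_device *bdev)
{
	filemap_flush(bdev->bd_inode->i_mapping);	/* start writeback, don't wait */
	invalidate_bdev(bdev);				/* dirty buffers are skipped */
}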
277 * Kick pdflush then try to free up some ZONE_NORMAL memory.
279 static void free_more_memory(void)
284 wakeup_pdflush(1024);
287 for_each_online_node(nid) {
288 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
289 gfp_zone(GFP_NOFS), NULL,
292 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
298 * I/O completion handler for block_read_full_page() - pages
299 * which come unlocked at the end of I/O.
301 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
304 struct buffer_head *first;
305 struct buffer_head *tmp;
307 int page_uptodate = 1;
309 BUG_ON(!buffer_async_read(bh));
313 set_buffer_uptodate(bh);
315 clear_buffer_uptodate(bh);
316 if (!quiet_error(bh))
322 * Be _very_ careful from here on. Bad things can happen if
323 * two buffer heads end IO at almost the same time and both
324 * decide that the page is now completely done.
326 first = page_buffers(page);
327 local_irq_save(flags);
328 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
329 clear_buffer_async_read(bh);
333 if (!buffer_uptodate(tmp))
335 if (buffer_async_read(tmp)) {
336 BUG_ON(!buffer_locked(tmp));
339 tmp = tmp->b_this_page;
341 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
342 local_irq_restore(flags);
345 * If none of the buffers had errors and they are all
346 * uptodate then we can set the page uptodate.
348 if (page_uptodate && !PageError(page))
349 SetPageUptodate(page);
354 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
355 local_irq_restore(flags);
360 * Completion handler for block_write_full_page() - pages which are unlocked
361 * during I/O, and which have PageWriteback cleared upon I/O completion.
363 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
365 char b[BDEVNAME_SIZE];
367 struct buffer_head *first;
368 struct buffer_head *tmp;
371 BUG_ON(!buffer_async_write(bh));
375 set_buffer_uptodate(bh);
377 if (!quiet_error(bh)) {
379 printk(KERN_WARNING "lost page write due to "
381 bdevname(bh->b_bdev, b));
383 set_bit(AS_EIO, &page->mapping->flags);
384 set_buffer_write_io_error(bh);
385 clear_buffer_uptodate(bh);
389 first = page_buffers(page);
390 local_irq_save(flags);
391 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
393 clear_buffer_async_write(bh);
395 tmp = bh->b_this_page;
397 if (buffer_async_write(tmp)) {
398 BUG_ON(!buffer_locked(tmp));
401 tmp = tmp->b_this_page;
403 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
404 local_irq_restore(flags);
405 end_page_writeback(page);
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
415 * If a page's buffers are under async read-in (end_buffer_async_read
416 * completion) then there is a possibility that another thread of
417 * control could lock one of the buffers after it has completed
418 * but while some of the other buffers have not completed. This
419 * locked buffer would confuse end_buffer_async_read() into not unlocking
420 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
421 * that this buffer is not under async I/O.
423 * The page comes unlocked when it has no locked buffer_async buffers
426 * PageLocked prevents anyone starting new async I/O reads any of
429 * PageWriteback is used to prevent simultaneous writeout of the same
432 * PageLocked prevents anyone from starting writeback of a page which is
433 * under read I/O (PageWriteback is only ever set against a locked page).
435 static void mark_buffer_async_read(struct buffer_head *bh)
437 bh->b_end_io = end_buffer_async_read;
438 set_buffer_async_read(bh);
441 void mark_buffer_async_write_endio(struct buffer_head *bh,
442 bh_end_io_t *handler)
444 bh->b_end_io = handler;
445 set_buffer_async_write(bh);
448 void mark_buffer_async_write(struct buffer_head *bh)
450 mark_buffer_async_write_endio(bh, end_buffer_async_write);
452 EXPORT_SYMBOL(mark_buffer_async_write);
456 * fs/buffer.c contains helper functions for buffer-backed address space's
457 * fsync functions. A common requirement for buffer-based filesystems is
458 * that certain data from the backing blockdev needs to be written out for
459 * a successful fsync(). For example, ext2 indirect blocks need to be
460 * written back and waited upon before fsync() returns.
462 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
463 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
464 * management of a list of dependent buffers at ->i_mapping->private_list.
466 * Locking is a little subtle: try_to_free_buffers() will remove buffers
467 * from their controlling inode's queue when they are being freed. But
468 * try_to_free_buffers() will be operating against the *blockdev* mapping
469 * at the time, not against the S_ISREG file which depends on those buffers.
470 * So the locking for private_list is via the private_lock in the address_space
471 * which backs the buffers. Which is different from the address_space
472 * against which the buffers are listed. So for a particular address_space,
473 * mapping->private_lock does *not* protect mapping->private_list! In fact,
474 * mapping->private_list will always be protected by the backing blockdev's
477 * Which introduces a requirement: all buffers on an address_space's
478 * ->private_list must be from the same address_space: the blockdev's.
480 * address_spaces which do not place buffers at ->private_list via these
481 * utility functions are free to use private_lock and private_list for
482 * whatever they want. The only requirement is that list_empty(private_list)
483 * be true at clear_inode() time.
485 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
486 * filesystems should do that. invalidate_inode_buffers() should just go
487 * BUG_ON(!list_empty).
489 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
490 * take an address_space, not an inode. And it should be called
491 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
494 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
495 * list if it is already on a list. Because if the buffer is on a list,
496 * it *must* already be on the right one. If not, the filesystem is being
497 * silly. This will save a ton of locking. But first we have to ensure
498 * that buffers are taken *off* the old inode's list when they are freed
499 * (presumably in truncate). That requires careful auditing of all
500 * filesystems (do it inside bforget()). It could also be done by bringing
505 * The buffer's backing address_space's private_lock must be held
507 static void __remove_assoc_queue(struct buffer_head *bh)
509 list_del_init(&bh->b_assoc_buffers);
510 WARN_ON(!bh->b_assoc_map);
511 if (buffer_write_io_error(bh))
512 set_bit(AS_EIO, &bh->b_assoc_map->flags);
513 bh->b_assoc_map = NULL;
516 int inode_has_buffers(struct inode *inode)
518 return !list_empty(&inode->i_data.private_list);
522 * osync is designed to support O_SYNC io. It waits synchronously for
523 * all already-submitted IO to complete, but does not queue any new
524 * writes to the disk.
526 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
527 * you dirty the buffers, and then use osync_inode_buffers to wait for
528 * completion. Any other dirty buffers which are not yet queued for
529 * write will not be flushed to disk by the osync.
531 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
533 struct buffer_head *bh;
539 list_for_each_prev(p, list) {
541 if (buffer_locked(bh)) {
545 if (!buffer_uptodate(bh))
556 void do_thaw_all(struct work_struct *work)
558 struct super_block *sb;
559 char b[BDEVNAME_SIZE];
563 list_for_each_entry(sb, &super_blocks, s_list) {
565 spin_unlock(&sb_lock);
566 down_read(&sb->s_umount);
567 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568 printk(KERN_WARNING "Emergency Thaw on %s\n",
569 bdevname(sb->s_bdev, b));
570 up_read(&sb->s_umount);
572 if (__put_super_and_need_restart(sb))
575 spin_unlock(&sb_lock);
577 printk(KERN_WARNING "Emergency Thaw complete\n");
581 * emergency_thaw_all -- forcibly thaw every frozen filesystem
583 * Used for emergency unfreeze of all filesystems via SysRq
585 void emergency_thaw_all(void)
587 struct work_struct *work;
589 work = kmalloc(sizeof(*work), GFP_ATOMIC);
591 INIT_WORK(work, do_thaw_all);
597 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
598 * @mapping: the mapping which wants those buffers written
600 * Starts I/O against the buffers at mapping->private_list, and waits upon
603 * Basically, this is a convenience function for fsync().
604 * @mapping is a file or directory which needs those buffers to be written for
605 * a successful fsync().
607 int sync_mapping_buffers(struct address_space *mapping)
609 struct address_space *buffer_mapping = mapping->assoc_mapping;
611 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
614 return fsync_buffers_list(&buffer_mapping->private_lock,
615 &mapping->private_list);
617 EXPORT_SYMBOL(sync_mapping_buffers);
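/*
 * Illustrative sketch (hypothetical function names): how a simple
 * buffer-backed filesystem might use the helpers described above.  An
 * indirect/metadata buffer is attached to the owning inode with
 * mark_buffer_dirty_inode(), and the inode's ->fsync() later writes out
 * and waits on that list via sync_mapping_buffers().
 */
static void example_dirty_indirect(struct inode *inode, struct buffer_head *bh)
{
	mark_buffer_dirty_inode(bh, inode);	/* queue on ->private_list */
}

static int example_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	return sync_mapping_buffers(dentry->d_inode->i_mapping);
}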
620 * Called when we've recently written block `bblock', and it is known that
621 * `bblock' was for a buffer_boundary() buffer. This means that the block at
622 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
623 * dirty, schedule it for IO. So that indirects merge nicely with their data.
625 void write_boundary_block(struct block_device *bdev,
626 sector_t bblock, unsigned blocksize)
628 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
630 if (buffer_dirty(bh))
631 ll_rw_block(WRITE, 1, &bh);
636 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
638 struct address_space *mapping = inode->i_mapping;
639 struct address_space *buffer_mapping = bh->b_page->mapping;
641 mark_buffer_dirty(bh);
642 if (!mapping->assoc_mapping) {
643 mapping->assoc_mapping = buffer_mapping;
645 BUG_ON(mapping->assoc_mapping != buffer_mapping);
647 if (!bh->b_assoc_map) {
648 spin_lock(&buffer_mapping->private_lock);
649 list_move_tail(&bh->b_assoc_buffers,
650 &mapping->private_list);
651 bh->b_assoc_map = mapping;
652 spin_unlock(&buffer_mapping->private_lock);
655 EXPORT_SYMBOL(mark_buffer_dirty_inode);
658 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
661 * If warn is true, then emit a warning if the page is not uptodate and has
662 * not been truncated.
664 static void __set_page_dirty(struct page *page,
665 struct address_space *mapping, int warn)
667 spin_lock_irq(&mapping->tree_lock);
668 if (page->mapping) { /* Race with truncate? */
669 WARN_ON_ONCE(warn && !PageUptodate(page));
670 account_page_dirtied(page, mapping);
671 radix_tree_tag_set(&mapping->page_tree,
672 page_index(page), PAGECACHE_TAG_DIRTY);
674 spin_unlock_irq(&mapping->tree_lock);
675 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
679 * Add a page to the dirty page list.
681 * It is a sad fact of life that this function is called from several places
682 * deeply under spinlocking. It may not sleep.
684 * If the page has buffers, the uptodate buffers are set dirty, to preserve
685 * dirty-state coherency between the page and the buffers. If the page does
686 * not have buffers then when they are later attached they will all be set
689 * The buffers are dirtied before the page is dirtied. There's a small race
690 * window in which a writepage caller may see the page cleanness but not the
691 * buffer dirtiness. That's fine. If this code were to set the page dirty
692 * before the buffers, a concurrent writepage caller could clear the page dirty
693 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
694 * page on the dirty page list.
696 * We use private_lock to lock against try_to_free_buffers while using the
697 * page's buffer list. Also use this to protect against clean buffers being
698 * added to the page after it was set dirty.
700 * FIXME: may need to call ->reservepage here as well. That's rather up to the
701 * address_space though.
703 int __set_page_dirty_buffers(struct page *page)
706 struct address_space *mapping = page_mapping(page);
708 if (unlikely(!mapping))
709 return !TestSetPageDirty(page);
711 spin_lock(&mapping->private_lock);
712 if (page_has_buffers(page)) {
713 struct buffer_head *head = page_buffers(page);
714 struct buffer_head *bh = head;
717 set_buffer_dirty(bh);
718 bh = bh->b_this_page;
719 } while (bh != head);
721 newly_dirty = !TestSetPageDirty(page);
722 spin_unlock(&mapping->private_lock);
725 __set_page_dirty(page, mapping, 1);
728 EXPORT_SYMBOL(__set_page_dirty_buffers);
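/*
 * Illustrative sketch (hypothetical structure name): buffer-backed
 * filesystems normally wire __set_page_dirty_buffers() straight into their
 * address_space_operations as the ->set_page_dirty() method.
 */
static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
};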
731 * Write out and wait upon a list of buffers.
733 * We have conflicting pressures: we want to make sure that all
734 * initially dirty buffers get waited on, but that any subsequently
735 * dirtied buffers don't. After all, we don't want fsync to last
736 * forever if somebody is actively writing to the file.
738 * Do this in two main stages: first we copy dirty buffers to a
739 * temporary inode list, queueing the writes as we go. Then we clean
740 * up, waiting for those writes to complete.
742 * During this second stage, any subsequent updates to the file may end
743 * up refiling the buffer on the original inode's dirty list again, so
744 * there is a chance we will end up with a buffer queued for write but
745 * not yet completed on that list. So, as a final cleanup we go through
746 * the osync code to catch these locked, dirty buffers without requeuing
747 * any newly dirty buffers for write.
749 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
751 struct buffer_head *bh;
752 struct list_head tmp;
753 struct address_space *mapping, *prev_mapping = NULL;
756 INIT_LIST_HEAD(&tmp);
759 while (!list_empty(list)) {
760 bh = BH_ENTRY(list->next);
761 mapping = bh->b_assoc_map;
762 __remove_assoc_queue(bh);
763 /* Avoid race with mark_buffer_dirty_inode() which does
764 * a lockless check and we rely on seeing the dirty bit */
766 if (buffer_dirty(bh) || buffer_locked(bh)) {
767 list_add(&bh->b_assoc_buffers, &tmp);
768 bh->b_assoc_map = mapping;
769 if (buffer_dirty(bh)) {
773 * Ensure any pending I/O completes so that
774 * ll_rw_block() actually writes the current
775 * contents - it is a noop if I/O is still in
776 * flight on potentially older contents.
778 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
781 * Kick off IO for the previous mapping. Note
782 * that we will not run the very last mapping,
783 * wait_on_buffer() will do that for us
784 * through sync_buffer().
786 if (prev_mapping && prev_mapping != mapping)
787 blk_run_address_space(prev_mapping);
788 prev_mapping = mapping;
796 while (!list_empty(&tmp)) {
797 bh = BH_ENTRY(tmp.prev);
799 mapping = bh->b_assoc_map;
800 __remove_assoc_queue(bh);
801 /* Avoid race with mark_buffer_dirty_inode() which does
802 * a lockless check and we rely on seeing the dirty bit */
804 if (buffer_dirty(bh)) {
805 list_add(&bh->b_assoc_buffers,
806 &mapping->private_list);
807 bh->b_assoc_map = mapping;
811 if (!buffer_uptodate(bh))
818 err2 = osync_buffers_list(lock, list);
826 * Invalidate any and all dirty buffers on a given inode. We are
827 * probably unmounting the fs, but that doesn't mean we have already
828 * done a sync(). Just drop the buffers from the inode list.
830 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
831 * assumes that all the buffers are against the blockdev. Not true
834 void invalidate_inode_buffers(struct inode *inode)
836 if (inode_has_buffers(inode)) {
837 struct address_space *mapping = &inode->i_data;
838 struct list_head *list = &mapping->private_list;
839 struct address_space *buffer_mapping = mapping->assoc_mapping;
841 spin_lock(&buffer_mapping->private_lock);
842 while (!list_empty(list))
843 __remove_assoc_queue(BH_ENTRY(list->next));
844 spin_unlock(&buffer_mapping->private_lock);
847 EXPORT_SYMBOL(invalidate_inode_buffers);
850 * Remove any clean buffers from the inode's buffer list. This is called
851 * when we're trying to free the inode itself. Those buffers can pin it.
853 * Returns true if all buffers were removed.
855 int remove_inode_buffers(struct inode *inode)
859 if (inode_has_buffers(inode)) {
860 struct address_space *mapping = &inode->i_data;
861 struct list_head *list = &mapping->private_list;
862 struct address_space *buffer_mapping = mapping->assoc_mapping;
864 spin_lock(&buffer_mapping->private_lock);
865 while (!list_empty(list)) {
866 struct buffer_head *bh = BH_ENTRY(list->next);
867 if (buffer_dirty(bh)) {
871 __remove_assoc_queue(bh);
873 spin_unlock(&buffer_mapping->private_lock);
879 * Create the appropriate buffers when given a page for data area and
880 * the size of each buffer.. Use the bh->b_this_page linked list to
881 * follow the buffers created. Return NULL if unable to create more
884 * The retry flag is used to differentiate async IO (paging, swapping)
885 * which may not fail from ordinary buffer allocations.
887 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
890 struct buffer_head *bh, *head;
896 while ((offset -= size) >= 0) {
897 bh = alloc_buffer_head(GFP_NOFS);
902 bh->b_this_page = head;
907 atomic_set(&bh->b_count, 0);
908 bh->b_private = NULL;
911 /* Link the buffer to its page */
912 set_bh_page(bh, page, offset);
914 init_buffer(bh, NULL, NULL);
918 * In case anything failed, we just free everything we got.
924 head = head->b_this_page;
925 free_buffer_head(bh);
930 * Return failure for non-async IO requests. Async IO requests
931 * are not allowed to fail, so we have to wait until buffer heads
932 * become available. But we don't want tasks sleeping with
933 * partially complete buffers, so all were released above.
938 /* We're _really_ low on memory. Now we just
939 * wait for old buffer heads to become free due to
940 * finishing IO. Since this is an async request and
941 * the reserve list is empty, we're sure there are
942 * async buffer heads in use.
947 EXPORT_SYMBOL_GPL(alloc_page_buffers);
950 link_dev_buffers(struct page *page, struct buffer_head *head)
952 struct buffer_head *bh, *tail;
957 bh = bh->b_this_page;
959 tail->b_this_page = head;
960 attach_page_buffers(page, head);
964 * Initialise the state of a blockdev page's buffers.
967 init_page_buffers(struct page *page, struct block_device *bdev,
968 sector_t block, int size)
970 struct buffer_head *head = page_buffers(page);
971 struct buffer_head *bh = head;
972 int uptodate = PageUptodate(page);
975 if (!buffer_mapped(bh)) {
976 init_buffer(bh, NULL, NULL);
978 bh->b_blocknr = block;
980 set_buffer_uptodate(bh);
981 set_buffer_mapped(bh);
984 bh = bh->b_this_page;
985 } while (bh != head);
989 * Create the page-cache page that contains the requested block.
991 * This is used purely for blockdev mappings.
994 grow_dev_page(struct block_device *bdev, sector_t block,
995 pgoff_t index, int size)
997 struct inode *inode = bdev->bd_inode;
999 struct buffer_head *bh;
1001 page = find_or_create_page(inode->i_mapping, index,
1002 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1006 BUG_ON(!PageLocked(page));
1008 if (page_has_buffers(page)) {
1009 bh = page_buffers(page);
1010 if (bh->b_size == size) {
1011 init_page_buffers(page, bdev, block, size);
1014 if (!try_to_free_buffers(page))
1019 * Allocate some buffers for this page
1021 bh = alloc_page_buffers(page, size, 0);
1026 * Link the page to the buffers and initialise them. Take the
1027 * lock to be atomic wrt __find_get_block(), which does not
1028 * run under the page lock.
1030 spin_lock(&inode->i_mapping->private_lock);
1031 link_dev_buffers(page, bh);
1032 init_page_buffers(page, bdev, block, size);
1033 spin_unlock(&inode->i_mapping->private_lock);
1039 page_cache_release(page);
1044 * Create buffers for the specified block device block's page. If
1045 * that page was dirty, the buffers are set dirty also.
1048 grow_buffers(struct block_device *bdev, sector_t block, int size)
1057 } while ((size << sizebits) < PAGE_SIZE);
1059 index = block >> sizebits;
1062 * Check for a block which wants to lie outside our maximum possible
1063 * pagecache index. (this comparison is done using sector_t types).
1065 if (unlikely(index != block >> sizebits)) {
1066 char b[BDEVNAME_SIZE];
1068 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1070 __func__, (unsigned long long)block,
1074 block = index << sizebits;
1075 /* Create a page with the proper size buffers.. */
1076 page = grow_dev_page(bdev, block, index, size);
1080 page_cache_release(page);
1084 static struct buffer_head *
1085 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1087 /* Size must be a multiple of the hard sector size */
1088 if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1089 (size < 512 || size > PAGE_SIZE))) {
1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1092 printk(KERN_ERR "hardsect size: %d\n",
1093 bdev_hardsect_size(bdev));
1100 struct buffer_head * bh;
1103 bh = __find_get_block(bdev, block, size);
1107 ret = grow_buffers(bdev, block, size);
1116 * The relationship between dirty buffers and dirty pages:
1118 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1119 * the page is tagged dirty in its radix tree.
1121 * At all times, the dirtiness of the buffers represents the dirtiness of
1122 * subsections of the page. If the page has buffers, the page dirty bit is
1123 * merely a hint about the true dirty state.
1125 * When a page is set dirty in its entirety, all its buffers are marked dirty
1126 * (if the page has buffers).
1128 * When a buffer is marked dirty, its page is dirtied, but the page's other
1131 * Also. When blockdev buffers are explicitly read with bread(), they
1132 * individually become uptodate. But their backing page remains not
1133 * uptodate - even if all of its buffers are uptodate. A subsequent
1134 * block_read_full_page() against that page will discover all the uptodate
1135 * buffers, will set the page uptodate and will perform no I/O.
1139 * mark_buffer_dirty - mark a buffer_head as needing writeout
1140 * @bh: the buffer_head to mark dirty
1142 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1143 * backing page dirty, then tag the page as dirty in its address_space's radix
1144 * tree and then attach the address_space's inode to its superblock's dirty
1147 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1148 * mapping->tree_lock and the global inode_lock.
1150 void mark_buffer_dirty(struct buffer_head *bh)
1152 WARN_ON_ONCE(!buffer_uptodate(bh));
1155 * Very *carefully* optimize the it-is-already-dirty case.
1157 * Don't let the final "is it dirty" escape to before we
1158 * perhaps modified the buffer.
1160 if (buffer_dirty(bh)) {
1162 if (buffer_dirty(bh))
1166 if (!test_set_buffer_dirty(bh)) {
1167 struct page *page = bh->b_page;
1168 if (!TestSetPageDirty(page))
1169 __set_page_dirty(page, page_mapping(page), 0);
1174 * Decrement a buffer_head's reference count. If all buffers against a page
1175 * have zero reference count, are clean and unlocked, and if the page is clean
1176 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1177 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1178 * a page but it ends up not being freed, and buffers may later be reattached).
1180 void __brelse(struct buffer_head * buf)
1182 if (atomic_read(&buf->b_count)) {
1186 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1190 * bforget() is like brelse(), except it discards any
1191 * potentially dirty data.
1193 void __bforget(struct buffer_head *bh)
1195 clear_buffer_dirty(bh);
1196 if (bh->b_assoc_map) {
1197 struct address_space *buffer_mapping = bh->b_page->mapping;
1199 spin_lock(&buffer_mapping->private_lock);
1200 list_del_init(&bh->b_assoc_buffers);
1201 bh->b_assoc_map = NULL;
1202 spin_unlock(&buffer_mapping->private_lock);
1207 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1210 if (buffer_uptodate(bh)) {
1215 bh->b_end_io = end_buffer_read_sync;
1216 submit_bh(READ, bh);
1218 if (buffer_uptodate(bh))
1226 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1227 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1228 * refcount elevated by one when they're in an LRU. A buffer can only appear
1229 * once in a particular CPU's LRU. A single buffer can be present in multiple
1230 * CPU's LRUs at the same time.
1232 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1233 * sb_find_get_block().
1235 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1236 * a local interrupt disable for that.
1239 #define BH_LRU_SIZE 8
1242 struct buffer_head *bhs[BH_LRU_SIZE];
1245 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1248 #define bh_lru_lock() local_irq_disable()
1249 #define bh_lru_unlock() local_irq_enable()
1251 #define bh_lru_lock() preempt_disable()
1252 #define bh_lru_unlock() preempt_enable()
1255 static inline void check_irqs_on(void)
1257 #ifdef irqs_disabled
1258 BUG_ON(irqs_disabled());
1263 * The LRU management algorithm is dopey-but-simple. Sorry.
1265 static void bh_lru_install(struct buffer_head *bh)
1267 struct buffer_head *evictee = NULL;
1272 lru = &__get_cpu_var(bh_lrus);
1273 if (lru->bhs[0] != bh) {
1274 struct buffer_head *bhs[BH_LRU_SIZE];
1280 for (in = 0; in < BH_LRU_SIZE; in++) {
1281 struct buffer_head *bh2 = lru->bhs[in];
1286 if (out >= BH_LRU_SIZE) {
1287 BUG_ON(evictee != NULL);
1294 while (out < BH_LRU_SIZE)
1296 memcpy(lru->bhs, bhs, sizeof(bhs));
1305 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1307 static struct buffer_head *
1308 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1310 struct buffer_head *ret = NULL;
1316 lru = &__get_cpu_var(bh_lrus);
1317 for (i = 0; i < BH_LRU_SIZE; i++) {
1318 struct buffer_head *bh = lru->bhs[i];
1320 if (bh && bh->b_bdev == bdev &&
1321 bh->b_blocknr == block && bh->b_size == size) {
1324 lru->bhs[i] = lru->bhs[i - 1];
1339 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1340 * it in the LRU and mark it as accessed. If it is not present then return
1343 struct buffer_head *
1344 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1346 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1349 bh = __find_get_block_slow(bdev, block);
1357 EXPORT_SYMBOL(__find_get_block);
1360 * __getblk will locate (and, if necessary, create) the buffer_head
1361 * which corresponds to the passed block_device, block and size. The
1362 * returned buffer has its reference count incremented.
1364 * __getblk() cannot fail - it just keeps trying. If you pass it an
1365 * illegal block number, __getblk() will happily return a buffer_head
1366 * which represents the non-existent block. Very weird.
1368 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1369 * attempt is failing. FIXME, perhaps?
1371 struct buffer_head *
1372 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1374 struct buffer_head *bh = __find_get_block(bdev, block, size);
1378 bh = __getblk_slow(bdev, block, size);
1381 EXPORT_SYMBOL(__getblk);
1384 * Do async read-ahead on a buffer..
1386 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1388 struct buffer_head *bh = __getblk(bdev, block, size);
1390 ll_rw_block(READA, 1, &bh);
1394 EXPORT_SYMBOL(__breadahead);
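/*
 * Illustrative sketch (hypothetical function name, block numbers and size):
 * kick off readahead for blocks that will probably be needed soon, then
 * read the one needed now synchronously.
 */
static struct buffer_head *example_read_with_readahead(struct block_device *bdev,
							sector_t block)
{
	__breadahead(bdev, block + 1, 512);	/* asynchronous, result discarded */
	__breadahead(bdev, block + 2, 512);
	return __bread(bdev, block, 512);	/* may return NULL on I/O error */
}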
1397 * __bread() - reads a specified block and returns the bh
1398 * @bdev: the block_device to read from
1399 * @block: number of block
1400 * @size: size (in bytes) to read
1402 * Reads a specified block, and returns buffer head that contains it.
1403 * It returns NULL if the block was unreadable.
1405 struct buffer_head *
1406 __bread(struct block_device *bdev, sector_t block, unsigned size)
1408 struct buffer_head *bh = __getblk(bdev, block, size);
1410 if (likely(bh) && !buffer_uptodate(bh))
1411 bh = __bread_slow(bh);
1414 EXPORT_SYMBOL(__bread);
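/*
 * Illustrative sketch (hypothetical function name, block number and size):
 * the classic read-modify-write cycle for a metadata block using the
 * helpers above.
 */
static int example_update_block(struct block_device *bdev, sector_t block)
{
	struct buffer_head *bh = __bread(bdev, block, 512);

	if (!bh)
		return -EIO;			/* the block was unreadable */
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);	/* modify the buffer contents */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);			/* schedule it for writeback */
	brelse(bh);				/* drop our reference */
	return 0;
}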
1417 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1418 * This doesn't race because it runs in each cpu either in irq
1419 * or with preempt disabled.
1421 static void invalidate_bh_lru(void *arg)
1423 struct bh_lru *b = &get_cpu_var(bh_lrus);
1426 for (i = 0; i < BH_LRU_SIZE; i++) {
1430 put_cpu_var(bh_lrus);
1433 void invalidate_bh_lrus(void)
1435 on_each_cpu(invalidate_bh_lru, NULL, 1);
1437 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1439 void set_bh_page(struct buffer_head *bh,
1440 struct page *page, unsigned long offset)
1443 BUG_ON(offset >= PAGE_SIZE);
1444 if (PageHighMem(page))
1446 * This catches illegal uses and preserves the offset:
1448 bh->b_data = (char *)(0 + offset);
1450 bh->b_data = page_address(page) + offset;
1452 EXPORT_SYMBOL(set_bh_page);
1455 * Called when truncating a buffer on a page completely.
1457 static void discard_buffer(struct buffer_head * bh)
1460 clear_buffer_dirty(bh);
1462 clear_buffer_mapped(bh);
1463 clear_buffer_req(bh);
1464 clear_buffer_new(bh);
1465 clear_buffer_delay(bh);
1466 clear_buffer_unwritten(bh);
1471 * block_invalidatepage - invalidate part or all of a buffer-backed page
1473 * @page: the page which is affected
1474 * @offset: the index of the truncation point
1476 * block_invalidatepage() is called when all or part of the page has become
1477 * invalidated by a truncate operation.
1479 * block_invalidatepage() does not have to release all buffers, but it must
1480 * ensure that no dirty buffer is left outside @offset and that no I/O
1481 * is underway against any of the blocks which are outside the truncation
1482 * point. Because the caller is about to free (and possibly reuse) those
1485 void block_invalidatepage(struct page *page, unsigned long offset)
1487 struct buffer_head *head, *bh, *next;
1488 unsigned int curr_off = 0;
1490 BUG_ON(!PageLocked(page));
1491 if (!page_has_buffers(page))
1494 head = page_buffers(page);
1497 unsigned int next_off = curr_off + bh->b_size;
1498 next = bh->b_this_page;
1501 * is this block fully invalidated?
1503 if (offset <= curr_off)
1505 curr_off = next_off;
1507 } while (bh != head);
1510 * We release buffers only if the entire page is being invalidated.
1511 * The get_block cached value has been unconditionally invalidated,
1512 * so real IO is not possible anymore.
1515 try_to_release_page(page, 0);
1519 EXPORT_SYMBOL(block_invalidatepage);
1522 * We attach and possibly dirty the buffers atomically wrt
1523 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1524 * is already excluded via the page lock.
1526 void create_empty_buffers(struct page *page,
1527 unsigned long blocksize, unsigned long b_state)
1529 struct buffer_head *bh, *head, *tail;
1531 head = alloc_page_buffers(page, blocksize, 1);
1534 bh->b_state |= b_state;
1536 bh = bh->b_this_page;
1538 tail->b_this_page = head;
1540 spin_lock(&page->mapping->private_lock);
1541 if (PageUptodate(page) || PageDirty(page)) {
1544 if (PageDirty(page))
1545 set_buffer_dirty(bh);
1546 if (PageUptodate(page))
1547 set_buffer_uptodate(bh);
1548 bh = bh->b_this_page;
1549 } while (bh != head);
1551 attach_page_buffers(page, head);
1552 spin_unlock(&page->mapping->private_lock);
1554 EXPORT_SYMBOL(create_empty_buffers);
1557 * We are taking a block for data and we don't want any output from any
1558 * buffer-cache aliases starting from return from that function and
1559 * until the moment when something will explicitly mark the buffer
1560 * dirty (hopefully that will not happen until we free that block ;-)
1561 * We don't even need to mark it not-uptodate - nobody can expect
1562 * anything from a newly allocated buffer anyway. We used to use
1563 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1564 * don't want to mark the alias unmapped, for example - it would confuse
1565 * anyone who might pick it with bread() afterwards...
1567 * Also.. Note that bforget() doesn't lock the buffer. So there can
1568 * be writeout I/O going on against recently-freed buffers. We don't
1569 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1570 * only if we really need to. That happens here.
1572 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1574 struct buffer_head *old_bh;
1578 old_bh = __find_get_block_slow(bdev, block);
1580 clear_buffer_dirty(old_bh);
1581 wait_on_buffer(old_bh);
1582 clear_buffer_req(old_bh);
1586 EXPORT_SYMBOL(unmap_underlying_metadata);
1589 * NOTE! All mapped/uptodate combinations are valid:
1591 * Mapped Uptodate Meaning
1593 * No No "unknown" - must do get_block()
1594 * No Yes "hole" - zero-filled
1595 * Yes No "allocated" - allocated on disk, not read in
1596 * Yes Yes "valid" - allocated and up-to-date in memory.
1598 * "Dirty" is valid only with the last case (mapped+uptodate).
1602 * While block_write_full_page is writing back the dirty buffers under
1603 * the page lock, whoever dirtied the buffers may decide to clean them
1604 * again at any time. We handle that by only looking at the buffer
1605 * state inside lock_buffer().
1607 * If block_write_full_page() is called for regular writeback
1608 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1609 * locked buffer. This only can happen if someone has written the buffer
1610 * directly, with submit_bh(). At the address_space level PageWriteback
1611 * prevents this contention from occurring.
1613 * If block_write_full_page() is called with wbc->sync_mode ==
1614 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1615 * causes the writes to be flagged as synchronous writes, but the
1616 * block device queue will NOT be unplugged, since usually many pages
1617 * will be pushed out before the higher-level caller actually
1618 * waits for the writes to be completed. The various wait functions,
1619 * such as wait_on_writeback_range() will ultimately call sync_page()
1620 * which will ultimately call blk_run_backing_dev(), which will end up
1621 * unplugging the device queue.
1623 static int __block_write_full_page(struct inode *inode, struct page *page,
1624 get_block_t *get_block, struct writeback_control *wbc,
1625 bh_end_io_t *handler)
1629 sector_t last_block;
1630 struct buffer_head *bh, *head;
1631 const unsigned blocksize = 1 << inode->i_blkbits;
1632 int nr_underway = 0;
1633 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1634 WRITE_SYNC_PLUG : WRITE);
1636 BUG_ON(!PageLocked(page));
1638 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1640 if (!page_has_buffers(page)) {
1641 create_empty_buffers(page, blocksize,
1642 (1 << BH_Dirty)|(1 << BH_Uptodate));
1646 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1647 * here, and the (potentially unmapped) buffers may become dirty at
1648 * any time. If a buffer becomes dirty here after we've inspected it
1649 * then we just miss that fact, and the page stays dirty.
1651 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1652 * handle that here by just cleaning them.
1655 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1656 head = page_buffers(page);
1660 * Get all the dirty buffers mapped to disk addresses and
1661 * handle any aliases from the underlying blockdev's mapping.
1664 if (block > last_block) {
1666 * mapped buffers outside i_size will occur, because
1667 * this page can be outside i_size when there is a
1668 * truncate in progress.
1671 * The buffer was zeroed by block_write_full_page()
1673 clear_buffer_dirty(bh);
1674 set_buffer_uptodate(bh);
1675 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1677 WARN_ON(bh->b_size != blocksize);
1678 err = get_block(inode, block, bh, 1);
1681 clear_buffer_delay(bh);
1682 if (buffer_new(bh)) {
1683 /* blockdev mappings never come here */
1684 clear_buffer_new(bh);
1685 unmap_underlying_metadata(bh->b_bdev,
1689 bh = bh->b_this_page;
1691 } while (bh != head);
1694 if (!buffer_mapped(bh))
1697 * If it's a fully non-blocking write attempt and we cannot
1698 * lock the buffer then redirty the page. Note that this can
1699 * potentially cause a busy-wait loop from pdflush and kswapd
1700 * activity, but those code paths have their own higher-level
1703 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1705 } else if (!trylock_buffer(bh)) {
1706 redirty_page_for_writepage(wbc, page);
1709 if (test_clear_buffer_dirty(bh)) {
1710 mark_buffer_async_write_endio(bh, handler);
1714 } while ((bh = bh->b_this_page) != head);
1717 * The page and its buffers are protected by PageWriteback(), so we can
1718 * drop the bh refcounts early.
1720 BUG_ON(PageWriteback(page));
1721 set_page_writeback(page);
1724 struct buffer_head *next = bh->b_this_page;
1725 if (buffer_async_write(bh)) {
1726 submit_bh(write_op, bh);
1730 } while (bh != head);
1735 if (nr_underway == 0) {
1737 * The page was marked dirty, but the buffers were
1738 * clean. Someone wrote them back by hand with
1739 * ll_rw_block/submit_bh. A rare case.
1741 end_page_writeback(page);
1744 * The page and buffer_heads can be released at any time from
1752 * ENOSPC, or some other error. We may already have added some
1753 * blocks to the file, so we need to write these out to avoid
1754 * exposing stale data.
1755 * The page is currently locked and not marked for writeback
1758 /* Recovery: lock and submit the mapped buffers */
1760 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1761 !buffer_delay(bh)) {
1763 mark_buffer_async_write_endio(bh, handler);
1766 * The buffer may have been set dirty during
1767 * attachment to a dirty page.
1769 clear_buffer_dirty(bh);
1771 } while ((bh = bh->b_this_page) != head);
1773 BUG_ON(PageWriteback(page));
1774 mapping_set_error(page->mapping, err);
1775 set_page_writeback(page);
1777 struct buffer_head *next = bh->b_this_page;
1778 if (buffer_async_write(bh)) {
1779 clear_buffer_dirty(bh);
1780 submit_bh(write_op, bh);
1784 } while (bh != head);
1790 * If a page has any new buffers, zero them out here, and mark them uptodate
1791 * and dirty so they'll be written out (in order to prevent uninitialised
1792 * block data from leaking). And clear the new bit.
1794 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1796 unsigned int block_start, block_end;
1797 struct buffer_head *head, *bh;
1799 BUG_ON(!PageLocked(page));
1800 if (!page_has_buffers(page))
1803 bh = head = page_buffers(page);
1806 block_end = block_start + bh->b_size;
1808 if (buffer_new(bh)) {
1809 if (block_end > from && block_start < to) {
1810 if (!PageUptodate(page)) {
1811 unsigned start, size;
1813 start = max(from, block_start);
1814 size = min(to, block_end) - start;
1816 zero_user(page, start, size);
1817 set_buffer_uptodate(bh);
1820 clear_buffer_new(bh);
1821 mark_buffer_dirty(bh);
1825 block_start = block_end;
1826 bh = bh->b_this_page;
1827 } while (bh != head);
1829 EXPORT_SYMBOL(page_zero_new_buffers);
1831 static int __block_prepare_write(struct inode *inode, struct page *page,
1832 unsigned from, unsigned to, get_block_t *get_block)
1834 unsigned block_start, block_end;
1837 unsigned blocksize, bbits;
1838 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1840 BUG_ON(!PageLocked(page));
1841 BUG_ON(from > PAGE_CACHE_SIZE);
1842 BUG_ON(to > PAGE_CACHE_SIZE);
1845 blocksize = 1 << inode->i_blkbits;
1846 if (!page_has_buffers(page))
1847 create_empty_buffers(page, blocksize, 0);
1848 head = page_buffers(page);
1850 bbits = inode->i_blkbits;
1851 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1853 for(bh = head, block_start = 0; bh != head || !block_start;
1854 block++, block_start=block_end, bh = bh->b_this_page) {
1855 block_end = block_start + blocksize;
1856 if (block_end <= from || block_start >= to) {
1857 if (PageUptodate(page)) {
1858 if (!buffer_uptodate(bh))
1859 set_buffer_uptodate(bh);
1864 clear_buffer_new(bh);
1865 if (!buffer_mapped(bh)) {
1866 WARN_ON(bh->b_size != blocksize);
1867 err = get_block(inode, block, bh, 1);
1870 if (buffer_new(bh)) {
1871 unmap_underlying_metadata(bh->b_bdev,
1873 if (PageUptodate(page)) {
1874 clear_buffer_new(bh);
1875 set_buffer_uptodate(bh);
1876 mark_buffer_dirty(bh);
1879 if (block_end > to || block_start < from)
1880 zero_user_segments(page,
1886 if (PageUptodate(page)) {
1887 if (!buffer_uptodate(bh))
1888 set_buffer_uptodate(bh);
1891 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1892 !buffer_unwritten(bh) &&
1893 (block_start < from || block_end > to)) {
1894 ll_rw_block(READ, 1, &bh);
1899 * If we issued read requests - let them complete.
1901 while(wait_bh > wait) {
1902 wait_on_buffer(*--wait_bh);
1903 if (!buffer_uptodate(*wait_bh))
1907 page_zero_new_buffers(page, from, to);
1911 static int __block_commit_write(struct inode *inode, struct page *page,
1912 unsigned from, unsigned to)
1914 unsigned block_start, block_end;
1917 struct buffer_head *bh, *head;
1919 blocksize = 1 << inode->i_blkbits;
1921 for(bh = head = page_buffers(page), block_start = 0;
1922 bh != head || !block_start;
1923 block_start=block_end, bh = bh->b_this_page) {
1924 block_end = block_start + blocksize;
1925 if (block_end <= from || block_start >= to) {
1926 if (!buffer_uptodate(bh))
1929 set_buffer_uptodate(bh);
1930 mark_buffer_dirty(bh);
1932 clear_buffer_new(bh);
1936 * If this is a partial write which happened to make all buffers
1937 * uptodate then we can optimize away a bogus readpage() for
1938 * the next read(). Here we 'discover' whether the page went
1939 * uptodate as a result of this (potentially partial) write.
1942 SetPageUptodate(page);
1947 * block_write_begin takes care of the basic task of block allocation and
1948 * bringing partial write blocks uptodate first.
1950 * If *pagep is not NULL, then block_write_begin uses the locked page
1951 * at *pagep rather than allocating its own. In this case, the page will
1952 * not be unlocked or deallocated on failure.
1954 int block_write_begin(struct file *file, struct address_space *mapping,
1955 loff_t pos, unsigned len, unsigned flags,
1956 struct page **pagep, void **fsdata,
1957 get_block_t *get_block)
1959 struct inode *inode = mapping->host;
1963 unsigned start, end;
1966 index = pos >> PAGE_CACHE_SHIFT;
1967 start = pos & (PAGE_CACHE_SIZE - 1);
1973 page = grab_cache_page_write_begin(mapping, index, flags);
1980 BUG_ON(!PageLocked(page));
1982 status = __block_prepare_write(inode, page, start, end, get_block);
1983 if (unlikely(status)) {
1984 ClearPageUptodate(page);
1988 page_cache_release(page);
1992 * prepare_write() may have instantiated a few blocks
1993 * outside i_size. Trim these off again. Don't need
1994 * i_size_read because we hold i_mutex.
1996 if (pos + len > inode->i_size)
1997 vmtruncate(inode, inode->i_size);
2004 EXPORT_SYMBOL(block_write_begin);
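/*
 * Illustrative sketch (hypothetical function names): a typical
 * ->write_begin() for a simple buffer-backed filesystem just forwards to
 * block_write_begin() with its own get_block callback.  The trivial
 * get_block below only exists for the sketch.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Trivial 1:1 mapping of file blocks onto device blocks. */
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int example_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, example_get_block);
}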
2006 int block_write_end(struct file *file, struct address_space *mapping,
2007 loff_t pos, unsigned len, unsigned copied,
2008 struct page *page, void *fsdata)
2010 struct inode *inode = mapping->host;
2013 start = pos & (PAGE_CACHE_SIZE - 1);
2015 if (unlikely(copied < len)) {
2017 * The buffers that were written will now be uptodate, so we
2018 * don't have to worry about a readpage reading them and
2019 * overwriting a partial write. However if we have encountered
2020 * a short write and only partially written into a buffer, it
2021 * will not be marked uptodate, so a readpage might come in and
2022 * destroy our partial write.
2024 * Do the simplest thing, and just treat any short write to a
2025 * non uptodate page as a zero-length write, and force the
2026 * caller to redo the whole thing.
2028 if (!PageUptodate(page))
2031 page_zero_new_buffers(page, start+copied, start+len);
2033 flush_dcache_page(page);
2035 /* This could be a short (even 0-length) commit */
2036 __block_commit_write(inode, page, start, start+copied);
2040 EXPORT_SYMBOL(block_write_end);
2042 int generic_write_end(struct file *file, struct address_space *mapping,
2043 loff_t pos, unsigned len, unsigned copied,
2044 struct page *page, void *fsdata)
2046 struct inode *inode = mapping->host;
2047 int i_size_changed = 0;
2049 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2052 * No need to use i_size_read() here, the i_size
2053 * cannot change under us because we hold i_mutex.
2055 * But it's important to update i_size while still holding page lock:
2056 * page writeout could otherwise come in and zero beyond i_size.
2058 if (pos+copied > inode->i_size) {
2059 i_size_write(inode, pos+copied);
2064 page_cache_release(page);
2067 * Don't mark the inode dirty under page lock. First, it unnecessarily
2068 * makes the holding time of page lock longer. Second, it forces lock
2069 * ordering of page lock and transaction start for journaling
2073 mark_inode_dirty(inode);
2077 EXPORT_SYMBOL(generic_write_end);
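/*
 * Illustrative sketch (hypothetical function name): the matching
 * ->write_end() usually just forwards to generic_write_end().
 */
static int example_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
}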
2080 * block_is_partially_uptodate checks whether buffers within a page are
2083 * Returns true if all buffers which correspond to a file portion
2084 * we want to read are uptodate.
2086 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2089 struct inode *inode = page->mapping->host;
2090 unsigned block_start, block_end, blocksize;
2092 struct buffer_head *bh, *head;
2095 if (!page_has_buffers(page))
2098 blocksize = 1 << inode->i_blkbits;
2099 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2101 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2104 head = page_buffers(page);
2108 block_end = block_start + blocksize;
2109 if (block_end > from && block_start < to) {
2110 if (!buffer_uptodate(bh)) {
2114 if (block_end >= to)
2117 block_start = block_end;
2118 bh = bh->b_this_page;
2119 } while (bh != head);
2123 EXPORT_SYMBOL(block_is_partially_uptodate);
2126 * Generic "read page" function for block devices that have the normal
2127 * get_block functionality. This is most of the block device filesystems.
2128 * Reads the page asynchronously --- the unlock_buffer() and
2129 * set/clear_buffer_uptodate() functions propagate buffer state into the
2130 * page struct once IO has completed.
2132 int block_read_full_page(struct page *page, get_block_t *get_block)
2134 struct inode *inode = page->mapping->host;
2135 sector_t iblock, lblock;
2136 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2137 unsigned int blocksize;
2139 int fully_mapped = 1;
2141 BUG_ON(!PageLocked(page));
2142 blocksize = 1 << inode->i_blkbits;
2143 if (!page_has_buffers(page))
2144 create_empty_buffers(page, blocksize, 0);
2145 head = page_buffers(page);
2147 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2148 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2154 if (buffer_uptodate(bh))
2157 if (!buffer_mapped(bh)) {
2161 if (iblock < lblock) {
2162 WARN_ON(bh->b_size != blocksize);
2163 err = get_block(inode, iblock, bh, 0);
2167 if (!buffer_mapped(bh)) {
2168 zero_user(page, i * blocksize, blocksize);
2170 set_buffer_uptodate(bh);
2174 * get_block() might have updated the buffer
2177 if (buffer_uptodate(bh))
2181 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2184 SetPageMappedToDisk(page);
2188 * All buffers are uptodate - we can set the page uptodate
2189 * as well. But not if get_block() returned an error.
2191 if (!PageError(page))
2192 SetPageUptodate(page);
2197 /* Stage two: lock the buffers */
2198 for (i = 0; i < nr; i++) {
2201 mark_buffer_async_read(bh);
2205 * Stage 3: start the IO. Check for uptodateness
2206 * inside the buffer lock in case another process reading
2207 * the underlying blockdev brought it uptodate (the sct fix).
2209 for (i = 0; i < nr; i++) {
2211 if (buffer_uptodate(bh))
2212 end_buffer_async_read(bh, 1);
2214 submit_bh(READ, bh);
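/*
 * Illustrative sketch (hypothetical function name): a filesystem's
 * ->readpage() typically forwards to block_read_full_page() with its
 * get_block callback (example_get_block is the hypothetical callback from
 * the earlier sketch).
 */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}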
2219 /* utility function for filesystems that need to do work on expanding
2220 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2221 * deal with the hole.
2223 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2225 struct address_space *mapping = inode->i_mapping;
2228 unsigned long limit;
2232 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2233 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2234 send_sig(SIGXFSZ, current, 0);
2237 if (size > inode->i_sb->s_maxbytes)
2240 err = pagecache_write_begin(NULL, mapping, size, 0,
2241 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2246 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2253 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2254 loff_t pos, loff_t *bytes)
2256 struct inode *inode = mapping->host;
2257 unsigned blocksize = 1 << inode->i_blkbits;
2260 pgoff_t index, curidx;
2262 unsigned zerofrom, offset, len;
2265 index = pos >> PAGE_CACHE_SHIFT;
2266 offset = pos & ~PAGE_CACHE_MASK;
2268 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2269 zerofrom = curpos & ~PAGE_CACHE_MASK;
2270 if (zerofrom & (blocksize-1)) {
2271 *bytes |= (blocksize-1);
2274 len = PAGE_CACHE_SIZE - zerofrom;
2276 err = pagecache_write_begin(file, mapping, curpos, len,
2277 AOP_FLAG_UNINTERRUPTIBLE,
2281 zero_user(page, zerofrom, len);
2282 err = pagecache_write_end(file, mapping, curpos, len, len,
2289 balance_dirty_pages_ratelimited(mapping);
2292 /* page covers the boundary, find the boundary offset */
2293 if (index == curidx) {
2294 zerofrom = curpos & ~PAGE_CACHE_MASK;
2295 /* if we are expanding the file, the last block will be filled */
2296 if (offset <= zerofrom) {
2299 if (zerofrom & (blocksize-1)) {
2300 *bytes |= (blocksize-1);
2303 len = offset - zerofrom;
2305 err = pagecache_write_begin(file, mapping, curpos, len,
2306 AOP_FLAG_UNINTERRUPTIBLE,
2310 zero_user(page, zerofrom, len);
2311 err = pagecache_write_end(file, mapping, curpos, len, len,
2323 * For moronic filesystems that do not allow holes in files.
2324 * We may have to extend the file.
2326 int cont_write_begin(struct file *file, struct address_space *mapping,
2327 loff_t pos, unsigned len, unsigned flags,
2328 struct page **pagep, void **fsdata,
2329 get_block_t *get_block, loff_t *bytes)
2331 struct inode *inode = mapping->host;
2332 unsigned blocksize = 1 << inode->i_blkbits;
2336 err = cont_expand_zero(file, mapping, pos, bytes);
2340 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2341 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2342 *bytes |= (blocksize-1);
2347 err = block_write_begin(file, mapping, pos, len,
2348 flags, pagep, fsdata, get_block);
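/*
 * Illustrative sketch, not part of this file: a hole-less filesystem's
 * ->write_begin typically hands cont_write_begin() a pointer to its
 * per-inode "allocated so far" marker as @bytes, so the gap between
 * that marker and @pos is zeroed first.  myfs_get_block() and the
 * myfs_i(...)->i_allocated field are hypothetical.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block,
 *					&myfs_i(mapping->host)->i_allocated);
 *	}
 */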
2353 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2354 get_block_t *get_block)
2356 struct inode *inode = page->mapping->host;
2357 int err = __block_prepare_write(inode, page, from, to, get_block);
2359 ClearPageUptodate(page);
2363 int block_commit_write(struct page *page, unsigned from, unsigned to)
2365 struct inode *inode = page->mapping->host;
2366 __block_commit_write(inode,page,from,to);
2371 * block_page_mkwrite() is not allowed to change the file size as it gets
2372 * called from a page fault handler when a page is first dirtied. Hence we must
2373 * be careful to check for EOF conditions here. We set the page up correctly
2374 * for a written page which means we get ENOSPC checking when writing into
2375 * holes and correct delalloc and unwritten extent mapping on filesystems that
2376 * support these features.
2378 * We are not allowed to take the i_mutex here so we have to play games to
2379 * protect against truncate races as the page could now be beyond EOF. Because
2380 * vmtruncate() writes the inode size before removing pages, once we have the
2381 * page lock we can determine safely if the page is beyond EOF. If it is not
2382 * beyond EOF, then the page is guaranteed safe against truncation until we
2386 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2387 get_block_t get_block)
2389 struct page *page = vmf->page;
2390 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2393 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2396 size = i_size_read(inode);
2397 if ((page->mapping != inode->i_mapping) ||
2398 (page_offset(page) > size)) {
2399 /* page got truncated out from underneath us */
2403 /* page is wholly or partially inside EOF */
2404 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2405 end = size & ~PAGE_CACHE_MASK;
2407 end = PAGE_CACHE_SIZE;
2409 ret = block_prepare_write(page, 0, end, get_block);
2411 ret = block_commit_write(page, 0, end);
2413 if (unlikely(ret)) {
2416 else /* -ENOSPC, -EIO, etc */
2417 ret = VM_FAULT_SIGBUS;
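/*
 * Illustrative sketch, not part of this file: a filesystem can wrap
 * block_page_mkwrite() as its ->page_mkwrite handler so that writable
 * mmaps get block allocation and ENOSPC checking at fault time.  The
 * myfs_* names and myfs_get_block() are hypothetical.
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, myfs_get_block);
 *	}
 *
 *	static struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */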
2426 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2427 * immediately, while under the page lock. So it needs a special end_io
2428 * handler which does not touch the bh after unlocking it.
2430 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2432 __end_buffer_read_notouch(bh, uptodate);
2436 * Attach the singly-linked list of buffers created by nobh_write_begin to
2437 * the page (converting it to a circular linked list and taking care of page
2440 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2442 struct buffer_head *bh;
2444 BUG_ON(!PageLocked(page));
2446 spin_lock(&page->mapping->private_lock);
2449 if (PageDirty(page))
2450 set_buffer_dirty(bh);
2451 if (!bh->b_this_page)
2452 bh->b_this_page = head;
2453 bh = bh->b_this_page;
2454 } while (bh != head);
2455 attach_page_buffers(page, head);
2456 spin_unlock(&page->mapping->private_lock);
2460 * On entry, no part of the page is uptodate.
2461 * On exit, the page is fully uptodate in the areas outside (from,to)
2463 int nobh_write_begin(struct file *file, struct address_space *mapping,
2464 loff_t pos, unsigned len, unsigned flags,
2465 struct page **pagep, void **fsdata,
2466 get_block_t *get_block)
2468 struct inode *inode = mapping->host;
2469 const unsigned blkbits = inode->i_blkbits;
2470 const unsigned blocksize = 1 << blkbits;
2471 struct buffer_head *head, *bh;
2475 unsigned block_in_page;
2476 unsigned block_start, block_end;
2477 sector_t block_in_file;
2480 int is_mapped_to_disk = 1;
2482 index = pos >> PAGE_CACHE_SHIFT;
2483 from = pos & (PAGE_CACHE_SIZE - 1);
2486 page = grab_cache_page_write_begin(mapping, index, flags);
2492 if (page_has_buffers(page)) {
2494 page_cache_release(page);
2496 return block_write_begin(file, mapping, pos, len, flags, pagep,
2500 if (PageMappedToDisk(page))
2504 * Allocate buffers so that we can keep track of state, and potentially
2505 * attach them to the page if an error occurs. In the common case of
2506 * no error, they will just be freed again without ever being attached
2507 * to the page (which is all OK, because we're under the page lock).
2509 * Be careful: the buffer linked list is a NULL terminated one, rather
2510 * than the circular one we're used to.
2512 head = alloc_page_buffers(page, blocksize, 0);
2518 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2521 * We loop across all blocks in the page, whether or not they are
2522 * part of the affected region. This is so we can discover if the
2523 * page is fully mapped-to-disk.
2525 for (block_start = 0, block_in_page = 0, bh = head;
2526 block_start < PAGE_CACHE_SIZE;
2527 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2530 block_end = block_start + blocksize;
2533 if (block_start >= to)
2535 ret = get_block(inode, block_in_file + block_in_page,
2539 if (!buffer_mapped(bh))
2540 is_mapped_to_disk = 0;
2542 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2543 if (PageUptodate(page)) {
2544 set_buffer_uptodate(bh);
2547 if (buffer_new(bh) || !buffer_mapped(bh)) {
2548 zero_user_segments(page, block_start, from,
2552 if (buffer_uptodate(bh))
2553 continue; /* reiserfs does this */
2554 if (block_start < from || block_end > to) {
2556 bh->b_end_io = end_buffer_read_nobh;
2557 submit_bh(READ, bh);
2564 * The page is locked, so these buffers are protected from
2565 * any VM or truncate activity. Hence we don't need to care
2566 * for the buffer_head refcounts.
2568 for (bh = head; bh; bh = bh->b_this_page) {
2570 if (!buffer_uptodate(bh))
2577 if (is_mapped_to_disk)
2578 SetPageMappedToDisk(page);
2580 *fsdata = head; /* to be released by nobh_write_end */
2587 * Error recovery is a bit difficult. We need to zero out blocks that
2588 * were newly allocated, and dirty them to ensure they get written out.
2589 * Buffers need to be attached to the page at this point, otherwise
2590 * the handling of potential IO errors during writeout would be hard
2591 * (could try doing synchronous writeout, but what if that fails too?)
2593 attach_nobh_buffers(page, head);
2594 page_zero_new_buffers(page, from, to);
2598 page_cache_release(page);
2601 if (pos + len > inode->i_size)
2602 vmtruncate(inode, inode->i_size);
2606 EXPORT_SYMBOL(nobh_write_begin);
2608 int nobh_write_end(struct file *file, struct address_space *mapping,
2609 loff_t pos, unsigned len, unsigned copied,
2610 struct page *page, void *fsdata)
2612 struct inode *inode = page->mapping->host;
2613 struct buffer_head *head = fsdata;
2614 struct buffer_head *bh;
2615 BUG_ON(fsdata != NULL && page_has_buffers(page));
2617 if (unlikely(copied < len) && head)
2618 attach_nobh_buffers(page, head);
2619 if (page_has_buffers(page))
2620 return generic_write_end(file, mapping, pos, len,
2621 copied, page, fsdata);
2623 SetPageUptodate(page);
2624 set_page_dirty(page);
2625 if (pos+copied > inode->i_size) {
2626 i_size_write(inode, pos+copied);
2627 mark_inode_dirty(inode);
2631 page_cache_release(page);
2635 head = head->b_this_page;
2636 free_buffer_head(bh);
2641 EXPORT_SYMBOL(nobh_write_end);
2644 * nobh_writepage() - based on block_write_full_page() except
2645 * that it tries to operate without attaching bufferheads to
2648 int nobh_writepage(struct page *page, get_block_t *get_block,
2649 struct writeback_control *wbc)
2651 struct inode * const inode = page->mapping->host;
2652 loff_t i_size = i_size_read(inode);
2653 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2657 /* Is the page fully inside i_size? */
2658 if (page->index < end_index)
2661 /* Is the page fully outside i_size? (truncate in progress) */
2662 offset = i_size & (PAGE_CACHE_SIZE-1);
2663 if (page->index >= end_index+1 || !offset) {
2665 * The page may have dirty, unmapped buffers. For example,
2666 * they may have been added in ext3_writepage(). Make them
2667 * freeable here, so the page does not leak.
2670 /* Not really sure about this - do we need this? */
2671 if (page->mapping->a_ops->invalidatepage)
2672 page->mapping->a_ops->invalidatepage(page, offset);
2675 return 0; /* don't care */
2679 * The page straddles i_size. It must be zeroed out on each and every
2680 * writepage invocation because it may be mmapped. "A file is mapped
2681 * in multiples of the page size. For a file that is not a multiple of
2682 * the page size, the remaining memory is zeroed when mapped, and
2683 * writes to that region are not written out to the file."
2685 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2687 ret = mpage_writepage(page, get_block, wbc);
2689 ret = __block_write_full_page(inode, page, get_block, wbc,
2690 end_buffer_async_write);
2693 EXPORT_SYMBOL(nobh_writepage);
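/*
 * Illustrative sketch, not part of this file: the nobh helpers are
 * designed to be used together in an address_space_operations table
 * for a filesystem that avoids long-lived buffer_heads.  All myfs_*
 * names and myfs_get_block() are hypothetical.
 *
 *	static int myfs_nobh_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return nobh_writepage(page, myfs_get_block, wbc);
 *	}
 *
 *	static int myfs_nobh_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return nobh_write_begin(file, mapping, pos, len, flags,
 *					pagep, fsdata, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_nobh_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_nobh_writepage,
 *		.write_begin	= myfs_nobh_write_begin,
 *		.write_end	= nobh_write_end,
 *	};
 */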
2695 int nobh_truncate_page(struct address_space *mapping,
2696 loff_t from, get_block_t *get_block)
2698 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2699 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2702 unsigned length, pos;
2703 struct inode *inode = mapping->host;
2705 struct buffer_head map_bh;
2708 blocksize = 1 << inode->i_blkbits;
2709 length = offset & (blocksize - 1);
2711 /* Block boundary? Nothing to do */
2715 length = blocksize - length;
2716 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2718 page = grab_cache_page(mapping, index);
2723 if (page_has_buffers(page)) {
2726 page_cache_release(page);
2727 return block_truncate_page(mapping, from, get_block);
2730 /* Find the buffer that contains "offset" */
2732 while (offset >= pos) {
2737 err = get_block(inode, iblock, &map_bh, 0);
2740 /* unmapped? It's a hole - nothing to do */
2741 if (!buffer_mapped(&map_bh))
2744 /* Ok, it's mapped. Make sure it's up-to-date */
2745 if (!PageUptodate(page)) {
2746 err = mapping->a_ops->readpage(NULL, page);
2748 page_cache_release(page);
2752 if (!PageUptodate(page)) {
2756 if (page_has_buffers(page))
2759 zero_user(page, offset, length);
2760 set_page_dirty(page);
2765 page_cache_release(page);
2769 EXPORT_SYMBOL(nobh_truncate_page);
2771 int block_truncate_page(struct address_space *mapping,
2772 loff_t from, get_block_t *get_block)
2774 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2775 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2778 unsigned length, pos;
2779 struct inode *inode = mapping->host;
2781 struct buffer_head *bh;
2784 blocksize = 1 << inode->i_blkbits;
2785 length = offset & (blocksize - 1);
2787 /* Block boundary? Nothing to do */
2791 length = blocksize - length;
2792 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2794 page = grab_cache_page(mapping, index);
2799 if (!page_has_buffers(page))
2800 create_empty_buffers(page, blocksize, 0);
2802 /* Find the buffer that contains "offset" */
2803 bh = page_buffers(page);
2805 while (offset >= pos) {
2806 bh = bh->b_this_page;
2812 if (!buffer_mapped(bh)) {
2813 WARN_ON(bh->b_size != blocksize);
2814 err = get_block(inode, iblock, bh, 0);
2817 /* unmapped? It's a hole - nothing to do */
2818 if (!buffer_mapped(bh))
2822 /* Ok, it's mapped. Make sure it's up-to-date */
2823 if (PageUptodate(page))
2824 set_buffer_uptodate(bh);
2826 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2828 ll_rw_block(READ, 1, &bh);
2830 /* Uhhuh. Read error. Complain and punt. */
2831 if (!buffer_uptodate(bh))
2835 zero_user(page, offset, length);
2836 mark_buffer_dirty(bh);
2841 page_cache_release(page);
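/*
 * Illustrative sketch, not part of this file: a filesystem shrinking a
 * file usually calls block_truncate_page() to zero the tail of the new
 * last block before freeing the blocks beyond it.  myfs_get_block() is
 * hypothetical.
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 */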
2847 * The generic ->writepage function for buffer-backed address_spaces
2848 * this form passes in the end_io handler used to finish the IO.
2850 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2851 struct writeback_control *wbc, bh_end_io_t *handler)
2853 struct inode * const inode = page->mapping->host;
2854 loff_t i_size = i_size_read(inode);
2855 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2858 /* Is the page fully inside i_size? */
2859 if (page->index < end_index)
2860 return __block_write_full_page(inode, page, get_block, wbc,
2863 /* Is the page fully outside i_size? (truncate in progress) */
2864 offset = i_size & (PAGE_CACHE_SIZE-1);
2865 if (page->index >= end_index+1 || !offset) {
2867 * The page may have dirty, unmapped buffers. For example,
2868 * they may have been added in ext3_writepage(). Make them
2869 * freeable here, so the page does not leak.
2871 do_invalidatepage(page, 0);
2873 return 0; /* don't care */
2877 * The page straddles i_size. It must be zeroed out on each and every
2878 * writepage invocation because it may be mmapped. "A file is mapped
2879 * in multiples of the page size. For a file that is not a multiple of
2880 * the page size, the remaining memory is zeroed when mapped, and
2881 * writes to that region are not written out to the file."
2883 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2884 return __block_write_full_page(inode, page, get_block, wbc, handler);
2888 * The generic ->writepage function for buffer-backed address_spaces
2890 int block_write_full_page(struct page *page, get_block_t *get_block,
2891 struct writeback_control *wbc)
2893 return block_write_full_page_endio(page, get_block, wbc,
2894 end_buffer_async_write);
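/*
 * Illustrative sketch, not part of this file: a simple buffer-backed
 * filesystem's ->writepage just forwards here.  myfs_get_block() is
 * hypothetical.
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */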
2898 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2899 get_block_t *get_block)
2901 struct buffer_head tmp;
2902 struct inode *inode = mapping->host;
2905 tmp.b_size = 1 << inode->i_blkbits;
2906 get_block(inode, block, &tmp, 0);
2907 return tmp.b_blocknr;
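/*
 * Illustrative sketch, not part of this file: ->bmap (used by the
 * FIBMAP ioctl, among others) can be wired through this helper.
 * myfs_get_block() is hypothetical.
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */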
2910 static void end_bio_bh_io_sync(struct bio *bio, int err)
2912 struct buffer_head *bh = bio->bi_private;
2914 if (err == -EOPNOTSUPP) {
2915 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2916 set_bit(BH_Eopnotsupp, &bh->b_state);
2919 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2920 set_bit(BH_Quiet, &bh->b_state);
2922 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2926 int submit_bh(int rw, struct buffer_head * bh)
2931 BUG_ON(!buffer_locked(bh));
2932 BUG_ON(!buffer_mapped(bh));
2933 BUG_ON(!bh->b_end_io);
2936 * Mask in barrier bit for a write (could be either a WRITE or a
2939 if (buffer_ordered(bh) && (rw & WRITE))
2940 rw |= WRITE_BARRIER;
2943 * Only clear out a write error when rewriting
2945 if (test_set_buffer_req(bh) && (rw & WRITE))
2946 clear_buffer_write_io_error(bh);
2949 * from here on down, it's all bio -- do the initial mapping,
2950 * submit_bio -> generic_make_request may further map this bio around
2952 bio = bio_alloc(GFP_NOIO, 1);
2954 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2955 bio->bi_bdev = bh->b_bdev;
2956 bio->bi_io_vec[0].bv_page = bh->b_page;
2957 bio->bi_io_vec[0].bv_len = bh->b_size;
2958 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2962 bio->bi_size = bh->b_size;
2964 bio->bi_end_io = end_bio_bh_io_sync;
2965 bio->bi_private = bh;
2968 submit_bio(rw, bio);
2970 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2978 * ll_rw_block: low-level access to block devices (DEPRECATED)
2979 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2980 * @nr: number of &struct buffer_heads in the array
2981 * @bhs: array of pointers to &struct buffer_head
2983 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2984 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2985 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2986 * are sent to disk. The fourth %READA option is described in the documentation
2987 * for generic_make_request() which ll_rw_block() calls.
2989 * This function drops any buffer that it cannot get a lock on (with the
2990 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2991 * clean when doing a write request, and any buffer that appears to be
2992 * up-to-date when doing a read request. Further, it marks as clean the buffers that
2993 * are processed for writing (the buffer cache won't assume that they are
2994 * actually clean until the buffer gets unlocked).
2996 * ll_rw_block sets b_end_io to a simple completion handler that marks
2997 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3000 * All of the buffers must be for the same device, and must also be a
3001 * multiple of the current approved size for the device.
3003 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3007 for (i = 0; i < nr; i++) {
3008 struct buffer_head *bh = bhs[i];
3010 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3012 else if (!trylock_buffer(bh))
3015 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3016 rw == SWRITE_SYNC_PLUG) {
3017 if (test_clear_buffer_dirty(bh)) {
3018 bh->b_end_io = end_buffer_write_sync;
3020 if (rw == SWRITE_SYNC)
3021 submit_bh(WRITE_SYNC, bh);
3023 submit_bh(WRITE, bh);
3027 if (!buffer_uptodate(bh)) {
3028 bh->b_end_io = end_buffer_read_sync;
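/*
 * Illustrative sketch, not part of this file: the usual caller pattern
 * is to kick off reads on a batch of buffers and then wait only for
 * the one needed immediately.  "nr" and "bhs" stand for buffers the
 * caller already holds references on.
 *
 *	ll_rw_block(READ, nr, bhs);	(locked or uptodate buffers are skipped)
 *	wait_on_buffer(bhs[0]);
 *	if (!buffer_uptodate(bhs[0]))
 *		return -EIO;
 */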
3039 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3040 * and then start new I/O and then wait upon it. The caller must have a ref on
3043 int sync_dirty_buffer(struct buffer_head *bh)
3047 WARN_ON(atomic_read(&bh->b_count) < 1);
3049 if (test_clear_buffer_dirty(bh)) {
3051 bh->b_end_io = end_buffer_write_sync;
3052 ret = submit_bh(WRITE_SYNC, bh);
3054 if (buffer_eopnotsupp(bh)) {
3055 clear_buffer_eopnotsupp(bh);
3058 if (!ret && !buffer_uptodate(bh))
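/*
 * Illustrative sketch, not part of this file: the typical caller
 * modifies a metadata buffer, dirties it, and then forces it out
 * before proceeding.  "bh", "offset", "src" and "len" are assumed to
 * come from the caller.
 *
 *	memcpy(bh->b_data + offset, src, len);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 */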
3067 * try_to_free_buffers() checks if all the buffers on this particular page
3068 * are unused, and releases them if so.
3070 * Exclusion against try_to_free_buffers may be obtained by either
3071 * locking the page or by holding its mapping's private_lock.
3073 * If the page is dirty but all the buffers are clean then we need to
3074 * be sure to mark the page clean as well. This is because the page
3075 * may be against a block device, and a later reattachment of buffers
3076 * to a dirty page will set *all* buffers dirty, which would corrupt
3077 * filesystem data on the same device.
3079 * The same applies to regular filesystem pages: if all the buffers are
3080 * clean then we set the page clean and proceed. To do that, we require
3081 * total exclusion from __set_page_dirty_buffers(). That is obtained with
3084 * try_to_free_buffers() is non-blocking.
3086 static inline int buffer_busy(struct buffer_head *bh)
3088 return atomic_read(&bh->b_count) |
3089 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3093 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3095 struct buffer_head *head = page_buffers(page);
3096 struct buffer_head *bh;
3100 if (buffer_write_io_error(bh) && page->mapping)
3101 set_bit(AS_EIO, &page->mapping->flags);
3102 if (buffer_busy(bh))
3104 bh = bh->b_this_page;
3105 } while (bh != head);
3108 struct buffer_head *next = bh->b_this_page;
3110 if (bh->b_assoc_map)
3111 __remove_assoc_queue(bh);
3113 } while (bh != head);
3114 *buffers_to_free = head;
3115 __clear_page_buffers(page);
3121 int try_to_free_buffers(struct page *page)
3123 struct address_space * const mapping = page->mapping;
3124 struct buffer_head *buffers_to_free = NULL;
3127 BUG_ON(!PageLocked(page));
3128 if (PageWriteback(page))
3131 if (mapping == NULL) { /* can this still happen? */
3132 ret = drop_buffers(page, &buffers_to_free);
3136 spin_lock(&mapping->private_lock);
3137 ret = drop_buffers(page, &buffers_to_free);
3140 * If the filesystem writes its buffers by hand (eg ext3)
3141 * then we can have clean buffers against a dirty page. We
3142 * clean the page here; otherwise the VM will never notice
3143 * that the filesystem did any IO at all.
3145 * Also, during truncate, discard_buffer will have marked all
3146 * the page's buffers clean. We discover that here and clean
3149 * private_lock must be held over this entire operation in order
3150 * to synchronise against __set_page_dirty_buffers and prevent the
3151 * dirty bit from being lost.
3154 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3155 spin_unlock(&mapping->private_lock);
3157 if (buffers_to_free) {
3158 struct buffer_head *bh = buffers_to_free;
3161 struct buffer_head *next = bh->b_this_page;
3162 free_buffer_head(bh);
3164 } while (bh != buffers_to_free);
3168 EXPORT_SYMBOL(try_to_free_buffers);
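/*
 * Illustrative sketch, not part of this file: a filesystem with no
 * extra per-page state can point ->releasepage at try_to_free_buffers()
 * or wrap it as below; the myfs_* names are hypothetical.
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		if (myfs_page_has_private_state(page))
 *			return 0;
 *		return try_to_free_buffers(page);
 *	}
 */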
3170 void block_sync_page(struct page *page)
3172 struct address_space *mapping;
3175 mapping = page_mapping(page);
3177 blk_run_backing_dev(mapping->backing_dev_info, page);
3181 * There are no bdflush tunables left. But distributions are
3182 * still running obsolete flush daemons, so we terminate them here.
3184 * Use of bdflush() is deprecated and will be removed in a future kernel.
3185 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3187 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3189 static int msg_count;
3191 if (!capable(CAP_SYS_ADMIN))
3194 if (msg_count < 5) {
3197 "warning: process `%s' used the obsolete bdflush"
3198 " system call\n", current->comm);
3199 printk(KERN_INFO "Fix your initscripts?\n");
3208 * Buffer-head allocation
3210 static struct kmem_cache *bh_cachep;
3213 * Once the number of bh's in the machine exceeds this level, we start
3214 * stripping them in writeback.
3216 static int max_buffer_heads;
3218 int buffer_heads_over_limit;
3220 struct bh_accounting {
3221 int nr; /* Number of live bh's */
3222 int ratelimit; /* Limit cacheline bouncing */
3225 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3227 static void recalc_bh_state(void)
3232 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3234 __get_cpu_var(bh_accounting).ratelimit = 0;
3235 for_each_online_cpu(i)
3236 tot += per_cpu(bh_accounting, i).nr;
3237 buffer_heads_over_limit = (tot > max_buffer_heads);
3240 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3242 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3244 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3245 get_cpu_var(bh_accounting).nr++;
3247 put_cpu_var(bh_accounting);
3251 EXPORT_SYMBOL(alloc_buffer_head);
3253 void free_buffer_head(struct buffer_head *bh)
3255 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3256 kmem_cache_free(bh_cachep, bh);
3257 get_cpu_var(bh_accounting).nr--;
3259 put_cpu_var(bh_accounting);
3261 EXPORT_SYMBOL(free_buffer_head);
3263 static void buffer_exit_cpu(int cpu)
3266 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3268 for (i = 0; i < BH_LRU_SIZE; i++) {
3272 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3273 per_cpu(bh_accounting, cpu).nr = 0;
3274 put_cpu_var(bh_accounting);
3277 static int buffer_cpu_notify(struct notifier_block *self,
3278 unsigned long action, void *hcpu)
3280 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3281 buffer_exit_cpu((unsigned long)hcpu);
3286 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3287 * @bh: struct buffer_head
3289 * Return true if the buffer is up-to-date and false,
3290 * with the buffer locked, if not.
3292 int bh_uptodate_or_lock(struct buffer_head *bh)
3294 if (!buffer_uptodate(bh)) {
3296 if (!buffer_uptodate(bh))
3302 EXPORT_SYMBOL(bh_uptodate_or_lock);
3305 * bh_submit_read - Submit a locked buffer for reading
3306 * @bh: struct buffer_head
3308 * Returns zero on success and -EIO on error.
3310 int bh_submit_read(struct buffer_head *bh)
3312 BUG_ON(!buffer_locked(bh));
3314 if (buffer_uptodate(bh)) {
3320 bh->b_end_io = end_buffer_read_sync;
3321 submit_bh(READ, bh);
3323 if (buffer_uptodate(bh))
3327 EXPORT_SYMBOL(bh_submit_read);
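/*
 * Illustrative sketch, not part of this file: the intended pairing of
 * bh_uptodate_or_lock() and bh_submit_read() when a caller needs a
 * mapped buffer's contents; "bh" is assumed to be held by the caller.
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);	(returns with bh unlocked)
 *		if (err)
 *			return err;
 *	}
 */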
3330 init_buffer_head(void *data)
3332 struct buffer_head *bh = data;
3334 memset(bh, 0, sizeof(*bh));
3335 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3338 void __init buffer_init(void)
3342 bh_cachep = kmem_cache_create("buffer_head",
3343 sizeof(struct buffer_head), 0,
3344 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3349 * Limit the bh occupancy to 10% of ZONE_NORMAL
3351 nrpages = (nr_free_buffer_pages() * 10) / 100;
3352 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3353 hotcpu_notifier(buffer_cpu_notify, 0);
3356 EXPORT_SYMBOL(__bforget);
3357 EXPORT_SYMBOL(__brelse);
3358 EXPORT_SYMBOL(__wait_on_buffer);
3359 EXPORT_SYMBOL(block_commit_write);
3360 EXPORT_SYMBOL(block_prepare_write);
3361 EXPORT_SYMBOL(block_page_mkwrite);
3362 EXPORT_SYMBOL(block_read_full_page);
3363 EXPORT_SYMBOL(block_sync_page);
3364 EXPORT_SYMBOL(block_truncate_page);
3365 EXPORT_SYMBOL(block_write_full_page);
3366 EXPORT_SYMBOL(block_write_full_page_endio);
3367 EXPORT_SYMBOL(cont_write_begin);
3368 EXPORT_SYMBOL(end_buffer_read_sync);
3369 EXPORT_SYMBOL(end_buffer_write_sync);
3370 EXPORT_SYMBOL(end_buffer_async_write);
3371 EXPORT_SYMBOL(file_fsync);
3372 EXPORT_SYMBOL(generic_block_bmap);
3373 EXPORT_SYMBOL(generic_cont_expand_simple);
3374 EXPORT_SYMBOL(init_buffer);
3375 EXPORT_SYMBOL(invalidate_bdev);
3376 EXPORT_SYMBOL(ll_rw_block);
3377 EXPORT_SYMBOL(mark_buffer_dirty);
3378 EXPORT_SYMBOL(submit_bh);
3379 EXPORT_SYMBOL(sync_dirty_buffer);
3380 EXPORT_SYMBOL(unlock_buffer);