/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
        struct buffer_head *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh) && !buffer_delay(bh))
                        clear_buffer_unwritten(bh);
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
#if defined(XFS_RW_TRACE)
        vnode_t *vp = vn_from_inode(inode);
        loff_t isize = i_size_read(inode);
        loff_t offset = page_offset(page);
        int delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),
                (void *)((unsigned long)mask),
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
                (void *)((unsigned long)current_pid()),
#define xfs_page_trace(tag, inode, page, mask)
/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
        if (atomic_dec_and_test(&ioend->io_remaining))
                queue_work(xfsdatad_workqueue, &ioend->io_work);
/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
        struct buffer_head *bh, *next;

        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
                bh->b_end_io(bh, ioend->io_uptodate);
        }

        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
xfs_end_bio_delalloc(
        xfs_ioend_t *ioend = data;

        xfs_destroy_ioend(ioend);
/*
 * Buffered IO write completion for regular, written extents.
 */
        xfs_ioend_t *ioend = data;

        xfs_destroy_ioend(ioend);
/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
xfs_end_bio_unwritten(
        xfs_ioend_t *ioend = data;
        vnode_t *vp = ioend->io_vnode;
        xfs_off_t offset = ioend->io_offset;
        size_t size = ioend->io_size;

        if (ioend->io_uptodate)
                VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
        xfs_destroy_ioend(ioend);
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later.
 */
        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, which will prevent an I/O
         * completion callback from happening before we have started
         * all the I/O, and hence from calling the completion routine
         * too early.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_uptodate = 1; /* cleared if any I/O fails */
        ioend->io_list = NULL;
        ioend->io_type = type;
        ioend->io_vnode = vn_from_inode(inode);
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
        ioend->io_offset = 0;

        if (type == IOMAP_UNWRITTEN)
                INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
        else if (type == IOMAP_DELAY)
                INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
        else
                INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
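/*
 * Block-mapping helper used by the writeback paths below (xfs_map_blocks):
 * ask the XFS bmap layer via VOP_BMAP for the extent backing the given
 * file range, allocating space when the caller passes a write/allocate
 * flag.
 */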
        vnode_t *vp = vn_from_inode(inode);
        int error, nmaps = 1;

        VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
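/*
 * xfs_iomap_valid: does the cached iomap cover the given file offset?
 */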
        return offset >= iomapp->iomap_offset &&
                offset < iomapp->iomap_offset + iomapp->iomap_bsize;
/*
 * BIO completion handler for buffered IO.
 */
        unsigned int bytes_done,
        xfs_ioend_t *ioend = bio->bi_private;

        ASSERT(atomic_read(&bio->bi_cnt) >= 1);

        /* Toss bio and pass work off to an xfsdatad thread */
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                ioend->io_uptodate = 0;
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;

        xfs_finish_ioend(ioend);
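/*
 * xfs_submit_ioend_bio: take another hold on the ioend for the I/O in
 * flight, point the bio's completion at xfs_end_bio and hand it to the
 * block layer.
 */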
xfs_submit_ioend_bio(
        atomic_inc(&ioend->io_remaining);

        bio->bi_private = ioend;
        bio->bi_end_io = xfs_end_bio;

        submit_bio(WRITE, bio);
        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
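/*
 * xfs_alloc_ioend_bio: allocate a bio sized for the buffer's block device
 * and point it at the buffer's starting sector.
 */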
        struct buffer_head *bh)
        int nvecs = bio_get_nr_vecs(bh->b_bdev);

        bio = bio_alloc(GFP_NOIO, nvecs);

        ASSERT(bio->bi_private == NULL);
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
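/*
 * xfs_start_buffer_writeback: mark a mapped, locked buffer as being under
 * asynchronous writeback, making its contents uptodate and clearing the
 * dirty bit.
 */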
xfs_start_buffer_writeback(
        struct buffer_head *bh)
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
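/*
 * xfs_start_page_writeback: move the page into writeback state and clear
 * its dirty bit; if nothing on the page was written, end writeback again
 * and count the page as skipped for the writeback control.
 */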
xfs_start_page_writeback(
        struct writeback_control *wbc,
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));
        set_page_writeback(page);
                clear_page_dirty(page);
                end_page_writeback(page);
                wbc->pages_skipped++;   /* We didn't write this page */

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark
 * the buffers as we go, we can end up with a page that only has some of its
 * buffers marked async write, and I/O completion can occur before we mark
 * the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
        xfs_ioend_t *head = ioend;
        struct buffer_head *bh;
        sector_t lastblock = 0;

        /* Pass 1 - start writeback */
        do {
                next = ioend->io_list;
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                        xfs_start_buffer_writeback(bh);
                }
        } while ((ioend = next) != NULL);
        /* Pass 2 - submit I/O */
                next = ioend->io_list;

                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                                bio = xfs_alloc_ioend_bio(bh);
                        } else if (bh->b_blocknr != lastblock + 1) {
                                xfs_submit_ioend_bio(ioend, bio);

                        if (bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(ioend, bio);

                        lastblock = bh->b_blocknr;

                        xfs_submit_ioend_bio(ioend, bio);
                xfs_finish_ioend(ioend);
        } while ((ioend = next) != NULL);
/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
        struct buffer_head *bh, *next_bh;

        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                } while ((bh = next_bh) != NULL);

                vn_iowake(ioend->io_vnode);
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return true if we've finished the given ioend.
 */
        struct buffer_head *bh,
        xfs_ioend_t **result,
        xfs_ioend_t *ioend = *result;

        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t *previous = *result;

                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
                        previous->io_list = ioend;

                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;

        bh->b_private = NULL;
        ioend->io_size += bh->b_size;
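/*
 * xfs_map_buffer: translate the file offset into an on-disk block number
 * using the iomap and attach it to the buffer_head, marking the buffer
 * mapped.
 */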
        struct buffer_head *bh,
        ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

        bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
              ((offset - mp->iomap_offset) >> block_bits);

        ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

        set_buffer_mapped(bh);
        struct buffer_head *bh,
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

        xfs_map_buffer(bh, iomapp, offset, block_bits);
        bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
/*
 * Look for a page at index that is suitable for clustering.
 */
        unsigned int pg_offset,
        if (PageWriteback(page))

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head *bh, *head;

                        bh = head = page_buffers(page);
                                if (!buffer_uptodate(bh))
                                if (mapped != buffer_mapped(bh))
                                if (ret >= pg_offset)
                        } while ((bh = bh->b_this_page) != head);

                        ret = mapped ? 0 : PAGE_CACHE_SIZE;
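/*
 * xfs_probe_cluster (below): starting from the buffer at hand, count how
 * many contiguous bytes in this page, and then in the following pages, are
 * in the same state (uptodate, and mapped or not as requested) and hence
 * suitable for clustering into a single write.
 */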
        struct page *startpage,
        struct buffer_head *bh,
        struct buffer_head *head,
        pgoff_t tindex, tlast, tloff;

        /* First sum forwards in this page */
                if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
        } while ((bh = bh->b_this_page) != head);

        /* if we reached the end of the page, sum forwards in following pages */
        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        tindex = startpage->index + 1;

        /* Prune this back to avoid pathological behavior */
        tloff = min(tlast, startpage->index + 64);

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tloff) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        size_t pg_offset, len = 0;

                        if (tindex == tlast) {
                                        i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
                                pg_offset = PAGE_CACHE_SIZE;

                        if (page->index == tindex && !TestSetPageLocked(page)) {
                                len = xfs_probe_page(page, pg_offset, mapped);

                pagevec_release(&pvec);
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
        if (PageWriteback(page))

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head *bh, *head;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh))
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
                        else if (buffer_dirty(bh) && buffer_mapped(bh))
                                acceptable = (type == 0);
                } while ((bh = bh->b_this_page) != head);
/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
        xfs_ioend_t **ioendp,
        struct writeback_control *wbc,
        struct buffer_head *bh, *head;
        xfs_off_t end_offset;
        unsigned long p_offset;
        int bbits = inode->i_blkbits;
        int count = 0, done = 0, uptodate = 1;
        xfs_off_t offset = page_offset(page);

        if (page->index != tindex)
        if (TestSetPageLocked(page))
        if (PageWriteback(page))
                goto fail_unlock_page;
        if (page->mapping != inode->i_mapping)
                goto fail_unlock_page;
        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
                goto fail_unlock_page;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;
        bh = head = page_buffers(page);
                if (offset >= end_offset)
                if (!buffer_uptodate(bh))
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {

                if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        if (buffer_unwritten(bh))
                                type = IOMAP_UNWRITTEN;

                        if (!xfs_iomap_valid(mp, offset)) {

                        ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
                        ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

                        xfs_map_at_offset(bh, offset, bbits, mp);
                                xfs_add_to_ioend(inode, bh, offset,
                                set_buffer_dirty(bh);
                                mark_buffer_dirty(bh);

                        if (buffer_mapped(bh) && all_bh && startio) {
                                xfs_add_to_ioend(inode, bh, offset,
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (uptodate && bh == head)
                SetPageUptodate(page);
                struct backing_dev_info *bdi;

                bdi = inode->i_mapping->backing_dev_info;
                if (bdi_write_congested(bdi)) {
                        wbc->encountered_congestion = 1;
                } else if (wbc->nr_to_write <= 0) {

                xfs_start_page_writeback(page, wbc, !page_dirty, count);
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
        xfs_ioend_t **ioendp,
        struct writeback_control *wbc,
        pagevec_init(&pvec, 0);
        while (!done && tindex <= tlast) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
                                        iomapp, ioendp, wbc, startio, all_bh);

                pagevec_release(&pvec);
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know whether any of the blocks (or which block,
 * for that matter) are dirty due to mmap writes, and therefore bh uptodate
 * is only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
xfs_page_state_convert(
        struct writeback_control *wbc,
        int unmapped) /* also implies page uptodate */
        struct buffer_head *bh, *head;
        xfs_ioend_t *ioend = NULL, *iohead = NULL;
        unsigned long p_offset = 0;
        __uint64_t end_offset;
        pgoff_t end_index, last_index, tlast;
        int flags, err, iomap_valid = 0, uptodate = 1;
        int page_dirty, count = 0;
        int all_bh = unmapped;

        if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
                trylock |= BMAPI_TRYLOCK;

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        /* TODO: cleanup count and page_dirty */
                if (offset >= end_offset)
                if (!buffer_uptodate(bh))
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
                        /*
                         * The iomap is actually still valid, but the ioend
                         * isn't.  This shouldn't happen too often.
                         */

                iomap_valid = xfs_iomap_valid(&iomap, offset);
                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 *
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 *
                 * Third case, an unmapped buffer was found, and we are
                 * in a path where we need to write the whole page out.
                 */
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
                        /*
                         * Make sure we don't use a read-only iomap
                         */
                        if (flags == BMAPI_READ)

                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                flags = BMAPI_ALLOCATE | trylock;
                                flags = BMAPI_WRITE | BMAPI_MMAP;

                        if (type == IOMAP_NEW) {
                                size = xfs_probe_cluster(inode,
                                err = xfs_map_blocks(inode, offset, size,
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                                        xfs_map_at_offset(bh, offset,
                                                        inode->i_blkbits, &iomap);
                                        xfs_add_to_ioend(inode, bh, offset,
                                                set_buffer_dirty(bh);
                                                mark_buffer_dirty(bh);
                } else if (buffer_uptodate(bh) && startio) {
                        /*
                         * We got here because the buffer is already mapped.
                         * That means it must already have extents allocated
                         * underneath it.  Map the extent by reading it.
                         */
                        if (!iomap_valid || type != 0) {
                                size = xfs_probe_cluster(inode, page, bh,
                                err = xfs_map_blocks(inode, offset, size,
                                iomap_valid = xfs_iomap_valid(&iomap, offset);

                        if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
                                ASSERT(buffer_mapped(bh));
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                 &ioend, !iomap_valid);
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {
        } while (offset += len, ((bh = bh->b_this_page) != head));
        if (uptodate && bh == head)
                SetPageUptodate(page);

                xfs_start_page_writeback(page, wbc, 1, count);

        if (ioend && iomap_valid) {
                offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
                                        wbc, startio, all_bh, tlast);

                xfs_submit_ioend(iohead);

                xfs_cancel_ioend(iohead);
        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                        block_invalidatepage(page, 0);
                ClearPageUptodate(page);
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first; if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
        struct writeback_control *wbc)
        int delalloc, unmapped, unwritten;
        struct inode *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         *  1. There are delalloc buffers on the page
         *  2. The page is uptodate and we have unmapped buffers
         *  3. The page is uptodate and we have no buffers
         *  4. There are unwritten buffers on the page
         */
        if (!page_has_buffers(page)) {

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!PageUptodate(page))
                unmapped = 0;
        need_trans = delalloc + unmapped + unwritten;
        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed
         * then mark the page dirty again and leave the page
         * as it is.
         */
        if (PFLAGS_TEST_FSTRANS() && need_trans)
        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
        if (unlikely(error < 0))

        redirty_page_for_writepage(wbc, page);
/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  Buffer heads will be dirty and possibly
 *    delalloc.  If there are no delalloc buffer heads in this case
 *    then we can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap; all we need to do is ensure there is no delalloc
 *    state in the buffer heads.  If not, we can let the caller
 *    free them and we should come back later via writepage.
 */
        struct inode *inode = page->mapping->host;
        int dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

        if (!page_has_buffers(page))

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)

        if (!(gfp_mask & __GFP_FS))

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (PFLAGS_TEST_FSTRANS())

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk, that will be done by the caller.
         * Never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)

        return try_to_free_buffers(page);
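/*
 * __xfs_get_blocks: common get_block implementation for the buffered and
 * direct I/O paths.  Map the requested block via VOP_BMAP and fill in the
 * buffer_head with the device, block number and state bits that the
 * generic code expects.
 */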
        struct inode *inode,
        struct buffer_head *bh_result,
        bmapi_flags_t flags)
        vnode_t *vp = vn_from_inode(inode);

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;
        VOP_BMAP(vp, offset, size,
                create ? flags : BMAPI_READ, &iomap, &niomap, error);
        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                /*
                 * For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        xfs_map_buffer(bh_result, &iomap, offset,
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                        set_buffer_delay(bh_result);
        /*
         * If this is a realtime file, data may be on a different device
         * to the one currently pointed to by the buffer_head's b_bdev.
         */
        bh_result->b_bdev = iomap.iomap_target->bt_bdev;

        /*
         * If we previously allocated a block out beyond eof and we are
         * now coming back to use it then we will need to flag it as new
         * even if it has a disk address.
         */
                ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
                 (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
                set_buffer_new(bh_result);
        if (iomap.iomap_flags & IOMAP_DELAY) {
                set_buffer_uptodate(bh_result);
                set_buffer_mapped(bh_result);
                set_buffer_delay(bh_result);

        if (direct || size > (1 << inode->i_blkbits)) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
                        iomap.iomap_bsize - iomap.iomap_delta, size);
                bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
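/*
 * get_block callbacks for the buffered (xfs_get_blocks) and direct
 * (xfs_get_blocks_direct) I/O paths; both are thin wrappers around
 * __xfs_get_blocks above.
 */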
        struct inode *inode,
        struct buffer_head *bh_result,
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 0, BMAPI_WRITE);

xfs_get_blocks_direct(
        struct inode *inode,
        struct buffer_head *bh_result,
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
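/*
 * Completion handler for direct I/O: if the request covered an unwritten
 * range, defer the extent conversion to a workqueue via the ioend;
 * otherwise just tear the ioend down here.
 */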
        xfs_ioend_t *ioend = iocb->private;

        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
         * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
         */
        if (private && size > 0) {
                ioend->io_offset = offset;
                ioend->io_size = size;
                xfs_finish_ioend(ioend);
        } else {
                xfs_destroy_ioend(ioend);
        }

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;
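/*
 * direct_IO address_space operation: look up the target block device for
 * the file, allocate an ioend to track unwritten-extent conversion, and
 * hand the request to the generic blockdev direct I/O code.
 */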
        const struct iovec *iov,
        unsigned long nr_segs)
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        vnode_t *vp = vn_from_inode(inode);

        VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);

        iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);

        ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                        iomap.iomap_target->bt_bdev,
                        iov, offset, nr_segs,
                        xfs_get_blocks_direct,

        if (unlikely(ret <= 0 && iocb->private))
                xfs_destroy_ioend(iocb->private);
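/*
 * prepare_write address_space operation: thin wrapper around
 * block_prepare_write() using xfs_get_blocks to map the buffers.
 */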
xfs_vm_prepare_write(
        return block_prepare_write(page, from, to, xfs_get_blocks);
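/*
 * bmap address_space operation: take the vnode lock, flush the file's
 * cached pages so the block layout is stable, and then resolve the block
 * mapping via generic_block_bmap().
 */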
        struct address_space *mapping,
        struct inode *inode = (struct inode *)mapping->host;
        vnode_t *vp = vn_from_inode(inode);

        vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
        VOP_RWLOCK(vp, VRWLOCK_READ);
        VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
        VOP_RWUNLOCK(vp, VRWLOCK_READ);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
        struct file *unused,
        return mpage_readpage(page, xfs_get_blocks);

        struct file *unused,
        struct address_space *mapping,
        struct list_head *pages,
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);

xfs_vm_invalidatepage(
        unsigned long offset)
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                       page->mapping->host, page, offset);
        block_invalidatepage(page, offset);
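/*
 * The address_space operations wired up for XFS regular files: readpage(s)
 * and bmap go through xfs_get_blocks, writeback through xfs_vm_writepage,
 * and direct I/O through xfs_vm_direct_IO.
 */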
struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .sync_page              = block_sync_page,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .prepare_write          = xfs_vm_prepare_write,
        .commit_write           = generic_commit_write,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,