/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page_offset(page);
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	ip = xfs_vtoi(vp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)((unsigned long)current_pid()),
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
/*
 * Schedule IO completion handling on a xfsdatad if this was
 * the final hold on this ioend.
 */
STATIC void
xfs_finish_ioend(
	xfs_ioend_t		*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		queue_work(xfsdatad_workqueue, &ioend->io_work);
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, ioend->io_uptodate);
	}

	vn_iowake(ioend->io_vnode);
	mempool_free(ioend, xfs_ioend_pool);
}
/*
 * Buffered IO write completion for delayed allocate extents.
 * TODO: Update ondisk isize now that we know the file data
 * has been flushed (i.e. the notorious "NULL file" problem).
 */
STATIC void
xfs_end_bio_delalloc(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	void			*data)
{
	xfs_ioend_t		*ioend = data;

	xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
	void			*data)
{
	xfs_ioend_t		*ioend = data;
	vnode_t			*vp = ioend->io_vnode;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	if (ioend->io_uptodate)
		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	xfs_destroy_ioend(ioend);
}
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback that occurs before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_uptodate = 1; /* cleared if any I/O fails */
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_vnode = LINVFS_GET_VP(inode);
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&ioend->io_vnode->v_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);

	return ioend;
}
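
/*
 * Illustrative sketch (not part of the original file): io_remaining acts
 * as a simple reference count on the ioend.  A typical submission pass
 * looks like:
 *
 *	ioend = xfs_alloc_ioend(inode, type);	-> io_remaining == 1
 *	xfs_submit_ioend_bio(ioend, bio);	-> +1 per bio in flight
 *	...
 *	xfs_finish_ioend(ioend);		-> drop the initial hold
 *
 * Each bio completion (xfs_end_bio) also drops one reference, so the
 * completion work item is queued exactly once, and never before all of
 * the I/O has been issued.
 */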
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

STATIC inline int
xfs_iomap_valid(
	xfs_iomap_t		*iomapp,
	loff_t			offset)
{
	return offset >= iomapp->iomap_offset &&
		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}
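
/*
 * Illustrative example (not from the original file): with an iomap
 * covering [iomap_offset = 64k, iomap_offset + iomap_bsize = 128k), an
 * offset of 96k is valid (64k <= 96k < 128k) while 128k is not, which
 * forces the caller to look up a fresh mapping for that offset.
 */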
/*
 * BIO completion handler for buffered IO.
 */
STATIC int
xfs_end_bio(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	if (bio->bi_size)
		return 1;

	ASSERT(ioend);
	ASSERT(atomic_read(&bio->bi_cnt) >= 1);

	/* Toss bio and pass work off to an xfsdatad thread */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		ioend->io_uptodate = 0;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
	return 0;
}
STATIC void
xfs_submit_ioend_bio(
	xfs_ioend_t	*ioend,
	struct bio	*bio)
{
	atomic_inc(&ioend->io_remaining);

	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	struct writeback_control *wbc,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	set_page_writeback(page);
	if (clear_dirty)
		clear_page_dirty(page);
	unlock_page(page);
	if (!buffers) {
		end_page_writeback(page);
		wbc->pages_skipped++;	/* We didn't write this page */
	}
}
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
/*
 * Submit all of the bios for all of the ioends we have saved up,
 * covering the initial writepage page and also any probed pages.
 */
STATIC void
xfs_submit_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
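
/*
 * Illustrative walk-through (not from the original file): for an ioend
 * whose buffers map to disk blocks 100, 101, 102 and 200, the loop above
 * packs 100-102 into one bio; at block 200 the b_blocknr != lastblock + 1
 * test fires, the first bio is submitted, and the goto retry path starts
 * a second bio for the discontiguous tail.  A bio that fills up
 * (bio_add_buffer returning less than bh->b_size) is split the same way.
 */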
/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		vn_iowake(ioend->io_vnode);
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
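
/*
 * Illustrative example (not from the original file): writing a page
 * whose buffers are delalloc, delalloc, unwritten yields two ioends.
 * The type change from IOMAP_DELAY to IOMAP_UNWRITTEN forces a new
 * ioend, which is chained to the previous one via io_list so that
 * xfs_submit_ioend() can later walk and submit all of them.
 */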
STATIC void
xfs_map_at_offset(
	struct buffer_head	*bh,
	loff_t			offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	sector_shift = block_bits - BBSHIFT;
	bn = (iomapp->iomap_bn >> sector_shift) +
	      ((offset - iomapp->iomap_offset) >> block_bits);

	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->bt_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
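
/*
 * Worked example (not from the original file, assuming 4k filesystem
 * blocks): block_bits = 12 and BBSHIFT = 9 give sector_shift = 3, i.e.
 * eight 512-byte basic blocks per filesystem block.  For iomap_bn = 800
 * (in basic blocks), iomap_offset = 0 and offset = 8192, we get
 * bn = (800 >> 3) + (8192 >> 12) = 100 + 2 = 102, a b_blocknr in
 * filesystem-block units, which is what the buffer_head expects.
 */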
/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
	struct page		*page,
	unsigned int		pg_offset,
	int			mapped)
{
	int			ret = 0;

	if (PageWriteback(page))
		return 0;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (!buffer_uptodate(bh))
					break;
				if (mapped != buffer_mapped(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = mapped ? 0 : PAGE_CACHE_SIZE;
	}

	return ret;
}
STATIC size_t
xfs_probe_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head,
	int			mapped)
{
	struct pagevec		pvec;
	pgoff_t			tindex, tlast, tloff;
	size_t			total = 0;
	int			done = 0, i;

	/* First sum forwards in this page */
	do {
		if (mapped != buffer_mapped(bh))
			return total;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* if we reached the end of the page, sum forwards in following pages */
	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	tindex = startpage->index + 1;

	/* Prune this back to avoid pathological behavior */
	tloff = min(tlast, startpage->index + 64);

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tloff) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			size_t pg_offset, len = 0;

			if (tindex == tlast) {
				pg_offset =
				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
				if (!pg_offset) {
					done = 1;
					break;
				}
			} else
				pg_offset = PAGE_CACHE_SIZE;

			if (page->index == tindex && !TestSetPageLocked(page)) {
				len = xfs_probe_page(page, pg_offset, mapped);
				unlock_page(page);
			}

			if (!len) {
				done = 1;
				break;
			}

			total += len;
			tindex++;
		}

		pagevec_release(&pvec);
		cond_resched();
	}

	return total;
}
/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IOMAP_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IOMAP_DELAY);
			else if (buffer_mapped(bh))
				acceptable = (type == 0);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only, for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	xfs_iomap_t		*mp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			bbits = inode->i_blkbits;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (TestSetPageLocked(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;
	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
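
	/*
	 * Worked example (not from the original file): with 4k pages, 1k
	 * blocks (len = 1024) and EOF 2500 bytes into this page, end_offset
	 * stops at EOF, p_offset = roundup(2500, 1024) = 3072, and
	 * page_dirty = 3, i.e. only the three buffers that lie (partially)
	 * inside EOF need to reach a cleanable state.  On a page wholly
	 * before EOF, p_offset is PAGE_CACHE_SIZE and page_dirty counts
	 * all four buffers.
	 */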
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh)) {
			if (buffer_unwritten(bh))
				type = IOMAP_UNWRITTEN;
			else
				type = IOMAP_DELAY;

			if (!xfs_iomap_valid(mp, offset)) {
				done = 1;
				continue;
			}

			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

			xfs_map_at_offset(bh, offset, bbits, mp);
			if (startio) {
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
			} else {
				set_buffer_dirty(bh);
				unlock_buffer(bh);
				mark_buffer_dirty(bh);
			}
			page_dirty--;
			count++;
		} else {
			type = 0;
			if (buffer_mapped(bh) && all_bh && startio) {
				lock_buffer(bh);
				xfs_add_to_ioend(inode, bh, offset,
						type, ioendp, done);
				count++;
				page_dirty--;
			} else {
				done = 1;
			}
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);
	if (startio) {
		if (count) {
			struct backing_dev_info *bdi;

			bdi = inode->i_mapping->backing_dev_info;
			if (bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			} else if (--wbc->nr_to_write <= 0) {
				done = 1;
			}
		}
		xfs_start_page_writeback(page, wbc, !page_dirty, count);
	}

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					iomapp, ioendp, wbc, startio, all_bh);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling writepage, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mapwrite but only partially set up by block_prepare_write, the bh
 * states will not agree and only the ones set up by BPW/BCW will have
 * valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh, *head;
	xfs_iomap_t		iomap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned long		p_offset = 0;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	ssize_t			size, len;
	int			flags, err, iomap_valid = 0, uptodate = 1;
	int			page_dirty, count = 0, trylock_flag = 0;
	int			all_bh = unmapped;

	/* wait for other IO threads? */
	if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
		trylock_flag |= BMAPI_TRYLOCK;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			if (startio)
				unlock_page(page);
			return 0;
		}
	}
	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	flags = -1;
	type = 0;
	/* TODO: cleanup count and page_dirty */

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
			/*
			 * the iomap is actually still valid, but the ioend
			 * isn't.  shouldn't happen too often.
			 */
			iomap_valid = 0;
			continue;
		}

		if (iomap_valid)
			iomap_valid = xfs_iomap_valid(&iomap, offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 *
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 *
		 * Third case, an unmapped buffer was found, and we are
		 * in a path where we need to write the whole page out.
		 */
		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
		     !buffer_mapped(bh) && (unmapped || startio))) {
			/*
			 * Make sure we don't use a read-only iomap
			 */
			if (flags == BMAPI_READ)
				iomap_valid = 0;

			if (buffer_unwritten(bh)) {
				type = IOMAP_UNWRITTEN;
				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
			} else if (buffer_delay(bh)) {
				type = IOMAP_DELAY;
				flags = BMAPI_ALLOCATE;
				if (!startio)
					flags |= trylock_flag;
			} else {
				type = IOMAP_NEW;
				flags = BMAPI_WRITE|BMAPI_MMAP;
			}

			if (!iomap_valid) {
				if (type == IOMAP_NEW) {
					size = xfs_probe_cluster(inode,
							page, bh, head, 0);
				} else {
					size = len;
				}

				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}
			if (iomap_valid) {
				xfs_map_at_offset(bh, offset,
						inode->i_blkbits, &iomap);
				if (startio) {
					xfs_add_to_ioend(inode, bh, offset,
							type, &ioend,
							!iomap_valid);
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty--;
				count++;
			}
		} else if (buffer_uptodate(bh) && startio) {
			/*
			 * we got here because the buffer is already mapped.
			 * That means it must already have extents allocated
			 * underneath it. Map the extent by reading it.
			 */
			if (!iomap_valid || type != 0) {
				flags = BMAPI_READ;
				size = xfs_probe_cluster(inode, page, bh,
								head, 1);
				err = xfs_map_blocks(inode, offset, size,
						&iomap, flags);
				if (err)
					goto error;
				iomap_valid = xfs_iomap_valid(&iomap, offset);
			}

			type = 0;
			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
				ASSERT(buffer_mapped(bh));
				if (iomap_valid)
					all_bh = 1;
				xfs_add_to_ioend(inode, bh, offset, type,
						&ioend, !iomap_valid);
				page_dirty--;
				count++;
			} else {
				iomap_valid = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			iomap_valid = 0;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));
	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_start_page_writeback(page, wbc, 1, count);

	if (ioend && iomap_valid) {
		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
					PAGE_CACHE_SHIFT;
		tlast = min_t(pgoff_t, offset, last_index);
		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
					wbc, startio, all_bh, tlast);
	}

	if (iohead)
		xfs_submit_ioend(iohead);

	return page_dirty;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}
STATIC int
__linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	xfs_off_t		offset;
	ssize_t			size;
	int			retpbbm = 1;
	int			error;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	if (blocks)
		size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
					(xfs_off_t)blocks << inode->i_blkbits);
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t	bn;
		xfs_off_t	delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;
			BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
			bh_result->b_blocknr = bn;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->bt_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
		set_buffer_new(bh_result);

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	if (blocks) {
		ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
		offset = min_t(xfs_off_t,
				iomap.iomap_bsize - iomap.iomap_delta,
				(xfs_off_t)blocks << inode->i_blkbits);
		bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
	}

	return 0;
}
int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
STATIC void
linvfs_end_io_direct(
	struct kiocb	*iocb,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	xfs_ioend_t	*ioend = iocb->private;

	/*
	 * Non-NULL private data means we need to issue a transaction to
	 * convert a range from unwritten to written extents.  This needs
	 * to happen from process context but aio+dio I/O completion
	 * happens from irq context so we need to defer it to a workqueue.
	 * This is not necessary for synchronous direct I/O, but we do
	 * it anyway to keep the code uniform and simpler.
	 *
	 * The core direct I/O code might be changed to always call the
	 * completion handler in the future, in which case all this can
	 * go away.
	 */
	if (private && size > 0) {
		ioend->io_offset = offset;
		ioend->io_size = size;
		xfs_finish_ioend(ioend);
	} else {
		xfs_destroy_ioend(ioend);
	}

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;
}
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;
	ssize_t		ret;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);

	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
		iomap.iomap_target->bt_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_end_io_direct);

	if (unlikely(ret <= 0 && iocb->private))
		xfs_destroy_ioend(iocb->private);
	return ret;
}
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}
STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}
STATIC int
linvfs_invalidate_page(
	struct page		*page,
	unsigned long		offset)
{
	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
			page->mapping->host, page, offset);
	return block_invalidatepage(page, offset);
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 0 if the page is ok to release, 1 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	gfp_t			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	return block_prepare_write(page, from, to, linvfs_get_block);
}
struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.invalidatepage		= linvfs_invalidate_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};