/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int		tag,
	xfs_iocore_t	*io,
	void		*data,
	size_t		segs,
	loff_t		offset,
	int		ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL);
}
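
/*
 * ktrace_enter() stores a fixed set of word-sized slots, so each
 * 64-bit quantity above (di_size, offset, io_new_size) is recorded
 * as two 32-bit halves.  A trace consumer would reassemble a value
 * roughly like this ("hi" and "lo" standing for the two slots):
 *
 *	xfs_off_t off = ((xfs_off_t)(unsigned long)hi << 32) |
 *			(xfs_off_t)(unsigned long)lo;
 */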
void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
		(void *)NULL);
}
#endif

/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count,	/* size of data to zero		*/
	loff_t			end_size)	/* max file size to set	*/
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	char			*kaddr;
	int			status = 0;

	mapping = ip->i_mapping;
	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		kaddr = kmap(page);
		status = mapping->a_ops->prepare_write(NULL, page, offset,
							offset + bytes);
		if (status)
			goto unlock;

		memset((void *) (kaddr + offset), 0, bytes);
		flush_dcache_page(page);
		status = mapping->a_ops->commit_write(NULL, page, offset,
							offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
			if (pos > i_size_read(ip))
				i_size_write(ip, pos < end_size ? pos : end_size);
		}

unlock:
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

	return (-status);
}
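
/*
 * The loop above follows the usual address_space sequence for each
 * page-sized chunk: grab_cache_page(), ->prepare_write() (which reads
 * or allocates anything the memset would only partially overwrite),
 * the memset itself, then ->commit_write() to mark the page up to
 * date and dirty.  For example, zeroing 5000 bytes from a page-aligned
 * pos with a 4096-byte PAGE_CACHE_SIZE touches two pages: 4096 bytes
 * on the first pass and 904 on the second.
 */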

ssize_t			/* bytes read, or (-)  error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
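		/*
		 * bt_smask is the sector size of the target device minus
		 * one, so the check below rejects any direct I/O request
		 * that is not sector aligned; e.g. with 512-byte sectors,
		 * an offset of 1000 fails because (1000 & 511) != 0.
		 */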
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_mutex;
		}
	}

	if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
		VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)),
						-1, FI_REMAPF_LOCKED);

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

unlock_mutex:
	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_unlock(&inode->i_mutex);
	return ret;
}
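
/*
 * A note on the ordering above: for direct reads, i_mutex is taken
 * before the shared iolock.  Buffered writers also take i_mutex, so
 * holding it here keeps them from repopulating the page cache between
 * the VOP_FLUSHINVAL_PAGES() call and the direct read itself.  The
 * same i_mutex-then-iolock order shows up again in xfs_write() below.
 */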

ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
				      *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
		   (void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_read(
	bhv_desc_t		*bdp,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(infilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

ssize_t
xfs_splice_write(
	bhv_desc_t		*bdp,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_WRITE;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(outfilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
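
/*
 * xfs_sendfile(), xfs_splice_read() and xfs_splice_write() all follow
 * the same shape as xfs_read(): bail out on a forced shutdown, take
 * the iolock, post the DMAPI DM_EVENT_READ/DM_EVENT_WRITE event when
 * enabled, hand the real work to the generic VFS helper, then bump
 * the byte statistics and drop the lock.  Only xfs_splice_write()
 * takes the iolock exclusive, presumably because it can extend the
 * file.
 */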

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_fsize_t	isize,
	xfs_fsize_t	end_size)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);

	loff = XFS_FSB_TO_B(mp, last_fsb);
	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
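
/*
 * As a worked example (assuming 4096-byte blocks for illustration):
 * extending a file from isize 6000 to offset 20000 first has
 * xfs_zero_last_block() zero bytes 6000-8191 of block 1; the loop
 * below then maps blocks 2 through 4 (start_zero_fsb =
 * XFS_B_TO_FSB(6000) = 2, end_zero_fsb = XFS_B_TO_FSBT(19999) = 4)
 * and zeroes whichever of them are actually allocated, while holes
 * and unwritten extents are skipped.
 */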
int					/* error (positive) */
xfs_zero_eof(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = vn_to_inode(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_extlen_t	buf_len_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks in the range requested.
		 * Zero them a single write at a time.  We actually
		 * don't zero the entire range returned if it is
		 * too big and simply loop around to get the rest.
		 * That is not the most efficient thing to do, but it
		 * is simple and this path should not be exercised often.
		 */
		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
					      mp->m_writeio_blocks << 8);
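		/*
		 * The clamp above caps each pass at m_writeio_blocks << 8
		 * filesystem blocks; with the common 64KiB writeio default
		 * (16 blocks of 4096 bytes) that is 4096 blocks, i.e. 16MiB,
		 * per xfs_iozero() call, so a huge extent is zeroed in
		 * bounded chunks rather than in one giant write.
		 */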
		/*
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		error = xfs_iozero(ip,
				   XFS_FSB_TO_B(mp, start_zero_fsb),
				   XFS_FSB_TO_B(mp, buf_len_fsb),
				   end_size);
		if (error)
			goto out_lock;

		start_zero_fsb = imap.br_startoff + buf_len_fsb;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:
	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}

ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	vnode_t			*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask))
			return XFS_ERROR(-EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_i_mutex = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

relock:
	if (need_i_mutex) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		mutex_lock(&inode->i_mutex);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count,
				      dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_mutex;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	if (likely(!(ioflags & IO_INVIS))) {
		file_update_time(file);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */
	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
				     isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_mutex;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_mutex;
		}
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_i_mutex = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_i_mutex = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_nounlocks;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		ret = 0;
		goto retry;
	}

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		/*
		 * If we're treating this as O_DSYNC and we have not updated the
		 * size, force the log.
		 */
		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
		    !(xip->i_update_size)) {
			xfs_inode_log_item_t	*iip = xip->i_itemp;

			/*
			 * If an allocation transaction occurred
			 * without extending the size, then we have to force
			 * the log up the proper point to ensure that the
			 * allocation is permanent.  We can't count on
			 * the fact that buffered writes lock out direct I/O
			 * writes - the direct I/O write could have extended
			 * the size nontransactionally, then finished before
			 * we started.  xfs_write_file will think that the file
			 * didn't grow but the update isn't safe unless the
			 * size change is logged.
			 *
			 * Force the log if we've committed a transaction
			 * against the inode or if someone else has and
			 * the commit record hasn't gone to disk (e.g.
			 * the inode is pinned).  This guarantees that
			 * all changes affecting the inode are permanent
			 * when we return.
			 */
			if (iip && iip->ili_last_lsn) {
				xfs_log_force(mp, iip->ili_last_lsn,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			} else if (xfs_ipincount(xip) > 0) {
				xfs_log_force(mp, (xfs_lsn_t)0,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			}
		} else {
			xfs_trans_t	*tp;

			/*
			 * O_SYNC or O_DSYNC _with_ a size update are handled
			 * the same way.
			 *
			 * If the write was synchronous then we need to make
			 * sure that the inode modification time is permanent.
			 * We'll have updated the timestamp above, so here
			 * we use a synchronous transaction to log the inode.
			 * It's not fast, but it's necessary.
			 *
			 * If this a dsync write and the size got changed
			 * non-transactionally, then we need to ensure that
			 * the size change gets logged in a synchronous
			 * transaction.
			 */
			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
			if ((error = xfs_trans_reserve(tp, 0,
						      XFS_SWRITE_LOG_RES(mp),
						      0, 0, 0))) {
				/* Transaction reserve failed */
				xfs_trans_cancel(tp, 0);
			} else {
				/* Transaction reserve successful */
				xfs_ilock(xip, XFS_ILOCK_EXCL);
				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
				xfs_trans_ihold(tp, xip);
				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
				xfs_trans_set_sync(tp);
				error = xfs_trans_commit(tp, 0, NULL);
				xfs_iunlock(xip, XFS_ILOCK_EXCL);
			}
			if (error)
				goto out_unlock_internal;
		}

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);

		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}

out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
out_nounlocks:
	return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0) {
			return (xfs_bioerror_relse(bp));
		} else {
			return (xfs_bioerror(bp));
		}
	}
}

int
xfs_bmap(bhv_desc_t	*bdp,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes thru this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 * }
		 */
		xfs_buf_iorequest(bp);
		return 0;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}