/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>
#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(int tag, xfs_iocore_t *io, void *data, size_t segs,
		   loff_t offset, int ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL);
}

void
xfs_inval_cached_trace(xfs_iocore_t *io, xfs_off_t offset, xfs_off_t len,
		       xfs_off_t first, xfs_off_t last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
		(void *)NULL);
}
#endif	/* XFS_RW_TRACE */
/*
 * xfs_iozero
 *
 *	xfs_iozero clears the specified range of the supplied buffer,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
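/*
 * For example (assuming 4k pages, i.e. PAGE_CACHE_SIZE == 4096):
 * zeroing count = 5000 bytes starting at pos = 5000 spans two pages.
 * The first pass below computes index = 1, offset = 904 and zeroes
 * bytes = 4096 - 904 = 3192 bytes; the second pass zeroes the
 * remaining 1808 bytes at the start of the page at index 2.
 */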
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count,	/* size of data to zero		*/
	loff_t			end_size)	/* max file size to set	*/
{
	struct address_space	*mapping = ip->i_mapping;
	struct page		*page;
	unsigned		bytes;
	char			*kaddr;
	int			status = 0;

	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE - 1));	/* within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		kaddr = kmap(page);
		status = mapping->a_ops->prepare_write(NULL, page, offset,
						       offset + bytes);
		if (status)
			goto unlock;

		memset((void *) (kaddr + offset), 0, bytes);
		flush_dcache_page(page);
		status = mapping->a_ops->commit_write(NULL, page, offset,
						      offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
			if (pos > i_size_read(ip))
				i_size_write(ip, pos < end_size ? pos : end_size);
		}

unlock:
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

	return (-status);
}
ssize_t				/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret = 0;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	bhv_vnode_t		*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */
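	/*
	 * A concrete case of the check above: on a 64-bit box a single
	 * iov_len of 1UL << 63 is positive as a size_t but negative as
	 * an ssize_t, and two segments of SSIZE_MAX each wrap the
	 * cumulative size negative; OR-ing the running total with the
	 * current length catches both in one signed test.
	 */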
	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_d.di_size) {
				return (0);
			}
			return -XFS_ERROR(EINVAL);
		}
	}
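	/*
	 * E.g. with 512-byte sectors bt_smask is 0x1ff, so a direct
	 * read of 4096 bytes at *offset == 3000 is rejected (3000 is
	 * not sector aligned), while the same read at 3072 passes.
	 * A misaligned request starting exactly at EOF returns 0
	 * instead of EINVAL.
	 */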
	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_mutex;
		}
	}

	if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp)))
		bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)),
						-1, FI_REMAPF_LOCKED);

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

unlock_mutex:
	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_unlock(&inode->i_mutex);
	return ret;
}
ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
				      *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
			(void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
ssize_t
xfs_splice_read(
	bhv_desc_t		*bdp,
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	int			flags,
	int			ioflags,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(infilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
ssize_t
xfs_splice_write(
	bhv_desc_t		*bdp,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
	    (!(ioflags & IO_INVIS))) {
		bhv_vrwlock_t locktype = VRWLOCK_WRITE;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(outfilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
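/*
 * For example, assuming 4k filesystem blocks: extending a file whose
 * isize is 10000 gives zero_offset = 10000 % 4096 = 1808 and
 * last_fsb = 2, so zero_len = 4096 - 1808 = 2288 and bytes
 * 10000..12287 (the allocated tail of block 2) are zeroed.
 */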
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_fsize_t	isize,
	xfs_fsize_t	end_size)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error)
		return error;
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

	loff = XFS_FSB_TO_B(mp, last_fsb);
	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}
/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes and
 * unwritten extents in the range are left untouched; only allocated,
 * written blocks are zeroed.
 */
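/*
 * Worked example (assuming 4k blocks): growing a file from isize 10000
 * to offset 100000 first zeroes the tail of block 2 via
 * xfs_zero_last_block(), then walks start_zero_fsb = XFS_B_TO_FSB(10000)
 * = 3 up to end_zero_fsb = XFS_B_TO_FSBT(99999) = 24, zeroing only the
 * blocks in that range that are actually allocated and written.
 */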
int					/* error (positive) */
xfs_zero_eof(
	bhv_vnode_t	*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = vn_to_inode(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}
	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
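	/*
	 * E.g. with 4k blocks and isize exactly 8192:
	 * XFS_B_TO_FSBT(mp, 8191) = 1, the block that really holds the
	 * last byte, whereas truncating 8192 itself would wrongly name
	 * block 2, which holds no data yet.
	 */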
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * A hole or an unwritten extent has nothing on
			 * disk that could leak stale data, so there is
			 * nothing to zero here; just skip past it.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		error = xfs_iozero(ip,
				   XFS_FSB_TO_B(mp, start_zero_fsb),
				   XFS_FSB_TO_B(mp, imap.br_blockcount),
				   end_size);
		if (error)
			goto out_lock;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:
	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}
ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	bhv_vnode_t		*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	bhv_vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	vfs_wait_for_freeze(vp->v_vfsp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask))
			return XFS_ERROR(-EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_i_mutex = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

relock:
	if (need_i_mutex) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		mutex_lock(&inode->i_mutex);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}
	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count,
				      dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_mutex;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	if (likely(!(ioflags & IO_INVIS))) {
		file_update_time(file);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}
	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */
	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
				     isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_mutex;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);
	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */
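	/*
	 * E.g. a 04755 (setuid) binary written to by an unprivileged
	 * user drops to 0755.  Note the (S_ISGID | S_IXGRP) pairing in
	 * the test below: setgid without group execute denotes
	 * mandatory locking, not a setgid executable, and is
	 * deliberately left alone.
	 */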
	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_mutex;
		}
	}
retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			bhv_vop_flushinval_pages(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_i_mutex = 0;
		}
		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
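		/*
		 * E.g. a 1 MB direct write that, say, hits a hole after
		 * the first 512k comes back with ret == 524288 here;
		 * pos and count are advanced past the bytes already
		 * written and the tail is retried via the buffered path.
		 */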
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_i_mutex = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}
	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_nounlocks;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		ret = 0;
		goto retry;
	}
	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);
	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		error = xfs_write_sync_logforce(mp, xip);
		if (error)
			goto out_unlock_internal;

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);

		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}
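	/*
	 * The net effect of the O_SYNC path above for, say, a write of
	 * ret bytes at pos is a synchronous log force for the metadata
	 * plus a ranged sync_page_range() flush of just the newly
	 * written pages, rather than an fsync of the whole file.
	 */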
 out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
 out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
 out_nounlocks:
	return -error;
}
/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}
}
int
xfs_bmap(bhv_desc_t	*bdp,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}
/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes thru this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 * }
		 */
		xfs_buf_iorequest(bp);
		return 0;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	return (xfs_bioerror_relse(bp));
}
/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}