/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>
#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(int tag, xfs_iocore_t *io, void *data, size_t segs,
		   loff_t offset, int ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL);
}
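/*
 * ktrace_enter() stores each value in a void * slot, so the 64-bit
 * quantities traced above (di_size, offset, io_new_size) are split
 * into explicit high/low 32-bit halves; casting a 64-bit value
 * straight to a pointer would truncate it on 32-bit hosts.
 */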
void
xfs_inval_cached_trace(xfs_iocore_t *io, xfs_off_t offset, xfs_off_t len,
		       xfs_off_t first, xfs_off_t last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL, (void *)NULL, (void *)NULL,
		(void *)NULL, (void *)NULL);
}
#endif	/* XFS_RW_TRACE */
/*
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = ip->i_mapping;
	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		status = mapping->a_ops->prepare_write(NULL, page, offset,
						       offset + bytes);
		if (status)
			goto unlock;

		zero_user_page(page, offset, bytes, KM_USER0);

		status = mapping->a_ops->commit_write(NULL, page, offset,
						      offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
		}
unlock:
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

	return (-status);
}
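/*
 * Worked example for the arithmetic above (assuming 4 KiB pages, i.e.
 * PAGE_CACHE_SIZE == 4096): for pos = 5000 and count = 10000, the
 * first iteration gets index = 5000 >> 12 = 1, offset = 5000 & 4095 =
 * 904 and bytes = 4096 - 904 = 3192, so only the tail of page 1 is
 * zeroed; subsequent iterations start page-aligned and zero whole
 * pages until count is exhausted.
 */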
ssize_t				/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t *bdp, struct kiocb *iocb, const struct iovec *iovp,
	unsigned int segs, loff_t *offset, int ioflags, cred_t *credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret = 0;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	bhv_vnode_t		*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */
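	/*
	 * The check above folds two conditions into one branch: OR-ing
	 * the running total with the current iov_len and casting to
	 * ssize_t goes negative as soon as either value has its sign
	 * bit set, which covers both an individually huge segment and
	 * a cumulative overflow.
	 */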
	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (*offset == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}
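	/*
	 * bt_smask is the sector mask of the backing buftarg: for a
	 * device with 512-byte sectors it is 0x1ff, so a direct read
	 * at, say, *offset = 1536 with size = 2048 passes (both are
	 * multiples of 512), while *offset = 1000 fails with EINVAL
	 * unless it sits exactly at EOF, in which case the read simply
	 * returns 0.
	 */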
	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		mutex_lock(&inode->i_mutex);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			if (unlikely(ioflags & IO_ISDIRECT))
				mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (unlikely(ioflags & IO_ISDIRECT)) {
		if (VN_CACHED(vp))
			ret = bhv_vop_flushinval_pages(vp,
					ctooff(offtoct(*offset)),
					-1, FI_REMAPF_LOCKED);
		mutex_unlock(&inode->i_mutex);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return ret;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);

	iocb->ki_pos = *offset;
	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
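/*
 * The DM_EVENT_ENABLED/XFS_SEND_DATA pattern above recurs in the
 * splice and write paths below: when a DMAPI-managed file is touched,
 * the event gives a space-management application a chance to bring
 * the data back online before the I/O proceeds, and the locktype
 * argument records which lock is held and may be dropped and
 * reacquired while the event is serviced.
 */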
ssize_t
xfs_splice_read(
	bhv_desc_t *bdp, struct file *infilp, loff_t *ppos,
	struct pipe_inode_info *pipe, size_t count, int flags,
	int ioflags, cred_t *credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		bhv_vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(infilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
ssize_t
xfs_splice_write(
	bhv_desc_t *bdp, struct pipe_inode_info *pipe, struct file *outfilp,
	loff_t *ppos, size_t count, int flags, int ioflags, cred_t *credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	xfs_iocore_t		*io = &ip->i_iocore;
	ssize_t			ret;
	struct inode		*inode = outfilp->f_mapping->host;
	xfs_fsize_t		isize, new_size;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
	    (!(ioflags & IO_INVIS))) {
		bhv_vrwlock_t locktype = VRWLOCK_WRITE;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(outfilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		io->io_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (io->io_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		io->io_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
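/*
 * Note the check/lock/recheck idiom used twice above: i_size is first
 * compared unlocked to avoid taking the ilock in the common case, then
 * rechecked under XFS_ILOCK_EXCL before being updated, since another
 * writer may have extended the file between the two tests.
 */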
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error)
		return error;
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}
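/*
 * For example, with 4 KiB filesystem blocks, extending a file from
 * isize = 10000 to offset = 20000 gives zero_offset = 10000 % 4096 =
 * 1808, so the final 4096 - 1808 = 2288 bytes of the last allocated
 * block are zeroed here; blocks wholly beyond isize are left to
 * xfs_zero_eof() below.
 */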
/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */

int					/* error (positive) */
xfs_zero_eof(
	bhv_vnode_t	*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	struct inode	*ip = vn_to_inode(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, offset, isize);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}
	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
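	/*
	 * With 4 KiB blocks, isize = 10000 and offset = 20000, this
	 * works out to last_fsb = (10000 - 1) / 4096 = 2,
	 * start_zero_fsb = roundup(10000 / 4096) = 3 and
	 * end_zero_fsb = (20000 - 1) / 4096 = 4, so the loop below
	 * examines blocks 3 and 4; had the new EOF landed in block 2
	 * as well, the xfs_zero_last_block() call above would already
	 * have finished the job.
	 */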
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			goto out_lock;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:
	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}
ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	bhv_vnode_t		*vp;
	int			iolock;
	int			eventsent = 0;
	bhv_vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_i_mutex;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
	if (error)
		return error;

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	vfs_wait_for_freeze(vp->v_vfsp, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

relock:
	if (ioflags & IO_ISDIRECT) {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
		need_i_mutex = 0;
	} else {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;
		need_i_mutex = 1;
		mutex_lock(&inode->i_mutex);
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_mutex;
	}

	if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent)) {
		int	dmflags = FILP_DELAY_FLAG(file);

		if (need_i_mutex)
			dmflags |= DM_FLAGS_IMUX;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count, dmflags, &locktype);
		if (error)
			goto out_unlock_internal;
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && pos != xip->i_size)
			goto start;
	}

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			return XFS_ERROR(-EINVAL);
		}

		if (!need_i_mutex && (VN_CACHED(vp) || pos > xip->i_size)) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			iolock = XFS_IOLOCK_EXCL;
			locktype = VRWLOCK_WRITE;
			need_i_mutex = 1;
			mutex_lock(&inode->i_mutex);
			xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
			goto start;
		}
	}

	new_size = pos + count;
	if (new_size > xip->i_size)
		io->io_new_size = new_size;

	if (likely(!(ioflags & IO_INVIS))) {
		file_update_time(file);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	/*
	 * If the offset is beyond the size of the file, we have a couple
	 * of things to do. First, if there is already space allocated
	 * we need to either create holes or zero the disk or ...
	 *
	 * If there is a page where the previous size lands, we need
	 * to zero it out up to the new size.
	 */

	if (pos > xip->i_size) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos, xip->i_size);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL);
			goto out_unlock_internal;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */

	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_path.dentry);
		if (unlikely(error))
			goto out_unlock_internal;
	}
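	/*
	 * For instance, an unprivileged write to a root-owned 04755
	 * binary clears S_ISUID here.  S_ISGID is only cleared when
	 * S_IXGRP is also set, because setgid without group-execute
	 * denotes mandatory locking rather than a setgid executable
	 * and must be preserved.
	 */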
retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if ((ioflags & IO_ISDIRECT)) {
		if (VN_CACHED(vp)) {
			WARN_ON(need_i_mutex == 0);
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			error = bhv_vop_flushinval_pages(vp,
					ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
			if (error)
				goto out_unlock_internal;
		}

		if (need_i_mutex) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			mutex_unlock(&inode->i_mutex);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_i_mutex = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
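	/*
	 * A short direct write above is retried as buffered I/O by
	 * jumping back to "relock" with IO_ISDIRECT cleared; similarly,
	 * the ENOSPC case below gives a DMAPI space-management
	 * application a chance to free space and then retries the
	 * write from the current file size.
	 */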
	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_nounlocks;
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_rwlock(bdp, locktype);
		pos = xip->i_size;
		ret = 0;
		goto retry;
	}

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
		*offset = isize;

	if (*offset > xip->i_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_size)
			xip->i_size = *offset;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		error = xfs_write_sync_logforce(mp, xip);
		if (error)
			goto out_unlock_internal;

		xfs_rwunlock(bdp, locktype);
		if (need_i_mutex)
			mutex_unlock(&inode->i_mutex);

		error = sync_page_range(inode, mapping, pos, ret);
		if (need_i_mutex)
			mutex_lock(&inode->i_mutex);
		xfs_rwlock(bdp, locktype);
	}

 out_unlock_internal:
	if (io->io_new_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		io->io_new_size = 0;
		/*
		 * If this was a direct or synchronous I/O that failed (such
		 * as ENOSPC) then part of the I/O may have been written to
		 * disk before the error occurred. In this case the on-disk
		 * file size may have been adjusted beyond the in-memory file
		 * size and now needs to be truncated back.
		 */
		if (xip->i_d.di_size > xip->i_size)
			xip->i_d.di_size = xip->i_size;
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}
	xfs_rwunlock(bdp, locktype);
 out_unlock_mutex:
	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);
 out_nounlocks:
	return -error;
}
/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}
}
int
xfs_bmap(bhv_desc_t *bdp, xfs_off_t offset, ssize_t count,
	 int flags, xfs_iomap_t *iomapp, int *niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}
/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes through this path; one of the exceptions
 * is the superblock.
 */
void
xfsbdstrat(struct xfs_mount *mp, struct xfs_buf *bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 */
		xfs_buf_iorequest(bp);
		return;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	xfs_bioerror_relse(bp);
}
/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(xfs_mount_t *mp, char *message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}