 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_dfrag.h"
#include "xfs_fsops.h"
#include "xfs_vnodeops.h"
#include "xfs_quota.h"
#include "xfs_inode_item.h"

#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
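/*
 * Illustrative userspace call (a sketch only, not part of this file; it
 * assumes the xfs_fsop_handlereq_t layout from xfs_fs.h and a descriptor
 * "fd" opened anywhere on the target XFS filesystem):
 *
 *	xfs_handle_t		handle;
 *	__s32			hlen;
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *
 *	hreq.path     = "/mnt/scratch";
 *	hreq.ohandle  = &handle;
 *	hreq.ohandlen = &hlen;
 *	if (ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq) < 0)
 *		perror("XFS_IOC_PATH_TO_FSHANDLE");
 */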
	xfs_fsop_handlereq_t	*hreq)

	memset((char *)&handle, 0, sizeof(handle));

	case XFS_IOC_PATH_TO_FSHANDLE:
	case XFS_IOC_PATH_TO_HANDLE: {
		int error = user_lpath((const char __user *)hreq->path, &path);

		ASSERT(path.dentry->d_inode);
		inode = igrab(path.dentry->d_inode);

	case XFS_IOC_FD_TO_HANDLE: {
		file = fget(hreq->fd);

		ASSERT(file->f_path.dentry);
		ASSERT(file->f_path.dentry->d_inode);
		inode = igrab(file->f_path.dentry->d_inode);

		return -XFS_ERROR(EINVAL);

	if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
		/* we're not in XFS anymore, Toto */
		return -XFS_ERROR(EINVAL);

	switch (inode->i_mode & S_IFMT) {
		return -XFS_ERROR(EBADF);

	/* now we can grab the fsid */
	memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid,
	hsize = sizeof(xfs_fsid_t);

	if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
		xfs_inode_t	*ip = XFS_I(inode);

		/* need to get access to the xfs_inode to read the generation */
		lock_mode = xfs_ilock_map_shared(ip);

		/* fill in fid section of handle from inode */
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = ip->i_d.di_gen;
		handle.ha_fid.fid_ino = ip->i_ino;

		xfs_iunlock_map_shared(ip, lock_mode);

		hsize = XFS_HSIZE(handle);

	/* now copy our handle into the user buffer & write out the size */
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) {
		return -XFS_ERROR(EFAULT);
 * Convert userspace handle data into inode.
 *
 * We use the fact that all the fsop_handlereq ioctl calls have a data
 * structure argument whose first component is always an xfs_fsop_handlereq_t,
 * so we can pass that sub structure into this handy, shared routine.
 *
 * If no error, caller must always iput the returned inode.
xfs_vget_fsop_handlereq(
	struct inode		*parinode,	/* parent inode pointer    */
	xfs_fsop_handlereq_t	*hreq,
	struct inode		**inode)

	xfs_handle_t		*handlep;

	 * Only allow handle opens under a directory.
	if (!S_ISDIR(parinode->i_mode))
		return XFS_ERROR(ENOTDIR);

	hanp = hreq->ihandle;
	hlen = hreq->ihandlen;

	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
		return XFS_ERROR(EINVAL);
	if (copy_from_user(handlep, hanp, hlen))
		return XFS_ERROR(EFAULT);
	if (hlen < sizeof(*handlep))
		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
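	/*
	 * For a full handle the embedded fid length must exactly account
	 * for the bytes that follow the fsid, and the pad must be clear;
	 * otherwise the handle is malformed.
	 */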
	if (hlen > sizeof(handlep->ha_fsid)) {
		if (handlep->ha_fid.fid_len !=
		    (hlen - sizeof(handlep->ha_fsid) -
			    sizeof(handlep->ha_fid.fid_len)) ||
		    handlep->ha_fid.fid_pad)
			return XFS_ERROR(EINVAL);
	}

	 * Crack the handle, obtain the inode # & generation #
	xfid = (struct xfs_fid *)&handlep->ha_fid;
	if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
		ino  = xfid->fid_ino;
		igen = xfid->fid_gen;
	} else {
		return XFS_ERROR(EINVAL);
	}

	 * Get the XFS inode, building a Linux inode to go with it.
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return XFS_ERROR(error);
	if (ip == NULL)
		return XFS_ERROR(EIO);

	if (ip->i_d.di_gen != igen) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOENT);
	}

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_fsop_handlereq_t	*hreq,
	struct file		*parfilp,
	struct inode		*parinode)

	const struct cred	*cred = current_cred();
	struct dentry		*dentry;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		return -XFS_ERROR(EINVAL);

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	/* Put open permission in namei format. */
	permflag = hreq->oflags;
	if ((permflag+1) & O_ACCMODE)
		permflag++;
	if (permflag & O_TRUNC)
		permflag |= 2;
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
		return -XFS_ERROR(EPERM);

	if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		return -XFS_ERROR(EACCES);

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
		return -XFS_ERROR(EISDIR);
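	/*
	 * Allocate a file descriptor, back it with an anonymous dentry for
	 * the handle's inode and open it against the parent's vfsmount.
	 */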
	if ((new_fd = get_unused_fd()) < 0) {

	dentry = d_obtain_alias(inode);
	if (IS_ERR(dentry)) {
		put_unused_fd(new_fd);
		return PTR_ERR(dentry);

	/* Ensure umount returns EBUSY on umounts while this file is open. */
	mntget(parfilp->f_path.mnt);

	/* Create file pointer. */
	filp = dentry_open(dentry, parfilp->f_path.mnt, hreq->oflags, cred);
	if (IS_ERR(filp)) {
		put_unused_fd(new_fd);
		return -XFS_ERROR(-PTR_ERR(filp));
	}

	if (inode->i_mode & S_IFREG) {
		/* invisible operation should not change atime */
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(new_fd, filp);
 * This is a copy from fs/namei.c:vfs_readlink(), except for removing its
 * unused first argument.

	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		len = -EFAULT;
xfs_readlink_by_handle(
	xfs_fsop_handlereq_t	*hreq,
	struct inode		*parinode)

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode);

	/* Restrict this handle operation to symlinks only. */
	if (!S_ISLNK(inode->i_mode)) {
		error = -XFS_ERROR(EINVAL);

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -XFS_ERROR(EFAULT);

	link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);

	error = -xfs_readlink(XFS_I(inode), link);

	error = do_readlink(hreq->ohandle, olen, link);
xfs_fssetdm_by_handle(
	struct inode		*parinode)

	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode);

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
		error = -XFS_ERROR(EPERM);

	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		error = -XFS_ERROR(EFAULT);

	error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
xfs_attrlist_by_handle(
	struct inode		*parinode)

	attrlist_cursor_kern_t	*cursor;
	xfs_fsop_attrlist_handlereq_t al_hreq;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	if (al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);

	 * Reject flags, only allow namespaces.
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -XFS_ERROR(EINVAL);

	error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode);

	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);

	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
			      al_hreq.flags, cursor);

	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
xfs_attrmulti_attr_get(

	if (*len > XATTR_SIZE_MAX)
		return EINVAL;
	kbuf = kmalloc(*len, GFP_KERNEL);

	error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);

	if (copy_to_user(ubuf, kbuf, *len))

xfs_attrmulti_attr_set(
	const char __user	*ubuf,

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return EPERM;
	if (len > XATTR_SIZE_MAX)
		return EINVAL;
	kbuf = kmalloc(len, GFP_KERNEL);

	if (copy_from_user(kbuf, ubuf, len))

	error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);

xfs_attrmulti_attr_remove(

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return EPERM;
	return xfs_attr_remove(XFS_I(inode), name, flags);
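/*
 * xfs_attrmulti_by_handle processes an array of xfs_attr_multiop_t
 * operations (get, set, remove) against the inode resolved from the
 * handle, recording a per-operation result in am_error.
 */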
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	struct inode		*parinode)

	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode);

	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)

	ops = kmalloc(size, GFP_KERNEL);

	if (copy_from_user(ops, am_hreq.ops, size))

	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);

	for (i = 0; i < am_hreq.opcount; i++) {
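		/*
		 * Copy the attribute name in from userspace, then dispatch
		 * on the opcode; each helper's result is stored back in
		 * ops[i].am_error for the final copy_to_user below.
		 */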
		ops[i].am_error = strncpy_from_user(attr_name,
				ops[i].am_attrname, MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			ops[i].am_error = ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(inode,
					attr_name, ops[i].am_attrvalue,
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			ops[i].am_error = xfs_attrmulti_attr_set(inode,
					attr_name, ops[i].am_attrvalue,
					ops[i].am_length, ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			ops[i].am_error = xfs_attrmulti_attr_remove(inode,
					attr_name, ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		default:
			ops[i].am_error = EINVAL;
		}
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = XFS_ERROR(EFAULT);
	struct xfs_inode	*ip,

	 * Only allow the sys admin to reserve space unless
	 * unwritten extents are enabled.
	if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
	    !capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -XFS_ERROR(EPERM);

	if (!(filp->f_mode & FMODE_WRITE))
		return -XFS_ERROR(EBADF);

	if (!S_ISREG(inode->i_mode))
		return -XFS_ERROR(EINVAL);

	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		attr_flags |= XFS_ATTR_NONBLOCK;
	if (ioflags & IO_INVIS)
		attr_flags |= XFS_ATTR_DMI;

	error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
		return -XFS_ERROR(EFAULT);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (bulkreq.ubuffer == NULL)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS)
		error = xfs_inumbers(mp, &inlast, &count,
					bulkreq.ubuffer, xfs_inumbers_fmt);
	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
		error = xfs_bulkstat_single(mp, &inlast,
						bulkreq.ubuffer, &done);
	else	/* XFS_IOC_FSBULKSTAT */
		error = xfs_bulkstat(mp, &inlast, &count,
			(bulkstat_one_pf)xfs_bulkstat_one, NULL,
			sizeof(xfs_bstat_t), bulkreq.ubuffer,
			BULKSTAT_FG_QUICK, &done);

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
				 sizeof(xfs_ino_t)))
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
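/*
 * Illustrative userspace use of XFS_IOC_FSBULKSTAT (a sketch only, not
 * part of this file; it assumes the xfs_fsop_bulkreq_t layout from
 * xfs_fs.h and a caller-supplied buffer of xfs_bstat_t records):
 *
 *	__u64			lastino = 0;
 *	__s32			ocount = 0;
 *	struct xfs_bstat	buf[64];
 *	xfs_fsop_bulkreq_t	breq = {
 *		.lastip  = &lastino,
 *		.icount  = 64,
 *		.ubuffer = buf,
 *		.ocount  = &ocount,
 *	};
 *
 *	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &breq) == 0 && ocount > 0)
 *		;	(process ocount records in buf, then loop)
 */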
xfs_ioc_fsgeometry_v1(

	xfs_fsop_geom_v1_t	fsgeo;

	error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -XFS_ERROR(EFAULT);

	xfs_fsop_geom_t		fsgeo;

	error = xfs_fs_geometry(mp, &fsgeo, 4);

	if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
		return -XFS_ERROR(EFAULT);
 * Linux extended inode flags interface.

xfs_merge_ioc_xflags(

	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= XFS_XFLAG_IMMUTABLE;
	else
		xflags &= ~XFS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= XFS_XFLAG_APPEND;
	else
		xflags &= ~XFS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= XFS_XFLAG_SYNC;
	else
		xflags &= ~XFS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= XFS_XFLAG_NOATIME;
	else
		xflags &= ~XFS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= XFS_XFLAG_NODUMP;
	else
		xflags &= ~XFS_XFLAG_NODUMP;
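/*
 * xfs_di2lxflags: map the on-disk XFS_DIFLAG_* bits to the generic
 * FS_*_FL flags that xfs_ioc_getxflags() hands back to userspace.
 */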
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	fa.fsx_xflags = xfs_ip2xflags(ip);
	fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa.fsx_projid = ip->i_d.di_projid;

	if (attr) {
		if (ip->i_afp->if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = ip->i_afp->if_bytes /
						sizeof(xfs_bmbt_rec_t);
		else
			fa.fsx_nextents = ip->i_d.di_anextents;
	} else {
		if (ip->i_df.if_flags & XFS_IFEXTENTS)
			fa.fsx_nextents = ip->i_df.if_bytes /
						sizeof(xfs_bmbt_rec_t);
		else
			fa.fsx_nextents = ip->i_d.di_nextents;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
	struct xfs_inode	*ip,

	unsigned int		di_flags;

	/* can't set PREALLOC this way, just preserve it */
	di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
	if (xflags & XFS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & XFS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & XFS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & XFS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & XFS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & XFS_XFLAG_PROJINHERIT)
		di_flags |= XFS_DIFLAG_PROJINHERIT;
	if (xflags & XFS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & XFS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
		if (xflags & XFS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & XFS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & XFS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
		if (xflags & XFS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & XFS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}
	ip->i_d.di_flags = di_flags;
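/*
 * xfs_diflags_to_linux: propagate the current XFS inode flags into the
 * generic S_* flags on the VFS inode, so core code sees the same
 * immutable/append-only/sync/noatime policy.
 */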
xfs_diflags_to_linux(
	struct xfs_inode	*ip)

	struct inode		*inode = VFS_I(ip);
	unsigned int		xflags = xfs_ip2xflags(ip);

	if (xflags & XFS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (xflags & XFS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (xflags & XFS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (xflags & XFS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
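/*
 * Mask bits that tell xfs_ioctl_setattr() which members of the incoming
 * fsxattr structure should be applied.
 */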
#define FSX_EXTSIZE	2
#define FSX_NONBLOCK	8
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	unsigned int		lock_flags = 0;
	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;

	xfs_itrace_entry(ip);

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return XFS_ERROR(EROFS);
	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	 * If disk quotas are on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the IDs
	 * in the inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
		code = XFS_QM_DQVOPALLOC(mp, ip, ip->i_d.di_uid,
					 ip->i_d.di_gid, fa->fsx_projid,
					 XFS_QMOPT_PQUOTA, &udqp, &gdqp);

	 * For the other attributes, we acquire the inode lock and
	 * first do an error checking pass.
	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);

	lock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_flags);

	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal
	 * to the file owner ID, except in cases where the
	 * CAP_FSETID capability is applicable.
	if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) {
		code = XFS_ERROR(EPERM);

	 * Do a quota reservation only if projid is actually going to change.
	if (mask & FSX_PROJID) {
		if (XFS_IS_PQUOTA_ON(mp) &&
		    ip->i_d.di_projid != fa->fsx_projid) {
			code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
						capable(CAP_FOWNER) ?
						XFS_QMOPT_FORCE_RES : 0);
			if (code)	/* out of quota */

	if (mask & FSX_EXTSIZE) {
		 * Can't change extent size if any extents are allocated.
		if (ip->i_d.di_nextents &&
		    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
			code = XFS_ERROR(EINVAL);	/* EFBIG? */

		 * Extent size must be a multiple of the appropriate block
		 * size, if set at all.
		if (fa->fsx_extsize != 0) {

			if (XFS_IS_REALTIME_INODE(ip) ||
			    ((mask & FSX_XFLAGS) &&
			    (fa->fsx_xflags & XFS_XFLAG_REALTIME))) {
				size = mp->m_sb.sb_rextsize <<
				       mp->m_sb.sb_blocklog;
			} else {
				size = mp->m_sb.sb_blocksize;
			}

			if (fa->fsx_extsize % size) {
				code = XFS_ERROR(EINVAL);
		 * Can't change realtime flag if any extents are allocated.
		if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
		    (XFS_IS_REALTIME_INODE(ip)) !=
		    (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
			code = XFS_ERROR(EINVAL);	/* EFBIG? */

		 * If the realtime flag is set then the inode must have
		 * realtime data.
		if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
			if ((mp->m_sb.sb_rblocks == 0) ||
			    (mp->m_sb.sb_rextsize == 0) ||
			    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
				code = XFS_ERROR(EINVAL);

		 * Can't modify an immutable/append-only file unless
		 * we have appropriate permission.
		if ((ip->i_d.di_flags &
				(XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
		     (fa->fsx_xflags &
				(XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
		    !capable(CAP_LINUX_IMMUTABLE)) {
			code = XFS_ERROR(EPERM);

	xfs_trans_ijoin(tp, ip, lock_flags);
	xfs_trans_ihold(tp, ip);

	 * Change file ownership.  Must be the owner or privileged.
	if (mask & FSX_PROJID) {
		 * CAP_FSETID overrides the following restrictions:
		 *
		 * The set-user-ID and set-group-ID bits of a file will be
		 * cleared upon successful return from chown().
		if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
		    !capable(CAP_FSETID))
			ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
		 * Change the ownerships and register quota modifications
		 * in the transaction.
		if (ip->i_d.di_projid != fa->fsx_projid) {
			if (XFS_IS_PQUOTA_ON(mp)) {
				olddquot = XFS_QM_DQVOPCHOWN(mp, tp, ip,
							&ip->i_gdquot, gdqp);
			ip->i_d.di_projid = fa->fsx_projid;

			 * We may have to rev the inode as well as
			 * the superblock version number since projids didn't
			 * exist before DINODE_VERSION_2 and SB_VERSION_NLINK.
			if (ip->i_d.di_version == 1)
				xfs_bump_ino_vers2(tp, ip);

	if (mask & FSX_EXTSIZE)
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	if (mask & FSX_XFLAGS) {
		xfs_set_diflags(ip, fa->fsx_xflags);
		xfs_diflags_to_linux(ip);

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_ichgtime(ip, XFS_ICHGTIME_CHG);

	XFS_STATS_INC(xs_ig_attrchg);

	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 * This is slightly sub-optimal in that truncates require
	 * two sync transactions instead of one for wsync filesystems.
	 * One for the truncate and one for the timestamps since we
	 * don't want to change the timestamps unless we're sure the
	 * truncate worked.  Truncates are less than 1% of the laddis
	 * mix so this probably isn't worth the trouble to optimize.
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);
	code = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, lock_flags);

	 * Release any dquot(s) the inode had kept before chown.
	XFS_QM_DQRELE(mp, olddquot);
	XFS_QM_DQRELE(mp, udqp);
	XFS_QM_DQRELE(mp, gdqp);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE)) {
		XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
				NULL, DM_RIGHT_NULL, NULL, NULL, 0, 0,
				(mask & FSX_NONBLOCK) ? DM_FLAGS_NDELAY : 0);

	XFS_QM_DQRELE(mp, udqp);
	XFS_QM_DQRELE(mp, gdqp);
	xfs_trans_cancel(tp, 0);

	xfs_iunlock(ip, lock_flags);
	if (copy_from_user(&fa, arg, sizeof(fa)))

	mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID;
	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		mask |= FSX_NONBLOCK;

	return -xfs_ioctl_setattr(ip, &fa, mask);

	flags = xfs_di2lxflags(ip->i_d.di_flags);
	if (copy_to_user(arg, &flags, sizeof(flags)))

	if (copy_from_user(&flags, arg, sizeof(flags)))

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \

	if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
		mask |= FSX_NONBLOCK;
	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	return -xfs_ioctl_setattr(ip, &fa, mask);
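/*
 * Formatter callbacks handed to xfs_getbmap(): each one copies a single
 * extent record out to the user buffer, in either the short getbmap or
 * the extended getbmapx layout, and advances the output cursor.
 */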
xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full)

	struct getbmap __user	*base = *ap;

	/* copy only getbmap portion (not getbmapx) */
	if (copy_to_user(base, bmv, sizeof(struct getbmap)))
		return XFS_ERROR(EFAULT);

	*ap += sizeof(struct getbmap);

	struct xfs_inode	*ip,

	struct getbmapx		bmx;

	if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
		return -XFS_ERROR(EFAULT);

	if (bmx.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
	if (ioflags & IO_INVIS)
		bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;

	error = xfs_getbmap(ip, &bmx, xfs_getbmap_format,
			    (struct getbmap *)arg+1);

	/* copy back header - only size of getbmap */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmap)))
		return -XFS_ERROR(EFAULT);
xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full)

	struct getbmapx __user	*base = *ap;

	if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
		return XFS_ERROR(EFAULT);

	*ap += sizeof(struct getbmapx);

	struct xfs_inode	*ip,

	struct getbmapx		bmx;

	if (copy_from_user(&bmx, arg, sizeof(bmx)))
		return -XFS_ERROR(EFAULT);

	if (bmx.bmv_count < 2)
		return -XFS_ERROR(EINVAL);

	if (bmx.bmv_iflags & (~BMV_IF_VALID))
		return -XFS_ERROR(EINVAL);

	error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
			    (struct getbmapx *)arg+1);

	/* copy back header */
	if (copy_to_user(arg, &bmx, sizeof(struct getbmapx)))
		return -XFS_ERROR(EFAULT);
 * Note: some of the ioctls return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
	struct inode		*inode = filp->f_path.dentry->d_inode;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;

	if (filp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	xfs_itrace_entry(ip);

	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64: {

		if (copy_from_user(&bf, arg, sizeof(bf)))
			return -XFS_ERROR(EFAULT);
		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);

	case XFS_IOC_DIOINFO: {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;

		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_bulkstat(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry_v1(mp, arg);

	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_fssetxattr(ip, filp, arg);
	case XFS_IOC_GETXFLAGS:
		return xfs_ioc_getxflags(ip, arg);
	case XFS_IOC_SETXFLAGS:
		return xfs_ioc_setxflags(ip, filp, arg);

	case XFS_IOC_FSSETDM: {
		struct fsdmidata	dmi;

		if (copy_from_user(&dmi, arg, sizeof(dmi)))
			return -XFS_ERROR(EFAULT);

		error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
		return xfs_ioc_getbmap(ip, ioflags, cmd, arg);

	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmapx(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -XFS_ERROR(EFAULT);
		return xfs_find_handle(cmd, &hreq);

	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -XFS_ERROR(EFAULT);
		return xfs_open_by_handle(mp, &hreq, filp, inode);

	case XFS_IOC_FSSETDM_BY_HANDLE:
		return xfs_fssetdm_by_handle(mp, arg, inode);

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -XFS_ERROR(EFAULT);
		return xfs_readlink_by_handle(mp, &hreq, inode);

	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(mp, arg, inode);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(mp, arg, filp, inode);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -XFS_ERROR(EFAULT);
		error = xfs_swapext(&sxp);

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t	out;

		error = xfs_fs_counts(mp, &out);

		if (copy_to_user(arg, &out, sizeof(out)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t	inout;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -XFS_ERROR(EFAULT);

		/* input parameter is passed in resblks field of structure */
		error = xfs_reserve_blocks(mp, &in, &inout);

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t	out;

		if (!capable(CAP_SYS_ADMIN))

		error = xfs_reserve_blocks(mp, NULL, &out);

		if (copy_to_user(arg, &out, sizeof(out)))
			return -XFS_ERROR(EFAULT);

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_data(mp, &in);

	case XFS_IOC_FSGROWFSLOG: {
		xfs_growfs_log_t	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_log(mp, &in);

	case XFS_IOC_FSGROWFSRT: {

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_growfs_rt(mp, &in);

	case XFS_IOC_FREEZE:
		if (!capable(CAP_SYS_ADMIN))

		if (inode->i_sb->s_frozen == SB_UNFROZEN)
			freeze_bdev(inode->i_sb->s_bdev);

	case XFS_IOC_THAW:
		if (!capable(CAP_SYS_ADMIN))

		if (inode->i_sb->s_frozen != SB_UNFROZEN)
			thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);

	case XFS_IOC_GOINGDOWN: {

		if (!capable(CAP_SYS_ADMIN))

		if (get_user(in, (__uint32_t __user *)arg))
			return -XFS_ERROR(EFAULT);

		error = xfs_fs_goingdown(mp, in);

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t	in;

		if (!capable(CAP_SYS_ADMIN))

		if (copy_from_user(&in, arg, sizeof(in)))
			return -XFS_ERROR(EFAULT);

		error = xfs_errortag_add(in.errtag, mp);

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))

		error = xfs_errortag_clearall(mp, 1);