2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "xfs_trans.h"
27 #include "xfs_alloc.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_attr_sf.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_btree.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_rtalloc.h"
40 #include "xfs_itable.h"
41 #include "xfs_error.h"
46 #include "xfs_buf_item.h"
47 #include "xfs_utils.h"
48 #include "xfs_dfrag.h"
49 #include "xfs_fsops.h"
50 #include "xfs_vnodeops.h"
52 #include <linux/capability.h>
53 #include <linux/dcache.h>
54 #include <linux/mount.h>
55 #include <linux/namei.h>
56 #include <linux/pagemap.h>
/*
 * NOTE(review): this file is an extraction with lines dropped; the number at
 * the start of each line is the original kernel source line number. Comments
 * below describe only what the visible lines establish.
 */
59 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
60 * a file or fs handle.
62 * XFS_IOC_PATH_TO_FSHANDLE
63 * returns fs handle for a mount point or path within that mount point
64 * XFS_IOC_FD_TO_HANDLE
65 * returns full handle for a FD opened in user space
66 * XFS_IOC_PATH_TO_HANDLE
67 * returns full handle for a path
/* Copy the handle request from userspace; -EFAULT on a bad user pointer. */
76 xfs_fsop_handlereq_t hreq;
80 if (copy_from_user(&hreq, arg, sizeof(hreq)))
81 return -XFS_ERROR(EFAULT);
83 memset((char *)&handle, 0, sizeof(handle));
/*
 * Resolve the target inode either from a path (path walk + igrab) or from
 * an already-open fd (fget + igrab), depending on the ioctl command.
 */
86 case XFS_IOC_PATH_TO_FSHANDLE:
87 case XFS_IOC_PATH_TO_HANDLE: {
91 error = user_path_walk_link((const char __user *)hreq.path, &nd);
96 ASSERT(nd.dentry->d_inode);
97 inode = igrab(nd.dentry->d_inode);
102 case XFS_IOC_FD_TO_HANDLE: {
105 file = fget(hreq.fd);
109 ASSERT(file->f_path.dentry);
110 ASSERT(file->f_path.dentry->d_inode);
111 inode = igrab(file->f_path.dentry->d_inode);
118 return -XFS_ERROR(EINVAL);
/* Reject inodes that do not live on an XFS filesystem. */
121 if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
122 /* we're not in XFS anymore, Toto */
124 return -XFS_ERROR(EINVAL);
/* NOTE(review): allowed file types for the default branch not visible here. */
127 switch (inode->i_mode & S_IFMT) {
134 return -XFS_ERROR(EBADF);
137 /* we need the vnode */
138 vp = vn_from_inode(inode);
140 /* now we can grab the fsid */
141 memcpy(&handle.ha_fsid, XFS_MTOVFS(XFS_I(inode)->i_mount)->vfs_altfsid,
143 hsize = sizeof(xfs_fsid_t);
/*
 * For full handles (FD/PATH_TO_HANDLE) also fill in the fid section —
 * inode number plus generation, read under a shared inode lock.
 */
145 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
149 /* need to get access to the xfs_inode to read the generation */
152 lock_mode = xfs_ilock_map_shared(ip);
154 /* fill in fid section of handle from inode */
155 handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
156 sizeof(handle.ha_fid.xfs_fid_len);
157 handle.ha_fid.xfs_fid_pad = 0;
158 handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
159 handle.ha_fid.xfs_fid_ino = ip->i_ino;
161 xfs_iunlock_map_shared(ip, lock_mode);
163 hsize = XFS_HSIZE(handle);
166 /* now copy our handle into the user buffer & write out the size */
167 if (copy_to_user(hreq.ohandle, &handle, hsize) ||
168 copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
170 return -XFS_ERROR(EFAULT);
179 * Convert userspace handle data into vnode (and inode).
180 * We [ab]use the fact that all the fsop_handlereq ioctl calls
181 * have a data structure argument whose first component is always
182 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
183 * This allows us to optimise the copy_from_user calls and gives
184 * a handy, shared routine.
186 * If no error, caller must always VN_RELE the returned vp.
189 xfs_vget_fsop_handlereq(
191 struct inode *parinode, /* parent inode pointer */
192 xfs_fsop_handlereq_t *hreq,
194 struct inode **inode)
199 xfs_handle_t *handlep;
202 struct inode *inodep;
209 * Only allow handle opens under a directory.
211 if (!S_ISDIR(parinode->i_mode))
212 return XFS_ERROR(ENOTDIR);
/* Validate the user-supplied handle length before copying it in. */
214 hanp = hreq->ihandle;
215 hlen = hreq->ihandlen;
218 if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
219 return XFS_ERROR(EINVAL);
220 if (copy_from_user(handlep, hanp, hlen))
221 return XFS_ERROR(EFAULT);
/* Zero the tail of a short handle so later field checks are well-defined. */
222 if (hlen < sizeof(*handlep))
223 memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
/* If a fid is present, its self-described length and pad must be sane. */
224 if (hlen > sizeof(handlep->ha_fsid)) {
225 if (handlep->ha_fid.xfs_fid_len !=
226 (hlen - sizeof(handlep->ha_fsid)
227 - sizeof(handlep->ha_fid.xfs_fid_len))
228 || handlep->ha_fid.xfs_fid_pad)
229 return XFS_ERROR(EINVAL);
233 * Crack the handle, obtain the inode # & generation #
235 xfid = (struct xfs_fid *)&handlep->ha_fid;
236 if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
237 ino = xfid->xfs_fid_ino;
238 igen = xfid->xfs_fid_gen;
240 return XFS_ERROR(EINVAL);
244 * Get the XFS inode, building a vnode to go with it.
246 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
250 return XFS_ERROR(EIO);
/* Stale handle: inode was freed (di_mode 0) or reused (generation mismatch). */
251 if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
252 xfs_iput_new(ip, XFS_ILOCK_SHARED);
253 return XFS_ERROR(ENOENT);
257 inodep = vn_to_inode(vpp);
258 xfs_iunlock(ip, XFS_ILOCK_SHARED);
/*
 * xfs_open_by_handle fragment: opens a new fd for the inode identified by a
 * userspace handle. Root-only (CAP_SYS_ADMIN). NOTE(review): the function
 * signature and several lines are missing from this extraction.
 */
269 struct file *parfilp,
270 struct inode *parinode)
277 struct dentry *dentry;
279 xfs_fsop_handlereq_t hreq;
281 if (!capable(CAP_SYS_ADMIN))
282 return -XFS_ERROR(EPERM);
283 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
284 return -XFS_ERROR(EFAULT);
286 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
290 /* Restrict xfs_open_by_handle to directories & regular files. */
291 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
293 return -XFS_ERROR(EINVAL);
296 #if BITS_PER_LONG != 32
297 hreq.oflags |= O_LARGEFILE;
299 /* Put open permission in namei format. */
300 permflag = hreq.oflags;
301 if ((permflag+1) & O_ACCMODE)
303 if (permflag & O_TRUNC)
/* Enforce append-only / immutable / read-only-directory semantics by hand. */
306 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
307 (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
309 return -XFS_ERROR(EPERM);
312 if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
314 return -XFS_ERROR(EACCES);
317 /* Can't write directories. */
318 if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
320 return -XFS_ERROR(EISDIR);
323 if ((new_fd = get_unused_fd()) < 0) {
/* Anonymous dentry: the inode was reached by handle, not by name. */
328 dentry = d_alloc_anon(inode);
329 if (dentry == NULL) {
331 put_unused_fd(new_fd);
332 return -XFS_ERROR(ENOMEM);
335 /* Ensure umount returns EBUSY on umounts while this file is open. */
336 mntget(parfilp->f_path.mnt);
338 /* Create file pointer. */
339 filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags);
341 put_unused_fd(new_fd);
342 return -XFS_ERROR(-PTR_ERR(filp));
344 if (inode->i_mode & S_IFREG) {
345 /* invisible operation should not change atime */
346 filp->f_flags |= O_NOATIME;
347 filp->f_op = &xfs_invis_file_operations;
350 fd_install(new_fd, filp);
355 * This is a copy from fs/namei.c:vfs_readlink(), except for removing its
356 * unused first argument.
/* Clamp the copy to the caller's buffer, then hand the link text to user space. */
371 if (len > (unsigned) buflen)
373 if (copy_to_user(buffer, link, len))
/*
 * Read the target of a symlink identified by a userspace handle.
 * Root-only (CAP_SYS_ADMIN); only valid on symlink inodes.
 */
381 xfs_readlink_by_handle(
384 struct inode *parinode)
387 xfs_fsop_handlereq_t hreq;
393 if (!capable(CAP_SYS_ADMIN))
394 return -XFS_ERROR(EPERM);
395 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
396 return -XFS_ERROR(EFAULT);
398 error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
402 /* Restrict this handle operation to symlinks only. */
403 if (!S_ISLNK(inode->i_mode)) {
404 error = -XFS_ERROR(EINVAL);
/* Caller supplies the output buffer length via hreq.ohandlen. */
408 if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
409 error = -XFS_ERROR(EFAULT);
413 link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
417 error = -xfs_readlink(XFS_I(inode), link);
420 error = do_readlink(hreq.ohandle, olen, link);
/*
 * Set DMAPI attributes (event mask + state) on an inode identified by a
 * userspace handle. Requires CAP_MKNOD; refused on immutable/append-only
 * inodes.
 */
432 xfs_fssetdm_by_handle(
435 struct inode *parinode)
438 struct fsdmidata fsd;
439 xfs_fsop_setdm_handlereq_t dmhreq;
443 if (!capable(CAP_MKNOD))
444 return -XFS_ERROR(EPERM);
445 if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
446 return -XFS_ERROR(EFAULT);
448 error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &vp, &inode);
452 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
454 return -XFS_ERROR(EPERM);
457 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
459 return -XFS_ERROR(EFAULT);
462 error = xfs_set_dmattrs(xfs_vtoi(vp),
463 fsd.fsd_dmevmask, fsd.fsd_dmstate);
/*
 * List extended attributes of an inode identified by a userspace handle.
 * Root-only; the user-supplied buffer length is capped at XATTR_LIST_MAX
 * before the kernel buffer is allocated.
 */
472 xfs_attrlist_by_handle(
475 struct inode *parinode)
478 attrlist_cursor_kern_t *cursor;
479 xfs_fsop_attrlist_handlereq_t al_hreq;
484 if (!capable(CAP_SYS_ADMIN))
485 return -XFS_ERROR(EPERM);
486 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
487 return -XFS_ERROR(EFAULT);
488 if (al_hreq.buflen > XATTR_LIST_MAX)
489 return -XFS_ERROR(EINVAL);
491 error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq,
496 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
/* The resume cursor lives inside the request structure itself. */
500 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
501 error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
502 al_hreq.flags, cursor);
506 if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
/*
 * ATTR_OP_GET helper: read one extended attribute into a kernel buffer
 * (size capped at XATTR_SIZE_MAX) and copy it out to userspace.
 * *len is updated by xfs_attr_get to the actual attribute length.
 */
518 xfs_attrmulti_attr_get(
528 if (*len > XATTR_SIZE_MAX)
530 kbuf = kmalloc(*len, GFP_KERNEL);
534 error = xfs_attr_get(XFS_I(inode), name, kbuf, len, flags, NULL);
538 if (copy_to_user(ubuf, kbuf, *len))
/*
 * ATTR_OP_SET helper: write one extended attribute. Refused on read-only,
 * immutable or append-only inodes; value size capped at XATTR_SIZE_MAX.
 */
547 xfs_attrmulti_attr_set(
550 const char __user *ubuf,
557 if (IS_RDONLY(inode))
559 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
561 if (len > XATTR_SIZE_MAX)
564 kbuf = kmalloc(len, GFP_KERNEL);
568 if (copy_from_user(kbuf, ubuf, len))
571 error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
/*
 * ATTR_OP_REMOVE helper: delete one extended attribute, with the same
 * read-only / immutable / append-only refusals as the set path.
 */
579 xfs_attrmulti_attr_remove(
584 if (IS_RDONLY(inode))
586 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
588 return xfs_attr_remove(XFS_I(inode), name, flags);
/*
 * Perform a batch of extended-attribute operations (get/set/remove) on an
 * inode identified by a userspace handle. Root-only. Per-op status is
 * reported back through each op's am_error field.
 */
592 xfs_attrmulti_by_handle(
595 struct inode *parinode)
598 xfs_attr_multiop_t *ops;
599 xfs_fsop_attrmulti_handlereq_t am_hreq;
602 unsigned int i, size;
605 if (!capable(CAP_SYS_ADMIN))
606 return -XFS_ERROR(EPERM);
607 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
608 return -XFS_ERROR(EFAULT);
610 error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &vp, &inode);
/* Bound the op array: non-zero and at most 16 pages worth of ops. */
615 size = am_hreq.opcount * sizeof(attr_multiop_t);
616 if (!size || size > 16 * PAGE_SIZE)
620 ops = kmalloc(size, GFP_KERNEL);
625 if (copy_from_user(ops, am_hreq.ops, size))
628 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
634 for (i = 0; i < am_hreq.opcount; i++) {
/* Copy in the attribute name; empty or unterminated names are rejected. */
635 ops[i].am_error = strncpy_from_user(attr_name,
636 ops[i].am_attrname, MAXNAMELEN);
637 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
639 if (ops[i].am_error < 0)
642 switch (ops[i].am_opcode) {
644 ops[i].am_error = xfs_attrmulti_attr_get(inode,
645 attr_name, ops[i].am_attrvalue,
646 &ops[i].am_length, ops[i].am_flags);
649 ops[i].am_error = xfs_attrmulti_attr_set(inode,
650 attr_name, ops[i].am_attrvalue,
651 ops[i].am_length, ops[i].am_flags);
654 ops[i].am_error = xfs_attrmulti_attr_remove(inode,
655 attr_name, ops[i].am_flags);
658 ops[i].am_error = EINVAL;
/* Copy the whole op array back so userspace sees per-op results. */
662 if (copy_to_user(am_hreq.ops, ops, size))
663 error = XFS_ERROR(EFAULT);
674 /* prototypes for a few of the stack-hungry cases that have
675 * their own functions. Functions are defined after their use
676 * so gcc doesn't get fancy and inline them with -O3 */
/* NOTE(review): the prototype lines below are fragments; names/params dropped. */
680 struct xfs_inode *ip,
694 xfs_ioc_fsgeometry_v1(
713 struct xfs_inode *ip,
720 struct xfs_inode *ip,
/*
 * xfs_ioctl dispatch fragment: one big switch on the ioctl command number,
 * delegating to the per-command helpers defined elsewhere in this file.
 */
731 struct inode *inode = filp->f_path.dentry->d_inode;
732 bhv_vnode_t *vp = vn_from_inode(inode);
733 xfs_mount_t *mp = ip->i_mount;
736 vn_trace_entry(XFS_I(inode), "xfs_ioctl", (inst_t *)__return_address);
/* Space reservation ioctls: admin-only unless unwritten extents are enabled. */
740 case XFS_IOC_ALLOCSP:
743 case XFS_IOC_UNRESVSP:
744 case XFS_IOC_ALLOCSP64:
745 case XFS_IOC_FREESP64:
746 case XFS_IOC_RESVSP64:
747 case XFS_IOC_UNRESVSP64:
749 * Only allow the sys admin to reserve space unless
750 * unwritten extents are enabled.
752 if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
753 !capable(CAP_SYS_ADMIN))
756 return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
/* Direct-I/O geometry: alignment/size constraints of the backing target. */
758 case XFS_IOC_DIOINFO: {
760 xfs_buftarg_t *target =
761 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
762 mp->m_rtdev_targp : mp->m_ddev_targp;
764 da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
765 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
767 if (copy_to_user(arg, &da, sizeof(da)))
768 return -XFS_ERROR(EFAULT);
772 case XFS_IOC_FSBULKSTAT_SINGLE:
773 case XFS_IOC_FSBULKSTAT:
774 case XFS_IOC_FSINUMBERS:
775 return xfs_ioc_bulkstat(mp, cmd, arg);
777 case XFS_IOC_FSGEOMETRY_V1:
778 return xfs_ioc_fsgeometry_v1(mp, arg);
780 case XFS_IOC_FSGEOMETRY:
781 return xfs_ioc_fsgeometry(mp, arg);
783 case XFS_IOC_GETVERSION:
784 return put_user(inode->i_generation, (int __user *)arg);
786 case XFS_IOC_GETXFLAGS:
787 case XFS_IOC_SETXFLAGS:
788 case XFS_IOC_FSGETXATTR:
789 case XFS_IOC_FSSETXATTR:
790 case XFS_IOC_FSGETXATTRA:
791 return xfs_ioc_xattr(vp, ip, filp, cmd, arg);
793 case XFS_IOC_FSSETDM: {
794 struct fsdmidata dmi;
796 if (copy_from_user(&dmi, arg, sizeof(dmi)))
797 return -XFS_ERROR(EFAULT);
799 error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
804 case XFS_IOC_GETBMAP:
805 case XFS_IOC_GETBMAPA:
806 return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
808 case XFS_IOC_GETBMAPX:
809 return xfs_ioc_getbmapx(ip, arg);
/* Handle-based operations (see the *_by_handle helpers above). */
811 case XFS_IOC_FD_TO_HANDLE:
812 case XFS_IOC_PATH_TO_HANDLE:
813 case XFS_IOC_PATH_TO_FSHANDLE:
814 return xfs_find_handle(cmd, arg);
816 case XFS_IOC_OPEN_BY_HANDLE:
817 return xfs_open_by_handle(mp, arg, filp, inode);
819 case XFS_IOC_FSSETDM_BY_HANDLE:
820 return xfs_fssetdm_by_handle(mp, arg, inode);
822 case XFS_IOC_READLINK_BY_HANDLE:
823 return xfs_readlink_by_handle(mp, arg, inode);
825 case XFS_IOC_ATTRLIST_BY_HANDLE:
826 return xfs_attrlist_by_handle(mp, arg, inode);
828 case XFS_IOC_ATTRMULTI_BY_HANDLE:
829 return xfs_attrmulti_by_handle(mp, arg, inode);
831 case XFS_IOC_SWAPEXT: {
832 error = xfs_swapext((struct xfs_swapext __user *)arg);
836 case XFS_IOC_FSCOUNTS: {
837 xfs_fsop_counts_t out;
839 error = xfs_fs_counts(mp, &out);
843 if (copy_to_user(arg, &out, sizeof(out)))
844 return -XFS_ERROR(EFAULT);
/* Reserved-block pool get/set: both CAP_SYS_ADMIN only. */
848 case XFS_IOC_SET_RESBLKS: {
849 xfs_fsop_resblks_t inout;
852 if (!capable(CAP_SYS_ADMIN))
855 if (copy_from_user(&inout, arg, sizeof(inout)))
856 return -XFS_ERROR(EFAULT);
858 /* input parameter is passed in resblks field of structure */
860 error = xfs_reserve_blocks(mp, &in, &inout);
864 if (copy_to_user(arg, &inout, sizeof(inout)))
865 return -XFS_ERROR(EFAULT);
869 case XFS_IOC_GET_RESBLKS: {
870 xfs_fsop_resblks_t out;
872 if (!capable(CAP_SYS_ADMIN))
875 error = xfs_reserve_blocks(mp, NULL, &out);
879 if (copy_to_user(arg, &out, sizeof(out)))
880 return -XFS_ERROR(EFAULT);
/* Online grow of data, log and realtime sections: CAP_SYS_ADMIN only. */
885 case XFS_IOC_FSGROWFSDATA: {
886 xfs_growfs_data_t in;
888 if (!capable(CAP_SYS_ADMIN))
891 if (copy_from_user(&in, arg, sizeof(in)))
892 return -XFS_ERROR(EFAULT);
894 error = xfs_growfs_data(mp, &in);
898 case XFS_IOC_FSGROWFSLOG: {
901 if (!capable(CAP_SYS_ADMIN))
904 if (copy_from_user(&in, arg, sizeof(in)))
905 return -XFS_ERROR(EFAULT);
907 error = xfs_growfs_log(mp, &in);
911 case XFS_IOC_FSGROWFSRT: {
914 if (!capable(CAP_SYS_ADMIN))
917 if (copy_from_user(&in, arg, sizeof(in)))
918 return -XFS_ERROR(EFAULT);
920 error = xfs_growfs_rt(mp, &in);
/* Freeze/thaw the block device; only acts when state actually changes. */
925 if (!capable(CAP_SYS_ADMIN))
928 if (inode->i_sb->s_frozen == SB_UNFROZEN)
929 freeze_bdev(inode->i_sb->s_bdev);
933 if (!capable(CAP_SYS_ADMIN))
935 if (inode->i_sb->s_frozen != SB_UNFROZEN)
936 thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
939 case XFS_IOC_GOINGDOWN: {
942 if (!capable(CAP_SYS_ADMIN))
945 if (get_user(in, (__uint32_t __user *)arg))
946 return -XFS_ERROR(EFAULT);
948 error = xfs_fs_goingdown(mp, in);
/* Error-injection test hooks, admin-only. */
952 case XFS_IOC_ERROR_INJECTION: {
953 xfs_error_injection_t in;
955 if (!capable(CAP_SYS_ADMIN))
958 if (copy_from_user(&in, arg, sizeof(in)))
959 return -XFS_ERROR(EFAULT);
961 error = xfs_errortag_add(in.errtag, mp);
965 case XFS_IOC_ERROR_CLEARALL:
966 if (!capable(CAP_SYS_ADMIN))
969 error = xfs_errortag_clearall(mp);
/*
 * xfs_ioc_space fragment: preallocate/free file space (ALLOCSP/RESVSP
 * family). Rejects immutable/append-only inodes, fds not opened for write,
 * and non-regular files, then hands the xfs_flock64 to
 * xfs_change_file_space.
 */
979 struct xfs_inode *ip,
990 if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
991 return -XFS_ERROR(EPERM);
993 if (!(filp->f_mode & FMODE_WRITE))
994 return -XFS_ERROR(EBADF);
996 if (!S_ISREG(inode->i_mode))
997 return -XFS_ERROR(EINVAL);
999 if (copy_from_user(&bf, arg, sizeof(bf)))
1000 return -XFS_ERROR(EFAULT);
/* Non-blocking fds and invisible (DMAPI) I/O are passed down as attr flags. */
1002 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1003 attr_flags |= ATTR_NONBLOCK;
1004 if (ioflags & IO_INVIS)
1005 attr_flags |= ATTR_DMI;
1007 error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos,
/*
 * xfs_ioc_bulkstat fragment: services FSBULKSTAT, FSBULKSTAT_SINGLE and
 * FSINUMBERS. Root-only; refuses a force-shut-down filesystem. The last
 * inode processed and the record count are copied back to userspace.
 */
1018 xfs_fsop_bulkreq_t bulkreq;
1019 int count; /* # of records returned */
1020 xfs_ino_t inlast; /* last inode number */
1024 /* done = 1 if there are more stats to get and if bulkstat */
1025 /* should be called again (unused here, but used in dmapi) */
1027 if (!capable(CAP_SYS_ADMIN))
1030 if (XFS_FORCED_SHUTDOWN(mp))
1031 return -XFS_ERROR(EIO);
1033 if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
1034 return -XFS_ERROR(EFAULT);
1036 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
1037 return -XFS_ERROR(EFAULT);
1039 if ((count = bulkreq.icount) <= 0)
1040 return -XFS_ERROR(EINVAL);
/* Dispatch on the command: inode numbers, one inode, or a bulk scan. */
1042 if (cmd == XFS_IOC_FSINUMBERS)
1043 error = xfs_inumbers(mp, &inlast, &count,
1044 bulkreq.ubuffer, xfs_inumbers_fmt)
1045 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
1046 error = xfs_bulkstat_single(mp, &inlast,
1047 bulkreq.ubuffer, &done);
1048 else { /* XFS_IOC_FSBULKSTAT */
1049 if (count == 1 && inlast != 0) {
1051 error = xfs_bulkstat_single(mp, &inlast,
1052 bulkreq.ubuffer, &done);
1054 error = xfs_bulkstat(mp, &inlast, &count,
1055 (bulkstat_one_pf)xfs_bulkstat_one, NULL,
1056 sizeof(xfs_bstat_t), bulkreq.ubuffer,
1057 BULKSTAT_FG_QUICK, &done);
1064 if (bulkreq.ocount != NULL) {
1065 if (copy_to_user(bulkreq.lastip, &inlast,
1067 return -XFS_ERROR(EFAULT);
1069 if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
1070 return -XFS_ERROR(EFAULT);
/*
 * Return version-1 filesystem geometry to userspace (the v1 struct is a
 * prefix of xfs_fsop_geom_t, hence the cast; version argument 3).
 */
1077 xfs_ioc_fsgeometry_v1(
1081 xfs_fsop_geom_v1_t fsgeo;
1084 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
1088 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
1089 return -XFS_ERROR(EFAULT);
/* Return current (version 4) filesystem geometry to userspace. */
1098 xfs_fsop_geom_t fsgeo;
1101 error = xfs_fs_geometry(mp, &fsgeo, 4);
1105 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
1106 return -XFS_ERROR(EFAULT);
1111 * Linux extended inode flags interface.
/*
 * Merge Linux FS_*_FL flag bits into an existing XFS_XFLAG_* mask: each
 * supported Linux flag explicitly sets or clears its XFS counterpart,
 * leaving unrelated bits of `start` untouched.
 */
1115 xfs_merge_ioc_xflags(
1119 unsigned int xflags = start;
1121 if (flags & FS_IMMUTABLE_FL)
1122 xflags |= XFS_XFLAG_IMMUTABLE;
1124 xflags &= ~XFS_XFLAG_IMMUTABLE;
1125 if (flags & FS_APPEND_FL)
1126 xflags |= XFS_XFLAG_APPEND;
1128 xflags &= ~XFS_XFLAG_APPEND;
1129 if (flags & FS_SYNC_FL)
1130 xflags |= XFS_XFLAG_SYNC;
1132 xflags &= ~XFS_XFLAG_SYNC;
1133 if (flags & FS_NOATIME_FL)
1134 xflags |= XFS_XFLAG_NOATIME;
1136 xflags &= ~XFS_XFLAG_NOATIME;
1137 if (flags & FS_NODUMP_FL)
1138 xflags |= XFS_XFLAG_NODUMP;
1140 xflags &= ~XFS_XFLAG_NODUMP;
/*
 * Translate on-disk XFS_DIFLAG_* inode flag bits into the generic Linux
 * FS_*_FL flag bits used by the GETXFLAGS ioctl.
 */
1147 __uint16_t di_flags)
1149 unsigned int flags = 0;
1151 if (di_flags & XFS_DIFLAG_IMMUTABLE)
1152 flags |= FS_IMMUTABLE_FL;
1153 if (di_flags & XFS_DIFLAG_APPEND)
1154 flags |= FS_APPEND_FL;
1155 if (di_flags & XFS_DIFLAG_SYNC)
1156 flags |= FS_SYNC_FL;
1157 if (di_flags & XFS_DIFLAG_NOATIME)
1158 flags |= FS_NOATIME_FL;
1159 if (di_flags & XFS_DIFLAG_NODUMP)
1160 flags |= FS_NODUMP_FL;
/*
 * xfs_ioc_xattr fragment: services GETXFLAGS/SETXFLAGS and
 * FSGETXATTR/FSSETXATTR/FSGETXATTRA via a heap-allocated bhv_vattr
 * (kmalloc'ed to keep this deep ioctl path off the kernel stack).
 */
1173 struct bhv_vattr *vattr;
1178 vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
1179 if (unlikely(!vattr))
/* FSGETXATTR: report xflags, extent size, data-fork extent count, project id. */
1183 case XFS_IOC_FSGETXATTR: {
1184 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1185 XFS_AT_NEXTENTS | XFS_AT_PROJID;
1186 error = xfs_getattr(ip, vattr, 0);
1187 if (unlikely(error)) {
1192 fa.fsx_xflags = vattr->va_xflags;
1193 fa.fsx_extsize = vattr->va_extsize;
1194 fa.fsx_nextents = vattr->va_nextents;
1195 fa.fsx_projid = vattr->va_projid;
1197 if (copy_to_user(arg, &fa, sizeof(fa))) {
1204 case XFS_IOC_FSSETXATTR: {
1205 if (copy_from_user(&fa, arg, sizeof(fa))) {
1211 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1212 attr_flags |= ATTR_NONBLOCK;
1214 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
1215 vattr->va_xflags = fa.fsx_xflags;
1216 vattr->va_extsize = fa.fsx_extsize;
1217 vattr->va_projid = fa.fsx_projid;
1219 error = xfs_setattr(ip, vattr, attr_flags, NULL);
1221 __vn_revalidate(vp, vattr); /* update flags */
/* FSGETXATTRA: as FSGETXATTR but reports the attr-fork extent count. */
1226 case XFS_IOC_FSGETXATTRA: {
1227 vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \
1228 XFS_AT_ANEXTENTS | XFS_AT_PROJID;
1229 error = xfs_getattr(ip, vattr, 0);
1230 if (unlikely(error)) {
1235 fa.fsx_xflags = vattr->va_xflags;
1236 fa.fsx_extsize = vattr->va_extsize;
1237 fa.fsx_nextents = vattr->va_anextents;
1238 fa.fsx_projid = vattr->va_projid;
1240 if (copy_to_user(arg, &fa, sizeof(fa))) {
1247 case XFS_IOC_GETXFLAGS: {
1248 flags = xfs_di2lxflags(ip->i_d.di_flags);
1249 if (copy_to_user(arg, &flags, sizeof(flags)))
/* SETXFLAGS: only the flag bits this filesystem supports are accepted. */
1254 case XFS_IOC_SETXFLAGS: {
1255 if (copy_from_user(&flags, arg, sizeof(flags))) {
1260 if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
1261 FS_NOATIME_FL | FS_NODUMP_FL | \
1263 error = -EOPNOTSUPP;
1268 if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
1269 attr_flags |= ATTR_NONBLOCK;
1271 vattr->va_mask = XFS_AT_XFLAGS;
1272 vattr->va_xflags = xfs_merge_ioc_xflags(flags,
1275 error = xfs_setattr(ip, vattr, attr_flags, NULL);
1277 __vn_revalidate(vp, vattr); /* update flags */
/*
 * xfs_ioc_getbmap fragment: GETBMAP/GETBMAPA block-mapping query. The
 * header struct is copied in from *arg and the extent records are written
 * directly after it in the user buffer (arg+1).
 */
1293 struct xfs_inode *ip,
1302 if (copy_from_user(&bm, arg, sizeof(bm)))
1303 return -XFS_ERROR(EFAULT);
/* Caller must provide room for at least the header plus one extent. */
1305 if (bm.bmv_count < 2)
1306 return -XFS_ERROR(EINVAL);
/* GETBMAPA maps the attribute fork; invisible I/O suppresses DMAPI reads. */
1308 iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
1309 if (ioflags & IO_INVIS)
1310 iflags |= BMV_IF_NO_DMAPI_READ;
1312 error = xfs_getbmap(ip, &bm, (struct getbmap __user *)arg+1, iflags);
1316 if (copy_to_user(arg, &bm, sizeof(bm)))
1317 return -XFS_ERROR(EFAULT);
/*
 * xfs_ioc_getbmapx fragment: extended block-mapping query. The getbmapx
 * header is converted to/from the plain getbmap form around the call to
 * xfs_getbmap; user-supplied iflags are validated against BMV_IF_VALID.
 */
1323 struct xfs_inode *ip,
1326 struct getbmapx bmx;
1331 if (copy_from_user(&bmx, arg, sizeof(bmx)))
1332 return -XFS_ERROR(EFAULT);
1334 if (bmx.bmv_count < 2)
1335 return -XFS_ERROR(EINVAL);
1338 * Map input getbmapx structure to a getbmap
1339 * structure for xfs_getbmap.
1341 GETBMAP_CONVERT(bmx, bm);
1343 iflags = bmx.bmv_iflags;
1345 if (iflags & (~BMV_IF_VALID))
1346 return -XFS_ERROR(EINVAL);
1348 iflags |= BMV_IF_EXTENDED;
1350 error = xfs_getbmap(ip, &bm, (struct getbmapx __user *)arg+1, iflags);
1354 GETBMAP_CONVERT(bm, bmx);
1356 if (copy_to_user(arg, &bmx, sizeof(bmx)))
1357 return -XFS_ERROR(EFAULT);