/*
 * Copyright (c) 2004-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/compat.h>
#include <linux/ioctl.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_vnode.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_dfrag.h"
#include "xfs_vnodeops.h"
#include "xfs_fsops.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
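
/*
 * _NATIVE_IOC() rebuilds a 32-bit ioctl command number using the size of the
 * native structure, so that a converted request can be passed straight on to
 * the regular native ioctl handlers.
 */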
#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
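
/*
 * On architectures where BROKEN_X86_ALIGNMENT is set, the 32-bit (i386) ABI
 * packs 64-bit fields on 4-byte boundaries, so these structures have a
 * different layout than their native counterparts and must be copied in and
 * out field by field.
 */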
#ifdef BROKEN_X86_ALIGNMENT
STATIC int
xfs_compat_flock64_copyin(
	xfs_flock64_t		*bf,
	compat_xfs_flock64_t	__user *arg32)
{
	if (get_user(bf->l_type, &arg32->l_type) ||
	    get_user(bf->l_whence, &arg32->l_whence) ||
	    get_user(bf->l_start, &arg32->l_start) ||
	    get_user(bf->l_len, &arg32->l_len) ||
	    get_user(bf->l_sysid, &arg32->l_sysid) ||
	    get_user(bf->l_pid, &arg32->l_pid) ||
	    copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_compat_ioc_fsgeometry_v1(
	struct xfs_mount	*mp,
	compat_xfs_fsop_geom_v1_t __user *arg32)
{
	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return -error;
	/* The 32-bit variant simply has some padding at the end */
	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_compat_growfs_data_copyin(
	struct xfs_growfs_data	*in,
	compat_xfs_growfs_data_t __user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->imaxpct, &arg32->imaxpct))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_compat_growfs_rt_copyin(
	struct xfs_growfs_rt	*in,
	compat_xfs_growfs_rt_t	__user *arg32)
{
	if (get_user(in->newblocks, &arg32->newblocks) ||
	    get_user(in->extsize, &arg32->extsize))
		return -XFS_ERROR(EFAULT);
	return 0;
}

STATIC int
xfs_inumbers_fmt_compat(
	void			__user *ubuffer,
	const xfs_inogrp_t	*buffer,
	long			count,
	long			*written)
{
	compat_xfs_inogrp_t	__user *p32 = ubuffer;
	long			i;

	for (i = 0; i < count; i++) {
		if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
		    put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
		    put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
			return -XFS_ERROR(EFAULT);
	}
	*written = count * sizeof(*p32);
	return 0;
}

#else
#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
#endif	/* BROKEN_X86_ALIGNMENT */

STATIC int
xfs_ioctl32_bstime_copyin(
	xfs_bstime_t		*bstime,
	compat_xfs_bstime_t	__user *bstime32)
{
	compat_time_t		sec32;	/* tv_sec differs on 64 vs. 32 */

	if (get_user(sec32, &bstime32->tv_sec) ||
	    get_user(bstime->tv_nsec, &bstime32->tv_nsec))
		return -XFS_ERROR(EFAULT);
	bstime->tv_sec = sec32;
	return 0;
}

/* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */
STATIC int
xfs_ioctl32_bstat_copyin(
	xfs_bstat_t		*bstat,
	compat_xfs_bstat_t	__user *bstat32)
{
	if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
	    get_user(bstat->bs_mode, &bstat32->bs_mode) ||
	    get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
	    get_user(bstat->bs_uid, &bstat32->bs_uid) ||
	    get_user(bstat->bs_gid, &bstat32->bs_gid) ||
	    get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
	    get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
	    get_user(bstat->bs_size, &bstat32->bs_size) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
	    xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
	    get_user(bstat->bs_blocks, &bstat32->bs_blocks) ||
	    get_user(bstat->bs_xflags, &bstat32->bs_xflags) ||
	    get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
	    get_user(bstat->bs_extents, &bstat32->bs_extents) ||
	    get_user(bstat->bs_gen, &bstat32->bs_gen) ||
	    get_user(bstat->bs_projid, &bstat32->bs_projid) ||
	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
	    get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
		return -XFS_ERROR(EFAULT);
	return 0;
}

/* XFS_IOC_FSBULKSTAT and friends */

STATIC int
xfs_bstime_store_compat(
	compat_xfs_bstime_t	__user *p32,
	const xfs_bstime_t	*p)
{
	__s32			sec32;

	sec32 = p->tv_sec;
	if (put_user(sec32, &p32->tv_sec) ||
	    put_user(p->tv_nsec, &p32->tv_nsec))
		return -XFS_ERROR(EFAULT);
	return 0;
}

/* Return 0 on success or positive error (to xfs_bulkstat()) */
STATIC int
xfs_bulkstat_one_fmt_compat(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	compat_xfs_bstat_t	__user *p32 = ubuffer;

	if (ubsize < sizeof(*p32))
		return XFS_ERROR(ENOMEM);

	if (put_user(buffer->bs_ino, &p32->bs_ino) ||
	    put_user(buffer->bs_mode, &p32->bs_mode) ||
	    put_user(buffer->bs_nlink, &p32->bs_nlink) ||
	    put_user(buffer->bs_uid, &p32->bs_uid) ||
	    put_user(buffer->bs_gid, &p32->bs_gid) ||
	    put_user(buffer->bs_rdev, &p32->bs_rdev) ||
	    put_user(buffer->bs_blksize, &p32->bs_blksize) ||
	    put_user(buffer->bs_size, &p32->bs_size) ||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks, &p32->bs_blocks) ||
	    put_user(buffer->bs_xflags, &p32->bs_xflags) ||
	    put_user(buffer->bs_extsize, &p32->bs_extsize) ||
	    put_user(buffer->bs_extents, &p32->bs_extents) ||
	    put_user(buffer->bs_gen, &p32->bs_gen) ||
	    put_user(buffer->bs_projid, &p32->bs_projid) ||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
	    put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*p32);
	return 0;
}
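
/*
 * Compat wrapper that plugs the 32-bit formatter into the generic
 * single-inode bulkstat path.
 */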
STATIC int
xfs_bulkstat_one_compat(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	void		*private_data,	/* my private data */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	int		*ubused,	/* bytes used by me */
	void		*dibuff,	/* on-disk inode buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt_compat, bno,
				    ubused, dibuff, stat);
}

/* copied from xfs_ioctl.c */
STATIC int
xfs_compat_ioc_bulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	compat_xfs_fsop_bulkreq_t __user *p32)
{
	u32			addr;
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (get_user(addr, &p32->lastip))
		return -XFS_ERROR(EFAULT);
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -XFS_ERROR(EFAULT);
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -XFS_ERROR(EFAULT);
	bulkreq.ocount = compat_ptr(addr);

	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (bulkreq.ubuffer == NULL)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS_32) {
		error = xfs_inumbers(mp, &inlast, &count,
				bulkreq.ubuffer, xfs_inumbers_fmt_compat);
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		int res;

		error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
				sizeof(compat_xfs_bstat_t),
				NULL, 0, NULL, NULL, &res);
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		error = xfs_bulkstat(mp, &inlast, &count,
			xfs_bulkstat_one_compat, NULL,
			sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
			BULKSTAT_FG_QUICK, &done);
	} else
		error = XFS_ERROR(EINVAL);
	if (error)
		return -error;

	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
				 sizeof(xfs_ino_t)))
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
	}

	return 0;
}
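
/*
 * Copy in a handle request, converting the embedded 32-bit pointers to
 * native user pointers with compat_ptr().
 */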
STATIC int
xfs_compat_handlereq_copyin(
	xfs_fsop_handlereq_t		*hreq,
	compat_xfs_fsop_handlereq_t	__user *arg32)
{
	compat_xfs_fsop_handlereq_t	hreq32;

	if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	hreq->fd = hreq32.fd;
	hreq->path = compat_ptr(hreq32.path);
	hreq->oflags = hreq32.oflags;
	hreq->ihandle = compat_ptr(hreq32.ihandle);
	hreq->ihandlen = hreq32.ihandlen;
	hreq->ohandle = compat_ptr(hreq32.ohandle);
	hreq->ohandlen = compat_ptr(hreq32.ohandlen);

	return 0;
}

STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
	struct file		*parfilp,
	compat_xfs_fsop_handlereq_t *hreq)
{
	return xfs_handle_to_dentry(parfilp,
			compat_ptr(hreq->ihandle), hreq->ihandlen);
}
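
/*
 * Service XFS_IOC_ATTRLIST_BY_HANDLE_32: resolve the handle to a dentry, run
 * the attribute list into a kernel buffer, then copy the result out through
 * the 32-bit request's buffer pointer.
 */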
STATIC int
xfs_compat_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	attrlist_cursor_kern_t	*cursor;
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg,
			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	if (al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);
	/*
	 * Reject flags, only allow namespaces.
	 */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -XFS_ERROR(EINVAL);
	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	error = -ENOMEM;
	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
	if (!kbuf)
		goto out_dput;
	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
		error = -EFAULT;

 out_kfree:
	kfree(kbuf);
 out_dput:
	dput(dentry);
	return error;
}

STATIC int
xfs_compat_attrmulti_by_handle(
	struct file				*parfilp,
	void					__user *arg)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct dentry				*dentry;
	unsigned int				i, size;
	char					*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	error = E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;
	ops = memdup_user(compat_ptr(am_hreq.ops), size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}
	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = strncpy_from_user(attr_name,
				compat_ptr(ops[i].am_attrname),
				MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;
		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					dentry->d_inode, attr_name,
					compat_ptr(ops[i].am_attrvalue),
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					dentry->d_inode, attr_name,
					compat_ptr(ops[i].am_attrvalue),
					ops[i].am_length, ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					dentry->d_inode, attr_name,
					ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		default:
			ops[i].am_error = EINVAL;
		}
	}

	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = XFS_ERROR(EFAULT);

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return -error;
}
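
/*
 * Service XFS_IOC_FSSETDM_BY_HANDLE_32: set the DMAPI event mask and state
 * on the inode referred to by the handle.
 */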
STATIC int
xfs_compat_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	compat_xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg,
			   sizeof(compat_xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
		error = -XFS_ERROR(EPERM);
		goto out;
	}
	if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
		error = -XFS_ERROR(EFAULT);
		goto out;
	}

	error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);
out:
	dput(dentry);
	return error;
}
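
/*
 * 32-bit compat ioctl entry point.  Commands whose structures have the same
 * layout in both ABIs are forwarded directly to xfs_file_ioctl(); the rest
 * are converted with the helpers above first.
 */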
long
xfs_file_compat_ioctl(
	struct file		*filp,
	unsigned		cmd,
	unsigned long		p)
{
	struct inode		*inode = filp->f_path.dentry->d_inode;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			ioflags = 0;
	int			error;

	if (filp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	xfs_itrace_entry(ip);

	switch (cmd) {
	/* No size or alignment issues on any arch */
	case XFS_IOC_DIOINFO:
	case XFS_IOC_FSGEOMETRY:
	case XFS_IOC_FSGETXATTR:
	case XFS_IOC_FSSETXATTR:
	case XFS_IOC_FSGETXATTRA:
	case XFS_IOC_FSSETDM:
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
	case XFS_IOC_FSCOUNTS:
	case XFS_IOC_SET_RESBLKS:
	case XFS_IOC_GET_RESBLKS:
	case XFS_IOC_FSGROWFSLOG:
	case XFS_IOC_GOINGDOWN:
	case XFS_IOC_ERROR_INJECTION:
	case XFS_IOC_ERROR_CLEARALL:
		return xfs_file_ioctl(filp, cmd, p);
#ifndef BROKEN_X86_ALIGNMENT
	/* These are handled fine if no alignment issues */
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_FSGEOMETRY_V1:
	case XFS_IOC_FSGROWFSDATA:
	case XFS_IOC_FSGROWFSRT:
		return xfs_file_ioctl(filp, cmd, p);
#else
	case XFS_IOC_ALLOCSP_32:
	case XFS_IOC_FREESP_32:
	case XFS_IOC_ALLOCSP64_32:
	case XFS_IOC_FREESP64_32:
	case XFS_IOC_RESVSP_32:
	case XFS_IOC_UNRESVSP_32:
	case XFS_IOC_RESVSP64_32:
	case XFS_IOC_UNRESVSP64_32: {
		struct xfs_flock64	bf;

		if (xfs_compat_flock64_copyin(&bf, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
	}
	case XFS_IOC_FSGEOMETRY_V1_32:
		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
	case XFS_IOC_FSGROWFSDATA_32: {
		struct xfs_growfs_data	in;

		if (xfs_compat_growfs_data_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = xfs_growfs_data(mp, &in);
		return -error;
	}
	case XFS_IOC_FSGROWFSRT_32: {
		struct xfs_growfs_rt	in;

		if (xfs_compat_growfs_rt_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = xfs_growfs_rt(mp, &in);
		return -error;
	}
#endif
	/* long changes size, but xfs only copies out 32 bits */
	case XFS_IOC_GETXFLAGS_32:
	case XFS_IOC_SETXFLAGS_32:
	case XFS_IOC_GETVERSION_32:
		cmd = _NATIVE_IOC(cmd, long);
		return xfs_file_ioctl(filp, cmd, p);
	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;
		struct compat_xfs_swapext __user *sxu = arg;

		/* Bulk copy in up to the sx_stat field, then copy bstat */
		if (copy_from_user(&sxp, sxu,
				   offsetof(struct xfs_swapext, sx_stat)) ||
		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
			return -XFS_ERROR(EFAULT);
		error = xfs_swapext(&sxp);
		return -error;
	}
	case XFS_IOC_FSBULKSTAT_32:
	case XFS_IOC_FSBULKSTAT_SINGLE_32:
	case XFS_IOC_FSINUMBERS_32:
		return xfs_compat_ioc_bulkstat(mp, cmd, arg);
	case XFS_IOC_FD_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_FSHANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_READLINK_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
		return xfs_compat_attrlist_by_handle(filp, arg);
	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
		return xfs_compat_attrmulti_by_handle(filp, arg);
	case XFS_IOC_FSSETDM_BY_HANDLE_32:
		return xfs_compat_fssetdm_by_handle(filp, arg);
	default:
		return -XFS_ERROR(ENOIOCTLCMD);
	}
}