/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_ioctl32.h"

#include <linux/dcache.h>
#include <linux/smp_lock.h>
static struct vm_operations_struct xfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops;
#endif
	struct iovec		iov = {buf, count};
	struct file		*file = iocb->ki_filp;
	vnode_t			*vp = vn_from_inode(file->f_dentry->d_inode);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
	return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos);

xfs_file_aio_read_invis(
	return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
	const char		__user *buf,

	struct iovec		iov = {(void __user *)buf, count};
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	vnode_t			*vp = vn_from_inode(inode);

	BUG_ON(iocb->ki_pos != pos);
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;

	VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
	const char		__user *buf,
	return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos);

xfs_file_aio_write_invis(
	const char		__user *buf,
	return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
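/*
 * Descriptive note: the readv/writev entry points below reuse the same
 * kiocb-based VOP_READ/VOP_WRITE paths as the AIO handlers above.
 * init_sync_kiocb() builds a synchronous kiocb on the stack, the vectored
 * request is issued through it, and the updated position is copied back
 * into *ppos afterwards.
 */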
STATIC inline ssize_t
	const struct iovec	*iov,
	unsigned long		nr_segs,

	struct inode		*inode = file->f_mapping->host;
	vnode_t			*vp = vn_from_inode(inode);

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *ppos;

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	VOP_READ(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);

	*ppos = kiocb.ki_pos;
	const struct iovec	*iov,
	unsigned long		nr_segs,
	return __xfs_file_readv(file, iov, 0, nr_segs, ppos);

xfs_file_readv_invis(
	const struct iovec	*iov,
	unsigned long		nr_segs,
	return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos);
STATIC inline ssize_t
	const struct iovec	*iov,
	unsigned long		nr_segs,

	struct inode		*inode = file->f_mapping->host;
	vnode_t			*vp = vn_from_inode(inode);

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *ppos;
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;

	VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);

	*ppos = kiocb.ki_pos;
	const struct iovec	*iov,
	unsigned long		nr_segs,
	return __xfs_file_writev(file, iov, 0, nr_segs, ppos);

xfs_file_writev_invis(
	const struct iovec	*iov,
	unsigned long		nr_segs,
	return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos);
	vnode_t			*vp = vn_from_inode(filp->f_dentry->d_inode);

	VOP_SENDFILE(vp, filp, pos, 0, count, actor, target, NULL, rval);

xfs_file_sendfile_invis(
	vnode_t			*vp = vn_from_inode(filp->f_dentry->d_inode);

	VOP_SENDFILE(vp, filp, pos, IO_INVIS, count, actor, target, NULL, rval);
xfs_file_splice_read(
	vnode_t			*vp = vn_from_inode(infilp->f_dentry->d_inode);

	VOP_SPLICE_READ(vp, infilp, pipe, len, flags, 0, NULL, rval);

xfs_file_splice_read_invis(
	vnode_t			*vp = vn_from_inode(infilp->f_dentry->d_inode);

	VOP_SPLICE_READ(vp, infilp, pipe, len, flags, IO_INVIS, NULL, rval);

xfs_file_splice_write(
	struct file		*outfilp,
	vnode_t			*vp = vn_from_inode(outfilp->f_dentry->d_inode);

	VOP_SPLICE_WRITE(vp, pipe, outfilp, len, flags, 0, NULL, rval);

xfs_file_splice_write_invis(
	struct file		*outfilp,
	vnode_t			*vp = vn_from_inode(outfilp->f_dentry->d_inode);

	VOP_SPLICE_WRITE(vp, pipe, outfilp, len, flags, IO_INVIS, NULL, rval);
	vnode_t			*vp = vn_from_inode(inode);

	if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	VOP_OPEN(vp, NULL, error);

	vnode_t			*vp = vn_from_inode(inode);

	VOP_RELEASE(vp, error);
	struct dentry		*dentry,

	struct inode		*inode = dentry->d_inode;
	vnode_t			*vp = vn_from_inode(inode);
	int			flags = FSYNC_WAIT;

	VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
#ifdef CONFIG_XFS_DMAPI
	struct vm_area_struct	*area,
	unsigned long		address,

	struct inode		*inode = area->vm_file->f_dentry->d_inode;
	vnode_t			*vp = vn_from_inode(inode);
	xfs_mount_t		*mp = XFS_VFSTOM(vp->v_vfsp);

	ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
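	/*
	 * Let the DMAPI application see the mmap fault event for this
	 * mapping before falling back to the generic fault handler; if
	 * event delivery fails, the fault is failed rather than mapping
	 * the page in.
	 */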
	error = XFS_SEND_MMAP(mp, area, 0);
	if (error)
		return NULL;

	return filemap_nopage(area, address, type);
#endif /* CONFIG_XFS_DMAPI */
	vnode_t			*vp = vn_from_inode(filp->f_dentry->d_inode);
	int			namelen, size = 0;
	size_t			rlen = PAGE_CACHE_SIZE;
	xfs_off_t		start_offset, curr_offset;
	xfs_dirent_t		*dbp = NULL;

	/* Try fairly hard to get memory */
	do {
		if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
			break;
		rlen >>= 1;
	} while (rlen >= 1024);

	if (read_buf == NULL)
		return -ENOMEM;

	uio.uio_segflg = UIO_SYSSPACE;
	curr_offset = filp->f_pos;
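	/*
	 * An f_pos of 0x7fffffff is the sentinel left behind once the end
	 * of the directory has been reached; map it to the 0xffffffff
	 * end-of-directory cookie for VOP_READDIR, otherwise resume from
	 * the offset saved in f_pos.
	 */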
	if (filp->f_pos != 0x7fffffff)
		uio.uio_offset = filp->f_pos;
	else
		uio.uio_offset = 0xffffffff;

		uio.uio_resid = iov.iov_len = rlen;
		iov.iov_base = read_buf;

		start_offset = uio.uio_offset;

		VOP_READDIR(vp, &uio, NULL, &eof, error);
		if ((uio.uio_offset == start_offset) || error) {
			break;
		}

		size = rlen - uio.uio_resid;
		dbp = (xfs_dirent_t *)read_buf;
			namelen = strlen(dbp->d_name);

			if (filldir(dirent, dbp->d_name, namelen,
					(loff_t) curr_offset & 0x7fffffff,
					(ino_t) dbp->d_ino, DT_UNKNOWN)) {
				goto done;
			}
			size -= dbp->d_reclen;
			curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
			dbp = (xfs_dirent_t *)((char *)dbp + dbp->d_reclen);

		if (size == 0)
			filp->f_pos = uio.uio_offset & 0x7fffffff;
		else if (dbp)
			filp->f_pos = curr_offset;
	struct vm_area_struct	*vma)

	struct inode		*ip = filp->f_dentry->d_inode;
	vnode_t			*vp = vn_from_inode(ip);

	vma->vm_ops = &xfs_file_vm_ops;

#ifdef CONFIG_XFS_DMAPI
	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
		vma->vm_ops = &xfs_dmapi_file_vm_ops;
	}
#endif /* CONFIG_XFS_DMAPI */

	vattr.va_mask = XFS_AT_UPDATIME;
	VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error);
	__vn_revalidate(vp, &vattr);	/* update flags */
	struct inode		*inode = filp->f_dentry->d_inode;
	vnode_t			*vp = vn_from_inode(inode);

	VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error);

	/* NOTE:  some of the ioctls return positive numbers as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
	return error;
}
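/*
 * Illustrative sketch of the return convention described above: a
 * hypothetical userspace helper (not part of this file, kept under #if 0)
 * that assumes the xfsprogs headers for XFS_IOC_READLINK_BY_HANDLE and
 * struct xfs_fsop_handlereq.  A positive return is a byte count; a true
 * error comes back negative.
 */
#if 0
#include <sys/ioctl.h>
#include <errno.h>
#include <xfs/xfs.h>

static int
example_readlink_by_handle(int fsfd, struct xfs_fsop_handlereq *hreq)
{
	int ret = ioctl(fsfd, XFS_IOC_READLINK_BY_HANDLE, hreq);

	if (ret == -1)
		return -errno;	/* true error: negative */
	return ret;		/* success: bytes of link target returned */
}
#endif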
xfs_file_ioctl_invis(
	struct inode		*inode = filp->f_dentry->d_inode;
	vnode_t			*vp = vn_from_inode(inode);

	VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);

	/* NOTE:  some of the ioctls return positive numbers as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
	return error;
}
#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
	struct vm_area_struct	*vma,
	unsigned int		newflags)

	vnode_t			*vp = vn_from_inode(vma->vm_file->f_dentry->d_inode);

	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
		if ((vma->vm_flags & VM_MAYSHARE) &&
		    (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
			xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);

			error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
		}
	}
	return error;
#endif /* HAVE_VMOP_MPROTECT */
#endif /* CONFIG_XFS_DMAPI */
#ifdef HAVE_FOP_OPEN_EXEC
/* If the user is attempting to execute a file that is offline then
 * we have to trigger a DMAPI READ event before the file is marked as busy,
 * otherwise the invisible I/O will not be able to write to the file to bring
 * it back online.
 */
	vnode_t			*vp = vn_from_inode(inode);
	xfs_mount_t		*mp = XFS_VFSTOM(vp->v_vfsp);

	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
		if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
			error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
#endif /* HAVE_FOP_OPEN_EXEC */
const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.readv		= xfs_file_readv,
	.writev		= xfs_file_writev,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.sendfile	= xfs_file_sendfile,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
	.compat_ioctl	= xfs_file_compat_ioctl,
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
	.open_exec	= xfs_file_open_exec,
#endif
};
const struct file_operations xfs_invis_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.readv		= xfs_file_readv_invis,
	.writev		= xfs_file_writev_invis,
	.aio_read	= xfs_file_aio_read_invis,
	.aio_write	= xfs_file_aio_write_invis,
	.sendfile	= xfs_file_sendfile_invis,
	.splice_read	= xfs_file_splice_read_invis,
	.splice_write	= xfs_file_splice_write_invis,
	.unlocked_ioctl	= xfs_file_ioctl_invis,
	.compat_ioctl	= xfs_file_compat_invis_ioctl,
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
};
const struct file_operations xfs_dir_file_operations = {
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.unlocked_ioctl	= xfs_file_ioctl,
	.compat_ioctl	= xfs_file_compat_ioctl,
	.fsync		= xfs_file_fsync,
};
static struct vm_operations_struct xfs_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
	.nopage		= xfs_vm_nopage,
	.populate	= filemap_populate,
#ifdef HAVE_VMOP_MPROTECT
	.mprotect	= xfs_vm_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */