/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_ioctl32.h"

#include <linux/dcache.h>
#include <linux/smp_lock.h>
static struct vm_operations_struct xfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops;
#endif
	struct iovec		iov = {buf, count};
	struct file		*file = iocb->ki_filp;
	vnode_t			*vp = vn_from_inode(file->f_dentry->d_inode);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);

	return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos);

xfs_file_aio_read_invis(

	return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
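/*
 * For clarity, the read helpers follow this general shape (a sketch
 * reconstructed from the call sites above, not verbatim source): each
 * wrapper passes its arguments plus the appropriate IO_* flags to
 * __xfs_file_read(), which packs the user buffer into a single iovec and
 * hands it to VOP_READ():
 *
 *	STATIC inline ssize_t
 *	__xfs_file_read(struct kiocb *iocb, char __user *buf, int ioflags,
 *			size_t count, loff_t pos)
 *	{
 *		ssize_t		rval;
 *		...
 *		VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
 *		return rval;
 *	}
 */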
	const char		__user *buf,

	struct iovec		iov = {(void __user *)buf, count};
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	vnode_t			*vp = vn_from_inode(inode);

	BUG_ON(iocb->ki_pos != pos);
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;

	VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);

	const char		__user *buf,

	return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos);

xfs_file_aio_write_invis(
	const char		__user *buf,

	return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos);
STATIC inline ssize_t
	const struct iovec	*iov,
	unsigned long		nr_segs,

	struct inode		*inode = file->f_mapping->host;
	vnode_t			*vp = vn_from_inode(inode);

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *ppos;

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	VOP_READ(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);

	*ppos = kiocb.ki_pos;

	const struct iovec	*iov,
	unsigned long		nr_segs,

	return __xfs_file_readv(file, iov, 0, nr_segs, ppos);

xfs_file_readv_invis(
	const struct iovec	*iov,
	unsigned long		nr_segs,

	return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos);
STATIC inline ssize_t
	const struct iovec	*iov,
	unsigned long		nr_segs,

	struct inode		*inode = file->f_mapping->host;
	vnode_t			*vp = vn_from_inode(inode);

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *ppos;
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;

	VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);

	*ppos = kiocb.ki_pos;

	const struct iovec	*iov,
	unsigned long		nr_segs,

	return __xfs_file_writev(file, iov, 0, nr_segs, ppos);

xfs_file_writev_invis(
	const struct iovec	*iov,
	unsigned long		nr_segs,

	return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos);
	vnode_t			*vp = vn_from_inode(filp->f_dentry->d_inode);

	VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval);

	vnode_t			*vp = vn_from_inode(inode);

	if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)

	VOP_OPEN(vp, NULL, error);

	vnode_t			*vp = vn_from_inode(inode);

	VOP_RELEASE(vp, error);

	struct dentry		*dentry,

	struct inode		*inode = dentry->d_inode;
	vnode_t			*vp = vn_from_inode(inode);

	int			flags = FSYNC_WAIT;

	VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
/*
 * xfs_file_readdir maps to VOP_READDIR().
 * We need to build a uio, cred, ...
 */
#define nextdp(dp)	((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
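/*
 * For clarity, a minimal sketch (illustrative, not verbatim from this file)
 * of how the readdir path wires an iovec into the IRIX-style uio before
 * handing it to VOP_READDIR(); the uio_segflg/uio_offset/uio_resid usage
 * mirrors xfs_file_readdir() below, the remaining field names are assumed:
 *
 *	struct iovec	iov;
 *	uio_t		uio;
 *
 *	iov.iov_base	= read_buf;		// kernel buffer for dirents
 *	iov.iov_len	= rlen;
 *	uio.uio_iov	= &iov;
 *	uio.uio_iovcnt	= 1;
 *	uio.uio_segflg	= UIO_SYSSPACE;		// buffer lives in kernel space
 *	uio.uio_offset	= filp->f_pos;		// resume where we left off
 *	uio.uio_resid	= rlen;
 *
 *	VOP_READDIR(vp, &uio, NULL, &eof, error);
 */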
#ifdef CONFIG_XFS_DMAPI
	struct vm_area_struct	*area,
	unsigned long		address,

	struct inode		*inode = area->vm_file->f_dentry->d_inode;
	vnode_t			*vp = vn_from_inode(inode);
	xfs_mount_t		*mp = XFS_VFSTOM(vp->v_vfsp);

	ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);

	error = XFS_SEND_MMAP(mp, area, 0);

	return filemap_nopage(area, address, type);

#endif /* CONFIG_XFS_DMAPI */
	int			namelen, size = 0;
	size_t			rlen = PAGE_CACHE_SIZE;
	xfs_off_t		start_offset, curr_offset;
	xfs_dirent_t		*dbp = NULL;

	vp = vn_from_inode(filp->f_dentry->d_inode);

	/* Try fairly hard to get memory */
		if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))

	} while (rlen >= 1024);

	if (read_buf == NULL)

	uio.uio_segflg = UIO_SYSSPACE;
	curr_offset = filp->f_pos;
	if (filp->f_pos != 0x7fffffff)
		uio.uio_offset = filp->f_pos;
	else
		uio.uio_offset = 0xffffffff;

		uio.uio_resid = iov.iov_len = rlen;
		iov.iov_base = read_buf;

		start_offset = uio.uio_offset;

		VOP_READDIR(vp, &uio, NULL, &eof, error);
		if ((uio.uio_offset == start_offset) || error) {

		size = rlen - uio.uio_resid;
		dbp = (xfs_dirent_t *)read_buf;

			namelen = strlen(dbp->d_name);

			if (filldir(dirent, dbp->d_name, namelen,
					(loff_t) curr_offset & 0x7fffffff,

			size -= dbp->d_reclen;
			curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;

		filp->f_pos = uio.uio_offset & 0x7fffffff;

		filp->f_pos = curr_offset;
	struct vm_area_struct	*vma)

	struct inode		*ip = filp->f_dentry->d_inode;
	vnode_t			*vp = vn_from_inode(ip);

	vma->vm_ops = &xfs_file_vm_ops;

#ifdef CONFIG_XFS_DMAPI
	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
		vma->vm_ops = &xfs_dmapi_file_vm_ops;
	}
#endif /* CONFIG_XFS_DMAPI */

	vattr.va_mask = XFS_AT_UPDATIME;
	VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error);

		__vn_revalidate(vp, &vattr);	/* update flags */
	struct inode	*inode = filp->f_dentry->d_inode;
	vnode_t		*vp = vn_from_inode(inode);

	VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error);

	/* NOTE:  some of the ioctl's return positive #'s as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
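	/*
	 * Illustrative only (not part of the original source): given the
	 * convention above, a hypothetical in-kernel caller treats any
	 * negative value as an error and anything else as success, e.g. a
	 * byte count from readlink_by_handle:
	 *
	 *	int ret = xfs_file_ioctl(filp, cmd, arg);
	 *
	 *	if (ret < 0)
	 *		return ret;	// genuine error, already negative
	 *	// ret is 0 or a positive byte count on success
	 */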
xfs_file_ioctl_invis(

	struct inode	*inode = filp->f_dentry->d_inode;
	vnode_t		*vp = vn_from_inode(inode);

	VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error);

	/* NOTE:  some of the ioctl's return positive #'s as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
	struct vm_area_struct	*vma,
	unsigned int		newflags)

	vnode_t			*vp = vn_from_inode(vma->vm_file->f_dentry->d_inode);

	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
		if ((vma->vm_flags & VM_MAYSHARE) &&
		    (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
			xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);

			error = XFS_SEND_MMAP(mp, vma, VM_WRITE);

#endif /* HAVE_VMOP_MPROTECT */
#endif /* CONFIG_XFS_DMAPI */
#ifdef HAVE_FOP_OPEN_EXEC
/* If the user is attempting to execute a file that is offline then
 * we have to trigger a DMAPI READ event before the file is marked as busy
 * otherwise the invisible I/O will not be able to write to the file to bring
 * it back online.
 */

	vnode_t		*vp = vn_from_inode(inode);
	xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);

	if (vp->v_vfsp->vfs_flag & VFS_DMI) {

		if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
			error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,

#endif /* HAVE_FOP_OPEN_EXEC */
struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.readv		= xfs_file_readv,
	.writev		= xfs_file_writev,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.sendfile	= xfs_file_sendfile,
	.unlocked_ioctl	= xfs_file_ioctl,
	.compat_ioctl	= xfs_file_compat_ioctl,
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
	.open_exec	= xfs_file_open_exec,
#endif
};
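/*
 * For context, a sketch (assumed, not taken from this file) of how these
 * tables are typically consumed: when XFS initializes the Linux inode for a
 * regular file, it points i_op/i_fop at the XFS operation vectors (the
 * inode_operations name below is assumed), so the VFS dispatches file I/O
 * through the functions defined above:
 *
 *	switch (inode->i_mode & S_IFMT) {
 *	case S_IFREG:
 *		inode->i_op  = &xfs_inode_operations;
 *		inode->i_fop = &xfs_file_operations;
 *		break;
 *	...
 *	}
 */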
struct file_operations xfs_invis_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.readv		= xfs_file_readv_invis,
	.writev		= xfs_file_writev_invis,
	.aio_read	= xfs_file_aio_read_invis,
	.aio_write	= xfs_file_aio_write_invis,
	.sendfile	= xfs_file_sendfile,
	.unlocked_ioctl	= xfs_file_ioctl_invis,
	.compat_ioctl	= xfs_file_compat_invis_ioctl,
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
};
struct file_operations xfs_dir_file_operations = {
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.unlocked_ioctl	= xfs_file_ioctl,
	.compat_ioctl	= xfs_file_compat_ioctl,
	.fsync		= xfs_file_fsync,
};
static struct vm_operations_struct xfs_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
	.nopage		= xfs_vm_nopage,
	.populate	= filemap_populate,
#ifdef HAVE_VMOP_MPROTECT
	.mprotect	= xfs_vm_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */