/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_ioctl32.h"
#include "xfs_vnodeops.h"

#include <linux/dcache.h>
#include <linux/smp_lock.h>

static struct vm_operations_struct xfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops;
#endif
	const struct iovec *iov,
	unsigned long nr_segs,
	struct file *file = iocb->ki_filp;

	BUG_ON(iocb->ki_pos != pos);
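	/* Opens with O_DIRECT are flagged for the core read path. */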
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	return xfs_read(XFS_I(file->f_path.dentry->d_inode), iocb, iov,
			nr_segs, &iocb->ki_pos, ioflags);
	const struct iovec *iov,
	unsigned long nr_segs,
	return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos);
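/*
 * The _invis variants add IO_INVIS, XFS's "invisible I/O" flag, so the
 * core read and write paths skip DMAPI event generation and timestamp
 * updates for these requests.
 */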
xfs_file_aio_read_invis(
	const struct iovec *iov,
	unsigned long nr_segs,
	return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
	const struct iovec *iov,
	unsigned long nr_segs,
	struct file *file = iocb->ki_filp;

	BUG_ON(iocb->ki_pos != pos);
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	return xfs_write(XFS_I(file->f_mapping->host), iocb, iov, nr_segs,
			&iocb->ki_pos, ioflags);
	const struct iovec *iov,
	unsigned long nr_segs,
	return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos);
xfs_file_aio_write_invis(
	const struct iovec *iov,
	unsigned long nr_segs,
	return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
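/*
 * The splice entry points are thin wrappers that hand the work to
 * xfs_splice_read()/xfs_splice_write() along with the XFS inode.
 */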
xfs_file_splice_read(
	struct pipe_inode_info *pipe,
	return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
			infilp, ppos, pipe, len, flags, 0);
xfs_file_splice_read_invis(
	struct pipe_inode_info *pipe,
	return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode),
			infilp, ppos, pipe, len, flags, IO_INVIS);
xfs_file_splice_write(
	struct pipe_inode_info *pipe,
	struct file *outfilp,
	return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
			pipe, outfilp, ppos, len, flags, 0);
xfs_file_splice_write_invis(
	struct pipe_inode_info *pipe,
	struct file *outfilp,
	return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode),
			pipe, outfilp, ppos, len, flags, IO_INVIS);
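	/*
	 * Without O_LARGEFILE, opening a file whose size already exceeds
	 * the traditional 2GB limit (MAX_NON_LFS) must fail.
	 */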
	if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
	return -xfs_open(XFS_I(inode));
	return -xfs_release(XFS_I(inode));
	struct dentry *dentry,
	int flags = FSYNC_WAIT;

	xfs_iflags_clear(XFS_I(dentry->d_inode), XFS_ITRUNCATED);
	return -xfs_fsync(XFS_I(dentry->d_inode), flags,
			(xfs_off_t)0, (xfs_off_t)-1);

#ifdef CONFIG_XFS_DMAPI
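/*
 * DMAPI-aware fault handler: send the mmap event to the DMAPI application
 * before falling back to the generic filemap_fault().
 */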
	struct vm_area_struct *vma,
	struct vm_fault *vmf)
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	bhv_vnode_t *vp = vn_from_inode(inode);

	ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
	if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0))
		return VM_FAULT_SIGBUS;
	return filemap_fault(vma, vmf);
#endif /* CONFIG_XFS_DMAPI */
/*
 * Unfortunately we can't just use the clean and simple readdir implementation
 * below, because nfs might call back into ->lookup from the filldir callback
 * and that will deadlock the low-level btree code.
 *
 * Hopefully we'll find a better workaround that allows us to use the optimal
 * version at least for local readdirs for 2.6.25.
 */
	struct inode *inode = filp->f_path.dentry->d_inode;
	xfs_inode_t *ip = XFS_I(inode);
	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correctness, but the XFS dir2 leaf code wants an
	 * estimate of the buffer size to calculate its readahead window
	 * and to size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough; maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, PAGE_SIZE, inode->i_size);

	error = xfs_readdir(ip, dirent, bufsize,
			(xfs_off_t *)&filp->f_pos, filldir);
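/*
 * The workaround mentioned above: xfs_hack_filldir() copies each entry into
 * a private buffer while xfs_readdir() runs, and the real filldir callback
 * is only invoked afterwards, once xfs_readdir() has dropped its locks.
 */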
struct hack_callback {
	struct hack_callback *buf = __buf;
	struct hack_dirent *de = (struct hack_dirent *)(buf->dirent + buf->used);
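	/* Each record is a fixed header plus the name, rounded up to 64 bits. */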
	reclen = ALIGN(sizeof(struct hack_dirent) + namlen, sizeof(u64));
	if (buf->used + reclen > buf->len)
	memcpy(de->name, name, namlen);
	struct inode *inode = filp->f_path.dentry->d_inode;
	xfs_inode_t *ip = XFS_I(inode);
	struct hack_callback buf;
	struct hack_dirent *de;
	xfs_off_t start_offset, curr_offset, offset;
	/* Try fairly hard to get memory. */
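	/* The allocation is retried with smaller sizes, giving up below 1k. */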
	buf.len = PAGE_CACHE_SIZE;
		buf.dirent = kmalloc(buf.len, GFP_KERNEL);
	} while (buf.len >= 1024);
	curr_offset = filp->f_pos;
	if (curr_offset == 0x7fffffff)
	offset = filp->f_pos;
	start_offset = offset;
	error = -xfs_readdir(ip, &buf, buf.len, &offset,
	if (error || offset == start_offset) {
	de = (struct hack_dirent *)buf.dirent;
	curr_offset = de->offset /* & 0x7fffffff */;
	if (filldir(dirent, de->name, de->namlen,
			curr_offset & 0x7fffffff,
			de->ino, de->d_type)) {
	reclen = ALIGN(sizeof(struct hack_dirent) + de->namlen,
	de = (struct hack_dirent *)((char *)de + reclen);
	curr_offset = de->offset /* & 0x7fffffff */;
	filp->f_pos = offset & 0x7fffffff;
	filp->f_pos = curr_offset;
	struct vm_area_struct *vma)
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
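	/* DMAPI mounts need the fault handler that generates mmap events. */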
#ifdef CONFIG_XFS_DMAPI
	if (XFS_M(filp->f_path.dentry->d_inode->i_sb)->m_flags & XFS_MOUNT_DMAPI)
		vma->vm_ops = &xfs_dmapi_file_vm_ops;
#endif /* CONFIG_XFS_DMAPI */
	struct inode *inode = filp->f_path.dentry->d_inode;

	error = xfs_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p);
	xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
	/* NOTE: some ioctls return positive numbers as a byte count
	 * indicating success, such as readlink_by_handle.  So we don't
	 * "sign flip" like most other routines.  This means true errors
	 * need to be returned as a negative value.
	 */
xfs_file_ioctl_invis(
	struct inode *inode = filp->f_path.dentry->d_inode;

	error = xfs_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, (void __user *)p);
	xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED);
	/* NOTE: some ioctls return positive numbers as a byte count
	 * indicating success, such as readlink_by_handle.  So we don't
	 * "sign flip" like most other routines.  This means true errors
	 * need to be returned as a negative value.
	 */
#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
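/*
 * Send a DMAPI mmap event before a shared mapping is upgraded from
 * read-only to writable.
 */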
	struct vm_area_struct *vma,
	unsigned int newflags)
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct xfs_mount *mp = XFS_M(inode->i_sb);

	if (mp->m_flags & XFS_MOUNT_DMAPI) {
		if ((vma->vm_flags & VM_MAYSHARE) &&
		    (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE))
			error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
#endif /* HAVE_VMOP_MPROTECT */
#endif /* CONFIG_XFS_DMAPI */
#ifdef HAVE_FOP_OPEN_EXEC
/*
 * If the user is attempting to execute a file that is offline, then we
 * have to trigger a DMAPI READ event before the file is marked as busy;
 * otherwise the invisible I/O will not be able to write to the file to
 * bring it back online.
 */
	struct xfs_mount *mp = XFS_M(inode->i_sb);

	if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI)) {
		if (DM_EVENT_ENABLED(XFS_I(inode), DM_EVENT_READ)) {
			bhv_vnode_t *vp = vn_from_inode(inode);

			return -XFS_SEND_DATA(mp, DM_EVENT_READ,
#endif /* HAVE_FOP_OPEN_EXEC */
/*
 * The mmap()d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
	struct vm_area_struct *vma,
	return block_page_mkwrite(vma, page, xfs_get_blocks);
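/*
 * Operation tables.  The _invis table is used where I/O must remain
 * invisible to DMAPI, e.g. for file descriptors obtained through the
 * XFS handle interface.
 */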
const struct file_operations xfs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = xfs_file_aio_read,
	.aio_write = xfs_file_aio_write,
	.splice_read = xfs_file_splice_read,
	.splice_write = xfs_file_splice_write,
	.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xfs_file_compat_ioctl,
#endif
	.mmap = xfs_file_mmap,
	.open = xfs_file_open,
	.release = xfs_file_release,
	.fsync = xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
	.open_exec = xfs_file_open_exec,
#endif
};
const struct file_operations xfs_invis_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = xfs_file_aio_read_invis,
	.aio_write = xfs_file_aio_write_invis,
	.splice_read = xfs_file_splice_read_invis,
	.splice_write = xfs_file_splice_write_invis,
	.unlocked_ioctl = xfs_file_ioctl_invis,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xfs_file_compat_invis_ioctl,
#endif
	.mmap = xfs_file_mmap,
	.open = xfs_file_open,
	.release = xfs_file_release,
	.fsync = xfs_file_fsync,
};
const struct file_operations xfs_dir_file_operations = {
	.read = generic_read_dir,
	.readdir = xfs_file_readdir,
	.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xfs_file_compat_ioctl,
#endif
	.fsync = xfs_file_fsync,
};

static struct vm_operations_struct xfs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = xfs_vm_page_mkwrite,
};

#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
	.fault = xfs_vm_fault,
	.page_mkwrite = xfs_vm_page_mkwrite,
#ifdef HAVE_VMOP_MPROTECT
	.mprotect = xfs_vm_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */