/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
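 *
 * For example, an application typically requests uncached I/O simply by
 * opening the file with O_DIRECT (an illustrative user-space sketch, not
 * part of this file):
 *
 *	int fd = open("/mnt/nfs/dbfile", O_RDWR | O_DIRECT);
 *	if (fd >= 0)
 *		pread(fd, buf, len, off);   (served directly by the server)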
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#define NFSDBG_FACILITY		NFSDBG_VFS
#define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)
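
/*
 * With the common 4 KB page size (PAGE_SHIFT == 12) this caps a single
 * mapped segment at 4096 pages, i.e. 16 MB; architectures with larger
 * pages raise the cap proportionally.
 */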

static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);

static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */
	struct list_head	list;		/* nfs_read_data structs */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct inode *		inode;		/* target file of I/O */
	struct page **		pages;		/* pages in our buffer */
	unsigned int		npages;		/* count of pages */
	atomic_t		complete,	/* i/os we're waiting for */
				count,		/* bytes actually processed */
				error;		/* any reported error */
};
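
/*
 * Lifetime sketch (inferred from the code below): kref_init() in
 * nfs_direct_read_alloc() holds one reference for the process that
 * sleeps in nfs_direct_read_wait(), and kref_get() takes a second for
 * the I/O completion path; whichever side finishes last drops the
 * final reference and frees the nfs_direct_req.
 */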

/**
 * nfs_get_user_pages - find and set up pages underlying user's buffer
 * @rw: direction (read or write)
 * @user_addr: starting address of this segment of user's buffer
 * @size: size of this segment
 * @pages: returned array of page struct pointers underlying user's buffer
 */
static inline int
nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
		struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	/* set an arbitrary limit to prevent type overflow */
	/* XXX: this can probably be as large as INT_MAX */
	if (size > MAX_DIRECTIO_SIZE) {
		*pages = NULL;
		return -EFBIG;
	}

	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;
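	/*
	 * Worked example (assuming 4 KB pages): a 10000-byte segment
	 * starting 100 bytes into a page ends at offset 10100, so it
	 * touches three pages: ((100 + 10000 + 4095) >> 12) -
	 * (100 >> 12) == 3 - 0 == 3.
	 */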

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
		/*
		 * If we got fewer pages than expected from get_user_pages(),
		 * the user buffer runs off the end of a mapping; return EFAULT.
		 */
		if (result >= 0 && result < page_count) {
			nfs_free_user_pages(*pages, result, 0);
			*pages = NULL;
			result = -EFAULT;
		}
	}
	return result;
}

/**
 * nfs_free_user_pages - tear down page struct array
 * @pages: array of page struct pointers underlying target buffer
 * @npages: number of pages in the array
 * @do_dirty: dirty the pages as we release them
 */
static void
nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
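		/*
		 * Compound pages (hugetlb, for example) do not take
		 * part in ordinary per-page dirty tracking, so only
		 * normal pages are marked dirty before release.
		 */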
		if (do_dirty && !PageCompound(page))
			set_page_dirty_lock(page);
		page_cache_release(page);
	}
	kfree(pages);
}

/**
 * nfs_direct_req_release - release nfs_direct_req structure for direct read
 * @kref: kref object embedded in an nfs_direct_req structure
 */
static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	kmem_cache_free(nfs_direct_cachep, dreq);
}

/**
 * nfs_direct_read_alloc - allocate nfs_read_data structures for direct read
 * @nbytes: count of bytes for the read request
 * @rsize: local rsize setting
 *
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int reads = 0;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	atomic_set(&dreq->count, 0);
	atomic_set(&dreq->error, 0);

	list = &dreq->list;
	for (;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						  struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		reads++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	atomic_set(&dreq->complete, reads);
	return dreq;
}

/**
 * nfs_direct_read_result - handle a read reply for a direct read request
 * @task: RPC task that carried the NFS READ operation
 * @calldata: address of NFS READ operation control block
 *
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (likely(task->tk_status >= 0))
		atomic_add(data->res.count, &dreq->count);
	else
		atomic_set(&dreq->error, task->tk_status);

	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
		wake_up(&dreq->wait);
		kref_put(&dreq->kref, nfs_direct_req_release);
	}
}
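
/*
 * RPC-layer callbacks for the asynchronous READ tasks: .rpc_call_done
 * runs once a reply has been processed, and .rpc_release frees the
 * nfs_read_data when the RPC client is finished with the task.
 */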
static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};

/**
 * nfs_direct_read_schedule - dispatch NFS READ operations for a direct read
 * @dreq: address of nfs_direct_req struct for this request
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 *
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation.
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
		struct inode *inode, struct nfs_open_context *ctx,
		unsigned long user_addr, size_t count, loff_t file_offset)
{
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	unsigned int curpage, pgbase;
	unsigned int rsize = NFS_SERVER(inode)->rsize;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
	do {
		struct nfs_read_data *data;
		unsigned int bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = file_offset;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);
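
		/*
		 * Advance the file offset and the position within the
		 * page array.  For example, with 4 KB pages, rsize of
		 * 32768 and a buffer starting 100 bytes into its first
		 * page: after one RPC pgbase becomes 32868, so curpage
		 * advances by 8 (32868 >> 12) and pgbase wraps back to
		 * 100 (32868 & 4095).
		 */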
		file_offset += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}

/**
 * nfs_direct_read_wait - wait for I/O completion for direct reads
 * @dreq: request on which we are to wait
 * @intr: whether or not this wait can be interrupted
 *
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
{
	int result = 0;

	if (intr) {
		result = wait_event_interruptible(dreq->wait,
					(atomic_read(&dreq->complete) == 0));
	} else {
		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
	}

	if (!result)
		result = atomic_read(&dreq->error);
	if (!result)
		result = atomic_read(&dreq->count);

	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}

/**
 * nfs_direct_read_seg - Read in one iov segment.  Generate separate
 * read RPCs for each "rsize" bytes.
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 * @pages: array of addresses of page structs defining user's buffer
 * @nr_pages: number of pages in the array
 */
static ssize_t nfs_direct_read_seg(struct inode *inode,
		struct nfs_open_context *ctx, unsigned long user_addr,
		size_t count, loff_t file_offset, struct page **pages,
		unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;
	dreq->inode = inode;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
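	/*
	 * rpc_clnt_sigmask() adjusts the caller's signal mask for the
	 * duration of the I/O, so that only the signals permitted by
	 * the mount's "intr" policy can interrupt the wait below.
	 */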
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq, inode, ctx, user_addr, count,
				 file_offset);
	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}

/**
 * nfs_direct_read - For each iov segment, map the user's buffer
 * then generate read RPCs.
 * @inode: target inode
 * @ctx: target file open context
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * We've already pushed out any non-direct writes so that this read
 * will see them when we read from the server.
 */
static ssize_t
nfs_direct_read(struct inode *inode, struct nfs_open_context *ctx,
		const struct iovec *iov, loff_t file_offset,
		unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(READ, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		result = nfs_direct_read_seg(inode, ctx, user_addr, size,
					file_offset, pages, page_count);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}

	return tot_bytes;
}

/**
 * nfs_direct_write_seg - Write out one iov segment.  Generate separate
 * write RPCs for each "wsize" bytes, then commit.
 * @inode: target inode
 * @ctx: target file open context
 * @user_addr: starting address of this segment of user's buffer
 * @count: size of this segment
 * @file_offset: offset in file to begin the operation
 * @pages: array of addresses of page structs defining user's buffer
 * @nr_pages: size of pages array
 */
static ssize_t nfs_direct_write_seg(struct inode *inode,
		struct nfs_open_context *ctx, unsigned long user_addr,
		size_t count, loff_t file_offset, struct page **pages,
		int nr_pages)
{
	const unsigned int wsize = NFS_SERVER(inode)->wsize;
	size_t request;
	int curpage, need_commit;
	ssize_t result, tot_bytes;
	struct nfs_writeverf first_verf;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
	if (!wdata)
		return -ENOMEM;

	wdata->inode = inode;
	wdata->cred = ctx->cred;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.stable = NFS_UNSTABLE;
	if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
		wdata->args.stable = NFS_FILE_SYNC;
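	/*
	 * NFSv2 has no UNSTABLE/COMMIT protocol, and a request that
	 * fits in a single wsize-sized WRITE is cheaper as one
	 * FILE_SYNC write than as a write followed by a commit, so
	 * those cases go out FILE_SYNC; everything else is sent
	 * UNSTABLE and committed below.
	 */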
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	nfs_begin_data_update(inode);
retry:
	need_commit = 0;
	tot_bytes = 0;
	curpage = 0;
	request = count;
	wdata->args.pgbase = user_addr & ~PAGE_MASK;
	wdata->args.offset = file_offset;
	do {
		wdata->args.count = request;
		if (wdata->args.count > wsize)
			wdata->args.count = wsize;
		wdata->args.pages = &pages[curpage];

		dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
				wdata->args.count, (long long) wdata->args.offset,
				user_addr + tot_bytes, wdata->args.pgbase, curpage);

		lock_kernel();
		result = NFS_PROTO(inode)->write(wdata);
		unlock_kernel();

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			goto out;
		}

		if (tot_bytes == 0)
			memcpy(&first_verf.verifier, &wdata->verf.verifier,
				sizeof(first_verf.verifier));
		if (wdata->verf.committed != NFS_FILE_SYNC) {
			need_commit = 1;
			if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
					sizeof(first_verf.verifier)))
				goto sync_retry;
		}

		tot_bytes += result;

		/* in case of a short write: stop now, let the app recover */
		if (result < wdata->args.count)
			break;

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		curpage += wdata->args.pgbase >> PAGE_SHIFT;
		wdata->args.pgbase &= ~PAGE_MASK;
		request -= result;
	} while (request != 0);

	/*
	 * Commit data written so far, even in the event of an error
	 */
	if (need_commit) {
		wdata->args.count = tot_bytes;
		wdata->args.offset = file_offset;

		lock_kernel();
		result = NFS_PROTO(inode)->commit(wdata);
		unlock_kernel();

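		/*
		 * If the verifier returned by the COMMIT differs from
		 * the one seen at WRITE time, the server has likely
		 * rebooted and may have lost the unstable data, so the
		 * whole segment is re-sent with FILE_SYNC writes.
		 */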
		if (result < 0 || memcmp(&first_verf.verifier,
					 &wdata->verf.verifier,
					 sizeof(first_verf.verifier)) != 0)
			goto sync_retry;
	}
	result = tot_bytes;

out:
	nfs_end_data_update(inode);
	nfs_writedata_free(wdata);
	return result;

sync_retry:
	wdata->args.stable = NFS_FILE_SYNC;
	goto retry;
}

/**
 * nfs_direct_write - For each iov segment, map the user's buffer
 * then generate write and commit RPCs.
 * @inode: target inode
 * @ctx: target file open context
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * Upon return, generic_file_direct_IO invalidates any cached pages
 * that non-direct readers might access, so they will pick up these
 * writes immediately.
 */
static ssize_t nfs_direct_write(struct inode *inode,
		struct nfs_open_context *ctx, const struct iovec *iov,
		loff_t file_offset, unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, size);
		result = nfs_direct_write_seg(inode, ctx, user_addr, size,
					file_offset, pages, page_count);
		nfs_free_user_pages(pages, page_count, 0);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}

	return tot_bytes;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @file_offset: offset in file to begin the operation
 * @nr_segs: size of iovec array
 */
ssize_t
nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t file_offset, unsigned long nr_segs)
{
	ssize_t result = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx;
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;

	/*
	 * No support for async yet
	 */
	if (!is_sync_kiocb(iocb))
		return result;

	ctx = (struct nfs_open_context *) file->private_data;
	switch (rw) {
	case READ:
		dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_read(inode, ctx, iov,
						file_offset, nr_segs);
		break;
	case WRITE:
		dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
				dentry->d_name.name, file_offset, nr_segs);

		result = nfs_direct_write(inode, ctx, iov,
						file_offset, nr_segs);
		break;
	default:
		break;
	}
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  So our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t
nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	loff_t *ppos = &iocb->ki_pos;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = buf,
		.iov_len = count,
	};

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (!is_sync_kiocb(iocb))
		goto out;
	if ((ssize_t) count < 0)
		goto out;
	retval = -EFAULT;
	if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_read(inode, ctx, &iov, pos, 1);
	if (retval > 0)
		*ppos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t
nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = (char __user *) buf,
		.iov_len = count,
	};

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = -EINVAL;
	if (!is_sync_kiocb(iocb))
		goto out;

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
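	/*
	 * The write bypassed the page cache, so any previously cached
	 * pages for this range now hold stale data; drop them so that
	 * ordinary (cached) readers see the newly written bytes.
	 */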
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);

	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, SLAB_RECLAIM_ACCOUNT,
						NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}