/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "iostat.h"	/* for nfs_add_stats() and the NFSIOS_* counters */
#define NFSDBG_FACILITY		NFSDBG_VFS
#define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)
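
/*
 * Worked example (illustrative note, not part of the original source):
 * with 4 KB pages (PAGE_SHIFT == 12), MAX_DIRECTIO_SIZE is
 * 4096UL << 12 == 16777216 bytes, so a single direct I/O request is
 * capped at 16 MB, i.e. at most 4096 pinned pages per request.
 */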
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);
static kmem_cache_t *nfs_direct_cachep;
/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */
	struct list_head	list;		/* nfs_read_data structs */
	struct file *		filp;		/* file descriptor */
	struct kiocb *		iocb;		/* controlling i/o request */
	wait_queue_head_t	wait;		/* wait for i/o completion */
	struct inode *		inode;		/* target file of I/O */
	struct page **		pages;		/* pages in our buffer */
	unsigned int		npages;		/* count of pages */
	atomic_t		complete,	/* i/os we're waiting for */
				count,		/* bytes actually processed */
				error;		/* any reported error */
};
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	struct dentry *dentry = iocb->ki_filp->f_dentry;

	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			dentry->d_name.name, (long long) pos, nr_segs);

	return -EINVAL;
}
static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
{
	int result = -ENOMEM;
	unsigned long page_count;
	size_t array_size;

	/* set an arbitrary limit to prevent type overflow */
	/* XXX: this can probably be as large as INT_MAX */
	if (size > MAX_DIRECTIO_SIZE) {
		*pages = NULL;
		return -EFBIG;
	}

	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;

	array_size = (page_count * sizeof(struct page *));
	*pages = kmalloc(array_size, GFP_KERNEL);
	if (*pages) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					page_count, (rw == READ), 0,
					*pages, NULL);
		up_read(&current->mm->mmap_sem);
		/*
		 * If we got fewer pages than expected from get_user_pages(),
		 * the user buffer runs off the end of a mapping; return EFAULT.
		 */
		if (result >= 0 && result < page_count) {
			nfs_free_user_pages(*pages, result, 0);
			*pages = NULL;
			result = -EFAULT;
		}
	}
	return result;
}
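
/*
 * Example of the page-count arithmetic above (illustrative values only):
 * with PAGE_SIZE == 4096, user_addr == 0x10ff0 and size == 0x20 span
 * two pages, because the buffer crosses a page boundary:
 * ((0x10ff0 + 0x20 + 0xfff) >> 12) - (0x10ff0 >> 12) == 0x12 - 0x10 == 2.
 */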
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct page *page = pages[i];
		if (do_dirty && !PageCompound(page))
			set_page_dirty_lock(page);
		page_cache_release(page);
	}
	kfree(pages);
}
static void nfs_direct_req_release(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
	kmem_cache_free(nfs_direct_cachep, dreq);
}
/*
 * Note we also set the number of requests we have in the dreq when we are
 * done.  This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
{
	struct list_head *list;
	struct nfs_direct_req *dreq;
	unsigned int reads = 0;
	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	init_waitqueue_head(&dreq->wait);
	INIT_LIST_HEAD(&dreq->list);
	dreq->iocb = NULL;
	atomic_set(&dreq->count, 0);
	atomic_set(&dreq->error, 0);

	list = &dreq->list;
	for(;;) {
		struct nfs_read_data *data = nfs_readdata_alloc(rpages);

		if (unlikely(!data)) {
			while (!list_empty(list)) {
				data = list_entry(list->next,
						struct nfs_read_data, pages);
				list_del(&data->pages);
				nfs_readdata_free(data);
			}
			kref_put(&dreq->kref, nfs_direct_req_release);
			return NULL;
		}

		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		reads++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	kref_get(&dreq->kref);
	atomic_set(&dreq->complete, reads);
	return dreq;
}
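
/*
 * Worked example (illustrative, not from the original source): a 100 KB
 * read with rsize == 32 KB makes the loop above allocate four
 * nfs_read_data structs (32 + 32 + 32 + 4 KB), so dreq->complete starts
 * at 4, one for each READ RPC that nfs_direct_read_schedule will send.
 */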
/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request.  If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (likely(task->tk_status >= 0))
		atomic_add(data->res.count, &dreq->count);
	else
		atomic_set(&dreq->error, task->tk_status);

	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
		if (dreq->iocb) {
			long res = atomic_read(&dreq->error);
			if (!res)
				res = atomic_read(&dreq->count);
			aio_complete(dreq->iocb, res, 0);
		}
		wake_up(&dreq->wait);
		kref_put(&dreq->kref, nfs_direct_req_release);
	}
}
static const struct rpc_call_ops nfs_read_direct_ops = {
	.rpc_call_done = nfs_direct_read_result,
	.rpc_release = nfs_readdata_release,
};
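
/*
 * Descriptive note (not in the original source): the RPC layer invokes
 * .rpc_call_done, i.e. nfs_direct_read_result(), when each async READ
 * task finishes, and then .rpc_release, i.e. nfs_readdata_release(), to
 * free the nfs_read_data struct for that task.
 */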
/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t file_offset)
{
	struct file *file = dreq->filp;
	struct inode *inode = file->f_mapping->host;
	struct nfs_open_context *ctx = (struct nfs_open_context *)
			file->private_data;
	struct list_head *list = &dreq->list;
	struct page **pages = dreq->pages;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int curpage, pgbase;

	curpage = 0;
	pgbase = user_addr & ~PAGE_MASK;
	do {
		struct nfs_read_data *data;
		size_t bytes;

		bytes = rsize;
		if (count < rsize)
			bytes = count;

		data = list_entry(list->next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->inode = inode;
		data->cred = ctx->cred;
		data->args.fh = NFS_FH(inode);
		data->args.context = ctx;
		data->args.offset = file_offset;
		data->args.pgbase = pgbase;
		data->args.pages = &pages[curpage];
		data->args.count = bytes;
		data->res.fattr = &data->fattr;
		data->res.eof = 0;
		data->res.count = bytes;

		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
				&nfs_read_direct_ops, data);
		NFS_PROTO(inode)->read_setup(data);

		data->task.tk_cookie = (unsigned long) inode;

		lock_kernel();
		rpc_execute(&data->task);
		unlock_kernel();

		dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
				data->task.tk_pid,
				inode->i_sb->s_id,
				(long long)NFS_FILEID(inode),
				bytes,
				(unsigned long long)data->args.offset);

		file_offset += bytes;
		pgbase += bytes;
		curpage += pgbase >> PAGE_SHIFT;
		pgbase &= ~PAGE_MASK;

		count -= bytes;
	} while (count != 0);
}
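
/*
 * Illustration of the page-walk arithmetic above (example values only):
 * with 4 KB pages, pgbase == 3072 and bytes == 8192, pgbase becomes
 * 11264; curpage then advances by 11264 >> 12 == 2 pages, and
 * pgbase &= ~PAGE_MASK leaves 3072, the offset into the new current
 * page.  This keeps each READ's pages/pgbase pair pointing at the right
 * spot in the pinned user buffer.
 */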
/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
{
	int result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = 0;
	if (intr) {
		result = wait_event_interruptible(dreq->wait,
					(atomic_read(&dreq->complete) == 0));
	} else {
		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
	}

	if (!result)
		result = atomic_read(&dreq->error);
	if (!result)
		result = atomic_read(&dreq->count);
out:
	kref_put(&dreq->kref, nfs_direct_req_release);
	return (ssize_t) result;
}
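
/*
 * Descriptive note (not in the original source): async callers see
 * -EIOCBQUEUED from the wait above; their final byte count or error
 * arrives later via aio_complete() in nfs_direct_read_result().
 */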
static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, unsigned int nr_pages)
{
	ssize_t result;
	sigset_t oldset;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
	if (!dreq)
		return -ENOMEM;

	dreq->pages = pages;
	dreq->npages = nr_pages;
	dreq->inode = inode;
	dreq->filp = iocb->ki_filp;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
	rpc_clnt_sigmask(clnt, &oldset);
	nfs_direct_read_schedule(dreq, user_addr, count, file_offset);
	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
	rpc_clnt_sigunmask(clnt, &oldset);

	return result;
}
static ssize_t nfs_direct_write_seg(struct inode *inode, struct nfs_open_context *ctx, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, int nr_pages)
{
	const unsigned int wsize = NFS_SERVER(inode)->wsize;
	size_t request;
	int curpage, need_commit;
	ssize_t result, tot_bytes;
	struct nfs_writeverf first_verf;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
	if (!wdata)
		return -ENOMEM;

	wdata->inode = inode;
	wdata->cred = ctx->cred;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.stable = NFS_UNSTABLE;
	if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
		wdata->args.stable = NFS_FILE_SYNC;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	nfs_begin_data_update(inode);
retry:
	need_commit = 0;
	tot_bytes = 0;
	curpage = 0;
	request = count;
	wdata->args.pgbase = user_addr & ~PAGE_MASK;
	wdata->args.offset = file_offset;
	do {
		wdata->args.count = request;
		if (wdata->args.count > wsize)
			wdata->args.count = wsize;
		wdata->args.pages = &pages[curpage];

		dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
			wdata->args.count, (long long) wdata->args.offset,
			user_addr + tot_bytes, wdata->args.pgbase, curpage);

		lock_kernel();
		result = NFS_PROTO(inode)->write(wdata);
		unlock_kernel();

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			goto out;
		}

		if (tot_bytes == 0)
			memcpy(&first_verf.verifier, &wdata->verf.verifier,
				sizeof(first_verf.verifier));
		if (wdata->verf.committed != NFS_FILE_SYNC) {
			need_commit = 1;
			if (memcmp(&first_verf.verifier, &wdata->verf.verifier,
					sizeof(first_verf.verifier)))
				goto sync_retry;
		}

		tot_bytes += result;

		/* in case of a short write: stop now, let the app recover */
		if (result < wdata->args.count)
			break;

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		curpage += wdata->args.pgbase >> PAGE_SHIFT;
		wdata->args.pgbase &= ~PAGE_MASK;
		request -= result;
	} while (request != 0);

	/*
	 * Commit data written so far, even in the event of an error
	 */
	if (need_commit) {
		wdata->args.count = tot_bytes;
		wdata->args.offset = file_offset;

		lock_kernel();
		result = NFS_PROTO(inode)->commit(wdata);
		unlock_kernel();

		if (result < 0 || memcmp(&first_verf.verifier,
					 &wdata->verf.verifier,
					 sizeof(first_verf.verifier)) != 0)
			goto sync_retry;
	}
	result = tot_bytes;

out:
	nfs_end_data_update(inode);
	nfs_writedata_free(wdata);
	return result;

sync_retry:
	wdata->args.stable = NFS_FILE_SYNC;
	goto retry;
}
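
/*
 * Background note (NFSv3 semantics, not from the original source): each
 * UNSTABLE write reply carries a server write verifier.  A verifier that
 * changes between replies, or between the writes and the COMMIT, means
 * the server may have rebooted and discarded uncommitted data, so the
 * code above jumps to sync_retry and resends the whole segment with
 * NFS_FILE_SYNC stability.
 */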
/*
 * Upon return, generic_file_direct_IO invalidates any cached pages
 * that non-direct readers might access, so they will pick up these
 * writes immediately.
 */
static ssize_t nfs_direct_write(struct inode *inode, struct nfs_open_context *ctx, const struct iovec *iov, loff_t file_offset, unsigned long nr_segs)
{
	ssize_t tot_bytes = 0;
	unsigned long seg = 0;

	while ((seg < nr_segs) && (tot_bytes >= 0)) {
		ssize_t result;
		int page_count;
		struct page **pages;
		const struct iovec *vec = &iov[seg++];
		unsigned long user_addr = (unsigned long) vec->iov_base;
		size_t size = vec->iov_len;

		page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
		if (page_count < 0) {
			nfs_free_user_pages(pages, 0, 0);
			if (tot_bytes > 0)
				break;
			return page_count;
		}

		nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, size);
		result = nfs_direct_write_seg(inode, ctx, user_addr, size,
				file_offset, pages, page_count);
		nfs_free_user_pages(pages, page_count, 0);

		if (result <= 0) {
			if (tot_bytes > 0)
				break;
			return result;
		}
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
		tot_bytes += result;
		file_offset += result;
		if (result < size)
			break;
	}

	return tot_bytes;
}
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  So our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval = -EINVAL;
	int page_count;
	struct page **pages;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;

	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	if (!access_ok(VERIFY_WRITE, buf, count))
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	page_count = nfs_get_user_pages(READ, (unsigned long) buf,
						count, &pages);
	if (page_count < 0) {
		nfs_free_user_pages(pages, 0, 0);
		retval = page_count;
		goto out;
	}

	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
						pages, page_count);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
	ssize_t retval;
	struct file *file = iocb->ki_filp;
	struct nfs_open_context *ctx =
			(struct nfs_open_context *) file->private_data;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct iovec iov = {
		.iov_base = (char __user *)buf,
		.iov_len = count,
	};

	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	retval = -EINVAL;
	if (!is_sync_kiocb(iocb))
		goto out;

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = -EFAULT;
	if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
	if (mapping->nrpages)
		invalidate_inode_pages2(mapping);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}
int nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
					sizeof(struct nfs_direct_req),
					0, SLAB_RECLAIM_ACCOUNT,
					NULL, NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_directcache(void)
{
	if (kmem_cache_destroy(nfs_direct_cachep))
		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}