/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */

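/*
 * Illustrative sketch only (not part of this file's logic): an application
 * typically requests uncached NFS I/O by opening with O_DIRECT and issuing
 * ordinary reads and writes, for example:
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 1048576);
 *	ssize_t n = pread(fd, buf, 1048576, 0);
 *
 * Each such request is sent straight to the server by the code below.
 * The page-aligned buffer is a portability convention for O_DIRECT
 * rather than something this client enforces.
 */
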
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */
        ssize_t                 count,          /* bytes actually processed */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct list_head        rewrite_list;   /* saved nfs_write_data structs */
        struct nfs_write_data * commit_data;    /* special write_data for commits */
        int                     flags;
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        struct nfs_writeverf    verf;           /* unstable write verifier */
};

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

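/*
 * Completion of a request is gated by dreq->io_count: the scheduling
 * routines take one reference up front and one per RPC they dispatch,
 * and each I/O completion (and the scheduler itself, once it is done
 * dispatching) drops one.  Whoever drops the last reference calls
 * nfs_direct_complete() or nfs_direct_write_complete().
 */
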
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp->f_path.dentry->d_name.name,
                        (long long) pos, nr_segs);

        return -EINVAL;
}

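/*
 * Mark the user pages that a completed read actually filled as dirty.
 * @pgbase is the byte offset of the data within the page vector: the
 * data starts in pages[pgbase >> PAGE_SHIFT] at offset pgbase & ~PAGE_MASK
 * and covers the following @count bytes.  Compound pages are left alone.
 */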
static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
{
        unsigned int npages;
        unsigned int i;

        if (count == 0)
                return;
        pages += (pgbase >> PAGE_SHIFT);
        npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                struct page *page = pages[i];
                if (!PageCompound(page))
                        set_page_dirty(page);
        }
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                page_cache_release(pages[i]);
}

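/*
 * A freshly allocated request carries two kref references: one is dropped
 * by nfs_direct_wait() when the caller is done with the result, the other
 * by nfs_direct_complete() when all I/O has finished.
 */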
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->rewrite_list);
        dreq->iocb = NULL;
        dreq->ctx = NULL;
        spin_lock_init(&dreq->lock);
        atomic_set(&dreq->io_count, 0);
        dreq->count = 0;
        dreq->error = 0;
        dreq->flags = 0;

        return dreq;
}

static void nfs_direct_req_release(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_interruptible(&dreq->completion);

        if (!result)
                result = dreq->error;
        if (!result)
                result = dreq->count;

out:
        kref_put(&dreq->kref, nfs_direct_req_release);
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (!res)
                        res = (long) dreq->count;
                aio_complete(dreq->iocb, res, 0);
        }
        complete_all(&dreq->completion);

        kref_put(&dreq->kref, nfs_direct_req_release);
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (nfs_readpage_result(task, data) != 0)
                return;

        spin_lock(&dreq->lock);
        if (unlikely(task->tk_status < 0)) {
                dreq->error = task->tk_status;
                spin_unlock(&dreq->lock);
        } else {
                dreq->count += data->res.count;
                spin_unlock(&dreq->lock);
                nfs_direct_dirty_pages(data->pagevec,
                                data->args.pgbase,
                                data->res.count);
        }
        nfs_direct_release_pages(data->pagevec, data->npages);

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
        .rpc_call_done = nfs_direct_read_result,
        .rpc_release = nfs_readdata_release,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        size_t rsize = NFS_SERVER(inode)->rsize;
        unsigned int pgbase;
        int result;
        ssize_t started = 0;

        get_dreq(dreq);

        do {
                struct nfs_read_data *data;
                size_t bytes;

                pgbase = user_addr & ~PAGE_MASK;
                bytes = min(rsize, count);

                result = -ENOMEM;
                data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
                if (unlikely(!data))
                        break;

                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        data->npages, 1, 0, data->pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (result < 0) {
                        nfs_readdata_release(data);
                        break;
                }
                if ((unsigned)result < data->npages) {
                        nfs_direct_release_pages(data->pagevec, result);
                        nfs_readdata_release(data);
                        break;
                }

                get_dreq(dreq);

                data->req = (struct nfs_page *) dreq;
                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = data->pagevec;
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.eof = 0;
                data->res.count = bytes;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_read_direct_ops, data);
                NFS_PROTO(inode)->read_setup(data);

                data->task.tk_cookie = (unsigned long) inode;

                rpc_execute(&data->task);

                dprintk("NFS: %5u initiated direct read call "
                        "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                started += bytes;
                user_addr += bytes;
                pos += bytes;
                /* FIXME: Remove this unnecessary math from final patch */
                pgbase += bytes;
                pgbase &= ~PAGE_MASK;
                BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

                count -= bytes;
        } while (count != 0);

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);

        if (started)
                return 0;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
        ssize_t result = 0;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return -ENOMEM;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
        rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);

        return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
        while (!list_empty(&dreq->rewrite_list)) {
                struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
                nfs_direct_release_pages(data->pagevec, data->npages);
                nfs_writedata_release(data);
        }
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
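/*
 * A failed COMMIT (or a changed write verifier, typically because the
 * server rebooted) means the server may have lost the unstable data, so
 * resend every cached write as FLUSH_STABLE.  The byte count is reset to
 * zero here and re-accumulated by nfs_direct_write_result() as the resent
 * RPCs complete.
 */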
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;
        struct list_head *p;
        struct nfs_write_data *data;

        dreq->count = 0;
        get_dreq(dreq);

        list_for_each(p, &dreq->rewrite_list) {
                data = list_entry(p, struct nfs_write_data, pages);

                get_dreq(dreq);

                /*
                 * Reset data->res.
                 */
                nfs_fattr_init(&data->fattr);
                data->res.count = data->args.count;
                memset(&data->verf, 0, sizeof(data->verf));

                /*
                 * Reuse data->task; data->args should not have changed
                 * since the original request was sent.
                 */
                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                /*
                 * We're called via an RPC callback, so BKL is already held.
                 */
                rpc_execute(&data->task);

                dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                data->args.count,
                                (unsigned long long)data->args.offset);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, inode);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        /* Call the NFS version-specific code */
        if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
                return;
        if (unlikely(task->tk_status < 0)) {
                dprintk("NFS: %5u commit failed with error %d.\n",
                                task->tk_pid, task->tk_status);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        } else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
                dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }

        dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
        nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
        .rpc_call_done = nfs_direct_commit_result,
        .rpc_release = nfs_commit_release,
};

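/*
 * Send a single COMMIT covering the whole request.  An offset and count
 * of zero ask the server to commit everything it holds for this file
 * (standard NFSv3/v4 COMMIT semantics).  dreq->commit_data is handed to
 * the RPC layer here and freed by its rpc_release callback.
 */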
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        struct nfs_write_data *data = dreq->commit_data;

        data->inode = dreq->inode;
        data->cred = dreq->ctx->cred;

        data->args.fh = NFS_FH(data->inode);
        data->args.offset = 0;
        data->args.count = 0;
        data->res.count = 0;
        data->res.fattr = &data->fattr;
        data->res.verf = &data->verf;

        rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
                                &nfs_commit_direct_ops, data);
        NFS_PROTO(data->inode)->commit_setup(data, 0);

        data->task.tk_priority = RPC_PRIORITY_NORMAL;
        data->task.tk_cookie = (unsigned long)data->inode;
        /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
        dreq->commit_data = NULL;

        dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

        rpc_execute(&data->task);
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_end_data_update(inode);
                        if (dreq->commit_data != NULL)
                                nfs_commit_free(dreq->commit_data);
                        nfs_direct_free_writedata(dreq);
                        nfs_zap_mapping(inode, inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = nfs_commit_alloc();
        if (dreq->commit_data != NULL)
                dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        nfs_end_data_update(inode);
        nfs_direct_free_writedata(dreq);
        nfs_zap_mapping(inode, inode->i_mapping);
        nfs_direct_complete(dreq);
}
#endif

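/*
 * Direct write completion is a small state machine driven by dreq->flags:
 * a write reply committed only to UNSTABLE storage records the server's
 * verifier and sets NFS_ODIRECT_DO_COMMIT; a later reply with a different
 * verifier (or a failed or mismatched COMMIT) sets
 * NFS_ODIRECT_RESCHED_WRITES, which makes nfs_direct_write_complete()
 * resend everything FLUSH_STABLE.  Only when no commit or resend is
 * pending does the request complete.
 */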
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
        int status = task->tk_status;

        if (nfs_writeback_done(task, data) != 0)
                return;

        spin_lock(&dreq->lock);

        if (unlikely(dreq->error != 0))
                goto out_unlock;
        if (unlikely(status < 0)) {
                /* An error has occurred, so we should not commit */
                dreq->flags = 0;
                dreq->error = status;
        }

        dreq->count += data->res.count;

        if (data->res.verf->committed != NFS_FILE_SYNC) {
                switch (dreq->flags) {
                        case 0:
                                memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                                break;
                        case NFS_ODIRECT_DO_COMMIT:
                                if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
                                        dprintk("NFS: %5u write verify failed\n", task->tk_pid);
                                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                                }
                }
        }
out_unlock:
        spin_unlock(&dreq->lock);
}

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
        .rpc_call_done = nfs_direct_write_result,
        .rpc_release = nfs_direct_write_release,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        size_t wsize = NFS_SERVER(inode)->wsize;
        unsigned int pgbase;
        int result;
        ssize_t started = 0;

        get_dreq(dreq);

        do {
                struct nfs_write_data *data;
                size_t bytes;

                pgbase = user_addr & ~PAGE_MASK;
                bytes = min(wsize, count);

                result = -ENOMEM;
                data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
                if (unlikely(!data))
                        break;

                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        data->npages, 0, 0, data->pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (result < 0) {
                        nfs_writedata_release(data);
                        break;
                }
                if ((unsigned)result < data->npages) {
                        nfs_direct_release_pages(data->pagevec, result);
                        nfs_writedata_release(data);
                        break;
                }

                get_dreq(dreq);

                list_move_tail(&data->pages, &dreq->rewrite_list);

                data->req = (struct nfs_page *) dreq;
                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = data->pagevec;
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.count = bytes;
                data->res.verf = &data->verf;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, sync);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                rpc_execute(&data->task);

                dprintk("NFS: %5u initiated direct write call "
                        "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                started += bytes;
                user_addr += bytes;
                pos += bytes;

                /* FIXME: Remove this useless math from the final patch */
                pgbase += bytes;
                pgbase &= ~PAGE_MASK;
                BUG_ON(pgbase != (user_addr & ~PAGE_MASK));

                count -= bytes;
        } while (count != 0);

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, inode);

        if (started)
                return 0;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

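/*
 * If no commit buffer could be allocated, or the whole write fits in a
 * single wsize request, ask the server for stable (FLUSH_STABLE) writes
 * up front; a separate COMMIT would gain nothing in that case.
 */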
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
        ssize_t result = 0;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;
        size_t wsize = NFS_SERVER(inode)->wsize;
        int sync = 0;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return -ENOMEM;
        nfs_alloc_commit_data(dreq);

        if (dreq->commit_data == NULL || count < wsize)
                sync = FLUSH_STABLE;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

        nfs_begin_data_update(inode);

        rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
        if (!result)
                result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);

        return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos)
{
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        /* XXX: temporary */
        const char __user *buf = iov[0].iov_base;
        size_t count = iov[0].iov_len;

        dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        if (nr_segs != 1)
                return -EINVAL;

        if ((ssize_t) count < 0)
                goto out;
        retval = -EFAULT;
        if (!access_ok(VERIFY_WRITE, buf, count))
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos)
{
        ssize_t retval;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        /* XXX: temporary */
        const char __user *buf = iov[0].iov_base;
        size_t count = iov[0].iov_len;

        dprintk("nfs: direct write(%s/%s, %lu@%Ld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        if (nr_segs != 1)
                return -EINVAL;

        retval = generic_write_checks(file, &pos, &count, 0);
        if (retval)
                goto out;

        retval = -EINVAL;
        if ((ssize_t) count < 0)
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = -EFAULT;
        if (!access_ok(VERIFY_READ, buf, count))
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);

        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL, NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}