/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static struct kmem_cache *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)

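/*
 * Design note (illustrative): the mempool below keeps MIN_POOL_READ
 * nfs_read_data structures in reserve, so a read can still be issued
 * when the system is too short on memory for a fresh allocation to
 * succeed -- which matters when the read itself is needed to make
 * progress (e.g. paging against a swapfile on NFS, cf. the
 * NFS_RPC_SWAPFLAGS handling in nfs_read_rpcsetup() below).
 */
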
struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

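/*
 * Illustrative note: for small requests the page pointers fit in the
 * embedded page_array (NFS_PAGEVEC_SIZE entries, typically 8), so no
 * second allocation is needed; only larger readahead batches fall
 * back to an external kcalloc()'d pagevec, freed in
 * nfs_readdata_free() below.
 */
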
static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
	struct nfs_read_data *rdata = data;

	put_nfs_open_context(rdata->args.context);
	nfs_readdata_free(rdata);
}

static int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			zero_user(*pages, base, remainder);
			break;
		}
		zero_user(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}

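/*
 * Worked example (illustrative, assuming 4K pages): args.pgbase = 0,
 * args.count = 8192 and res.count = 5000 with res.eof set gives
 * remainder = 3192 and base = 5000, i.e. offset 904 within the second
 * page; a single zero_user(*pages, 904, 3192) then clears the tail of
 * that page, since the server has told us there is no data there.
 */
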
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_list_add_request(new, &one_request);
	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		nfs_pagein_multi(inode, &one_request, 1, len, 0);
	else
		nfs_pagein_one(inode, &one_request, 1, len, 0);
	return 0;
}

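/*
 * Note: when the server's rsize is smaller than a page, even this
 * single-page read must be split into several wire READs, hence the
 * nfs_pagein_multi() path above; otherwise a single READ issued via
 * nfs_pagein_one() covers the whole page.
 */
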
static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Set up the NFS read request struct
 */
static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(inode),
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | swap_flags,
	};

	data->req	  = req;
	data->inode	  = inode;
	data->cred	  = msg.rpc_cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	NFS_PROTO(inode)->read_setup(data, &msg);

	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

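/*
 * Note: rpc_run_task() returns a referenced task; since the task was
 * set up with RPC_TASK_ASYNC, dropping that reference with
 * rpc_put_task() simply leaves the RPC layer to drive the READ and
 * fire the call_ops callbacks when it completes.
 */
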
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, rsize);

		data = nfs_readdata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < rsize)
			rsize = nbytes;
		ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
				rsize, offset);
		if (ret == 0)
			ret = ret2;
		offset += rsize;
		nbytes -= rsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

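/*
 * Worked example (illustrative, assuming a 4K page and rsize = 1024):
 * the first loop above allocates four nfs_read_data and sets
 * req->wb_complete = 4; the second loop issues partial READs of 1024
 * bytes at offsets 0, 1024, 2048 and 3072, all targeting the same
 * page.  The page is unlocked and marked uptodate only once all four
 * sub-requests complete (see nfs_readpage_release_partial() below).
 */
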
static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;
	int ret = -ENOMEM;

	data = nfs_readdata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
out_bad:
	nfs_async_read_error(head);
	return ret;
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	int status;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
			task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
		nfs_mark_for_revalidate(data->inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;

	if (resp->eof || resp->count == argp->count)
		return;

	/* This is a short read! */
	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0)
		return;

	/* Yes, so retry the read at the end of the data */
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call(task);
}

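/*
 * Worked example (illustrative): if argp->count = 16384 but the
 * server returns resp->count = 4096 without setting eof, the call is
 * restarted with offset and pgbase both advanced by 4096 and count
 * reduced to 12288, so the remaining data lands at the correct page
 * offsets when the retried READ completes.
 */
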
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;

	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_partial(void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
	int status = data->task.tk_status;

	if (status < 0)
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readpage_release_partial,
};

static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count == 0)
		return;
	/* Was this a short read? */
	if (data->res.eof || data->res.count == data->args.count)
		SetPageUptodate(*pages);
}

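/*
 * Worked example (illustrative, assuming 4K pages): a 16384-byte
 * request that returns 10000 bytes without eof marks only the first
 * two fully-covered pages uptodate; the partially-filled third page
 * is left for nfs_readpage_retry() to finish.
 */
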
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	if (nfs_readpage_result(task, data) != 0)
		return;
	if (task->tk_status < 0)
		return;
	/*
	 * Note: nfs_readpage_retry may change the values of
	 * data->args. In the multi-page case, we therefore need
	 * to ensure that we call nfs_readpage_set_pages_uptodate()
	 * first.
	 */
	nfs_readpage_truncate_uninitialised_page(data);
	nfs_readpage_set_pages_uptodate(data);
	nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_full(void *calldata)
{
	struct nfs_read_data *data = calldata;

	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
	nfs_readdata_release(calldata);
}

static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readpage_release_full,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

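/*
 * Note on the fscache paths in nfs_readpage() above and
 * nfs_readpages() below: a zero return from the fscache helpers means
 * the request could be satisfied from the local on-disk cache, so no
 * NFS READ needs to go on the wire; any other return falls through to
 * the ordinary asynchronous read path.
 */
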
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	SetPageError(page);
out_unlock:
	unlock_page(page);
	return error;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t rsize = server->rsize;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

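/*
 * Worked example (illustrative, assuming 4K pages): if the pageio
 * descriptor above issued 10000 bytes of READ requests in total,
 * npages = (10000 + 4095) >> 12 = 3 pages are accounted to the
 * NFSIOS_READPAGES counter.
 */
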
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	kmem_cache_destroy(nfs_rdata_cachep);
}