fuse: style fixes
[linux-2.6] fs/fuse/file.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static const struct file_operations fuse_direct_io_file_operations;

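/*
 * Send FUSE_OPEN or FUSE_OPENDIR and wait for the reply.  O_CREAT,
 * O_EXCL and O_NOCTTY are masked out, since creation and TTY handling
 * were already done by the VFS; O_TRUNC is also masked out unless the
 * filesystem negotiated atomic open+truncate.
 */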
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
                          struct fuse_open_out *outargp)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_open_in inarg;
        struct fuse_req *req;
        int err;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
        if (!fc->atomic_o_trunc)
                inarg.flags &= ~O_TRUNC;
        req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(*outargp);
        req->out.args[0].value = outargp;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        return err;
}

struct fuse_file *fuse_file_alloc(void)
{
        struct fuse_file *ff;
        ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (ff) {
                ff->reserved_req = fuse_request_alloc();
                if (!ff->reserved_req) {
                        kfree(ff);
                        ff = NULL;
                } else {
                        INIT_LIST_HEAD(&ff->write_entry);
                        atomic_set(&ff->count, 0);
                }
        }
        return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
        fuse_request_free(ff->reserved_req);
        kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
        atomic_inc(&ff->count);
        return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
        dput(req->misc.release.dentry);
        mntput(req->misc.release.vfsmount);
        fuse_put_request(fc, req);
}

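/*
 * Drop a reference to a fuse_file.  Dropping the last reference sends
 * the RELEASE request prepared in ff->reserved_req in the background
 * and frees the fuse_file.
 */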
static void fuse_file_put(struct fuse_file *ff)
{
        if (atomic_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;
                struct inode *inode = req->misc.release.dentry->d_inode;
                struct fuse_conn *fc = get_fuse_conn(inode);
                req->end = fuse_release_end;
                request_send_background(fc, req);
                kfree(ff);
        }
}

void fuse_finish_open(struct inode *inode, struct file *file,
                      struct fuse_file *ff, struct fuse_open_out *outarg)
{
        if (outarg->open_flags & FOPEN_DIRECT_IO)
                file->f_op = &fuse_direct_io_file_operations;
        if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
                invalidate_inode_pages2(inode->i_mapping);
        if (outarg->open_flags & FOPEN_NONSEEKABLE)
                nonseekable_open(inode, file);
        ff->fh = outarg->fh;
        file->private_data = fuse_file_get(ff);
}

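/*
 * Common open path for regular files and directories: allocate a
 * fuse_file, send the open request, and finish setup based on the
 * open flags returned by userspace.
 */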
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_open_out outarg;
        struct fuse_file *ff;
        int err;

        /* VFS checks this, but only _after_ ->open() */
        if (file->f_flags & O_DIRECT)
                return -EINVAL;

        err = generic_file_open(inode, file);
        if (err)
                return err;

        ff = fuse_file_alloc();
        if (!ff)
                return -ENOMEM;

        err = fuse_send_open(inode, file, isdir, &outarg);
        if (err)
                fuse_file_free(ff);
        else {
                if (isdir)
                        outarg.open_flags &= ~FOPEN_DIRECT_IO;
                fuse_finish_open(inode, file, ff, &outarg);
        }

        return err;
}

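/*
 * Fill in the RELEASE (or RELEASEDIR) request preallocated in
 * ff->reserved_req.  Actually sending it is deferred until the last
 * reference to the fuse_file is dropped in fuse_file_put().
 */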
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
        struct fuse_req *req = ff->reserved_req;
        struct fuse_release_in *inarg = &req->misc.release.in;

        inarg->fh = ff->fh;
        inarg->flags = flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_release_in);
        req->in.args[0].value = inarg;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_file *ff = file->private_data;
        if (ff) {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_req *req = ff->reserved_req;

                fuse_release_fill(ff, get_node_id(inode), file->f_flags,
                                  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

                /* Hold vfsmount and dentry until release is finished */
                req->misc.release.vfsmount = mntget(file->f_path.mnt);
                req->misc.release.dentry = dget(file->f_path.dentry);

                spin_lock(&fc->lock);
                list_del(&ff->write_entry);
                spin_unlock(&fc->lock);
                /*
                 * Normally this will send the RELEASE request;
                 * however, if some asynchronous READ or WRITE
                 * requests are outstanding, sending will be delayed
                 * until they complete.
                 */
                fuse_file_put(ff);
        }

        /* Return value is ignored by VFS */
        return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
        return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
        return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
        u32 *k = fc->scramble_key;
        u64 v = (unsigned long) id;
        u32 v0 = v;
        u32 v1 = v >> 32;
        u32 sum = 0;
        int i;

        for (i = 0; i < 32; i++) {
                v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
                sum += 0x9E3779B9;
                v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
        }

        return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;
        bool found = false;

        spin_lock(&fc->lock);
        list_for_each_entry(req, &fi->writepages, writepages_entry) {
                pgoff_t curr_index;

                BUG_ON(req->inode != inode);
                curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
                if (curr_index == index) {
                        found = true;
                        break;
                }
        }
        spin_unlock(&fc->lock);

        return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
        struct fuse_inode *fi = get_fuse_inode(inode);

        wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
        return 0;
}

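/*
 * Flush on close.  This uses a preallocated request (nofail), so it
 * cannot fail with ENOMEM; an ENOSYS reply from userspace disables
 * further FLUSH requests on this connection.
 */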
static int fuse_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_flush_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if (fc->no_flush)
                return 0;

        req = fuse_get_req_nofail(fc, file);
        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.lock_owner = fuse_lock_owner_id(fc, id);
        req->in.h.opcode = FUSE_FLUSH;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->force = 1;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                fc->no_flush = 1;
                err = 0;
        }
        return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
        fuse_set_nowrite(inode);
        fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
                      int isdir)
{
        struct inode *inode = de->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_fsync_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
                return 0;

        /*
         * Start writeback against all dirty pages of the inode, then
         * wait for all outstanding writes, before sending the FSYNC
         * request.
         */
        err = write_inode_now(inode, 0);
        if (err)
                return err;

        fuse_sync_writes(inode);

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.fsync_flags = datasync ? 1 : 0;
        req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                if (isdir)
                        fc->no_fsyncdir = 1;
                else
                        fc->no_fsync = 1;
                err = 0;
        }
        return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
        return fuse_fsync_common(file, de, datasync, 0);
}

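/*
 * Fill in a READ request.  The reply data is received directly into
 * req->pages, and may legitimately be shorter than the requested size
 * (argvar is set).
 */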
void fuse_read_fill(struct fuse_req *req, struct file *file,
                    struct inode *inode, loff_t pos, size_t count, int opcode)
{
        struct fuse_read_in *inarg = &req->misc.read.in;
        struct fuse_file *ff = file->private_data;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        inarg->flags = file->f_flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_read_in);
        req->in.args[0].value = inarg;
        req->out.argpages = 1;
        req->out.argvar = 1;
        req->out.numargs = 1;
        req->out.args[0].size = count;
}

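/*
 * Send a synchronous READ request and return the number of bytes
 * actually read, as reported by userspace.
 */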
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
                             struct inode *inode, loff_t pos, size_t count,
                             fl_owner_t owner)
{
        struct fuse_conn *fc = get_fuse_conn(inode);

        fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
        if (owner != NULL) {
                struct fuse_read_in *inarg = &req->misc.read.in;

                inarg->read_flags |= FUSE_READ_LOCKOWNER;
                inarg->lock_owner = fuse_lock_owner_id(fc, owner);
        }
        request_send(fc, req);
        return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
                                  u64 attr_ver)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fc->lock);
        if (attr_ver == fi->attr_version && size < inode->i_size) {
                fi->attr_version = ++fc->attr_version;
                i_size_write(inode, size);
        }
        spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        size_t num_read;
        loff_t pos = page_offset(page);
        size_t count = PAGE_CACHE_SIZE;
        u64 attr_ver;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        /*
         * Page writeback can extend beyond the lifetime of the
         * page-cache page, so make sure we read a properly synced
         * page.
         */
        fuse_wait_on_page_writeback(inode, page->index);

        req = fuse_get_req(fc);
        err = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        attr_ver = fuse_get_attr_version(fc);

        req->out.page_zeroing = 1;
        req->num_pages = 1;
        req->pages[0] = page;
        num_read = fuse_send_read(req, file, inode, pos, count, NULL);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        if (!err) {
                /*
                 * Short read means EOF.  If file size is larger, truncate it
                 */
                if (num_read < count)
                        fuse_read_update_size(inode, pos + num_read, attr_ver);

                SetPageUptodate(page);
        }

        fuse_invalidate_attr(inode); /* atime changed */
 out:
        unlock_page(page);
        return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;
        size_t count = req->misc.read.in.size;
        size_t num_read = req->out.args[0].size;
        struct inode *inode = req->pages[0]->mapping->host;

        /*
         * Short read means EOF.  If file size is larger, truncate it
         */
        if (!req->out.h.error && num_read < count) {
                loff_t pos = page_offset(req->pages[0]) + num_read;
                fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
        }

        fuse_invalidate_attr(inode); /* atime changed */

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (!req->out.h.error)
                        SetPageUptodate(page);
                else
                        SetPageError(page);
                unlock_page(page);
        }
        if (req->ff)
                fuse_file_put(req->ff);
        fuse_put_request(fc, req);
}

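/*
 * Send off a batch of pages collected by fuse_readpages_fill().  If
 * the connection supports asynchronous reads, completion happens in
 * the background via fuse_readpages_end(); otherwise the request is
 * sent synchronously and completed here.
 */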
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
                                struct inode *inode)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        loff_t pos = page_offset(req->pages[0]);
        size_t count = req->num_pages << PAGE_CACHE_SHIFT;
        req->out.page_zeroing = 1;
        fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
        req->misc.read.attr_ver = fuse_get_attr_version(fc);
        if (fc->async_read) {
                struct fuse_file *ff = file->private_data;
                req->ff = fuse_file_get(ff);
                req->end = fuse_readpages_end;
                request_send_background(fc, req);
        } else {
                request_send(fc, req);
                fuse_readpages_end(fc, req);
        }
}

struct fuse_fill_data {
        struct fuse_req *req;
        struct file *file;
        struct inode *inode;
};

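/*
 * Collect contiguous pages into a single request.  The accumulated
 * request is sent off when it is full, would exceed max_read, or the
 * next page is not contiguous with the previous one.
 */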
static int fuse_readpages_fill(void *_data, struct page *page)
{
        struct fuse_fill_data *data = _data;
        struct fuse_req *req = data->req;
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);

        fuse_wait_on_page_writeback(inode, page->index);

        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
             (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
             req->pages[req->num_pages - 1]->index + 1 != page->index)) {
                fuse_send_readpages(req, data->file, inode);
                data->req = req = fuse_get_req(fc);
                if (IS_ERR(req)) {
                        unlock_page(page);
                        return PTR_ERR(req);
                }
        }
        req->pages[req->num_pages] = page;
        req->num_pages++;
        return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_fill_data data;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        data.file = file;
        data.inode = inode;
        data.req = fuse_get_req(fc);
        err = PTR_ERR(data.req);
        if (IS_ERR(data.req))
                goto out;

        err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
        if (!err) {
                if (data.req->num_pages)
                        fuse_send_readpages(data.req, file, inode);
                else
                        fuse_put_request(fc, data.req);
        }
out:
        return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                                  unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
                int err;
                /*
                 * If trying to read past EOF, make sure the i_size
                 * attribute is up-to-date.
                 */
                err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
                if (err)
                        return err;
        }

        return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

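/*
 * Fill in a WRITE request.  For cached writeback (writepage) the
 * FUSE_WRITE_CACHE flag is set and there is no originating file.
 * Connections with protocol minor version < 9 get the compat sized
 * fuse_write_in structure.
 */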
static void fuse_write_fill(struct fuse_req *req, struct file *file,
                            struct fuse_file *ff, struct inode *inode,
                            loff_t pos, size_t count, int writepage)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_write_in *inarg = &req->misc.write.in;
        struct fuse_write_out *outarg = &req->misc.write.out;

        memset(inarg, 0, sizeof(struct fuse_write_in));
        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
        inarg->flags = file ? file->f_flags : 0;
        req->in.h.opcode = FUSE_WRITE;
        req->in.h.nodeid = get_node_id(inode);
        req->in.argpages = 1;
        req->in.numargs = 2;
        if (fc->minor < 9)
                req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
        else
                req->in.args[0].size = sizeof(struct fuse_write_in);
        req->in.args[0].value = inarg;
        req->in.args[1].size = count;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(struct fuse_write_out);
        req->out.args[0].value = outarg;
}

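/*
 * Send a synchronous WRITE request and return the number of bytes
 * userspace reports to have written.
 */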
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
                              struct inode *inode, loff_t pos, size_t count,
                              fl_owner_t owner)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
        if (owner != NULL) {
                struct fuse_write_in *inarg = &req->misc.write.in;
                inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
                inarg->lock_owner = fuse_lock_owner_id(fc, owner);
        }
        request_send(fc, req);
        return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;

        *pagep = __grab_cache_page(mapping, index);
        if (!*pagep)
                return -ENOMEM;
        return 0;
}

static void fuse_write_update_size(struct inode *inode, loff_t pos)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fc->lock);
        fi->attr_version = ++fc->attr_version;
        if (pos > inode->i_size)
                i_size_write(inode, pos);
        spin_unlock(&fc->lock);
}

static int fuse_buffered_write(struct file *file, struct inode *inode,
                               loff_t pos, unsigned count, struct page *page)
{
        int err;
        size_t nres;
        struct fuse_conn *fc = get_fuse_conn(inode);
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        /*
         * Make sure writepages on the same page are not mixed up with
         * plain writes.
         */
        fuse_wait_on_page_writeback(inode, page->index);

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->num_pages = 1;
        req->pages[0] = page;
        req->page_offset = offset;
        nres = fuse_send_write(req, file, inode, pos, count, NULL);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err && !nres)
                err = -EIO;
        if (!err) {
                pos += nres;
                fuse_write_update_size(inode, pos);
                if (count == PAGE_CACHE_SIZE)
                        SetPageUptodate(page);
        }
        fuse_invalidate_attr(inode);
        return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        int res = 0;

        if (copied)
                res = fuse_buffered_write(file, inode, pos, copied, page);

        unlock_page(page);
        page_cache_release(page);
        return res;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
                                    struct inode *inode, loff_t pos,
                                    size_t count)
{
        size_t res;
        unsigned offset;
        unsigned i;

        for (i = 0; i < req->num_pages; i++)
                fuse_wait_on_page_writeback(inode, req->pages[i]->index);

        res = fuse_send_write(req, file, inode, pos, count, NULL);

        offset = req->page_offset;
        count = res;
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];

                if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
                        SetPageUptodate(page);

                if (count > PAGE_CACHE_SIZE - offset)
                        count -= PAGE_CACHE_SIZE - offset;
                else
                        count = 0;
                offset = 0;

                unlock_page(page);
                page_cache_release(page);
        }

        return res;
}

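/*
 * Copy as much data as possible from the iovec into freshly grabbed
 * page-cache pages.  Without the big_writes feature only a single
 * page is filled per request.
 */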
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                               struct address_space *mapping,
                               struct iov_iter *ii, loff_t pos)
{
        struct fuse_conn *fc = get_fuse_conn(mapping->host);
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        size_t count = 0;
        int err;

        req->page_offset = offset;

        do {
                size_t tmp;
                struct page *page;
                pgoff_t index = pos >> PAGE_CACHE_SHIFT;
                size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
                                     iov_iter_count(ii));

                bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
                err = -EFAULT;
                if (iov_iter_fault_in_readable(ii, bytes))
                        break;

                err = -ENOMEM;
                page = __grab_cache_page(mapping, index);
                if (!page)
                        break;

                pagefault_disable();
                tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
                pagefault_enable();
                flush_dcache_page(page);

                if (!tmp) {
                        unlock_page(page);
                        page_cache_release(page);
                        bytes = min(bytes, iov_iter_single_seg_count(ii));
                        goto again;
                }

                err = 0;
                req->pages[req->num_pages] = page;
                req->num_pages++;

                iov_iter_advance(ii, tmp);
                count += tmp;
                pos += tmp;
                offset += tmp;
                if (offset == PAGE_CACHE_SIZE)
                        offset = 0;

                if (!fc->big_writes)
                        break;
        } while (iov_iter_count(ii) && count < fc->max_write &&
                 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

        return count > 0 ? count : err;
}

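/*
 * Perform a buffered write by repeatedly filling a request with
 * page-cache pages and sending it, until the iovec is exhausted or a
 * short write indicates an error.
 */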
static ssize_t fuse_perform_write(struct file *file,
                                  struct address_space *mapping,
                                  struct iov_iter *ii, loff_t pos)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err = 0;
        ssize_t res = 0;

        if (is_bad_inode(inode))
                return -EIO;

        do {
                struct fuse_req *req;
                ssize_t count;

                req = fuse_get_req(fc);
                if (IS_ERR(req)) {
                        err = PTR_ERR(req);
                        break;
                }

                count = fuse_fill_write_pages(req, mapping, ii, pos);
                if (count <= 0) {
                        err = count;
                } else {
                        size_t num_written;

                        num_written = fuse_send_write_pages(req, file, inode,
                                                            pos, count);
                        err = req->out.h.error;
                        if (!err) {
                                res += num_written;
                                pos += num_written;

                                /* break out of the loop on short write */
                                if (num_written != count)
                                        err = -EIO;
                        }
                }
                fuse_put_request(fc, req);
        } while (!err && iov_iter_count(ii));

        if (res > 0)
                fuse_write_update_size(inode, pos);

        fuse_invalidate_attr(inode);

        return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count = 0;
        ssize_t written = 0;
        struct inode *inode = mapping->host;
        ssize_t err;
        struct iov_iter i;

        WARN_ON(iocb->ki_pos != pos);

        err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
        if (err)
                return err;

        mutex_lock(&inode->i_mutex);
        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;

        if (count == 0)
                goto out;

        err = file_remove_suid(file);
        if (err)
                goto out;

        file_update_time(file);

        iov_iter_init(&i, iov, nr_segs, count, 0);
        written = fuse_perform_write(file, mapping, &i, pos);
        if (written >= 0)
                iocb->ki_pos = pos + written;

out:
        current->backing_dev_info = NULL;
        mutex_unlock(&inode->i_mutex);

        return written ? written : err;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
        unsigned i;

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (write)
                        set_page_dirty_lock(page);
                put_page(page);
        }
}

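/*
 * Pin the pages backing the user buffer for direct I/O.  The transfer
 * size is limited by the number of pages a single request can carry.
 */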
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
                               unsigned nbytes, int write)
{
        unsigned long user_addr = (unsigned long) buf;
        unsigned offset = user_addr & ~PAGE_MASK;
        int npages;

        /* This doesn't work with nfsd */
        if (!current->mm)
                return -EPERM;

        nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
        npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm, user_addr, npages, write,
                                0, req->pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (npages < 0)
                return npages;

        req->num_pages = npages;
        req->page_offset = offset;
        return 0;
}

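/*
 * Loop over the user buffer, pinning up to a requestful of pages at a
 * time and sending synchronous READ or WRITE requests, until the
 * count is exhausted or a short transfer occurs.
 */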
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos, int write)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        size_t nmax = write ? fc->max_write : fc->max_read;
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        while (count) {
                size_t nres;
                size_t nbytes_limit = min(count, nmax);
                size_t nbytes;
                int err = fuse_get_user_pages(req, buf, nbytes_limit, !write);
                if (err) {
                        res = err;
                        break;
                }
                nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
                nbytes = min(nbytes_limit, nbytes);
                if (write)
                        nres = fuse_send_write(req, file, inode, pos, nbytes,
                                               current->files);
                else
                        nres = fuse_send_read(req, file, inode, pos, nbytes,
                                              current->files);
                fuse_release_user_pages(req, !write);
                if (req->out.h.error) {
                        if (!res)
                                res = req->out.h.error;
                        break;
                } else if (nres > nbytes) {
                        res = -EIO;
                        break;
                }
                count -= nres;
                res += nres;
                pos += nres;
                buf += nres;
                if (nres != nbytes)
                        break;
                if (count) {
                        fuse_put_request(fc, req);
                        req = fuse_get_req(fc);
                        if (IS_ERR(req))
                                break;
                }
        }
        /* don't put an ERR_PTR if allocating the next request failed */
        if (!IS_ERR(req))
                fuse_put_request(fc, req);
        if (res > 0) {
                if (write)
                        fuse_write_update_size(inode, pos);
                *ppos = pos;
        }
        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        ssize_t res;
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
                res = fuse_direct_io(file, buf, count, ppos, 1);
        mutex_unlock(&inode->i_mutex);
        return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
        __free_page(req->pages[0]);
        fuse_file_put(req->ff);
        fuse_put_request(fc, req);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

        list_del(&req->writepages_entry);
        dec_bdi_stat(bdi, BDI_WRITEBACK);
        dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
        bdi_writeout_inc(bdi);
        wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
        struct fuse_write_in *inarg = &req->misc.write.in;

        if (!fc->connected)
                goto out_free;

        if (inarg->offset + PAGE_CACHE_SIZE <= size) {
                inarg->size = PAGE_CACHE_SIZE;
        } else if (inarg->offset < size) {
                inarg->size = size & (PAGE_CACHE_SIZE - 1);
        } else {
                /* Got truncated off completely */
                goto out_free;
        }

        req->in.args[1].size = inarg->size;
        fi->writectr++;
        request_send_background_locked(fc, req);
        return;

 out_free:
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
        spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;

        while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
                req = list_entry(fi->queued_writes.next, struct fuse_req, list);
                list_del_init(&req->list);
                fuse_send_writepage(fc, req);
        }
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);

        mapping_set_error(inode->i_mapping, req->out.h.error);
        spin_lock(&fc->lock);
        fi->writectr--;
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
}

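/*
 * Write back a single page.  The page contents are copied to a
 * temporary page, so writeback on the original page can be ended
 * immediately; the request then proceeds against the copy, which is
 * accounted as NR_WRITEBACK_TEMP.
 */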
static int fuse_writepage_locked(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;
        struct fuse_file *ff;
        struct page *tmp_page;

        set_page_writeback(page);

        req = fuse_request_alloc_nofs();
        if (!req)
                goto err;

        tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (!tmp_page)
                goto err_free;

        spin_lock(&fc->lock);
        BUG_ON(list_empty(&fi->write_files));
        ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
        req->ff = fuse_file_get(ff);
        spin_unlock(&fc->lock);

        fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);

        copy_highpage(tmp_page, page);
        req->num_pages = 1;
        req->pages[0] = tmp_page;
        req->page_offset = 0;
        req->end = fuse_writepage_end;
        req->inode = inode;

        inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
        inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
        end_page_writeback(page);

        spin_lock(&fc->lock);
        list_add(&req->writepages_entry, &fi->writepages);
        list_add_tail(&req->list, &fi->queued_writes);
        fuse_flush_writepages(inode);
        spin_unlock(&fc->lock);

        return 0;

err_free:
        fuse_request_free(req);
err:
        end_page_writeback(page);
        return -ENOMEM;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
        int err;

        err = fuse_writepage_locked(page);
        unlock_page(page);

        return err;
}

static int fuse_launder_page(struct page *page)
{
        int err = 0;
        if (clear_page_dirty_for_io(page)) {
                struct inode *inode = page->mapping->host;
                err = fuse_writepage_locked(page);
                if (!err)
                        fuse_wait_on_page_writeback(inode, page->index);
        }
        return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
        filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise an unprivileged userspace fs would be able to block
 * unrelated operations:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        /*
         * Don't use page->mapping as it may become NULL from a
         * concurrent truncate.
         */
        struct inode *inode = vma->vm_file->f_mapping->host;

        fuse_wait_on_page_writeback(inode, page->index);
        return 0;
}

static struct vm_operations_struct fuse_file_vm_ops = {
        .close          = fuse_vma_close,
        .fault          = filemap_fault,
        .page_mkwrite   = fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
                struct inode *inode = file->f_dentry->d_inode;
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);
                struct fuse_file *ff = file->private_data;
                /*
                 * The file may be written through mmap, so chain it
                 * onto the inode's write_files list
                 */
                spin_lock(&fc->lock);
                if (list_empty(&ff->write_entry))
                        list_add(&ff->write_entry, &fi->write_files);
                spin_unlock(&fc->lock);
        }
        file_accessed(file);
        vma->vm_ops = &fuse_file_vm_ops;
        return 0;
}

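/*
 * Translate a lock description received from userspace into a struct
 * file_lock, rejecting out-of-range or inconsistent values with -EIO.
 */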
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
                                  struct file_lock *fl)
{
        switch (ffl->type) {
        case F_UNLCK:
                break;

        case F_RDLCK:
        case F_WRLCK:
                if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
                    ffl->end < ffl->start)
                        return -EIO;

                fl->fl_start = ffl->start;
                fl->fl_end = ffl->end;
                fl->fl_pid = ffl->pid;
                break;

        default:
                return -EIO;
        }
        fl->fl_type = ffl->type;
        return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
                         const struct file_lock *fl, int opcode, pid_t pid,
                         int flock)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_lk_in *arg = &req->misc.lk_in;

        arg->fh = ff->fh;
        arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
        arg->lk.start = fl->fl_start;
        arg->lk.end = fl->fl_end;
        arg->lk.type = fl->fl_type;
        arg->lk.pid = pid;
        if (flock)
                arg->lk_flags |= FUSE_LK_FLOCK;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_lk_out outarg;
        int err;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err)
                err = convert_fuse_file_lock(&outarg.lk, fl);

        return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
        pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
        int err;

        if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
                /* NLM needs asynchronous locks, which we don't support yet */
                return -ENOLCK;
        }

        /* Unlock on close is handled by the flush method */
        if (fl->fl_flags & FL_CLOSE)
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, opcode, pid, flock);
        request_send(fc, req);
        err = req->out.h.error;
        /* locking is restartable */
        if (err == -EINTR)
                err = -ERESTARTSYS;
        fuse_put_request(fc, req);
        return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (cmd == F_CANCELLK) {
                err = 0;
        } else if (cmd == F_GETLK) {
                if (fc->no_lock) {
                        posix_test_lock(file, fl);
                        err = 0;
                } else
                        err = fuse_getlk(file, fl);
        } else {
                if (fc->no_lock)
                        err = posix_lock_file(file, fl, NULL);
                else
                        err = fuse_setlk(file, fl, 0);
        }
        return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (fc->no_lock) {
                err = flock_lock_file_wait(file, fl);
        } else {
                /* emulate flock with POSIX locks */
                fl->fl_owner = (fl_owner_t) file;
                err = fuse_setlk(file, fl, 1);
        }

        return err;
}

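/*
 * Map a file block to a device block.  Only meaningful on block
 * device based filesystems; returns 0 (no mapping) otherwise, or if
 * the filesystem doesn't implement FUSE_BMAP.
 */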
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_bmap_in inarg;
        struct fuse_bmap_out outarg;
        int err;

        if (!inode->i_sb->s_bdev || fc->no_bmap)
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return 0;

        memset(&inarg, 0, sizeof(inarg));
        inarg.block = block;
        inarg.blocksize = inode->i_sb->s_blocksize;
        req->in.h.opcode = FUSE_BMAP;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS)
                fc->no_bmap = 1;

        return err ? 0 : outarg.block;
}

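/*
 * llseek needs special handling for SEEK_END: the size attribute must
 * be refreshed from userspace before i_size can be trusted.
 */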
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
        loff_t retval;
        struct inode *inode = file->f_path.dentry->d_inode;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
        case SEEK_END:
                retval = fuse_update_attributes(inode, NULL, file, NULL);
                if (retval)
                        goto out;
                offset += i_size_read(inode);
                break;
        case SEEK_CUR:
                offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                }
                retval = offset;
        }
out:
        mutex_unlock(&inode->i_mutex);
        return retval;
}

static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = fuse_file_aio_write,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        .splice_read    = generic_file_splice_read,
};

static const struct file_operations fuse_direct_io_file_operations = {
        .llseek         = fuse_file_llseek,
        .read           = fuse_direct_read,
        .write          = fuse_direct_write,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        /* no mmap and splice_read */
};

static const struct address_space_operations fuse_file_aops = {
        .readpage       = fuse_readpage,
        .writepage      = fuse_writepage,
        .launder_page   = fuse_launder_page,
        .write_begin    = fuse_write_begin,
        .write_end      = fuse_write_end,
        .readpages      = fuse_readpages,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .bmap           = fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
        inode->i_fop = &fuse_file_operations;
        inode->i_data.a_ops = &fuse_file_aops;
}