/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
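/*
 * Typical caller pattern (a sketch, not part of this file; fuse's
 * inode and file code drives requests this way):
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... fill in req->in.h.opcode and req->in.args ...
 *	request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */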
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
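/*
 * Illustrative caller pattern for the nofail variant (a sketch; the
 * real caller is the flush path in fuse's file code):
 *
 *	req = fuse_get_req_nofail(fc, file);	(never fails)
 *	req->in.h.opcode = FUSE_FLUSH;
 *	... fill in the fuse_flush_in argument ...
 *	request_send(fc, req);
 *	fuse_put_request(fc, req);
 */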
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < FUSE_MAX_BACKGROUND &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
			clear_bdi_congested(&fc->bdi, READ);
			clear_bdi_congested(&fc->bdi, WRITE);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}
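/*
 * Sketch of how an asynchronous sender uses the 'end' callback
 * (hypothetical callback name; the INIT reply is handled this way):
 *
 *	static void my_end(struct fuse_conn *fc, struct fuse_req *req)
 *	{
 *		... inspect req->out.h.error and req->out.args ...
 *		fuse_put_request(fc, req);
 *	}
 *
 *	req->end = my_end;
 *	request_send_background(fc, req);
 *
 * When 'end' is set, request_end() above does not drop the reference
 * itself, so the callback is expected to consume it.
 */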
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
	__releases(fc->lock) __acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock) __acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/*
		 * This is uninterruptible sleep, because data is
		 * being copied to/from the buffers of req.  During
		 * locked state, there mustn't be any filesystem
		 * operation (e.g. page fault), since that could lead
		 * to deadlock.
		 */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
static void request_send_nowait_locked(struct fuse_conn *fc,
				       struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
		set_bdi_congested(&fc->bdi, READ);
		set_bdi_congested(&fc->bdi, WRITE);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait_locked(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
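/*
 * Typical copy sequence, as used by the device read/write handlers
 * below (a sketch):
 *
 *	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
 *	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
 *	if (!err)
 *		err = fuse_copy_args(&cs, in->numargs, in->argpages,
 *				     (struct fuse_arg *) in->args, 0);
 *	fuse_copy_finish(&cs);
 *
 * fuse_copy_fill() maps the next userspace page on demand, so these
 * helpers work with arbitrarily fragmented iovecs.
 */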
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
	__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}
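/*
 * Wire layout of the interrupt request produced above (both structs
 * are defined in <linux/fuse.h>):
 *
 *	struct fuse_in_header ih;	ih.opcode == FUSE_INTERRUPT,
 *					ih.unique is a fresh ID
 *	struct fuse_interrupt_in arg;	arg.unique names the request
 *					being interrupted
 */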
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
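/*
 * Wire format seen by the daemon (sketch): each read(2) on /dev/fuse
 * yields one request, a struct fuse_in_header followed by the
 * opcode-specific argument structs, in->h.len bytes in total.  A
 * userspace read loop might look like (BUFSIZE is a hypothetical
 * buffer size, large enough for the biggest request):
 *
 *	char buf[BUFSIZE];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	struct fuse_in_header *ih = (struct fuse_in_header *) buf;
 *	... dispatch on ih->opcode, reply using ih->unique ...
 */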
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
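/*
 * Sketch of the matching reply as a daemon would produce it: a
 * struct fuse_out_header with oh.len covering the whole write and
 * oh.unique echoing the request, followed by the opcode-specific
 * reply struct (here called outarg for illustration):
 *
 *	struct fuse_out_header oh = {
 *		.len    = sizeof(oh) + sizeof(outarg),
 *		.error  = 0,
 *		.unique = ih->unique,
 *	};
 *	struct iovec iov[2] = { { &oh, sizeof(oh) },
 *				{ &outarg, sizeof(outarg) } };
 *	writev(fd, iov, 2);
 */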
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
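/*
 * A daemon can therefore multiplex /dev/fuse with poll(2) or
 * select(2) (a sketch): POLLIN means a request is waiting, POLLERR
 * that the connection was aborted or released.
 *
 *	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLIN)
 *		... read the next request ...
 */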
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
	__releases(fc->lock) __acquires(fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
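/*
 * fuse_abort_conn() is reachable from userspace via the fuse control
 * filesystem: writing to the per-connection "abort" file (typically
 * /sys/fs/fuse/connections/<N>/abort) invokes it, which is the
 * documented way to break a deadlocked filesystem.
 */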
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fuse_conn_put(fc);
	}

	return 0;
}
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}