/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

static struct kmem_cache *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT	(30*HZ)

static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}

static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci =
		container_of(work, struct rpc_inode, queue_timeout.work);
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}

/**
 * rpc_queue_upcall - queue an upcall message to userspace
 * @inode: inode of upcall pipe on which to queue given message
 * @msg: message to queue
 *
 * Call with an @inode created by rpc_mkpipe() to queue an upcall.
 * A userspace process may then later read the upcall by performing a
 * read on an open file for this inode.  It is up to the caller to
 * initialize the fields of @msg (other than @msg->list) appropriately.
 */
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}
EXPORT_SYMBOL(rpc_queue_upcall);
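
/*
 * Illustrative sketch (not part of the original file): queueing an upcall
 * on a pipe previously created with rpc_mkpipe().  The helper name and the
 * way the message is allocated are hypothetical; real callers (for example
 * the RPCSEC_GSS upcall code) keep their own message bookkeeping and free
 * the message from their ->destroy_msg() op.
 */
#if 0
static int example_send_upcall(struct dentry *pipe_dentry,
			       void *data, size_t len)
{
	struct rpc_pipe_msg *msg;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;
	msg->data = data;	/* buffer that ->upcall will copy to userspace */
	msg->len = len;
	/* rpc_queue_upcall() links msg->list into the pipe and wakes readers */
	return rpc_queue_upcall(pipe_dentry->d_inode, msg);
}
#endif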

static void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}

static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work_sync(&rpci->queue_timeout);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}

static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}

static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}

static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}

static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}

static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_path.dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (filp->private_data || !list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};
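
/*
 * Illustrative sketch (not part of the original file): how a userspace
 * daemon might drive a pipe through the file operations above.  The mount
 * point and pipe path are hypothetical; this is ordinary userspace C, shown
 * here only to document the read/poll/write protocol.
 */
#if 0
/* userspace side: needs <fcntl.h>, <unistd.h> and <poll.h> */
static void example_daemon_loop(const char *path)
{
	char buf[4096];
	int fd = open(path, O_RDWR);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		poll(&pfd, 1, -1);			/* wait for rpc_queue_upcall() */
		ssize_t n = read(fd, buf, sizeof(buf));	/* serviced by ->upcall */
		if (n <= 0)
			continue;
		/* ... decide on a reply, then ... */
		write(fd, buf, n);			/* delivered to ->downcall */
	}
}
#endif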

static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
	return 0;
}

static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			kref_get(&clnt->cl_kref);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};

/*
 * We have a single directory with 1 node in it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};

enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};

struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int rpc_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	struct vfsmount *mnt;

	if (path[0] == '\0')
		return -ENOENT;

	mnt = rpc_get_mount();
	if (IS_ERR(mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
				"pseudofilesystem \n", __FILE__, __FUNCTION__);
		return PTR_ERR(mnt);
	}

	if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_put(&nd->path);
	rpc_put_mount();
}

static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_fop = &simple_dir_operations;
		inode->i_op = &simple_dir_inode_operations;
		inc_nlink(inode);
	default:
		break;
	}
	return inode;
}

/*
 * FIXME: This probably has races.
 */
static void rpc_depopulate(struct dentry *parent,
			   unsigned long start, unsigned long eof)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		if (!dentry->d_inode ||
				dentry->d_inode->i_ino < start ||
				dentry->d_inode->i_ino >= eof)
			continue;
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (S_ISREG(dentry->d_inode->i_mode))
				simple_unlink(dir, dentry);
			else if (S_ISDIR(dentry->d_inode->i_mode))
				simple_rmdir(dir, dentry);
			d_delete(dentry);
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}

static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		dentry->d_op = &rpc_dentry_operations;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			inc_nlink(dir);
		d_add(dentry, inode);
		fsnotify_create(dir, dentry);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}

static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	inc_nlink(dir);
	fsnotify_mkdir(dir, dentry);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}

static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	error = simple_rmdir(dir, dentry);
	if (!error)
		d_delete(dentry);
	return error;
}
640 static struct dentry *
641 rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive)
643 struct inode *dir = parent->d_inode;
644 struct dentry *dentry;
646 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
647 dentry = lookup_one_len(name, parent, len);
650 if (!dentry->d_inode)
651 dentry->d_op = &rpc_dentry_operations;
652 else if (exclusive) {
654 dentry = ERR_PTR(-EEXIST);
659 mutex_unlock(&dir->i_mutex);
663 static struct dentry *
664 rpc_lookup_negative(char *path, struct nameidata *nd)
666 struct dentry *dentry;
669 if ((error = rpc_lookup_parent(path, nd)) != 0)
670 return ERR_PTR(error);
671 dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len,
674 rpc_release_path(nd);

/**
 * rpc_mkdir - Create a new directory in rpc_pipefs
 * @path: path from the rpc_pipefs root to the new directory
 * @rpc_client: rpc client to associate with this directory
 *
 * This creates a directory at the given @path associated with
 * @rpc_client, which will contain a file named "info" with some basic
 * information about the client, together with any "pipes" that may
 * later be created using rpc_mkpipe().
 */
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.path.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}

/**
 * rpc_rmdir - Remove a directory created with rpc_mkdir()
 * @dentry: directory to remove
 */
int
rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
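
/*
 * Illustrative sketch (not part of the original file): pairing rpc_mkdir()
 * with rpc_rmdir().  The path is a hypothetical example; real callers build
 * it from the client's program name and a unique identifier.
 */
#if 0
static struct dentry *example_client_dir;

static int example_setup(struct rpc_clnt *clnt)
{
	example_client_dir = rpc_mkdir("nfs/example_clnt", clnt);
	if (IS_ERR(example_client_dir))
		return PTR_ERR(example_client_dir);
	return 0;
}

static void example_teardown(void)
{
	rpc_rmdir(example_client_dir);	/* also removes the "info" file */
}
#endif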

/**
 * rpc_mkpipe - make an rpc_pipefs file for kernel<->userspace communication
 * @parent: dentry of directory to create new "pipe" in
 * @name: name of pipe
 * @private: private data to associate with the pipe, for the caller's use
 * @ops: operations defining the behavior of the pipe: upcall, downcall,
 *	release_pipe, and destroy_msg.
 * @flags: rpc_inode flags
 *
 * Data is made available for userspace to read by calls to
 * rpc_queue_upcall().  The actual reads will result in calls to
 * @ops->upcall, which will be called with the file pointer,
 * message, and userspace buffer to copy to.
 *
 * Writes can come at any time, and do not necessarily have to be
 * responses to upcalls.  They will result in calls to @ops->downcall.
 *
 * The @private argument passed here will be available to all these methods
 * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
 */
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_create(parent, name, strlen(name), 0);
	if (IS_ERR(dentry))
		return dentry;
	dir = parent->d_inode;
	if (dentry->d_inode) {
		rpci = RPC_I(dentry->d_inode);
		if (rpci->private != private ||
				rpci->ops != ops ||
				rpci->flags != flags) {
			dput(dentry);
			dentry = ERR_PTR(-EBUSY);
			goto out;
		}
		rpci->nkern_readwriters++;
		goto out;
	}
	inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	rpci->nkern_readwriters = 1;
	fsnotify_create(dir, dentry);
	dget(dentry);
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __FUNCTION__, parent->d_name.name, name,
			-ENOMEM);
	goto out;
}
EXPORT_SYMBOL(rpc_mkpipe);
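
/*
 * Illustrative sketch (not part of the original file): a minimal rpc_pipe_ops
 * user.  The names are hypothetical; real implementations (e.g. the gss and
 * idmapper upcall code) do considerably more parsing and bookkeeping.
 */
#if 0
static ssize_t example_upcall(struct file *filp, struct rpc_pipe_msg *msg,
			      char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	ssize_t mlen = msg->len - msg->copied;

	if (mlen > buflen)
		mlen = buflen;
	if (copy_to_user(dst, data, mlen))
		return -EFAULT;
	msg->copied += mlen;
	return mlen;
}

static ssize_t example_downcall(struct file *filp, const char __user *src,
				size_t len)
{
	/* parse the daemon's reply here */
	return len;
}

static void example_destroy_msg(struct rpc_pipe_msg *msg)
{
	kfree(msg);
}

static struct rpc_pipe_ops example_pipe_ops = {
	.upcall		= example_upcall,
	.downcall	= example_downcall,
	.destroy_msg	= example_destroy_msg,
};

static struct dentry *example_create_pipe(struct dentry *parent, void *private)
{
	/* RPC_PIPE_WAIT_FOR_OPEN lets upcalls queue before the daemon opens us;
	 * tear the pipe down again with rpc_unlink() on the returned dentry. */
	return rpc_mkpipe(parent, "example", private, &example_pipe_ops,
			  RPC_PIPE_WAIT_FOR_OPEN);
}
#endif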

/**
 * rpc_unlink - remove a pipe
 * @dentry: dentry for the pipe, as returned from rpc_mkpipe
 *
 * After this call, lookups will no longer find the pipe, and any
 * attempts to read or write using preexisting opens of the pipe will
 * return -EPIPE.
 */
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error = 0;

	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
		if (!error)
			d_delete(dentry);
	}
	dput(dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
EXPORT_SYMBOL(rpc_unlink);

/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969

static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}

static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}

static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};

static void
init_once(struct kmem_cache *cachep, void *foo)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	inode_init_once(&rpci->vfs_inode);
	rpci->private = NULL;
	rpci->nreaders = 0;
	rpci->nwriters = 0;
	INIT_LIST_HEAD(&rpci->in_upcall);
	INIT_LIST_HEAD(&rpci->in_downcall);
	INIT_LIST_HEAD(&rpci->pipe);
	rpci->pipelen = 0;
	init_waitqueue_head(&rpci->waitq);
	INIT_DELAYED_WORK(&rpci->queue_timeout,
			    rpc_timeout_upcall_queue);
	rpci->ops = NULL;
}

int register_rpc_pipefs(void)
{
	int err;

	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&rpc_pipe_fs_type);
	if (err) {
		kmem_cache_destroy(rpc_inode_cachep);
		return err;
	}
	return 0;
}

void unregister_rpc_pipefs(void)
{
	kmem_cache_destroy(rpc_inode_cachep);
	unregister_filesystem(&rpc_pipe_fs_type);
}