/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

static kmem_cache_t *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT	(30*HZ)
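/*
 * Flush every message queued on @head, reporting @err through msg->errno and
 * handing each message back to @destroy_msg, then wake anyone waiting on the
 * pipe's waitqueue.
 */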
static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}
static void
rpc_timeout_upcall_queue(void *data)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci = (struct rpc_inode *)data;
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
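/*
 * Queue an upcall message on an rpc_pipefs pipe.  If no reader currently has
 * the pipe open, the message is only accepted when RPC_PIPE_WAIT_FOR_OPEN is
 * set, and the delayed work item will purge it with -ETIMEDOUT after
 * RPC_UPCALL_TIMEOUT.
 */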
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}
static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}
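/*
 * Shut down a pipe: detach rpci->ops, purge all queued and in-flight upcalls
 * with -EPIPE, and make sure the delayed timeout work can no longer run.
 */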
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work(&rpci->queue_timeout);
		flush_workqueue(rpciod_workqueue);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}
static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}
static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}
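/*
 * Pipe file_operations: open/release keep per-pipe reader and writer counts,
 * so upcalls are only queued while a userland daemon (typically rpc.gssd)
 * actually has the pipe open for reading.
 */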
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}
static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}
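/*
 * read() side of the pipe: hand the message at the head of the queue to the
 * ->upcall() callback until msg->copied catches up with msg->len, keeping a
 * partially-read message in filp->private_data and on the in_upcall list.
 */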
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}
static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}
static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}
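/*
 * FIONREAD reports the number of queued upcall bytes still to be read,
 * including the remainder of any message currently being copied out.
 */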
static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}
static struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};
static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %u.%u.%u.%u\n",
			NIPQUAD(clnt->cl_xprt->addr.sin_addr.s_addr));
	seq_printf(m, "protocol: %s\n",
			clnt->cl_xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
	return 0;
}
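/*
 * The per-client "info" file pins the rpc_clnt for as long as it is open, so
 * the seq_file show routine above can safely dereference it.
 */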
static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			atomic_inc(&clnt->cl_users);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}
static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}
static struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};
/*
 * We have a single directory with 1 node in it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};

enum { RPCAUTH_info = 2, RPCAUTH_EOF };

static struct rpc_filelist files[] = {
	[RPCAUTH_lockd]   = { .name = "lockd",   .mode = S_IFDIR | S_IRUGO | S_IXUGO, },
	[RPCAUTH_mount]   = { .name = "mount",   .mode = S_IFDIR | S_IRUGO | S_IXUGO, },
	[RPCAUTH_nfs]     = { .name = "nfs",     .mode = S_IFDIR | S_IRUGO | S_IXUGO, },
	[RPCAUTH_portmap] = { .name = "portmap", .mode = S_IFDIR | S_IRUGO | S_IXUGO, },
	[RPCAUTH_statd]   = { .name = "statd",   .mode = S_IFDIR | S_IRUGO | S_IXUGO, },
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};
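/*
 * rpc_pipefs is pinned and released via simple_pin_fs()/simple_release_fs(),
 * so the dentries created below stay reachable while any RPC client uses them.
 */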
struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}

static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	if (path[0] == '\0')
		return -ENOENT;
	nd->mnt = rpc_get_mount();
	if (IS_ERR(nd->mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
				"pseudofilesystem \n", __FILE__, __FUNCTION__);
		return PTR_ERR(nd->mnt);
	}
	mntget(nd->mnt);
	nd->dentry = dget(rpc_mount->mnt_root);
	nd->last_type = LAST_ROOT;
	nd->flags = LOOKUP_PARENT;
	nd->depth = 0;

	if (path_walk(path, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}

static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}
static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blksize = PAGE_CACHE_SIZE;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_fop = &simple_dir_operations;
		inode->i_op = &simple_dir_inode_operations;
		inode->i_nlink++;
	default:
		break;
	}
	return inode;
}
/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (dentry->d_inode) {
				rpc_close_pipes(dentry->d_inode);
				simple_unlink(dir, dentry);
			}
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}
static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			dir->i_nlink++;
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}
static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	dir->i_nlink++;
	inode_dir_notify(dir, DN_CREATE);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}
static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	shrink_dcache_parent(dentry);
	if (dentry->d_inode)
		rpc_close_pipes(dentry->d_inode);
	if ((error = simple_rmdir(dir, dentry)) != 0)
		return error;
	inode_dir_notify(dir, DN_DELETE);
	return 0;
}
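/*
 * Look up the final path component under its parent directory and insist that
 * it does not exist yet; on success the parent's i_mutex is left held for the
 * caller (rpc_mkdir/rpc_mkpipe) to create the new entry.
 */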
static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dir = nd->dentry->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
	if (IS_ERR(dentry))
		goto out_err;
	if (dentry->d_inode) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(nd);
	return dentry;
}
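/*
 * rpc_mkdir() creates <rpc_pipefs>/<path> for an rpc_clnt and populates it
 * with the "info" file; rpc_rmdir() tears the directory down again.
 */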
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}
int
rpc_rmdir(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	rpc_depopulate(dentry);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}
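/*
 * rpc_mkpipe() creates a pipe node (S_IFSOCK) backed by rpc_pipe_fops and
 * binds the caller's rpc_pipe_ops and private data to its rpc_inode;
 * rpc_unlink() removes it again and shuts the pipe down via rpc_close_pipes().
 */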
struct dentry *
rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	inode_dir_notify(dir, DN_CREATE);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, -ENOMEM);
	goto out;
}
int
rpc_unlink(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	d_drop(dentry);
	if (dentry->d_inode) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
	}
	dput(dentry);
	inode_dir_notify(dir, DN_DELETE);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}
/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}
static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}
static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};
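/*
 * Slab constructor: when the allocator hands us a freshly constructed object
 * (SLAB_CTOR_CONSTRUCTOR), initialise the embedded VFS inode and the rpc pipe
 * bookkeeping fields.
 */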
static void
init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&rpci->vfs_inode);
		rpci->private = NULL;
		rpci->nreaders = 0;
		rpci->nwriters = 0;
		INIT_LIST_HEAD(&rpci->in_upcall);
		INIT_LIST_HEAD(&rpci->pipe);
		rpci->pipelen = 0;
		init_waitqueue_head(&rpci->waitq);
		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
		rpci->ops = NULL;
	}
}
int register_rpc_pipefs(void)
{
	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once, NULL);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	register_filesystem(&rpc_pipe_fs_type);
	return 0;
}
void unregister_rpc_pipefs(void)
{
	if (kmem_cache_destroy(rpc_inode_cachep))
		printk(KERN_WARNING "RPC: unable to free inode cache\n");
	unregister_filesystem(&rpc_pipe_fs_type);
}