/*
 * net/sunrpc/rpc_pipe.c
 *
 * Userland/kernel interface for rpcauth_gss.
 * Code shamelessly plagiarized from fs/nfsd/nfsctl.c
 * and fs/sysfs/inode.c
 *
 * Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/dnotify.h>
#include <linux/kernel.h>

#include <asm/ioctls.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count;

static struct file_system_type rpc_pipe_fs_type;

static kmem_cache_t *rpc_inode_cachep __read_mostly;

#define RPC_UPCALL_TIMEOUT	(30*HZ)
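/*
 * Flush every message on @head, completing each one with @err via its
 * destroy_msg callback, then wake up any process sleeping on the pipe's
 * waitqueue.
 */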
static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	do {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	} while (!list_empty(head));
	wake_up(&rpci->waitq);
}
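/*
 * Workqueue callback armed by rpc_queue_upcall() for RPC_PIPE_WAIT_FOR_OPEN
 * pipes: if no reader has shown up within RPC_UPCALL_TIMEOUT, discard the
 * queued messages with -ETIMEDOUT.
 */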
static void
rpc_timeout_upcall_queue(void *data)
{
	LIST_HEAD(free_list);
	struct rpc_inode *rpci = (struct rpc_inode *)data;
	struct inode *inode = &rpci->vfs_inode;
	void (*destroy_msg)(struct rpc_pipe_msg *);

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL) {
		spin_unlock(&inode->i_lock);
		return;
	}
	destroy_msg = rpci->ops->destroy_msg;
	if (rpci->nreaders == 0) {
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
	}
	spin_unlock(&inode->i_lock);
	rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
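/*
 * Queue a kernel->userspace message on the pipe.  Fails with -EPIPE when the
 * pipe has been shut down (rpci->ops == NULL); otherwise the message is
 * queued if a reader is present, or, for RPC_PIPE_WAIT_FOR_OPEN pipes, held
 * until a reader opens the pipe (with the timeout work armed above).
 */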
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -EPIPE;

	spin_lock(&inode->i_lock);
	if (rpci->ops == NULL)
		goto out;
	if (rpci->nreaders) {
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	} else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		if (list_empty(&rpci->pipe))
			queue_delayed_work(rpciod_workqueue,
					&rpci->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &rpci->pipe);
		rpci->pipelen += msg->len;
		res = 0;
	}
out:
	spin_unlock(&inode->i_lock);
	wake_up(&rpci->waitq);
	return res;
}
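/*
 * Illustrative sketch (not part of this file): an upcall producer such as an
 * RPC auth flavour typically embeds a struct rpc_pipe_msg in its own request,
 * points msg->data at the encoded upcall and then queues it, e.g.:
 *
 *	msg->data   = buf;		// hypothetical encoded upcall buffer
 *	msg->len    = buflen;
 *	msg->copied = 0;
 *	msg->errno  = 0;
 *	err = rpc_queue_upcall(pipe_dentry->d_inode, msg);
 *
 * The producer's destroy_msg callback then runs once the message has been
 * consumed or flushed with an error.
 */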
static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}
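/*
 * Shut the pipe down: detach the rpc_pipe_ops so that subsequent opens,
 * reads and upcalls fail, flush all pending messages with -EPIPE, and make
 * sure the delayed-timeout work can no longer run.
 */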
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_ops *ops;

	mutex_lock(&inode->i_mutex);
	ops = rpci->ops;
	if (ops != NULL) {
		LIST_HEAD(free_list);

		spin_lock(&inode->i_lock);
		rpci->nreaders = 0;
		list_splice_init(&rpci->in_upcall, &free_list);
		list_splice_init(&rpci->pipe, &free_list);
		rpci->pipelen = 0;
		rpci->ops = NULL;
		spin_unlock(&inode->i_lock);
		rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE);
		rpci->nwriters = 0;
		if (ops->release_pipe)
			ops->release_pipe(inode);
		cancel_delayed_work(&rpci->queue_timeout);
		flush_workqueue(rpciod_workqueue);
	}
	rpc_inode_setowner(inode, NULL);
	mutex_unlock(&inode->i_mutex);
}
static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}

static void
rpc_destroy_inode(struct inode *inode)
{
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}
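/*
 * File operations for the pipe nodes themselves.  open/release keep count of
 * readers and writers; releasing the last reader flushes any messages still
 * queued for userspace with -EAGAIN.
 */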
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	int res = -ENXIO;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops != NULL) {
		if (filp->f_mode & FMODE_READ)
			rpci->nreaders++;
		if (filp->f_mode & FMODE_WRITE)
			rpci->nwriters++;
		res = 0;
	}
	mutex_unlock(&inode->i_mutex);
	return res;
}

static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL)
		goto out;
	msg = (struct rpc_pipe_msg *)filp->private_data;
	if (msg != NULL) {
		spin_lock(&inode->i_lock);
		msg->errno = -EAGAIN;
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		rpci->nwriters--;
	if (filp->f_mode & FMODE_READ) {
		rpci->nreaders--;
		if (rpci->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&inode->i_lock);
			list_splice_init(&rpci->pipe, &free_list);
			rpci->pipelen = 0;
			spin_unlock(&inode->i_lock);
			rpc_purge_list(rpci, &free_list,
					rpci->ops->destroy_msg, -EAGAIN);
		}
	}
	if (rpci->ops->release_pipe)
		rpci->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}
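/*
 * read(): hand the oldest queued message to userspace via ops->upcall().
 * A partially copied message stays in filp->private_data (and on the
 * in_upcall list) so the next read can continue where it left off; once
 * fully copied, or on error, it is destroyed.
 */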
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct rpc_pipe_msg *msg;
	int res = 0;

	mutex_lock(&inode->i_mutex);
	if (rpci->ops == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&inode->i_lock);
		if (!list_empty(&rpci->pipe)) {
			msg = list_entry(rpci->pipe.next,
					struct rpc_pipe_msg,
					list);
			list_move(&msg->list, &rpci->in_upcall);
			rpci->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&inode->i_lock);
		if (msg == NULL)
			goto out_unlock;
	}
	/* NOTE: it is up to the callback to update msg->copied */
	res = rpci->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		list_del(&msg->list);
		spin_unlock(&inode->i_lock);
		rpci->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}
static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	int res;

	mutex_lock(&inode->i_mutex);
	res = -EPIPE;
	if (rpci->ops != NULL)
		res = rpci->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}
static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct rpc_inode *rpci;
	unsigned int mask = 0;

	rpci = RPC_I(filp->f_dentry->d_inode);
	poll_wait(filp, &rpci->waitq, wait);

	mask = POLLOUT | POLLWRNORM;
	if (rpci->ops == NULL)
		mask |= POLLERR | POLLHUP;
	if (!list_empty(&rpci->pipe))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}
static int
rpc_pipe_ioctl(struct inode *ino, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
	int len;

	switch (cmd) {
	case FIONREAD:
		if (rpci->ops == NULL)
			return -EPIPE;
		len = rpci->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = (struct rpc_pipe_msg *)filp->private_data;
			len += msg->len - msg->copied;
		}
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}

static struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.ioctl		= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};
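/*
 * The per-client "info" file: a read-only seq_file that dumps the server
 * name, program/version and transport of the rpc_clnt owning the directory.
 * Opening it takes a reference on the client so it cannot go away while the
 * file is held open.
 */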
static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;

	seq_printf(m, "RPC server: %s\n", clnt->cl_server);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %u.%u.%u.%u\n",
			NIPQUAD(clnt->cl_xprt->addr.sin_addr.s_addr));
	seq_printf(m, "protocol: %s\n",
			clnt->cl_xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
	return 0;
}
static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt;
	int ret = single_open(file, rpc_show_info, NULL);

	if (!ret) {
		struct seq_file *m = file->private_data;
		mutex_lock(&inode->i_mutex);
		clnt = RPC_I(inode)->private;
		if (clnt) {
			atomic_inc(&clnt->cl_users);
			m->private = clnt;
		} else {
			single_release(inode, file);
			ret = -EINVAL;
		}
		mutex_unlock(&inode->i_mutex);
	}
	return ret;
}

static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct rpc_clnt *clnt = (struct rpc_clnt *)m->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}

static struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};
/*
 * We have a single directory with 1 node in it.
 */
enum {
	RPCAUTH_Root = 1,
	RPCAUTH_lockd,
	RPCAUTH_mount,
	RPCAUTH_nfs,
	RPCAUTH_portmap,
	RPCAUTH_statd,
	RPCAUTH_RootEOF
};

/*
 * Description of fs contents.
 */
struct rpc_filelist {
	char *name;
	const struct file_operations *i_fop;
	int mode;
};
static struct rpc_filelist files[] = {
	[RPCAUTH_lockd] = {
		.name = "lockd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_mount] = {
		.name = "mount",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_nfs] = {
		.name = "nfs",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_portmap] = {
		.name = "portmap",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
	[RPCAUTH_statd] = {
		.name = "statd",
		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
	},
};
enum {
	RPCAUTH_info = 2,
	RPCAUTH_EOF
};

static struct rpc_filelist authfiles[] = {
	[RPCAUTH_info] = {
		.name = "info",
		.i_fop = &rpc_info_operations,
		.mode = S_IFREG | S_IRUSR,
	},
};
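/*
 * Pin (and on last put, release) the internal rpc_pipefs mount; callers such
 * as rpc_lookup_parent() use this so the filesystem stays mounted while they
 * walk paths inside it.
 */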
struct vfsmount *rpc_get_mount(void)
{
	int err;

	err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
	if (err != 0)
		return ERR_PTR(err);
	return rpc_mount;
}

void rpc_put_mount(void)
{
	simple_release_fs(&rpc_mount, &rpc_mount_count);
}
static int
rpc_lookup_parent(char *path, struct nameidata *nd)
{
	if (path[0] == '\0')
		return -ENOENT;
	nd->mnt = rpc_get_mount();
	if (IS_ERR(nd->mnt)) {
		printk(KERN_WARNING "%s: %s failed to mount "
				"pseudofilesystem \n", __FILE__, __FUNCTION__);
		return PTR_ERR(nd->mnt);
	}
	mntget(nd->mnt);
	nd->dentry = dget(rpc_mount->mnt_root);
	nd->last_type = LAST_ROOT;
	nd->flags = LOOKUP_PARENT;

	if (path_walk(path, nd)) {
		printk(KERN_WARNING "%s: %s failed to find path %s\n",
				__FILE__, __FUNCTION__, path);
		rpc_put_mount();
		return -ENOENT;
	}
	return 0;
}
static void
rpc_release_path(struct nameidata *nd)
{
	path_release(nd);
	rpc_put_mount();
}
static struct inode *
rpc_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_mode = mode;
	inode->i_uid = inode->i_gid = 0;
	inode->i_blksize = PAGE_CACHE_SIZE;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch(mode & S_IFMT) {
		case S_IFDIR:
			inode->i_fop = &simple_dir_operations;
			inode->i_op = &simple_dir_inode_operations;
			inode->i_nlink++;
		default:
			break;
	}
	return inode;
}
/*
 * FIXME: This probably has races.
 */
static void
rpc_depopulate(struct dentry *parent)
{
	struct inode *dir = parent->d_inode;
	struct list_head *pos, *next;
	struct dentry *dentry, *dvec[10];
	int n = 0;

	mutex_lock(&dir->i_mutex);
repeat:
	spin_lock(&dcache_lock);
	list_for_each_safe(pos, next, &parent->d_subdirs) {
		dentry = list_entry(pos, struct dentry, d_u.d_child);
		spin_lock(&dentry->d_lock);
		if (!d_unhashed(dentry)) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			dvec[n++] = dentry;
			if (n == ARRAY_SIZE(dvec))
				break;
		} else
			spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
	if (n) {
		do {
			dentry = dvec[--n];
			if (dentry->d_inode) {
				rpc_close_pipes(dentry->d_inode);
				simple_unlink(dir, dentry);
			}
			dput(dentry);
		} while (n);
		goto repeat;
	}
	mutex_unlock(&dir->i_mutex);
}
static int
rpc_populate(struct dentry *parent,
		struct rpc_filelist *files,
		int start, int eof)
{
	struct inode *inode, *dir = parent->d_inode;
	void *private = RPC_I(dir)->private;
	struct dentry *dentry;
	int mode, i;

	mutex_lock(&dir->i_mutex);
	for (i = start; i < eof; i++) {
		dentry = d_alloc_name(parent, files[i].name);
		if (!dentry)
			goto out_bad;
		mode = files[i].mode;
		inode = rpc_get_inode(dir->i_sb, mode);
		if (!inode) {
			dput(dentry);
			goto out_bad;
		}
		inode->i_ino = i;
		if (files[i].i_fop)
			inode->i_fop = files[i].i_fop;
		if (private)
			rpc_inode_setowner(inode, private);
		if (S_ISDIR(mode))
			dir->i_nlink++;
		d_add(dentry, inode);
	}
	mutex_unlock(&dir->i_mutex);
	return 0;
out_bad:
	mutex_unlock(&dir->i_mutex);
	printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
			__FILE__, __FUNCTION__, parent->d_name.name);
	return -ENOMEM;
}
static int
__rpc_mkdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode;

	inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUSR | S_IXUSR);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	d_instantiate(dentry, inode);
	dir->i_nlink++;
	inode_dir_notify(dir, DN_CREATE);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __FUNCTION__, dentry->d_name.name);
	return -ENOMEM;
}
static int
__rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	shrink_dcache_parent(dentry);
	if (dentry->d_inode)
		rpc_close_pipes(dentry->d_inode);
	if ((error = simple_rmdir(dir, dentry)) != 0)
		return error;
	inode_dir_notify(dir, DN_DELETE);
	return 0;
}
static struct dentry *
rpc_lookup_negative(char *path, struct nameidata *nd)
{
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, nd)) != 0)
		return ERR_PTR(error);
	dir = nd->dentry->d_inode;
	mutex_lock(&dir->i_mutex);
	dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len);
	if (IS_ERR(dentry))
		goto out_err;
	if (dentry->d_inode) {
		dput(dentry);
		dentry = ERR_PTR(-EEXIST);
		goto out_err;
	}
	return dentry;
out_err:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(nd);
	return dentry;
}
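/*
 * rpc_mkdir() creates a per-client directory under rpc_pipefs and populates
 * it with the "info" file; rpc_rmdir() below tears it down again.  Both
 * return with the parent's i_mutex dropped and the pipefs mount unpinned.
 */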
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	if ((error = __rpc_mkdir(dir, dentry)) != 0)
		goto err_dput;
	RPC_I(dentry->d_inode)->private = rpc_client;
	error = rpc_populate(dentry, authfiles,
			RPCAUTH_info, RPCAUTH_EOF);
	if (error)
		goto err_depopulate;
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_depopulate:
	rpc_depopulate(dentry);
	__rpc_rmdir(dir, dentry);
err_dput:
	dput(dentry);
	printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, error);
	dentry = ERR_PTR(error);
	goto out;
}
int
rpc_rmdir(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock(&dir->i_mutex);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	rpc_depopulate(dentry);
	error = __rpc_rmdir(dir, dentry);
	dput(dentry);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}
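/*
 * rpc_mkpipe() creates the actual upcall pipe node and wires it to the
 * caller's rpc_pipe_ops.  Illustrative sketch of a caller (names are made
 * up, not taken from this file):
 *
 *	static struct rpc_pipe_ops example_pipe_ops = {
 *		.upcall		= example_upcall,
 *		.downcall	= example_downcall,
 *		.destroy_msg	= example_destroy_msg,
 *	};
 *
 *	dentry = rpc_mkpipe("example/pipe", ctx, &example_pipe_ops,
 *			RPC_PIPE_WAIT_FOR_OPEN);
 *	...
 *	rpc_unlink("example/pipe");
 *
 * The rpc_pipe_ops methods named here (upcall, downcall, destroy_msg,
 * release_pipe) are the ones this file invokes.
 */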
struct dentry *
rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir, *inode;
	struct rpc_inode *rpci;

	dentry = rpc_lookup_negative(path, &nd);
	if (IS_ERR(dentry))
		return dentry;
	dir = nd.dentry->d_inode;
	inode = rpc_get_inode(dir->i_sb, S_IFSOCK | S_IRUSR | S_IWUSR);
	if (!inode)
		goto err_dput;
	inode->i_ino = iunique(dir->i_sb, 100);
	inode->i_fop = &rpc_pipe_fops;
	d_instantiate(dentry, inode);
	rpci = RPC_I(inode);
	rpci->private = private;
	rpci->flags = flags;
	rpci->ops = ops;
	inode_dir_notify(dir, DN_CREATE);
out:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return dentry;
err_dput:
	dput(dentry);
	dentry = ERR_PTR(-ENOMEM);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s (errno = %d)\n",
			__FILE__, __FUNCTION__, path, -ENOMEM);
	goto out;
}
int
rpc_unlink(char *path)
{
	struct nameidata nd;
	struct dentry *dentry;
	struct inode *dir;
	int error;

	if ((error = rpc_lookup_parent(path, &nd)) != 0)
		return error;
	dir = nd.dentry->d_inode;
	mutex_lock(&dir->i_mutex);
	dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_release;
	}
	d_drop(dentry);
	if (dentry->d_inode) {
		rpc_close_pipes(dentry->d_inode);
		error = simple_unlink(dir, dentry);
	}
	dput(dentry);
	inode_dir_notify(dir, DN_DELETE);
out_release:
	mutex_unlock(&dir->i_mutex);
	rpc_release_path(&nd);
	return error;
}
/*
 * populate the filesystem
 */
static struct super_operations s_ops = {
	.alloc_inode	= rpc_alloc_inode,
	.destroy_inode	= rpc_destroy_inode,
	.statfs		= simple_statfs,
};

#define RPCAUTH_GSSMAGIC 0x67596969
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = RPCAUTH_GSSMAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	inode = rpc_get_inode(sb, S_IFDIR | 0755);
	if (!inode)
		return -ENOMEM;
	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF))
		goto out;
	sb->s_root = root;
	return 0;
out:
	d_genocide(root);
	dput(root);
	return -ENOMEM;
}
static int
rpc_get_sb(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}
static struct file_system_type rpc_pipe_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "rpc_pipefs",
	.get_sb		= rpc_get_sb,
	.kill_sb	= kill_litter_super,
};
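/*
 * Slab constructor: runs once per rpc_inode object when its slab page is
 * created, setting up the embedded VFS inode and the empty pipe state.
 */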
static void
init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct rpc_inode *rpci = (struct rpc_inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&rpci->vfs_inode);
		rpci->private = NULL;
		rpci->nreaders = 0;
		rpci->nwriters = 0;
		INIT_LIST_HEAD(&rpci->in_upcall);
		INIT_LIST_HEAD(&rpci->pipe);
		rpci->pipelen = 0;
		init_waitqueue_head(&rpci->waitq);
		INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
		rpci->ops = NULL;
	}
}
int register_rpc_pipefs(void)
{
	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
				sizeof(struct rpc_inode),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
				init_once, NULL);
	if (!rpc_inode_cachep)
		return -ENOMEM;
	register_filesystem(&rpc_pipe_fs_type);
	return 0;
}
void unregister_rpc_pipefs(void)
{
	if (kmem_cache_destroy(rpc_inode_cachep))
		printk(KERN_WARNING "RPC: unable to free inode cache\n");
	unregister_filesystem(&rpc_pipe_fs_type);
}