/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>

#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/uaccess.h>

#include "spufs.h"
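/*
 * Global state: the slab cache backing spufs inodes, and a kernel copy of
 * the isolated-mode loader (filled in by spufs_init_isolated_loader() when
 * the platform provides one).
 */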
static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	ei->i_ctx = NULL;

	return &ei->vfs_inode;
}

static void
spufs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}
static void
spufs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
{
	struct spufs_inode_info *ei = p;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
					SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&ei->vfs_inode);
	}
}
static struct inode *
spufs_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
	return inode;
}
static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	return inode_setattr(inode, attr);
}
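/*
 * spufs_new_file() backs each attribute file in a context directory: it
 * allocates a regular-file inode, wires it to the given file operations,
 * and takes a reference on the context so the file keeps it alive.
 */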
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
		const struct file_operations *fops, int mode,
		struct spu_context *ctx)
{
	static struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;
	int ret;

	ret = -ENOSPC;
	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		goto out;

	ret = 0;
	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
out:
	return ret;
}
static void
spufs_delete_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);

	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
	clear_inode(inode);
}
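/*
 * spufs_prune_dir() unhashes and unlinks every child of a context
 * directory under the directory's i_mutex, so the directory itself can be
 * removed even though userspace never unlinks the files inside it.
 */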
static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	mutex_lock(&dir->d_inode->i_mutex);
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		spin_lock(&dcache_lock);
		spin_lock(&dentry->d_lock);
		if (!(d_unhashed(dentry)) && dentry->d_inode) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(dir->d_inode, dentry);
			spin_unlock(&dcache_lock);
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
		}
	}
	shrink_dcache_parent(dir);
	mutex_unlock(&dir->d_inode->i_mutex);
}
/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	spufs_prune_dir(dir);

	return simple_rmdir(parent, dir);
}
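/*
 * spufs_fill_dir() instantiates one file per tree_descr entry, masking
 * each entry's mode with the mode passed to spu_create; on failure it
 * prunes whatever was already created.
 */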
static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
			  int mode, struct spu_context *ctx)
{
	struct dentry *dentry;
	int ret;

	while (files->name && files->name[0]) {
		ret = -ENOMEM;
		dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			goto out;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, ctx);
		if (ret)
			goto out;
		files++;
	}
	return 0;
out:
	spufs_prune_dir(dir);
	return ret;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct spu_context *ctx;
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = dir->d_parent->d_inode;
	ctx = SPUFS_I(dir->d_inode)->i_ctx;

	mutex_lock(&parent->i_mutex);
	ret = spufs_rmdir(parent, dir);
	mutex_unlock(&parent->i_mutex);
	WARN_ON(ret);

	/* We have to give up the mm_struct */
	spu_forget(ctx);

	return dcache_dir_close(inode, file);
}
const struct inode_operations spufs_dir_inode_operations = {
	.lookup = simple_lookup,
};

const struct file_operations spufs_context_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= simple_sync_file,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
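/*
 * spufs_mkdir() builds one SPU context: a new directory inode, a freshly
 * allocated spu_context, and the set of attribute files inside it (the
 * reduced spufs_dir_nosched_contents set when SPU_CREATE_NOSCHED is used).
 */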
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		int mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx)
		goto out_iput;

	ctx->flags = flags;
	inode->i_op = &spufs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					 mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

	if (ret)
		goto out_free_ctx;

	d_instantiate(dentry, inode);
	dget(dentry);
	dir->i_nlink++;
	dentry->d_inode->i_nlink++;
	goto out;

out_free_ctx:
	put_spu_context(ctx);
out_iput:
	iput(inode);
out:
	return ret;
}
static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY);
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
out:
	return ret;
}
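/*
 * spufs_create_context() validates the create flags before building the
 * context: SPU_CREATE_NOSCHED needs CAP_SYS_NICE, SPU_CREATE_ISOLATE
 * requires both SPU_CREATE_NOSCHED and the isolated loader.  On success
 * the new context directory is opened and handed back as a file
 * descriptor.
 */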
static int spufs_create_context(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, int flags, int mode)
{
	int ret;

	ret = -EPERM;
	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	ret = -EINVAL;
	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		goto out_unlock;

	ret = -ENODEV;
	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
		goto out_unlock;

	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
	if (ret)
		goto out_unlock;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_context_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		WARN_ON(spufs_rmdir(inode, dentry));
		mutex_unlock(&inode->i_mutex);
		spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
		goto out;
	}

out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	dput(dentry);
	return ret;
}
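/*
 * Gang directories group related contexts: a directory created with
 * SPU_CREATE_GANG carries a struct spu_gang instead of a context, and
 * contexts created underneath it join that gang (see spufs_mkdir() and
 * spufs_create()).
 */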
static int spufs_rmgang(struct inode *root, struct dentry *dir)
{
	/* FIXME: this fails if the dir is not empty,
	 *	  which causes a leak of gangs. */
	return simple_rmdir(root, dir);
}

static int spufs_gang_close(struct inode *inode, struct file *file)
{
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = dir->d_parent->d_inode;

	ret = spufs_rmgang(parent, dir);
	WARN_ON(ret);

	return dcache_dir_close(inode, file);
}

const struct file_operations spufs_gang_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_gang_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= simple_sync_file,
};
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
	int ret;
	struct inode *inode;
	struct spu_gang *gang;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	ret = 0;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	if (!gang)
		goto out_iput;

	inode->i_op = &spufs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	dget(dentry);
	dir->i_nlink++;
	dentry->d_inode->i_nlink++;
	return ret;

out_iput:
	iput(inode);
out:
	return ret;
}
static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY);
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &spufs_gang_fops;
	fd_install(ret, filp);
out:
	return ret;
}
static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, int mode)
{
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
	if (ret)
		goto out;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_gang_open(dget(dentry), mntget(mnt));
	if (ret < 0)
		WARN_ON(spufs_rmgang(inode, dentry));

out:
	mutex_unlock(&inode->i_mutex);
	dput(dentry);
	return ret;
}
static struct file_system_type spufs_type;
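/*
 * spufs_create() is the backend of the spu_create(2) system call.  The
 * path passed by userspace must name a new entry inside a mounted spufs;
 * an illustrative call (the mount point /spu and the context name are
 * only examples):
 *
 *	int fd = syscall(__NR_spu_create, "/spu/myctx", 0, 0755);
 *
 * The returned descriptor refers to the newly created context directory.
 */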
long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
{
	struct dentry *dentry;
	int ret;

	ret = -EINVAL;
	/* check if we are on spufs */
	if (nd->dentry->d_sb->s_type != &spufs_type)
		goto out;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		goto out;

	/* only threads can be underneath a gang */
	if (nd->dentry != nd->dentry->d_sb->s_root) {
		if ((flags & SPU_CREATE_GANG) ||
		    !SPUFS_I(nd->dentry->d_inode)->i_gang)
			goto out;
	}

	dentry = lookup_create(nd, 1);
	ret = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_dir;

	ret = -EEXIST;
	if (dentry->d_inode)
		goto out_dput;

	mode &= ~current->fs->umask;

	if (flags & SPU_CREATE_GANG)
		return spufs_create_gang(nd->dentry->d_inode,
					dentry, nd->mnt, mode);
	else
		return spufs_create_context(nd->dentry->d_inode,
					dentry, nd->mnt, flags, mode);

out_dput:
	dput(dentry);
out_dir:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
	return ret;
}
/* File system initialization */
enum {
	Opt_uid, Opt_gid, Opt_mode, Opt_err,
};

static match_table_t spufs_tokens = {
	{ Opt_uid,  "uid=%d" },
	{ Opt_gid,  "gid=%d" },
	{ Opt_mode, "mode=%o" },
	{ Opt_err,   NULL },
};
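/*
 * The root directory's ownership and mode can be set at mount time, for
 * example (the mount point /spu is chosen only as an example):
 *
 *	mount -t spufs -o uid=0,gid=100,mode=0775 none /spu
 */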
static int
spufs_parse_options(char *options, struct inode *root)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		int token, option;

		if (!*p)
			continue;

		token = match_token(p, spufs_tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_uid = option;
			break;
		case Opt_gid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_gid = option;
			break;
		case Opt_mode:
			if (match_octal(&args[0], &option))
				return 0;
			root->i_mode = option | S_IFDIR;
			break;
		default:
			return 0;
		}
	}
	return 1;
}
static void spufs_exit_isolated_loader(void)
{
	kfree(isolated_loader);
}
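/*
 * The isolated-mode loader comes from the "loader" property of the
 * /spu-isolation device-tree node; spufs copies it into kernel memory at
 * init time and refuses SPU_CREATE_ISOLATE when no loader is present.
 */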
static void
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = of_get_property(dn, "loader", &size);
	if (!loader)
		return;

	/* kmalloc should align on a 16-byte boundary */
	isolated_loader = kmalloc(size, GFP_KERNEL);
	if (!isolated_loader)
		return;

	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}
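/*
 * spufs_create_root() sets up the root directory inode and applies the
 * uid/gid/mode mount options parsed by spufs_parse_options().
 */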
static int
spufs_create_root(struct super_block *sb, void *data)
{
	struct inode *inode;
	int ret;

	ret = -ENOMEM;
	inode = spufs_new_inode(sb, S_IFDIR | 0775);
	if (!inode)
		goto out;

	inode->i_op = &spufs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;

	ret = -EINVAL;
	if (!spufs_parse_options(data, inode))
		goto out_iput;

	ret = -ENOMEM;
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_iput;

	return 0;
out_iput:
	iput(inode);
out:
	return ret;
}
static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
	static struct super_operations s_ops = {
		.alloc_inode = spufs_alloc_inode,
		.destroy_inode = spufs_destroy_inode,
		.statfs = simple_statfs,
		.delete_inode = spufs_delete_inode,
		.drop_inode = generic_delete_inode,
	};

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &s_ops;

	return spufs_create_root(sb, data);
}
static int
spufs_get_sb(struct file_system_type *fstype, int flags,
		const char *name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.get_sb = spufs_get_sb,
	.kill_sb = kill_litter_super,
};
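/*
 * Module init brings the pieces up in order: inode cache, SPU scheduler,
 * filesystem registration, the spu_create/spu_run syscall vector
 * (spufs_calls), the coredump hooks, and finally the optional isolated
 * loader; each failure unwinds the steps registered before it.
 */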
static int __init spufs_init(void)
{
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
	if (!spufs_inode_cache)
		goto out;
	ret = spu_sched_init();
	if (ret)
		goto out_cache;
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_sched;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_fs;
	ret = register_arch_coredump_calls(&spufs_coredump_calls);
	if (ret)
		goto out_syscalls;

	spufs_init_isolated_loader();

	return 0;

out_syscalls:
	unregister_spu_syscalls(&spufs_calls);
out_fs:
	unregister_filesystem(&spufs_type);
out_sched:
	spu_sched_exit();
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);
static void __exit spufs_exit(void)
{
	spu_sched_exit();
	spufs_exit_isolated_loader();
	unregister_arch_coredump_calls(&spufs_coredump_calls);
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");