/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>

#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/uaccess.h>

#include "spufs.h"

static kmem_cache_t *spufs_inode_cache;
static char *isolated_loader;

static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	return &ei->vfs_inode;
}

static void
spufs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
{
	struct spufs_inode_info *ei = p;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&ei->vfs_inode);
	}
}

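/*
 * Allocate a fresh inode, owned by the creating task, with all three
 * timestamps set to now; callers fill in the mode-specific operations.
 */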
static struct inode *
spufs_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
	return inode;
}

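/*
 * All files in spufs have a fixed size, so any setattr request that
 * would change i_size is rejected; other attributes pass through to
 * the generic code.
 */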
static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	return inode_setattr(inode, attr);
}

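/*
 * Create one regular file inside a context directory; the inode takes
 * its own reference on the context so the context stays around as long
 * as any of its files does.
 */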
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
		const struct file_operations *fops, int mode,
		struct spu_context *ctx)
{
	static struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;
	int ret;

	ret = -ENOSPC;
	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		goto out;

	ret = 0;
	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
out:
	return ret;
}

static void
spufs_delete_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);

	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
	clear_inode(inode);
}

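/*
 * Unlink all children of a context directory. Follows the usual
 * dcache locking rules: dcache_lock nests outside the per-dentry
 * d_lock, and only hashed entries with an inode are unlinked.
 */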
static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	mutex_lock(&dir->d_inode->i_mutex);
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		spin_lock(&dcache_lock);
		spin_lock(&dentry->d_lock);
		if (!(d_unhashed(dentry)) && dentry->d_inode) {
			dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(dir->d_inode, dentry);
			spin_unlock(&dcache_lock);
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
		}
	}
	shrink_dcache_parent(dir);
	mutex_unlock(&dir->d_inode->i_mutex);
}

/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	spufs_prune_dir(dir);

	return simple_rmdir(parent, dir);
}

static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
			  int mode, struct spu_context *ctx)
{
	struct dentry *dentry;
	int ret;

	while (files->name && files->name[0]) {
		ret = -ENOMEM;
		dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			goto out;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, ctx);
		if (ret)
			goto out;
		files++;
	}
	return 0;
out:
	spufs_prune_dir(dir);
	return ret;
}

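/*
 * Closing the last file descriptor of a context directory tears the
 * context down: the directory is removed from its parent and the
 * context gives up its mm_struct via spu_forget().
 */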
static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct spu_context *ctx;
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_dentry;
	parent = dir->d_parent->d_inode;
	ctx = SPUFS_I(dir->d_inode)->i_ctx;

	mutex_lock(&parent->i_mutex);
	ret = spufs_rmdir(parent, dir);
	mutex_unlock(&parent->i_mutex);
	WARN_ON(ret);

	/* We have to give up the mm_struct */
	spu_forget(ctx);

	return dcache_dir_close(inode, file);
}

struct inode_operations spufs_dir_inode_operations = {
	.lookup = simple_lookup,
};

struct file_operations spufs_context_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= simple_sync_file,
};

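/*
 * Load the isolated-mode loader into an SPE: purge the MFC DMA queue,
 * drop the SPE into kernel (privileged) mode so the loader is
 * accessible, pass the loader's address in via the two signal
 * notification registers (high and low 32 bits), start the SPU with
 * the ISOLATE bit set and poll the status register until the isolated
 * load has finished.
 */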
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	if (!isolated_loader)
		return -ENODEV;

	if ((ret = spu_acquire_exclusive(ctx)) != 0)
		return ret;

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__FUNCTION__);
			ret = -EIO;
			goto out_unlock;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__FUNCTION__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a stop-and
		 * signal later. */
		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out_unlock:
	spu_release_exclusive(ctx);
	return ret;
}

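/* Reset an isolated context: stop the SPU, then rerun the loader. */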
int spu_recycle_isolated(struct spu_context *ctx)
{
	ctx->ops->runcntl_stop(ctx);
	return spu_setup_isolated(ctx);
}

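/*
 * Create a context directory and populate it with the per-context
 * files; NOSCHED contexts get the reduced spufs_dir_nosched_contents
 * file set.
 */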
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		int mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode |= S_ISGID;
	}
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	ret = -ENOMEM;
	if (!ctx)
		goto out_iput;

	inode->i_op = &spufs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
	if (ret)
		goto out_free_ctx;

	d_instantiate(dentry, inode);
	dget(dentry);
	dir->i_nlink++;
	dentry->d_inode->i_nlink++;
	goto out;

out_free_ctx:
	put_spu_context(ctx);
out_iput:
	iput(inode);
out:
	return ret;
}

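/*
 * Open the freshly created context directory; the resulting file
 * descriptor is what spu_create() hands back to user space.
 */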
static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY);
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
out:
	return ret;
}

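/*
 * Validate the creation flags before building the context: NOSCHED
 * needs CAP_SYS_NICE, and ISOLATE is only accepted together with
 * NOSCHED, since an isolated context must not be scheduled away.
 */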
static int spufs_create_context(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, int flags, int mode)
{
	int ret;

	ret = -EPERM;
	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	ret = -EINVAL;
	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		goto out_unlock;

	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
	if (ret)
		goto out_unlock;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_context_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		WARN_ON(spufs_rmdir(inode, dentry));
		mutex_unlock(&inode->i_mutex);
		spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
		goto out;
	}

out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	if (ret >= 0 && (flags & SPU_CREATE_ISOLATE)) {
		int setup_err = spu_setup_isolated(
				SPUFS_I(dentry->d_inode)->i_ctx);
		if (setup_err)
			ret = setup_err;
	}

	dput(dentry);
	return ret;
}

static int spufs_rmgang(struct inode *root, struct dentry *dir)
{
	/* FIXME: this fails if the dir is not empty,
	 * which causes a leak of gangs. */
	return simple_rmdir(root, dir);
}

static int spufs_gang_close(struct inode *inode, struct file *file)
{
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_dentry;
	parent = dir->d_parent->d_inode;

	ret = spufs_rmgang(parent, dir);
	WARN_ON(ret);

	return dcache_dir_close(inode, file);
}

struct file_operations spufs_gang_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_gang_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= simple_sync_file,
};

static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
	int ret;
	struct inode *inode;
	struct spu_gang *gang;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode |= S_ISGID;
	}
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	ret = -ENOMEM;
	if (!gang)
		goto out_iput;

	inode->i_op = &spufs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	dget(dentry);
	dir->i_nlink++;
	dentry->d_inode->i_nlink++;
	return 0;

out_iput:
	iput(inode);
out:
	return ret;
}

static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY);
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &spufs_gang_fops;
	fd_install(ret, filp);
out:
	return ret;
}

static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, int mode)
{
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
	if (ret)
		goto out;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_gang_open(dget(dentry), mntget(mnt));
	if (ret < 0)
		WARN_ON(spufs_rmgang(inode, dentry));

out:
	mutex_unlock(&inode->i_mutex);
	dput(dentry);
	return ret;
}

static struct file_system_type spufs_type;

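/*
 * Backend of the spu_create(2) system call: create either a gang
 * directory or a context directory on spufs. An illustrative call
 * from user space (not part of this file):
 *
 *	int fd = spu_create("/spu/my_context", 0, 0755);
 */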
long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
{
	struct dentry *dentry;
	int ret;

	ret = -EINVAL;
	/* check if we are on spufs */
	if (nd->dentry->d_sb->s_type != &spufs_type)
		goto out;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		goto out;

	/* only threads can be underneath a gang */
	if (nd->dentry != nd->dentry->d_sb->s_root) {
		if ((flags & SPU_CREATE_GANG) ||
		    !SPUFS_I(nd->dentry->d_inode)->i_gang)
			goto out;
	}

	dentry = lookup_create(nd, 1);
	ret = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_dir;

	ret = -EEXIST;
	if (dentry->d_inode)
		goto out_dput;

	mode &= ~current->fs->umask;

	if (flags & SPU_CREATE_GANG)
		return spufs_create_gang(nd->dentry->d_inode,
					dentry, nd->mnt, mode);
	else
		return spufs_create_context(nd->dentry->d_inode,
					dentry, nd->mnt, flags, mode);

out_dput:
	dput(dentry);
out_dir:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
	return ret;
}

/* File system initialization */
enum {
	Opt_uid, Opt_gid, Opt_err,
};

static match_table_t spufs_tokens = {
	{ Opt_uid, "uid=%d" },
	{ Opt_gid, "gid=%d" },
	{ Opt_err, NULL },
};

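/*
 * Parse the uid= and gid= mount options into the ownership of the
 * filesystem root. Illustrative use:
 *
 *	mount -t spufs -o uid=0,gid=100 none /spu
 */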
static int
spufs_parse_options(char *options, struct inode *root)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		int token, option;

		if (!*p)
			continue;

		token = match_token(p, spufs_tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_uid = option;
			break;
		case Opt_gid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_gid = option;
			break;
		default:
			return 0;
		}
	}
	return 1;
}

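/*
 * Copy the isolated-mode loader binary out of the "loader" property
 * of the /spu-isolation device tree node. Without that property,
 * isolated mode stays disabled.
 */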
static void
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = get_property(dn, "loader", &size);
	if (!loader)
		return;

	/* kmalloc should align on a 16 byte boundary */
	isolated_loader = kmalloc(size, GFP_KERNEL);
	if (!isolated_loader)
		return;

	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}

static int
spufs_create_root(struct super_block *sb, void *data)
{
	struct inode *inode;
	int ret;

	ret = -ENOMEM;
	inode = spufs_new_inode(sb, S_IFDIR | 0775);
	if (!inode)
		goto out;

	inode->i_op = &spufs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;

	ret = -EINVAL;
	if (!spufs_parse_options(data, inode))
		goto out_iput;

	ret = -ENOMEM;
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_iput;

	return 0;
out_iput:
	iput(inode);
out:
	return ret;
}

static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
	static struct super_operations s_ops = {
		.alloc_inode = spufs_alloc_inode,
		.destroy_inode = spufs_destroy_inode,
		.statfs = simple_statfs,
		.delete_inode = spufs_delete_inode,
		.drop_inode = generic_delete_inode,
	};

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &s_ops;

	return spufs_create_root(sb, data);
}

static int
spufs_get_sb(struct file_system_type *fstype, int flags,
		const char *name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.get_sb = spufs_get_sb,
	.kill_sb = kill_litter_super,
};

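/*
 * Module setup: create the inode cache, start the SPU scheduler,
 * register the filesystem and the SPU syscalls, then try to enable
 * isolated mode.
 */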
static int __init spufs_init(void)
{
	int ret;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
	if (!spufs_inode_cache)
		goto out;
	if (spu_sched_init() != 0) {
		kmem_cache_destroy(spufs_inode_cache);
		goto out;
	}
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_cache;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_fs;

	spufs_init_isolated_loader();

	return 0;

out_fs:
	unregister_filesystem(&spufs_type);
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);

static void __exit spufs_exit(void)
{
	spu_sched_exit();
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");