/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
/* Simple attribute files */
struct spufs_attr {
        int (*get)(void *, u64 *);
        int (*set)(void *, u64);
        char get_buf[24];       /* enough to store a u64 and "\n\0" */
        char set_buf[24];
        void *data;
        const char *fmt;        /* format for read operation */
        struct mutex mutex;     /* protects access to these buffers */
};
static int spufs_attr_open(struct inode *inode, struct file *file,
                int (*get)(void *, u64 *), int (*set)(void *, u64),
                const char *fmt)
{
        struct spufs_attr *attr;

        attr = kmalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        attr->get = get;
        attr->set = set;
        attr->data = inode->i_private;
        attr->fmt = fmt;
        mutex_init(&attr->mutex);
        file->private_data = attr;

        return nonseekable_open(inode, file);
}
static int spufs_attr_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
                size_t len, loff_t *ppos)
{
        struct spufs_attr *attr;
        size_t size;
        ssize_t ret;

        attr = file->private_data;
        if (!attr->get)
                return -EACCES;

        ret = mutex_lock_interruptible(&attr->mutex);
        if (ret)
                return ret;

        if (*ppos) {            /* continued read */
                size = strlen(attr->get_buf);
        } else {                /* first read */
                u64 val;
                ret = attr->get(attr->data, &val);
                if (ret)
                        goto out;

                size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
                                 attr->fmt, (unsigned long long)val);
        }

        ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
        mutex_unlock(&attr->mutex);
        return ret;
}
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
                size_t len, loff_t *ppos)
{
        struct spufs_attr *attr;
        u64 val;
        size_t size;
        ssize_t ret;

        attr = file->private_data;
        if (!attr->set)
                return -EACCES;

        ret = mutex_lock_interruptible(&attr->mutex);
        if (ret)
                return ret;

        ret = -EFAULT;
        size = min(sizeof(attr->set_buf) - 1, len);
        if (copy_from_user(attr->set_buf, buf, size))
                goto out;

        ret = len; /* claim we got the whole input */
        attr->set_buf[size] = '\0';
        val = simple_strtol(attr->set_buf, NULL, 0);
        attr->set(attr->data, val);
out:
        mutex_unlock(&attr->mutex);
        return ret;
}
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)     \
static int __fops ## _open(struct inode *inode, struct file *file)     \
{                                                                      \
        __simple_attr_check_format(__fmt, 0ull);                       \
        return spufs_attr_open(inode, file, __get, __set, __fmt);      \
}                                                                      \
static struct file_operations __fops = {                               \
        .owner   = THIS_MODULE,                                        \
        .open    = __fops ## _open,                                    \
        .release = spufs_attr_release,                                 \
        .read    = spufs_attr_read,                                    \
        .write   = spufs_attr_write,                                   \
};
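/*
 * Illustrative sketch (not from the original source): a hypothetical
 * attribute would be wired up as below, with inode->i_private carrying
 * the spu_context that spufs_attr_open() stores as ->data. The names
 * spufs_foo_* are assumptions, purely for illustration:
 *
 *	static int spufs_foo_get(void *data, u64 *val);	// hypothetical
 *	static int spufs_foo_set(void *data, u64 val);	// hypothetical
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *				      spufs_foo_set, "0x%llx\n");
 */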
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->local_store = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static int
spufs_mem_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->local_store = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        char *local_store = ctx->ops->get_ls(ctx);
        return simple_read_from_buffer(buffer, size, pos, local_store,
                                        LS_SIZE);
}
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
                                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ret = __spufs_mem_read(ctx, buffer, size, pos);
        spu_release(ctx);

        return ret;
}
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
                                        size_t size, loff_t *ppos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        loff_t pos = *ppos;
        int ret;

        if (pos < 0)
                return -EINVAL;
        if (pos > LS_SIZE)
                return -EFBIG;
        if (size > LS_SIZE - pos)
                size = LS_SIZE - pos;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        local_store = ctx->ops->get_ls(ctx);
        ret = copy_from_user(local_store + pos, buffer, size);
        spu_release(ctx);

        if (ret)
                return -EFAULT;
        *ppos = pos + size;
        return size;
}
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
                                          unsigned long address)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
        struct spu_state *csa = &ctx->csa;
        int psize;

        /* Check what page size we are using */
        psize = get_slice_psize(vma->vm_mm, address);

        /* Some sanity checking */
        BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

        /* Wow, 64K, cool, we need to align the address though */
        if (csa->use_big_pages) {
                BUG_ON(vma->vm_start & 0xffff);
                address &= ~0xfffful;
        }
#endif /* CONFIG_SPU_FS_64K_LS */

        offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
        if (offset >= LS_SIZE)
                return NOPFN_SIGBUS;

        pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
                 addr0, address, offset);

        if (spu_acquire(ctx))
                return NOPFN_REFAULT;

        if (ctx->state == SPU_STATE_SAVED) {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                                        & ~_PAGE_NO_CACHE);
                pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                             | _PAGE_NO_CACHE);
                pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
        vm_insert_pfn(vma, address, pfn);

        spu_release(ctx);

        return NOPFN_REFAULT;
}
static struct vm_operations_struct spufs_mem_mmap_vmops = {
        .nopfn = spufs_mem_mmap_nopfn,
};
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
        struct spu_context *ctx = file->private_data;
        struct spu_state *csa = &ctx->csa;

        /* Sanity check VMA alignment */
        if (csa->use_big_pages) {
                pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
                         " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
                         vma->vm_pgoff);
                if (vma->vm_start & 0xffff)
                        return -EINVAL;
                if (vma->vm_pgoff & 0xf)
                        return -EINVAL;
        }
#endif /* CONFIG_SPU_FS_64K_LS */

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);

        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
}
#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        struct spu_context *ctx = file->private_data;
        struct spu_state *csa = &ctx->csa;

        /* If not using big pages, fallback to normal MM g_u_a */
        if (!csa->use_big_pages)
                return current->mm->get_unmapped_area(file, addr, len,
                                                      pgoff, flags);

        /* Else, try to obtain a 64K pages slice */
        return slice_get_unmapped_area(addr, len, flags,
                                       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
        .open                   = spufs_mem_open,
        .release                = spufs_mem_release,
        .read                   = spufs_mem_read,
        .write                  = spufs_mem_write,
        .llseek                 = generic_file_llseek,
        .mmap                   = spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
        .get_unmapped_area      = spufs_get_unmapped_area,
#endif
};
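/*
 * Illustrative userspace sketch (assuming a context directory created
 * by spu_create() at the hypothetical path "/spu/ctx"): local store
 * can be accessed with plain read()/write() on "mem", or mmap()ed, in
 * which case pages are faulted in by spufs_mem_mmap_nopfn() above:
 *
 *	int fd = open("/spu/ctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);	// MAP_SHARED is required
 *	memcpy(ls, program_image, program_size);
 */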
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
                                    unsigned long address,
                                    unsigned long ps_offs,
                                    unsigned long ps_size)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long area, offset = address - vma->vm_start;
        int ret = 0;

        spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);

        offset += vma->vm_pgoff << PAGE_SHIFT;
        if (offset >= ps_size)
                return NOPFN_SIGBUS;

        /*
         * Because we release the mmap_sem, the context may be destroyed while
         * we're in spu_wait. Grab an extra reference so it isn't destroyed
         * while we're in spu_wait.
         */
        get_spu_context(ctx);

        /*
         * We have to wait for context to be loaded before we have
         * pages to hand out to the user, but we don't want to wait
         * with the mmap_sem held.
         * It is possible to drop the mmap_sem here, but then we need
         * to return NOPFN_REFAULT because the mappings may have
         * changed.
         */
        if (spu_acquire(ctx))
                goto refault;

        if (ctx->state == SPU_STATE_SAVED) {
                up_read(&current->mm->mmap_sem);
                spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
                ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
                spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
                down_read(&current->mm->mmap_sem);
        } else {
                area = ctx->spu->problem_phys + ps_offs;
                vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
                spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
        }

        if (!ret)
                spu_release(ctx);

refault:
        put_spu_context(ctx);
        return NOPFN_REFAULT;
}
#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
                                           unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
        .nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_cntl_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_cntl_get(void *data, u64 *val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        *val = ctx->ops->status_read(ctx);
        spu_release(ctx);

        return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->runcntl_write(ctx, val);
        spu_release(ctx);

        return 0;
}
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->cntl = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return simple_attr_open(inode, file, spufs_cntl_get,
                                        spufs_cntl_set, "0x%08lx");
}
static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        simple_attr_release(inode, file);

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->cntl = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static const struct file_operations spufs_cntl_fops = {
        .open = spufs_cntl_open,
        .release = spufs_cntl_release,
        .read = simple_attr_read,
        .write = simple_attr_write,
        .mmap = spufs_cntl_mmap,
};
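/*
 * Illustrative userspace sketch: "cntl" is a text attribute; a read
 * returns the SPU status register in the "0x%08lx" format used above,
 * and a written number is handed to runcntl_write(). The "/spu/ctx"
 * path is a hypothetical mount point:
 *
 *	char buf[16];
 *	int fd = open("/spu/ctx/cntl", O_RDWR);
 *	read(fd, buf, sizeof(buf));	// e.g. "0x00000001"
 */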
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;
        return 0;
}
static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return simple_read_from_buffer(buffer, size, pos,
                                      lscsa->gprs, sizeof lscsa->gprs);
}
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_regs_read(ctx, buffer, size, pos);
        spu_release_saved(ctx);
        return ret;
}
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
                 size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
        if (size <= 0)
                return -EFBIG;
        *pos += size;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;

        ret = copy_from_user(lscsa->gprs + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release_saved(ctx);
        return ret;
}
static const struct file_operations spufs_regs_fops = {
        .open    = spufs_regs_open,
        .read    = spufs_regs_read,
        .write   = spufs_regs_write,
        .llseek  = generic_file_llseek,
};
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
                        size_t size, loff_t * pos)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return simple_read_from_buffer(buffer, size, pos,
                                      &lscsa->fpcr, sizeof(lscsa->fpcr));
}
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
                size_t size, loff_t * pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_fpcr_read(ctx, buffer, size, pos);
        spu_release_saved(ctx);
        return ret;
}
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
                 size_t size, loff_t * pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
        if (size <= 0)
                return -EFBIG;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;

        *pos += size;
        ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
                             buffer, size) ? -EFAULT : size;

        spu_release_saved(ctx);
        return ret;
}
static const struct file_operations spufs_fpcr_fops = {
        .open = spufs_regs_open,
        .read = spufs_fpcr_read,
        .write = spufs_fpcr_write,
        .llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;

        return nonseekable_open(inode, file);
}
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        udata = (void __user *)buf;

        count = spu_acquire(ctx);
        if (count)
                return count;

        for (count = 0; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = ctx->ops->mbox_read(ctx, &mbox_data);
                if (ret == 0)
                        break;

                /*
                 * at the end of the mapped area, we can fault
                 * but still need to return the data we have
                 * read successfully so far.
                 */
                ret = __put_user(mbox_data, udata);
                if (ret) {
                        if (!count)
                                count = -EFAULT;
                        break;
                }
        }
        spu_release(ctx);

        if (!count)
                count = -EAGAIN;

        return count;
}
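/*
 * Illustrative userspace sketch: mailbox entries are consumed in
 * 4-byte units; once the mailbox is empty, read() fails with EAGAIN.
 * The path and handler name are hypothetical:
 *
 *	u32 data;
 *	int fd = open("/spu/ctx/mbox", O_RDONLY);
 *	while (read(fd, &data, sizeof(data)) == sizeof(data))
 *		handle_mbox_word(data);
 */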
static const struct file_operations spufs_mbox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_mbox_read,
};
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;
        u32 mbox_stat;

        if (len < 4)
                return -EINVAL;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

        spu_release(ctx);

        if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
                return -EFAULT;

        return 4;
}
static const struct file_operations spufs_mbox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_mbox_stat_read,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
        return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        if (!ctx)
                return;

        wake_up_all(&ctx->ibox_wq);
        kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        udata = (void __user *)buf;

        count = spu_acquire(ctx);
        if (count)
                goto out;

        /* wait only for the first element */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_ibox_read(ctx, &ibox_data)) {
                        count = -EAGAIN;
                        goto out_unlock;
                }
        } else {
                count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
                if (count)
                        goto out;
        }

        /* if we can't write at all, return -EFAULT */
        count = __put_user(ibox_data, udata);
        if (count)
                goto out_unlock;

        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = ctx->ops->ibox_read(ctx, &ibox_data);
                if (ret == 0)
                        break;
                /*
                 * at the end of the mapped area, we can fault
                 * but still need to return the data we have
                 * read successfully so far.
                 */
                ret = __put_user(ibox_data, udata);
                if (ret)
                        break;
        }

out_unlock:
        spu_release(ctx);
out:
        return count;
}
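/*
 * Illustrative userspace sketch: without O_NONBLOCK the read blocks
 * until the SPU program writes to its interrupt mailbox; with
 * O_NONBLOCK it fails with EAGAIN when the mailbox is empty. Path and
 * handler name are hypothetical:
 *
 *	u32 event;
 *	int fd = open("/spu/ctx/ibox", O_RDONLY);	// blocking
 *	if (read(fd, &event, 4) == 4)
 *		handle_ibox_word(event);
 */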
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->ibox_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep. Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
        spu_release(ctx);

        return mask;
}
static const struct file_operations spufs_ibox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_ibox_read,
        .poll   = spufs_ibox_poll,
        .fasync = spufs_ibox_fasync,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;
        u32 ibox_stat;

        if (len < 4)
                return -EINVAL;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
                return -EFAULT;

        return 4;
}
static const struct file_operations spufs_ibox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_ibox_stat_read,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
        return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

        return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        if (!ctx)
                return;

        wake_up_all(&ctx->wbox_wq);
        kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        udata = (void __user *)buf;
        if (!access_ok(VERIFY_READ, buf, len))
                return -EFAULT;

        if (__get_user(wbox_data, udata))
                return -EFAULT;

        count = spu_acquire(ctx);
        if (count)
                goto out;

        /*
         * make sure we can at least write one element, by waiting
         * in case of !O_NONBLOCK
         */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_wbox_write(ctx, wbox_data)) {
                        count = -EAGAIN;
                        goto out_unlock;
                }
        } else {
                count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
                if (count)
                        goto out;
        }

        /* write as much as possible */
        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = __get_user(wbox_data, udata);
                if (ret)
                        break;

                ret = spu_wbox_write(ctx, wbox_data);
                if (ret == 0)
                        break;
        }

out_unlock:
        spu_release(ctx);
out:
        return count;
}
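/*
 * Illustrative userspace sketch: each 4-byte value written here lands
 * in the SPU's inbound mailbox; a blocking write sleeps in spufs_wait()
 * above while the mailbox is full. The path is hypothetical:
 *
 *	u32 cmd = 1;
 *	int fd = open("/spu/ctx/wbox", O_WRONLY);
 *	write(fd, &cmd, sizeof(cmd));
 */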
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->wbox_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep. Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
        spu_release(ctx);

        return mask;
}
static const struct file_operations spufs_wbox_fops = {
        .open   = spufs_pipe_open,
        .write  = spufs_wbox_write,
        .poll   = spufs_wbox_poll,
        .fasync = spufs_wbox_fasync,
};
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;
        u32 wbox_stat;

        if (len < 4)
                return -EINVAL;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
                return -EFAULT;

        return 4;
}
static const struct file_operations spufs_wbox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_wbox_stat_read,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal1 = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}
static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal1 = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret = 0;
        u32 data;

        if (len < 4)
                return -EINVAL;

        if (ctx->csa.spu_chnlcnt_RW[3]) {
                data = ctx->csa.spu_chnldata_RW[3];
                ret = 4;
        }

        if (!ret)
                goto out;

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

out:
        return ret;
}
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_signal1_read(ctx, buf, len, pos);
        spu_release_saved(ctx);

        return ret;
}
static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        ssize_t ret;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal1_write(ctx, data);
        spu_release(ctx);

        return 4;
}
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
                                          unsigned long address)
{
#if PAGE_SIZE == 0x1000
        return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}
static struct vm_operations_struct spufs_signal1_mmap_vmops = {
        .nopfn = spufs_signal1_mmap_nopfn,
};
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_signal1_mmap_vmops;
        return 0;
}
static const struct file_operations spufs_signal1_fops = {
        .open = spufs_signal1_open,
        .release = spufs_signal1_release,
        .read = spufs_signal1_read,
        .write = spufs_signal1_write,
        .mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
        .open = spufs_signal1_open,
        .release = spufs_signal1_release,
        .write = spufs_signal1_write,
        .mmap = spufs_signal1_mmap,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal2 = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}
static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal2 = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret = 0;
        u32 data;

        if (len < 4)
                return -EINVAL;

        if (ctx->csa.spu_chnlcnt_RW[4]) {
                data = ctx->csa.spu_chnldata_RW[4];
                ret = 4;
        }

        if (!ret)
                goto out;

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

out:
        return ret;
}
static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_signal2_read(ctx, buf, len, pos);
        spu_release_saved(ctx);

        return ret;
}
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        ssize_t ret;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal2_write(ctx, data);
        spu_release(ctx);

        return 4;
}
#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
                                           unsigned long address)
{
#if PAGE_SIZE == 0x1000
        return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}
static struct vm_operations_struct spufs_signal2_mmap_vmops = {
        .nopfn = spufs_signal2_mmap_nopfn,
};
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static const struct file_operations spufs_signal2_fops = {
        .open = spufs_signal2_open,
        .release = spufs_signal2_release,
        .read = spufs_signal2_read,
        .write = spufs_signal2_write,
        .mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
        .open = spufs_signal2_open,
        .release = spufs_signal2_release,
        .write = spufs_signal2_write,
        .mmap = spufs_signal2_mmap,
};
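/*
 * Illustrative userspace sketch: writing a 32-bit value to "signal1"
 * or "signal2" raises the corresponding signal notification register;
 * whether a new value overwrites or is OR'ed into the old one depends
 * on the signal type attributes defined below. The path is
 * hypothetical:
 *
 *	u32 v = 0x1;
 *	int fd = open("/spu/ctx/signal1", O_WRONLY);
 *	write(fd, &v, sizeof(v));
 */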
/*
 * This is a wrapper around DEFINE_SPUFS_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
static int __##__get(void *data, u64 *val)                             \
{                                                                      \
        struct spu_context *ctx = data;                                \
        int ret = 0;                                                   \
                                                                       \
        if (__acquire == SPU_ATTR_ACQUIRE) {                           \
                ret = spu_acquire(ctx);                                \
                if (ret)                                               \
                        return ret;                                    \
                *val = __get(ctx);                                     \
                spu_release(ctx);                                      \
        } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {              \
                ret = spu_acquire_saved(ctx);                          \
                if (ret)                                               \
                        return ret;                                    \
                *val = __get(ctx);                                     \
                spu_release_saved(ctx);                                \
        } else                                                         \
                *val = __get(ctx);                                     \
                                                                       \
        return 0;                                                      \
}                                                                      \
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
static int spufs_signal1_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal1_type_set(ctx, val);
        spu_release(ctx);

        return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
        return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
                       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
static int spufs_signal2_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal2_type_set(ctx, val);
        spu_release(ctx);

        return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
        return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
                       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
                                           unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
        .nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        file->private_data = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!i->i_openers++)
                ctx->mss = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}
static int
spufs_mss_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mss = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static const struct file_operations spufs_mss_fops = {
        .open    = spufs_mss_open,
        .release = spufs_mss_release,
        .mmap    = spufs_mss_mmap,
};
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
                                           unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
        .nopfn = spufs_psmap_mmap_nopfn,
};
/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_psmap_mmap_vmops;
        return 0;
}
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = i->i_ctx;
        if (!i->i_openers++)
                ctx->psmap = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}
static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->psmap = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static const struct file_operations spufs_psmap_fops = {
        .open    = spufs_psmap_open,
        .release = spufs_psmap_release,
        .mmap    = spufs_psmap_mmap,
};
#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
                                           unsigned long address)
{
        return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
        .nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);

        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        /* we don't want to deal with DMA into other processes */
        if (ctx->owner != current->mm)
                return -EINVAL;

        if (atomic_read(&inode->i_count) != 1)
                return -EBUSY;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->mfc = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}
static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mfc = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        if (!ctx)
                return;

        wake_up_all(&ctx->mfc_wq);

        pr_debug("%s %s\n", __func__, spu->name);
        if (ctx->mfc_fasync) {
                u32 free_elements, tagstatus;
                unsigned int mask;

                /* no need for spu_acquire in interrupt context */
                free_elements = ctx->ops->get_mfc_free_elements(ctx);
                tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

                mask = 0;
                if (free_elements & 0xffff)
                        mask |= POLLOUT;
                if (tagstatus & ctx->tagwait)
                        mask |= POLLIN;

                kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
        }
}
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
        /* See if any tag group is complete */
        /* FIXME we need locking around tagwait */
        *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
        ctx->tagwait &= ~*status;
        if (*status)
                return 1;

        /* enable interrupt waiting for any tag group,
           may silently fail if interrupts are already enabled */
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
        return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 status;
        ssize_t ret;

        if (size != 4)
                return -EINVAL;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        ret = -EINVAL;
        if (file->f_flags & O_NONBLOCK) {
                status = ctx->ops->read_mfc_tagstatus(ctx);
                if (!(status & ctx->tagwait))
                        ret = -EAGAIN;
                else
                        /* XXX(hch): shouldn't we clear ret here? */
                        ctx->tagwait &= ~status;
        } else {
                ret = spufs_wait(ctx->mfc_wq,
                           spufs_read_mfc_tagstatus(ctx, &status));
                if (ret)
                        goto out;
        }
        spu_release(ctx);

        ret = 4;
        if (copy_to_user(buffer, &status, 4))
                ret = -EFAULT;

out:
        return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
        pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

        switch (cmd->cmd) {
        case MFC_PUT_CMD:
        case MFC_PUTB_CMD:
        case MFC_PUTF_CMD:
        case MFC_GET_CMD:
        case MFC_GETB_CMD:
        case MFC_GETF_CMD:
                break;
        default:
                pr_debug("invalid DMA opcode %x\n", cmd->cmd);
                return -EIO;
        }

        if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
                pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
                                cmd->ea, cmd->lsa);
                return -EIO;
        }

        switch (cmd->size & 0xf) {
        case 1:
                break;
        case 2:
                if (cmd->lsa & 1)
                        goto error;
                break;
        case 4:
                if (cmd->lsa & 3)
                        goto error;
                break;
        case 8:
                if (cmd->lsa & 7)
                        goto error;
                break;
        case 0:
                if (cmd->lsa & 15)
                        goto error;
                break;
        error:
        default:
                pr_debug("invalid DMA alignment %x for size %x\n",
                        cmd->lsa & 0xf, cmd->size);
                return -EIO;
        }

        if (cmd->size > 16 * 1024) {
                pr_debug("invalid DMA size %x\n", cmd->size);
                return -EIO;
        }

        if (cmd->tag & 0xfff0) {
                /* we reserve the higher tag numbers for kernel use */
                pr_debug("invalid DMA tag\n");
                return -EIO;
        }

        if (cmd->class) {
                /* not supported in this version */
                pr_debug("invalid DMA class\n");
                return -EIO;
        }

        return 0;
}
static int spu_send_mfc_command(struct spu_context *ctx,
                                struct mfc_dma_command cmd,
                                int *error)
{
        *error = ctx->ops->send_mfc_command(ctx, &cmd);
        if (*error == -EAGAIN) {
                /* wait for any tag group to complete
                   so we have space for the new command */
                ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
                /* try again, because the queue might be
                   empty again */
                *error = ctx->ops->send_mfc_command(ctx, &cmd);
                if (*error == -EAGAIN)
                        return 0;
        }
        return 1;
}
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct mfc_dma_command cmd;
        int ret = -EINVAL;

        if (size != sizeof cmd)
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&cmd, buffer, sizeof cmd))
                goto out;

        ret = spufs_check_valid_dma(&cmd);
        if (ret)
                goto out;

        ret = spu_acquire(ctx);
        if (ret)
                goto out;

        ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
        if (ret)
                goto out;

        if (file->f_flags & O_NONBLOCK) {
                ret = ctx->ops->send_mfc_command(ctx, &cmd);
        } else {
                int status;
                ret = spufs_wait(ctx->mfc_wq,
                                 spu_send_mfc_command(ctx, cmd, &status));
                if (ret)
                        goto out;
                if (status)
                        ret = status;
        }

        if (ret)
                goto out_unlock;

        ctx->tagwait |= 1 << cmd.tag;
        ret = size;

out_unlock:
        spu_release(ctx);
out:
        return ret;
}
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        u32 free_elements, tagstatus;
        unsigned int mask;

        poll_wait(file, &ctx->mfc_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep. Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
        free_elements = ctx->ops->get_mfc_free_elements(ctx);
        tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
        spu_release(ctx);

        mask = 0;
        if (free_elements & 0xffff)
                mask |= POLLOUT | POLLWRNORM;
        if (tagstatus & ctx->tagwait)
                mask |= POLLIN | POLLRDNORM;

        pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
                free_elements, tagstatus, ctx->tagwait);

        return mask;
}
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                goto out;
#if 0
/* this currently hangs */
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
        if (ret)
                goto out;
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
        if (ret)
                goto out;
#else
        ret = 0;
#endif
        spu_release(ctx);
out:
        return ret;
}
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
                           int datasync)
{
        return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
static const struct file_operations spufs_mfc_fops = {
        .open    = spufs_mfc_open,
        .release = spufs_mfc_release,
        .read    = spufs_mfc_read,
        .write   = spufs_mfc_write,
        .poll    = spufs_mfc_poll,
        .flush   = spufs_mfc_flush,
        .fsync   = spufs_mfc_fsync,
        .fasync  = spufs_mfc_fasync,
        .mmap    = spufs_mfc_mmap,
};
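/*
 * Illustrative userspace sketch (field names and layout of struct
 * mfc_dma_command are assumptions here, as are the paths): queue a DMA
 * through "mfc", then read the tag status, which blocks until a
 * waited-on tag group completes:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0,		// local store address
 *		.ea   = ea_of(buf),	// effective address, same 16-byte
 *					// alignment as lsa (helper hypothetical)
 *		.size = 16384,		// <= 16k, aligned per check above
 *		.tag  = 0,		// tags 0-15 are available to users
 *		.cmd  = MFC_PUT_CMD,
 *	};
 *	u32 tagstatus;
 *	write(mfc_fd, &cmd, sizeof(cmd));	// validated, then queued
 *	read(mfc_fd, &tagstatus, 4);		// waits for completion
 */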
static int spufs_npc_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->npc_write(ctx, val);
        spu_release(ctx);

        return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
        return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE);
static int spufs_decr_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->decr.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
static int spufs_decr_status_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        if (val)
                ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
        else
                ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
        if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
                return SPU_DECR_STATUS_RUNNING;
        else
                return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
                       spufs_decr_status_set, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_event_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->event_mask.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
                       spufs_event_mask_set, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
static u64 spufs_event_status_get(struct spu_context *ctx)
{
        struct spu_state *state = &ctx->csa;
        u64 stat;
        stat = state->spu_chnlcnt_RW[0];
        if (stat)
                return state->spu_chnldata_RW[0];
        return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
                       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static int spufs_srr0_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->srr0.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static u64 spufs_id_get(struct spu_context *ctx)
{
        u64 num;

        if (ctx->state == SPU_STATE_RUNNABLE)
                num = ctx->spu->number;
        else
                num = (unsigned int)-1;

        return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
                       SPU_ATTR_ACQUIRE)
static u64 spufs_object_id_get(struct spu_context *ctx)
{
        /* FIXME: Should there really be no locking here? */
        return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
        struct spu_context *ctx = data;
        ctx->object_id = id;

        return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
                       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
static u64 spufs_lslr_get(struct spu_context *ctx)
{
        return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
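/*
 * Illustrative userspace sketch: the attribute files defined above
 * ("npc", "decr", "srr0", ...) read and write plain text; a written
 * string is parsed by simple_strtol() in spufs_attr_write(). The path
 * is hypothetical:
 *
 *	char buf[24];
 *	int fd = open("/spu/ctx/npc", O_RDWR);
 *	read(fd, buf, sizeof(buf));	// e.g. "0x1234\n"
 *	write(fd, "0x0", 3);		// reset the next program counter
 */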
static int spufs_info_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        return 0;
}
static int spufs_caps_show(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;

        if (!(ctx->flags & SPU_CREATE_NOSCHED))
                seq_puts(s, "sched\n");
        if (!(ctx->flags & SPU_CREATE_ISOLATE))
                seq_puts(s, "step\n");
        return 0;
}
static int spufs_caps_open(struct inode *inode, struct file *file)
{
        return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
        .open           = spufs_caps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the mbox */
        if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
                return 0;

        data = ctx->csa.prob.pu_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_mbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
static const struct file_operations spufs_mbox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_mbox_info_read,
        .llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the ibox */
        if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
                return 0;

        data = ctx->csa.priv2.puint_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_ibox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
static const struct file_operations spufs_ibox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_ibox_info_read,
        .llseek  = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        int i, cnt;
        u32 data[4];
        u32 wbox_stat;

        wbox_stat = ctx->csa.prob.mb_stat_R;
        cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
        for (i = 0; i < cnt; i++) {
                data[i] = ctx->csa.spu_mailbox_data[i];
        }

        return simple_read_from_buffer(buf, len, pos, &data,
                                cnt * sizeof(u32));
}
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_wbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
static const struct file_operations spufs_wbox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_wbox_info_read,
        .llseek  = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_dma_info info;
        struct mfc_cq_sr *qp, *spuqp;
        int i;

        info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
        info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
        info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
        info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
        info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
        for (i = 0; i < 16; i++) {
                qp = &info.dma_info_command_data[i];
                spuqp = &ctx->csa.priv2.spuq[i];

                qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
                              size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_dma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
static const struct file_operations spufs_dma_info_fops = {
        .open = spufs_info_open,
        .read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_proxydma_info info;
        struct mfc_cq_sr *qp, *puqp;
        int ret = sizeof info;
        int i;

        if (len < ret)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
        info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
        info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
        for (i = 0; i < 8; i++) {
                qp = &info.proxydma_info_command_data[i];
                puqp = &ctx->csa.priv2.puq[i];

                qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}
static const struct file_operations spufs_proxydma_info_fops = {
        .open = spufs_info_open,
        .read = spufs_proxydma_info_read,
};
static int spufs_show_tid(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;

        seq_printf(s, "%d\n", ctx->tid);
        return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
        return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}
static const struct file_operations spufs_tid_fops = {
        .open           = spufs_tid_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static const char *ctx_state_names[] = {
        "user", "system", "iowait", "loaded"
};
static unsigned long long spufs_acct_time(struct spu_context *ctx,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = ctx->stats.times[state];

        /*
         * In general, utilization statistics are updated by the controlling
         * thread as the spu context moves through various well defined
         * state transitions, but if the context is lazily loaded its
         * utilization statistics are not updated as the controlling thread
         * is not tightly coupled with the execution of the spu context.  We
         * calculate and apply the time delta from the last recorded state
         * of the spu context.
         */
        if (ctx->spu && ctx->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - ctx->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
        unsigned long long slb_flts = ctx->stats.slb_flt;

        if (ctx->state == SPU_STATE_RUNNABLE) {
                slb_flts += (ctx->spu->stats.slb_flt -
                             ctx->stats.slb_flt_base);
        }

        return slb_flts;
}
static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
        unsigned long long class2_intrs = ctx->stats.class2_intr;

        if (ctx->state == SPU_STATE_RUNNABLE) {
                class2_intrs += (ctx->spu->stats.class2_intr -
                                 ctx->stats.class2_intr_base);
        }

        return class2_intrs;
}
static int spufs_show_stat(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        seq_printf(s, "%s %llu %llu %llu %llu "
                "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                ctx_state_names[ctx->stats.util_state],
                spufs_acct_time(ctx, SPU_UTIL_USER),
                spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
                spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
                spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
                ctx->stats.vol_ctx_switch,
                ctx->stats.invol_ctx_switch,
                spufs_slb_flts(ctx),
                ctx->stats.hash_flt,
                ctx->stats.min_flt,
                ctx->stats.maj_flt,
                spufs_class2_intrs(ctx),
                ctx->stats.libassist);
        spu_release(ctx);
        return 0;
}
static int spufs_stat_open(struct inode *inode, struct file *file)
{
        return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
        .open           = spufs_stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
struct tree_descr spufs_dir_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
        { "mem",  &spufs_mem_fops,  0666, },
        { "regs", &spufs_regs_fops,  0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, },
        { "lslr", &spufs_lslr_ops, 0444, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "srr0", &spufs_srr0_ops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "event_status", &spufs_event_status_ops, 0444, },
        { "psmap", &spufs_psmap_fops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "mbox_info", &spufs_mbox_info_fops, 0444, },
        { "ibox_info", &spufs_ibox_info_fops, 0444, },
        { "wbox_info", &spufs_wbox_info_fops, 0444, },
        { "dma_info", &spufs_dma_info_fops, 0444, },
        { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
        {},
};
struct tree_descr spufs_dir_nosched_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
        { "mem",  &spufs_mem_fops,  0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_nosched_fops, 0222, },
        { "signal2", &spufs_signal2_nosched_fops, 0222, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "psmap", &spufs_psmap_fops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
        {},
};
struct spufs_coredump_reader spufs_coredump_read[] = {
        { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
        { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
        { "lslr", NULL, spufs_lslr_get, 19 },
        { "decr", NULL, spufs_decr_get, 19 },
        { "decr_status", NULL, spufs_decr_status_get, 19 },
        { "mem", __spufs_mem_read, NULL, LS_SIZE, },
        { "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
        { "signal1_type", NULL, spufs_signal1_type_get, 19 },
        { "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
        { "signal2_type", NULL, spufs_signal2_type_get, 19 },
        { "event_mask", NULL, spufs_event_mask_get, 19 },
        { "event_status", NULL, spufs_event_status_get, 19 },
        { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
        { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
        { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
        { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
        { "proxydma_info", __spufs_proxydma_info_read,
                           NULL, sizeof(struct spu_proxydma_info)},
        { "object-id", NULL, spufs_object_id_get, 19 },
        { "npc", NULL, spufs_npc_get, 19 },
        { NULL },
};