2 * SPU file system -- file contents
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31 #include <linux/seq_file.h>
34 #include <asm/semaphore.h>
36 #include <asm/spu_info.h>
37 #include <asm/uaccess.h>
41 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
45 spufs_mem_open(struct inode *inode, struct file *file)
47 struct spufs_inode_info *i = SPUFS_I(inode);
48 struct spu_context *ctx = i->i_ctx;
50 mutex_lock(&ctx->mapping_lock);
51 file->private_data = ctx;
53 ctx->local_store = inode->i_mapping;
54 mutex_unlock(&ctx->mapping_lock);
59 spufs_mem_release(struct inode *inode, struct file *file)
61 struct spufs_inode_info *i = SPUFS_I(inode);
62 struct spu_context *ctx = i->i_ctx;
64 mutex_lock(&ctx->mapping_lock);
66 ctx->local_store = NULL;
67 mutex_unlock(&ctx->mapping_lock);
72 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
73 size_t size, loff_t *pos)
75 char *local_store = ctx->ops->get_ls(ctx);
76 return simple_read_from_buffer(buffer, size, pos, local_store,
81 spufs_mem_read(struct file *file, char __user *buffer,
82 size_t size, loff_t *pos)
84 struct spu_context *ctx = file->private_data;
88 ret = __spufs_mem_read(ctx, buffer, size, pos);
94 spufs_mem_write(struct file *file, const char __user *buffer,
95 size_t size, loff_t *ppos)
97 struct spu_context *ctx = file->private_data;
106 if (size > LS_SIZE - pos)
107 size = LS_SIZE - pos;
110 local_store = ctx->ops->get_ls(ctx);
111 ret = copy_from_user(local_store + pos, buffer, size);
120 static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
121 unsigned long address)
123 struct spu_context *ctx = vma->vm_file->private_data;
124 unsigned long pfn, offset, addr0 = address;
125 #ifdef CONFIG_SPU_FS_64K_LS
126 struct spu_state *csa = &ctx->csa;
129 /* Check what page size we are using */
130 psize = get_slice_psize(vma->vm_mm, address);
132 /* Some sanity checking */
133 BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
135 /* Wow, 64K, cool, we need to align the address though */
136 if (csa->use_big_pages) {
137 BUG_ON(vma->vm_start & 0xffff);
138 address &= ~0xfffful;
140 #endif /* CONFIG_SPU_FS_64K_LS */
142 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
143 if (offset >= LS_SIZE)
146 pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
147 addr0, address, offset);
151 if (ctx->state == SPU_STATE_SAVED) {
152 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
154 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
156 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
158 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
160 vm_insert_pfn(vma, address, pfn);
164 return NOPFN_REFAULT;
168 static struct vm_operations_struct spufs_mem_mmap_vmops = {
169 .nopfn = spufs_mem_mmap_nopfn,
172 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
174 #ifdef CONFIG_SPU_FS_64K_LS
175 struct spu_context *ctx = file->private_data;
176 struct spu_state *csa = &ctx->csa;
178 /* Sanity check VMA alignment */
179 if (csa->use_big_pages) {
180 pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
181 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
183 if (vma->vm_start & 0xffff)
185 if (vma->vm_pgoff & 0xf)
188 #endif /* CONFIG_SPU_FS_64K_LS */
190 if (!(vma->vm_flags & VM_SHARED))
193 vma->vm_flags |= VM_IO | VM_PFNMAP;
194 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
197 vma->vm_ops = &spufs_mem_mmap_vmops;
201 #ifdef CONFIG_SPU_FS_64K_LS
202 unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
203 unsigned long len, unsigned long pgoff,
206 struct spu_context *ctx = file->private_data;
207 struct spu_state *csa = &ctx->csa;
209 	/* If not using big pages, fall back to the normal mm get_unmapped_area */
210 if (!csa->use_big_pages)
211 return current->mm->get_unmapped_area(file, addr, len,
214 	/* Else, try to obtain a 64K-page slice */
215 return slice_get_unmapped_area(addr, len, flags,
218 #endif /* CONFIG_SPU_FS_64K_LS */
220 static const struct file_operations spufs_mem_fops = {
221 .open = spufs_mem_open,
222 .release = spufs_mem_release,
223 .read = spufs_mem_read,
224 .write = spufs_mem_write,
225 .llseek = generic_file_llseek,
226 .mmap = spufs_mem_mmap,
227 #ifdef CONFIG_SPU_FS_64K_LS
228 .get_unmapped_area = spufs_get_unmapped_area,
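/*
 * Illustrative userspace sketch (not part of this file): how the per-context
 * "mem" file is typically used to access the 256 KiB SPU local store (size as
 * listed in the coredump table at the end of this file). The /spu mount point
 * and the context name are assumptions of this example; contexts are normally
 * created with spu_create(2).
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <stdio.h>
 *   #include <sys/mman.h>
 *   #include <unistd.h>
 *
 *   #define LS_BYTES (256 * 1024)   // SPU local store size
 *
 *   int dump_first_word(void)
 *   {
 *       uint32_t word;
 *       int fd = open("/spu/myctx/mem", O_RDWR);   // path is an assumption
 *       if (fd < 0)
 *           return -1;
 *
 *       // read()/write() are served by spufs_mem_read()/spufs_mem_write()
 *       if (pread(fd, &word, sizeof(word), 0) == sizeof(word))
 *           printf("LS[0] = 0x%08x\n", word);
 *
 *       // mmap() must be MAP_SHARED; spufs_mem_mmap() rejects private mappings
 *       void *ls = mmap(NULL, LS_BYTES, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *       if (ls != MAP_FAILED)
 *           munmap(ls, LS_BYTES);
 *
 *       close(fd);
 *       return 0;
 *   }
 */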
232 static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
233 unsigned long address,
234 unsigned long ps_offs,
235 unsigned long ps_size)
237 struct spu_context *ctx = vma->vm_file->private_data;
238 unsigned long area, offset = address - vma->vm_start;
241 offset += vma->vm_pgoff << PAGE_SHIFT;
242 if (offset >= ps_size)
245 /* error here usually means a signal.. we might want to test
246 * the error code more precisely though
248 ret = spu_acquire_runnable(ctx, 0);
250 return NOPFN_REFAULT;
252 area = ctx->spu->problem_phys + ps_offs;
253 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
256 return NOPFN_REFAULT;
260 static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
261 unsigned long address)
263 return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
266 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
267 .nopfn = spufs_cntl_mmap_nopfn,
271 * mmap support for problem state control area [0x4000 - 0x4fff].
273 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
275 if (!(vma->vm_flags & VM_SHARED))
278 vma->vm_flags |= VM_IO | VM_PFNMAP;
279 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
280 | _PAGE_NO_CACHE | _PAGE_GUARDED);
282 vma->vm_ops = &spufs_cntl_mmap_vmops;
285 #else /* SPUFS_MMAP_4K */
286 #define spufs_cntl_mmap NULL
287 #endif /* !SPUFS_MMAP_4K */
289 static u64 spufs_cntl_get(void *data)
291 struct spu_context *ctx = data;
295 val = ctx->ops->status_read(ctx);
301 static void spufs_cntl_set(void *data, u64 val)
303 struct spu_context *ctx = data;
306 ctx->ops->runcntl_write(ctx, val);
310 static int spufs_cntl_open(struct inode *inode, struct file *file)
312 struct spufs_inode_info *i = SPUFS_I(inode);
313 struct spu_context *ctx = i->i_ctx;
315 mutex_lock(&ctx->mapping_lock);
316 file->private_data = ctx;
318 ctx->cntl = inode->i_mapping;
319 mutex_unlock(&ctx->mapping_lock);
320 return simple_attr_open(inode, file, spufs_cntl_get,
321 spufs_cntl_set, "0x%08lx");
325 spufs_cntl_release(struct inode *inode, struct file *file)
327 struct spufs_inode_info *i = SPUFS_I(inode);
328 struct spu_context *ctx = i->i_ctx;
330 simple_attr_close(inode, file);
332 mutex_lock(&ctx->mapping_lock);
335 mutex_unlock(&ctx->mapping_lock);
339 static const struct file_operations spufs_cntl_fops = {
340 .open = spufs_cntl_open,
341 .release = spufs_cntl_release,
342 .read = simple_attr_read,
343 .write = simple_attr_write,
344 .mmap = spufs_cntl_mmap,
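/*
 * Illustrative only: "cntl" is a simple_attr file, so reads return the SPU
 * status word as text formatted with "0x%08lx" and writes are parsed as a
 * number and handed to runcntl_write(). The path below is an assumption, and
 * the meaning of individual run-control bits is defined by the hardware
 * documentation, not by this sketch.
 *
 *   #include <fcntl.h>
 *   #include <stdlib.h>
 *   #include <unistd.h>
 *
 *   unsigned long read_spu_status(const char *cntl_path)
 *   {
 *       char buf[32];
 *       unsigned long status = 0;
 *       int fd = open(cntl_path, O_RDWR);
 *       if (fd < 0)
 *           return 0;
 *       ssize_t n = read(fd, buf, sizeof(buf) - 1);   // e.g. "0x00000000"
 *       if (n > 0) {
 *           buf[n] = '\0';
 *           status = strtoul(buf, NULL, 0);
 *       }
 *       // writing a number routes it to the SPU run-control register
 *       write(fd, "1", 1);
 *       close(fd);
 *       return status;
 *   }
 */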
348 spufs_regs_open(struct inode *inode, struct file *file)
350 struct spufs_inode_info *i = SPUFS_I(inode);
351 file->private_data = i->i_ctx;
356 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
357 size_t size, loff_t *pos)
359 struct spu_lscsa *lscsa = ctx->csa.lscsa;
360 return simple_read_from_buffer(buffer, size, pos,
361 lscsa->gprs, sizeof lscsa->gprs);
365 spufs_regs_read(struct file *file, char __user *buffer,
366 size_t size, loff_t *pos)
369 struct spu_context *ctx = file->private_data;
371 spu_acquire_saved(ctx);
372 ret = __spufs_regs_read(ctx, buffer, size, pos);
373 spu_release_saved(ctx);
378 spufs_regs_write(struct file *file, const char __user *buffer,
379 size_t size, loff_t *pos)
381 struct spu_context *ctx = file->private_data;
382 struct spu_lscsa *lscsa = ctx->csa.lscsa;
385 size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
390 spu_acquire_saved(ctx);
392 ret = copy_from_user(lscsa->gprs + *pos - size,
393 buffer, size) ? -EFAULT : size;
395 spu_release_saved(ctx);
399 static const struct file_operations spufs_regs_fops = {
400 .open = spufs_regs_open,
401 .read = spufs_regs_read,
402 .write = spufs_regs_write,
403 .llseek = generic_file_llseek,
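/*
 * Illustrative only: "regs" exposes the 128 SPU general-purpose registers of
 * 16 bytes each (128 * 16 bytes, matching the coredump table below). Reads and
 * writes operate on the saved context image, so the kernel schedules the
 * context out first (spu_acquire_saved). The path is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   int read_gprs(const char *regs_path, uint8_t gprs[128][16])
 *   {
 *       int fd = open(regs_path, O_RDONLY);
 *       if (fd < 0)
 *           return -1;
 *       ssize_t n = pread(fd, gprs, 128 * 16, 0);
 *       close(fd);
 *       return n == 128 * 16 ? 0 : -1;
 *   }
 */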
407 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
408 size_t size, loff_t * pos)
410 struct spu_lscsa *lscsa = ctx->csa.lscsa;
411 return simple_read_from_buffer(buffer, size, pos,
412 &lscsa->fpcr, sizeof(lscsa->fpcr));
416 spufs_fpcr_read(struct file *file, char __user * buffer,
417 size_t size, loff_t * pos)
420 struct spu_context *ctx = file->private_data;
422 spu_acquire_saved(ctx);
423 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
424 spu_release_saved(ctx);
429 spufs_fpcr_write(struct file *file, const char __user * buffer,
430 size_t size, loff_t * pos)
432 struct spu_context *ctx = file->private_data;
433 struct spu_lscsa *lscsa = ctx->csa.lscsa;
436 size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
441 spu_acquire_saved(ctx);
443 ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
444 buffer, size) ? -EFAULT : size;
446 spu_release_saved(ctx);
450 static const struct file_operations spufs_fpcr_fops = {
451 .open = spufs_regs_open,
452 .read = spufs_fpcr_read,
453 .write = spufs_fpcr_write,
454 .llseek = generic_file_llseek,
457 /* generic open function for all pipe-like files */
458 static int spufs_pipe_open(struct inode *inode, struct file *file)
460 struct spufs_inode_info *i = SPUFS_I(inode);
461 file->private_data = i->i_ctx;
463 return nonseekable_open(inode, file);
467 * Read as many bytes from the mailbox as possible, until
468 * one of the conditions becomes true:
470 * - no more data available in the mailbox
471 * - end of the user provided buffer
472 * - end of the mapped area
474 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
475 size_t len, loff_t *pos)
477 struct spu_context *ctx = file->private_data;
478 u32 mbox_data, __user *udata;
484 if (!access_ok(VERIFY_WRITE, buf, len))
487 udata = (void __user *)buf;
490 for (count = 0; (count + 4) <= len; count += 4, udata++) {
492 ret = ctx->ops->mbox_read(ctx, &mbox_data);
497 * at the end of the mapped area, we can fault
498 * but still need to return the data we have
499 * read successfully so far.
501 ret = __put_user(mbox_data, udata);
516 static const struct file_operations spufs_mbox_fops = {
517 .open = spufs_pipe_open,
518 .read = spufs_mbox_read,
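/*
 * Illustrative only: reads from "mbox" return SPU-to-PPE mailbox data in
 * 4-byte units and stop early when the mailbox runs empty, the user buffer
 * ends, or a fault occurs (see the comment above spufs_mbox_read), so a short
 * read is normal. The path and the loop structure are assumptions of this
 * sketch.
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   void drain_mbox(const char *mbox_path)
 *   {
 *       uint32_t data;
 *       int fd = open(mbox_path, O_RDONLY);
 *       if (fd < 0)
 *           return;
 *       // each successful read yields one 4-byte mailbox entry
 *       while (read(fd, &data, sizeof(data)) == sizeof(data))
 *           printf("mbox: 0x%08x\n", data);
 *       close(fd);
 *   }
 */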
521 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
522 size_t len, loff_t *pos)
524 struct spu_context *ctx = file->private_data;
532 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
536 if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
542 static const struct file_operations spufs_mbox_stat_fops = {
543 .open = spufs_pipe_open,
544 .read = spufs_mbox_stat_read,
547 /* low-level ibox access function */
548 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
550 return ctx->ops->ibox_read(ctx, data);
553 static int spufs_ibox_fasync(int fd, struct file *file, int on)
555 struct spu_context *ctx = file->private_data;
557 return fasync_helper(fd, file, on, &ctx->ibox_fasync);
560 /* interrupt-level ibox callback function. */
561 void spufs_ibox_callback(struct spu *spu)
563 struct spu_context *ctx = spu->ctx;
565 wake_up_all(&ctx->ibox_wq);
566 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
570 * Read as many bytes from the interrupt mailbox as possible, until
571 * one of the conditions becomes true:
573 * - no more data available in the mailbox
574 * - end of the user provided buffer
575 * - end of the mapped area
577 * If the file is opened without O_NONBLOCK, we wait here until
578  * any data is available, but return as soon as we have been able to
579  * read something.
581 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
582 size_t len, loff_t *pos)
584 struct spu_context *ctx = file->private_data;
585 u32 ibox_data, __user *udata;
591 if (!access_ok(VERIFY_WRITE, buf, len))
594 udata = (void __user *)buf;
598 /* wait only for the first element */
600 if (file->f_flags & O_NONBLOCK) {
601 if (!spu_ibox_read(ctx, &ibox_data))
604 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
609 /* if we can't write at all, return -EFAULT */
610 count = __put_user(ibox_data, udata);
614 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
616 ret = ctx->ops->ibox_read(ctx, &ibox_data);
620 * at the end of the mapped area, we can fault
621 * but still need to return the data we have
622 * read successfully so far.
624 ret = __put_user(ibox_data, udata);
635 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
637 struct spu_context *ctx = file->private_data;
640 poll_wait(file, &ctx->ibox_wq, wait);
643 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
649 static const struct file_operations spufs_ibox_fops = {
650 .open = spufs_pipe_open,
651 .read = spufs_ibox_read,
652 .poll = spufs_ibox_poll,
653 .fasync = spufs_ibox_fasync,
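/*
 * Illustrative only: "ibox" is the interrupting mailbox. Without O_NONBLOCK a
 * read blocks until at least one entry arrives; poll() reports POLLIN via
 * mbox_stat_poll(), and SIGIO delivery is wired up through
 * spufs_ibox_fasync(). The path and the timeout are assumptions.
 *
 *   #include <fcntl.h>
 *   #include <poll.h>
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   int wait_for_ibox(const char *ibox_path, uint32_t *out, int timeout_ms)
 *   {
 *       struct pollfd pfd;
 *       int fd = open(ibox_path, O_RDONLY | O_NONBLOCK);
 *       if (fd < 0)
 *           return -1;
 *       pfd.fd = fd;
 *       pfd.events = POLLIN;
 *       if (poll(&pfd, 1, timeout_ms) == 1 && (pfd.revents & POLLIN))
 *           read(fd, out, sizeof(*out));    // one 4-byte entry
 *       close(fd);
 *       return 0;
 *   }
 */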
656 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
657 size_t len, loff_t *pos)
659 struct spu_context *ctx = file->private_data;
666 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
669 if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
675 static const struct file_operations spufs_ibox_stat_fops = {
676 .open = spufs_pipe_open,
677 .read = spufs_ibox_stat_read,
680 /* low-level mailbox write */
681 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
683 return ctx->ops->wbox_write(ctx, data);
686 static int spufs_wbox_fasync(int fd, struct file *file, int on)
688 struct spu_context *ctx = file->private_data;
691 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
696 /* interrupt-level wbox callback function. */
697 void spufs_wbox_callback(struct spu *spu)
699 struct spu_context *ctx = spu->ctx;
701 wake_up_all(&ctx->wbox_wq);
702 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
706 * Write as many bytes to the interrupt mailbox as possible, until
707 * one of the conditions becomes true:
709 * - the mailbox is full
710 * - end of the user provided buffer
711 * - end of the mapped area
713 * If the file is opened without O_NONBLOCK, we wait here until
714  * space is available, but return as soon as we have been able to
715  * write something.
717 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
718 size_t len, loff_t *pos)
720 struct spu_context *ctx = file->private_data;
721 u32 wbox_data, __user *udata;
727 udata = (void __user *)buf;
728 if (!access_ok(VERIFY_READ, buf, len))
731 if (__get_user(wbox_data, udata))
737 * make sure we can at least write one element, by waiting
738 * in case of !O_NONBLOCK
741 if (file->f_flags & O_NONBLOCK) {
742 if (!spu_wbox_write(ctx, wbox_data))
745 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
751 	/* write as much as possible */
752 for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
754 ret = __get_user(wbox_data, udata);
758 ret = spu_wbox_write(ctx, wbox_data);
768 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
770 struct spu_context *ctx = file->private_data;
773 poll_wait(file, &ctx->wbox_wq, wait);
776 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
782 static const struct file_operations spufs_wbox_fops = {
783 .open = spufs_pipe_open,
784 .write = spufs_wbox_write,
785 .poll = spufs_wbox_poll,
786 .fasync = spufs_wbox_fasync,
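/*
 * Illustrative only: writes to "wbox" feed the PPE-to-SPU mailbox in 4-byte
 * units. Without O_NONBLOCK the first element blocks until space is
 * available; with O_NONBLOCK a full mailbox makes the write fail, and poll()
 * can wait for POLLOUT (see spufs_wbox_poll). The path, and the assumption
 * that a full mailbox yields EAGAIN, are part of this sketch only.
 *
 *   #include <errno.h>
 *   #include <fcntl.h>
 *   #include <poll.h>
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   int send_to_spu(const char *wbox_path, uint32_t value)
 *   {
 *       struct pollfd pfd;
 *       int fd = open(wbox_path, O_WRONLY | O_NONBLOCK);
 *       if (fd < 0)
 *           return -1;
 *       while (write(fd, &value, sizeof(value)) != sizeof(value)) {
 *           if (errno != EAGAIN)    // assumed errno for a full mailbox
 *               break;
 *           pfd.fd = fd;
 *           pfd.events = POLLOUT;
 *           poll(&pfd, 1, -1);      // wait until the mailbox has room
 *       }
 *       close(fd);
 *       return 0;
 *   }
 */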
789 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
790 size_t len, loff_t *pos)
792 struct spu_context *ctx = file->private_data;
799 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
802 if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
808 static const struct file_operations spufs_wbox_stat_fops = {
809 .open = spufs_pipe_open,
810 .read = spufs_wbox_stat_read,
813 static int spufs_signal1_open(struct inode *inode, struct file *file)
815 struct spufs_inode_info *i = SPUFS_I(inode);
816 struct spu_context *ctx = i->i_ctx;
818 mutex_lock(&ctx->mapping_lock);
819 file->private_data = ctx;
821 ctx->signal1 = inode->i_mapping;
822 mutex_unlock(&ctx->mapping_lock);
823 return nonseekable_open(inode, file);
827 spufs_signal1_release(struct inode *inode, struct file *file)
829 struct spufs_inode_info *i = SPUFS_I(inode);
830 struct spu_context *ctx = i->i_ctx;
832 mutex_lock(&ctx->mapping_lock);
835 mutex_unlock(&ctx->mapping_lock);
839 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
840 size_t len, loff_t *pos)
848 if (ctx->csa.spu_chnlcnt_RW[3]) {
849 data = ctx->csa.spu_chnldata_RW[3];
856 if (copy_to_user(buf, &data, 4))
863 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
864 size_t len, loff_t *pos)
867 struct spu_context *ctx = file->private_data;
869 spu_acquire_saved(ctx);
870 ret = __spufs_signal1_read(ctx, buf, len, pos);
871 spu_release_saved(ctx);
876 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
877 size_t len, loff_t *pos)
879 struct spu_context *ctx;
882 ctx = file->private_data;
887 if (copy_from_user(&data, buf, 4))
891 ctx->ops->signal1_write(ctx, data);
897 static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
898 unsigned long address)
900 #if PAGE_SIZE == 0x1000
901 return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
902 #elif PAGE_SIZE == 0x10000
903 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
904 * signal 1 and 2 area
906 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
908 #error unsupported page size
912 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
913 .nopfn = spufs_signal1_mmap_nopfn,
916 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
918 if (!(vma->vm_flags & VM_SHARED))
921 vma->vm_flags |= VM_IO | VM_PFNMAP;
922 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
923 | _PAGE_NO_CACHE | _PAGE_GUARDED);
925 vma->vm_ops = &spufs_signal1_mmap_vmops;
929 static const struct file_operations spufs_signal1_fops = {
930 .open = spufs_signal1_open,
931 .release = spufs_signal1_release,
932 .read = spufs_signal1_read,
933 .write = spufs_signal1_write,
934 .mmap = spufs_signal1_mmap,
937 static const struct file_operations spufs_signal1_nosched_fops = {
938 .open = spufs_signal1_open,
939 .release = spufs_signal1_release,
940 .write = spufs_signal1_write,
941 .mmap = spufs_signal1_mmap,
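/*
 * Illustrative only: the signal notification files take a raw 32-bit value;
 * spufs_signal1_write() copies exactly 4 bytes from the user buffer into the
 * signal register (or into the saved context if the SPU is scheduled out).
 * The nosched variant above is write-only (no .read). The path is an
 * assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   int raise_signal1(const char *sig1_path, uint32_t value)
 *   {
 *       int fd = open(sig1_path, O_WRONLY);
 *       if (fd < 0)
 *           return -1;
 *       ssize_t n = write(fd, &value, sizeof(value));   // 4-byte value
 *       close(fd);
 *       return n == sizeof(value) ? 0 : -1;
 *   }
 */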
951 static int spufs_signal2_open(struct inode *inode, struct file *file)
953 struct spufs_inode_info *i = SPUFS_I(inode);
954 struct spu_context *ctx = i->i_ctx;
956 mutex_lock(&ctx->mapping_lock);
957 file->private_data = ctx;
959 ctx->signal2 = inode->i_mapping;
960 mutex_unlock(&ctx->mapping_lock);
961 return nonseekable_open(inode, file);
965 spufs_signal2_release(struct inode *inode, struct file *file)
967 struct spufs_inode_info *i = SPUFS_I(inode);
968 struct spu_context *ctx = i->i_ctx;
970 mutex_lock(&ctx->mapping_lock);
973 mutex_unlock(&ctx->mapping_lock);
977 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
978 size_t len, loff_t *pos)
986 if (ctx->csa.spu_chnlcnt_RW[4]) {
987 data = ctx->csa.spu_chnldata_RW[4];
994 if (copy_to_user(buf, &data, 4))
1001 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1002 size_t len, loff_t *pos)
1004 struct spu_context *ctx = file->private_data;
1007 spu_acquire_saved(ctx);
1008 ret = __spufs_signal2_read(ctx, buf, len, pos);
1009 spu_release_saved(ctx);
1014 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1015 size_t len, loff_t *pos)
1017 struct spu_context *ctx;
1020 ctx = file->private_data;
1025 if (copy_from_user(&data, buf, 4))
1029 ctx->ops->signal2_write(ctx, data);
1036 static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
1037 unsigned long address)
1039 #if PAGE_SIZE == 0x1000
1040 return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
1041 #elif PAGE_SIZE == 0x10000
1042 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1043 * signal 1 and 2 area
1045 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
1047 #error unsupported page size
1051 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
1052 .nopfn = spufs_signal2_mmap_nopfn,
1055 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1057 if (!(vma->vm_flags & VM_SHARED))
1060 vma->vm_flags |= VM_IO | VM_PFNMAP;
1061 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1062 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1064 vma->vm_ops = &spufs_signal2_mmap_vmops;
1067 #else /* SPUFS_MMAP_4K */
1068 #define spufs_signal2_mmap NULL
1069 #endif /* !SPUFS_MMAP_4K */
1071 static const struct file_operations spufs_signal2_fops = {
1072 .open = spufs_signal2_open,
1073 .release = spufs_signal2_release,
1074 .read = spufs_signal2_read,
1075 .write = spufs_signal2_write,
1076 .mmap = spufs_signal2_mmap,
1079 static const struct file_operations spufs_signal2_nosched_fops = {
1080 .open = spufs_signal2_open,
1081 .release = spufs_signal2_release,
1082 .write = spufs_signal2_write,
1083 .mmap = spufs_signal2_mmap,
1093 static void spufs_signal1_type_set(void *data, u64 val)
1095 struct spu_context *ctx = data;
1098 ctx->ops->signal1_type_set(ctx, val);
1102 static u64 __spufs_signal1_type_get(void *data)
1104 struct spu_context *ctx = data;
1105 return ctx->ops->signal1_type_get(ctx);
1108 static u64 spufs_signal1_type_get(void *data)
1110 struct spu_context *ctx = data;
1114 ret = __spufs_signal1_type_get(data);
1119 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1120 spufs_signal1_type_set, "%llu");
1122 static void spufs_signal2_type_set(void *data, u64 val)
1124 struct spu_context *ctx = data;
1127 ctx->ops->signal2_type_set(ctx, val);
1131 static u64 __spufs_signal2_type_get(void *data)
1133 struct spu_context *ctx = data;
1134 return ctx->ops->signal2_type_get(ctx);
1137 static u64 spufs_signal2_type_get(void *data)
1139 struct spu_context *ctx = data;
1143 ret = __spufs_signal2_type_get(data);
1148 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1149 spufs_signal2_type_set, "%llu");
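/*
 * Illustrative only: signal1_type and signal2_type are DEFINE_SIMPLE_ATTRIBUTE
 * files using the "%llu" format, so they are read and written as decimal
 * text. The value selects the signal notification mode (overwrite vs. logical
 * OR); which number means which mode is defined by the hardware architecture
 * documents, not by this sketch. The path is an assumption.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   unsigned long long get_sig_type(const char *type_path)
 *   {
 *       char buf[32];
 *       unsigned long long type = 0;
 *       int fd = open(type_path, O_RDWR);
 *       if (fd < 0)
 *           return 0;
 *       ssize_t n = read(fd, buf, sizeof(buf) - 1);     // e.g. "0\n"
 *       if (n > 0) {
 *           buf[n] = '\0';
 *           sscanf(buf, "%llu", &type);
 *       }
 *       write(fd, "1", 1);   // switch mode; value meaning per hardware docs
 *       close(fd);
 *       return type;
 *   }
 */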
1152 static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
1153 unsigned long address)
1155 return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
1158 static struct vm_operations_struct spufs_mss_mmap_vmops = {
1159 .nopfn = spufs_mss_mmap_nopfn,
1163  * mmap support for problem state MSS (multisource sync) area [0x0000 - 0x0fff].
1165 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1167 if (!(vma->vm_flags & VM_SHARED))
1170 vma->vm_flags |= VM_IO | VM_PFNMAP;
1171 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1172 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1174 vma->vm_ops = &spufs_mss_mmap_vmops;
1177 #else /* SPUFS_MMAP_4K */
1178 #define spufs_mss_mmap NULL
1179 #endif /* !SPUFS_MMAP_4K */
1181 static int spufs_mss_open(struct inode *inode, struct file *file)
1183 struct spufs_inode_info *i = SPUFS_I(inode);
1184 struct spu_context *ctx = i->i_ctx;
1186 file->private_data = i->i_ctx;
1188 mutex_lock(&ctx->mapping_lock);
1189 if (!i->i_openers++)
1190 ctx->mss = inode->i_mapping;
1191 mutex_unlock(&ctx->mapping_lock);
1192 return nonseekable_open(inode, file);
1196 spufs_mss_release(struct inode *inode, struct file *file)
1198 struct spufs_inode_info *i = SPUFS_I(inode);
1199 struct spu_context *ctx = i->i_ctx;
1201 mutex_lock(&ctx->mapping_lock);
1202 if (!--i->i_openers)
1204 mutex_unlock(&ctx->mapping_lock);
1208 static const struct file_operations spufs_mss_fops = {
1209 .open = spufs_mss_open,
1210 .release = spufs_mss_release,
1211 .mmap = spufs_mss_mmap,
1214 static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
1215 unsigned long address)
1217 return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
1220 static struct vm_operations_struct spufs_psmap_mmap_vmops = {
1221 .nopfn = spufs_psmap_mmap_nopfn,
1225 * mmap support for full problem state area [0x00000 - 0x1ffff].
1227 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1229 if (!(vma->vm_flags & VM_SHARED))
1232 vma->vm_flags |= VM_IO | VM_PFNMAP;
1233 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1234 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1236 vma->vm_ops = &spufs_psmap_mmap_vmops;
1240 static int spufs_psmap_open(struct inode *inode, struct file *file)
1242 struct spufs_inode_info *i = SPUFS_I(inode);
1243 struct spu_context *ctx = i->i_ctx;
1245 mutex_lock(&ctx->mapping_lock);
1246 file->private_data = i->i_ctx;
1247 if (!i->i_openers++)
1248 ctx->psmap = inode->i_mapping;
1249 mutex_unlock(&ctx->mapping_lock);
1250 return nonseekable_open(inode, file);
1254 spufs_psmap_release(struct inode *inode, struct file *file)
1256 struct spufs_inode_info *i = SPUFS_I(inode);
1257 struct spu_context *ctx = i->i_ctx;
1259 mutex_lock(&ctx->mapping_lock);
1260 if (!--i->i_openers)
1262 mutex_unlock(&ctx->mapping_lock);
1266 static const struct file_operations spufs_psmap_fops = {
1267 .open = spufs_psmap_open,
1268 .release = spufs_psmap_release,
1269 .mmap = spufs_psmap_mmap,
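/*
 * Illustrative only: "psmap" maps the whole 128 KiB problem state area in one
 * go. The sub-area offsets match the per-file mmap helpers above: MFC command
 * parameters at 0x3000, control area at 0x4000, signal notification 1 at
 * 0x14000 and signal notification 2 at 0x1c000. The path is an assumption and
 * the mapping must be MAP_SHARED.
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <sys/mman.h>
 *   #include <unistd.h>
 *
 *   #define PS_BYTES 0x20000
 *
 *   volatile uint8_t *map_problem_state(const char *psmap_path)
 *   {
 *       int fd = open(psmap_path, O_RDWR);
 *       if (fd < 0)
 *           return NULL;
 *       void *ps = mmap(NULL, PS_BYTES, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *       close(fd);      // the mapping stays valid after close
 *       if (ps == MAP_FAILED)
 *           return NULL;
 *       // e.g. the signal notification 1 area starts at offset 0x14000
 *       return (volatile uint8_t *)ps;
 *   }
 */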
1274 static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
1275 unsigned long address)
1277 return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
1280 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1281 .nopfn = spufs_mfc_mmap_nopfn,
1285  * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
1287 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1289 if (!(vma->vm_flags & VM_SHARED))
1292 vma->vm_flags |= VM_IO | VM_PFNMAP;
1293 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1294 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1296 vma->vm_ops = &spufs_mfc_mmap_vmops;
1299 #else /* SPUFS_MMAP_4K */
1300 #define spufs_mfc_mmap NULL
1301 #endif /* !SPUFS_MMAP_4K */
1303 static int spufs_mfc_open(struct inode *inode, struct file *file)
1305 struct spufs_inode_info *i = SPUFS_I(inode);
1306 struct spu_context *ctx = i->i_ctx;
1308 /* we don't want to deal with DMA into other processes */
1309 if (ctx->owner != current->mm)
1312 if (atomic_read(&inode->i_count) != 1)
1315 mutex_lock(&ctx->mapping_lock);
1316 file->private_data = ctx;
1317 if (!i->i_openers++)
1318 ctx->mfc = inode->i_mapping;
1319 mutex_unlock(&ctx->mapping_lock);
1320 return nonseekable_open(inode, file);
1324 spufs_mfc_release(struct inode *inode, struct file *file)
1326 struct spufs_inode_info *i = SPUFS_I(inode);
1327 struct spu_context *ctx = i->i_ctx;
1329 mutex_lock(&ctx->mapping_lock);
1330 if (!--i->i_openers)
1332 mutex_unlock(&ctx->mapping_lock);
1336 /* interrupt-level mfc callback function. */
1337 void spufs_mfc_callback(struct spu *spu)
1339 struct spu_context *ctx = spu->ctx;
1341 wake_up_all(&ctx->mfc_wq);
1343 pr_debug("%s %s\n", __FUNCTION__, spu->name);
1344 if (ctx->mfc_fasync) {
1345 u32 free_elements, tagstatus;
1348 /* no need for spu_acquire in interrupt context */
1349 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1350 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1353 if (free_elements & 0xffff)
1355 if (tagstatus & ctx->tagwait)
1358 kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
1362 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1364 	/* See if any tag group is complete */
1365 /* FIXME we need locking around tagwait */
1366 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1367 ctx->tagwait &= ~*status;
1371 /* enable interrupt waiting for any tag group,
1372 may silently fail if interrupts are already enabled */
1373 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1377 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1378 size_t size, loff_t *pos)
1380 struct spu_context *ctx = file->private_data;
1388 if (file->f_flags & O_NONBLOCK) {
1389 status = ctx->ops->read_mfc_tagstatus(ctx);
1390 if (!(status & ctx->tagwait))
1393 ctx->tagwait &= ~status;
1395 ret = spufs_wait(ctx->mfc_wq,
1396 spufs_read_mfc_tagstatus(ctx, &status));
1404 if (copy_to_user(buffer, &status, 4))
1411 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1413 pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1414 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1425 pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1429 	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1430 pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1435 switch (cmd->size & 0xf) {
1456 pr_debug("invalid DMA alignment %x for size %x\n",
1457 cmd->lsa & 0xf, cmd->size);
1461 if (cmd->size > 16 * 1024) {
1462 pr_debug("invalid DMA size %x\n", cmd->size);
1466 if (cmd->tag & 0xfff0) {
1467 /* we reserve the higher tag numbers for kernel use */
1468 pr_debug("invalid DMA tag\n");
1473 /* not supported in this version */
1474 pr_debug("invalid DMA class\n");
1481 static int spu_send_mfc_command(struct spu_context *ctx,
1482 struct mfc_dma_command cmd,
1485 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1486 if (*error == -EAGAIN) {
1487 /* wait for any tag group to complete
1488 so we have space for the new command */
1489 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1490 /* try again, because the queue might be
1492 *error = ctx->ops->send_mfc_command(ctx, &cmd);
1493 if (*error == -EAGAIN)
1499 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1500 size_t size, loff_t *pos)
1502 struct spu_context *ctx = file->private_data;
1503 struct mfc_dma_command cmd;
1506 if (size != sizeof cmd)
1510 if (copy_from_user(&cmd, buffer, sizeof cmd))
1513 ret = spufs_check_valid_dma(&cmd);
1517 ret = spu_acquire_runnable(ctx, 0);
1521 if (file->f_flags & O_NONBLOCK) {
1522 ret = ctx->ops->send_mfc_command(ctx, &cmd);
1525 ret = spufs_wait(ctx->mfc_wq,
1526 spu_send_mfc_command(ctx, cmd, &status));
1534 ctx->tagwait |= 1 << cmd.tag;
1543 static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
1545 struct spu_context *ctx = file->private_data;
1546 u32 free_elements, tagstatus;
1549 poll_wait(file, &ctx->mfc_wq, wait);
1552 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1553 free_elements = ctx->ops->get_mfc_free_elements(ctx);
1554 tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1558 if (free_elements & 0xffff)
1559 mask |= POLLOUT | POLLWRNORM;
1560 if (tagstatus & ctx->tagwait)
1561 mask |= POLLIN | POLLRDNORM;
1563 pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
1564 free_elements, tagstatus, ctx->tagwait);
1569 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1571 struct spu_context *ctx = file->private_data;
1576 /* this currently hangs */
1577 ret = spufs_wait(ctx->mfc_wq,
1578 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
1581 ret = spufs_wait(ctx->mfc_wq,
1582 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
1592 static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
1595 return spufs_mfc_flush(file, NULL);
1598 static int spufs_mfc_fasync(int fd, struct file *file, int on)
1600 struct spu_context *ctx = file->private_data;
1602 return fasync_helper(fd, file, on, &ctx->mfc_fasync);
1605 static const struct file_operations spufs_mfc_fops = {
1606 .open = spufs_mfc_open,
1607 .release = spufs_mfc_release,
1608 .read = spufs_mfc_read,
1609 .write = spufs_mfc_write,
1610 .poll = spufs_mfc_poll,
1611 .flush = spufs_mfc_flush,
1612 .fsync = spufs_mfc_fsync,
1613 .fasync = spufs_mfc_fasync,
1614 .mmap = spufs_mfc_mmap,
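/*
 * Illustrative only: a DMA request is queued by writing one
 * struct mfc_dma_command to "mfc", and completion is observed by reading a
 * 4-byte tag status word or by poll(). The constraints checked by
 * spufs_check_valid_dma() above apply: lsa and ea must agree in their low
 * four bits, the size must not exceed 16 KiB, and the tag must be below 16.
 * The struct layout and field order shown here are assumptions taken from the
 * spufs documentation; the authoritative definition lives in the kernel
 * headers.
 *
 *   #include <stdint.h>
 *   #include <unistd.h>
 *
 *   struct mfc_dma_command {        // layout assumed, see note above
 *       int32_t  pad;
 *       uint32_t lsa;               // local store address
 *       uint64_t ea;                // effective address
 *       uint16_t size;              // transfer size in bytes
 *       uint16_t tag;               // tag group, 0..15
 *       uint16_t class;             // class ID
 *       uint16_t cmd;               // DMA opcode, e.g. a get or put
 *   };
 *
 *   int dma_and_wait(int mfc_fd, struct mfc_dma_command *cmd)
 *   {
 *       uint32_t tagstatus;
 *       if (write(mfc_fd, cmd, sizeof(*cmd)) != sizeof(*cmd))
 *           return -1;
 *       // blocks until a tag group we queued has completed
 *       if (read(mfc_fd, &tagstatus, sizeof(tagstatus)) != sizeof(tagstatus))
 *           return -1;
 *       return (int)tagstatus;
 *   }
 */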
1617 static void spufs_npc_set(void *data, u64 val)
1619 struct spu_context *ctx = data;
1621 ctx->ops->npc_write(ctx, val);
1625 static u64 spufs_npc_get(void *data)
1627 struct spu_context *ctx = data;
1630 ret = ctx->ops->npc_read(ctx);
1634 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1637 static void spufs_decr_set(void *data, u64 val)
1639 struct spu_context *ctx = data;
1640 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1641 spu_acquire_saved(ctx);
1642 lscsa->decr.slot[0] = (u32) val;
1643 spu_release_saved(ctx);
1646 static u64 __spufs_decr_get(void *data)
1648 struct spu_context *ctx = data;
1649 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1650 return lscsa->decr.slot[0];
1653 static u64 spufs_decr_get(void *data)
1655 struct spu_context *ctx = data;
1657 spu_acquire_saved(ctx);
1658 ret = __spufs_decr_get(data);
1659 spu_release_saved(ctx);
1662 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1665 static void spufs_decr_status_set(void *data, u64 val)
1667 struct spu_context *ctx = data;
1668 spu_acquire_saved(ctx);
1670 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1672 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1673 spu_release_saved(ctx);
1676 static u64 __spufs_decr_status_get(void *data)
1678 struct spu_context *ctx = data;
1679 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1680 return SPU_DECR_STATUS_RUNNING;
1685 static u64 spufs_decr_status_get(void *data)
1687 struct spu_context *ctx = data;
1689 spu_acquire_saved(ctx);
1690 ret = __spufs_decr_status_get(data);
1691 spu_release_saved(ctx);
1694 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1695 spufs_decr_status_set, "0x%llx\n")
1697 static void spufs_event_mask_set(void *data, u64 val)
1699 struct spu_context *ctx = data;
1700 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1701 spu_acquire_saved(ctx);
1702 lscsa->event_mask.slot[0] = (u32) val;
1703 spu_release_saved(ctx);
1706 static u64 __spufs_event_mask_get(void *data)
1708 struct spu_context *ctx = data;
1709 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1710 return lscsa->event_mask.slot[0];
1713 static u64 spufs_event_mask_get(void *data)
1715 struct spu_context *ctx = data;
1717 spu_acquire_saved(ctx);
1718 ret = __spufs_event_mask_get(data);
1719 spu_release_saved(ctx);
1722 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1723 spufs_event_mask_set, "0x%llx\n")
1725 static u64 __spufs_event_status_get(void *data)
1727 struct spu_context *ctx = data;
1728 struct spu_state *state = &ctx->csa;
1730 stat = state->spu_chnlcnt_RW[0];
1732 return state->spu_chnldata_RW[0];
1736 static u64 spufs_event_status_get(void *data)
1738 struct spu_context *ctx = data;
1741 spu_acquire_saved(ctx);
1742 ret = __spufs_event_status_get(data);
1743 spu_release_saved(ctx);
1746 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1749 static void spufs_srr0_set(void *data, u64 val)
1751 struct spu_context *ctx = data;
1752 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1753 spu_acquire_saved(ctx);
1754 lscsa->srr0.slot[0] = (u32) val;
1755 spu_release_saved(ctx);
1758 static u64 spufs_srr0_get(void *data)
1760 struct spu_context *ctx = data;
1761 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1763 spu_acquire_saved(ctx);
1764 ret = lscsa->srr0.slot[0];
1765 spu_release_saved(ctx);
1768 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1771 static u64 spufs_id_get(void *data)
1773 struct spu_context *ctx = data;
1777 if (ctx->state == SPU_STATE_RUNNABLE)
1778 num = ctx->spu->number;
1780 num = (unsigned int)-1;
1785 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1787 static u64 __spufs_object_id_get(void *data)
1789 struct spu_context *ctx = data;
1790 return ctx->object_id;
1793 static u64 spufs_object_id_get(void *data)
1795 /* FIXME: Should there really be no locking here? */
1796 return __spufs_object_id_get(data);
1799 static void spufs_object_id_set(void *data, u64 id)
1801 struct spu_context *ctx = data;
1802 ctx->object_id = id;
1805 DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1806 spufs_object_id_set, "0x%llx\n");
1808 static u64 __spufs_lslr_get(void *data)
1810 struct spu_context *ctx = data;
1811 return ctx->csa.priv2.spu_lslr_RW;
1814 static u64 spufs_lslr_get(void *data)
1816 struct spu_context *ctx = data;
1819 spu_acquire_saved(ctx);
1820 ret = __spufs_lslr_get(data);
1821 spu_release_saved(ctx);
1825 DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1827 static int spufs_info_open(struct inode *inode, struct file *file)
1829 struct spufs_inode_info *i = SPUFS_I(inode);
1830 struct spu_context *ctx = i->i_ctx;
1831 file->private_data = ctx;
1835 static int spufs_caps_show(struct seq_file *s, void *private)
1837 struct spu_context *ctx = s->private;
1839 if (!(ctx->flags & SPU_CREATE_NOSCHED))
1840 seq_puts(s, "sched\n");
1841 if (!(ctx->flags & SPU_CREATE_ISOLATE))
1842 seq_puts(s, "step\n");
1846 static int spufs_caps_open(struct inode *inode, struct file *file)
1848 return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
1851 static const struct file_operations spufs_caps_fops = {
1852 .open = spufs_caps_open,
1854 .llseek = seq_lseek,
1855 .release = single_release,
1858 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1859 char __user *buf, size_t len, loff_t *pos)
1864 mbox_stat = ctx->csa.prob.mb_stat_R;
1865 if (mbox_stat & 0x0000ff) {
1866 data = ctx->csa.prob.pu_mb_R;
1869 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1872 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1873 size_t len, loff_t *pos)
1876 struct spu_context *ctx = file->private_data;
1878 if (!access_ok(VERIFY_WRITE, buf, len))
1881 spu_acquire_saved(ctx);
1882 spin_lock(&ctx->csa.register_lock);
1883 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1884 spin_unlock(&ctx->csa.register_lock);
1885 spu_release_saved(ctx);
1890 static const struct file_operations spufs_mbox_info_fops = {
1891 .open = spufs_info_open,
1892 .read = spufs_mbox_info_read,
1893 .llseek = generic_file_llseek,
1896 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1897 char __user *buf, size_t len, loff_t *pos)
1902 ibox_stat = ctx->csa.prob.mb_stat_R;
1903 if (ibox_stat & 0xff0000) {
1904 data = ctx->csa.priv2.puint_mb_R;
1907 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1910 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1911 size_t len, loff_t *pos)
1913 struct spu_context *ctx = file->private_data;
1916 if (!access_ok(VERIFY_WRITE, buf, len))
1919 spu_acquire_saved(ctx);
1920 spin_lock(&ctx->csa.register_lock);
1921 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1922 spin_unlock(&ctx->csa.register_lock);
1923 spu_release_saved(ctx);
1928 static const struct file_operations spufs_ibox_info_fops = {
1929 .open = spufs_info_open,
1930 .read = spufs_ibox_info_read,
1931 .llseek = generic_file_llseek,
1934 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1935 char __user *buf, size_t len, loff_t *pos)
1941 wbox_stat = ctx->csa.prob.mb_stat_R;
1942 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1943 for (i = 0; i < cnt; i++) {
1944 data[i] = ctx->csa.spu_mailbox_data[i];
1947 return simple_read_from_buffer(buf, len, pos, &data,
1951 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1952 size_t len, loff_t *pos)
1954 struct spu_context *ctx = file->private_data;
1957 if (!access_ok(VERIFY_WRITE, buf, len))
1960 spu_acquire_saved(ctx);
1961 spin_lock(&ctx->csa.register_lock);
1962 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1963 spin_unlock(&ctx->csa.register_lock);
1964 spu_release_saved(ctx);
1969 static const struct file_operations spufs_wbox_info_fops = {
1970 .open = spufs_info_open,
1971 .read = spufs_wbox_info_read,
1972 .llseek = generic_file_llseek,
1975 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1976 char __user *buf, size_t len, loff_t *pos)
1978 struct spu_dma_info info;
1979 struct mfc_cq_sr *qp, *spuqp;
1982 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1983 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1984 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1985 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1986 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1987 for (i = 0; i < 16; i++) {
1988 qp = &info.dma_info_command_data[i];
1989 spuqp = &ctx->csa.priv2.spuq[i];
1991 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1992 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1993 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1994 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1997 return simple_read_from_buffer(buf, len, pos, &info,
2001 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2002 size_t len, loff_t *pos)
2004 struct spu_context *ctx = file->private_data;
2007 if (!access_ok(VERIFY_WRITE, buf, len))
2010 spu_acquire_saved(ctx);
2011 spin_lock(&ctx->csa.register_lock);
2012 ret = __spufs_dma_info_read(ctx, buf, len, pos);
2013 spin_unlock(&ctx->csa.register_lock);
2014 spu_release_saved(ctx);
2019 static const struct file_operations spufs_dma_info_fops = {
2020 .open = spufs_info_open,
2021 .read = spufs_dma_info_read,
2024 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
2025 char __user *buf, size_t len, loff_t *pos)
2027 struct spu_proxydma_info info;
2028 struct mfc_cq_sr *qp, *puqp;
2029 int ret = sizeof info;
2035 if (!access_ok(VERIFY_WRITE, buf, len))
2038 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2039 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2040 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2041 for (i = 0; i < 8; i++) {
2042 qp = &info.proxydma_info_command_data[i];
2043 puqp = &ctx->csa.priv2.puq[i];
2045 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2046 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2047 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2048 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2051 return simple_read_from_buffer(buf, len, pos, &info,
2055 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2056 size_t len, loff_t *pos)
2058 struct spu_context *ctx = file->private_data;
2061 spu_acquire_saved(ctx);
2062 spin_lock(&ctx->csa.register_lock);
2063 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
2064 spin_unlock(&ctx->csa.register_lock);
2065 spu_release_saved(ctx);
2070 static const struct file_operations spufs_proxydma_info_fops = {
2071 .open = spufs_info_open,
2072 .read = spufs_proxydma_info_read,
2075 static int spufs_show_tid(struct seq_file *s, void *private)
2077 struct spu_context *ctx = s->private;
2079 seq_printf(s, "%d\n", ctx->tid);
2083 static int spufs_tid_open(struct inode *inode, struct file *file)
2085 return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2088 static const struct file_operations spufs_tid_fops = {
2089 .open = spufs_tid_open,
2091 .llseek = seq_lseek,
2092 .release = single_release,
2095 static const char *ctx_state_names[] = {
2096 "user", "system", "iowait", "loaded"
2099 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2100 enum spu_utilization_state state)
2103 unsigned long long time = ctx->stats.times[state];
2106 	 * In general, utilization statistics are updated by the controlling
2107 	 * thread as the spu context moves through various well-defined
2108 	 * state transitions. But if the context is lazily loaded, its
2109 	 * utilization statistics are not updated, because the controlling
2110 	 * thread is not tightly coupled with the execution of the spu
2111 	 * context. We therefore calculate and apply the time delta from the
2112 	 * last recorded state of the spu context.
2114 if (ctx->spu && ctx->stats.util_state == state) {
2116 time += timespec_to_ns(&ts) - ctx->stats.tstamp;
2119 return time / NSEC_PER_MSEC;
2122 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2124 unsigned long long slb_flts = ctx->stats.slb_flt;
2126 if (ctx->state == SPU_STATE_RUNNABLE) {
2127 slb_flts += (ctx->spu->stats.slb_flt -
2128 ctx->stats.slb_flt_base);
2134 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2136 unsigned long long class2_intrs = ctx->stats.class2_intr;
2138 if (ctx->state == SPU_STATE_RUNNABLE) {
2139 class2_intrs += (ctx->spu->stats.class2_intr -
2140 ctx->stats.class2_intr_base);
2143 return class2_intrs;
2147 static int spufs_show_stat(struct seq_file *s, void *private)
2149 struct spu_context *ctx = s->private;
2152 seq_printf(s, "%s %llu %llu %llu %llu "
2153 "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2154 ctx_state_names[ctx->stats.util_state],
2155 spufs_acct_time(ctx, SPU_UTIL_USER),
2156 spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2157 spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2158 spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2159 ctx->stats.vol_ctx_switch,
2160 ctx->stats.invol_ctx_switch,
2161 spufs_slb_flts(ctx),
2162 ctx->stats.hash_flt,
2165 spufs_class2_intrs(ctx),
2166 ctx->stats.libassist);
2171 static int spufs_stat_open(struct inode *inode, struct file *file)
2173 return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2176 static const struct file_operations spufs_stat_fops = {
2177 .open = spufs_stat_open,
2179 .llseek = seq_lseek,
2180 .release = single_release,
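/*
 * Illustrative only: "stat" is a seq_file with a single line whose leading
 * fields are the utilization state name followed by the four accumulated
 * times in milliseconds (spufs_acct_time divides by NSEC_PER_MSEC); further
 * counters follow on the same line in the order of the seq_printf above. The
 * path and the variable names in this sketch are assumptions.
 *
 *   #include <stdio.h>
 *
 *   int read_spu_stat(const char *stat_path)
 *   {
 *       char state[16];
 *       unsigned long long user_ms, system_ms, iowait_ms, loaded_ms;
 *       FILE *f = fopen(stat_path, "r");
 *       if (!f)
 *           return -1;
 *       if (fscanf(f, "%15s %llu %llu %llu %llu", state,
 *                  &user_ms, &system_ms, &iowait_ms, &loaded_ms) != 5) {
 *           fclose(f);
 *           return -1;
 *       }
 *       fclose(f);
 *       printf("%s: user=%llums system=%llums\n", state, user_ms, system_ms);
 *       return 0;
 *   }
 */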
2184 struct tree_descr spufs_dir_contents[] = {
2185 { "capabilities", &spufs_caps_fops, 0444, },
2186 { "mem", &spufs_mem_fops, 0666, },
2187 { "regs", &spufs_regs_fops, 0666, },
2188 { "mbox", &spufs_mbox_fops, 0444, },
2189 { "ibox", &spufs_ibox_fops, 0444, },
2190 { "wbox", &spufs_wbox_fops, 0222, },
2191 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2192 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2193 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2194 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2195 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2196 { "signal1_type", &spufs_signal1_type, 0666, },
2197 { "signal2_type", &spufs_signal2_type, 0666, },
2198 { "cntl", &spufs_cntl_fops, 0666, },
2199 { "fpcr", &spufs_fpcr_fops, 0666, },
2200 { "lslr", &spufs_lslr_ops, 0444, },
2201 { "mfc", &spufs_mfc_fops, 0666, },
2202 { "mss", &spufs_mss_fops, 0666, },
2203 { "npc", &spufs_npc_ops, 0666, },
2204 { "srr0", &spufs_srr0_ops, 0666, },
2205 { "decr", &spufs_decr_ops, 0666, },
2206 { "decr_status", &spufs_decr_status_ops, 0666, },
2207 { "event_mask", &spufs_event_mask_ops, 0666, },
2208 { "event_status", &spufs_event_status_ops, 0444, },
2209 { "psmap", &spufs_psmap_fops, 0666, },
2210 { "phys-id", &spufs_id_ops, 0666, },
2211 { "object-id", &spufs_object_id_ops, 0666, },
2212 { "mbox_info", &spufs_mbox_info_fops, 0444, },
2213 { "ibox_info", &spufs_ibox_info_fops, 0444, },
2214 { "wbox_info", &spufs_wbox_info_fops, 0444, },
2215 { "dma_info", &spufs_dma_info_fops, 0444, },
2216 { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
2217 { "tid", &spufs_tid_fops, 0444, },
2218 { "stat", &spufs_stat_fops, 0444, },
2222 struct tree_descr spufs_dir_nosched_contents[] = {
2223 { "capabilities", &spufs_caps_fops, 0444, },
2224 { "mem", &spufs_mem_fops, 0666, },
2225 { "mbox", &spufs_mbox_fops, 0444, },
2226 { "ibox", &spufs_ibox_fops, 0444, },
2227 { "wbox", &spufs_wbox_fops, 0222, },
2228 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
2229 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
2230 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2231 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2232 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2233 { "signal1_type", &spufs_signal1_type, 0666, },
2234 { "signal2_type", &spufs_signal2_type, 0666, },
2235 { "mss", &spufs_mss_fops, 0666, },
2236 { "mfc", &spufs_mfc_fops, 0666, },
2237 { "cntl", &spufs_cntl_fops, 0666, },
2238 { "npc", &spufs_npc_ops, 0666, },
2239 { "psmap", &spufs_psmap_fops, 0666, },
2240 { "phys-id", &spufs_id_ops, 0666, },
2241 { "object-id", &spufs_object_id_ops, 0666, },
2242 { "tid", &spufs_tid_fops, 0444, },
2243 { "stat", &spufs_stat_fops, 0444, },
2247 struct spufs_coredump_reader spufs_coredump_read[] = {
2248 { "regs", __spufs_regs_read, NULL, 128 * 16 },
2249 { "fpcr", __spufs_fpcr_read, NULL, 16 },
2250 { "lslr", NULL, __spufs_lslr_get, 11 },
2251 { "decr", NULL, __spufs_decr_get, 11 },
2252 { "decr_status", NULL, __spufs_decr_status_get, 11 },
2253 { "mem", __spufs_mem_read, NULL, 256 * 1024, },
2254 { "signal1", __spufs_signal1_read, NULL, 4 },
2255 { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
2256 { "signal2", __spufs_signal2_read, NULL, 4 },
2257 { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
2258 { "event_mask", NULL, __spufs_event_mask_get, 8 },
2259 { "event_status", NULL, __spufs_event_status_get, 8 },
2260 { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
2261 { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
2262 { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
2263 { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
2264 { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
2265 { "object-id", NULL, __spufs_object_id_get, 19 },
2268 int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;