/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->local_store = inode->i_mapping;
	return 0;
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	int ret;

	spu_acquire(ctx);

	local_store = ctx->ops->get_ls(ctx);
	ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);

	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	int ret;

	size = min_t(ssize_t, LS_SIZE - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire(ctx);

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static struct page *
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
		      unsigned long address, int *type)
{
	struct page *page = NOPAGE_SIGBUS;

	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	offset += vma->vm_pgoff << PAGE_SHIFT;

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					| _PAGE_NO_CACHE | _PAGE_GUARDED);
		page = pfn_to_page((ctx->spu->local_store_phys + offset)
				   >> PAGE_SHIFT);
	}
	spu_release(ctx);

	if (type)
		*type = VM_FAULT_MINOR;

	page_cache_get(page);
	return page;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopage = spufs_mem_mmap_nopage,
};

static int
spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.read	 = spufs_mem_read,
	.write	 = spufs_mem_write,
	.llseek  = generic_file_llseek,
	.mmap	 = spufs_mem_mmap,
};

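/*
 * Usage sketch (userspace, not part of this file): "mem" exposes the
 * 256 kB local store (LS_SIZE, 0x40000 bytes) as a regular file; the
 * mount point and context name below are illustrative assumptions.
 *
 *	int fd = open("/spu/ctx1/mem", O_RDWR);
 *	char buf[128];
 *	pread(fd, buf, sizeof(buf), 0);		// read start of local store
 *	pwrite(fd, buf, sizeof(buf), 0x100);	// patch 128 bytes at 0x100
 *	void *ls = mmap(NULL, 0x40000, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);	// MAP_SHARED is mandatory here
 */
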
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type, unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct page *page = NOPAGE_SIGBUS;
	int fault_type = VM_FAULT_SIGBUS;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	unsigned long area;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		goto out;

	ret = spu_acquire_runnable(ctx);
	if (ret)
		goto out;

	area = ctx->spu->problem_phys + ps_offs;
	page = pfn_to_page((area + offset) >> PAGE_SHIFT);
	fault_type = VM_FAULT_MINOR;
	page_cache_get(page);

	spu_release(ctx);

out:
	if (type)
		*type = fault_type;

	return page;
}

#if SPUFS_MMAP_4K
static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopage = spufs_cntl_mmap_nopage,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static u64 spufs_cntl_get(void *data)
{
	struct spu_context *ctx = data;
	u64 val;

	spu_acquire(ctx);
	val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return val;
}

static void spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->cntl = inode->i_mapping;
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = simple_attr_close,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};

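/*
 * Usage sketch (userspace, illustrative path): "cntl" is a text
 * attribute; a read returns the SPU status register formatted as
 * "0x%08lx", a write is parsed as a number and stored into the run
 * control register.
 *
 *	char buf[16];
 *	int fd = open("/spu/ctx1/cntl", O_RDWR);
 *	read(fd, buf, sizeof(buf));	// e.g. "0x00000000"
 *	write(fd, "1", 1);		// 1 == SPU_RUNCNTL_RUNNABLE
 */
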
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);

	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

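/*
 * Usage sketch (userspace, illustrative path): "regs" holds the 128
 * general purpose registers, 16 bytes each; reading forces the context
 * into the saved state first (spu_acquire_saved above).
 *
 *	uint8_t gprs[128][16];
 *	int fd = open("/spu/ctx1/regs", O_RDONLY);
 *	read(fd, gprs, sizeof(gprs));	// all of $0 through $127
 */
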
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	spu_acquire_saved(ctx);

	ret = simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));

	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

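/*
 * Usage sketch (userspace, illustrative path): "mbox" never blocks;
 * reads shorter than 4 bytes fail with EINVAL and an empty mailbox
 * yields EAGAIN, per the loop above.
 *
 *	uint32_t data;
 *	int fd = open("/spu/ctx1/mbox", O_RDONLY);
 *	ssize_t n = read(fd, &data, 4);
 *	if (n == 4)
 *		printf("mbox: %#x\n", data);
 *	else if (n < 0 && errno == EAGAIN)
 *		;	// nothing queued by the SPU yet
 */
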
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

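/*
 * Usage sketch (userspace, illustrative path): "ibox" honors O_NONBLOCK
 * and can be multiplexed with poll(); POLLIN means at least one word
 * can be read from the interrupt mailbox.
 *
 *	struct pollfd pfd = {
 *		.fd = open("/spu/ctx1/ibox", O_RDONLY | O_NONBLOCK),
 *		.events = POLLIN,
 *	};
 *	uint32_t data;
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(pfd.fd, &data, 4);
 */
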
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

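/*
 * Usage sketch (userspace, illustrative path): writes to "wbox" must be
 * multiples of 4 bytes; without O_NONBLOCK the first word may block
 * until the SPU drains the mailbox, further words are written only as
 * space allows, so the short-count return must be checked.
 *
 *	uint32_t msg[2] = { 0x1, 0x2 };
 *	int fd = open("/spu/ctx1/wbox", O_WRONLY);
 *	ssize_t n = write(fd, msg, sizeof(msg));	// 4 or 8 on success
 */
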
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal1 = inode->i_mapping;
	return nonseekable_open(inode, file);
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	spu_acquire_saved(ctx);
	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}
	spu_release(ctx);

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopage = spufs_signal1_mmap_nopage,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

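/*
 * Usage sketch (userspace, illustrative path): a 4-byte write raises a
 * signal notification; whether the value overwrites or is ORed into
 * the register depends on the mode selected via "signal1_type" below.
 *
 *	uint32_t sig = 1;
 *	int fd = open("/spu/ctx1/signal1", O_WRONLY);
 *	write(fd, &sig, 4);
 */
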
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal2 = inode->i_mapping;
	return nonseekable_open(inode, file);
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	spu_acquire_saved(ctx);
	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}
	spu_release(ctx);

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopage = spufs_signal2_mmap_nopage,
};

#if SPUFS_MMAP_4K
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal1_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
					spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = ctx->ops->signal2_type_get(ctx);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
					spufs_signal2_type_set, "%llu");

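/*
 * Usage sketch (userspace, illustrative path): the *_type attributes
 * are text files parsed as decimal. The meaning of the values
 * (0 = overwrite mode, 1 = logical OR mode) follows the signal
 * notification register definition; treat it as an assumption here.
 *
 *	int fd = open("/spu/ctx1/signal1_type", O_WRONLY);
 *	write(fd, "1", 1);	// accumulate signal bits with OR
 */
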
#if SPUFS_MMAP_4K
static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopage = spufs_mss_mmap_nopage,
};

/*
 * mmap support for problem state MSS area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);

	file->private_data = i->i_ctx;
	return nonseekable_open(inode, file);
}

static struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.mmap	 = spufs_mss_mmap,
};

static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopage = spufs_psmap_mmap_nopage,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);

	file->private_data = i->i_ctx;
	return nonseekable_open(inode, file);
}

static struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.mmap	 = spufs_psmap_mmap,
};

#if SPUFS_MMAP_4K
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
{
	return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopage = spufs_mfc_mmap_nopage,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	file->private_data = ctx;
	return nonseekable_open(inode, file);
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is a tag group that is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait)) {
			ret = -EAGAIN;
		} else {
			ctx->tagwait &= ~status;
			ret = 0;
		}
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

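/*
 * Example of a command that passes the checks above (addresses are made
 * up): lsa and ea agree in their low four bits, the size is a 16-byte
 * multiple below 16 kB, the tag is below 16 and the class ID is zero.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa   = 0x1000,	// local store offset
 *		.ea    = 0x10020000,	// effective address, same alignment
 *		.size  = 0x2000,	// 8 kB
 *		.tag   = 5,		// tag group 0..15
 *		.class = 0,
 *		.cmd   = MFC_GET_CMD,	// main storage -> local store
 *	};
 */
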
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	spu_acquire_runnable(ctx);
	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out:
	return ret;
}

static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};

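/*
 * Usage sketch (userspace, illustrative path): queue a DMA through the
 * "mfc" file and wait for its tag group; writing the command structure
 * enqueues it, reading returns a 4-byte mask of the completed tag
 * groups that were being waited on.
 *
 *	int fd = open("/spu/ctx1/mfc", O_RDWR);
 *	write(fd, &cmd, sizeof(cmd));	// cmd as in the example above
 *	uint32_t tagmask;
 *	read(fd, &tagmask, 4);		// blocks until tag group 5 is done
 */
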
static int spufs_recycle_open(struct inode *inode, struct file *file)
{
	file->private_data = SPUFS_I(inode)->i_ctx;
	return nonseekable_open(inode, file);
}

static ssize_t spufs_recycle_write(struct file *file,
		const char __user *buffer, size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		return -EINVAL;

	if (size < 1)
		return -EINVAL;

	ret = spu_recycle_isolated(ctx);
	if (ret)
		return ret;
	return size;
}

static struct file_operations spufs_recycle_fops = {
	.open	 = spufs_recycle_open,
	.write	 = spufs_recycle_write,
};

static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}

static u64 spufs_npc_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire(ctx);
	ret = ctx->ops->npc_read(ctx);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
			"0x%llx\n")

static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_decr_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->decr.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
			"0x%llx\n")

static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr_status.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_decr_status_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->decr_status.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
			spufs_decr_status_set, "0x%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_event_mask_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->event_mask.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
			spufs_event_mask_set, "0x%llx\n")

static u64 spufs_event_status_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_state *state = &ctx->csa;
	u64 ret = 0;
	u64 stat;

	spu_acquire_saved(ctx);
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		ret = state->spu_chnldata_RW[0];
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
			NULL, "0x%llx\n")

static void spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release(ctx);
}

static u64 spufs_srr0_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->srr0.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
			"0x%llx\n")

static u64 spufs_id_get(void *data)
{
	struct spu_context *ctx = data;
	u64 num;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;
	spu_release(ctx);

	return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")

static u64 spufs_object_id_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->object_id;
}

static void spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
}

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		spufs_object_id_set, "0x%llx\n");

static u64 spufs_lslr_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire_saved(ctx);
	ret = ctx->csa.priv2.spu_lslr_RW;
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	return 0;
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;
	u32 data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		data = ctx->csa.prob.pu_mb_R;
	}
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;
	u32 data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ibox_stat = ctx->csa.prob.mb_stat_R;
	if (ibox_stat & 0xff0000) {
		data = ctx->csa.priv2.puint_mb_R;
	}
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = (wbox_stat & 0x00ff00) >> 8;
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_proxydma_info info;
	int ret = sizeof info;
	struct mfc_cq_sr *qp, *puqp;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	if (copy_to_user(buf, &info, sizeof info))
		ret = -EFAULT;

	return ret;
}

static struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{},
};

struct tree_descr spufs_dir_nosched_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "recycle", &spufs_recycle_fops, 0222, },
	{},
};