/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/uaccess.h>

#include "spufs.h"
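/*
 * Several of the problem-state areas handled below are only one 4k page
 * long.  They get an mmap handler of their own only when the kernel page
 * size is 4k as well (SPUFS_MMAP_4K); with larger pages the corresponding
 * mmap callback is defined to NULL further down.
 */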
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
spufs_mem_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->local_store = inode->i_mapping;

spufs_mem_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
	struct spu_context *ctx = file->private_data;

	local_store = ctx->ops->get_ls(ctx);
	ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);

spufs_mem_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
	struct spu_context *ctx = file->private_data;

	size = min_t(ssize_t, LS_SIZE - *pos, size);
	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + *pos - size,
			     buffer, size) ? -EFAULT : size;
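/*
 * nopage handler for mappings of the SPU local store.  While the context
 * is saved (SPU_STATE_SAVED), faults are satisfied from the local store
 * copy in the context save area and the pages stay cacheable; while the
 * context is loaded on an SPU, they resolve to the physical local store
 * and are mapped no-cache and guarded.
 */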
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
			unsigned long address, int *type)
	struct page *page = NOPAGE_SIGBUS;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					| _PAGE_NO_CACHE | _PAGE_GUARDED);
		page = pfn_to_page((ctx->spu->local_store_phys + offset)

	*type = VM_FAULT_MINOR;

	page_cache_get(page);

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopage = spufs_mem_mmap_nopage,

spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))

	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)

	vma->vm_ops = &spufs_mem_mmap_vmops;

static struct file_operations spufs_mem_fops = {
	.open = spufs_mem_open,
	.read = spufs_mem_read,
	.write = spufs_mem_write,
	.llseek = generic_file_llseek,
	.mmap = spufs_mem_mmap,
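/*
 * Common nopage handler for the problem-state mappings below: the faulting
 * offset is checked against the size of the area, the context is made
 * runnable so that a physical SPU is assigned, and the fault resolves into
 * that SPU's problem state at offset ps_offs.
 */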
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type, unsigned long ps_offs,
				    unsigned long ps_size)
	struct page *page = NOPAGE_SIGBUS;
	int fault_type = VM_FAULT_SIGBUS;
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)

	ret = spu_acquire_runnable(ctx);

	area = ctx->spu->problem_phys + ps_offs;
	page = pfn_to_page((area + offset) >> PAGE_SHIFT);
	fault_type = VM_FAULT_MINOR;
	page_cache_get(page);

static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
	return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopage = spufs_cntl_mmap_nopage,

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static u64 spufs_cntl_get(void *data)
	struct spu_context *ctx = data;

	val = ctx->ops->status_read(ctx);

static void spufs_cntl_set(void *data, u64 val)
	struct spu_context *ctx = data;

	ctx->ops->runcntl_write(ctx, val);

static int spufs_cntl_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->cntl = inode->i_mapping;
	return simple_attr_open(inode, file, spufs_cntl_get,
				spufs_cntl_set, "0x%08lx");

static struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,

spufs_regs_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	ret = simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);

spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);

	spu_acquire_saved(ctx);
	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

static struct file_operations spufs_regs_fops = {
	.open = spufs_regs_open,
	.read = spufs_regs_read,
	.write = spufs_regs_write,
	.llseek = generic_file_llseek,

spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	ret = simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));

spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);

	spu_acquire_saved(ctx);
	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

static struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;

	if (!access_ok(VERIFY_WRITE, buf, len))

	udata = (void __user *)buf;
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		ret = ctx->ops->mbox_read(ctx, &mbox_data);

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);

static struct file_operations spufs_mbox_fops = {
	.open = spufs_pipe_open,
	.read = spufs_mbox_read,
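/*
 * Illustrative use from user space (a sketch only; the mount point,
 * context name and handler below are made up): each read from "mbox"
 * copies out as many complete 32-bit entries as are currently pending,
 * so reading into a single u32 retrieves one mailbox entry at a time.
 *
 *	int fd = open("/spu/my_context/mbox", O_RDONLY);
 *	u32 data;
 *	if (read(fd, &data, sizeof(data)) == 4)
 *		handle_mailbox_entry(data);
 */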
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx = file->private_data;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))

static struct file_operations spufs_mbox_stat_fops = {
	.open = spufs_pipe_open,
	.read = spufs_mbox_stat_read,

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
	return ctx->ops->ibox_read(ctx, data);

static int spufs_ibox_fasync(int fd, struct file *file, int on)
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;

	if (!access_ok(VERIFY_WRITE, buf, len))

	udata = (void __user *)buf;

	/* wait only for the first element */
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		ret = ctx->ops->ibox_read(ctx, &ibox_data);

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
	struct spu_context *ctx = file->private_data;

	poll_wait(file, &ctx->ibox_wq, wait);

	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);

static struct file_operations spufs_ibox_fops = {
	.open = spufs_pipe_open,
	.read = spufs_ibox_read,
	.poll = spufs_ibox_poll,
	.fasync = spufs_ibox_fasync,

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx = file->private_data;

	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))

static struct file_operations spufs_ibox_stat_fops = {
	.open = spufs_pipe_open,
	.read = spufs_ibox_stat_read,

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
	return ctx->ops->wbox_write(ctx, data);

static int spufs_wbox_fasync(int fd, struct file *file, int on)
	struct spu_context *ctx = file->private_data;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
/*
 * Write as many bytes to the inbound mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))

	if (__get_user(wbox_data, udata))

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		ret = __get_user(wbox_data, udata);

		ret = spu_wbox_write(ctx, wbox_data);

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
	struct spu_context *ctx = file->private_data;

	poll_wait(file, &ctx->wbox_wq, wait);

	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);

static struct file_operations spufs_wbox_fops = {
	.open = spufs_pipe_open,
	.write = spufs_wbox_write,
	.poll = spufs_wbox_poll,
	.fasync = spufs_wbox_fasync,

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx = file->private_data;

	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))

static struct file_operations spufs_wbox_stat_fops = {
	.open = spufs_pipe_open,
	.read = spufs_wbox_stat_read,
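/*
 * The mbox_stat, ibox_stat and wbox_stat files all read the SPU mailbox
 * status word: mbox_stat returns the number of entries pending in the
 * outbound mailbox, ibox_stat the number pending in the interrupting
 * outbound mailbox, and wbox_stat the number of free slots in the
 * inbound (write) mailbox, each as a single 32-bit value.
 */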
static int spufs_signal1_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal1 = inode->i_mapping;
	return nonseekable_open(inode, file);

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx = file->private_data;

	data = ctx->ops->signal1_read(ctx);

	if (copy_to_user(buf, &data, 4))

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx;

	ctx = file->private_data;

	if (copy_from_user(&data, buf, 4))

	ctx->ops->signal1_write(ctx, data);

static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#error unsupported page size

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopage = spufs_signal1_mmap_nopage,

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;

static struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,

static int spufs_signal2_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->signal2 = inode->i_mapping;
	return nonseekable_open(inode, file);

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx;

	ctx = file->private_data;

	data = ctx->ops->signal2_read(ctx);

	if (copy_to_user(buf, &data, 4))

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
	struct spu_context *ctx;

	ctx = file->private_data;

	if (copy_from_user(&data, buf, 4))

	ctx->ops->signal2_write(ctx, data);

static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
#error unsupported page size

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopage = spufs_signal2_mmap_nopage,

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,

static void spufs_signal1_type_set(void *data, u64 val)
	struct spu_context *ctx = data;

	ctx->ops->signal1_type_set(ctx, val);

static u64 spufs_signal1_type_get(void *data)
	struct spu_context *ctx = data;

	ret = ctx->ops->signal1_type_get(ctx);

DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
			spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
	struct spu_context *ctx = data;

	ctx->ops->signal2_type_set(ctx, val);

static u64 spufs_signal2_type_get(void *data)
	struct spu_context *ctx = data;

	ret = ctx->ops->signal2_type_get(ctx);

DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
			spufs_signal2_type_set, "%llu");

static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopage = spufs_mss_mmap_nopage,
/*
 * mmap support for the problem state MSS (multisource sync) area
 * [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);

	file->private_data = i->i_ctx;
	return nonseekable_open(inode, file);

static struct file_operations spufs_mss_fops = {
	.open = spufs_mss_open,
	.mmap = spufs_mss_mmap,

static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
					    unsigned long address, int *type)
	return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopage = spufs_psmap_mmap_nopage,

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;

static int spufs_psmap_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);

	file->private_data = i->i_ctx;
	return nonseekable_open(inode, file);

static struct file_operations spufs_psmap_fops = {
	.open = spufs_psmap_open,
	.mmap = spufs_psmap_mmap,

static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
					  unsigned long address, int *type)
	return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopage = spufs_mfc_mmap_nopage,
/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))

	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)

	if (atomic_read(&inode->i_count) != 1)

	file->private_data = ctx;
	return nonseekable_open(inode, file);

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		if (free_elements & 0xffff)
		if (tagstatus & ctx->tagwait)

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
	/* See if any tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
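/*
 * Reading from the mfc file returns a 32-bit mask of the tag groups the
 * caller was waiting for that have since completed; the completed groups
 * are cleared from ctx->tagwait.  Without O_NONBLOCK the read sleeps
 * until at least one awaited tag group has finished.
 */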
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
	struct spu_context *ctx = file->private_data;

	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))

		ctx->tagwait &= ~status;
		ret = spufs_wait(ctx->mfc_wq,
				 spufs_read_mfc_tagstatus(ctx, &status));

	if (copy_to_user(buffer, &status, 4))

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

		pr_debug("invalid DMA opcode %x\n", cmd->cmd);

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",

	switch (cmd->size & 0xf) {

		pr_debug("invalid DMA alignment %x for size %x\n",
			 cmd->lsa & 0xf, cmd->size);

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");

		/* not supported in this version */
		pr_debug("invalid DMA class\n");

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;

	if (size != sizeof cmd)

	if (copy_from_user(&cmd, buffer, sizeof cmd))

	ret = spufs_check_valid_dma(&cmd);

	spu_acquire_runnable(ctx);
	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));

	ctx->tagwait |= 1 << cmd.tag;
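/*
 * Illustrative user-space sequence (a sketch only; the field names follow
 * their uses above, and MFC_GET_CMD is assumed to come from the SPU
 * headers): a DMA is queued by writing one struct mfc_dma_command to the
 * mfc file, and completion is awaited by reading back a 32-bit tag-status
 * word.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = ls_offset,        (low 4 bits must match .ea)
 *		.ea   = effective_addr,
 *		.size = nbytes,           (at most 16 kB)
 *		.tag  = 5,                (tags 0..15 are available)
 *		.cmd  = MFC_GET_CMD,
 *	};
 *	u32 status;
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	read(mfc_fd, &status, sizeof(status));
 */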
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;

	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		 free_elements, tagstatus, ctx->tagwait);

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
	struct spu_context *ctx = file->private_data;

	/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));

	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
	return spufs_mfc_flush(file, NULL);

static int spufs_mfc_fasync(int fd, struct file *file, int on)
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);

static struct file_operations spufs_mfc_fops = {
	.open = spufs_mfc_open,
	.read = spufs_mfc_read,
	.write = spufs_mfc_write,
	.poll = spufs_mfc_poll,
	.flush = spufs_mfc_flush,
	.fsync = spufs_mfc_fsync,
	.fasync = spufs_mfc_fasync,
	.mmap = spufs_mfc_mmap,

static void spufs_npc_set(void *data, u64 val)
	struct spu_context *ctx = data;

	ctx->ops->npc_write(ctx, val);

static u64 spufs_npc_get(void *data)
	struct spu_context *ctx = data;

	ret = ctx->ops->npc_read(ctx);

DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")

static void spufs_decr_set(void *data, u64 val)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;

static u64 spufs_decr_get(void *data)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	ret = lscsa->decr.slot[0];
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
			"%llx\n")
static void spufs_decr_status_set(void *data, u64 val)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr_status.slot[0] = (u32) val;

static u64 spufs_decr_status_get(void *data)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	ret = lscsa->decr_status.slot[0];

DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
			spufs_decr_status_set, "%llx\n")

static void spufs_spu_tag_mask_set(void *data, u64 val)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->tag_mask.slot[0] = (u32) val;

static u64 spufs_spu_tag_mask_get(void *data)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	ret = lscsa->tag_mask.slot[0];

DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
			spufs_spu_tag_mask_set, "%llx\n")

static void spufs_event_mask_set(void *data, u64 val)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;

static u64 spufs_event_mask_get(void *data)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	ret = lscsa->event_mask.slot[0];

DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
			spufs_event_mask_set, "%llx\n")

static void spufs_srr0_set(void *data, u64 val)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;

static u64 spufs_srr0_get(void *data)
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	spu_acquire_saved(ctx);
	ret = lscsa->srr0.slot[0];
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
			"%llx\n")
static u64 spufs_id_get(void *data)
	struct spu_context *ctx = data;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
		num = (unsigned int)-1;

DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")

static u64 spufs_object_id_get(void *data)
	struct spu_context *ctx = data;
	return ctx->object_id;

static void spufs_object_id_set(void *data, u64 id)
	struct spu_context *ctx = data;
	ctx->object_id = id;

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
			spufs_object_id_set, "0x%llx\n");

struct tree_descr spufs_dir_contents[] = {
	{ "mem", &spufs_mem_fops, 0666, },
	{ "regs", &spufs_regs_fops, 0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
1530 { "object-id", &spufs_object_id_ops, 0666, },