arch/powerpc/platforms/cell/spufs/file.c
/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

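/*
 * Several of the problem-state mmap handlers below only make sense
 * with a 4K kernel page size; when the kernel is built with 64K pages
 * they are compiled out and the corresponding file_operations entry
 * is stubbed to NULL.
 */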
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	spu_acquire(ctx);
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	spu_acquire(ctx);
	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}

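/*
 * Fault handler for mappings of the local store: while the context is
 * saved, back the mapping with the saved copy in the context save area
 * (regular cacheable memory); while it is running on an SPE, map the
 * physical local store uncached.
 */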
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* 64K pages are in use; align the address to the 64K slice */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
				      unsigned long len, unsigned long pgoff,
				      unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fall back to the normal mm
	 * get_unmapped_area */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Otherwise, try to obtain a 64K-page slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
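
/*
 * Illustrative user-space usage (assuming spufs is mounted at /spu
 * and "ctx" is a context directory created with spu_create()): the
 * local store can be mapped directly through the "mem" file:
 *
 *	int fd = open("/spu/ctx/mem", O_RDWR);
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */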
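
/*
 * Common fault handler for mappings of the problem state area: the
 * context must be runnable, since the fault is satisfied from the
 * physical problem state registers of the SPE the context runs on.
 * ps_offs/ps_size select the sub-area that a particular file exposes.
 */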
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/* An error here usually means a signal; we might want to test
	 * the error code more precisely, though.
	 */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return NOPFN_REFAULT;

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	spu_release(ctx);

	return NOPFN_REFAULT;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

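/*
 * The "cntl" file uses the simple_attr text interface: reads return
 * the SPU status register, writes go to the SPU run control register.
 */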
static u64 spufs_cntl_get(void *data)
{
	struct spu_context *ctx = data;
	u64 val;

	spu_acquire(ctx);
	val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return val;
}

static void spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_close(inode, file);

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read	 = spufs_regs_read,
	.write	 = spufs_regs_write,
	.llseek	 = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data;
	u32 __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
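
/*
 * Illustrative user-space usage: a 4-byte read of the "mbox" file
 * returns one mailbox entry, or fails with -EAGAIN when the mailbox
 * is empty:
 *
 *	u32 data;
 *	if (read(mbox_fd, &data, sizeof(data)) == 4)
 *		... use data ...
 */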

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data;
	u32 __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data;
	u32 __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

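/*
 * The signal*_type attributes select how writes to the signal
 * notification registers behave (overwrite mode vs. logical-OR
 * accumulate mode, per the CBE architecture).
 */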
static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}

static u64 __spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->ops->signal1_type_get(ctx);
}

static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal1_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
					spufs_signal1_type_set, "%llu");

static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}

static u64 __spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	return ctx->ops->signal2_type_get(ctx);
}

static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal2_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
					spufs_signal2_type_set, "%llu");

#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MSS (multisource sync) area
 * [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};

#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x3000 - 0x3fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	spin_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	spin_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spin_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	spin_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if any tag group we are waiting on has completed */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

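/*
 * Helper for the spufs_wait() loop in spufs_mfc_write(): returns
 * nonzero once the command has either been queued or failed with an
 * error other than -EAGAIN, and returns zero (keep waiting) while
 * the command queue remains full.
 */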
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out:
	return ret;
}

static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
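
/*
 * Illustrative user-space usage: a DMA transfer is queued by writing
 * one struct mfc_dma_command (validated by spufs_check_valid_dma()
 * above) to the "mfc" file; completion of the tag group can then be
 * awaited with a blocking 4-byte read of the tag status:
 *
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	read(mfc_fd, &tagstatus, sizeof(tagstatus));
 */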
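
/*
 * The remaining small files expose single registers or context fields
 * through libfs simple attributes; reads and writes use the text
 * format given in each DEFINE_SIMPLE_ATTRIBUTE() below.
 */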
1585 static void spufs_npc_set(void *data, u64 val)
1586 {
1587         struct spu_context *ctx = data;
1588         spu_acquire(ctx);
1589         ctx->ops->npc_write(ctx, val);
1590         spu_release(ctx);
1591 }
1592
1593 static u64 spufs_npc_get(void *data)
1594 {
1595         struct spu_context *ctx = data;
1596         u64 ret;
1597         spu_acquire(ctx);
1598         ret = ctx->ops->npc_read(ctx);
1599         spu_release(ctx);
1600         return ret;
1601 }
1602 DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1603                         "0x%llx\n")
1604
1605 static void spufs_decr_set(void *data, u64 val)
1606 {
1607         struct spu_context *ctx = data;
1608         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1609         spu_acquire_saved(ctx);
1610         lscsa->decr.slot[0] = (u32) val;
1611         spu_release(ctx);
1612 }
1613
1614 static u64 __spufs_decr_get(void *data)
1615 {
1616         struct spu_context *ctx = data;
1617         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1618         return lscsa->decr.slot[0];
1619 }
1620
1621 static u64 spufs_decr_get(void *data)
1622 {
1623         struct spu_context *ctx = data;
1624         u64 ret;
1625         spu_acquire_saved(ctx);
1626         ret = __spufs_decr_get(data);
1627         spu_release(ctx);
1628         return ret;
1629 }
1630 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1631                         "0x%llx\n")
1632
1633 static void spufs_decr_status_set(void *data, u64 val)
1634 {
1635         struct spu_context *ctx = data;
1636         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1637         spu_acquire_saved(ctx);
1638         lscsa->decr_status.slot[0] = (u32) val;
1639         spu_release(ctx);
1640 }
1641
1642 static u64 __spufs_decr_status_get(void *data)
1643 {
1644         struct spu_context *ctx = data;
1645         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1646         return lscsa->decr_status.slot[0];
1647 }
1648
1649 static u64 spufs_decr_status_get(void *data)
1650 {
1651         struct spu_context *ctx = data;
1652         u64 ret;
1653         spu_acquire_saved(ctx);
1654         ret = __spufs_decr_status_get(data);
1655         spu_release(ctx);
1656         return ret;
1657 }
1658 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1659                         spufs_decr_status_set, "0x%llx\n")
1660
1661 static void spufs_event_mask_set(void *data, u64 val)
1662 {
1663         struct spu_context *ctx = data;
1664         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1665         spu_acquire_saved(ctx);
1666         lscsa->event_mask.slot[0] = (u32) val;
1667         spu_release(ctx);
1668 }
1669
1670 static u64 __spufs_event_mask_get(void *data)
1671 {
1672         struct spu_context *ctx = data;
1673         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1674         return lscsa->event_mask.slot[0];
1675 }
1676
1677 static u64 spufs_event_mask_get(void *data)
1678 {
1679         struct spu_context *ctx = data;
1680         u64 ret;
1681         spu_acquire_saved(ctx);
1682         ret = __spufs_event_mask_get(data);
1683         spu_release(ctx);
1684         return ret;
1685 }
1686 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1687                         spufs_event_mask_set, "0x%llx\n")
1688
1689 static u64 __spufs_event_status_get(void *data)
1690 {
1691         struct spu_context *ctx = data;
1692         struct spu_state *state = &ctx->csa;
1693         u64 stat;
1694         stat = state->spu_chnlcnt_RW[0];
1695         if (stat)
1696                 return state->spu_chnldata_RW[0];
1697         return 0;
1698 }
1699
1700 static u64 spufs_event_status_get(void *data)
1701 {
1702         struct spu_context *ctx = data;
1703         u64 ret = 0;
1704
1705         spu_acquire_saved(ctx);
1706         ret = __spufs_event_status_get(data);
1707         spu_release(ctx);
1708         return ret;
1709 }
1710 DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1711                         NULL, "0x%llx\n")
1712
1713 static void spufs_srr0_set(void *data, u64 val)
1714 {
1715         struct spu_context *ctx = data;
1716         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1717         spu_acquire_saved(ctx);
1718         lscsa->srr0.slot[0] = (u32) val;
1719         spu_release(ctx);
1720 }
1721
1722 static u64 spufs_srr0_get(void *data)
1723 {
1724         struct spu_context *ctx = data;
1725         struct spu_lscsa *lscsa = ctx->csa.lscsa;
1726         u64 ret;
1727         spu_acquire_saved(ctx);
1728         ret = lscsa->srr0.slot[0];
1729         spu_release(ctx);
1730         return ret;
1731 }
1732 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1733                         "0x%llx\n")
1734
1735 static u64 spufs_id_get(void *data)
1736 {
1737         struct spu_context *ctx = data;
1738         u64 num;
1739
1740         spu_acquire(ctx);
1741         if (ctx->state == SPU_STATE_RUNNABLE)
1742                 num = ctx->spu->number;
1743         else
1744                 num = (unsigned int)-1;
1745         spu_release(ctx);
1746
1747         return num;
1748 }
1749 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1750
static u64 __spufs_object_id_get(void *data)
{
        struct spu_context *ctx = data;
        return ctx->object_id;
}

static u64 spufs_object_id_get(void *data)
{
        /* FIXME: Should there really be no locking here? */
        return __spufs_object_id_get(data);
}

static void spufs_object_id_set(void *data, u64 id)
{
        struct spu_context *ctx = data;
        ctx->object_id = id;
}

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
                spufs_object_id_set, "0x%llx\n")

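/* Read-only view of the Local Store Limit Register in the saved state. */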
static u64 __spufs_lslr_get(void *data)
{
        struct spu_context *ctx = data;
        return ctx->csa.priv2.spu_lslr_RW;
}

static u64 spufs_lslr_get(void *data)
{
        struct spu_context *ctx = data;
        u64 ret;

        spu_acquire_saved(ctx);
        ret = __spufs_lslr_get(data);
        spu_release(ctx);

        return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")

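/*
 * Common open routine for the *_info files below: they all read from
 * the saved context state, so open only needs to stash the context
 * pointer for the read methods.
 */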
static int spufs_info_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        return 0;
}

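/*
 * mbox_info returns the value pending in the saved SPU-to-PU mailbox,
 * or EOF if the saved mailbox status shows no entry queued.
 */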
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the mbox */
        if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
                return 0;

        data = ctx->csa.prob.pu_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

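/*
 * Note that the *_info read methods end up in copy_to_user() while
 * csa.register_lock is held, so a fault on the user buffer would
 * sleep under the spinlock.  A clean fix would stage the data in a
 * kernel buffer and copy it out after unlocking; that restructuring
 * is not done here.
 */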
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_mbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_mbox_info_read,
        .llseek  = generic_file_llseek,
};

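/*
 * ibox_info returns the value pending in the saved privileged
 * interrupt mailbox, with the same EOF convention as mbox_info.
 */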
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
                                char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the ibox */
        if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
                return 0;

        data = ctx->csa.priv2.puint_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_ibox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_ibox_info_read,
        .llseek  = generic_file_llseek,
};

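/*
 * wbox_info returns the entries still queued in the saved PU-to-SPU
 * mailbox.  Bits 8-15 of mb_stat_R count the free slots in the
 * four-deep queue, so 4 minus that count is the number of valid
 * entries.
 */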
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        int i, cnt;
        u32 data[4];
        u32 wbox_stat;

        wbox_stat = ctx->csa.prob.mb_stat_R;
        cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
        for (i = 0; i < cnt; i++) {
                data[i] = ctx->csa.spu_mailbox_data[i];
        }

        return simple_read_from_buffer(buf, len, pos, &data,
                                cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_wbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
        .open = spufs_info_open,
        .read = spufs_wbox_info_read,
        .llseek  = generic_file_llseek,
};

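/*
 * dma_info is a snapshot of the saved MFC state: the tag status query
 * type and mask, the relevant SPU channel data words, and the sixteen
 * entries of the MFC SPU command queue.
 */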
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_dma_info info;
        struct mfc_cq_sr *qp, *spuqp;
        int i;

        info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
        info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
        info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
        info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
        info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
        for (i = 0; i < 16; i++) {
                qp = &info.dma_info_command_data[i];
                spuqp = &ctx->csa.priv2.spuq[i];

                qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
                              size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_dma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_dma_info_fops = {
        .open = spufs_info_open,
        .read = spufs_dma_info_read,
};

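/*
 * proxydma_info mirrors dma_info for the eight-entry proxy command
 * queue driven from the PU side.  Unlike its siblings, this reader
 * rejects buffers smaller than the full record instead of returning
 * a partial read.
 */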
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_proxydma_info info;
        struct mfc_cq_sr *qp, *puqp;
        int ret = sizeof info;
        int i;

        if (len < ret)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
        info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
        info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
        for (i = 0; i < 8; i++) {
                qp = &info.proxydma_info_command_data[i];
                puqp = &ctx->csa.priv2.puq[i];

                qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        spu_acquire_saved(ctx);
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release(ctx);

        return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
        .open = spufs_info_open,
        .read = spufs_proxydma_info_read,
};

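/* The files that make up a regular spufs context directory. */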
struct tree_descr spufs_dir_contents[] = {
        { "mem",  &spufs_mem_fops,  0666, },
        { "regs", &spufs_regs_fops,  0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, },
        { "lslr", &spufs_lslr_ops, 0444, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "srr0", &spufs_srr0_ops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "event_status", &spufs_event_status_ops, 0444, },
        { "psmap", &spufs_psmap_fops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "mbox_info", &spufs_mbox_info_fops, 0444, },
        { "ibox_info", &spufs_ibox_info_fops, 0444, },
        { "wbox_info", &spufs_wbox_info_fops, 0444, },
        { "dma_info", &spufs_dma_info_fops, 0444, },
        { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
        {},
};

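/*
 * Contexts created with SPU_CREATE_NOSCHED cannot be scheduled out,
 * so this reduced set omits the files whose implementation needs to
 * save the context (regs, fpcr, srr0, the decrementer and event
 * files, and the *_info snapshots).
 */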
struct tree_descr spufs_dir_nosched_contents[] = {
        { "mem",  &spufs_mem_fops,  0666, },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "psmap", &spufs_psmap_fops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        {},
};

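/*
 * Table driving the spufs coredump code: each entry names the file
 * dumped into one note, the routine used to read it (either a *_read
 * method or a simple *_get accessor), and the size of the dumped
 * data in bytes.  spufs_coredump_num_notes excludes the terminating
 * empty entry.
 */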
struct spufs_coredump_reader spufs_coredump_read[] = {
        { "regs", __spufs_regs_read, NULL, 128 * 16 },
        { "fpcr", __spufs_fpcr_read, NULL, 16 },
        { "lslr", NULL, __spufs_lslr_get, 11 },
        { "decr", NULL, __spufs_decr_get, 11 },
        { "decr_status", NULL, __spufs_decr_status_get, 11 },
        { "mem", __spufs_mem_read, NULL, 256 * 1024, },
        { "signal1", __spufs_signal1_read, NULL, 4 },
        { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
        { "signal2", __spufs_signal2_read, NULL, 4 },
        { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
        { "event_mask", NULL, __spufs_event_mask_get, 8 },
        { "event_status", NULL, __spufs_event_status_get, 8 },
        { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
        { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
        { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
        { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
        { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
        { "object-id", NULL, __spufs_object_id_get, 19 },
        { },
};
int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;