/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
const struct spu_management_ops *spu_management_ops;
const struct spu_priv1_ops *spu_priv1_ops;

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static DEFINE_SPINLOCK(spu_list_lock);

EXPORT_SYMBOL_GPL(spu_priv1_ops);
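
/*
 * A note on the register accessors used throughout this file: helpers such
 * as spu_mfc_sr1_get(), spu_int_stat_get() or spu_mfc_dsisr_set() are thin
 * wrappers that dispatch through spu_priv1_ops, so the same code can run
 * on bare hardware and under a hypervisor.  Roughly (a sketch; see
 * asm/spu_priv1.h for the real definitions):
 *
 *	static inline u64 spu_mfc_sr1_get(struct spu *spu)
 *	{
 *		return spu_priv1_ops->mfc_sr1_get(spu);
 *	}
 */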
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_list_lock, flags);
}
/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);
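
/*
 * Intended usage (as far as this file is concerned): the owner of the SPU,
 * e.g. the spufs context-switch code, calls spu_associate_mm(spu, mm) when
 * binding the SPU to a process address space and, presumably,
 * spu_associate_mm(spu, NULL) when the SPU is unbound again, so that
 * spu_flush_all_slbs() above only hits SPUs that actually use the mm.
 */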
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			psize = mmu_huge_psize;
		else
#endif
			psize = mm->context.user_psize;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
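
/*
 * Rough illustration of the SLB entry written above, assuming the usual
 * 256MB segments (28-bit segment offset): for a faulting user address of,
 * say, ea = 0x0000000012345678, the ESID written to slb_esid_RW is
 * (ea & ESID_MASK) | SLB_ESID_V = 0x0000000010000000 | SLB_ESID_V, and the
 * matching VSID comes from get_vsid() for that segment.  The eight SLB
 * slots of the MFC are reused round-robin via spu->slb_replace.
 */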
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu = data;

	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
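
/*
 * For reference, each SPE raises three classes of interrupts, which is why
 * there are three handlers in this file: class 0 covers error conditions
 * (DMA alignment, invalid DMA, SPU error), class 1 covers translation
 * faults (SLB segment and hash-table mapping faults), and class 2 covers
 * application events (mailboxes, stop-and-signal, halt, DMA tag-group
 * completion).
 */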
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
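
/*
 * Who re-enables the mailbox bits masked above is left to the consumer:
 * the expectation (not enforced here) is that the ibox/wbox paths in spufs
 * turn the corresponding class 2 bits back on via spu_int_mask_or() when
 * they next need to wait for mailbox data or space.
 */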
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}
static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);
struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
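
/*
 * Typical usage of the allocator by a client such as spufs (sketch only;
 * the real callers add their own locking, scheduling and error handling):
 *
 *	struct spu *spu = spu_alloc();
 *	if (!spu)
 *		return -ENOSPC;
 *	...load and run a context on the SPU...
 *	spu_free(spu);
 */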
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
	}

	return ret;
}
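
/*
 * Note the top half / bottom half split for class 1 faults: the hard irq
 * handler (spu_irq_class_1 above) only latches dar/dsisr and signals the
 * owner via stop_callback; spu_irq_class_1_bottom is then expected to run
 * later in process context, where it may take mmap_sem and fault pages in
 * through spu_handle_mm_fault().
 */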
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
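
/*
 * Example of how a client might use the attribute helpers above (a sketch;
 * the attribute name and show routine are made up for illustration):
 *
 *	static ssize_t foo_show(struct sys_device *sysdev, char *buf)
 *	{
 *		struct spu *spu = container_of(sysdev, struct spu, sysdev);
 *		return sprintf(buf, "%d\n", spu->number);
 *	}
 *	static SYSDEV_ATTR(foo, 0444, foo_show, NULL);
 *	...
 *	spu_add_sysdev_attr(&attr_foo);
 */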
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
	sysdev_unregister(&spu->sysdev);
}
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	spin_lock_irqsave(&spu_list_lock, flags);
	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_list_lock, flags);
	mutex_unlock(&spu_mutex);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}
static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);
static int __init init_spu_base(void)
{
	int i, ret;

	if (!spu_management_ops)
		return 0;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = spu_enumerate_spus(create_spu);

	if (ret) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		cleanup_spu_base();
		return ret;
	}

	xmon_register_spus(&spu_full_list);

	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");