2 * spu hypervisor abstraction for direct hardware access.
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 * Copyright 2006 Sony Corp.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include <linux/interrupt.h>
22 #include <linux/list.h>
23 #include <linux/module.h>
24 #include <linux/ptrace.h>
25 #include <linux/slab.h>
26 #include <linux/wait.h>
29 #include <linux/mutex.h>
30 #include <linux/device.h>
33 #include <asm/spu_priv1.h>
34 #include <asm/firmware.h>
37 #include "interrupt.h"
38 #include "spu_priv1_mmio.h"
/* Members of this backend's per-SPU private data (struct spu_pdata);
 * the struct's opening line is not visible in this extraction. */
42 struct device_node *devnode;
43 struct spu_priv1 __iomem *priv1;
/* Return the backend-private data hung off the generic spu struct.
 * Body not visible here — presumably returns spu->pdata; confirm. */
46 static struct spu_pdata *spu_get_pdata(struct spu *spu)
/* Exported accessor: the device-tree node backing this SPU. */
52 struct device_node *spu_devnode(struct spu *spu)
54 return spu_get_pdata(spu)->devnode;
57 EXPORT_SYMBOL_GPL(spu_devnode);
/*
 * Look up the SPE's numa/node id: the "node-id" property lives on the
 * node two levels above the SPE node in the device tree.
 * NOTE(review): the return statement is not visible in this extraction —
 * presumably returns *id (with a fallback when the property is absent);
 * confirm against the full source.
 */
59 static int __init find_spu_node_id(struct device_node *spe)
61 const unsigned int *id;
62 struct device_node *cpu;
/* spe -> parent -> parent is the node carrying "node-id" */
63 cpu = spe->parent->parent;
64 id = get_property(cpu, "node-id", NULL);
/*
 * Register the memory range described by device-tree property @prop
 * (a packed { address, len } pair on the SPE node) with the kernel as
 * memory on the SPU's NUMA node, via __add_pages().
 */
68 static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
/* Serializes concurrent __add_pages() calls from multiple SPEs. */
71 static DEFINE_MUTEX(add_spumem_mutex);
/* Device-tree layout of the property: packed address + length. */
73 const struct address_prop {
74 unsigned long address;
76 } __attribute__((packed)) *p;
79 unsigned long start_pfn, nr_pages;
80 struct pglist_data *pgdata;
84 p = get_property(spe, prop, &proplen);
/* Property must be exactly one address_prop; warn loudly if not. */
85 WARN_ON(proplen != sizeof (*p));
87 start_pfn = p->address >> PAGE_SHIFT;
/* Round the byte length up to whole pages. */
88 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
90 pgdata = NODE_DATA(spu_get_pdata(spu)->nid);
91 zone = pgdata->node_zones;
93 /* XXX rethink locking here */
94 mutex_lock(&add_spumem_mutex);
95 ret = __add_pages(zone, start_pfn, nr_pages);
96 mutex_unlock(&add_spumem_mutex);
/*
 * ioremap the range described by the { address, len } property @name
 * on node @n.  First registers the range as SPU memory via
 * cell_spuprop_present(); -EEXIST (already registered) is tolerated.
 * Returns the mapped virtual address, or NULL on failure.
 */
101 static void __iomem * __init map_spe_prop(struct spu *spu,
102 struct device_node *n, const char *name)
104 const struct address_prop {
105 unsigned long address;
107 } __attribute__((packed)) *prop;
111 void __iomem *ret = NULL;
114 p = get_property(n, name, &proplen);
/* Reject malformed properties before touching the contents. */
115 if (proplen != sizeof (struct address_prop))
120 err = cell_spuprop_present(spu, n, name);
/* Already-registered memory is fine; any other error aborts the map. */
121 if (err && (err != -EEXIST))
124 ret = ioremap(prop->address, prop->len);
/* Tear down all MMIO mappings made for this SPU. */
130 static void spu_unmap(struct spu *spu)
133 iounmap(spu_get_pdata(spu)->priv1);
134 iounmap(spu->problem);
/* local_store was cast away from __iomem for use as RAM; cast back. */
135 iounmap((__force u8 __iomem *)spu->local_store);
/*
 * Map this SPU's three interrupt classes using the old device-tree
 * binding: a raw "isrc" interrupt-source number combined with the
 * node number and the per-class id.
 */
138 static int __init spu_map_interrupts_old(struct spu *spu,
139 struct device_node *np)
144 /* Get the interrupt source unit from the device-tree */
145 tmp = get_property(np, "isrc", NULL);
150 /* Add the node number */
151 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
153 /* Now map interrupts of all 3 classes */
154 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
155 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
156 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
158 /* Right now, we only fail if class 2 failed */
159 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
/*
 * Map an SPU's register areas using the old device-tree layout, where
 * each area is a separate { address, len } property on the SPE node.
 */
162 static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
168 spu->name = get_property(node, "name", NULL);
172 prop = get_property(node, "local-store", NULL);
175 spu->local_store_phys = *(unsigned long *)prop;
177 /* we use local store as ram, not io memory */
178 spu->local_store = (void __force *)
179 map_spe_prop(spu, node, "local-store");
180 if (!spu->local_store)
183 prop = get_property(node, "problem", NULL);
186 spu->problem_phys = *(unsigned long *)prop;
188 spu->problem= map_spe_prop(spu, node, "problem");
/* priv1/priv2 areas mapped last; error handling lines not visible here. */
192 spu_get_pdata(spu)->priv1= map_spe_prop(spu, node, "priv1");
194 spu->priv2= map_spe_prop(spu, node, "priv2");
/*
 * Map the SPU's interrupts using the standard OF interrupt tree:
 * interrupt classes 0..2 come from interrupt specifiers 0..2 of the
 * SPE node.  On failure, mappings already created are disposed of.
 */
206 static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
212 for (i=0; i < 3; i++) {
213 ret = of_irq_map_one(np, i, &oirq);
215 pr_debug("spu_new: failed to get irq %d\n", i);
219 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
220 oirq.controller->full_name);
221 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
222 oirq.specifier, oirq.size);
223 if (spu->irqs[i] == NO_IRQ) {
224 pr_debug("spu_new: failed to map it !\n");
/* Error unwind: dispose of every class mapped so far. */
231 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
233 for (; i >= 0; i--) {
234 if (spu->irqs[i] != NO_IRQ)
235 irq_dispose_mapping(spu->irqs[i]);
/*
 * Map resource @nr of @node: translate it to a physical range with
 * of_address_to_resource(), report the start via @phys, and ioremap
 * it into *@virt.  (@phys may be NULL per the resource-3 caller; the
 * guarding "if" is likely on a line not visible in this extraction.)
 * NOTE(review): struct resource's .end is inclusive, so the mapped
 * length "end - start" looks one byte short of the full resource —
 * later kernels use resource_size().  Confirm against full source.
 */
240 static int spu_map_resource(struct device_node *node, int nr,
241 void __iomem** virt, unsigned long *phys)
243 struct resource resource = { };
246 ret = of_address_to_resource(node, nr, &resource);
251 *phys = resource.start;
252 *virt = ioremap(resource.start, resource.end - resource.start);
/*
 * Map an SPU's register areas using the standard device-tree layout:
 *   resource 0 = local store, 1 = problem state, 2 = priv2,
 *   resource 3 = priv1, mapped only when not running under a
 *   hypervisor (!FW_FEATURE_LPAR), since priv1 is bare-metal-only.
 */
260 static int __init spu_map_device(struct spu *spu, struct device_node *node)
263 spu->name = get_property(node, "name", NULL);
267 ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
268 &spu->local_store_phys);
270 pr_debug("spu_new: failed to map %s resource 0\n",
274 ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
277 pr_debug("spu_new: failed to map %s resource 1\n",
281 ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
284 pr_debug("spu_new: failed to map %s resource 2\n",
/* priv1 is direct-hardware-access state; skip it under a hypervisor. */
288 if (!firmware_has_feature(FW_FEATURE_LPAR))
289 ret = spu_map_resource(node, 3,
290 (void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
292 pr_debug("spu_new: failed to map %s resource 3\n",
/* Success path: dump all established mappings for debugging. */
296 pr_debug("spu_new: %s maps:\n", node->full_name);
297 pr_debug(" local store : 0x%016lx -> 0x%p\n",
298 spu->local_store_phys, spu->local_store);
299 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
300 spu->problem_phys, spu->problem);
301 pr_debug(" priv2 : 0x%p\n", spu->priv2);
302 pr_debug(" priv1 : 0x%p\n",
303 spu_get_pdata(spu)->priv1);
/* Error path: report which SPE failed and with what code. */
310 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
/*
 * Management-ops callback: walk every "spe"-type node in the device
 * tree and invoke @fn on each, warning when a callback fails.
 */
314 static int __init of_enumerate_spus(int (*fn)(void *data))
317 struct device_node *node;
/* of_find_node_by_type() iteration: NULL start, then continue from node. */
320 for (node = of_find_node_by_type(NULL, "spe");
321 node; node = of_find_node_by_type(node, "spe")) {
324 printk(KERN_WARNING "%s: Error initializing %s\n",
325 __FUNCTION__, node->name);
/*
 * Management-ops callback: initialize one SPU from its device-tree
 * node (@data).  Allocates the backend pdata, resolves node ids, maps
 * registers and interrupts — falling back to the old device-tree
 * bindings when the new ones fail — and pins the device node.
 */
332 static int __init of_create_spu(struct spu *spu, void *data)
335 struct device_node *spe = (struct device_node *)data;
337 spu->pdata = kzalloc(sizeof(struct spu_pdata),
344 spu->node = find_spu_node_id(spe);
/* Reject node numbers beyond this kernel's NUMA configuration. */
345 if (spu->node >= MAX_NUMNODES) {
346 printk(KERN_WARNING "SPE %s on node %d ignored,"
347 " node number too big\n", spe->full_name, spu->node);
348 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
/* Fall back to node 0 when the device tree gives no NUMA hint. */
353 spu_get_pdata(spu)->nid = of_node_to_nid(spe);
354 if (spu_get_pdata(spu)->nid == -1)
355 spu_get_pdata(spu)->nid = 0;
/* Try the new device-tree layout first, then the legacy one. */
357 ret = spu_map_device(spu, spe);
360 ret = spu_map_device_old(spu, spe);
364 ret = spu_map_interrupts(spu, spe);
366 ret = spu_map_interrupts_old(spu, spe);
/* Hold a reference on the node for the SPU's lifetime
 * (released in of_destroy_spu()). */
370 spu_get_pdata(spu)->devnode = of_node_get(spe);
372 pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n", spu->name,
373 spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
374 spu->priv2, spu->number);
/* Management-ops callback: release the device-node reference taken
 * at SPU creation time. */
386 static int of_destroy_spu(struct spu *spu)
389 of_node_put(spu_get_pdata(spu)->devnode);
/* Device-tree-based SPU management backend exported to the core. */
395 const struct spu_management_ops spu_management_of_ops = {
396 .enumerate_spus = of_enumerate_spus,
397 .create_spu = of_create_spu,
398 .destroy_spu = of_destroy_spu,
/*
 * priv1 interrupt accessors: direct MMIO (in_be64/out_be64) on the
 * mapped priv1 area.  int_mask_and/int_mask_or read-modify-write the
 * per-class interrupt mask; the expression combining old_mask with
 * @mask sits on lines not visible in this extraction.
 */
401 static void int_mask_and(struct spu *spu, int class, u64 mask)
405 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
406 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
/* Read-modify-write: merge @mask into the class interrupt mask. */
410 static void int_mask_or(struct spu *spu, int class, u64 mask)
414 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
415 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
/* Replace the class interrupt mask outright. */
419 static void int_mask_set(struct spu *spu, int class, u64 mask)
421 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
/* Read back the class interrupt mask. */
424 static u64 int_mask_get(struct spu *spu, int class)
426 return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
/* Clear interrupt status by writing @stat to int_stat_RW[class]. */
429 static void int_stat_clear(struct spu *spu, int class, u64 stat)
431 out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
434 static u64 int_stat_get(struct spu *spu, int class)
436 return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
/* Route this SPU's interrupts to @cpu: the IIC target id is
 * replicated into bits 48/32/16 of int_route_RW (one field per
 * interrupt class, presumably — confirm against hardware docs). */
439 static void cpu_affinity_set(struct spu *spu, int cpu)
441 u64 target = iic_get_target_id(cpu);
442 u64 route = target << 48 | target << 32 | target << 16;
443 out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
/* MFC fault/context register accessors: thin MMIO wrappers over the
 * mapped priv1 area. */
446 static u64 mfc_dar_get(struct spu *spu)
448 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
/* Read the MFC DSISR (fault status) register. */
451 static u64 mfc_dsisr_get(struct spu *spu)
453 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
/* Write the MFC DSISR register (e.g. to clear fault state). */
456 static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
458 out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
/* Mirror the CPU's page-table base (SPRN_SDR1) into the MFC. */
461 static void mfc_sdr_setup(struct spu *spu)
463 out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
466 static void mfc_sr1_set(struct spu *spu, u64 sr1)
468 out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
471 static u64 mfc_sr1_get(struct spu *spu)
473 return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
/* MFC transfer-class id register get/set. */
476 static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
478 out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
481 static u64 mfc_tclass_id_get(struct spu *spu)
483 return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
/* Invalidate the SPU's TLB by writing the write-only
 * tlb_invalidate_entry_W register. */
486 static void tlb_invalidate(struct spu *spu)
488 out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
/* Resource-allocation group id register: set (get's in_be64 wrap and
 * the value expression sit on lines not visible in this extraction). */
491 static void resource_allocation_groupID_set(struct spu *spu, u64 id)
493 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
497 static u64 resource_allocation_groupID_get(struct spu *spu)
500 &spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
/* Resource-allocation enable register get/set, same MMIO pattern. */
503 static void resource_allocation_enable_set(struct spu *spu, u64 enable)
505 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
509 static u64 resource_allocation_enable_get(struct spu *spu)
512 &spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
/* priv1 ops table for direct MMIO hardware access (no hypervisor);
 * each entry points at the corresponding accessor above. */
515 const struct spu_priv1_ops spu_priv1_mmio_ops =
517 .int_mask_and = int_mask_and,
518 .int_mask_or = int_mask_or,
519 .int_mask_set = int_mask_set,
520 .int_mask_get = int_mask_get,
521 .int_stat_clear = int_stat_clear,
522 .int_stat_get = int_stat_get,
523 .cpu_affinity_set = cpu_affinity_set,
524 .mfc_dar_get = mfc_dar_get,
525 .mfc_dsisr_get = mfc_dsisr_get,
526 .mfc_dsisr_set = mfc_dsisr_set,
527 .mfc_sdr_setup = mfc_sdr_setup,
528 .mfc_sr1_set = mfc_sr1_set,
529 .mfc_sr1_get = mfc_sr1_get,
530 .mfc_tclass_id_set = mfc_tclass_id_set,
531 .mfc_tclass_id_get = mfc_tclass_id_get,
532 .tlb_invalidate = tlb_invalidate,
533 .resource_allocation_groupID_set = resource_allocation_groupID_set,
534 .resource_allocation_groupID_get = resource_allocation_groupID_get,
535 .resource_allocation_enable_set = resource_allocation_enable_set,
536 .resource_allocation_enable_get = resource_allocation_enable_get,