/*
 * spu management operations for of based platforms
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 * Copyright 2006 Sony Corp.
 * (C) Copyright 2007 TOSHIBA CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/prom.h>

#include "spufs/spufs.h"
#include "interrupt.h"

struct device_node *spu_devnode(struct spu *spu)
{
        return spu->devnode;
}
EXPORT_SYMBOL_GPL(spu_devnode);

static u64 __init find_spu_unit_number(struct device_node *spe)
{
        const unsigned int *prop;
        int proplen;

        /* new device trees should provide the physical-id attribute */
        prop = of_get_property(spe, "physical-id", &proplen);
        if (prop && proplen == 4)
                return (u64)*prop;

        /* celleb device tree provides the unit-id */
        prop = of_get_property(spe, "unit-id", &proplen);
        if (prop && proplen == 4)
                return (u64)*prop;

        /* legacy device trees provide the id in the reg attribute */
        prop = of_get_property(spe, "reg", &proplen);
        if (prop && proplen == 4)
                return (u64)*prop;

        return 0;
}

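/*
 * Undo the ioremap()s done while mapping an SPE. priv1 is only mapped
 * (and hence only unmapped) when not running under a hypervisor.
 */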
static void spu_unmap(struct spu *spu)
{
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                iounmap(spu->priv1);
        iounmap(spu->priv2);
        iounmap(spu->problem);
        iounmap((__force u8 __iomem *)spu->local_store);
}

static int __init spu_map_interrupts_old(struct spu *spu,
        struct device_node *np)
{
        unsigned int isrc;
        const u32 *tmp;
        int nid;

        /* Get the interrupt source unit from the device-tree */
        tmp = of_get_property(np, "isrc", NULL);
        if (!tmp)
                return -ENODEV;
        isrc = tmp[0];

        tmp = of_get_property(np->parent->parent, "node-id", NULL);
        if (!tmp) {
                printk(KERN_WARNING "%s: can't find node-id\n", __func__);
                nid = spu->node;
        } else
                nid = tmp[0];

        /* Add the node number */
        isrc |= nid << IIC_IRQ_NODE_SHIFT;

        /* Now map interrupts of all 3 classes */
        spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
        spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
        spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

        /* Right now, we only fail if class 2 failed */
        return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

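/*
 * Legacy device trees describe each SPE area with an { address, length }
 * property; look it up and map it with ioremap().
 */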
static void __iomem * __init spu_map_prop_old(struct spu *spu,
                                              struct device_node *n,
                                              const char *name)
{
        const struct address_prop {
                unsigned long address;
                unsigned int len;
        } __attribute__((packed)) *prop;
        int proplen;

        prop = of_get_property(n, name, &proplen);
        if (prop == NULL || proplen != sizeof(struct address_prop))
                return NULL;

        return ioremap(prop->address, prop->len);
}

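/*
 * Map an SPE using the legacy device tree layout, where local store,
 * problem state, priv2 and priv1 are separate properties of the node.
 */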
static int __init spu_map_device_old(struct spu *spu)
{
        struct device_node *node = spu->devnode;
        const char *prop;
        int ret;

        ret = -ENODEV;
        spu->name = of_get_property(node, "name", NULL);
        if (!spu->name)
                goto out;

        prop = of_get_property(node, "local-store", NULL);
        if (!prop)
                goto out;
        spu->local_store_phys = *(unsigned long *)prop;

        /* we use local store as ram, not io memory */
        spu->local_store = (void __force *)
                spu_map_prop_old(spu, node, "local-store");
        if (!spu->local_store)
                goto out;

        prop = of_get_property(node, "problem", NULL);
        if (!prop)
                goto out_unmap;
        spu->problem_phys = *(unsigned long *)prop;

        spu->problem = spu_map_prop_old(spu, node, "problem");
        if (!spu->problem)
                goto out_unmap;

        spu->priv2 = spu_map_prop_old(spu, node, "priv2");
        if (!spu->priv2)
                goto out_unmap;

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                spu->priv1 = spu_map_prop_old(spu, node, "priv1");
                if (!spu->priv1)
                        goto out_unmap;
        }

        ret = 0;
        goto out;

out_unmap:
        spu_unmap(spu);
out:
        return ret;
}

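/*
 * Map the three interrupt classes of an SPE from the interrupt
 * specifiers in its device tree node.
 */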
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
        struct of_irq oirq;
        int ret;
        int i;

        for (i = 0; i < 3; i++) {
                ret = of_irq_map_one(np, i, &oirq);
                if (ret) {
                        pr_debug("spu_new: failed to get irq %d\n", i);
                        goto err;
                }
                ret = -EINVAL;
                pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
                         oirq.controller->full_name);
                spu->irqs[i] = irq_create_of_mapping(oirq.controller,
                                        oirq.specifier, oirq.size);
                if (spu->irqs[i] == NO_IRQ) {
                        pr_debug("spu_new: failed to map it !\n");
                        goto err;
                }
        }
        return 0;

err:
        pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
                spu->name);
        for (; i >= 0; i--) {
                if (spu->irqs[i] != NO_IRQ)
                        irq_dispose_mapping(spu->irqs[i]);
        }
        return ret;
}

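/*
 * Map one addressable resource of an SPE node and optionally return
 * its physical address.
 */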
static int spu_map_resource(struct spu *spu, int nr,
                            void __iomem **virt, unsigned long *phys)
{
        struct device_node *np = spu->devnode;
        struct resource resource = { };
        unsigned long len;
        int ret;

        ret = of_address_to_resource(np, nr, &resource);
        if (ret)
                return ret;
        if (phys)
                *phys = resource.start;
        len = resource.end - resource.start + 1;
        *virt = ioremap(resource.start, len);
        if (!*virt)
                return -EINVAL;
        return 0;
}

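/*
 * Map an SPE using the current device tree layout: resources 0 to 3 are
 * the local store, problem state, priv2 and (on non-LPAR systems) priv1
 * areas.
 */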
static int __init spu_map_device(struct spu *spu)
{
        struct device_node *np = spu->devnode;
        int ret = -ENODEV;

        spu->name = of_get_property(np, "name", NULL);
        if (!spu->name)
                goto out;

        ret = spu_map_resource(spu, 0, (void __iomem **)&spu->local_store,
                               &spu->local_store_phys);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 0\n",
                         np->full_name);
                goto out;
        }
        ret = spu_map_resource(spu, 1, (void __iomem **)&spu->problem,
                               &spu->problem_phys);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 1\n",
                         np->full_name);
                goto out_unmap;
        }
        ret = spu_map_resource(spu, 2, (void __iomem **)&spu->priv2, NULL);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 2\n",
                         np->full_name);
                goto out_unmap;
        }
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                ret = spu_map_resource(spu, 3,
                               (void __iomem **)&spu->priv1, NULL);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 3\n",
                         np->full_name);
                goto out_unmap;
        }
        pr_debug("spu_new: %s maps:\n", np->full_name);
        pr_debug("  local store   : 0x%016lx -> 0x%p\n",
                 spu->local_store_phys, spu->local_store);
        pr_debug("  problem state : 0x%016lx -> 0x%p\n",
                 spu->problem_phys, spu->problem);
        pr_debug("  priv2         :                       0x%p\n", spu->priv2);
        pr_debug("  priv1         :                       0x%p\n", spu->priv1);

        return 0;

out_unmap:
        spu_unmap(spu);
out:
        pr_debug("failed to map spe %s: %d\n", spu->name, ret);
        return ret;
}

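/*
 * Walk all "spe" nodes in the device tree and call fn() for each one.
 * Returns the number of SPEs found on success, or an error code.
 */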
static int __init of_enumerate_spus(int (*fn)(void *data))
{
        int ret;
        struct device_node *node;
        unsigned int n = 0;

        ret = -ENODEV;
        for (node = of_find_node_by_type(NULL, "spe");
                        node; node = of_find_node_by_type(node, "spe")) {
                ret = fn(node);
                if (ret) {
                        printk(KERN_WARNING "%s: Error initializing %s\n",
                                __func__, node->name);
                        break;
                }
                n++;
        }
        return ret ? ret : n;
}

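/*
 * Set up one SPE: resolve its unit number and NUMA node, then map its
 * MMIO areas and interrupts, falling back to the legacy device tree
 * layout when the current one is not available.
 */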
static int __init of_create_spu(struct spu *spu, void *data)
{
        int ret;
        struct device_node *spe = (struct device_node *)data;
        static int legacy_map = 0, legacy_irq = 0;

        spu->devnode = of_node_get(spe);
        spu->spe_id = find_spu_unit_number(spe);

        spu->node = of_node_to_nid(spe);
        if (spu->node >= MAX_NUMNODES) {
                printk(KERN_WARNING "SPE %s on node %d ignored,"
                       " node number too big\n", spe->full_name, spu->node);
                printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
                ret = -ENODEV;
                goto out;
        }

        ret = spu_map_device(spu);
        if (ret) {
                if (!legacy_map) {
                        legacy_map = 1;
                        printk(KERN_WARNING "%s: Legacy device tree found, "
                                "trying to map old style\n", __func__);
                }
                ret = spu_map_device_old(spu);
                if (ret) {
                        printk(KERN_ERR "Unable to map %s\n",
                                spu->name);
                        goto out;
                }
        }

        ret = spu_map_interrupts(spu, spe);
        if (ret) {
                if (!legacy_irq) {
                        legacy_irq = 1;
                        printk(KERN_WARNING "%s: Legacy device tree found, "
                                "trying old style irq\n", __func__);
                }
                ret = spu_map_interrupts_old(spu, spe);
                if (ret) {
                        printk(KERN_ERR "%s: could not map interrupts\n",
                                spu->name);
                        goto out_unmap;
                }
        }

        pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
                spu->local_store, spu->problem, spu->priv1,
                spu->priv2, spu->number);
        goto out;

out_unmap:
        spu_unmap(spu);
out:
        return ret;
}

static int of_destroy_spu(struct spu *spu)
{
        spu_unmap(spu);
        of_node_put(spu->devnode);
        return 0;
}

static void enable_spu_by_master_run(struct spu_context *ctx)
{
        ctx->ops->master_start(ctx);
}

static void disable_spu_by_master_run(struct spu_context *ctx)
{
        ctx->ops->master_stop(ctx);
}

/* Hardcoded affinity idxs for qs20 */
#define QS20_SPES_PER_BE 8
static int qs20_reg_idxs[QS20_SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };

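/* Find the SPE on the given node whose "reg" property matches reg. */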
static struct spu *spu_lookup_reg(int node, u32 reg)
{
        struct spu *spu;
        const u32 *spu_reg;

        list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
                if (spu_reg && *spu_reg == reg)
                        return spu;
        }
        return NULL;
}

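/*
 * Build the affinity lists for a QS20 blade from the hardcoded tables
 * above; used when the device tree provides no vicinity information.
 */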
static void init_affinity_qs20_hardcoded(void)
{
        int node, i;
        struct spu *last_spu, *spu;
        u32 reg;

        for (node = 0; node < MAX_NUMNODES; node++) {
                last_spu = NULL;
                for (i = 0; i < QS20_SPES_PER_BE; i++) {
                        reg = qs20_reg_idxs[i];
                        spu = spu_lookup_reg(node, reg);
                        if (!spu)
                                continue;
                        spu->has_mem_affinity = qs20_reg_memory[reg];
                        if (last_spu)
                                list_add_tail(&spu->aff_list,
                                                &last_spu->aff_list);
                        last_spu = spu;
                }
        }
}

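/* Check whether any "spe" node in the device tree has a "vicinity" property. */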
static int of_has_vicinity(void)
{
        struct device_node *dn;

        for_each_node_by_type(dn, "spe") {
                if (of_find_property(dn, "vicinity", NULL)) {
                        of_node_put(dn);
                        return 1;
                }
        }
        return 0;
}

static struct spu *devnode_spu(int cbe, struct device_node *dn)
{
        struct spu *spu;

        list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
                if (spu_devnode(spu) == dn)
                        return spu;
        return NULL;
}

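/*
 * Find an SPE (other than "avoid") whose "vicinity" property points at
 * the target node.
 */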
static struct spu *
neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
{
        struct spu *spu;
        struct device_node *spu_dn;
        const phandle *vic_handles;
        int lenp, i;

        list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
                spu_dn = spu_devnode(spu);
                if (spu_dn == avoid)
                        continue;
                vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
                if (!vic_handles)
                        continue;
                for (i = 0; i < (lenp / sizeof(phandle)); i++) {
                        if (vic_handles[i] == target->linux_phandle)
                                return spu;
                }
        }
        return NULL;
}

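/*
 * Build the affinity list for one Cell BE by following the "vicinity"
 * phandles from SPE to SPE; mic-tm neighbours additionally mark memory
 * affinity on the SPEs adjacent to them.
 */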
static void init_affinity_node(int cbe)
{
        struct spu *spu, *last_spu;
        struct device_node *vic_dn, *last_spu_dn;
        phandle avoid_ph;
        const phandle *vic_handles;
        const char *name;
        int lenp, i, added;

        last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
                                                                cbe_list);
        avoid_ph = 0;
        for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
                last_spu_dn = spu_devnode(last_spu);
                vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);

                /*
                 * Walk through each phandle in the vicinity property of the
                 * spu (typically two vicinity phandles per spe node)
                 */
                for (i = 0; i < (lenp / sizeof(phandle)); i++) {
                        if (vic_handles[i] == avoid_ph)
                                continue;

                        vic_dn = of_find_node_by_phandle(vic_handles[i]);
                        if (!vic_dn)
                                continue;

                        /* a neighbour might be spe, mic-tm, or bif0 */
                        name = of_get_property(vic_dn, "name", NULL);
                        if (!name)
                                continue;

                        if (strcmp(name, "spe") == 0) {
                                spu = devnode_spu(cbe, vic_dn);
                                if (!spu)
                                        continue;
                                avoid_ph = last_spu_dn->linux_phandle;
                        } else {
                                /*
                                 * "mic-tm" and "bif0" nodes do not have
                                 * vicinity property. So we need to find the
                                 * spe which has vic_dn as neighbour, but
                                 * skipping the one we came from (last_spu_dn)
                                 */
                                spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
                                if (!spu)
                                        continue;
                                if (!strcmp(name, "mic-tm")) {
                                        last_spu->has_mem_affinity = 1;
                                        spu->has_mem_affinity = 1;
                                }
                                avoid_ph = vic_dn->linux_phandle;
                        }

                        list_add_tail(&spu->aff_list, &last_spu->aff_list);
                        last_spu = spu;
                        break;
                }
        }
}

static void init_affinity_fw(void)
{
        int cbe;

        for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
                init_affinity_node(cbe);
}

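/*
 * Set up SPE affinity from the "vicinity" properties when they exist,
 * otherwise fall back to the hardcoded QS20 tables.
 */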
static int __init init_affinity(void)
{
        if (of_has_vicinity()) {
                init_affinity_fw();
        } else {
                long root = of_get_flat_dt_root();
                if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
                        init_affinity_qs20_hardcoded();
                else
                        printk(KERN_INFO "No affinity configuration found\n");
        }

        return 0;
}

const struct spu_management_ops spu_management_of_ops = {
        .enumerate_spus = of_enumerate_spus,
        .create_spu = of_create_spu,
        .destroy_spu = of_destroy_spu,
        .enable_spu = enable_spu_by_master_run,
        .disable_spu = disable_spu_by_master_run,
        .init_affinity = init_affinity,
};