/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/qe_ic.h>

#include "qe_ic.h"

static DEFINE_SPINLOCK(qe_ic_lock);

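/*
 * Per-source programming information for the QE interrupt controller:
 * which bit in which mask register enables the source, and which
 * priority code/register pair controls its priority within its group.
 * Sources whose entry is left zeroed (mask == 0) are treated as reserved.
 */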
static struct qe_ic_info qe_ic_info[] = {
        [1] = {
                .mask = 0x00008000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPWCC,
        },
        [2] = {
                .mask = 0x00004000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPWCC,
        },
        [3] = {
                .mask = 0x00002000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPWCC,
        },
        [10] = {
                .mask = 0x00000040,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPZCC,
        },
        [11] = {
                .mask = 0x00000020,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPZCC,
        },
        [12] = {
                .mask = 0x00000010,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPZCC,
        },
        [13] = {
                .mask = 0x00000008,
                .mask_reg = QEIC_CIMR,
                .pri_code = 4,
                .pri_reg = QEIC_CIPZCC,
        },
        [14] = {
                .mask = 0x00000004,
                .mask_reg = QEIC_CIMR,
                .pri_code = 5,
                .pri_reg = QEIC_CIPZCC,
        },
        [15] = {
                .mask = 0x00000002,
                .mask_reg = QEIC_CIMR,
                .pri_code = 6,
                .pri_reg = QEIC_CIPZCC,
        },
        [20] = {
                .mask = 0x10000000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPRTA,
        },
        [25] = {
                .mask = 0x00800000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPRTB,
        },
        [26] = {
                .mask = 0x00400000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPRTB,
        },
        [27] = {
                .mask = 0x00200000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPRTB,
        },
        [28] = {
                .mask = 0x00100000,
                .mask_reg = QEIC_CRIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPRTB,
        },
        [32] = {
                .mask = 0x80000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPXCC,
        },
        [33] = {
                .mask = 0x40000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPXCC,
        },
        [34] = {
                .mask = 0x20000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPXCC,
        },
        [35] = {
                .mask = 0x10000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPXCC,
        },
        [36] = {
                .mask = 0x08000000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 4,
                .pri_reg = QEIC_CIPXCC,
        },
        [40] = {
                .mask = 0x00800000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 0,
                .pri_reg = QEIC_CIPYCC,
        },
        [41] = {
                .mask = 0x00400000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 1,
                .pri_reg = QEIC_CIPYCC,
        },
        [42] = {
                .mask = 0x00200000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 2,
                .pri_reg = QEIC_CIPYCC,
        },
        [43] = {
                .mask = 0x00100000,
                .mask_reg = QEIC_CIMR,
                .pri_code = 3,
                .pri_reg = QEIC_CIPYCC,
        },
};

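/*
 * Register accessors: QEIC register offsets are byte offsets, while the
 * base pointer is a __be32 array, so divide the offset by four.
 */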
static inline u32 qe_ic_read(volatile __be32 __iomem *base, unsigned int reg)
{
        return in_be32(base + (reg >> 2));
}

static inline void qe_ic_write(volatile __be32 __iomem *base, unsigned int reg,
                               u32 value)
{
        out_be32(base + (reg >> 2), value);
}

static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
        return irq_desc[virq].chip_data;
}

#define virq_to_hw(virq)        ((unsigned int)irq_map[virq].hwirq)

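/*
 * Mask/unmask clear or set the source's bit in its mask register (CIMR or
 * CRIMR); the shared qe_ic_lock serializes the read-modify-write sequence.
 */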
static void qe_ic_unmask_irq(unsigned int virq)
{
        struct qe_ic *qe_ic = qe_ic_from_irq(virq);
        unsigned int src = virq_to_hw(virq);
        unsigned long flags;
        u32 temp;

        spin_lock_irqsave(&qe_ic_lock, flags);

        temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
        qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
                    temp | qe_ic_info[src].mask);

        spin_unlock_irqrestore(&qe_ic_lock, flags);
}

static void qe_ic_mask_irq(unsigned int virq)
{
        struct qe_ic *qe_ic = qe_ic_from_irq(virq);
        unsigned int src = virq_to_hw(virq);
        unsigned long flags;
        u32 temp;

        spin_lock_irqsave(&qe_ic_lock, flags);

        temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
        qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
                    temp & ~qe_ic_info[src].mask);

        /* Flush the above write before enabling interrupts; otherwise,
         * spurious interrupts will sometimes happen.  To be 100% sure
         * that the write has reached the device before interrupts are
         * enabled, the mask register would have to be read back; however,
         * this is not required for correctness, only to avoid wasting
         * time on a large number of spurious interrupts.  In testing,
         * a sync reduced the observed spurious interrupts to zero.
         */
        mb();

        spin_unlock_irqrestore(&qe_ic_lock, flags);
}

static struct irq_chip qe_ic_irq_chip = {
        .typename = " QEIC  ",
        .unmask = qe_ic_unmask_irq,
        .mask = qe_ic_mask_irq,
        .mask_ack = qe_ic_mask_irq,
};

static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
{
        struct qe_ic *qe_ic = h->host_data;

        /* Exact match, unless qe_ic node is NULL */
        return qe_ic->of_node == NULL || qe_ic->of_node == node;
}

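/*
 * Map a hardware source number to a Linux virq: reject reserved sources,
 * attach the qe_ic instance as chip data and install the level-triggered
 * flow handler with the QEIC irq_chip.
 */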
static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct qe_ic *qe_ic = h->host_data;
        struct irq_chip *chip;

        if (qe_ic_info[hw].mask == 0) {
                printk(KERN_ERR "Can't map reserved IRQ\n");
                return -EINVAL;
        }
        /* Default chip */
        chip = &qe_ic->hc_irq;

        set_irq_chip_data(virq, qe_ic);
        get_irq_desc(virq)->status |= IRQ_LEVEL;

        set_irq_chip_and_handler(virq, chip, handle_level_irq);

        return 0;
}

static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
                            u32 *intspec, unsigned int intsize,
                            irq_hw_number_t *out_hwirq,
                            unsigned int *out_flags)
{
        *out_hwirq = intspec[0];
        if (intsize > 1)
                *out_flags = intspec[1];
        else
                *out_flags = IRQ_TYPE_NONE;
        return 0;
}

static struct irq_host_ops qe_ic_host_ops = {
        .match = qe_ic_host_match,
        .map = qe_ic_host_map,
        .xlate = qe_ic_host_xlate,
};

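/*
 * The pending source number is taken from the upper six bits of the CIVEC
 * (low) or CHIVEC (high) vector register; a value of zero means that no
 * interrupt is pending.
 */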
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
        int irq;

        BUG_ON(qe_ic == NULL);

        /* get the interrupt source vector. */
        irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;

        if (irq == 0)
                return NO_IRQ;

        return irq_linear_revmap(qe_ic->irqhost, irq);
}

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
        int irq;

        BUG_ON(qe_ic == NULL);

        /* get the interrupt source vector. */
        irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;

        if (irq == 0)
                return NO_IRQ;

        return irq_linear_revmap(qe_ic->irqhost, irq);
}

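/*
 * Chained handlers for the two QEIC output lines: fetch the pending QE
 * source and hand it to the generic IRQ layer.
 */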
void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
{
        struct qe_ic *qe_ic = desc->handler_data;
        unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

        if (cascade_irq != NO_IRQ)
                generic_handle_irq(cascade_irq);
}

void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
{
        struct qe_ic *qe_ic = desc->handler_data;
        unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);

        if (cascade_irq != NO_IRQ)
                generic_handle_irq(cascade_irq);
}

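/*
 * Initialize the QE interrupt controller described by @node: allocate the
 * qe_ic from bootmem, create a linear irq_host, map the registers, program
 * the CICR according to @flags and hook the cascade handlers onto the
 * parent interrupt(s).
 */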
void __init qe_ic_init(struct device_node *node, unsigned int flags)
{
        struct qe_ic *qe_ic;
        struct resource res;
        u32 temp = 0, ret, high_active = 0;

        qe_ic = alloc_bootmem(sizeof(struct qe_ic));
        if (qe_ic == NULL)
                return;

        memset(qe_ic, 0, sizeof(struct qe_ic));
        qe_ic->of_node = node ? of_node_get(node) : NULL;

        qe_ic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
                                        NR_QE_IC_INTS, &qe_ic_host_ops, 0);
        if (qe_ic->irqhost == NULL) {
                of_node_put(node);
                return;
        }

        ret = of_address_to_resource(node, 0, &res);
        if (ret)
                return;

        qe_ic->regs = ioremap(res.start, res.end - res.start + 1);

        qe_ic->irqhost->host_data = qe_ic;
        qe_ic->hc_irq = qe_ic_irq_chip;

        qe_ic->virq_high = irq_of_parse_and_map(node, 0);
        qe_ic->virq_low = irq_of_parse_and_map(node, 1);

        if (qe_ic->virq_low == NO_IRQ) {
                printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
                return;
        }

        /*
         * The default priority scheme is grouped.  If spread mode is
         * required, configure the CICR accordingly.
         */
        if (flags & QE_IC_SPREADMODE_GRP_W)
                temp |= CICR_GWCC;
        if (flags & QE_IC_SPREADMODE_GRP_X)
                temp |= CICR_GXCC;
        if (flags & QE_IC_SPREADMODE_GRP_Y)
                temp |= CICR_GYCC;
        if (flags & QE_IC_SPREADMODE_GRP_Z)
                temp |= CICR_GZCC;
        if (flags & QE_IC_SPREADMODE_GRP_RISCA)
                temp |= CICR_GRTA;
        if (flags & QE_IC_SPREADMODE_GRP_RISCB)
                temp |= CICR_GRTB;

        /* choose destination signal for highest priority interrupt */
        if (flags & QE_IC_HIGH_SIGNAL) {
                temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
                high_active = 1;
        }

        qe_ic_write(qe_ic->regs, QEIC_CICR, temp);

        set_irq_data(qe_ic->virq_low, qe_ic);
        set_irq_chained_handler(qe_ic->virq_low, qe_ic_cascade_low);

        if (qe_ic->virq_high != NO_IRQ) {
                set_irq_data(qe_ic->virq_high, qe_ic);
                set_irq_chained_handler(qe_ic->virq_high, qe_ic_cascade_high);
        }

        printk("QEIC (%d IRQ sources) at %p\n", NR_QE_IC_INTS, qe_ic->regs);
}

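/*
 * Select which source is reported as the highest-priority interrupt and
 * whether it is signalled on the high or the low output.
 */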
void qe_ic_set_highest_priority(unsigned int virq, int high)
{
        struct qe_ic *qe_ic = qe_ic_from_irq(virq);
        unsigned int src = virq_to_hw(virq);
        u32 temp = 0;

        temp = qe_ic_read(qe_ic->regs, QEIC_CICR);

        temp &= ~CICR_HP_MASK;
        temp |= src << CICR_HP_SHIFT;

        temp &= ~CICR_HPIT_MASK;
        temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;

        qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
}

/* Set Priority level within its group, from 1 to 8 */
int qe_ic_set_priority(unsigned int virq, unsigned int priority)
{
        struct qe_ic *qe_ic = qe_ic_from_irq(virq);
        unsigned int src = virq_to_hw(virq);
        u32 temp;

        if (priority > 8 || priority == 0)
                return -EINVAL;
        if (src > 127)
                return -EINVAL;
        if (qe_ic_info[src].pri_reg == 0)
                return -EINVAL;

        temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);

        if (priority < 4) {
                temp &= ~(0x7 << (32 - priority * 3));
                temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
        } else {
                temp &= ~(0x7 << (24 - priority * 3));
                temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
        }

        qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);

        return 0;
}

/* Set a QE priority to use the high irq; only priorities 1 and 2 can use the high irq */
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
{
        struct qe_ic *qe_ic = qe_ic_from_irq(virq);
        unsigned int src = virq_to_hw(virq);
        u32 temp, control_reg = QEIC_CICNR, shift = 0;

        if (priority > 2 || priority == 0)
                return -EINVAL;

        switch (qe_ic_info[src].pri_reg) {
        case QEIC_CIPZCC:
                shift = CICNR_ZCC1T_SHIFT;
                break;
        case QEIC_CIPWCC:
                shift = CICNR_WCC1T_SHIFT;
                break;
        case QEIC_CIPYCC:
                shift = CICNR_YCC1T_SHIFT;
                break;
        case QEIC_CIPXCC:
                shift = CICNR_XCC1T_SHIFT;
                break;
        case QEIC_CIPRTA:
                shift = CRICR_RTA1T_SHIFT;
                control_reg = QEIC_CRICR;
                break;
        case QEIC_CIPRTB:
                shift = CRICR_RTB1T_SHIFT;
                control_reg = QEIC_CRICR;
                break;
        default:
                return -EINVAL;
        }

        shift += (2 - priority) * 2;
        temp = qe_ic_read(qe_ic->regs, control_reg);
        temp &= ~(SIGNAL_MASK << shift);
        temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
        qe_ic_write(qe_ic->regs, control_reg, temp);

        return 0;
}

static struct sysdev_class qe_ic_sysclass = {
        set_kset_name("qe_ic"),
};

static struct sys_device device_qe_ic = {
        .id = 0,
        .cls = &qe_ic_sysclass,
};

static int __init init_qe_ic_sysfs(void)
{
        int rc;

        printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");

        rc = sysdev_class_register(&qe_ic_sysclass);
        if (rc) {
                printk(KERN_ERR "Failed registering qe_ic sys class\n");
                return -ENODEV;
        }
        rc = sysdev_register(&device_qe_ic);
        if (rc) {
                printk(KERN_ERR "Failed registering qe_ic sys device\n");
                return -ENODEV;
        }
        return 0;
}

subsys_initcall(init_qe_ic_sysfs);