/*
 * arch/powerpc/sysdev/qe_lib/qe_ic.c
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc.  All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * QUICC ENGINE Interrupt Controller
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/qe_ic.h>

#include "qe_ic.h"

static DEFINE_SPINLOCK(qe_ic_lock);

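/*
 * Per-source configuration table, indexed by the QE interrupt source
 * number.  Each entry names the mask register and bit used to enable or
 * disable the source, plus the priority code and group priority register
 * used by qe_ic_set_priority().  Entries left zero-initialized are
 * reserved sources and are rejected by qe_ic_host_map().
 */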
static struct qe_ic_info qe_ic_info[] = {
	[1] = {
	       .mask = 0x00008000,
	       .mask_reg = QEIC_CIMR,
	       .pri_code = 0,
	       .pri_reg = QEIC_CIPWCC,
	       },
	[2] = {
	       .mask = 0x00004000,
	       .mask_reg = QEIC_CIMR,
	       .pri_code = 1,
	       .pri_reg = QEIC_CIPWCC,
	       },
	[3] = {
	       .mask = 0x00002000,
	       .mask_reg = QEIC_CIMR,
	       .pri_code = 2,
	       .pri_reg = QEIC_CIPWCC,
	       },
	[10] = {
		.mask = 0x00000040,
		.mask_reg = QEIC_CIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPZCC,
		},
	[11] = {
		.mask = 0x00000020,
		.mask_reg = QEIC_CIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPZCC,
		},
	[12] = {
		.mask = 0x00000010,
		.mask_reg = QEIC_CIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPZCC,
		},
	[13] = {
		.mask = 0x00000008,
		.mask_reg = QEIC_CIMR,
		.pri_code = 4,
		.pri_reg = QEIC_CIPZCC,
		},
	[14] = {
		.mask = 0x00000004,
		.mask_reg = QEIC_CIMR,
		.pri_code = 5,
		.pri_reg = QEIC_CIPZCC,
		},
	[15] = {
		.mask = 0x00000002,
		.mask_reg = QEIC_CIMR,
		.pri_code = 6,
		.pri_reg = QEIC_CIPZCC,
		},
	[20] = {
		.mask = 0x10000000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPRTA,
		},
	[25] = {
		.mask = 0x00800000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 0,
		.pri_reg = QEIC_CIPRTB,
		},
	[26] = {
		.mask = 0x00400000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPRTB,
		},
	[27] = {
		.mask = 0x00200000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPRTB,
		},
	[28] = {
		.mask = 0x00100000,
		.mask_reg = QEIC_CRIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPRTB,
		},
	[32] = {
		.mask = 0x80000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 0,
		.pri_reg = QEIC_CIPXCC,
		},
	[33] = {
		.mask = 0x40000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPXCC,
		},
	[34] = {
		.mask = 0x20000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPXCC,
		},
	[35] = {
		.mask = 0x10000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPXCC,
		},
	[36] = {
		.mask = 0x08000000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 4,
		.pri_reg = QEIC_CIPXCC,
		},
	[40] = {
		.mask = 0x00800000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 0,
		.pri_reg = QEIC_CIPYCC,
		},
	[41] = {
		.mask = 0x00400000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 1,
		.pri_reg = QEIC_CIPYCC,
		},
	[42] = {
		.mask = 0x00200000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 2,
		.pri_reg = QEIC_CIPYCC,
		},
	[43] = {
		.mask = 0x00100000,
		.mask_reg = QEIC_CIMR,
		.pri_code = 3,
		.pri_reg = QEIC_CIPYCC,
		},
};

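/*
 * Register accessors: 'reg' is a byte offset into the QE IC register
 * block; it is shifted right by two because 'base' points to 32-bit
 * big-endian registers.
 */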
static inline u32 qe_ic_read(volatile __be32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

static inline void qe_ic_write(volatile __be32 __iomem *base, unsigned int reg,
			       u32 value)
{
	out_be32(base + (reg >> 2), value);
}

static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
	return irq_desc[virq].chip_data;
}

#define virq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq)

static void qe_ic_unmask_irq(unsigned int virq)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	unsigned long flags;
	u32 temp;

	spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp | qe_ic_info[src].mask);

	spin_unlock_irqrestore(&qe_ic_lock, flags);
}

static void qe_ic_mask_irq(unsigned int virq)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	unsigned long flags;
	u32 temp;

	spin_lock_irqsave(&qe_ic_lock, flags);

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
	qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
		    temp & ~qe_ic_info[src].mask);

	/* Flush the above write before enabling interrupts; otherwise,
	 * spurious interrupts will sometimes happen.  To be 100% sure
	 * that the write has reached the device before interrupts are
	 * enabled, the mask register would have to be read back; however,
	 * this is not required for correctness, only to avoid wasting
	 * time on a large number of spurious interrupts.  In testing,
	 * a sync reduced the observed spurious interrupts to zero.
	 */
	mb();

	spin_unlock_irqrestore(&qe_ic_lock, flags);
}

static struct irq_chip qe_ic_irq_chip = {
	.typename = " QEIC  ",
	.unmask = qe_ic_unmask_irq,
	.mask = qe_ic_mask_irq,
	.mask_ack = qe_ic_mask_irq,
};

static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
{
	/* Exact match, unless qe_ic node is NULL */
	return h->of_node == NULL || h->of_node == node;
}

static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct qe_ic *qe_ic = h->host_data;
	struct irq_chip *chip;

	if (qe_ic_info[hw].mask == 0) {
		printk(KERN_ERR "Can't map reserved IRQ\n");
		return -EINVAL;
	}
	/* Default chip */
	chip = &qe_ic->hc_irq;

	set_irq_chip_data(virq, qe_ic);
	get_irq_desc(virq)->status |= IRQ_LEVEL;

	set_irq_chip_and_handler(virq, chip, handle_level_irq);

	return 0;
}

static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
			    u32 *intspec, unsigned int intsize,
			    irq_hw_number_t *out_hwirq,
			    unsigned int *out_flags)
{
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_flags = intspec[1];
	else
		*out_flags = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_host_ops qe_ic_host_ops = {
	.match = qe_ic_host_match,
	.map = qe_ic_host_map,
	.xlate = qe_ic_host_xlate,
};

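/*
 * The xlate callback above treats the first cell of an interrupt
 * specifier as the hardware source number and an optional second cell
 * as the sense/flags value.  A minimal device tree sketch (node name,
 * unit address and the source number 32 are illustrative only):
 *
 *	ucc@2000 {
 *		interrupt-parent = <&qeic>;
 *		interrupts = <32>;
 *	};
 */
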
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}

/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
	int irq;

	BUG_ON(qe_ic == NULL);

	/* get the interrupt source vector. */
	irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;

	if (irq == 0)
		return NO_IRQ;

	return irq_linear_revmap(qe_ic->irqhost, irq);
}

void qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
{
	struct qe_ic *qe_ic = desc->handler_data;
	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
}

void qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
{
	struct qe_ic *qe_ic = desc->handler_data;
	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
}

void __init qe_ic_init(struct device_node *node, unsigned int flags)
{
	struct qe_ic *qe_ic;
	struct resource res;
	u32 temp = 0, ret, high_active = 0;

	qe_ic = alloc_bootmem(sizeof(struct qe_ic));
	if (qe_ic == NULL)
		return;

	memset(qe_ic, 0, sizeof(struct qe_ic));

	qe_ic->irqhost = irq_alloc_host(of_node_get(node), IRQ_HOST_MAP_LINEAR,
					NR_QE_IC_INTS, &qe_ic_host_ops, 0);
	if (qe_ic->irqhost == NULL) {
		of_node_put(node);
		return;
	}

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return;

	qe_ic->regs = ioremap(res.start, res.end - res.start + 1);

	qe_ic->irqhost->host_data = qe_ic;
	qe_ic->hc_irq = qe_ic_irq_chip;

	qe_ic->virq_high = irq_of_parse_and_map(node, 0);
	qe_ic->virq_low = irq_of_parse_and_map(node, 1);

	if (qe_ic->virq_low == NO_IRQ) {
		printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
		return;
	}

	/*
	 * The default priority scheme is grouped.  If spread mode is
	 * required, configure the CICR accordingly.
	 */
	if (flags & QE_IC_SPREADMODE_GRP_W)
		temp |= CICR_GWCC;
	if (flags & QE_IC_SPREADMODE_GRP_X)
		temp |= CICR_GXCC;
	if (flags & QE_IC_SPREADMODE_GRP_Y)
		temp |= CICR_GYCC;
	if (flags & QE_IC_SPREADMODE_GRP_Z)
		temp |= CICR_GZCC;
	if (flags & QE_IC_SPREADMODE_GRP_RISCA)
		temp |= CICR_GRTA;
	if (flags & QE_IC_SPREADMODE_GRP_RISCB)
		temp |= CICR_GRTB;

	/* choose destination signal for highest priority interrupt */
	if (flags & QE_IC_HIGH_SIGNAL) {
		temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
		high_active = 1;
	}

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);

	set_irq_data(qe_ic->virq_low, qe_ic);
	set_irq_chained_handler(qe_ic->virq_low, qe_ic_cascade_low);

	if (qe_ic->virq_high != NO_IRQ) {
		set_irq_data(qe_ic->virq_high, qe_ic);
		set_irq_chained_handler(qe_ic->virq_high, qe_ic_cascade_high);
	}

	printk(KERN_INFO "QEIC (%d IRQ sources) at %p\n", NR_QE_IC_INTS,
	       qe_ic->regs);
}

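/*
 * qe_ic_init() is expected to run once from platform setup code with the
 * QE IC device tree node.  A minimal sketch (the node lookup and the 0
 * flags value are illustrative, not prescriptive):
 *
 *	np = of_find_node_by_type(NULL, "qeic");
 *	if (np) {
 *		qe_ic_init(np, 0);
 *		of_node_put(np);
 *	}
 */
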
void qe_ic_set_highest_priority(unsigned int virq, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp = 0;

	temp = qe_ic_read(qe_ic->regs, QEIC_CICR);

	temp &= ~CICR_HP_MASK;
	temp |= src << CICR_HP_SHIFT;

	temp &= ~CICR_HPIT_MASK;
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;

	qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
}

/* Set the priority level within its group, from 1 to 8 */
int qe_ic_set_priority(unsigned int virq, unsigned int priority)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp;

	if (priority > 8 || priority == 0)
		return -EINVAL;
	if (src > 127)
		return -EINVAL;
	if (qe_ic_info[src].pri_reg == 0)
		return -EINVAL;

	temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);

	if (priority < 4) {
		temp &= ~(0x7 << (32 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
	} else {
		temp &= ~(0x7 << (24 - priority * 3));
		temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
	}

	qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);

	return 0;
}

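/*
 * Example caller (hypothetical): raise a previously mapped QE source to
 * the top priority slot within its group, where 'virq' is a virtual IRQ
 * returned by the QE IC host mapping:
 *
 *	if (qe_ic_set_priority(virq, 1))
 *		printk(KERN_ERR "qe_ic: failed to set priority\n");
 */
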
/* Set a QE priority to use the high irq; only priorities 1 and 2 can use it */
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
{
	struct qe_ic *qe_ic = qe_ic_from_irq(virq);
	unsigned int src = virq_to_hw(virq);
	u32 temp, control_reg = QEIC_CICNR, shift = 0;

	if (priority > 2 || priority == 0)
		return -EINVAL;

	switch (qe_ic_info[src].pri_reg) {
	case QEIC_CIPZCC:
		shift = CICNR_ZCC1T_SHIFT;
		break;
	case QEIC_CIPWCC:
		shift = CICNR_WCC1T_SHIFT;
		break;
	case QEIC_CIPYCC:
		shift = CICNR_YCC1T_SHIFT;
		break;
	case QEIC_CIPXCC:
		shift = CICNR_XCC1T_SHIFT;
		break;
	case QEIC_CIPRTA:
		shift = CRICR_RTA1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	case QEIC_CIPRTB:
		shift = CRICR_RTB1T_SHIFT;
		control_reg = QEIC_CRICR;
		break;
	default:
		return -EINVAL;
	}

	shift += (2 - priority) * 2;
	temp = qe_ic_read(qe_ic->regs, control_reg);
	temp &= ~(SIGNAL_MASK << shift);
	temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
	qe_ic_write(qe_ic->regs, control_reg, temp);

	return 0;
}

static struct sysdev_class qe_ic_sysclass = {
	set_kset_name("qe_ic"),
};

static struct sys_device device_qe_ic = {
	.id = 0,
	.cls = &qe_ic_sysclass,
};

static int __init init_qe_ic_sysfs(void)
{
	int rc;

	printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");

	rc = sysdev_class_register(&qe_ic_sysclass);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys class\n");
		return -ENODEV;
	}
	rc = sysdev_register(&device_qe_ic);
	if (rc) {
		printk(KERN_ERR "Failed registering qe_ic sys device\n");
		return -ENODEV;
	}
	return 0;
}

subsys_initcall(init_qe_ic_sysfs);