/*
 * ip22-int.c: Routines for generic manipulation of the INT[23] ASIC
 *             found on INDY and Indigo2 workstations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
 *                    - Interrupt handling fixes
 * Copyright (C) 2001, 2003 Ladislav Michl (ladis@linux-mips.org)
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>

#include <asm/irq_cpu.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>

/* So far nothing hangs here */
#undef USE_LIO3_IRQ

struct sgint_regs *sgint;
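
/*
 * Each table maps an 8-bit pending-interrupt mask to the IRQ number of
 * its highest set bit (0 if nothing is pending).  arch_init_irq() fills
 * them in so the dispatch routines can resolve a mask with a single
 * lookup instead of scanning it bit by bit.
 */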
static char lc0msk_to_irqnr[256];
static char lc1msk_to_irqnr[256];
static char lc2msk_to_irqnr[256];
static char lc3msk_to_irqnr[256];

extern int ip22_eisa_init(void);

static void enable_local0_irq(unsigned int irq)
{
        /* don't allow mappable interrupt to be enabled from setup_irq,
         * we have our own way to do so */
        if (irq != SGI_MAP_0_IRQ)
                sgint->imask0 |= (1 << (irq - SGINT_LOCAL0));
}

static void disable_local0_irq(unsigned int irq)
{
        sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0));
}

static struct irq_chip ip22_local0_irq_type = {
        .name = "IP22 local 0",
        .ack = disable_local0_irq,
        .mask = disable_local0_irq,
        .mask_ack = disable_local0_irq,
        .unmask = enable_local0_irq,
};

static void enable_local1_irq(unsigned int irq)
{
        /* don't allow mappable interrupt to be enabled from setup_irq,
         * we have our own way to do so */
        if (irq != SGI_MAP_1_IRQ)
                sgint->imask1 |= (1 << (irq - SGINT_LOCAL1));
}

static void disable_local1_irq(unsigned int irq)
{
        sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1));
}

static struct irq_chip ip22_local1_irq_type = {
        .name = "IP22 local 1",
        .ack = disable_local1_irq,
        .mask = disable_local1_irq,
        .mask_ack = disable_local1_irq,
        .unmask = enable_local1_irq,
};
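
/*
 * "Local 2" and "local 3" are the mappable interrupt levels.  They have
 * no CPU interrupt line of their own: they are cascaded into local 0 and
 * local 1 via the LIO2/LIO3 status bits, so the handlers below keep the
 * SGI_MAP_0_IRQ/SGI_MAP_1_IRQ mask bit in sync with cmeimask0/cmeimask1.
 */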

static void enable_local2_irq(unsigned int irq)
{
        sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
        sgint->cmeimask0 |= (1 << (irq - SGINT_LOCAL2));
}

static void disable_local2_irq(unsigned int irq)
{
        sgint->cmeimask0 &= ~(1 << (irq - SGINT_LOCAL2));
        if (!sgint->cmeimask0)
                sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
}

static struct irq_chip ip22_local2_irq_type = {
        .name = "IP22 local 2",
        .ack = disable_local2_irq,
        .mask = disable_local2_irq,
        .mask_ack = disable_local2_irq,
        .unmask = enable_local2_irq,
};

static void enable_local3_irq(unsigned int irq)
{
        sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
        sgint->cmeimask1 |= (1 << (irq - SGINT_LOCAL3));
}

static void disable_local3_irq(unsigned int irq)
{
        sgint->cmeimask1 &= ~(1 << (irq - SGINT_LOCAL3));
        if (!sgint->cmeimask1)
                sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
}

static struct irq_chip ip22_local3_irq_type = {
        .name = "IP22 local 3",
        .ack = disable_local3_irq,
        .mask = disable_local3_irq,
        .mask_ack = disable_local3_irq,
        .unmask = enable_local3_irq,
};
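
/*
 * Dispatch helpers: read the level's interrupt status, mask it with the
 * enabled bits and use the lookup tables above to pick the pending IRQ.
 * If the LIO2/LIO3 bit is set, a mapped interrupt is pending and the
 * vmeistat/cmeimask registers are consulted instead.
 */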

static void indy_local0_irqdispatch(void)
{
        u8 mask = sgint->istat0 & sgint->imask0;
        u8 mask2;
        int irq;

        if (mask & SGINT_ISTAT0_LIO2) {
                mask2 = sgint->vmeistat & sgint->cmeimask0;
                irq = lc2msk_to_irqnr[mask2];
        } else
                irq = lc0msk_to_irqnr[mask];

        /* if irq == 0, then the interrupt has already been cleared */
        if (irq)
                do_IRQ(irq);
}

static void indy_local1_irqdispatch(void)
{
        u8 mask = sgint->istat1 & sgint->imask1;
        u8 mask2;
        int irq;

        if (mask & SGINT_ISTAT1_LIO3) {
                mask2 = sgint->vmeistat & sgint->cmeimask1;
                irq = lc3msk_to_irqnr[mask2];
        } else
                irq = lc1msk_to_irqnr[mask];

        /* if irq == 0, then the interrupt has already been cleared */
        if (irq)
                do_IRQ(irq);
}

extern void ip22_be_interrupt(int irq);

static void indy_buserror_irq(void)
{
        int irq = SGI_BUSERR_IRQ;

        irq_enter();
        kstat_this_cpu.irqs[irq]++;
        ip22_be_interrupt(irq);
        irq_exit();
}
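
/*
 * Placeholder irqactions for the cascade and bus error lines.  They use
 * no_action because the real handling happens in the dispatch routines
 * above; registering them simply claims the parent IRQs so nothing else
 * can attach to them.
 */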

static struct irqaction local0_cascade = {
        .handler = no_action,
        .flags = IRQF_DISABLED,
        .name = "local0 cascade",
};

static struct irqaction local1_cascade = {
        .handler = no_action,
        .flags = IRQF_DISABLED,
        .name = "local1 cascade",
};

static struct irqaction buserr = {
        .handler = no_action,
        .flags = IRQF_DISABLED,
        .name = "Bus Error",
};

static struct irqaction map0_cascade = {
        .handler = no_action,
        .flags = IRQF_DISABLED,
        .name = "mapable0 cascade",
};

#ifdef USE_LIO3_IRQ
static struct irqaction map1_cascade = {
        .handler = no_action,
        .flags = IRQF_DISABLED,
        .name = "mapable1 cascade",
};
#define SGI_INTERRUPTS SGINT_END
#else
#define SGI_INTERRUPTS SGINT_LOCAL3
#endif
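
/*
 * SGI_INTERRUPTS bounds the IRQ range that arch_init_irq() assigns an
 * irq_chip to; the local 3 (mapped LIO3) range is only covered when
 * USE_LIO3_IRQ is defined.
 */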

extern void indy_8254timer_irq(void);

/*
 * IRQs on the INDY look basically (barring software IRQs which we don't use
 * at all) like:
 *
 *      MIPS IRQ        Source
 *      --------        ------
 *             0        Software (ignored)
 *             1        Software (ignored)
 *             2        Local IRQ level zero
 *             3        Local IRQ level one
 *             4        8254 Timer zero
 *             5        8254 Timer one
 *             6        Bus Error
 *             7        R4k timer (what we use)
 *
 * We handle the IRQ according to _our_ priority which is:
 *
 * Highest ----     R4k Timer
 *                  Local IRQ zero
 *                  Local IRQ one
 *                  Bus Error
 *                  8254 Timer zero
 * Lowest  ----     8254 Timer one
 *
 * then we just return, if multiple IRQs are pending then we will just take
 * another exception, big deal.
 */

asmlinkage void plat_irq_dispatch(void)
{
        unsigned int pending = read_c0_status() & read_c0_cause();

        /*
         * First we check for r4k counter/timer IRQ.
         */
        if (pending & CAUSEF_IP7)
                do_IRQ(SGI_TIMER_IRQ);
        else if (pending & CAUSEF_IP2)
                indy_local0_irqdispatch();
        else if (pending & CAUSEF_IP3)
                indy_local1_irqdispatch();
        else if (pending & CAUSEF_IP6)
                indy_buserror_irq();
        else if (pending & (CAUSEF_IP4 | CAUSEF_IP5))
                indy_8254timer_irq();
}

void __init arch_init_irq(void)
{
        int i;

        /* Init local mask --> irq tables. */
        for (i = 0; i < 256; i++) {
                if (i & 0x80) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 7;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 7;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 7;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 7;
                } else if (i & 0x40) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 6;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 6;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 6;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 6;
                } else if (i & 0x20) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 5;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 5;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 5;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 5;
                } else if (i & 0x10) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 4;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 4;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 4;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 4;
                } else if (i & 0x08) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 3;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 3;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 3;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 3;
                } else if (i & 0x04) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 2;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 2;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 2;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 2;
                } else if (i & 0x02) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 1;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 1;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 1;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 1;
                } else if (i & 0x01) {
                        lc0msk_to_irqnr[i] = SGINT_LOCAL0 + 0;
                        lc1msk_to_irqnr[i] = SGINT_LOCAL1 + 0;
                        lc2msk_to_irqnr[i] = SGINT_LOCAL2 + 0;
                        lc3msk_to_irqnr[i] = SGINT_LOCAL3 + 0;
                } else {
                        lc0msk_to_irqnr[i] = 0;
                        lc1msk_to_irqnr[i] = 0;
                        lc2msk_to_irqnr[i] = 0;
                        lc3msk_to_irqnr[i] = 0;
                }
        }

        /* Mask out all interrupts. */
        sgint->imask0 = 0;
        sgint->imask1 = 0;
        sgint->cmeimask0 = 0;
        sgint->cmeimask1 = 0;

        /* init CPU irqs */
        mips_cpu_irq_init();

        for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) {
                struct irq_chip *handler;

                if (i < SGINT_LOCAL1)
                        handler = &ip22_local0_irq_type;
                else if (i < SGINT_LOCAL2)
                        handler = &ip22_local1_irq_type;
                else if (i < SGINT_LOCAL3)
                        handler = &ip22_local2_irq_type;
                else
                        handler = &ip22_local3_irq_type;

                set_irq_chip_and_handler(i, handler, handle_level_irq);
        }

        /* vector handlers; these register the IRQs as non-sharable */
        setup_irq(SGI_LOCAL_0_IRQ, &local0_cascade);
        setup_irq(SGI_LOCAL_1_IRQ, &local1_cascade);
        setup_irq(SGI_BUSERR_IRQ, &buserr);

        /* cascade in cascade. i love Indy ;-) */
        setup_irq(SGI_MAP_0_IRQ, &map0_cascade);
#ifdef USE_LIO3_IRQ
        setup_irq(SGI_MAP_1_IRQ, &map1_cascade);
#endif

#ifdef CONFIG_EISA
        if (ip22_is_fullhouse())        /* Only Indigo-2 has EISA stuff */
                ip22_eisa_init();
#endif
}