/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/system.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

extern asmlinkage void ip27_irq(void);

extern struct bridge_controller *irq_to_bridge[];
extern int irq_to_slot[];

/*
 * use these macros to get the encoded nasid and widget id
 * from the irq value
 */
#define IRQ_TO_BRIDGE(i)	irq_to_bridge[(i)]
#define SLOT_FROM_PCI_IRQ(i)	irq_to_slot[i]
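
/*
 * alloc_level() reserves a free interrupt level in the node's hub-wide
 * allocation mask and records which Linux irq it was handed out for in
 * the calling CPU's slice data, so the dispatchers below can translate
 * a pending level back into an irq.
 */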
static inline int alloc_level(int cpu, int irq)
{
	struct hub_data *hub = hub_data(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;
	int level;

	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
	if (level >= LEVELS_PER_SLICE)
		panic("Cpu %d flooded with devices\n", cpu);

	__set_bit(level, hub->irq_alloc_mask);
	si->level_to_irq[level] = irq;

	return level;
}
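
/*
 * find_level() is the reverse mapping: scan every online CPU's
 * level_to_irq[] table for the given irq and report the owning CPU
 * through *cpunum.  Panics if the irq is unknown.
 */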
static inline int find_level(cpuid_t *cpunum, int irq)
{
	int cpu, i;

	for_each_online_cpu(cpu) {
		struct slice_data *si = cpu_data[cpu].data;

		for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
			if (si->level_to_irq[i] == irq) {
				*cpunum = cpu;

				return i;
			}
	}

	panic("Could not identify cpu/level for irq %d\n", irq);
}
static int ms1bit(unsigned long x)
{
	int b = 0, s;

	s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
	s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
	s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
	s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
	s =  1; if (x >>  1 == 0) s = 0; b += s;

	return b;
}

/*
 * This code is unnecessarily complex, because we do SA_INTERRUPT
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we can not just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 */
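
/*
 * Dispatcher for interrupts signalled through PI_INT_PEND0.  The resched
 * and call IPIs are handled inline; any other pending level is translated
 * through level_to_irq[] and handed to do_IRQ().
 */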
static void ip27_do_irq_mask0(struct pt_regs *regs)
{
	int irq, swlevel;
	hubreg_t pend0, mask0;
	cpuid_t cpu = smp_processor_id();
	int pi_int_mask0 =
		(cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);
	mask0 = LOCAL_HUB_L(pi_int_mask0);

	pend0 &= mask0;		/* Pick intrs we should look at */
	if (!pend0)
		return;

	swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		smp_call_function_interrupt();
	} else
#endif
	{
		/* "map" swlevel to irq */
		struct slice_data *si = cpu_data[cpu].data;

		irq = si->level_to_irq[swlevel];
		do_IRQ(irq, regs);
	}

	LOCAL_HUB_L(PI_INT_PEND0);
}
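
/*
 * Companion dispatcher for PI_INT_PEND1.  The IPI special cases above do
 * not apply here, so the highest pending enabled level is simply mapped
 * through level_to_irq[], acknowledged and passed to do_IRQ().
 */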
static void ip27_do_irq_mask1(struct pt_regs *regs)
{
	int irq, swlevel;
	hubreg_t pend1, mask1;
	cpuid_t cpu = smp_processor_id();
	int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
	struct slice_data *si = cpu_data[cpu].data;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);
	mask1 = LOCAL_HUB_L(pi_int_mask1);

	pend1 &= mask1;		/* Pick intrs we should look at */
	if (!pend1)
		return;

	swlevel = ms1bit(pend1);
	/* "map" swlevel to irq */
	irq = si->level_to_irq[swlevel];
	LOCAL_HUB_CLR_INTR(swlevel);
	do_IRQ(irq, regs);

	LOCAL_HUB_L(PI_INT_PEND1);
}

static void ip27_prof_timer(struct pt_regs *regs)
{
	panic("CPU %d got a profiling interrupt", smp_processor_id());
}

static void ip27_hub_error(struct pt_regs *regs)
{
	panic("CPU %d got a hub error interrupt", smp_processor_id());
}
static int intr_connect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;
	unsigned long flags;

	set_bit(bit, si->irq_enable_mask);

	local_irq_save(flags);
	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}
	local_irq_restore(flags);

	return 0;
}

static int intr_disconnect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;

	clear_bit(bit, si->irq_enable_mask);

	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}

	return 0;
}

/* Startup one of the (PCI ...) IRQs routed over a bridge.  */
static unsigned int startup_bridge_irq(unsigned int irq)
{
	struct bridge_controller *bc;
	bridgereg_t device;
	bridge_t *bridge;
	int pin, swlevel;
	cpuid_t cpu;

	pin = SLOT_FROM_PCI_IRQ(irq);
	bc = IRQ_TO_BRIDGE(irq);
	bridge = bc->base;

	pr_debug("bridge_startup(): irq= 0x%x  pin=%d\n", irq, pin);
	/*
	 * "map" irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	swlevel = find_level(&cpu, irq);
	bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
	bridge->b_int_enable |= (1 << pin);
	bridge->b_int_enable |= 0x7ffffe00;	/* more stuff in int_enable */

	/*
	 * Enable sending of an interrupt clear packet to the hub on a high to
	 * low transition of the interrupt pin.
	 *
	 * IRIX sets additional bits in the address which are documented as
	 * reserved in the bridge docs.
	 */
	bridge->b_int_mode |= (1UL << pin);

	/*
	 * We assume the bridge to have a 1:1 mapping between devices
	 * (slots) and intr pins.
	 */
	device = bridge->b_int_device;
	device &= ~(7 << (pin*3));
	device |= (pin << (pin*3));
	bridge->b_int_device = device;

	bridge->b_wid_tflush;

	return 0;	/* Never anything pending.  */
}

/* Shutdown one of the (PCI ...) IRQs routed over a bridge.  */
static void shutdown_bridge_irq(unsigned int irq)
{
	struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
	struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
	bridge_t *bridge = bc->base;
	struct slice_data *si = cpu_data[bc->irq_cpu].data;
	int pin, swlevel;
	cpuid_t cpu;

	pr_debug("bridge_shutdown: irq 0x%x\n", irq);
	pin = SLOT_FROM_PCI_IRQ(irq);

	/*
	 * map irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	swlevel = find_level(&cpu, irq);
	intr_disconnect_level(cpu, swlevel);

	__clear_bit(swlevel, hub->irq_alloc_mask);
	si->level_to_irq[swlevel] = -1;

	bridge->b_int_enable &= ~(1 << pin);
	bridge->b_wid_tflush;
}
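
/*
 * enable/disable only touch the hub-side interrupt masks; looking up the
 * owning CPU and level goes through the linear find_level() scan, hence
 * the "Criminal offence" remarks below.
 */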
static inline void enable_bridge_irq(unsigned int irq)
{
	cpuid_t cpu;
	int swlevel;

	swlevel = find_level(&cpu, irq);	/* Criminal offence */
	intr_connect_level(cpu, swlevel);
}

static inline void disable_bridge_irq(unsigned int irq)
{
	cpuid_t cpu;
	int swlevel;

	swlevel = find_level(&cpu, irq);	/* Criminal offence */
	intr_disconnect_level(cpu, swlevel);
}

static void mask_and_ack_bridge_irq(unsigned int irq)
{
	disable_bridge_irq(irq);
}

static void end_bridge_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
	    irq_desc[irq].action)
		enable_bridge_irq(irq);
}
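
/*
 * The 'controller-template' referred to at the top of this file: the
 * generic irq code calls through these hooks, so drivers using a
 * bridge-routed irq never touch the hub or the bridge directly.
 */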
static struct hw_interrupt_type bridge_irq_type = {
	.typename	= "bridge",
	.startup	= startup_bridge_irq,
	.shutdown	= shutdown_bridge_irq,
	.enable		= enable_bridge_irq,
	.disable	= disable_bridge_irq,
	.ack		= mask_and_ack_bridge_irq,
	.end		= end_bridge_irq,
};

static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
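
/*
 * Simple bitmap allocator for irq numbers.  test_and_set_bit() closes
 * the race between two CPUs spotting the same free bit; the loser just
 * retries the search.
 */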
static int allocate_irqno(void)
{
	int irq;

again:
	irq = find_first_zero_bit(irq_map, NR_IRQS);

	if (irq >= NR_IRQS)
		return -ENOSPC;

	if (test_and_set_bit(irq, irq_map))
		goto again;

	return irq;
}

void free_irqno(unsigned int irq)
{
	clear_bit(irq, irq_map);
}
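
/* Point the generic irq descriptor at the bridge controller template. */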
void __devinit register_bridge_irq(unsigned int irq)
{
	irq_desc[irq].status	= IRQ_DISABLED;
	irq_desc[irq].action	= 0;
	irq_desc[irq].depth	= 1;
	irq_desc[irq].handler	= &bridge_irq_type;
}
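
/*
 * Allocate an irq number and a hub interrupt level for a bridge, make
 * sure the level is not already pending, then enable it and register
 * the descriptor.  Returns the irq number on success or a negative
 * error code.
 */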
int __devinit request_bridge_irq(struct bridge_controller *bc)
{
	int irq = allocate_irqno();
	int swlevel, cpu;
	nasid_t nasid;

	if (irq < 0)
		return irq;

	/*
	 * "map" irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	cpu = bc->irq_cpu;
	swlevel = alloc_level(cpu, irq);
	if (unlikely(swlevel < 0)) {
		free_irqno(irq);

		return -EAGAIN;
	}

	/* Make sure it's not already pending when we connect it. */
	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	REMOTE_HUB_CLR_INTR(nasid, swlevel);

	intr_connect_level(cpu, swlevel);

	register_bridge_irq(irq);

	return irq;
}

extern void ip27_rt_timer_interrupt(struct pt_regs *regs);
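
/*
 * Top-level dispatch: look at the IP bits in c0_cause, masked by
 * c0_status, and fan out to the RT timer, the two PI_INT_PEND
 * dispatchers, the profiling timer or the hub error handler.
 */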
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
{
	unsigned long pending = read_c0_cause() & read_c0_status();

	if (pending & CAUSEF_IP4)
		ip27_rt_timer_interrupt(regs);
	else if (pending & CAUSEF_IP2)	/* PI_INT_PEND_0 or CC_PEND_{A|B} */
		ip27_do_irq_mask0(regs);
	else if (pending & CAUSEF_IP3)	/* PI_INT_PEND_1 */
		ip27_do_irq_mask1(regs);
	else if (pending & CAUSEF_IP5)
		ip27_prof_timer(regs);
	else if (pending & CAUSEF_IP6)
		ip27_hub_error(regs);
}

void __init arch_init_irq(void)
{
}
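
/*
 * Claim the resched and call IPI levels for this hub slice, clear any
 * stale pending state and load the slice's enable mask into the A or B
 * mask registers of the local hub.  Runs on the CPU being set up, since
 * only the local hub is touched.
 */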
void install_ipi(void)
{
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int cpu = smp_processor_id();
	struct slice_data *si = cpu_data[cpu].data;
	struct hub_data *hub = hub_data(cpu_to_node(cpu));
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	__set_bit(resched, hub->irq_alloc_mask);
	__set_bit(resched, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	__set_bit(call, hub->irq_alloc_mask);
	__set_bit(call, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}
}