/*
 * Cell Internal Interrupt Controller
 *
 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO:
 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
 *   vs node numbers in the setup code
 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
 *   a non-active node to the active node)
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>

#include "interrupt.h"
struct iic {
	struct cbe_iic_thread_regs __iomem *regs;
	u8 target_id;
	u8 eoi_stack[16];
	int eoi_ptr;
	struct device_node *node;
};

static DEFINE_PER_CPU(struct iic, iic);
#define IIC_NODE_COUNT	2
static struct irq_host *iic_host;
/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
{
	unsigned char unit = bits.source & 0xf;
	unsigned char node = bits.source >> 4;
	unsigned char class = bits.class & 3;

	/* IPIs are identified by a flag and carry their number in the priority */
	if (bits.flags & CBE_IIC_IRQ_IPI)
		return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
	else
		return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
}
static void iic_mask(unsigned int irq)
{
}

static void iic_unmask(unsigned int irq)
{
}
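/*
 * EOI handling: iic_get_irq() pushes the priority of each interrupt it
 * takes onto a small per-CPU stack; iic_eoi() pops that stack and writes
 * the previous entry back into the priority register, restoring the
 * priority level that was in effect before the interrupt was taken.
 */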
static void iic_eoi(unsigned int irq)
{
	struct iic *iic = &__get_cpu_var(iic);
	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
	BUG_ON(iic->eoi_ptr < 0);
}
static struct irq_chip iic_chip = {
	.typename = " CELL-IIC ",
	.mask = iic_mask,
	.unmask = iic_unmask,
	.eoi = iic_eoi,
};
static void iic_ioexc_eoi(unsigned int irq)
{
}
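/*
 * Cascade handler for IO exceptions: read the per-node IIC status
 * register, acknowledge the pending sources and dispatch each set bit
 * to the linux interrupt it is mapped to in iic_host.
 */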
102 static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
104 struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data;
105 unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
106 unsigned long bits, ack;
110 bits = in_be64(&node_iic->iic_is);
113 /* pre-ack edge interrupts */
114 ack = bits & IIC_ISR_EDGE_MASK;
116 out_be64(&node_iic->iic_is, ack);
118 for (cascade = 63; cascade >= 0; cascade--)
119 if (bits & (0x8000000000000000UL >> cascade)) {
121 irq_linear_revmap(iic_host,
124 generic_handle_irq(cirq);
126 /* post-ack level interrupts */
127 ack = bits & ~IIC_ISR_EDGE_MASK;
129 out_be64(&node_iic->iic_is, ack);
131 desc->chip->eoi(irq);
static struct irq_chip iic_ioexc_chip = {
	.typename = " CELL-IOEX",
	.mask = iic_mask,
	.unmask = iic_unmask,
	.eoi = iic_ioexc_eoi,
};
/* Get an IRQ number from the pending state register of the IIC */
static unsigned int iic_get_irq(void)
{
	struct cbe_iic_pending_bits pending;
	struct iic *iic;
	unsigned int virq;

	iic = &__get_cpu_var(iic);
	*(unsigned long *) &pending =
		in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
	if (!(pending.flags & CBE_IIC_IRQ_VALID))
		return NO_IRQ;
	virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
	if (virq == NO_IRQ)
		return NO_IRQ;
	iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
	BUG_ON(iic->eoi_ptr > 15);
	return virq;
}
void iic_setup_cpu(void)
{
	out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
}
u8 iic_get_target_id(int cpu)
{
	return per_cpu(iic, cpu).target_id;
}
EXPORT_SYMBOL_GPL(iic_get_target_id);
#ifdef CONFIG_SMP

/* Use the highest interrupt priorities for IPI */
static inline int iic_ipi_to_irq(int ipi)
{
	return IIC_IRQ_TYPE_IPI + 0xf - ipi;
}
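/*
 * An IPI is raised by writing its message number into the priority field
 * of the generate register (see iic_cause_IPI() below); the IIC then
 * presents it at that priority, which is how iic_pending_to_hwnum()
 * recovers the IPI number from bits.prio.
 */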
void iic_cause_IPI(int cpu, int mesg)
{
	out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
}
struct irq_host *iic_get_irq_host(int node)
{
	return iic_host;
}
EXPORT_SYMBOL_GPL(iic_get_irq_host);
static irqreturn_t iic_ipi_action(int irq, void *dev_id)
{
	int ipi = (int)(long)dev_id;

	smp_message_recv(ipi);

	return IRQ_HANDLED;
}
static void iic_request_ipi(int ipi, const char *name)
{
	int virq;

	virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
	if (virq == NO_IRQ) {
		printk(KERN_ERR
		       "iic: failed to map IPI %s\n", name);
		return;
	}
	if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
			(void *)(long)ipi))
		printk(KERN_ERR
		       "iic: failed to request IPI %s\n", name);
}
void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
	iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
	iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
#ifdef CONFIG_DEBUGGER
	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}

#endif /* CONFIG_SMP */
static int iic_host_match(struct irq_host *h, struct device_node *node)
{
	return of_device_is_compatible(node,
				    "IBM,CBEA-Internal-Interrupt-Controller");
}
extern int noirqdebug;
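/*
 * Local flow handler for IIC interrupts: like the generic fasteoi handler
 * it finishes with an EOI, but the do/while loop below also replays the
 * interrupt if it was marked pending again while the action was running.
 */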
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= IRQ_PENDING;
		goto out_eoi;
	}

	kstat_cpu(cpu).irqs[irq]++;

	/* Mark the IRQ currently in progress.*/
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		if (unlikely(!action))
			goto out_eoi;

		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_eoi:
	desc->chip->eoi(irq);
	spin_unlock(&desc->lock);
}
static int iic_host_map(struct irq_host *h, unsigned int virq,
			irq_hw_number_t hw)
{
	switch (hw & IIC_IRQ_TYPE_MASK) {
	case IIC_IRQ_TYPE_IPI:
		set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
		break;
	case IIC_IRQ_TYPE_IOEXC:
		set_irq_chip_and_handler(virq, &iic_ioexc_chip,
					 handle_iic_irq);
		break;
	default:
		set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
	}
	return 0;
}
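/*
 * The device tree describes these interrupts with a single cell whose
 * four bytes encode node, IO exception number, class and unit;
 * iic_host_xlate() unpacks that cell into the linear hwirq space used
 * by this driver.
 */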
static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	unsigned int node, ext, unit, class;
	const u32 *val;

	if (!of_device_is_compatible(ct,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;

	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}
static struct irq_host_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	iic->eoi_stack[0] = 0xff;
	iic->node = of_node_get(node);
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
	       hw_cpu, iic->target_id, node->full_name);
}
static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for (dn = NULL;
	     (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
		if (!of_device_is_compatible(dn,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (cascade == NO_IRQ)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		set_irq_data(cascade, (void __force *)node_iic);
		set_irq_chained_handler(cascade, iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)		/* priority */ |
			 (node << 4)		/* dest node */ |
			 IIC_UNIT_THREAD_0	/* route them to thread 0 */);
		/* Flush pending (make sure it triggers if there is
		 * anything pending
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}
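/*
 * Boot-time initialization: iic_init_IRQ() below allocates the linear
 * irq host, then setup_iic() walks the device tree, maps the per-thread
 * register blocks and wires up the per-node IO exception cascade.
 */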
void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
				  &iic_host_ops, IIC_IRQ_INVALID);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");

	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;

	/* Enable on current CPU */
	iic_setup_cpu();
}
void iic_set_interrupt_routing(int cpu, int thread, int priority)
{
	struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
	u64 iic_ir = 0;
	int node = cpu >> 1;

	/* Set which node and thread will handle the next interrupt */
	iic_ir |= CBE_IIC_IR_PRIO(priority) |
		  CBE_IIC_IR_DEST_NODE(node);
	if (thread == 0)
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
	else
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
	out_be64(&iic_regs->iic_ir, iic_ir);
}