/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>

#include "xics.h"
#include "plpar_wrappers.h"
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define	DEFAULT_PRIORITY	5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked IRQF_DISABLED
 */
#define IPI_PRIORITY		4
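/*
 * Layout of the memory-mapped XICS presentation area referenced through
 * xics_per_cpu[] below.  This is a reconstructed sketch based on the
 * xirr.word / xirr.bytes[0] / qirr.bytes[0] accesses in this file; the
 * xirr_poll and dummy fields are assumptions filling out the usual XICS
 * register spacing.  XIRR carries the CPPR (current processor priority)
 * in its top byte and the XISR (interrupt source) in the low 24 bits: a
 * read accepts the pending interrupt, a write signals EOI.  QIRR (the
 * MFRR) is written with a priority to raise an IPI, or 0xff to clear it.
 */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};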
static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];

static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;

static struct irq_host *xics_host;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
/* Direct HW low level accessors */

static inline unsigned int direct_xirr_info_get(void)
{
	int cpu = smp_processor_id();

	return in_be32(&xics_per_cpu[cpu]->xirr.word);
}

static inline void direct_xirr_info_set(int value)
{
	int cpu = smp_processor_id();

	out_be32(&xics_per_cpu[cpu]->xirr.word, value);
}

static inline void direct_cppr_info(u8 value)
{
	int cpu = smp_processor_id();

	out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value);
}

static inline void direct_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
/* LPAR low level accessors */

static inline unsigned int lpar_xirr_info_get(void)
{
	unsigned long lpar_rc;
	unsigned long return_value;

	lpar_rc = plpar_xirr(&return_value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code xirr - rc = %lx\n", lpar_rc);
	return (unsigned int)return_value;
}

static inline void lpar_xirr_info_set(int value)
{
	unsigned long lpar_rc;
	unsigned long val64 = value & 0xffffffff;

	lpar_rc = plpar_eoi(val64);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
		      val64);
}

static inline void lpar_cppr_info(u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_cppr(value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", lpar_rc);
}

static inline void lpar_qirr_info(int n_cpu, u8 value)
{
	unsigned long lpar_rc;

	lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
	if (lpar_rc != H_SUCCESS)
		panic("bad return code qirr - rc = %lx\n", lpar_rc);
}
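/*
 * In the LPAR case the presentation registers are not mapped directly;
 * the plpar_xirr/plpar_eoi/plpar_cppr/plpar_ipi wrappers used above issue
 * the corresponding PAPR hypervisor calls (H_XIRR, H_EOI, H_CPPR, H_IPI),
 * so their semantics mirror the direct accessors.
 */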
/* High level handlers and init code */

static void xics_update_irq_servers(void)
{
	int i, j;
	struct device_node *np;
	int ilen;
	const u32 *ireg, *isize;
	u32 hcpuid;

	/* Find the server numbers for the boot cpu. */
	np = of_get_cpu_node(boot_cpuid, NULL);
	BUG_ON(!np);

	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
	if (!ireg) {
		of_node_put(np);
		return;
	}

	i = ilen / sizeof(int);
	hcpuid = get_hard_smp_processor_id(boot_cpuid);

	/* Global interrupt distribution server is specified in the last
	 * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
	 * entry from this property for current boot cpu id and use it as
	 * default distribution server
	 */
	for (j = 0; j < i; j += 2) {
		if (ireg[j] == hcpuid) {
			default_server = hcpuid;
			default_distrib_server = ireg[j+1];

			isize = of_get_property(np,
					"ibm,interrupt-server#-size", NULL);
			if (isize)
				interrupt_server_size = *isize;
		}
	}

	of_node_put(np);
}
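/*
 * The "ibm,ppc-interrupt-gserver#s" property is consumed above as a list
 * of cell pairs: the even cell is a hardware cpu id, the odd cell the
 * global distribution (gserver) queue associated with it, e.g.
 *
 *	ibm,ppc-interrupt-gserver#s = <hwcpu0 gserver0 hwcpu1 gserver1 ...>;
 *
 * The pair matching the boot cpu supplies default_server and
 * default_distrib_server.  (Illustrative layout only, inferred from the
 * parsing loop above.)
 */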
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
	int server;
	/* For the moment only implement delivery to all cpus or one cpu */
	cpumask_t cpumask = irq_desc[virq].affinity;
	cpumask_t tmp = CPU_MASK_NONE;

	if (!cpu_isset(default_server, cpu_online_map))
		xics_update_irq_servers();

	if (!distribute_irqs)
		return default_server;

	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
		cpus_and(tmp, cpu_online_map, cpumask);

		server = first_cpu(tmp);

		if (server < NR_CPUS)
			return get_hard_smp_processor_id(server);

		if (strict_check)
			return -1;
	}

	if (cpus_equal(cpu_online_map, cpu_present_map))
		return default_distrib_server;

	return default_server;
}
#else
static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
	return default_server;
}
#endif
static void xics_unmask_irq(unsigned int virq)
{
	unsigned int irq;
	int call_status;
	int server;

	pr_debug("xics: unmask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	pr_debug(" -> map to hwirq 0x%x\n", irq);
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	server = get_irq_server(virq, 0);

	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
				DEFAULT_PRIORITY);
	if (call_status != 0) {
		printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
		       "returned %d\n", irq, call_status);
		printk("set_xive %x, server %x\n", ibm_set_xive, server);
		return;
	}

	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
		       "returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_real_irq(unsigned int irq)
{
	int call_status;

	if (irq == XICS_IPI)
		return;

	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if (call_status != 0) {
		printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
		       "ibm_int_off returned %d\n", irq, call_status);
		return;
	}

	/* Have to set XIVE to 0xff to be able to remove a slot */
	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
				default_server, 0xff);
	if (call_status != 0) {
		printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
		       " returned %d\n", irq, call_status);
		return;
	}
}
static void xics_mask_irq(unsigned int virq)
{
	unsigned int irq;

	pr_debug("xics: mask virq %d\n", virq);

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;
	xics_mask_real_irq(irq);
}
static unsigned int xics_startup(unsigned int virq)
{
	unsigned int irq;

	/* force a reverse mapping of the interrupt so it gets in the cache */
	irq = (unsigned int)irq_map[virq].hwirq;
	irq_radix_revmap(xics_host, irq);

	/* unmask it */
	xics_unmask_irq(virq);
	return 0;
}
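/*
 * Note: doing the irq_radix_revmap() lookup at startup populates the
 * reverse-map radix tree for this source while it is still safe to
 * allocate, so the interrupt-time lookup in xics_remap_irq() below is a
 * plain cache hit.
 */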
static void xics_eoi_direct(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	direct_xirr_info_set((0xff << 24) | irq);
}

static void xics_eoi_lpar(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	lpar_xirr_info_set((0xff << 24) | irq);
}
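/*
 * The (0xff << 24) | irq value written on EOI packs a new CPPR of 0xff
 * (least favored, i.e. accept anything again) into the top byte of XIRR
 * and the interrupt source being EOI'd into the low 24 bits.
 */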
static inline unsigned int xics_remap_irq(unsigned int vec)
{
	unsigned int irq;

	vec &= 0x00ffffff;

	if (vec == XICS_IRQ_SPURIOUS)
		return NO_IRQ;
	irq = irq_radix_revmap(xics_host, vec);
	if (likely(irq != NO_IRQ))
		return irq;

	printk(KERN_ERR "Interrupt %u (real) is invalid,"
	       " disabling it.\n", vec);
	xics_mask_real_irq(vec);
	return NO_IRQ;
}

static unsigned int xics_get_irq_direct(void)
{
	return xics_remap_irq(direct_xirr_info_get());
}

static unsigned int xics_get_irq_lpar(void)
{
	return xics_remap_irq(lpar_xirr_info_get());
}
#ifdef CONFIG_SMP

static irqreturn_t xics_ipi_dispatch(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE);
		}
		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_MIGRATE_TASK);
		}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
		}
#endif
	}
	return IRQ_HANDLED;
}
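/*
 * IPI protocol: the sender (the pseries smp message-pass code) sets a
 * PPC_MSG_* bit in the target's xics_ipi_message[].value and then writes
 * IPI_PRIORITY to that cpu's QIRR via xics_cause_IPI() below.  The
 * receiving cpu first writes 0xff to its own QIRR to retire the request,
 * then drains every pending message bit in xics_ipi_dispatch() above.
 */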
static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	direct_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}

static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	lpar_qirr_info(cpu, 0xff);

	return xics_ipi_dispatch(cpu);
}

void xics_cause_IPI(int cpu)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, IPI_PRIORITY);
	else
		direct_qirr_info(cpu, IPI_PRIORITY);
}

#endif /* CONFIG_SMP */
static void xics_set_cpu_priority(unsigned char cppr)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_cppr_info(cppr);
	else
		direct_cppr_info(cppr);
	iosync();
}
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	int irq_server;

	irq = (unsigned int)irq_map[virq].hwirq;
	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/*
	 * For the moment only implement delivery to all cpus or one cpu.
	 * Get current irq_server for the given irq
	 */
	irq_server = get_irq_server(virq, 1);
	if (irq_server == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		printk(KERN_WARNING "xics_set_affinity: No online cpus in "
		       "the mask %s for irq %d\n", cpulist, virq);
		return;
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
			   irq, irq_server, xics_status[1]);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}
void xics_setup_cpu(void)
{
	xics_set_cpu_priority(0xff);

	/*
	 * Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 *
	 * XXX: undo of teardown on kexec needs this too, as may hotplug
	 */
	rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
}
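/*
 * The second argument to rtas_set_indicator_fast() above is the indicator
 * index for the global interrupt queue: (2^interrupt_server_size - 1)
 * minus the distribution server number.  A state of 1 joins the queue
 * (here and at boot), 0 leaves it (kexec/hotplug teardown below).
 */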
static struct irq_chip xics_pic_direct = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_direct,
	.set_affinity = xics_set_affinity
};

static struct irq_chip xics_pic_lpar = {
	.typename = " XICS ",
	.startup = xics_startup,
	.mask = xics_mask_irq,
	.unmask = xics_unmask_irq,
	.eoi = xics_eoi_lpar,
	.set_affinity = xics_set_affinity
};
static int xics_host_match(struct irq_host *h, struct device_node *node)
{
	/* IBM machines have interrupt parents of various funky types for things
	 * like vdevices, events, etc... The trick we use here is to match
	 * everything here except the legacy 8259 which is compatible "chrp,iic"
	 */
	return !of_device_is_compatible(node, "chrp,iic");
}
static int xics_host_map_direct(struct irq_host *h, unsigned int virq,
				irq_hw_number_t hw)
{
	pr_debug("xics: map_direct virq %d, hwirq 0x%lx\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_direct, handle_fasteoi_irq);
	return 0;
}

static int xics_host_map_lpar(struct irq_host *h, unsigned int virq,
			      irq_hw_number_t hw)
{
	pr_debug("xics: map_lpar virq %d, hwirq 0x%lx\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &xics_pic_lpar, handle_fasteoi_irq);
	return 0;
}
static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
			   u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* Current xics implementation translates everything
	 * to level. It is not technically right for MSIs but this
	 * is irrelevant at this point. We might get smarter in the future
	 */
	*out_hwirq = intspec[0];
	*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}
static struct irq_host_ops xics_host_direct_ops = {
	.match = xics_host_match,
	.map = xics_host_map_direct,
	.xlate = xics_host_xlate,
};

static struct irq_host_ops xics_host_lpar_ops = {
	.match = xics_host_match,
	.map = xics_host_map_lpar,
	.xlate = xics_host_xlate,
};
static void __init xics_init_host(void)
{
	struct irq_host_ops *ops;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ops = &xics_host_lpar_ops;
	else
		ops = &xics_host_direct_ops;
	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}
static void __init xics_map_one_cpu(int hw_id, unsigned long addr,
				    unsigned long size)
{
#ifdef CONFIG_SMP
	int i;

	/* This may look gross but it's good enough for now, we don't quite
	 * have a hard -> linux processor id matching.
	 */
	for_each_possible_cpu(i) {
		if (!cpu_present(i))
			continue;
		if (hw_id == get_hard_smp_processor_id(i)) {
			xics_per_cpu[i] = ioremap(addr, size);
			return;
		}
	}
#else
	if (hw_id != 0)
		return;

	xics_per_cpu[0] = ioremap(addr, size);
#endif /* CONFIG_SMP */
}
static void __init xics_init_one_node(struct device_node *np,
				      unsigned int *indx)
{
	int ilen;
	const u32 *ireg;

	/* This code makes the theoretically broken assumption that the
	 * interrupt server numbers are the same as the hard CPU numbers.
	 * This happens to be the case so far but we are playing with fire...
	 * should be fixed one of these days. -BenH.
	 */
	ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL);

	/* Does that ever happen ? we'll know soon enough... but even good'old
	 * f80 does have that property ..
	 */
	WARN_ON(ireg == NULL);

	/*
	 * set node starting index for this node
	 */
	*indx = *ireg;

	ireg = of_get_property(np, "reg", &ilen);
	if (!ireg)
		panic("xics_init_IRQ: can't find interrupt reg property");

	while (ilen >= (4 * sizeof(u32))) {
		unsigned long addr, size;

		/* XXX Use proper OF parsing code here !!! */
		addr = (unsigned long)*ireg++ << 32;
		ilen -= 4;
		addr |= *ireg++;
		ilen -= 4;

		size = (unsigned long)*ireg++ << 32;
		ilen -= 4;
		size |= *ireg++;
		ilen -= 4;

		xics_map_one_cpu(*indx, addr, size);
		(*indx)++;
	}
}
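/*
 * Each "reg" entry consumed above is four 32-bit cells: a 64-bit physical
 * address followed by a 64-bit size, i.e.
 *
 *	reg = <addr-hi addr-lo size-hi size-lo ...>;
 *
 * One entry is mapped per interrupt server, starting at the index taken
 * from "ibm,interrupt-server-ranges".
 */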
static void __init xics_setup_8259_cascade(void)
{
	struct device_node *np, *old, *found = NULL;
	int cascade, naddr;
	const u32 *addrp;
	unsigned long intack = 0;

	for_each_node_by_type(np, "interrupt-controller")
		if (of_device_is_compatible(np, "chrp,iic")) {
			found = np;
			break;
		}
	if (found == NULL) {
		printk(KERN_DEBUG "xics: no ISA interrupt controller\n");
		return;
	}
	cascade = irq_of_parse_and_map(found, 0);
	if (cascade == NO_IRQ) {
		printk(KERN_ERR "xics: failed to map cascade interrupt");
		return;
	}
	pr_debug("xics: cascade mapped to irq %d\n", cascade);

	for (old = of_node_get(found); old != NULL ; old = np) {
		np = of_get_parent(old);
		of_node_put(old);
		if (np == NULL)
			break;
		if (strcmp(np->name, "pci") != 0)
			continue;
		addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (addrp == NULL)
			continue;
		naddr = of_n_addr_cells(np);
		intack = addrp[naddr-1];
		if (naddr > 1)
			intack |= ((unsigned long)addrp[naddr-2]) << 32;
	}
	printk(KERN_DEBUG "xics: PCI 8259 intack at 0x%016lx\n", intack);
	i8259_init(found, intack);
	of_node_put(found);
	set_irq_chained_handler(cascade, pseries_8259_cascade);
}
void __init xics_init_IRQ(void)
{
	struct device_node *np;
	u32 indx = 0;
	int found = 0;

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") {
		found = 1;
		if (firmware_has_feature(FW_FEATURE_LPAR))
			break;
		xics_init_one_node(np, &indx);
	}
	if (found == 0)
		return;

	xics_update_irq_servers();
	xics_init_host();

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.get_irq = xics_get_irq_lpar;
	else
		ppc_md.get_irq = xics_get_irq_direct;

	xics_setup_cpu();

	xics_setup_8259_cascade();

	ppc64_boot_msg(0x21, "XICS Done");
}
#ifdef CONFIG_SMP

void xics_request_IPIs(void)
{
	unsigned int ipi;
	int rc;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(ipi == NO_IRQ);

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	set_irq_handler(ipi, handle_percpu_irq);
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rc = request_irq(ipi, xics_ipi_action_lpar, IRQF_DISABLED,
				"IPI", NULL);
	else
		rc = request_irq(ipi, xics_ipi_action_direct, IRQF_DISABLED,
				"IPI", NULL);
	BUG_ON(rc);
}

#endif /* CONFIG_SMP */
void xics_teardown_cpu(void)
{
	int cpu = smp_processor_id();

	xics_set_cpu_priority(0);

	/* Clear any pending IPI request (writing 0xff retires it) */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		lpar_qirr_info(cpu, 0xff);
	else
		direct_qirr_info(cpu, 0xff);
}
void xics_kexec_teardown_cpu(int secondary)
{
	unsigned int ipi;
	struct irq_desc *desc;

	xics_teardown_cpu();

	/*
	 * we need to EOI the IPI
	 *
	 * probably need to check all the other interrupts too
	 * should we be flagging idle loop instead?
	 * or creating some task to be scheduled?
	 */
	ipi = irq_find_mapping(xics_host, XICS_IPI);
	if (ipi == XICS_IRQ_SPURIOUS)
		return;
	desc = get_irq_desc(ipi);
	if (desc->chip && desc->chip->eoi)
		desc->chip->eoi(ipi);

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
			(1UL << interrupt_server_size) - 1 -
			default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
	int status;
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;

	/* Reject any interrupt that was queued to us... */
	xics_set_cpu_priority(0);

	/* remove ourselves from the global interrupt queue */
	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
		(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
	WARN_ON(status < 0);

	/* Allow IPIs again... */
	xics_set_cpu_priority(DEFAULT_PRIORITY);

	for_each_irq(virq) {
		struct irq_desc *desc;
		int xics_status[2];
		unsigned long flags;

		/* We can't set affinity on ISA interrupts */
		if (virq < NUM_ISA_INTERRUPTS)
			continue;
		if (irq_map[virq].host != xics_host)
			continue;
		irq = (unsigned int)irq_map[virq].hwirq;
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		desc = get_irq_desc(virq);

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->chip == NULL
		    || desc->action == NULL
		    || desc->chip->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%u "
					"ibm,get-xive returns %d\n",
					virq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != hw_cpu)
			goto unlock;

		printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
		       virq, cpu);

		/* Reset affinity to all cpus */
		irq_desc[virq].affinity = CPU_MASK_ALL;
		desc->chip->set_affinity(virq, CPU_MASK_ALL);
unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */