[POWERPC] Invert null match behaviour for irq_hosts
[linux-2.6] / arch / powerpc / kernel / irq.c
1 /*
2  *  Derived from arch/i386/kernel/irq.c
3  *    Copyright (C) 1992 Linus Torvalds
4  *  Adapted from arch/i386 by Gary Thomas
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
7  *    Copyright (C) 1996-2001 Cort Dougan
8  *  Adapted for Power Macintosh by Paul Mackerras
9  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version
14  * 2 of the License, or (at your option) any later version.
15  *
16  * This file contains the code used by various IRQ handling routines:
17  * asking for different IRQ's should be done through these routines
18  * instead of just grabbing them. Thus setups with different IRQ numbers
19  * shouldn't result in any weird surprises, and installing new handlers
20  * should be easier.
21  *
22  * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
23  * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
24  * mask register (of which only 16 are defined), hence the weird shifting
25  * and complement of the cached_irq_mask.  I want to be able to stuff
26  * this right into the SIU SMASK register.
27  * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
28  * to reduce code space and undefined function references.
29  */
30
31 #undef DEBUG
32
33 #include <linux/module.h>
34 #include <linux/threads.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/signal.h>
37 #include <linux/sched.h>
38 #include <linux/ptrace.h>
39 #include <linux/ioport.h>
40 #include <linux/interrupt.h>
41 #include <linux/timex.h>
42 #include <linux/init.h>
43 #include <linux/slab.h>
44 #include <linux/delay.h>
45 #include <linux/irq.h>
46 #include <linux/seq_file.h>
47 #include <linux/cpumask.h>
48 #include <linux/profile.h>
49 #include <linux/bitops.h>
50 #include <linux/list.h>
51 #include <linux/radix-tree.h>
52 #include <linux/mutex.h>
53 #include <linux/bootmem.h>
54 #include <linux/pci.h>
55
56 #include <asm/uaccess.h>
57 #include <asm/system.h>
58 #include <asm/io.h>
59 #include <asm/pgtable.h>
60 #include <asm/irq.h>
61 #include <asm/cache.h>
62 #include <asm/prom.h>
63 #include <asm/ptrace.h>
64 #include <asm/machdep.h>
65 #include <asm/udbg.h>
66 #ifdef CONFIG_PPC64
67 #include <asm/paca.h>
68 #include <asm/firmware.h>
69 #include <asm/lv1call.h>
70 #endif
71
72 int __irq_offset_value;
73 static int ppc_spurious_interrupts;
74
75 #ifdef CONFIG_PPC32
76 EXPORT_SYMBOL(__irq_offset_value);
77 atomic_t ppc_n_lost_interrupts;
78
79 #ifndef CONFIG_PPC_MERGE
80 #define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
81 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
82 #endif
83
84 #ifdef CONFIG_TAU_INT
85 extern int tau_initialized;
86 extern int tau_interrupts(int);
87 #endif
88 #endif /* CONFIG_PPC32 */
89
90 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
91 extern atomic_t ipi_recv;
92 extern atomic_t ipi_sent;
93 #endif
94
95 #ifdef CONFIG_PPC64
96 EXPORT_SYMBOL(irq_desc);
97
98 int distribute_irqs = 1;
99
100 static inline unsigned long get_hard_enabled(void)
101 {
102         unsigned long enabled;
103
104         __asm__ __volatile__("lbz %0,%1(13)"
105         : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
106
107         return enabled;
108 }
109
110 static inline void set_soft_enabled(unsigned long enable)
111 {
112         __asm__ __volatile__("stb %0,%1(13)"
113         : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
114 }
115
116 void local_irq_restore(unsigned long en)
117 {
118         /*
119          * get_paca()->soft_enabled = en;
120          * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
121          * That was allowed before, and in such a case we do need to take care
122          * that gcc will set soft_enabled directly via r13, not choose to use
123          * an intermediate register, lest we're preempted to a different cpu.
124          */
125         set_soft_enabled(en);
126         if (!en)
127                 return;
128
129         if (firmware_has_feature(FW_FEATURE_ISERIES)) {
130                 /*
131                  * Do we need to disable preemption here?  Not really: in the
132                  * unlikely event that we're preempted to a different cpu in
133                  * between getting r13, loading its lppaca_ptr, and loading
134                  * its any_int, we might call iseries_handle_interrupts without
135                  * an interrupt pending on the new cpu, but that's no disaster,
136                  * is it?  And the business of preempting us off the old cpu
137                  * would itself involve a local_irq_restore which handles the
138                  * interrupt to that cpu.
139                  *
140                  * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
141                  * to avoid any preemption checking added into get_paca().
142                  */
143                 if (local_paca->lppaca_ptr->int_dword.any_int)
144                         iseries_handle_interrupts();
145                 return;
146         }
147
148         /*
149          * if (get_paca()->hard_enabled) return;
150          * But again we need to take care that gcc gets hard_enabled directly
151          * via r13, not choose to use an intermediate register, lest we're
152          * preempted to a different cpu in between the two instructions.
153          */
154         if (get_hard_enabled())
155                 return;
156
157         /*
158          * Need to hard-enable interrupts here.  Since currently disabled,
159          * no need to take further asm precautions against preemption; but
160          * use local_paca instead of get_paca() to avoid preemption checking.
161          */
162         local_paca->hard_enabled = en;
163         if ((int)mfspr(SPRN_DEC) < 0)
164                 mtspr(SPRN_DEC, 1);
165
166         /*
167          * Force the delivery of pending soft-disabled interrupts on PS3.
168          * Any HV call will have this side effect.
169          */
170         if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
171                 u64 tmp;
172                 lv1_get_version_info(&tmp);
173         }
174
175         __hard_irq_enable();
176 }
177 #endif /* CONFIG_PPC64 */
178
179 int show_interrupts(struct seq_file *p, void *v)
180 {
181         int i = *(loff_t *)v, j;
182         struct irqaction *action;
183         irq_desc_t *desc;
184         unsigned long flags;
185
186         if (i == 0) {
187                 seq_puts(p, "           ");
188                 for_each_online_cpu(j)
189                         seq_printf(p, "CPU%d       ", j);
190                 seq_putc(p, '\n');
191         }
192
193         if (i < NR_IRQS) {
194                 desc = get_irq_desc(i);
195                 spin_lock_irqsave(&desc->lock, flags);
196                 action = desc->action;
197                 if (!action || !action->handler)
198                         goto skip;
199                 seq_printf(p, "%3d: ", i);
200 #ifdef CONFIG_SMP
201                 for_each_online_cpu(j)
202                         seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
203 #else
204                 seq_printf(p, "%10u ", kstat_irqs(i));
205 #endif /* CONFIG_SMP */
206                 if (desc->chip)
207                         seq_printf(p, " %s ", desc->chip->typename);
208                 else
209                         seq_puts(p, "  None      ");
210                 seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
211                 seq_printf(p, "    %s", action->name);
212                 for (action = action->next; action; action = action->next)
213                         seq_printf(p, ", %s", action->name);
214                 seq_putc(p, '\n');
215 skip:
216                 spin_unlock_irqrestore(&desc->lock, flags);
217         } else if (i == NR_IRQS) {
218 #ifdef CONFIG_PPC32
219 #ifdef CONFIG_TAU_INT
220                 if (tau_initialized){
221                         seq_puts(p, "TAU: ");
222                         for_each_online_cpu(j)
223                                 seq_printf(p, "%10u ", tau_interrupts(j));
224                         seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
225                 }
226 #endif
227 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
228                 /* should this be per processor send/receive? */
229                 seq_printf(p, "IPI (recv/sent): %10u/%u\n",
230                                 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
231 #endif
232 #endif /* CONFIG_PPC32 */
233                 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
234         }
235         return 0;
236 }
237
238 #ifdef CONFIG_HOTPLUG_CPU
239 void fixup_irqs(cpumask_t map)
240 {
241         unsigned int irq;
242         static int warned;
243
244         for_each_irq(irq) {
245                 cpumask_t mask;
246
247                 if (irq_desc[irq].status & IRQ_PER_CPU)
248                         continue;
249
250                 cpus_and(mask, irq_desc[irq].affinity, map);
251                 if (any_online_cpu(mask) == NR_CPUS) {
252                         printk("Breaking affinity for irq %i\n", irq);
253                         mask = map;
254                 }
255                 if (irq_desc[irq].chip->set_affinity)
256                         irq_desc[irq].chip->set_affinity(irq, mask);
257                 else if (irq_desc[irq].action && !(warned++))
258                         printk("Cannot set affinity for irq %i\n", irq);
259         }
260
261         local_irq_enable();
262         mdelay(1);
263         local_irq_disable();
264 }
265 #endif
266
267 void do_IRQ(struct pt_regs *regs)
268 {
269         struct pt_regs *old_regs = set_irq_regs(regs);
270         unsigned int irq;
271 #ifdef CONFIG_IRQSTACKS
272         struct thread_info *curtp, *irqtp;
273 #endif
274
275         irq_enter();
276
277 #ifdef CONFIG_DEBUG_STACKOVERFLOW
278         /* Debugging check for stack overflow: is there less than 2KB free? */
279         {
280                 long sp;
281
282                 sp = __get_SP() & (THREAD_SIZE-1);
283
284                 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
285                         printk("do_IRQ: stack overflow: %ld\n",
286                                 sp - sizeof(struct thread_info));
287                         dump_stack();
288                 }
289         }
290 #endif
291
292         /*
293          * Every platform is required to implement ppc_md.get_irq.
294          * This function will either return an irq number or NO_IRQ to
295          * indicate there are no more pending.
296          * The value NO_IRQ_IGNORE is for buggy hardware and means that this
297          * IRQ has already been handled. -- Tom
298          */
299         irq = ppc_md.get_irq();
300
301         if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
302 #ifdef CONFIG_IRQSTACKS
303                 /* Switch to the irq stack to handle this */
304                 curtp = current_thread_info();
305                 irqtp = hardirq_ctx[smp_processor_id()];
306                 if (curtp != irqtp) {
307                         struct irq_desc *desc = irq_desc + irq;
308                         void *handler = desc->handle_irq;
309                         if (handler == NULL)
310                                 handler = &__do_IRQ;
311                         irqtp->task = curtp->task;
312                         irqtp->flags = 0;
313                         call_handle_irq(irq, desc, irqtp, handler);
314                         irqtp->task = NULL;
315                         if (irqtp->flags)
316                                 set_bits(irqtp->flags, &curtp->flags);
317                 } else
318 #endif
319                         generic_handle_irq(irq);
320         } else if (irq != NO_IRQ_IGNORE)
321                 /* That's not SMP safe ... but who cares ? */
322                 ppc_spurious_interrupts++;
323
324         irq_exit();
325         set_irq_regs(old_regs);
326
327 #ifdef CONFIG_PPC_ISERIES
328         if (firmware_has_feature(FW_FEATURE_ISERIES) &&
329                         get_lppaca()->int_dword.fields.decr_int) {
330                 get_lppaca()->int_dword.fields.decr_int = 0;
331                 /* Signal a fake decrementer interrupt */
332                 timer_interrupt(regs);
333         }
334 #endif
335 }
336
337 void __init init_IRQ(void)
338 {
339         if (ppc_md.init_IRQ)
340                 ppc_md.init_IRQ();
341 #ifdef CONFIG_PPC64
342         irq_ctx_init();
343 #endif
344 }
345
346
347 #ifdef CONFIG_IRQSTACKS
348 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
349 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
350
351 void irq_ctx_init(void)
352 {
353         struct thread_info *tp;
354         int i;
355
356         for_each_possible_cpu(i) {
357                 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
358                 tp = softirq_ctx[i];
359                 tp->cpu = i;
360                 tp->preempt_count = SOFTIRQ_OFFSET;
361
362                 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
363                 tp = hardirq_ctx[i];
364                 tp->cpu = i;
365                 tp->preempt_count = HARDIRQ_OFFSET;
366         }
367 }
368
369 static inline void do_softirq_onstack(void)
370 {
371         struct thread_info *curtp, *irqtp;
372
373         curtp = current_thread_info();
374         irqtp = softirq_ctx[smp_processor_id()];
375         irqtp->task = curtp->task;
376         call_do_softirq(irqtp);
377         irqtp->task = NULL;
378 }
379
380 #else
381 #define do_softirq_onstack()    __do_softirq()
382 #endif /* CONFIG_IRQSTACKS */
383
384 void do_softirq(void)
385 {
386         unsigned long flags;
387
388         if (in_interrupt())
389                 return;
390
391         local_irq_save(flags);
392
393         if (local_softirq_pending())
394                 do_softirq_onstack();
395
396         local_irq_restore(flags);
397 }
398 EXPORT_SYMBOL(do_softirq);
399
400
401 /*
402  * IRQ controller and virtual interrupts
403  */
404
405 #ifdef CONFIG_PPC_MERGE
406
407 static LIST_HEAD(irq_hosts);
408 static DEFINE_SPINLOCK(irq_big_lock);
409 static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
410 static unsigned int irq_radix_writer;
411 struct irq_map_entry irq_map[NR_IRQS];
412 static unsigned int irq_virq_count = NR_IRQS;
413 static struct irq_host *irq_default_host;
414
415 irq_hw_number_t virq_to_hw(unsigned int virq)
416 {
417         return irq_map[virq].hwirq;
418 }
419 EXPORT_SYMBOL_GPL(virq_to_hw);
420
421 __init_refok struct irq_host *irq_alloc_host(struct device_node *of_node,
422                                 unsigned int revmap_type,
423                                 unsigned int revmap_arg,
424                                 struct irq_host_ops *ops,
425                                 irq_hw_number_t inval_irq)
426 {
427         struct irq_host *host;
428         unsigned int size = sizeof(struct irq_host);
429         unsigned int i;
430         unsigned int *rmap;
431         unsigned long flags;
432
433         /* Allocate structure and revmap table if using linear mapping */
434         if (revmap_type == IRQ_HOST_MAP_LINEAR)
435                 size += revmap_arg * sizeof(unsigned int);
436         if (mem_init_done)
437                 host = kzalloc(size, GFP_KERNEL);
438         else {
439                 host = alloc_bootmem(size);
440                 if (host)
441                         memset(host, 0, size);
442         }
443         if (host == NULL)
444                 return NULL;
445
446         /* Fill structure */
447         host->revmap_type = revmap_type;
448         host->inval_irq = inval_irq;
449         host->ops = ops;
450         host->of_node = of_node;
451
452         spin_lock_irqsave(&irq_big_lock, flags);
453
454         /* If it's a legacy controller, check for duplicates and
455          * mark it as allocated (we use irq 0 host pointer for that
456          */
457         if (revmap_type == IRQ_HOST_MAP_LEGACY) {
458                 if (irq_map[0].host != NULL) {
459                         spin_unlock_irqrestore(&irq_big_lock, flags);
460                         /* If we are early boot, we can't free the structure,
461                          * too bad...
462                          * this will be fixed once slab is made available early
463                          * instead of the current cruft
464                          */
465                         if (mem_init_done)
466                                 kfree(host);
467                         return NULL;
468                 }
469                 irq_map[0].host = host;
470         }
471
472         list_add(&host->link, &irq_hosts);
473         spin_unlock_irqrestore(&irq_big_lock, flags);
474
475         /* Additional setups per revmap type */
476         switch(revmap_type) {
477         case IRQ_HOST_MAP_LEGACY:
478                 /* 0 is always the invalid number for legacy */
479                 host->inval_irq = 0;
480                 /* setup us as the host for all legacy interrupts */
481                 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
482                         irq_map[i].hwirq = 0;
483                         smp_wmb();
484                         irq_map[i].host = host;
485                         smp_wmb();
486
487                         /* Clear norequest flags */
488                         get_irq_desc(i)->status &= ~IRQ_NOREQUEST;
489
490                         /* Legacy flags are left to default at this point,
491                          * one can then use irq_create_mapping() to
492                          * explicitely change them
493                          */
494                         ops->map(host, i, i);
495                 }
496                 break;
497         case IRQ_HOST_MAP_LINEAR:
498                 rmap = (unsigned int *)(host + 1);
499                 for (i = 0; i < revmap_arg; i++)
500                         rmap[i] = NO_IRQ;
501                 host->revmap_data.linear.size = revmap_arg;
502                 smp_wmb();
503                 host->revmap_data.linear.revmap = rmap;
504                 break;
505         default:
506                 break;
507         }
508
509         pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
510
511         return host;
512 }
513
514 struct irq_host *irq_find_host(struct device_node *node)
515 {
516         struct irq_host *h, *found = NULL;
517         unsigned long flags;
518
519         /* We might want to match the legacy controller last since
520          * it might potentially be set to match all interrupts in
521          * the absence of a device node. This isn't a problem so far
522          * yet though...
523          */
524         spin_lock_irqsave(&irq_big_lock, flags);
525         list_for_each_entry(h, &irq_hosts, link)
526                 if (h->ops->match != NULL && h->ops->match(h, node)) {
527                         found = h;
528                         break;
529                 }
530         spin_unlock_irqrestore(&irq_big_lock, flags);
531         return found;
532 }
533 EXPORT_SYMBOL_GPL(irq_find_host);
534
535 void irq_set_default_host(struct irq_host *host)
536 {
537         pr_debug("irq: Default host set to @0x%p\n", host);
538
539         irq_default_host = host;
540 }
541
542 void irq_set_virq_count(unsigned int count)
543 {
544         pr_debug("irq: Trying to set virq count to %d\n", count);
545
546         BUG_ON(count < NUM_ISA_INTERRUPTS);
547         if (count < NR_IRQS)
548                 irq_virq_count = count;
549 }
550
551 /* radix tree not lockless safe ! we use a brlock-type mecanism
552  * for now, until we can use a lockless radix tree
553  */
554 static void irq_radix_wrlock(unsigned long *flags)
555 {
556         unsigned int cpu, ok;
557
558         spin_lock_irqsave(&irq_big_lock, *flags);
559         irq_radix_writer = 1;
560         smp_mb();
561         do {
562                 barrier();
563                 ok = 1;
564                 for_each_possible_cpu(cpu) {
565                         if (per_cpu(irq_radix_reader, cpu)) {
566                                 ok = 0;
567                                 break;
568                         }
569                 }
570                 if (!ok)
571                         cpu_relax();
572         } while(!ok);
573 }
574
575 static void irq_radix_wrunlock(unsigned long flags)
576 {
577         smp_wmb();
578         irq_radix_writer = 0;
579         spin_unlock_irqrestore(&irq_big_lock, flags);
580 }
581
582 static void irq_radix_rdlock(unsigned long *flags)
583 {
584         local_irq_save(*flags);
585         __get_cpu_var(irq_radix_reader) = 1;
586         smp_mb();
587         if (likely(irq_radix_writer == 0))
588                 return;
589         __get_cpu_var(irq_radix_reader) = 0;
590         smp_wmb();
591         spin_lock(&irq_big_lock);
592         __get_cpu_var(irq_radix_reader) = 1;
593         spin_unlock(&irq_big_lock);
594 }
595
596 static void irq_radix_rdunlock(unsigned long flags)
597 {
598         __get_cpu_var(irq_radix_reader) = 0;
599         local_irq_restore(flags);
600 }
601
602 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
603                             irq_hw_number_t hwirq)
604 {
605         /* Clear IRQ_NOREQUEST flag */
606         get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
607
608         /* map it */
609         smp_wmb();
610         irq_map[virq].hwirq = hwirq;
611         smp_mb();
612
613         if (host->ops->map(host, virq, hwirq)) {
614                 pr_debug("irq: -> mapping failed, freeing\n");
615                 irq_free_virt(virq, 1);
616                 return -1;
617         }
618
619         return 0;
620 }
621
622 unsigned int irq_create_direct_mapping(struct irq_host *host)
623 {
624         unsigned int virq;
625
626         if (host == NULL)
627                 host = irq_default_host;
628
629         BUG_ON(host == NULL);
630         WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
631
632         virq = irq_alloc_virt(host, 1, 0);
633         if (virq == NO_IRQ) {
634                 pr_debug("irq: create_direct virq allocation failed\n");
635                 return NO_IRQ;
636         }
637
638         pr_debug("irq: create_direct obtained virq %d\n", virq);
639
640         if (irq_setup_virq(host, virq, virq))
641                 return NO_IRQ;
642
643         return virq;
644 }
645
646 unsigned int irq_create_mapping(struct irq_host *host,
647                                 irq_hw_number_t hwirq)
648 {
649         unsigned int virq, hint;
650
651         pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
652
653         /* Look for default host if nececssary */
654         if (host == NULL)
655                 host = irq_default_host;
656         if (host == NULL) {
657                 printk(KERN_WARNING "irq_create_mapping called for"
658                        " NULL host, hwirq=%lx\n", hwirq);
659                 WARN_ON(1);
660                 return NO_IRQ;
661         }
662         pr_debug("irq: -> using host @%p\n", host);
663
664         /* Check if mapping already exist, if it does, call
665          * host->ops->map() to update the flags
666          */
667         virq = irq_find_mapping(host, hwirq);
668         if (virq != NO_IRQ) {
669                 if (host->ops->remap)
670                         host->ops->remap(host, virq, hwirq);
671                 pr_debug("irq: -> existing mapping on virq %d\n", virq);
672                 return virq;
673         }
674
675         /* Get a virtual interrupt number */
676         if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
677                 /* Handle legacy */
678                 virq = (unsigned int)hwirq;
679                 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
680                         return NO_IRQ;
681                 return virq;
682         } else {
683                 /* Allocate a virtual interrupt number */
684                 hint = hwirq % irq_virq_count;
685                 virq = irq_alloc_virt(host, 1, hint);
686                 if (virq == NO_IRQ) {
687                         pr_debug("irq: -> virq allocation failed\n");
688                         return NO_IRQ;
689                 }
690         }
691         pr_debug("irq: -> obtained virq %d\n", virq);
692
693         if (irq_setup_virq(host, virq, hwirq))
694                 return NO_IRQ;
695
696         return virq;
697 }
698 EXPORT_SYMBOL_GPL(irq_create_mapping);
699
700 unsigned int irq_create_of_mapping(struct device_node *controller,
701                                    u32 *intspec, unsigned int intsize)
702 {
703         struct irq_host *host;
704         irq_hw_number_t hwirq;
705         unsigned int type = IRQ_TYPE_NONE;
706         unsigned int virq;
707
708         if (controller == NULL)
709                 host = irq_default_host;
710         else
711                 host = irq_find_host(controller);
712         if (host == NULL) {
713                 printk(KERN_WARNING "irq: no irq host found for %s !\n",
714                        controller->full_name);
715                 return NO_IRQ;
716         }
717
718         /* If host has no translation, then we assume interrupt line */
719         if (host->ops->xlate == NULL)
720                 hwirq = intspec[0];
721         else {
722                 if (host->ops->xlate(host, controller, intspec, intsize,
723                                      &hwirq, &type))
724                         return NO_IRQ;
725         }
726
727         /* Create mapping */
728         virq = irq_create_mapping(host, hwirq);
729         if (virq == NO_IRQ)
730                 return virq;
731
732         /* Set type if specified and different than the current one */
733         if (type != IRQ_TYPE_NONE &&
734             type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
735                 set_irq_type(virq, type);
736         return virq;
737 }
738 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
739
740 unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
741 {
742         struct of_irq oirq;
743
744         if (of_irq_map_one(dev, index, &oirq))
745                 return NO_IRQ;
746
747         return irq_create_of_mapping(oirq.controller, oirq.specifier,
748                                      oirq.size);
749 }
750 EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
751
752 void irq_dispose_mapping(unsigned int virq)
753 {
754         struct irq_host *host;
755         irq_hw_number_t hwirq;
756         unsigned long flags;
757
758         if (virq == NO_IRQ)
759                 return;
760
761         host = irq_map[virq].host;
762         WARN_ON (host == NULL);
763         if (host == NULL)
764                 return;
765
766         /* Never unmap legacy interrupts */
767         if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
768                 return;
769
770         /* remove chip and handler */
771         set_irq_chip_and_handler(virq, NULL, NULL);
772
773         /* Make sure it's completed */
774         synchronize_irq(virq);
775
776         /* Tell the PIC about it */
777         if (host->ops->unmap)
778                 host->ops->unmap(host, virq);
779         smp_mb();
780
781         /* Clear reverse map */
782         hwirq = irq_map[virq].hwirq;
783         switch(host->revmap_type) {
784         case IRQ_HOST_MAP_LINEAR:
785                 if (hwirq < host->revmap_data.linear.size)
786                         host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
787                 break;
788         case IRQ_HOST_MAP_TREE:
789                 /* Check if radix tree allocated yet */
790                 if (host->revmap_data.tree.gfp_mask == 0)
791                         break;
792                 irq_radix_wrlock(&flags);
793                 radix_tree_delete(&host->revmap_data.tree, hwirq);
794                 irq_radix_wrunlock(flags);
795                 break;
796         }
797
798         /* Destroy map */
799         smp_mb();
800         irq_map[virq].hwirq = host->inval_irq;
801
802         /* Set some flags */
803         get_irq_desc(virq)->status |= IRQ_NOREQUEST;
804
805         /* Free it */
806         irq_free_virt(virq, 1);
807 }
808 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
809
810 unsigned int irq_find_mapping(struct irq_host *host,
811                               irq_hw_number_t hwirq)
812 {
813         unsigned int i;
814         unsigned int hint = hwirq % irq_virq_count;
815
816         /* Look for default host if nececssary */
817         if (host == NULL)
818                 host = irq_default_host;
819         if (host == NULL)
820                 return NO_IRQ;
821
822         /* legacy -> bail early */
823         if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
824                 return hwirq;
825
826         /* Slow path does a linear search of the map */
827         if (hint < NUM_ISA_INTERRUPTS)
828                 hint = NUM_ISA_INTERRUPTS;
829         i = hint;
830         do  {
831                 if (irq_map[i].host == host &&
832                     irq_map[i].hwirq == hwirq)
833                         return i;
834                 i++;
835                 if (i >= irq_virq_count)
836                         i = NUM_ISA_INTERRUPTS;
837         } while(i != hint);
838         return NO_IRQ;
839 }
840 EXPORT_SYMBOL_GPL(irq_find_mapping);
841
842
843 unsigned int irq_radix_revmap(struct irq_host *host,
844                               irq_hw_number_t hwirq)
845 {
846         struct radix_tree_root *tree;
847         struct irq_map_entry *ptr;
848         unsigned int virq;
849         unsigned long flags;
850
851         WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
852
853         /* Check if the radix tree exist yet. We test the value of
854          * the gfp_mask for that. Sneaky but saves another int in the
855          * structure. If not, we fallback to slow mode
856          */
857         tree = &host->revmap_data.tree;
858         if (tree->gfp_mask == 0)
859                 return irq_find_mapping(host, hwirq);
860
861         /* Now try to resolve */
862         irq_radix_rdlock(&flags);
863         ptr = radix_tree_lookup(tree, hwirq);
864         irq_radix_rdunlock(flags);
865
866         /* Found it, return */
867         if (ptr) {
868                 virq = ptr - irq_map;
869                 return virq;
870         }
871
872         /* If not there, try to insert it */
873         virq = irq_find_mapping(host, hwirq);
874         if (virq != NO_IRQ) {
875                 irq_radix_wrlock(&flags);
876                 radix_tree_insert(tree, hwirq, &irq_map[virq]);
877                 irq_radix_wrunlock(flags);
878         }
879         return virq;
880 }
881
882 unsigned int irq_linear_revmap(struct irq_host *host,
883                                irq_hw_number_t hwirq)
884 {
885         unsigned int *revmap;
886
887         WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);
888
889         /* Check revmap bounds */
890         if (unlikely(hwirq >= host->revmap_data.linear.size))
891                 return irq_find_mapping(host, hwirq);
892
893         /* Check if revmap was allocated */
894         revmap = host->revmap_data.linear.revmap;
895         if (unlikely(revmap == NULL))
896                 return irq_find_mapping(host, hwirq);
897
898         /* Fill up revmap with slow path if no mapping found */
899         if (unlikely(revmap[hwirq] == NO_IRQ))
900                 revmap[hwirq] = irq_find_mapping(host, hwirq);
901
902         return revmap[hwirq];
903 }
904
905 unsigned int irq_alloc_virt(struct irq_host *host,
906                             unsigned int count,
907                             unsigned int hint)
908 {
909         unsigned long flags;
910         unsigned int i, j, found = NO_IRQ;
911
912         if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
913                 return NO_IRQ;
914
915         spin_lock_irqsave(&irq_big_lock, flags);
916
917         /* Use hint for 1 interrupt if any */
918         if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
919             hint < irq_virq_count && irq_map[hint].host == NULL) {
920                 found = hint;
921                 goto hint_found;
922         }
923
924         /* Look for count consecutive numbers in the allocatable
925          * (non-legacy) space
926          */
927         for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
928                 if (irq_map[i].host != NULL)
929                         j = 0;
930                 else
931                         j++;
932
933                 if (j == count) {
934                         found = i - count + 1;
935                         break;
936                 }
937         }
938         if (found == NO_IRQ) {
939                 spin_unlock_irqrestore(&irq_big_lock, flags);
940                 return NO_IRQ;
941         }
942  hint_found:
943         for (i = found; i < (found + count); i++) {
944                 irq_map[i].hwirq = host->inval_irq;
945                 smp_wmb();
946                 irq_map[i].host = host;
947         }
948         spin_unlock_irqrestore(&irq_big_lock, flags);
949         return found;
950 }
951
952 void irq_free_virt(unsigned int virq, unsigned int count)
953 {
954         unsigned long flags;
955         unsigned int i;
956
957         WARN_ON (virq < NUM_ISA_INTERRUPTS);
958         WARN_ON (count == 0 || (virq + count) > irq_virq_count);
959
960         spin_lock_irqsave(&irq_big_lock, flags);
961         for (i = virq; i < (virq + count); i++) {
962                 struct irq_host *host;
963
964                 if (i < NUM_ISA_INTERRUPTS ||
965                     (virq + count) > irq_virq_count)
966                         continue;
967
968                 host = irq_map[i].host;
969                 irq_map[i].hwirq = host->inval_irq;
970                 smp_wmb();
971                 irq_map[i].host = NULL;
972         }
973         spin_unlock_irqrestore(&irq_big_lock, flags);
974 }
975
976 void irq_early_init(void)
977 {
978         unsigned int i;
979
980         for (i = 0; i < NR_IRQS; i++)
981                 get_irq_desc(i)->status |= IRQ_NOREQUEST;
982 }
983
984 /* We need to create the radix trees late */
985 static int irq_late_init(void)
986 {
987         struct irq_host *h;
988         unsigned long flags;
989
990         irq_radix_wrlock(&flags);
991         list_for_each_entry(h, &irq_hosts, link) {
992                 if (h->revmap_type == IRQ_HOST_MAP_TREE)
993                         INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
994         }
995         irq_radix_wrunlock(flags);
996
997         return 0;
998 }
999 arch_initcall(irq_late_init);
1000
1001 #endif /* CONFIG_PPC_MERGE */
1002
1003 #ifdef CONFIG_PPC64
1004 static int __init setup_noirqdistrib(char *str)
1005 {
1006         distribute_irqs = 0;
1007         return 1;
1008 }
1009
1010 __setup("noirqdistrib", setup_noirqdistrib);
1011 #endif /* CONFIG_PPC64 */