/* kernel/irq/migration.c */

#include <linux/irq.h>

void move_masked_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        cpumask_t tmp;

        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;

        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                WARN_ON(1);
                return;
        }

        desc->status &= ~IRQ_MOVE_PENDING;

        if (unlikely(cpus_empty(desc->pending_mask)))
                return;

        if (!desc->chip->set_affinity)
                return;

        assert_spin_locked(&desc->lock);

        /* Never migrate to an offline cpu. */
        cpus_and(tmp, desc->pending_mask, cpu_online_map);

        /*
         * If there is a valid mask to work with, do the
         * disable, re-program, enable sequence.
         * This is *not* particularly important for level-triggered
         * interrupts, but in the edge-triggered case we might be
         * writing the RTE while an active trigger is coming in.
         * This could cause some IO-APICs to malfunction.
         * Being paranoid, I guess!
         *
         * For correct operation this depends on the caller
         * masking the irqs.
         */
        if (likely(!cpus_empty(tmp)))
                desc->chip->set_affinity(irq, tmp);

        cpus_clear(desc->pending_mask);
}
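
/*
 * Illustrative sketch (not part of this file): how IRQ_MOVE_PENDING and
 * pending_mask are typically produced.  With deferred migration the
 * affinity setter does not re-program the chip itself; it records the
 * request and lets the next interrupt perform the move via the helpers
 * in this file.  example_defer_affinity() is a hypothetical name, shown
 * under the same 2.6-era cpumask_t/irq_desc assumptions as this file.
 */
#if 0
static void example_defer_affinity(int irq, cpumask_t mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        desc->status |= IRQ_MOVE_PENDING;       /* seen by move_masked_irq() */
        desc->pending_mask = mask;              /* consumed under desc->lock */
        spin_unlock_irqrestore(&desc->lock, flags);
}
#endif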

void move_native_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (likely(!(desc->status & IRQ_MOVE_PENDING)))
                return;

        if (unlikely(desc->status & IRQ_DISABLED))
                return;

        /*
         * Mask the line around the actual migration;
         * move_masked_irq() depends on the caller having
         * masked the irq before re-programming it.
         */
        desc->chip->mask(irq);
        move_masked_irq(irq);
        desc->chip->unmask(irq);
}
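
/*
 * Usage sketch (illustrative, not part of this file): move_native_irq()
 * is meant to be called from interrupt context with desc->lock held, as
 * move_masked_irq() asserts.  example_handle_irq() and the elided steps
 * are hypothetical; only move_native_irq() comes from this file.
 */
#if 0
static void example_handle_irq(unsigned int irq, struct irq_desc *desc)
{
        spin_lock(&desc->lock);

        /* Perform any pending affinity change before running handlers. */
        move_native_irq(irq);

        /* ... ack the chip and invoke the action handlers ... */

        spin_unlock(&desc->lock);
}
#endif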