/* arch/x86/kernel/irq.c */
1 /*
2  * Common interrupt code for 32 and 64 bit
3  */
4 #include <linux/cpu.h>
5 #include <linux/interrupt.h>
6 #include <linux/kernel_stat.h>
7 #include <linux/seq_file.h>
8
9 #include <asm/apic.h>
10 #include <asm/io_apic.h>
11 #include <asm/smp.h>
12
/* Count of interrupts arriving on an unexpected/illegal vector; shown as "ERR" in /proc/interrupts. */
atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 *
 * Logs the stray vector, then acks the local APIC (if present and
 * enabled) so the slot is not left hanging.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        if (cpu_has_apic)
                ack_APIC_irq();
#endif
}
37
/*
 * Per-CPU interrupt statistics: on 32-bit they live in the per-cpu
 * irq_stat variable, on 64-bit in the per-cpu PDA.
 */
#ifdef CONFIG_X86_32
# define irq_stats(x)           (&per_cpu(irq_stat, x))
#else
# define irq_stats(x)           cpu_pda(x)
#endif
/*
 * /proc/interrupts printing:
 */
/*
 * Print the architecture-specific (non-device) interrupt counters:
 * one row per counter, one column per online CPU, followed by the
 * global ERR/MIS totals.  Always returns 0 (seq_file convention).
 */
static int show_other_interrupts(struct seq_file *p)
{
        int j;

        seq_printf(p, "NMI: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "LOC: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
        /* SMP-only IPI counters: reschedule, function-call, TLB shootdown */
        seq_printf(p, "RES: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_printf(p, "  Rescheduling interrupts\n");
        seq_printf(p, "CAL: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_printf(p, "  Function call interrupts\n");
        seq_printf(p, "TLB: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "TRM: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
# ifdef CONFIG_X86_64
        seq_printf(p, "THR: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "SPU: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
#endif
        /* Global (not per-CPU) error and, with an IO-APIC, mis-routed counts */
        seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
        return 0;
}
98
99 int show_interrupts(struct seq_file *p, void *v)
100 {
101         unsigned long flags, any_count = 0;
102         int i = *(loff_t *) v, j;
103         struct irqaction *action;
104         struct irq_desc *desc;
105
106         if (i > nr_irqs)
107                 return 0;
108
109         if (i == nr_irqs)
110                 return show_other_interrupts(p);
111
112         /* print header */
113         if (i == 0) {
114                 seq_printf(p, "           ");
115                 for_each_online_cpu(j)
116                         seq_printf(p, "CPU%-8d", j);
117                 seq_putc(p, '\n');
118         }
119
120         desc = irq_to_desc(i);
121         spin_lock_irqsave(&desc->lock, flags);
122 #ifndef CONFIG_SMP
123         any_count = kstat_irqs(i);
124 #else
125         for_each_online_cpu(j)
126                 any_count |= kstat_irqs_cpu(i, j);
127 #endif
128         action = desc->action;
129         if (!action && !any_count)
130                 goto out;
131
132         seq_printf(p, "%3d: ", i);
133 #ifndef CONFIG_SMP
134         seq_printf(p, "%10u ", kstat_irqs(i));
135 #else
136         for_each_online_cpu(j)
137                 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
138 #endif
139         seq_printf(p, " %8s", desc->chip->name);
140         seq_printf(p, "-%-8s", desc->name);
141
142         if (action) {
143                 seq_printf(p, "  %s", action->name);
144                 while ((action = action->next) != NULL)
145                         seq_printf(p, ", %s", action->name);
146         }
147
148         seq_putc(p, '\n');
149 out:
150         spin_unlock_irqrestore(&desc->lock, flags);
151         return 0;
152 }
153
154 /*
155  * /proc/stat helpers
156  */
157 u64 arch_irq_stat_cpu(unsigned int cpu)
158 {
159         u64 sum = irq_stats(cpu)->__nmi_count;
160
161 #ifdef CONFIG_X86_LOCAL_APIC
162         sum += irq_stats(cpu)->apic_timer_irqs;
163 #endif
164 #ifdef CONFIG_SMP
165         sum += irq_stats(cpu)->irq_resched_count;
166         sum += irq_stats(cpu)->irq_call_count;
167         sum += irq_stats(cpu)->irq_tlb_count;
168 #endif
169 #ifdef CONFIG_X86_MCE
170         sum += irq_stats(cpu)->irq_thermal_count;
171 # ifdef CONFIG_X86_64
172         sum += irq_stats(cpu)->irq_threshold_count;
173 #endif
174 #endif
175 #ifdef CONFIG_X86_LOCAL_APIC
176         sum += irq_stats(cpu)->irq_spurious_count;
177 #endif
178         return sum;
179 }
180
181 u64 arch_irq_stat(void)
182 {
183         u64 sum = atomic_read(&irq_err_count);
184
185 #ifdef CONFIG_X86_IO_APIC
186         sum += atomic_read(&irq_mis_count);
187 #endif
188         return sum;
189 }