#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/arch_hooks.h>
#include <asm/i8259.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes.
 * plus some generic x86 specific things, if generic specifics make
 * any sense at all.
 * this file should become arch/i386/kernel/irq.c when the old irq.c
 * moves to arch independent land
 */

static int i8259A_auto_eoi;
DEFINE_SPINLOCK(i8259A_lock);
static void mask_and_ack_8259A(unsigned int);

static struct irq_chip i8259A_chip = {
	.name		= "XT-PIC",
	.mask		= disable_8259A_irq,
	.disable	= disable_8259A_irq,
	.unmask		= enable_8259A_irq,
	.mask_ack	= mask_and_ack_8259A,
};
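
/*
 * Note on usage: with this chip installed, the core level-triggered flow
 * (handle_level_irq) calls ->mask_ack on entry and ->unmask on exit, so
 * mask_and_ack_8259A() and enable_8259A_irq() normally bracket each
 * delivered ISA interrupt.
 */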

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers,
 */
unsigned int cached_irq_mask = 0xffff;
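
/*
 * The low byte of cached_irq_mask shadows the master 8259A's IMR and the
 * high byte the slave's; cached_master_mask and cached_slave_mask (from
 * asm/i8259.h) are the byte-sized views of this word used below.
 */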

/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;
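
/*
 * (Assuming this tree's headers: the IO_APIC_IRQ() test used elsewhere
 * treats an IRQ as IO-APIC routed when it is >= 16 or when its bit is
 * set in io_apic_irqs.)
 */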

void disable_8259A_irq(unsigned int irq)
{
	unsigned int mask = 1 << irq;
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask |= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	spin_unlock_irqrestore(&i8259A_lock, flags);
}

void enable_8259A_irq(unsigned int irq)
{
	unsigned int mask = ~(1 << irq);
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	cached_irq_mask &= mask;
	if (irq & 8)
		outb(cached_slave_mask, PIC_SLAVE_IMR);
	else
		outb(cached_master_mask, PIC_MASTER_IMR);
	spin_unlock_irqrestore(&i8259A_lock, flags);
}

int i8259A_irq_pending(unsigned int irq)
{
	unsigned int mask = 1<<irq;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&i8259A_lock, flags);
	/* with OCW3 at its default, reading the command port returns the IRR */
	if (irq < 8)
		ret = inb(PIC_MASTER_CMD) & mask;
	else
		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
	spin_unlock_irqrestore(&i8259A_lock, flags);

	return ret;
}
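
/*
 * make_8259A_irq() hands an interrupt line back to the legacy PIC: the
 * bit is cleared in io_apic_irqs so the IRQ is no longer treated as
 * IO-APIC routed, and the 8259A chip is (re)installed with the
 * level-triggered handler.
 */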
void make_8259A_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	io_apic_irqs &= ~(1<<irq);
	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
				      "XT");
	enable_irq(irq);
}

/*
 * This function is expected to be called rarely: switching between the
 * 8259A registers is slow.
 * It must be called with the irq controller spinlock held.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
	int value;
	int irqmask = 1<<irq;

	if (irq < 8) {
		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
		value = inb(PIC_MASTER_CMD) & irqmask;
		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
		return value;
	}
	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
	return value;
}

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI, and the order of EOI
 * to the two 8259s is important!)
 */
static void mask_and_ack_8259A(unsigned int irq)
{
	unsigned int irqmask = 1 << irq;
	unsigned long flags;

	spin_lock_irqsave(&i8259A_lock, flags);
	/*
	 * Lightweight spurious IRQ detection. We do not want
	 * to overdo spurious IRQ handling - it's usually a sign
	 * of hardware problems, so we only do the checks we can
	 * do without slowing down good hardware unnecessarily.
	 *
	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
	 * usually resulting from the 8259A-1|2 PICs) occur
	 * even if the IRQ is masked in the 8259A. Thus we
	 * can check spurious 8259A IRQs without doing the
	 * quite slow i8259A_irq_real() call for every IRQ.
	 * This does not cover 100% of spurious interrupts,
	 * but should be enough to warn the user that there
	 * is something bad going on ...
	 */
	if (cached_irq_mask & irqmask)
		goto spurious_8259A_irq;
	cached_irq_mask |= irqmask;

handle_real_irq:
	if (irq & 8) {
		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_slave_mask, PIC_SLAVE_IMR);
		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
	} else {
		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
		outb(cached_master_mask, PIC_MASTER_IMR);
		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI' to master */
	}
	spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

spurious_8259A_irq:
	/*
	 * this is the slow path - should happen rarely.
	 */
	if (i8259A_irq_real(irq))
		/*
		 * oops, the IRQ _is_ in service according to the
		 * 8259A - not spurious, go handle it.
		 */
		goto handle_real_irq;

	{
		static int spurious_irq_mask;
		/*
		 * At this point we can be sure the IRQ is spurious,
		 * lets ACK and report it. [once per IRQ]
		 */
		if (!(spurious_irq_mask & irqmask)) {
			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
			spurious_irq_mask |= irqmask;
		}
		atomic_inc(&irq_err_count);
		/*
		 * Theoretically we do not have to handle this IRQ,
		 * but in Linux this does not cause problems and is
		 * simpler for us.
		 */
		goto handle_real_irq;
	}
}

static char irq_trigger[2];
/*
 * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
 */
static void restore_ELCR(char *trigger)
{
	outb(trigger[0], 0x4d0);
	outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
	/* IRQ 0,1,2,8,13 are marked as reserved */
	trigger[0] = inb(0x4d0) & 0xF8;
	trigger[1] = inb(0x4d1) & 0xDE;
}
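
/*
 * ELCR semantics: a set bit makes the corresponding IRQ level triggered,
 * a clear bit edge triggered. The 0xF8/0xDE masks above drop the
 * reserved lines (IRQ 0-2 in the first register, IRQ 8 and 13 in the
 * second), so only the configurable ISA IRQs are saved and restored.
 */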

static int i8259A_resume(struct sys_device *dev)
{
	init_8259A(i8259A_auto_eoi);
	restore_ELCR(irq_trigger);
	return 0;
}

static int i8259A_suspend(struct sys_device *dev, pm_message_t state)
{
	save_ELCR(irq_trigger);
	return 0;
}

static int i8259A_shutdown(struct sys_device *dev)
{
	/* Put the i8259A into a quiescent state that the kernel
	 * initialization code can get it out of. */
	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
	return 0;
}

static struct sysdev_class i8259_sysdev_class = {
	.name = "i8259",
	.suspend = i8259A_suspend,
	.resume = i8259A_resume,
	.shutdown = i8259A_shutdown,
};

static struct sys_device device_i8259A = {
	.id	= 0,
	.cls	= &i8259_sysdev_class,
};

static int __init i8259A_init_sysfs(void)
{
	int error = sysdev_class_register(&i8259_sysdev_class);
	if (!error)
		error = sysdev_register(&device_i8259A);
	return error;
}

device_initcall(i8259A_init_sysfs);

void init_8259A(int auto_eoi)
{
	unsigned long flags;

	i8259A_auto_eoi = auto_eoi;

	spin_lock_irqsave(&i8259A_lock, flags);

	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

	/*
	 * outb_p - this has to work on a wide range of PC hardware.
	 */
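	/*
	 * Init sequence recap (standard 8259A programming): ICW1 goes to
	 * the command port, while ICW2, ICW3 and ICW4 are written to the
	 * data port - the same address as the IMR - which is why the
	 * PIC_*_IMR ports are reused for the remaining ICWs below.
	 */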
	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
	outb_p(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
	if (auto_eoi)	/* master does Auto EOI */
		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
	else		/* master expects normal EOI */
		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
	outb_p(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
	if (auto_eoi)
		/*
		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
		i8259A_chip.mask_ack = disable_8259A_irq;
	else
		i8259A_chip.mask_ack = mask_and_ack_8259A;

	udelay(100);		/* wait for 8259A to initialize */

	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

	spin_unlock_irqrestore(&i8259A_lock, flags);
}

/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also
 * leads to races. IBM designers who came up with it should
 * be shot.
 */

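/*
 * The FERR# coupling latches the coprocessor error on the motherboard;
 * the write to port 0xF0 below clears that latch before the fault is
 * forwarded to math_error().
 */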
static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	extern void math_error(void __user *);
	outb(0, 0xF0);		/* clear the latched FPU error */
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error((void __user *)get_irq_regs()->ip);
	return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
	.handler = math_error_irq,
	.mask = CPU_MASK_NONE,
	.name = "fpu",
};

void __init init_ISA_irqs(void)
{
	int i;

#ifdef CONFIG_X86_LOCAL_APIC
	init_bsp_APIC();
#endif
	init_8259A(0);

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		if (i < 16) {
			/* 16 old-style INTA-cycle interrupts: */
			set_irq_chip_and_handler_name(i, &i8259A_chip,
						      handle_level_irq, "XT");
		} else {
			/* 'high' PCI IRQs filled in on demand */
			irq_desc[i].chip = &no_irq_chip;
		}
	}
}

/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));

void __init native_init_IRQ(void)
{
	int i;

	/* all the set up before the call gates are initialised */
	pre_intr_init_hook();

	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		if (i >= NR_IRQS)
			break;
		/* SYSCALL_VECTOR was reserved in trap_init. */
		if (!test_bit(vector, used_vectors))
			set_intr_gate(vector, interrupt[i]);
	}

	/* setup after call gates are initialised (usually add in
	 * the architecture specific gates)
	 */
	intr_init_hook();

	/*
	 * External FPU? Set up irq13 if so, for
	 * original braindamaged IBM FERR coupling.
	 */
	if (boot_cpu_data.hard_math && !cpu_has_fpu)
		setup_irq(FPU_IRQ, &fpu_irq);

	irq_ctx_init(smp_processor_id());
}