irq: use GFP_KERNEL for action allocation in request_irq()
[linux-2.6] / kernel / irq / manage.c
1 /*
2  * linux/kernel/irq/manage.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006 Thomas Gleixner
6  *
7  * This file contains driver APIs to the irq subsystem.
8  */
9
10 #include <linux/irq.h>
11 #include <linux/module.h>
12 #include <linux/random.h>
13 #include <linux/interrupt.h>
14 #include <linux/slab.h>
15
16 #include "internals.h"
17
18 #ifdef CONFIG_SMP
19 cpumask_var_t irq_default_affinity;
20
21 static int init_irq_default_affinity(void)
22 {
23         alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
24         cpumask_setall(irq_default_affinity);
25         return 0;
26 }
27 core_initcall(init_irq_default_affinity);
28
29 /**
30  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
31  *      @irq: interrupt number to wait for
32  *
33  *      This function waits for any pending IRQ handlers for this interrupt
34  *      to complete before returning. If you use this function while
35  *      holding a resource the IRQ handler may need you will deadlock.
36  *
37  *      This function may be called - with care - from IRQ context.
38  */
39 void synchronize_irq(unsigned int irq)
40 {
41         struct irq_desc *desc = irq_to_desc(irq);
42         unsigned int status;
43
44         if (!desc)
45                 return;
46
47         do {
48                 unsigned long flags;
49
50                 /*
51                  * Wait until we're out of the critical section.  This might
52                  * give the wrong answer due to the lack of memory barriers.
53                  */
54                 while (desc->status & IRQ_INPROGRESS)
55                         cpu_relax();
56
57                 /* Ok, that indicated we're done: double-check carefully. */
58                 spin_lock_irqsave(&desc->lock, flags);
59                 status = desc->status;
60                 spin_unlock_irqrestore(&desc->lock, flags);
61
62                 /* Oops, that failed? */
63         } while (status & IRQ_INPROGRESS);
64 }
65 EXPORT_SYMBOL(synchronize_irq);
66
67 /**
68  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
69  *      @irq:           Interrupt to check
70  *
71  */
72 int irq_can_set_affinity(unsigned int irq)
73 {
74         struct irq_desc *desc = irq_to_desc(irq);
75
76         if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
77             !desc->chip->set_affinity)
78                 return 0;
79
80         return 1;
81 }
82
83 /**
84  *      irq_set_affinity - Set the irq affinity of a given irq
85  *      @irq:           Interrupt to set affinity
86  *      @cpumask:       cpumask
87  *
88  */
89 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
90 {
91         struct irq_desc *desc = irq_to_desc(irq);
92         unsigned long flags;
93
94         if (!desc->chip->set_affinity)
95                 return -EINVAL;
96
97         spin_lock_irqsave(&desc->lock, flags);
98
99 #ifdef CONFIG_GENERIC_PENDING_IRQ
100         if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
101                 cpumask_copy(&desc->affinity, cpumask);
102                 desc->chip->set_affinity(irq, cpumask);
103         } else {
104                 desc->status |= IRQ_MOVE_PENDING;
105                 cpumask_copy(&desc->pending_mask, cpumask);
106         }
107 #else
108         cpumask_copy(&desc->affinity, cpumask);
109         desc->chip->set_affinity(irq, cpumask);
110 #endif
111         desc->status |= IRQ_AFFINITY_SET;
112         spin_unlock_irqrestore(&desc->lock, flags);
113         return 0;
114 }
115
116 #ifndef CONFIG_AUTO_IRQ_AFFINITY
117 /*
118  * Generic version of the affinity autoselector.
119  */
120 int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
121 {
122         if (!irq_can_set_affinity(irq))
123                 return 0;
124
125         /*
126          * Preserve an userspace affinity setup, but make sure that
127          * one of the targets is online.
128          */
129         if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
130                 if (cpumask_any_and(&desc->affinity, cpu_online_mask)
131                     < nr_cpu_ids)
132                         goto set_affinity;
133                 else
134                         desc->status &= ~IRQ_AFFINITY_SET;
135         }
136
137         cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
138 set_affinity:
139         desc->chip->set_affinity(irq, &desc->affinity);
140
141         return 0;
142 }
143 #else
144 static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
145 {
146         return irq_select_affinity(irq);
147 }
148 #endif
149
150 /*
151  * Called when affinity is set via /proc/irq
152  */
153 int irq_select_affinity_usr(unsigned int irq)
154 {
155         struct irq_desc *desc = irq_to_desc(irq);
156         unsigned long flags;
157         int ret;
158
159         spin_lock_irqsave(&desc->lock, flags);
160         ret = do_irq_select_affinity(irq, desc);
161         spin_unlock_irqrestore(&desc->lock, flags);
162
163         return ret;
164 }
165
166 #else
167 static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
168 {
169         return 0;
170 }
171 #endif
172
173 /**
174  *      disable_irq_nosync - disable an irq without waiting
175  *      @irq: Interrupt to disable
176  *
177  *      Disable the selected interrupt line.  Disables and Enables are
178  *      nested.
179  *      Unlike disable_irq(), this function does not ensure existing
180  *      instances of the IRQ handler have completed before returning.
181  *
182  *      This function may be called from IRQ context.
183  */
184 void disable_irq_nosync(unsigned int irq)
185 {
186         struct irq_desc *desc = irq_to_desc(irq);
187         unsigned long flags;
188
189         if (!desc)
190                 return;
191
192         spin_lock_irqsave(&desc->lock, flags);
193         if (!desc->depth++) {
194                 desc->status |= IRQ_DISABLED;
195                 desc->chip->disable(irq);
196         }
197         spin_unlock_irqrestore(&desc->lock, flags);
198 }
199 EXPORT_SYMBOL(disable_irq_nosync);
200
201 /**
202  *      disable_irq - disable an irq and wait for completion
203  *      @irq: Interrupt to disable
204  *
205  *      Disable the selected interrupt line.  Enables and Disables are
206  *      nested.
207  *      This function waits for any pending IRQ handlers for this interrupt
208  *      to complete before returning. If you use this function while
209  *      holding a resource the IRQ handler may need you will deadlock.
210  *
211  *      This function may be called - with care - from IRQ context.
212  */
213 void disable_irq(unsigned int irq)
214 {
215         struct irq_desc *desc = irq_to_desc(irq);
216
217         if (!desc)
218                 return;
219
220         disable_irq_nosync(irq);
221         if (desc->action)
222                 synchronize_irq(irq);
223 }
224 EXPORT_SYMBOL(disable_irq);
225
226 static void __enable_irq(struct irq_desc *desc, unsigned int irq)
227 {
228         switch (desc->depth) {
229         case 0:
230                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
231                 break;
232         case 1: {
233                 unsigned int status = desc->status & ~IRQ_DISABLED;
234
235                 /* Prevent probing on this irq: */
236                 desc->status = status | IRQ_NOPROBE;
237                 check_irq_resend(desc, irq);
238                 /* fall-through */
239         }
240         default:
241                 desc->depth--;
242         }
243 }
244
245 /**
246  *      enable_irq - enable handling of an irq
247  *      @irq: Interrupt to enable
248  *
249  *      Undoes the effect of one call to disable_irq().  If this
250  *      matches the last disable, processing of interrupts on this
251  *      IRQ line is re-enabled.
252  *
253  *      This function may be called from IRQ context.
254  */
255 void enable_irq(unsigned int irq)
256 {
257         struct irq_desc *desc = irq_to_desc(irq);
258         unsigned long flags;
259
260         if (!desc)
261                 return;
262
263         spin_lock_irqsave(&desc->lock, flags);
264         __enable_irq(desc, irq);
265         spin_unlock_irqrestore(&desc->lock, flags);
266 }
267 EXPORT_SYMBOL(enable_irq);
268
269 static int set_irq_wake_real(unsigned int irq, unsigned int on)
270 {
271         struct irq_desc *desc = irq_to_desc(irq);
272         int ret = -ENXIO;
273
274         if (desc->chip->set_wake)
275                 ret = desc->chip->set_wake(irq, on);
276
277         return ret;
278 }
279
280 /**
281  *      set_irq_wake - control irq power management wakeup
282  *      @irq:   interrupt to control
283  *      @on:    enable/disable power management wakeup
284  *
285  *      Enable/disable power management wakeup mode, which is
286  *      disabled by default.  Enables and disables must match,
287  *      just as they match for non-wakeup mode support.
288  *
289  *      Wakeup mode lets this IRQ wake the system from sleep
290  *      states like "suspend to RAM".
291  */
292 int set_irq_wake(unsigned int irq, unsigned int on)
293 {
294         struct irq_desc *desc = irq_to_desc(irq);
295         unsigned long flags;
296         int ret = 0;
297
298         /* wakeup-capable irqs can be shared between drivers that
299          * don't need to have the same sleep mode behaviors.
300          */
301         spin_lock_irqsave(&desc->lock, flags);
302         if (on) {
303                 if (desc->wake_depth++ == 0) {
304                         ret = set_irq_wake_real(irq, on);
305                         if (ret)
306                                 desc->wake_depth = 0;
307                         else
308                                 desc->status |= IRQ_WAKEUP;
309                 }
310         } else {
311                 if (desc->wake_depth == 0) {
312                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
313                 } else if (--desc->wake_depth == 0) {
314                         ret = set_irq_wake_real(irq, on);
315                         if (ret)
316                                 desc->wake_depth = 1;
317                         else
318                                 desc->status &= ~IRQ_WAKEUP;
319                 }
320         }
321
322         spin_unlock_irqrestore(&desc->lock, flags);
323         return ret;
324 }
325 EXPORT_SYMBOL(set_irq_wake);
326
327 /*
328  * Internal function that tells the architecture code whether a
329  * particular irq has been exclusively allocated or is available
330  * for driver use.
331  */
332 int can_request_irq(unsigned int irq, unsigned long irqflags)
333 {
334         struct irq_desc *desc = irq_to_desc(irq);
335         struct irqaction *action;
336
337         if (!desc)
338                 return 0;
339
340         if (desc->status & IRQ_NOREQUEST)
341                 return 0;
342
343         action = desc->action;
344         if (action)
345                 if (irqflags & action->flags & IRQF_SHARED)
346                         action = NULL;
347
348         return !action;
349 }
350
351 void compat_irq_chip_set_default_handler(struct irq_desc *desc)
352 {
353         /*
354          * If the architecture still has not overriden
355          * the flow handler then zap the default. This
356          * should catch incorrect flow-type setting.
357          */
358         if (desc->handle_irq == &handle_bad_irq)
359                 desc->handle_irq = NULL;
360 }
361
362 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
363                 unsigned long flags)
364 {
365         int ret;
366         struct irq_chip *chip = desc->chip;
367
368         if (!chip || !chip->set_type) {
369                 /*
370                  * IRQF_TRIGGER_* but the PIC does not support multiple
371                  * flow-types?
372                  */
373                 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
374                                 chip ? (chip->name ? : "unknown") : "unknown");
375                 return 0;
376         }
377
378         /* caller masked out all except trigger mode flags */
379         ret = chip->set_type(irq, flags);
380
381         if (ret)
382                 pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
383                                 (int)flags, irq, chip->set_type);
384         else {
385                 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
386                         flags |= IRQ_LEVEL;
387                 /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
388                 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
389                 desc->status |= flags;
390         }
391
392         return ret;
393 }
394
395 /*
396  * Internal function to register an irqaction - typically used to
397  * allocate special interrupts that are part of the architecture.
398  */
399 static int
400 __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
401 {
402         struct irqaction *old, **p;
403         const char *old_name = NULL;
404         unsigned long flags;
405         int shared = 0;
406         int ret;
407
408         if (!desc)
409                 return -EINVAL;
410
411         if (desc->chip == &no_irq_chip)
412                 return -ENOSYS;
413         /*
414          * Some drivers like serial.c use request_irq() heavily,
415          * so we have to be careful not to interfere with a
416          * running system.
417          */
418         if (new->flags & IRQF_SAMPLE_RANDOM) {
419                 /*
420                  * This function might sleep, we want to call it first,
421                  * outside of the atomic block.
422                  * Yes, this might clear the entropy pool if the wrong
423                  * driver is attempted to be loaded, without actually
424                  * installing a new handler, but is this really a problem,
425                  * only the sysadmin is able to do this.
426                  */
427                 rand_initialize_irq(irq);
428         }
429
430         /*
431          * The following block of code has to be executed atomically
432          */
433         spin_lock_irqsave(&desc->lock, flags);
434         p = &desc->action;
435         old = *p;
436         if (old) {
437                 /*
438                  * Can't share interrupts unless both agree to and are
439                  * the same type (level, edge, polarity). So both flag
440                  * fields must have IRQF_SHARED set and the bits which
441                  * set the trigger type must match.
442                  */
443                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
444                     ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
445                         old_name = old->name;
446                         goto mismatch;
447                 }
448
449 #if defined(CONFIG_IRQ_PER_CPU)
450                 /* All handlers must agree on per-cpuness */
451                 if ((old->flags & IRQF_PERCPU) !=
452                     (new->flags & IRQF_PERCPU))
453                         goto mismatch;
454 #endif
455
456                 /* add new interrupt at end of irq queue */
457                 do {
458                         p = &old->next;
459                         old = *p;
460                 } while (old);
461                 shared = 1;
462         }
463
464         if (!shared) {
465                 irq_chip_set_defaults(desc->chip);
466
467                 /* Setup the type (level, edge polarity) if configured: */
468                 if (new->flags & IRQF_TRIGGER_MASK) {
469                         ret = __irq_set_trigger(desc, irq,
470                                         new->flags & IRQF_TRIGGER_MASK);
471
472                         if (ret) {
473                                 spin_unlock_irqrestore(&desc->lock, flags);
474                                 return ret;
475                         }
476                 } else
477                         compat_irq_chip_set_default_handler(desc);
478 #if defined(CONFIG_IRQ_PER_CPU)
479                 if (new->flags & IRQF_PERCPU)
480                         desc->status |= IRQ_PER_CPU;
481 #endif
482
483                 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
484                                   IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
485
486                 if (!(desc->status & IRQ_NOAUTOEN)) {
487                         desc->depth = 0;
488                         desc->status &= ~IRQ_DISABLED;
489                         desc->chip->startup(irq);
490                 } else
491                         /* Undo nested disables: */
492                         desc->depth = 1;
493
494                 /* Exclude IRQ from balancing if requested */
495                 if (new->flags & IRQF_NOBALANCING)
496                         desc->status |= IRQ_NO_BALANCING;
497
498                 /* Set default affinity mask once everything is setup */
499                 do_irq_select_affinity(irq, desc);
500
501         } else if ((new->flags & IRQF_TRIGGER_MASK)
502                         && (new->flags & IRQF_TRIGGER_MASK)
503                                 != (desc->status & IRQ_TYPE_SENSE_MASK)) {
504                 /* hope the handler works with the actual trigger mode... */
505                 pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
506                                 irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
507                                 (int)(new->flags & IRQF_TRIGGER_MASK));
508         }
509
510         *p = new;
511
512         /* Reset broken irq detection when installing new handler */
513         desc->irq_count = 0;
514         desc->irqs_unhandled = 0;
515
516         /*
517          * Check whether we disabled the irq via the spurious handler
518          * before. Reenable it and give it another chance.
519          */
520         if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
521                 desc->status &= ~IRQ_SPURIOUS_DISABLED;
522                 __enable_irq(desc, irq);
523         }
524
525         spin_unlock_irqrestore(&desc->lock, flags);
526
527         new->irq = irq;
528         register_irq_proc(irq, desc);
529         new->dir = NULL;
530         register_handler_proc(irq, new);
531
532         return 0;
533
534 mismatch:
535 #ifdef CONFIG_DEBUG_SHIRQ
536         if (!(new->flags & IRQF_PROBE_SHARED)) {
537                 printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
538                 if (old_name)
539                         printk(KERN_ERR "current handler: %s\n", old_name);
540                 dump_stack();
541         }
542 #endif
543         spin_unlock_irqrestore(&desc->lock, flags);
544         return -EBUSY;
545 }
546
547 /**
548  *      setup_irq - setup an interrupt
549  *      @irq: Interrupt line to setup
550  *      @act: irqaction for the interrupt
551  *
552  * Used to statically setup interrupts in the early boot process.
553  */
554 int setup_irq(unsigned int irq, struct irqaction *act)
555 {
556         struct irq_desc *desc = irq_to_desc(irq);
557
558         return __setup_irq(irq, desc, act);
559 }
560
561 /**
562  *      free_irq - free an interrupt
563  *      @irq: Interrupt line to free
564  *      @dev_id: Device identity to free
565  *
566  *      Remove an interrupt handler. The handler is removed and if the
567  *      interrupt line is no longer in use by any driver it is disabled.
568  *      On a shared IRQ the caller must ensure the interrupt is disabled
569  *      on the card it drives before calling this function. The function
570  *      does not return until any executing interrupts for this IRQ
571  *      have completed.
572  *
573  *      This function must not be called from interrupt context.
574  */
575 void free_irq(unsigned int irq, void *dev_id)
576 {
577         struct irq_desc *desc = irq_to_desc(irq);
578         struct irqaction **p;
579         unsigned long flags;
580
581         WARN_ON(in_interrupt());
582
583         if (!desc)
584                 return;
585
586         spin_lock_irqsave(&desc->lock, flags);
587         p = &desc->action;
588         for (;;) {
589                 struct irqaction *action = *p;
590
591                 if (action) {
592                         struct irqaction **pp = p;
593
594                         p = &action->next;
595                         if (action->dev_id != dev_id)
596                                 continue;
597
598                         /* Found it - now remove it from the list of entries */
599                         *pp = action->next;
600
601                         /* Currently used only by UML, might disappear one day.*/
602 #ifdef CONFIG_IRQ_RELEASE_METHOD
603                         if (desc->chip->release)
604                                 desc->chip->release(irq, dev_id);
605 #endif
606
607                         if (!desc->action) {
608                                 desc->status |= IRQ_DISABLED;
609                                 if (desc->chip->shutdown)
610                                         desc->chip->shutdown(irq);
611                                 else
612                                         desc->chip->disable(irq);
613                         }
614                         spin_unlock_irqrestore(&desc->lock, flags);
615                         unregister_handler_proc(irq, action);
616
617                         /* Make sure it's not being used on another CPU */
618                         synchronize_irq(irq);
619 #ifdef CONFIG_DEBUG_SHIRQ
620                         /*
621                          * It's a shared IRQ -- the driver ought to be
622                          * prepared for it to happen even now it's
623                          * being freed, so let's make sure....  We do
624                          * this after actually deregistering it, to
625                          * make sure that a 'real' IRQ doesn't run in
626                          * parallel with our fake
627                          */
628                         if (action->flags & IRQF_SHARED) {
629                                 local_irq_save(flags);
630                                 action->handler(irq, dev_id);
631                                 local_irq_restore(flags);
632                         }
633 #endif
634                         kfree(action);
635                         return;
636                 }
637                 printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
638 #ifdef CONFIG_DEBUG_SHIRQ
639                 dump_stack();
640 #endif
641                 spin_unlock_irqrestore(&desc->lock, flags);
642                 return;
643         }
644 }
645 EXPORT_SYMBOL(free_irq);
646
647 /**
648  *      request_irq - allocate an interrupt line
649  *      @irq: Interrupt line to allocate
650  *      @handler: Function to be called when the IRQ occurs
651  *      @irqflags: Interrupt type flags
652  *      @devname: An ascii name for the claiming device
653  *      @dev_id: A cookie passed back to the handler function
654  *
655  *      This call allocates interrupt resources and enables the
656  *      interrupt line and IRQ handling. From the point this
657  *      call is made your handler function may be invoked. Since
658  *      your handler function must clear any interrupt the board
659  *      raises, you must take care both to initialise your hardware
660  *      and to set up the interrupt handler in the right order.
661  *
662  *      Dev_id must be globally unique. Normally the address of the
663  *      device data structure is used as the cookie. Since the handler
664  *      receives this value it makes sense to use it.
665  *
666  *      If your interrupt is shared you must pass a non NULL dev_id
667  *      as this is required when freeing the interrupt.
668  *
669  *      Flags:
670  *
671  *      IRQF_SHARED             Interrupt is shared
672  *      IRQF_DISABLED   Disable local interrupts while processing
673  *      IRQF_SAMPLE_RANDOM      The interrupt can be used for entropy
674  *      IRQF_TRIGGER_*          Specify active edge(s) or level
675  *
676  */
677 int request_irq(unsigned int irq, irq_handler_t handler,
678                 unsigned long irqflags, const char *devname, void *dev_id)
679 {
680         struct irqaction *action;
681         struct irq_desc *desc;
682         int retval;
683
684         /*
685          * handle_IRQ_event() always ignores IRQF_DISABLED except for
686          * the _first_ irqaction (sigh).  That can cause oopsing, but
687          * the behavior is classified as "will not fix" so we need to
688          * start nudging drivers away from using that idiom.
689          */
690         if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
691                         == (IRQF_SHARED|IRQF_DISABLED))
692                 pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
693                                 "guaranteed on shared IRQs\n",
694                                 irq, devname);
695
696 #ifdef CONFIG_LOCKDEP
697         /*
698          * Lockdep wants atomic interrupt handlers:
699          */
700         irqflags |= IRQF_DISABLED;
701 #endif
702         /*
703          * Sanity-check: shared interrupts must pass in a real dev-ID,
704          * otherwise we'll have trouble later trying to figure out
705          * which interrupt is which (messes up the interrupt freeing
706          * logic etc).
707          */
708         if ((irqflags & IRQF_SHARED) && !dev_id)
709                 return -EINVAL;
710
711         desc = irq_to_desc(irq);
712         if (!desc)
713                 return -EINVAL;
714
715         if (desc->status & IRQ_NOREQUEST)
716                 return -EINVAL;
717         if (!handler)
718                 return -EINVAL;
719
720         action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
721         if (!action)
722                 return -ENOMEM;
723
724         action->handler = handler;
725         action->flags = irqflags;
726         cpus_clear(action->mask);
727         action->name = devname;
728         action->next = NULL;
729         action->dev_id = dev_id;
730
731         retval = __setup_irq(irq, desc, action);
732         if (retval)
733                 kfree(action);
734
735 #ifdef CONFIG_DEBUG_SHIRQ
736         if (irqflags & IRQF_SHARED) {
737                 /*
738                  * It's a shared IRQ -- the driver ought to be prepared for it
739                  * to happen immediately, so let's make sure....
740                  * We disable the irq to make sure that a 'real' IRQ doesn't
741                  * run in parallel with our fake.
742                  */
743                 unsigned long flags;
744
745                 disable_irq(irq);
746                 local_irq_save(flags);
747
748                 handler(irq, dev_id);
749
750                 local_irq_restore(flags);
751                 enable_irq(irq);
752         }
753 #endif
754         return retval;
755 }
756 EXPORT_SYMBOL(request_irq);