/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts.  Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

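/*
 * Rough lifecycle, as implemented below (a summary, not an API
 * contract): a user binds an event channel, VIRQ or IPI to an irq
 * with one of the bind_*_to_irq*() helpers; Xen raises the event;
 * xen_evtchn_do_upcall() maps the pending port back to its irq and
 * feeds it into the normal interrupt path; unbind_from_irq() closes
 * the port and recycles the irq when the last user goes away.
 */
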
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
	unsigned short evtchn;
	unsigned char index;
	unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
	IRQT_UNBOUND,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
	return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq].type;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

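/*
 * Worked example for active_evtchns() (illustrative numbers): with
 * BITS_PER_LONG == 64, port 130 lives at idx 2, bit 2.  It shows up
 * in the returned word only if evtchn_pending has that bit set, the
 * per-cpu routing mask cpu_evtchn_mask[cpu] routes it to this cpu,
 * and evtchn_mask does not have it masked out.
 */
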
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
#endif

	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
	__set_bit(chn, cpu_evtchn_mask[cpu]);

	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
	struct irq_desc *desc;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc)
		desc->affinity = cpumask_of_cpu(0);

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

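/*
 * Illustrative use (hypothetical driver code, not part of this file):
 * after queuing a request on a shared ring, a frontend could kick the
 * backend through the irq it got from bind_evtchn_to_irqhandler():
 *
 *	queue_request(info->ring, req);		// hypothetical helper
 *	notify_remote_via_irq(info->irq);
 */
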
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range */
	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

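/*
 * Note on refcounting: binding a port that is already mapped just
 * bumps irq_bindcount[] and returns the existing irq, so each
 * successful bind_*_to_irq() call must be balanced by its own
 * unbind_from_irq() before the port is actually closed.
 */
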
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];
	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

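/*
 * Sketch of a caller (assumed names 'info' and 'my_interrupt'; error
 * handling trimmed).  On success the return value is the irq number:
 *
 *	int irq = bind_evtchn_to_irqhandler(info->evtchn, my_interrupt,
 *					    0, "mydev", info);
 *	if (irq < 0)
 *		return irq;
 *	info->irq = irq;
 */
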
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

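/*
 * Illustrative caller: the per-cpu debug event below could be wired
 * up roughly like this during cpu bringup (flags and name are
 * assumptions, not taken from this file):
 *
 *	bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
 *				IRQF_DISABLED, "debug", NULL);
 */
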
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];

	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

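/*
 * Example (assuming the ipi_vector values declared in xen/events.h,
 * e.g. XEN_RESCHEDULE_VECTOR): the smp code pokes another vcpu with
 * something like xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR).
 */
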
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long flags;
	int i;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}

	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				if (irq != -1)
					xen_do_IRQ(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	put_cpu();
}

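/*
 * Worked example of the two-level search above (BITS_PER_LONG == 64):
 * if xchg() returns a selector with bit 1 set, word_idx 1 is scanned;
 * a pending, unmasked, locally-routed bit 6 in that word gives
 * port = 1*64 + 6 = 70, and evtchn_to_irq[70] is dispatched.
 */
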
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so the bindcount should be non-0 */
	BUG_ON(irq_bindcount[irq] == 0);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of_cpu(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

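/*
 * Design note: the mask/set-pending/unmask sequence above (repeated in
 * retrigger_dynirq() below) re-injects an event without racing the
 * upcall: the pending bit is set while the port is masked, and the
 * final unmask_evtchn() performs the lost-edge check that actually
 * raises the upcall if needed.
 */
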
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
		BUG_ON(irq_info[irq].index != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq].type != IRQT_IPI);
		BUG_ON(irq_info[irq].index != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}

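/*
 * Typical polling pattern (a sketch; assumes 'irq' is bound and left
 * disabled by its owner, so delivery never races the poll):
 *
 *	xen_clear_irq_pending(irq);
 *	while (!xen_test_irq_pending(irq))
 *		xen_poll_irq(irq);
 *
 * xen_poll_irq() blocks in the hypervisor until the port becomes
 * pending, and the loop re-checks after each wakeup.
 */
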
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,
	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

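/*
 * The bind_*_to_irq() helpers above pair this chip with
 * handle_level_irq, so an event stays masked via ->mask from the
 * moment it is acked until the handler finishes and ->unmask runs;
 * event-channel bits behave more like level- than edge-triggered
 * lines, which is why the level flow handler is used here.
 */
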
void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < nr_irqs; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}