[linux-2.6] / drivers / xen / events.c
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts. Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
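
/*
 * Illustrative sketch (my_evtchn, my_evtchn_handler and my_dev are
 * hypothetical names, not part of this file): a typical consumer binds
 * an inter-domain event channel to a Linux irq and an ordinary irq
 * handler using the helpers exported further down, and tears the
 * binding down again when done:
 *
 *      static irqreturn_t my_evtchn_handler(int irq, void *dev_id)
 *      {
 *              return IRQ_HANDLED;
 *      }
 *
 *      int irq = bind_evtchn_to_irqhandler(my_evtchn, my_evtchn_handler,
 *                                          0, "my_dev", my_dev);
 *      if (irq < 0)
 *              return irq;
 *      ...
 *      unbind_from_irqhandler(irq, my_dev);
 */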

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
        unsigned short evtchn;
        unsigned char index;
        unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
};
struct cpu_evtchn_s {
        unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
        return cpu_evtchn_mask_p[cpu].bits;
}
static u8 cpu_evtchn[NR_EVENT_CHANNELS];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
        return (struct packed_irq) { evtchn, index, type };
}

/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
        return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
        return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
        return irq_info[irq].type;
}
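
/*
 * Illustrative sketch of how the packed information round-trips
 * (irq and evtchn here stand for values obtained from a real binding):
 * once a binding has recorded
 *
 *      irq_info[irq] = mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, evtchn);
 *
 * the accessors above return type_from_irq(irq) == IRQT_VIRQ,
 * index_from_irq(irq) == VIRQ_TIMER and evtchn_from_irq(irq) == evtchn.
 */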

static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

        __clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
        __set_bit(chn, cpu_evtchn_mask(cpu));

        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
        struct irq_desc *desc;
        int i;

        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
                cpumask_copy(desc->affinity, cpumask_of(0));
        }
#endif

        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
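
/*
 * Illustrative sketch (my_ring and my_irq are hypothetical, and the
 * shared-ring helper is assumed from xen/interface/io/ring.h, not this
 * file): a frontend that has pushed requests onto a shared ring kicks
 * the backend via the irq obtained from bind_evtchn_to_irqhandler():
 *
 *      RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&my_ring, notify);
 *      if (notify)
 *              notify_remote_via_irq(my_irq);
 */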

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static int find_unbound_irq(void)
{
        int irq;
        struct irq_desc *desc;

        /* Only allocate from dynirq range */
        for (irq = 0; irq < nr_irqs; irq++)
                if (irq_bindcount[irq] == 0)
                        break;

        if (irq == nr_irqs)
                panic("No available IRQ to bind to: increase nr_irqs!\n");

        desc = irq_to_desc_alloc_cpu(irq, 0);
        if (WARN_ON(desc == NULL))
                return -1;

        return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];
        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

 out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                irq = find_unbound_irq();

                dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "virq");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;

                dynamic_irq_cleanup(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
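
/*
 * Illustrative sketch: per-cpu VIRQs are bound one cpu at a time.  For
 * example, the debug handler defined later in this file could be
 * attached to VIRQ_DEBUG on each online cpu roughly like this (flags
 * and error handling simplified):
 *
 *      for_each_online_cpu(cpu) {
 *              rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
 *                                           xen_debug_interrupt,
 *                                           IRQF_DISABLED | IRQF_PERCPU,
 *                                           "debug", NULL);
 *              if (rc < 0)
 *                      break;
 *      }
 */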

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);

        spin_lock_irqsave(&debug_lock, flags);

        printk("vcpu %d\n  ", cpu);

        for_each_online_cpu(i) {
                struct vcpu_info *v = per_cpu(xen_vcpu, i);
                printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
                        (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
                        v->evtchn_upcall_pending,
                        v->evtchn_pending_sel);
        }
        printk("pending:\n   ");
        for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i],
                        i % 8 == 0 ? "\n   " : " ");
        printk("\nmasks:\n   ");
        for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_mask[i],
                        i % 8 == 0 ? "\n   " : " ");

        printk("\nunmasked:\n   ");
        for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                        i % 8 == 0 ? "\n   " : " ");

        printk("\npending list:\n");
        for(i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk("  %d: event %d -> irq %d\n",
                                cpu_evtchn[i], i,
                                evtchn_to_irq[i]);
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}


/*
 * Search the CPU's pending event bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
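/*
 * Worked example (values purely illustrative): with BITS_PER_LONG == 64,
 * a pending port 130 shows up as bit 2 in evtchn_pending_sel (selecting
 * word 2) and as bit 2 in evtchn_pending[2]; the loop below recovers the
 * port number as word_idx * BITS_PER_LONG + bit_idx = 2 * 64 + 2 = 130.
 */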
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        static DEFINE_PER_CPU(unsigned, nesting_count);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__get_cpu_var(nesting_count)++)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (pending_words != 0) {
                        unsigned long pending_bits;
                        int word_idx = __ffs(pending_words);
                        pending_words &= ~(1UL << word_idx);

                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
                                int bit_idx = __ffs(pending_bits);
                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
                                int irq = evtchn_to_irq[port];

                                if (irq != -1)
                                        xen_do_IRQ(irq, regs);
                        }
                }

                BUG_ON(!irqs_disabled());

                count = __get_cpu_var(nesting_count);
                __get_cpu_var(nesting_count) = 0;
        } while(count != 1);

out:
        put_cpu();
}

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so the bindcount should be non-0 */
        BUG_ON(irq_bindcount[irq] == 0);

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
}


static void set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
        unsigned tcpu = cpumask_first(dest);
        rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(irq_info[irq].type != IRQT_VIRQ);
                BUG_ON(irq_info[irq].index != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(irq_info[irq].type != IRQT_IPI);
                BUG_ON(irq_info[irq].index != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);

        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = 0;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
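
/*
 * Illustrative sketch of the intended poll pattern (my_irq and
 * my_condition are hypothetical): a caller such as a paravirtual lock
 * implementation clears the pending state, re-checks the condition it
 * is waiting for, and then blocks in SCHEDOP_poll until the event fires:
 *
 *      xen_clear_irq_pending(my_irq);
 *      while (!my_condition()) {
 *              xen_poll_irq(my_irq);
 *              xen_clear_irq_pending(my_irq);
 *      }
 */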

void xen_irq_resume(void)
{
        unsigned int cpu, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,
        .ack            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
};
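
/*
 * These IRQs are installed with handle_level_irq() in the bind_*()
 * helpers above, so the generic irq layer masks and acks the channel
 * before running the handler and unmasks it again afterwards;
 * .mask/.unmask/.ack therefore map directly onto the evtchn
 * mask/unmask/clear operations.
 */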

void __init xen_init_IRQ(void)
{
        int i;
        size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);

        cpu_evtchn_mask_p = alloc_bootmem(size);
        BUG_ON(cpu_evtchn_mask_p == NULL);

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
        for (i = 0; i < nr_irqs; i++)
                irq_bindcount[i] = 0;

        irq_ctx_init(smp_processor_id());
}