2 * irq_comm.c: Common API for in kernel interrupt controller
3 * Copyright (c) 2007, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
22 #include <linux/kvm_host.h>
24 #include <asm/msidef.h>
30 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
31 struct kvm *kvm, int level)
34 return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level);
40 static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
41 struct kvm *kvm, int level)
43 return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
/*
 * Compute the set of destination vcpus for an interrupt described by an
 * IOAPIC-style redirection entry (also used for MSI, see kvm_set_msi).
 * On return, deliver_bitmask has one bit set per target vcpu index.
 *
 * NOTE(review): several interior lines (vcpu lookups, break statements,
 * the closing braces and the switch's other case labels) appear to be
 * elided in this excerpt -- confirm against the full file.
 */
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
			union kvm_ioapic_redirect_entry *entry,
			unsigned long *deliver_bitmask)
	struct kvm *kvm = ioapic->kvm;
	struct kvm_vcpu *vcpu;
	/* Start from an empty destination set. */
	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
	if (entry->fields.dest_mode == 0) {	/* Physical mode. */
		if (entry->fields.dest_id == 0xFF) {	/* Broadcast. */
			/* Target every vcpu that has a local APIC. */
			for (i = 0; i < KVM_MAX_VCPUS; ++i)
				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
					__set_bit(i, deliver_bitmask);
			/* Lowest priority shouldn't combine with broadcast */
			if (entry->fields.delivery_mode ==
			    IOAPIC_LOWEST_PRIORITY && printk_ratelimit())
				printk(KERN_INFO "kvm: apic: phys broadcast "
		/* Unicast physical: match APIC id against dest_id. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			if (kvm_apic_match_physical_addr(vcpu->arch.apic,
					entry->fields.dest_id)) {
				__set_bit(i, deliver_bitmask);
	} else if (entry->fields.dest_id != 0) /* Logical mode, MDA non-zero. */
		/* Logical mode: every APIC whose LDR matches is a target. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			if (vcpu->arch.apic &&
			    kvm_apic_match_logical_addr(vcpu->arch.apic,
					entry->fields.dest_id))
				__set_bit(i, deliver_bitmask);
	switch (entry->fields.delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		/* Select one in deliver_bitmask */
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
				entry->fields.vector, deliver_bitmask);
		/* Collapse the set down to the single arbitrated vcpu. */
		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
		__set_bit(vcpu->vcpu_id, deliver_bitmask);
	/* Presumably the default: case -- unknown mode delivers nowhere. */
	if (printk_ratelimit())
		printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
			entry->fields.delivery_mode);
	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
/*
 * Routing callback for MSI/MSI-X entries: decode the MSI address/data
 * payload into an IOAPIC-style redirection entry, compute the
 * destination vcpu set, and inject the interrupt into each target APIC.
 *
 * NOTE(review): declarations of r and vcpu_id, the early-return on "no
 * destination", and the final return of the accumulated count appear to
 * be elided in this excerpt -- confirm against the full file.
 */
static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		       struct kvm *kvm, int level)
	struct kvm_vcpu *vcpu;
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	union kvm_ioapic_redirect_entry entry;
	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
	/* Unpack destination id, vector and mode bits from the MSI message. */
	entry.fields.dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	entry.fields.vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
			(unsigned long *)&e->msi.address_lo);
	entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
			(unsigned long *)&e->msi.data);
	entry.fields.delivery_mode = test_bit(
			MSI_DATA_DELIVERY_MODE_SHIFT,
			(unsigned long *)&e->msi.data);
	/* TODO Deal with RH bit of MSI message address */
	kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
	/* Empty destination set: nothing to inject, warn the host admin. */
	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
		printk(KERN_WARNING "kvm: no destination for MSI delivery!");
	/* Inject into each target vcpu, consuming bits as we go. */
	while ((vcpu_id = find_first_bit(deliver_bitmask,
				KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
		__clear_bit(vcpu_id, deliver_bitmask);
		vcpu = ioapic->kvm->vcpus[vcpu_id];
		r += kvm_apic_set_irq(vcpu, entry.fields.vector,
				      entry.fields.dest_mode,
				      entry.fields.trig_mode);
/* This should be called with the kvm->lock mutex held.
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
	struct kvm_kernel_irq_routing_entry *e;
	unsigned long *irq_state, sig_level;
	if (irq < KVM_IOAPIC_NUM_PINS) {
		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
		/* Logical OR for level trig interrupt */
		/* NOTE(review): the "if (level)" / "else" lines selecting
		 * between set_bit and clear_bit appear elided here. */
			set_bit(irq_source_id, irq_state);
			clear_bit(irq_source_id, irq_state);
		/* Line is asserted iff any source still holds it high. */
		sig_level = !!(*irq_state);
	} else /* Deal with MSI/MSI-X */
	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	list_for_each_entry(e, &kvm->irq_routing, link)
		int r = e->set(e, kvm, sig_level);
		/* Accumulate delivered-CPU counts; a negative r alone
		 * means ignored (see return-value contract above). */
		ret = r + ((ret < 0) ? 0 : ret);
/*
 * Called when the guest ACKs an interrupt on (irqchip, pin): translate
 * the pin back to its GSI via the routing table, then run every
 * registered ack notifier for that GSI.
 *
 * NOTE(review): the declaration and assignment of gsi (from e->gsi in
 * the matching routing entry) appear elided in this excerpt.
 */
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_ack_notifier *kian;
	struct hlist_node *n;
	/* Reverse-map (irqchip, pin) -> gsi through the routing list. */
	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->irqchip.irqchip == irqchip &&
		    e->irqchip.pin == pin) {
	/* Fire every notifier registered for this GSI. */
	hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
216 void kvm_register_irq_ack_notifier(struct kvm *kvm,
217 struct kvm_irq_ack_notifier *kian)
219 hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
222 void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
224 hlist_del_init(&kian->link);
227 /* The caller must hold kvm->lock mutex */
228 int kvm_request_irq_source_id(struct kvm *kvm)
230 unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
231 int irq_source_id = find_first_zero_bit(bitmap,
232 sizeof(kvm->arch.irq_sources_bitmap));
234 if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
235 printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
239 ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
240 set_bit(irq_source_id, bitmap);
242 return irq_source_id;
245 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
249 ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
251 if (irq_source_id < 0 ||
252 irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
253 printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
256 for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
257 clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
258 clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
261 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
262 struct kvm_irq_mask_notifier *kimn)
265 hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
268 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
269 struct kvm_irq_mask_notifier *kimn)
271 hlist_del(&kimn->link);
274 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
276 struct kvm_irq_mask_notifier *kimn;
277 struct hlist_node *n;
279 hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
280 if (kimn->irq == irq)
281 kimn->func(kimn, mask);
284 static void __kvm_free_irq_routing(struct list_head *irq_routing)
286 struct kvm_kernel_irq_routing_entry *e, *n;
288 list_for_each_entry_safe(e, n, irq_routing, link)
292 void kvm_free_irq_routing(struct kvm *kvm)
294 __kvm_free_irq_routing(&kvm->irq_routing);
/*
 * Translate one userspace routing entry (ue) into its kernel form (e),
 * picking the ->set() callback by chip type.  For the slave PIC the pin
 * is offset (delta) so pins 8-15 map onto slave inputs 0-7.
 *
 * NOTE(review): the function's opening brace, the outer
 * "switch (ue->type)", the delta initialisation/assignment, the break
 * statements and the error returns appear elided in this excerpt.
 */
static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
	case KVM_IRQ_ROUTING_IRQCHIP:
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
		case KVM_IRQCHIP_PIC_SLAVE:
			e->set = kvm_set_pic_irq;
		case KVM_IRQCHIP_IOAPIC:
			e->set = kvm_set_ioapic_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		/* delta presumably 0 (master/IOAPIC) or 8 (slave). */
		e->irqchip.pin = ue->u.irqchip.pin + delta;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		/* Copy the raw MSI message; decoded later in kvm_set_msi. */
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
/*
 * Replace the VM's routing table with @nr entries from userspace.
 * Builds the new list outside the lock, swaps it in under kvm->lock,
 * then frees whichever list ends up on irq_list (the old table on
 * success, the partially built one on failure).
 *
 * NOTE(review): the trailing parameters (presumably "unsigned nr,
 * unsigned flags"), the declarations of i and r, the per-entry error
 * checks/gotos, and the final return appear elided in this excerpt.
 */
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct kvm_kernel_irq_routing_entry *e = NULL;
	/* Build the replacement list without holding kvm->lock. */
	for (i = 0; i < nr; ++i) {
		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		r = setup_routing_entry(e, ue);
		list_add(&e->link, &irq_list);
	/* Atomically swap old table for new; park the old one on irq_list. */
	mutex_lock(&kvm->lock);
	list_splice(&kvm->irq_routing, &tmp);
	INIT_LIST_HEAD(&kvm->irq_routing);
	list_splice(&irq_list, &kvm->irq_routing);
	INIT_LIST_HEAD(&irq_list);
	list_splice(&tmp, &irq_list);
	mutex_unlock(&kvm->lock);
	/* Frees the displaced (or abandoned) list. */
	__kvm_free_irq_routing(&irq_list);
/* Route GSI <irq> straight to the IOAPIC pin of the same number. */
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

/*
 * NOTE(review): the #ifdef/#else/#endif lines around the two
 * ROUTING_ENTRY2 definitions below appear elided in this excerpt; they
 * presumably sit on opposite sides of an arch conditional (with vs.
 * without a PIC) -- as written they would be a redefinition. Confirm
 * against the full file.
 */
/* GSIs 0-15 also feed the PIC: master pins 0-7, slave pins 8-15. */
# define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
# define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
/* No PIC on this arch: GSIs 0-15 get only the IOAPIC route. */
# define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
/*
 * Default GSI routing installed by kvm_setup_default_irq_routing():
 * GSIs 0-15 go to PIC and IOAPIC (ROUTING_ENTRY2), 16-23 to the IOAPIC
 * only.  NOTE(review): the entries for 24-47 are presumably inside an
 * elided arch conditional (an IOAPIC with more pins), and the array's
 * closing "};" is also elided here -- confirm against the full file.
 */
static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
429 int kvm_setup_default_irq_routing(struct kvm *kvm)
431 return kvm_set_irq_routing(kvm, default_routing,
432 ARRAY_SIZE(default_routing), 0);