/*P:800 Interrupts (traps) are complicated enough to earn their own file.
 * There are three classes of interrupts:
 *
 * 1) Real hardware interrupts which occur while we're running the Guest,
 * 2) Interrupts for virtual devices attached to the Guest, and
 * 3) Traps and faults from the Guest.
 *
 * Real hardware interrupts must be delivered to the Host, not the Guest.
 * Virtual interrupts must be delivered to the Guest, but we make them look
 * just like real hardware would deliver them.  Traps from the Guest can be set
 * up to go directly back into the Guest, but sometimes the Host wants to see
 * them first, so we also have a way of "reflecting" them into the Guest as if
 * they had been delivered to it directly. :*/
#include <linux/uaccess.h>
#include "lg.h"

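/* An IDT entry is eight bytes, which we handle here as two 32-bit words
 * ("lo" and "hi").  The handler address, the gate type and the present bit
 * are scattered across them, so these three little helpers dig them out and
 * let the rest of the file forget about the layout. */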
static unsigned long idt_address(u32 lo, u32 hi)
{
	return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}

static int idt_type(u32 lo, u32 hi)
{
	return (hi >> 8) & 0xF;
}

static int idt_present(u32 lo, u32 hi)
{
	return (hi & 0x8000);
}

static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
{
	/* A push moves the stack down, then writes the value there. */
	*gstack -= 4;
	lgwrite_u32(lg, *gstack, val);
}

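/* This routine does the actual "reflection": it delivers a trap or virtual
 * interrupt into the Guest the way real hardware would.  We switch to the
 * Guest's kernel stack if a ring change is needed, push the old ss/esp,
 * eflags, cs and eip (plus an error code for some traps), then point the
 * registers at the handler so the Switcher returns straight into it. */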
static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
{
	unsigned long gstack;
	u32 eflags, ss, irq_enable;

	/* If they want a ring change, we use new stack and push old ss/esp */
	if ((lg->regs->ss&0x3) != GUEST_PL) {
		gstack = guest_pa(lg, lg->esp1);
		ss = lg->ss1;
		push_guest_stack(lg, &gstack, lg->regs->ss);
		push_guest_stack(lg, &gstack, lg->regs->esp);
	} else {
		/* Same ring: keep using the current stack and segment. */
		gstack = guest_pa(lg, lg->regs->esp);
		ss = lg->regs->ss;
	}

	/* The hardware IF bit in eflags is always 1 while the Guest runs, so
	 * it doesn't tell us whether the Guest thinks interrupts are enabled.
	 * We check the Guest's own "irq_enabled" flag and clear IF in the
	 * eflags we push if the Guest had interrupts disabled. */
	eflags = lg->regs->eflags;
	if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
	    && !(irq_enable & X86_EFLAGS_IF))
		eflags &= ~X86_EFLAGS_IF;

	push_guest_stack(lg, &gstack, eflags);
	push_guest_stack(lg, &gstack, lg->regs->cs);
	push_guest_stack(lg, &gstack, lg->regs->eip);

	/* For some traps the Guest also expects an error code. */
	if (has_err)
		push_guest_stack(lg, &gstack, lg->regs->errcode);

	/* Change the real stack so the Switcher returns to the trap handler */
	lg->regs->ss = ss;
	lg->regs->esp = gstack + lg->page_offset;
	lg->regs->cs = (__KERNEL_CS|GUEST_PL);
	lg->regs->eip = idt_address(lo, hi);

	/* Disable interrupts for an interrupt gate. */
	if (idt_type(lo, hi) == 0xE)
		if (put_user(0, &lg->lguest_data->irq_enabled))
			kill_guest(lg, "Disabling interrupts");
}

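/* Before we run the Guest we look to see whether any virtual interrupts are
 * pending: if the Guest isn't blocking the interrupt, has interrupts enabled
 * and has set up a handler for it, we deliver it here. */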
void maybe_do_interrupt(struct lguest *lg)
{
	unsigned int irq;
	DECLARE_BITMAP(blk, LGUEST_IRQS);
	struct desc_struct *idt;

	/* If the Guest hasn't even initialized yet, we can do nothing. */
	if (!lg->lguest_data)
		return;

	/* Mask out any interrupts they have blocked. */
	if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
			   sizeof(blk)))
		return;
	bitmap_andnot(blk, lg->irqs_pending, blk, LGUEST_IRQS);

	irq = find_first_bit(blk, LGUEST_IRQS);
	if (irq >= LGUEST_IRQS)
		return;

	/* Don't deliver inside the Guest's "no interrupts" region. */
	if (lg->regs->eip >= lg->noirq_start && lg->regs->eip < lg->noirq_end)
		return;

	/* If they're halted, we re-enable interrupts. */
	if (lg->halted) {
		if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
			kill_guest(lg, "Re-enabling interrupts");
		lg->halted = 0;
	} else {
		/* Maybe they have interrupts disabled? */
		u32 irq_enabled;
		if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
			irq_enabled = 0;
		if (!irq_enabled)
			return;
	}

	idt = &lg->idt[FIRST_EXTERNAL_VECTOR+irq];
	if (idt_present(idt->a, idt->b)) {
		clear_bit(irq, lg->irqs_pending);
		set_guest_interrupt(lg, idt->a, idt->b, 0);
	}
}

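/* Some x86 traps push an error code: double fault (8), invalid TSS (10),
 * segment not present (11), stack fault (12), general protection (13), page
 * fault (14) and alignment check (17).  The Guest's handlers expect one too,
 * so deliver_trap() has to know which traps need it. */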
static int has_err(unsigned int trap)
{
	return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
}

int deliver_trap(struct lguest *lg, unsigned int num)
{
	u32 lo = lg->idt[num].a, hi = lg->idt[num].b;

	/* If the Guest hasn't set a handler, tell the caller. */
	if (!idt_present(lo, hi))
		return 0;
	set_guest_interrupt(lg, lo, hi, has_err(num));
	return 1;
}

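/* Some traps never need the Host's attention at all, so the real IDT can be
 * set up to send them straight into the Guest: direct_trap() decides which
 * ones qualify. */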
static int direct_trap(const struct lguest *lg,
		       const struct desc_struct *trap,
		       unsigned int num)
{
	/* Hardware interrupts don't go to the Guest (except the system call
	 * vector). */
	if (num >= FIRST_EXTERNAL_VECTOR && num != SYSCALL_VECTOR)
		return 0;

	/* We intercept page fault (demand shadow paging & cr2 saving),
	   protection fault (in/out emulation), device not available
	   (TS handling), and the hypercall trap. */
	if (num == 14 || num == 13 || num == 7 || num == LGUEST_TRAP_ENTRY)
		return 0;

	/* Interrupt gates (0xE) or not present (0x0) can't go direct. */
	return idt_type(trap->a, trap->b) == 0xF;
}

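/* When a trap drops the Guest to its kernel privilege level, the CPU switches
 * to the stack the Guest registered with guest_set_stack() below.  Those
 * stack pages have to stay mapped, otherwise the stack switch itself would
 * fault, so we "pin" them into the shadow page tables here. */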
void pin_stack_pages(struct lguest *lg)
{
	unsigned int i;

	for (i = 0; i < lg->stack_pages; i++)
		pin_page(lg, lg->esp1 - i * PAGE_SIZE);
}

void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
{
	/* You cannot have a stack segment with priv level 0. */
	if ((seg & 0x3) != GUEST_PL)
		kill_guest(lg, "bad stack segment %i", seg);
	/* We only expect one or two pages of stack. */
	if (pages > 2)
		kill_guest(lg, "bad stack pages %u", pages);
	/* Remember where the stack is, and how big, then pin it. */
	lg->ss1 = seg;
	lg->esp1 = esp;
	lg->stack_pages = pages;
	pin_stack_pages(lg);
}

/* Set up trap in IDT. */
static void set_trap(struct lguest *lg, struct desc_struct *trap,
		     unsigned int num, u32 lo, u32 hi)
{
	u8 type = idt_type(lo, hi);

	/* A not-present entry simply clears our copy. */
	if (!idt_present(lo, hi)) {
		trap->a = trap->b = 0;
		return;
	}

	/* We only support interrupt gates (0xE) and trap gates (0xF). */
	if (type != 0xE && type != 0xF)
		kill_guest(lg, "bad IDT type %i", type);

	/* Keep the handler address and type, but force the Guest's kernel
	 * code segment at Guest privilege. */
	trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
	trap->b = (hi&0xFFFFEF00);
}

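/* The Guest doesn't get to touch the real IDT; instead it tells us about each
 * trap handler it wants via a hypercall, and we keep our own copy which this
 * routine updates. */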
void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
{
	/* Guest never handles: NMI, doublefault, hypercall, spurious irq. */
	if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
		return;

	/* Mark the IDT as changed: next time the Guest runs we'll recopy it. */
	lg->changed |= CHANGED_IDT;
	/* The system call vector sits outside our IDT copy, so it has its own
	 * entry. */
	if (num < ARRAY_SIZE(lg->idt))
		set_trap(lg, &lg->idt[num], num, lo, hi);
	else if (num == SYSCALL_VECTOR)
		set_trap(lg, &lg->syscall_idt, num, lo, hi);
}

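/* Traps which can't go directly into the Guest use default entries instead:
 * each one points at a stub handler (one of the "def" addresses) which
 * bounces the trap back to the Host. */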
static void default_idt_entry(struct desc_struct *idt,
			      int trap,
			      const unsigned long handler)
{
	/* A present interrupt gate. */
	u32 flags = 0x8e00;

	/* They can't "int" into any of them except hypercall. */
	if (trap == LGUEST_TRAP_ENTRY)
		flags |= (GUEST_PL << 13);

	/* Pack the segment and handler address into the gate's odd format. */
	idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
	idt->b = (handler&0xFFFF0000) | flags;
}

void setup_default_idt_entries(struct lguest_ro_state *state,
			       const unsigned long *def)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
		default_idt_entry(&state->guest_idt[i], i, def[i]);
}

void copy_traps(const struct lguest *lg, struct desc_struct *idt,
		const unsigned long *def)
{
	unsigned int i;

	/* All hardware interrupts look the same whatever the Guest: only the
	 * traps might be different. */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) {
		if (direct_trap(lg, &lg->idt[i], i))
			idt[i] = lg->idt[i];
		else
			default_idt_entry(&idt[i], i, def[i]);
	}

	/* Don't forget the system call trap. */
	i = SYSCALL_VECTOR;
	if (direct_trap(lg, &lg->syscall_idt, i))
		idt[i] = lg->syscall_idt;
	else
		default_idt_entry(&idt[i], i, def[i]);
}

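/* The Guest's timer is a clockevent device: when the Guest asks for its next
 * timer interrupt to fire "delta" nanoseconds from now, we arm a Host
 * hrtimer which will make the timer interrupt (virtual interrupt 0) pending. */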
void guest_set_clockevent(struct lguest *lg, unsigned long delta)
{
	ktime_t expires;

	if (unlikely(delta == 0)) {
		/* Clock event device is shutting down. */
		hrtimer_cancel(&lg->hrt);
		return;
	}

	expires = ktime_add_ns(ktime_get_real(), delta);
	hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS);
}

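/* This is the hrtimer callback: it marks the timer interrupt pending and
 * wakes the Guest's process in case it was halted waiting for something to
 * happen. */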
static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
{
	struct lguest *lg = container_of(timer, struct lguest, hrt);

	/* Remember, the first virtual interrupt is the timer interrupt. */
	set_bit(0, lg->irqs_pending);
	/* If the Guest is actually stopped, we need to wake it up. */
	if (lg->halted)
		wake_up_process(lg->tsk);
	return HRTIMER_NORESTART;
}

void init_clockdev(struct lguest *lg)
{
	hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	lg->hrt.function = clockdev_fn;
}