arch/x86/kvm/x86.c [linux-2.6]
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/intel-iommu.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall by default because it is emulated by KVM
 * - enable LME and LMA by default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif
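
/*
 * Decoding the masks above: ~0xfffffffffffffafe == 0x501, i.e. bits 0, 8
 * and 10, so on 64-bit hosts the writable EFER bits default to SCE (bit 0),
 * LME (bit 8) and LMA (bit 10); on 32-bit hosts only SCE is writable.
 * kvm_enable_efer_bits() below lets vendor modules widen this set.
 */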

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};

unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;

        if (selector == 0)
                return 0;

        asm("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;

                asm("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct desc_struct *)(table_base + (selector & ~7));
        v = d->base0 | ((unsigned long)d->base1 << 16) |
                ((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
        if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = false;
        vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
{
        ++vcpu->stat.pf_guest;
        if (vcpu->arch.exception.pending) {
                if (vcpu->arch.exception.nr == PF_VECTOR) {
                        printk(KERN_DEBUG "kvm: inject_page_fault:"
                                        " double fault 0x%lx\n", addr);
                        vcpu->arch.exception.nr = DF_VECTOR;
                        vcpu->arch.exception.error_code = 0;
                } else if (vcpu->arch.exception.nr == DF_VECTOR) {
                        /* triple fault -> shutdown */
                        set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                }
                return;
        }
        vcpu->arch.cr2 = addr;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        WARN_ON(vcpu->arch.exception.pending);
        vcpu->arch.exception.pending = true;
        vcpu->arch.exception.has_error_code = true;
        vcpu->arch.exception.nr = nr;
        vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
                                     vcpu->arch.exception.has_error_code,
                                     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
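
/*
 * Worked example for the offset computation above: in PAE mode cr3 is
 * 32-byte aligned, so for cr3 == 0x12345fe0 the page offset is 0xfe0,
 * giving offset == (0xfe0 >> 5) << 2 == 0x1fc u64-sized slots, i.e. byte
 * offset 0x1fc * 8 == 0xfe0 -- the start of the four-entry, 32-byte PDPT
 * within the page.
 */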

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
        return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->arch.cr0);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }

        }

        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->arch.cr0 = cr0;

        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
        KVMTRACE_1D(LMSW, vcpu,
                    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
                    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
        kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                kvm_inject_gp(vcpu, 0);
        else {
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_IA32_PERF_STATUS,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & efer_reserved_bits) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.shadow_efer & EFER_LMA;

        vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
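
/*
 * A vendor module would typically call this at hardware-setup time to make
 * additional EFER bits guest-writable -- for instance (a sketch only,
 * assuming the host actually supports NX):
 *
 *	if (boot_cpu_has(X86_FEATURE_NX))
 *		kvm_enable_efer_bits(EFER_NX);
 */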

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        static int version;
        struct pvclock_wall_clock wc;
        struct timespec now, sys, boot;

        if (!wall_clock)
                return;

        version++;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_write_guest_time below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
        now = current_kernel_time();
        ktime_get_ts(&sys);
        boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
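
/*
 * The version dance above leaves an odd version while the structure is
 * inconsistent.  A guest therefore has to read it with a retry loop along
 * these lines (an illustrative sketch, not code from this file):
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((version & 1) || version != wc->version);
 */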

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}
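
/*
 * For reference, a portable equivalent of the asm above, using 64-bit
 * arithmetic -- a sketch for illustration, not used here:
 *
 *	static uint32_t div_frac_portable(uint32_t dividend, uint32_t divisor)
 *	{
 *		return (uint32_t)(((uint64_t)dividend << 32) / divisor);
 *	}
 */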

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
        uint64_t nsecs = 1000000000LL;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = tsc_khz * 1000LL;
        while (tps64 > nsecs*2) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= (uint32_t)nsecs) {
                tps32 <<= 1;
                shift++;
        }

        hv_clock->tsc_shift = shift;
        hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

        pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
                 __func__, tsc_khz, hv_clock->tsc_shift,
                 hv_clock->tsc_to_system_mul);
}
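
/*
 * Worked example: for tsc_khz == 2000000 (a 2 GHz TSC), tps64 starts at
 * 2000000000, neither loop runs, so tsc_shift == 0 and tsc_to_system_mul
 * == div_frac(1000000000, 2000000000) == 0x80000000.  The guest then
 * computes (delta_tsc * 0x80000000) >> 32 == delta_tsc / 2 nanoseconds,
 * which is exactly right for a clock ticking twice per nanosecond.
 */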

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
        struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;

        if (!vcpu->time_page)
                return;

        if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
                kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
                vcpu->hv_clock_tsc_khz = tsc_khz;
        }

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
                          &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
        local_irq_restore(flags);

        /* With all the info we got, fill in the values */

        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec);
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
         * state, we just increase by 2 at the end.
         */
        vcpu->hv_clock.version += 2;

        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));

        kunmap_atomic(shared_kaddr, KM_USER0);

        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
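
/*
 * With these fields filled in, the guest recovers nanoseconds since boot
 * roughly as follows (a sketch of the pvclock guest-side algorithm, not
 * code from this file), bracketed by the usual even/odd version check:
 *
 *	delta = rdtsc() - hv_clock->tsc_timestamp;
 *	if (hv_clock->tsc_shift >= 0)
 *		delta <<= hv_clock->tsc_shift;
 *	else
 *		delta >>= -hv_clock->tsc_shift;
 *	ns = hv_clock->system_time +
 *	     (delta * hv_clock->tsc_to_system_mul >> 32);
 */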

static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        if (!msr_mtrr_valid(msr))
                return 1;

        vcpu->arch.mtrr[msr - 0x200] = data;
        return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
        case MSR_IA32_MC0_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
                       __func__, data);
                break;
        case MSR_IA32_MCG_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_MCG_CTL:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!data) {
                        /* We support the non-activated case already */
                        break;
                } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
                }
                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                vcpu->arch.time = data;

                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        break;

                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                kvm_write_guest_time(vcpu);
                break;
        }
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
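
/*
 * On the guest side, a kvmclock-aware kernel enables the per-vcpu clock by
 * writing the guest physical address of its pvclock area with the enable
 * bit (bit 0) set -- an illustrative sketch, where pvclock_gpa is a
 * stand-in name:
 *
 *	wrmsrl(MSR_KVM_SYSTEM_TIME, pvclock_gpa | 1);
 *
 * Writing a value with bit 0 clear stops the updates, as handled above.
 */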

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (!msr_mtrr_valid(msr))
                return 1;

        *pdata = vcpu->arch.mtrr[msr - 0x200];
        return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case 0xc0010010: /* SYSCFG */
        case 0xc0010015: /* HWCR */
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MC0_MISC:
        case MSR_IA32_MC0_MISC+4:
        case MSR_IA32_MC0_MISC+8:
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_MC0_MISC+20:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
        case MSR_IA32_DEBUGCTLMSR:
        case MSR_IA32_LASTBRANCHFROMIP:
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
                data = 0;
                break;
        case MSR_MTRRcap:
                data = 0x500 | KVM_NR_VAR_MTRR;
                break;
        case 0x200 ... 0x2ff:
                return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                data = 1000ULL;
                /* CPU multiplier */
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                data = vcpu->arch.shadow_efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
                data = vcpu->arch.time;
                break;
        default:
                pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    struct kvm_msr_entry *entries,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
{
        int i;

        vcpu_load(vcpu);

        down_read(&vcpu->kvm->slots_lock);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
        up_read(&vcpu->kvm->slots_lock);

        vcpu_put(vcpu);

        return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
                  int (*do_msr)(struct kvm_vcpu *vcpu,
                                unsigned index, u64 *data),
                  int writeback)
{
        struct kvm_msrs msrs;
        struct kvm_msr_entry *entries;
        int r, n;
        unsigned size;

        r = -EFAULT;
        if (copy_from_user(&msrs, user_msrs, sizeof msrs))
                goto out;

        r = -E2BIG;
        if (msrs.nmsrs >= MAX_IO_MSRS)
                goto out;

        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
        entries = vmalloc(size);
        if (!entries)
                goto out;

        r = -EFAULT;
        if (copy_from_user(entries, user_msrs->entries, size))
                goto out_free;

        r = n = __msr_io(vcpu, &msrs, entries, do_msr);
        if (r < 0)
                goto out_free;

        r = -EFAULT;
        if (writeback && copy_to_user(user_msrs->entries, entries, size))
                goto out_free;

        r = n;

out_free:
        vfree(entries);
out:
        return r;
}
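
/*
 * Userspace reaches this through the KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls
 * with a variable-length buffer.  A minimal sketch of reading one MSR
 * (error handling omitted, names local to the example):
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entry;
 *	} one_msr = { .hdr.nmsrs = 1, .entry.index = MSR_EFER };
 *
 *	n = ioctl(vcpu_fd, KVM_GET_MSRS, &one_msr);
 *
 * where n is the number of msrs actually processed, as returned above.
 */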

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_HLT:
        case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
        case KVM_CAP_CLOCKSOURCE:
        case KVM_CAP_PIT:
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
        case KVM_CAP_NR_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_MEMORY_SLOTS;
                break;
        case KVM_CAP_PV_MMU:
                r = !tdp_enabled;
                break;
        case KVM_CAP_IOMMU:
                r = intel_iommu_found();
                break;
        default:
                r = 0;
                break;
        }
        return r;
}
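
/*
 * Userspace probes these capabilities with the KVM_CHECK_EXTENSION ioctl
 * on the /dev/kvm fd, e.g. (sketch):
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *		use_in_kernel_irqchip();
 *
 * where use_in_kernel_irqchip() is a placeholder for the caller's setup.
 */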

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_GET_MSR_INDEX_LIST: {
                struct kvm_msr_list __user *user_msr_list = argp;
                struct kvm_msr_list msr_list;
                unsigned n;

                r = -EFAULT;
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
                if (n < num_msrs_to_save)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
                if (copy_to_user(user_msr_list->indices
                                 + num_msrs_to_save * sizeof(u32),
                                 &emulated_msrs,
                                 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SUPPORTED_CPUID: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
                        cpuid_arg->entries);
                if (r)
                        goto out;

                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_x86_ops->vcpu_load(vcpu, cpu);
        kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
        u64 efer;

        rdmsrl(MSR_EFER, efer);
        return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

/* Legacy path: an old userspace supplies kvm_cpuid_entry data, which a
 * newer kernel module converts to kvm_cpuid_entry2. */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid *cpuid,
                                    struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
        if (!cpuid_entries)
                goto out;
        r = -EFAULT;
        if (copy_from_user(cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry)))
                goto out_free;
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        return 0;

out:
        return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                                    struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                           vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                          u32 index)
{
        entry->function = function;
        entry->index = index;
        cpuid_count(entry->function, entry->index,
                &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
        entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                         u32 index, int *nent, int maxnent)
{
        const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
                bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
                bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
                bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
                bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
                bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
                bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
                bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
                bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
                bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
        const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
                bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
                bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
                bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
                bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
                bit(X86_FEATURE_PGE) |
                bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
                bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
                bit(X86_FEATURE_SYSCALL) |
                (bit(X86_FEATURE_NX) && is_efer_nx()) |
#ifdef CONFIG_X86_64
                bit(X86_FEATURE_LM) |
#endif
                bit(X86_FEATURE_MMXEXT) |
                bit(X86_FEATURE_3DNOWEXT) |
                bit(X86_FEATURE_3DNOW);
        const u32 kvm_supported_word3_x86_features =
                bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
        const u32 kvm_supported_word6_x86_features =
                bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

        /* all function 2 cpuid_count() calls should be made on the same cpu */
        get_cpu();
        do_cpuid_1_ent(entry, function, index);
        ++*nent;

        switch (function) {
        case 0:
                entry->eax = min(entry->eax, (u32)0xb);
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
                entry->ecx &= kvm_supported_word3_x86_features;
                break;
        /* function 2 entries are STATEFUL. That is, repeated cpuid commands
         * may return different values. This forces us to get_cpu() before
         * issuing the first command, and also to emulate this annoying behavior
         * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
        case 2: {
                int t, times = entry->eax & 0xff;

                entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                for (t = 1; t < times && *nent < maxnent; ++t) {
                        do_cpuid_1_ent(&entry[t], function, 0);
                        entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
                        ++*nent;
                }
                break;
        }
        /* function 4 and 0xb have additional index. */
        case 4: {
                int i, cache_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
                for (i = 1; *nent < maxnent; ++i) {
                        cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0xb: {
                int i, level_type;

                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
                for (i = 1; *nent < maxnent; ++i) {
                        level_type = entry[i - 1].ecx & 0xff;
                        if (!level_type)
                                break;
                        do_cpuid_1_ent(&entry[i], function, i);
                        entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001a);
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
                entry->ecx &= kvm_supported_word6_x86_features;
                break;
        }
        put_cpu();
}

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *cpuid_entries;
        int limit, nent = 0, r = -E2BIG;
        u32 func;

        if (cpuid->nent < 1)
                goto out;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
                goto out;

        do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
        limit = cpuid_entries[0].eax;
        for (func = 1; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                                &nent, cpuid->nent);
        r = -E2BIG;
        if (nent >= cpuid->nent)
                goto out_free;

        do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
        limit = cpuid_entries[nent - 1].eax;
        for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
                do_cpuid_ent(&cpuid_entries[nent], func, 0,
                               &nent, cpuid->nent);
        r = -EFAULT;
        if (copy_to_user(entries, cpuid_entries,
                        nent * sizeof(struct kvm_cpuid_entry2)))
                goto out_free;
        cpuid->nent = nent;
        r = 0;

out_free:
        vfree(cpuid_entries);
out:
        return r;
}
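
/*
 * Together with the -E2BIG handling above, this gives userspace a simple
 * sizing protocol: guess an entry count and retry larger on E2BIG.  A
 * sketch (cpuid2 points to a struct kvm_cpuid2 plus its entry array):
 *
 *	cpuid2->nent = 64;
 *	while (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid2) < 0 &&
 *	       errno == E2BIG)
 *		... enlarge the buffer, raise nent and retry ...
 *
 * On success the kernel writes the actual entry count back into nent.
 */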

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
{
        vcpu_load(vcpu);
        memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
        kvm_apic_post_state_restore(vcpu);
        vcpu_put(vcpu);

        return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
{
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
        vcpu_load(vcpu);

        set_bit(irq->irq, vcpu->arch.irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

        vcpu_put(vcpu);

        return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
                                           struct kvm_tpr_access_ctl *tac)
{
        if (tac->flags)
                return -EINVAL;
        vcpu->arch.tpr_access_reporting = !!tac->enabled;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
        struct kvm_lapic_state *lapic = NULL;

        switch (ioctl) {
        case KVM_GET_LAPIC: {
                lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

                r = -ENOMEM;
                if (!lapic)
                        goto out;
                r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_LAPIC: {
                lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
                r = -ENOMEM;
                if (!lapic)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
                        goto out;
                r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_CPUID: {
                struct kvm_cpuid __user *cpuid_arg = argp;
                struct kvm_cpuid cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
                if (r)
                        goto out;
                break;
        }
        case KVM_SET_CPUID2: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
                                cpuid_arg->entries);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_CPUID2: {
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;

                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
                        goto out;
                r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
                                cpuid_arg->entries);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MSRS:
                r = msr_io(vcpu, argp, kvm_get_msr, 1);
                break;
        case KVM_SET_MSRS:
                r = msr_io(vcpu, argp, do_set_msr, 0);
                break;
        case KVM_TPR_ACCESS_REPORTING: {
                struct kvm_tpr_access_ctl tac;

                r = -EFAULT;
                if (copy_from_user(&tac, argp, sizeof tac))
                        goto out;
                r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tac, sizeof tac))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_VAPIC_ADDR: {
                struct kvm_vapic_addr va;

                r = -EINVAL;
                if (!irqchip_in_kernel(vcpu->kvm))
                        goto out;
                r = -EFAULT;
                if (copy_from_user(&va, argp, sizeof va))
                        goto out;
                r = 0;
                kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
                break;
        }
        default:
                r = -EINVAL;
        }
out:
        kfree(lapic);
        return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
        int ret;

        if (addr > (unsigned int)(-3 * PAGE_SIZE))
                return -1;
        ret = kvm_x86_ops->set_tss_addr(kvm, addr);
        return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                                          u32 kvm_nr_mmu_pages)
{
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;

        down_write(&kvm->slots_lock);

        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

        up_write(&kvm->slots_lock);
        return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
        return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_mem_alias *alias;

        for (i = 0; i < kvm->arch.naliases; ++i) {
                alias = &kvm->arch.aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;
}
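
/*
 * Example: an alias slot with base_gfn == 0xa0, npages == 0x20 and
 * target_gfn == 0x800 remaps guest frames 0xa0-0xbf (the PC VGA window at
 * guest physical 0xa0000) onto 0x800-0x81f, so unalias_gfn(kvm, 0xa5)
 * returns 0x805; any gfn outside every alias is returned unchanged.
 */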
1508
1509 /*
1510  * Set a new alias region.  Aliases map a portion of physical memory into
1511  * another portion.  This is useful for memory windows, for example the PC
1512  * VGA region.
1513  */
1514 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
1515                                          struct kvm_memory_alias *alias)
1516 {
1517         int r, n;
1518         struct kvm_mem_alias *p;
1519
1520         r = -EINVAL;
1521         /* General sanity checks */
1522         if (alias->memory_size & (PAGE_SIZE - 1))
1523                 goto out;
1524         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
1525                 goto out;
1526         if (alias->slot >= KVM_ALIAS_SLOTS)
1527                 goto out;
1528         if (alias->guest_phys_addr + alias->memory_size
1529             < alias->guest_phys_addr)
1530                 goto out;
1531         if (alias->target_phys_addr + alias->memory_size
1532             < alias->target_phys_addr)
1533                 goto out;
1534
1535         down_write(&kvm->slots_lock);
1536         spin_lock(&kvm->mmu_lock);
1537
1538         p = &kvm->arch.aliases[alias->slot];
1539         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
1540         p->npages = alias->memory_size >> PAGE_SHIFT;
1541         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
1542
1543         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
1544                 if (kvm->arch.aliases[n - 1].npages)
1545                         break;
1546         kvm->arch.naliases = n;
1547
1548         spin_unlock(&kvm->mmu_lock);
1549         kvm_mmu_zap_all(kvm);
1550
1551         up_write(&kvm->slots_lock);
1552
1553         return 0;
1554
1555 out:
1556         return r;
1557 }
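/*
 * A minimal userspace sketch of exercising the handler above through the
 * KVM_SET_MEMORY_ALIAS vm ioctl, mapping the 128K legacy VGA window at
 * 0xa0000 onto RAM at 0x10000000 (vm_fd and the addresses are
 * assumptions chosen for illustration only):
 *
 *	struct kvm_memory_alias alias = {
 *		.slot             = 0,
 *		.guest_phys_addr  = 0xa0000,
 *		.memory_size      = 0x20000,
 *		.target_phys_addr = 0x10000000,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &alias) < 0)
 *		perror("KVM_SET_MEMORY_ALIAS");
 */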
1558
1559 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1560 {
1561         int r;
1562
1563         r = 0;
1564         switch (chip->chip_id) {
1565         case KVM_IRQCHIP_PIC_MASTER:
1566                 memcpy(&chip->chip.pic,
1567                         &pic_irqchip(kvm)->pics[0],
1568                         sizeof(struct kvm_pic_state));
1569                 break;
1570         case KVM_IRQCHIP_PIC_SLAVE:
1571                 memcpy(&chip->chip.pic,
1572                         &pic_irqchip(kvm)->pics[1],
1573                         sizeof(struct kvm_pic_state));
1574                 break;
1575         case KVM_IRQCHIP_IOAPIC:
1576                 memcpy(&chip->chip.ioapic,
1577                         ioapic_irqchip(kvm),
1578                         sizeof(struct kvm_ioapic_state));
1579                 break;
1580         default:
1581                 r = -EINVAL;
1582                 break;
1583         }
1584         return r;
1585 }
1586
1587 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1588 {
1589         int r;
1590
1591         r = 0;
1592         switch (chip->chip_id) {
1593         case KVM_IRQCHIP_PIC_MASTER:
1594                 memcpy(&pic_irqchip(kvm)->pics[0],
1595                         &chip->chip.pic,
1596                         sizeof(struct kvm_pic_state));
1597                 break;
1598         case KVM_IRQCHIP_PIC_SLAVE:
1599                 memcpy(&pic_irqchip(kvm)->pics[1],
1600                         &chip->chip.pic,
1601                         sizeof(struct kvm_pic_state));
1602                 break;
1603         case KVM_IRQCHIP_IOAPIC:
1604                 memcpy(ioapic_irqchip(kvm),
1605                         &chip->chip.ioapic,
1606                         sizeof(struct kvm_ioapic_state));
1607                 break;
1608         default:
1609                 r = -EINVAL;
1610                 break;
1611         }
1612         kvm_pic_update_irq(pic_irqchip(kvm));
1613         return r;
1614 }
1615
1616 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1617 {
1618         int r = 0;
1619
1620         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
1621         return r;
1622 }
1623
1624 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1625 {
1626         int r = 0;
1627
1628         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
1629         kvm_pit_load_count(kvm, 0, ps->channels[0].count);
1630         return r;
1631 }
1632
1633 /*
1634  * Get (and clear) the dirty memory log for a memory slot.
1635  */
1636 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1637                                       struct kvm_dirty_log *log)
1638 {
1639         int r;
1640         int n;
1641         struct kvm_memory_slot *memslot;
1642         int is_dirty = 0;
1643
1644         down_write(&kvm->slots_lock);
1645
1646         r = kvm_get_dirty_log(kvm, log, &is_dirty);
1647         if (r)
1648                 goto out;
1649
1650         /* If nothing is dirty, don't bother messing with page tables. */
1651         if (is_dirty) {
1652                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1653                 kvm_flush_remote_tlbs(kvm);
1654                 memslot = &kvm->memslots[log->slot];
1655                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1656                 memset(memslot->dirty_bitmap, 0, n);
1657         }
1658         r = 0;
1659 out:
1660         up_write(&kvm->slots_lock);
1661         return r;
1662 }
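/*
 * Illustrative userspace sketch for the ioctl served above: fetch and
 * clear the dirty bitmap of slot 0.  slot_npages is an assumed,
 * caller-supplied value; the kernel writes one bit per page, rounded up
 * to a multiple of BITS_PER_LONG as computed above (64 assumed here):
 *
 *	struct kvm_dirty_log log = { .slot = 0 };
 *
 *	log.dirty_bitmap = calloc(1, ((slot_npages + 63) / 64) * 8);
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		perror("KVM_GET_DIRTY_LOG");
 */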
1663
1664 long kvm_arch_vm_ioctl(struct file *filp,
1665                        unsigned int ioctl, unsigned long arg)
1666 {
1667         struct kvm *kvm = filp->private_data;
1668         void __user *argp = (void __user *)arg;
1669         int r = -EINVAL;
1670         /*
1671          * This union makes it completely explicit to gcc-3.x
1672          * that these two variables' stack usage should be
1673          * combined, not added together.
1674          */
1675         union {
1676                 struct kvm_pit_state ps;
1677                 struct kvm_memory_alias alias;
1678         } u;
1679
1680         switch (ioctl) {
1681         case KVM_SET_TSS_ADDR:
1682                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1683                 if (r < 0)
1684                         goto out;
1685                 break;
1686         case KVM_SET_MEMORY_REGION: {
1687                 struct kvm_memory_region kvm_mem;
1688                 struct kvm_userspace_memory_region kvm_userspace_mem;
1689
1690                 r = -EFAULT;
1691                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1692                         goto out;
1693                 kvm_userspace_mem.slot = kvm_mem.slot;
1694                 kvm_userspace_mem.flags = kvm_mem.flags;
1695                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1696                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1697                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1698                 if (r)
1699                         goto out;
1700                 break;
1701         }
1702         case KVM_SET_NR_MMU_PAGES:
1703                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1704                 if (r)
1705                         goto out;
1706                 break;
1707         case KVM_GET_NR_MMU_PAGES:
1708                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1709                 break;
1710         case KVM_SET_MEMORY_ALIAS:
1711                 r = -EFAULT;
1712                 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1713                         goto out;
1714                 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1715                 if (r)
1716                         goto out;
1717                 break;
1718         case KVM_CREATE_IRQCHIP:
1719                 r = -ENOMEM;
1720                 kvm->arch.vpic = kvm_create_pic(kvm);
1721                 if (kvm->arch.vpic) {
1722                         r = kvm_ioapic_init(kvm);
1723                         if (r) {
1724                                 kfree(kvm->arch.vpic);
1725                                 kvm->arch.vpic = NULL;
1726                                 goto out;
1727                         }
1728                 } else
1729                         goto out;
1730                 break;
1731         case KVM_CREATE_PIT:
1732                 r = -ENOMEM;
1733                 kvm->arch.vpit = kvm_create_pit(kvm);
1734                 if (kvm->arch.vpit)
1735                         r = 0;
1736                 break;
1737         case KVM_IRQ_LINE: {
1738                 struct kvm_irq_level irq_event;
1739
1740                 r = -EFAULT;
1741                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1742                         goto out;
1743                 if (irqchip_in_kernel(kvm)) {
1744                         mutex_lock(&kvm->lock);
1745                         kvm_set_irq(kvm, irq_event.irq, irq_event.level);
1746                         mutex_unlock(&kvm->lock);
1747                         r = 0;
1748                 }
1749                 break;
1750         }
1751         case KVM_GET_IRQCHIP: {
1752                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1753                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1754
1755                 r = -ENOMEM;
1756                 if (!chip)
1757                         goto out;
1758                 r = -EFAULT;
1759                 if (copy_from_user(chip, argp, sizeof *chip))
1760                         goto get_irqchip_out;
1761                 r = -ENXIO;
1762                 if (!irqchip_in_kernel(kvm))
1763                         goto get_irqchip_out;
1764                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1765                 if (r)
1766                         goto get_irqchip_out;
1767                 r = -EFAULT;
1768                 if (copy_to_user(argp, chip, sizeof *chip))
1769                         goto get_irqchip_out;
1770                 r = 0;
1771         get_irqchip_out:
1772                 kfree(chip);
1773                 if (r)
1774                         goto out;
1775                 break;
1776         }
1777         case KVM_SET_IRQCHIP: {
1778                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1779                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1780
1781                 r = -ENOMEM;
1782                 if (!chip)
1783                         goto out;
1784                 r = -EFAULT;
1785                 if (copy_from_user(chip, argp, sizeof *chip))
1786                         goto set_irqchip_out;
1787                 r = -ENXIO;
1788                 if (!irqchip_in_kernel(kvm))
1789                         goto set_irqchip_out;
1790                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1791                 if (r)
1792                         goto set_irqchip_out;
1793                 r = 0;
1794         set_irqchip_out:
1795                 kfree(chip);
1796                 if (r)
1797                         goto out;
1798                 break;
1799         }
1800         case KVM_GET_PIT: {
1801                 r = -EFAULT;
1802                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
1803                         goto out;
1804                 r = -ENXIO;
1805                 if (!kvm->arch.vpit)
1806                         goto out;
1807                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
1808                 if (r)
1809                         goto out;
1810                 r = -EFAULT;
1811                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
1812                         goto out;
1813                 r = 0;
1814                 break;
1815         }
1816         case KVM_SET_PIT: {
1817                 r = -EFAULT;
1818                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
1819                         goto out;
1820                 r = -ENXIO;
1821                 if (!kvm->arch.vpit)
1822                         goto out;
1823                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
1824                 if (r)
1825                         goto out;
1826                 r = 0;
1827                 break;
1828         }
1829         default:
1830                 ;
1831         }
1832 out:
1833         return r;
1834 }
1835
1836 static void kvm_init_msr_list(void)
1837 {
1838         u32 dummy[2];
1839         unsigned i, j;
1840
1841         for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
1842                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
1843                         continue;
1844                 if (j < i)
1845                         msrs_to_save[j] = msrs_to_save[i];
1846                 j++;
1847         }
1848         num_msrs_to_save = j;
1849 }
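/*
 * The loop above compacts msrs_to_save in place: any MSR that cannot be
 * read on this host (rdmsr_safe() fails) is dropped and later entries
 * slide down.  E.g. if entry 2 of 4 is unreadable, entries 0, 1 and 3
 * end up at indices 0..2 and num_msrs_to_save becomes 3.
 */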
1850
1851 /*
1852  * Only the APIC needs an MMIO device hook, so take the shortcut now.
1853  */
1854 static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1855                                                 gpa_t addr, int len,
1856                                                 int is_write)
1857 {
1858         struct kvm_io_device *dev;
1859
1860         if (vcpu->arch.apic) {
1861                 dev = &vcpu->arch.apic->dev;
1862                 if (dev->in_range(dev, addr, len, is_write))
1863                         return dev;
1864         }
1865         return NULL;
1866 }
1867
1869 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1870                                                 gpa_t addr, int len,
1871                                                 int is_write)
1872 {
1873         struct kvm_io_device *dev;
1874
1875         dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
1876         if (dev == NULL)
1877                 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
1878                                           is_write);
1879         return dev;
1880 }
1881
1882 int emulator_read_std(unsigned long addr,
1883                              void *val,
1884                              unsigned int bytes,
1885                              struct kvm_vcpu *vcpu)
1886 {
1887         void *data = val;
1888         int r = X86EMUL_CONTINUE;
1889
1890         while (bytes) {
1891                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1892                 unsigned offset = addr & (PAGE_SIZE-1);
1893                 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
1894                 int ret;
1895
1896                 if (gpa == UNMAPPED_GVA) {
1897                         r = X86EMUL_PROPAGATE_FAULT;
1898                         goto out;
1899                 }
1900                 ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
1901                 if (ret < 0) {
1902                         r = X86EMUL_UNHANDLEABLE;
1903                         goto out;
1904                 }
1905
1906                 bytes -= tocopy;
1907                 data += tocopy;
1908                 addr += tocopy;
1909         }
1910 out:
1911         return r;
1912 }
1913 EXPORT_SYMBOL_GPL(emulator_read_std);
1914
1915 static int emulator_read_emulated(unsigned long addr,
1916                                   void *val,
1917                                   unsigned int bytes,
1918                                   struct kvm_vcpu *vcpu)
1919 {
1920         struct kvm_io_device *mmio_dev;
1921         gpa_t                 gpa;
1922
1923         if (vcpu->mmio_read_completed) {
1924                 memcpy(val, vcpu->mmio_data, bytes);
1925                 vcpu->mmio_read_completed = 0;
1926                 return X86EMUL_CONTINUE;
1927         }
1928
1929         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1930
1931         /* For APIC access vmexit */
1932         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1933                 goto mmio;
1934
1935         if (emulator_read_std(addr, val, bytes, vcpu)
1936                         == X86EMUL_CONTINUE)
1937                 return X86EMUL_CONTINUE;
1938         if (gpa == UNMAPPED_GVA)
1939                 return X86EMUL_PROPAGATE_FAULT;
1940
1941 mmio:
1942         /*
1943          * Is this MMIO handled locally?
1944          */
1945         mutex_lock(&vcpu->kvm->lock);
1946         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
1947         if (mmio_dev) {
1948                 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
1949                 mutex_unlock(&vcpu->kvm->lock);
1950                 return X86EMUL_CONTINUE;
1951         }
1952         mutex_unlock(&vcpu->kvm->lock);
1953
1954         vcpu->mmio_needed = 1;
1955         vcpu->mmio_phys_addr = gpa;
1956         vcpu->mmio_size = bytes;
1957         vcpu->mmio_is_write = 0;
1958
1959         return X86EMUL_UNHANDLEABLE;
1960 }
1961
1962 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1963                           const void *val, int bytes)
1964 {
1965         int ret;
1966
1967         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
1968         if (ret < 0)
1969                 return 0;
1970         kvm_mmu_pte_write(vcpu, gpa, val, bytes);
1971         return 1;
1972 }
1973
1974 static int emulator_write_emulated_onepage(unsigned long addr,
1975                                            const void *val,
1976                                            unsigned int bytes,
1977                                            struct kvm_vcpu *vcpu)
1978 {
1979         struct kvm_io_device *mmio_dev;
1980         gpa_t                 gpa;
1981
1982         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1983
1984         if (gpa == UNMAPPED_GVA) {
1985                 kvm_inject_page_fault(vcpu, addr, 2);
1986                 return X86EMUL_PROPAGATE_FAULT;
1987         }
1988
1989         /* For APIC access vmexit */
1990         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1991                 goto mmio;
1992
1993         if (emulator_write_phys(vcpu, gpa, val, bytes))
1994                 return X86EMUL_CONTINUE;
1995
1996 mmio:
1997         /*
1998          * Is this MMIO handled locally?
1999          */
2000         mutex_lock(&vcpu->kvm->lock);
2001         mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
2002         if (mmio_dev) {
2003                 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
2004                 mutex_unlock(&vcpu->kvm->lock);
2005                 return X86EMUL_CONTINUE;
2006         }
2007         mutex_unlock(&vcpu->kvm->lock);
2008
2009         vcpu->mmio_needed = 1;
2010         vcpu->mmio_phys_addr = gpa;
2011         vcpu->mmio_size = bytes;
2012         vcpu->mmio_is_write = 1;
2013         memcpy(vcpu->mmio_data, val, bytes);
2014
2015         return X86EMUL_CONTINUE;
2016 }
2017
2018 int emulator_write_emulated(unsigned long addr,
2019                                    const void *val,
2020                                    unsigned int bytes,
2021                                    struct kvm_vcpu *vcpu)
2022 {
2023         /* Crossing a page boundary? */
2024         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
2025                 int rc, now;
2026
2027                 now = -addr & ~PAGE_MASK;
2028                 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
2029                 if (rc != X86EMUL_CONTINUE)
2030                         return rc;
2031                 addr += now;
2032                 val += now;
2033                 bytes -= now;
2034         }
2035         return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
2036 }
2037 EXPORT_SYMBOL_GPL(emulator_write_emulated);
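/*
 * Worked example of the split above, assuming 4K pages and illustrative
 * values addr = 0x2ffe, bytes = 4: ((addr + 3) ^ addr) has bits set
 * within PAGE_MASK, so the write straddles a boundary.  Then
 * now = -addr & ~PAGE_MASK = 2, so two bytes are written at 0x2ffe and
 * the remaining two at 0x3000 by the trailing onepage call.
 */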
2038
2039 static int emulator_cmpxchg_emulated(unsigned long addr,
2040                                      const void *old,
2041                                      const void *new,
2042                                      unsigned int bytes,
2043                                      struct kvm_vcpu *vcpu)
2044 {
2045         static int reported;
2046
2047         if (!reported) {
2048                 reported = 1;
2049                 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2050         }
2051 #ifndef CONFIG_X86_64
2052         /* guest cmpxchg8b has to be emulated atomically */
2053         if (bytes == 8) {
2054                 gpa_t gpa;
2055                 struct page *page;
2056                 char *kaddr;
2057                 u64 val;
2058
2059                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2060
2061                 if (gpa == UNMAPPED_GVA ||
2062                    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2063                         goto emul_write;
2064
2065                 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
2066                         goto emul_write;
2067
2068                 val = *(u64 *)new;
2069
2070                 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2071
2072                 kaddr = kmap_atomic(page, KM_USER0);
2073                 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
2074                 kunmap_atomic(kaddr, KM_USER0);
2075                 kvm_release_page_dirty(page);
2076         }
2077 emul_write:
2078 #endif
2079
2080         return emulator_write_emulated(addr, new, bytes, vcpu);
2081 }
2082
2083 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
2084 {
2085         return kvm_x86_ops->get_segment_base(vcpu, seg);
2086 }
2087
2088 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2089 {
2090         kvm_mmu_invlpg(vcpu, address);
2091         return X86EMUL_CONTINUE;
2092 }
2093
2094 int emulate_clts(struct kvm_vcpu *vcpu)
2095 {
2096         KVMTRACE_0D(CLTS, vcpu, handler);
2097         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2098         return X86EMUL_CONTINUE;
2099 }
2100
2101 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2102 {
2103         struct kvm_vcpu *vcpu = ctxt->vcpu;
2104
2105         switch (dr) {
2106         case 0 ... 3:
2107                 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2108                 return X86EMUL_CONTINUE;
2109         default:
2110                 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2111                 return X86EMUL_UNHANDLEABLE;
2112         }
2113 }
2114
2115 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2116 {
2117         unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2118         int exception;
2119
2120         kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2121         if (exception) {
2122                 /* FIXME: better handling */
2123                 return X86EMUL_UNHANDLEABLE;
2124         }
2125         return X86EMUL_CONTINUE;
2126 }
2127
2128 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2129 {
2130         u8 opcodes[4];
2131         unsigned long rip = kvm_rip_read(vcpu);
2132         unsigned long rip_linear;
2133
2134         if (!printk_ratelimit())
2135                 return;
2136
2137         rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2138
2139         emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
2140
2141         printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2142                context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2143 }
2144 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2145
2146 static struct x86_emulate_ops emulate_ops = {
2147         .read_std            = emulator_read_std,
2148         .read_emulated       = emulator_read_emulated,
2149         .write_emulated      = emulator_write_emulated,
2150         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
2151 };
2152
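/*
 * The instruction emulator pokes vcpu->arch.regs directly, so the helper
 * below reads a few registers to force the whole file into the cache (on
 * VMX, for instance, only RSP and RIP actually need to be pulled out of
 * the VMCS) and then marks everything dirty so the emulator's writes are
 * flushed back to hardware on the next entry.
 */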
2153 static void cache_all_regs(struct kvm_vcpu *vcpu)
2154 {
2155         kvm_register_read(vcpu, VCPU_REGS_RAX);
2156         kvm_register_read(vcpu, VCPU_REGS_RSP);
2157         kvm_register_read(vcpu, VCPU_REGS_RIP);
2158         vcpu->arch.regs_dirty = ~0;
2159 }
2160
2161 int emulate_instruction(struct kvm_vcpu *vcpu,
2162                         struct kvm_run *run,
2163                         unsigned long cr2,
2164                         u16 error_code,
2165                         int emulation_type)
2166 {
2167         int r;
2168         struct decode_cache *c;
2169
2170         kvm_clear_exception_queue(vcpu);
2171         vcpu->arch.mmio_fault_cr2 = cr2;
2172         /*
2173          * TODO: fix x86_emulate.c to use guest_read/write_register
2174          * instead of direct ->regs accesses; this can save hundreds of
2175          * cycles on Intel for instructions that don't read/change RSP,
2176          * for example.
2177          */
2178         cache_all_regs(vcpu);
2179
2180         vcpu->mmio_is_write = 0;
2181         vcpu->arch.pio.string = 0;
2182
2183         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2184                 int cs_db, cs_l;
2185                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2186
2187                 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2188                 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2189                 vcpu->arch.emulate_ctxt.mode =
2190                         (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2191                         ? X86EMUL_MODE_REAL : cs_l
2192                         ? X86EMUL_MODE_PROT64 : cs_db
2193                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2194
2195                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2196
2197                 /* Reject instructions other than VMCALL/VMMCALL when
2198                  * trying to emulate an invalid opcode */
2199                 c = &vcpu->arch.emulate_ctxt.decode;
2200                 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2201                     (!(c->twobyte && c->b == 0x01 &&
2202                       (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2203                        c->modrm_mod == 3 && c->modrm_rm == 1)))
2204                         return EMULATE_FAIL;
2205
2206                 ++vcpu->stat.insn_emulation;
2207                 if (r) {
2208                         ++vcpu->stat.insn_emulation_fail;
2209                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2210                                 return EMULATE_DONE;
2211                         return EMULATE_FAIL;
2212                 }
2213         }
2214
2215         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2216
2217         if (vcpu->arch.pio.string)
2218                 return EMULATE_DO_MMIO;
2219
2220         if ((r || vcpu->mmio_is_write) && run) {
2221                 run->exit_reason = KVM_EXIT_MMIO;
2222                 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2223                 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2224                 run->mmio.len = vcpu->mmio_size;
2225                 run->mmio.is_write = vcpu->mmio_is_write;
2226         }
2227
2228         if (r) {
2229                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2230                         return EMULATE_DONE;
2231                 if (!vcpu->mmio_needed) {
2232                         kvm_report_emulation_failure(vcpu, "mmio");
2233                         return EMULATE_FAIL;
2234                 }
2235                 return EMULATE_DO_MMIO;
2236         }
2237
2238         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2239
2240         if (vcpu->mmio_is_write) {
2241                 vcpu->mmio_needed = 0;
2242                 return EMULATE_DO_MMIO;
2243         }
2244
2245         return EMULATE_DONE;
2246 }
2247 EXPORT_SYMBOL_GPL(emulate_instruction);
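/*
 * Callers of emulate_instruction() see one of three outcomes:
 * EMULATE_DONE (handled entirely in the kernel), EMULATE_DO_MMIO (the
 * exit state above was staged in kvm_run and userspace must complete the
 * MMIO or string-PIO transaction), or EMULATE_FAIL (decode or execution
 * failed and unprotecting the faulting page did not help).
 */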
2248
2249 static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
2250 {
2251         int i;
2252
2253         for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
2254                 if (vcpu->arch.pio.guest_pages[i]) {
2255                         kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
2256                         vcpu->arch.pio.guest_pages[i] = NULL;
2257                 }
2258 }
2259
2260 static int pio_copy_data(struct kvm_vcpu *vcpu)
2261 {
2262         void *p = vcpu->arch.pio_data;
2263         void *q;
2264         unsigned bytes;
2265         int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
2266
2267         q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
2268                  PAGE_KERNEL);
2269         if (!q) {
2270                 free_pio_guest_pages(vcpu);
2271                 return -ENOMEM;
2272         }
2273         q += vcpu->arch.pio.guest_page_offset;
2274         bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2275         if (vcpu->arch.pio.in)
2276                 memcpy(q, p, bytes);
2277         else
2278                 memcpy(p, q, bytes);
2279         q -= vcpu->arch.pio.guest_page_offset;
2280         vunmap(q);
2281         free_pio_guest_pages(vcpu);
2282         return 0;
2283 }
2284
2285 int complete_pio(struct kvm_vcpu *vcpu)
2286 {
2287         struct kvm_pio_request *io = &vcpu->arch.pio;
2288         long delta;
2289         int r;
2290         unsigned long val;
2291
2292         if (!io->string) {
2293                 if (io->in) {
2294                         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2295                         memcpy(&val, vcpu->arch.pio_data, io->size);
2296                         kvm_register_write(vcpu, VCPU_REGS_RAX, val);
2297                 }
2298         } else {
2299                 if (io->in) {
2300                         r = pio_copy_data(vcpu);
2301                         if (r)
2302                                 return r;
2303                 }
2304
2305                 delta = 1;
2306                 if (io->rep) {
2307                         delta *= io->cur_count;
2308                         /*
2309                          * The size of the register should really depend on
2310                          * the current address size.
2311                          */
2312                         val = kvm_register_read(vcpu, VCPU_REGS_RCX);
2313                         val -= delta;
2314                         kvm_register_write(vcpu, VCPU_REGS_RCX, val);
2315                 }
2316                 if (io->down)
2317                         delta = -delta;
2318                 delta *= io->size;
2319                 if (io->in) {
2320                         val = kvm_register_read(vcpu, VCPU_REGS_RDI);
2321                         val += delta;
2322                         kvm_register_write(vcpu, VCPU_REGS_RDI, val);
2323                 } else {
2324                         val = kvm_register_read(vcpu, VCPU_REGS_RSI);
2325                         val += delta;
2326                         kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2327                 }
2328         }
2329
2330         io->count -= io->cur_count;
2331         io->cur_count = 0;
2332
2333         return 0;
2334 }
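/*
 * Example of the adjustment above, with assumed values: a "rep insw"
 * (io->size = 2, io->in = 1, io->down = 0) that just transferred
 * io->cur_count = 3 words decrements RCX by 3, computes
 * delta = 3 * 2 = 6 and advances RDI by 6 bytes, exactly the data
 * pio_copy_data() copied into guest memory.
 */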
2335
2336 static void kernel_pio(struct kvm_io_device *pio_dev,
2337                        struct kvm_vcpu *vcpu,
2338                        void *pd)
2339 {
2340         /* TODO: String I/O for in-kernel devices */
2341
2342         mutex_lock(&vcpu->kvm->lock);
2343         if (vcpu->arch.pio.in)
2344                 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2345                                   vcpu->arch.pio.size,
2346                                   pd);
2347         else
2348                 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2349                                    vcpu->arch.pio.size,
2350                                    pd);
2351         mutex_unlock(&vcpu->kvm->lock);
2352 }
2353
2354 static void pio_string_write(struct kvm_io_device *pio_dev,
2355                              struct kvm_vcpu *vcpu)
2356 {
2357         struct kvm_pio_request *io = &vcpu->arch.pio;
2358         void *pd = vcpu->arch.pio_data;
2359         int i;
2360
2361         mutex_lock(&vcpu->kvm->lock);
2362         for (i = 0; i < io->cur_count; i++) {
2363                 kvm_iodevice_write(pio_dev, io->port,
2364                                    io->size,
2365                                    pd);
2366                 pd += io->size;
2367         }
2368         mutex_unlock(&vcpu->kvm->lock);
2369 }
2370
2371 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2372                                                gpa_t addr, int len,
2373                                                int is_write)
2374 {
2375         return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
2376 }
2377
2378 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2379                   int size, unsigned port)
2380 {
2381         struct kvm_io_device *pio_dev;
2382         unsigned long val;
2383
2384         vcpu->run->exit_reason = KVM_EXIT_IO;
2385         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2386         vcpu->run->io.size = vcpu->arch.pio.size = size;
2387         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2388         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2389         vcpu->run->io.port = vcpu->arch.pio.port = port;
2390         vcpu->arch.pio.in = in;
2391         vcpu->arch.pio.string = 0;
2392         vcpu->arch.pio.down = 0;
2393         vcpu->arch.pio.guest_page_offset = 0;
2394         vcpu->arch.pio.rep = 0;
2395
2396         if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2397                 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2398                             handler);
2399         else
2400                 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2401                             handler);
2402
2403         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2404         memcpy(vcpu->arch.pio_data, &val, 4);
2405
2406         kvm_x86_ops->skip_emulated_instruction(vcpu);
2407
2408         pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
2409         if (pio_dev) {
2410                 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2411                 complete_pio(vcpu);
2412                 return 1;
2413         }
2414         return 0;
2415 }
2416 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2417
2418 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2419                   int size, unsigned long count, int down,
2420                   gva_t address, int rep, unsigned port)
2421 {
2422         unsigned now, in_page;
2423         int i, ret = 0;
2424         int nr_pages = 1;
2425         struct page *page;
2426         struct kvm_io_device *pio_dev;
2427
2428         vcpu->run->exit_reason = KVM_EXIT_IO;
2429         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2430         vcpu->run->io.size = vcpu->arch.pio.size = size;
2431         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2432         vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2433         vcpu->run->io.port = vcpu->arch.pio.port = port;
2434         vcpu->arch.pio.in = in;
2435         vcpu->arch.pio.string = 1;
2436         vcpu->arch.pio.down = down;
2437         vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2438         vcpu->arch.pio.rep = rep;
2439
2440         if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2441                 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2442                             handler);
2443         else
2444                 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2445                             handler);
2446
2447         if (!count) {
2448                 kvm_x86_ops->skip_emulated_instruction(vcpu);
2449                 return 1;
2450         }
2451
2452         if (!down)
2453                 in_page = PAGE_SIZE - offset_in_page(address);
2454         else
2455                 in_page = offset_in_page(address) + size;
2456         now = min(count, (unsigned long)in_page / size);
2457         if (!now) {
2458                 /*
2459                  * String I/O straddles page boundary.  Pin two guest pages
2460                  * so that we satisfy atomicity constraints.  Do just one
2461                  * transaction to avoid complexity.
2462                  */
2463                 nr_pages = 2;
2464                 now = 1;
2465         }
2466         if (down) {
2467                 /*
2468                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
2469                  */
2470                 pr_unimpl(vcpu, "guest string pio down\n");
2471                 kvm_inject_gp(vcpu, 0);
2472                 return 1;
2473         }
2474         vcpu->run->io.count = now;
2475         vcpu->arch.pio.cur_count = now;
2476
2477         if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2478                 kvm_x86_ops->skip_emulated_instruction(vcpu);
2479
2480         for (i = 0; i < nr_pages; ++i) {
2481                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
2482                 vcpu->arch.pio.guest_pages[i] = page;
2483                 if (!page) {
2484                         kvm_inject_gp(vcpu, 0);
2485                         free_pio_guest_pages(vcpu);
2486                         return 1;
2487                 }
2488         }
2489
2490         pio_dev = vcpu_find_pio_dev(vcpu, port,
2491                                     vcpu->arch.pio.cur_count,
2492                                     !vcpu->arch.pio.in);
2493         if (!vcpu->arch.pio.in) {
2494                 /* string PIO write */
2495                 ret = pio_copy_data(vcpu);
2496                 if (ret >= 0 && pio_dev) {
2497                         pio_string_write(pio_dev, vcpu);
2498                         complete_pio(vcpu);
2499                         if (vcpu->arch.pio.count == 0)
2500                                 ret = 1;
2501                 }
2502         } else if (pio_dev)
2503                 pr_unimpl(vcpu, "no string pio read support yet, "
2504                        "port %x size %d count %ld\n",
2505                         port, size, count);
2506
2507         return ret;
2508 }
2509 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2510
2511 int kvm_arch_init(void *opaque)
2512 {
2513         int r;
2514         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2515
2516         if (kvm_x86_ops) {
2517                 printk(KERN_ERR "kvm: already loaded the other module\n");
2518                 r = -EEXIST;
2519                 goto out;
2520         }
2521
2522         if (!ops->cpu_has_kvm_support()) {
2523                 printk(KERN_ERR "kvm: no hardware support\n");
2524                 r = -EOPNOTSUPP;
2525                 goto out;
2526         }
2527         if (ops->disabled_by_bios()) {
2528                 printk(KERN_ERR "kvm: disabled by bios\n");
2529                 r = -EOPNOTSUPP;
2530                 goto out;
2531         }
2532
2533         r = kvm_mmu_module_init();
2534         if (r)
2535                 goto out;
2536
2537         kvm_init_msr_list();
2538
2539         kvm_x86_ops = ops;
2540         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2541         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2542         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2543                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
2544         return 0;
2545
2546 out:
2547         return r;
2548 }
2549
2550 void kvm_arch_exit(void)
2551 {
2552         kvm_x86_ops = NULL;
2553         kvm_mmu_module_exit();
2554 }
2555
2556 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2557 {
2558         ++vcpu->stat.halt_exits;
2559         KVMTRACE_0D(HLT, vcpu, handler);
2560         if (irqchip_in_kernel(vcpu->kvm)) {
2561                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
2562                 return 1;
2563         } else {
2564                 vcpu->run->exit_reason = KVM_EXIT_HLT;
2565                 return 0;
2566         }
2567 }
2568 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2569
2570 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2571                            unsigned long a1)
2572 {
2573         if (is_long_mode(vcpu))
2574                 return a0;
2575         else
2576                 return a0 | ((gpa_t)a1 << 32);
2577 }
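/*
 * E.g., with illustrative values: a 32-bit guest passing a0 = 0x1000 and
 * a1 = 0x2 yields the guest-physical address 0x200001000, while a
 * long-mode guest passes the full 64-bit address in a0 and a1 is unused.
 */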
2578
2579 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2580 {
2581         unsigned long nr, a0, a1, a2, a3, ret;
2582         int r = 1;
2583
2584         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
2585         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
2586         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
2587         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
2588         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
2589
2590         KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2591
2592         if (!is_long_mode(vcpu)) {
2593                 nr &= 0xFFFFFFFF;
2594                 a0 &= 0xFFFFFFFF;
2595                 a1 &= 0xFFFFFFFF;
2596                 a2 &= 0xFFFFFFFF;
2597                 a3 &= 0xFFFFFFFF;
2598         }
2599
2600         switch (nr) {
2601         case KVM_HC_VAPIC_POLL_IRQ:
2602                 ret = 0;
2603                 break;
2604         case KVM_HC_MMU_OP:
2605                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2606                 break;
2607         default:
2608                 ret = -KVM_ENOSYS;
2609                 break;
2610         }
2611         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
2612         ++vcpu->stat.hypercalls;
2613         return r;
2614 }
2615 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
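/*
 * Guest-side sketch of the calling convention consumed above: the
 * hypercall number goes in RAX, arguments in RBX/RCX/RDX/RSI, and the
 * return value comes back in RAX.  "vmcall" is the Intel mnemonic; an
 * AMD guest would use "vmmcall", and kvm_fix_hypercall() below patches
 * in whichever form the hardware expects.  The helper name is
 * illustrative, not part of this file:
 *
 *	static unsigned long example_kvm_hypercall0(unsigned int nr)
 *	{
 *		unsigned long ret;
 *
 *		asm volatile("vmcall"
 *			     : "=a"(ret)
 *			     : "a"(nr)
 *			     : "memory");
 *		return ret;
 *	}
 *
 * example_kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ) would return 0 here.
 */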
2616
2617 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2618 {
2619         char instruction[3];
2620         int ret = 0;
2621         unsigned long rip = kvm_rip_read(vcpu);
2622
2624         /*
2625          * Blow out the MMU so that no other VCPU has an active mapping;
2626          * this ensures that the updated hypercall appears atomically
2627          * across all VCPUs.
2628          */
2629         kvm_mmu_zap_all(vcpu->kvm);
2630
2631         kvm_x86_ops->patch_hypercall(vcpu, instruction);
2632         if (emulator_write_emulated(rip, instruction, 3, vcpu)
2633             != X86EMUL_CONTINUE)
2634                 ret = -EFAULT;
2635
2636         return ret;
2637 }
2638
2639 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2640 {
2641         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2642 }
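/*
 * E.g., with illustrative values, mk_cr_64(0x0000000100000011, 0x80050033)
 * keeps the upper half and replaces the lower one, giving
 * 0x0000000180050033; this backs the 32-bit mov-to-CR paths in
 * realmode_set_cr() below.
 */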
2643
2644 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2645 {
2646         struct descriptor_table dt = { limit, base };
2647
2648         kvm_x86_ops->set_gdt(vcpu, &dt);
2649 }
2650
2651 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2652 {
2653         struct descriptor_table dt = { limit, base };
2654
2655         kvm_x86_ops->set_idt(vcpu, &dt);
2656 }
2657
2658 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2659                    unsigned long *rflags)
2660 {
2661         kvm_lmsw(vcpu, msw);
2662         *rflags = kvm_x86_ops->get_rflags(vcpu);
2663 }
2664
2665 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2666 {
2667         unsigned long value;
2668
2669         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2670         switch (cr) {
2671         case 0:
2672                 value = vcpu->arch.cr0;
2673                 break;
2674         case 2:
2675                 value = vcpu->arch.cr2;
2676                 break;
2677         case 3:
2678                 value = vcpu->arch.cr3;
2679                 break;
2680         case 4:
2681                 value = vcpu->arch.cr4;
2682                 break;
2683         case 8:
2684                 value = kvm_get_cr8(vcpu);
2685                 break;
2686         default:
2687                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2688                 return 0;
2689         }
2690         KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2691                     (u32)((u64)value >> 32), handler);
2692
2693         return value;
2694 }
2695
2696 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2697                      unsigned long *rflags)
2698 {
2699         KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2700                     (u32)((u64)val >> 32), handler);
2701
2702         switch (cr) {
2703         case 0:
2704                 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2705                 *rflags = kvm_x86_ops->get_rflags(vcpu);
2706                 break;
2707         case 2:
2708                 vcpu->arch.cr2 = val;
2709                 break;
2710         case 3:
2711                 kvm_set_cr3(vcpu, val);
2712                 break;
2713         case 4:
2714                 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
2715                 break;
2716         case 8:
2717                 kvm_set_cr8(vcpu, val & 0xfUL);
2718                 break;
2719         default:
2720                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2721         }
2722 }
2723
2724 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2725 {
2726         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2727         int j, nent = vcpu->arch.cpuid_nent;
2728
2729         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2730         /* when no next entry is found, the current entry[i] is reselected */
2731         for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
2732                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2733                 if (ej->function == e->function) {
2734                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2735                         return j;
2736                 }
2737         }
2738         return 0; /* silence gcc, even though control never reaches here */
2739 }
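/*
 * Stateful leaves (e.g. CPUID 2 on older Intel CPUs, whose AL byte says
 * how many times the leaf must be re-executed) are modelled as several
 * entries sharing one function number; the helper above rotates the
 * KVM_CPUID_FLAG_STATE_READ_NEXT mark through them so successive guest
 * CPUID executions walk the set in order.
 */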
2740
2741 /* find an entry with matching function, matching index (if needed), and that
2742  * should be read next (if it's stateful) */
2743 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2744         u32 function, u32 index)
2745 {
2746         if (e->function != function)
2747                 return 0;
2748         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2749                 return 0;
2750         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2751                 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2752                 return 0;
2753         return 1;
2754 }
2755
2756 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2757 {
2758         int i;
2759         u32 function, index;
2760         struct kvm_cpuid_entry2 *e, *best;
2761
2762         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
2763         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
2764         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
2765         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
2766         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
2767         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
2768         best = NULL;
2769         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2770                 e = &vcpu->arch.cpuid_entries[i];
2771                 if (is_matching_cpuid_entry(e, function, index)) {
2772                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2773                                 move_to_next_stateful_cpuid_entry(vcpu, i);
2774                         best = e;
2775                         break;
2776                 }
2777                 /*
2778                  * Is e->function in the same range, i.e. both basic or both extended?
2779                  */
2780                 if (((e->function ^ function) & 0x80000000) == 0)
2781                         if (!best || e->function > best->function)
2782                                 best = e;
2783         }
2784         if (best) {
2785                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
2786                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
2787                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
2788                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
2789         }
2790         kvm_x86_ops->skip_emulated_instruction(vcpu);
2791         KVMTRACE_5D(CPUID, vcpu, function,
2792                     (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
2793                     (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
2794                     (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
2795                     (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
2796 }
2797 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
2798
2799 /*
2800  * Check if userspace requested an interrupt window, and that the
2801  * interrupt window is open.
2802  *
2803  * No need to exit to userspace if we already have an interrupt queued.
2804  */
2805 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2806                                           struct kvm_run *kvm_run)
2807 {
2808         return (!vcpu->arch.irq_summary &&
2809                 kvm_run->request_interrupt_window &&
2810                 vcpu->arch.interrupt_window_open &&
2811                 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2812 }
2813
2814 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2815                               struct kvm_run *kvm_run)
2816 {
2817         kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2818         kvm_run->cr8 = kvm_get_cr8(vcpu);
2819         kvm_run->apic_base = kvm_get_apic_base(vcpu);
2820         if (irqchip_in_kernel(vcpu->kvm))
2821                 kvm_run->ready_for_interrupt_injection = 1;
2822         else
2823                 kvm_run->ready_for_interrupt_injection =
2824                                         (vcpu->arch.interrupt_window_open &&
2825                                          vcpu->arch.irq_summary == 0);
2826 }
2827
2828 static void vapic_enter(struct kvm_vcpu *vcpu)
2829 {
2830         struct kvm_lapic *apic = vcpu->arch.apic;
2831         struct page *page;
2832
2833         if (!apic || !apic->vapic_addr)
2834                 return;
2835
2836         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
2837
2838         vcpu->arch.apic->vapic_page = page;
2839 }
2840
2841 static void vapic_exit(struct kvm_vcpu *vcpu)
2842 {
2843         struct kvm_lapic *apic = vcpu->arch.apic;
2844
2845         if (!apic || !apic->vapic_addr)
2846                 return;
2847
2848         down_read(&vcpu->kvm->slots_lock);
2849         kvm_release_page_dirty(apic->vapic_page);
2850         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
2851         up_read(&vcpu->kvm->slots_lock);
2852 }
2853
2854 static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2855 {
2856         int r;
2857
2858         if (vcpu->requests)
2859                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
2860                         kvm_mmu_unload(vcpu);
2861
2862         r = kvm_mmu_reload(vcpu);
2863         if (unlikely(r))
2864                 goto out;
2865
2866         if (vcpu->requests) {
2867                 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2868                         __kvm_migrate_timers(vcpu);
2869                 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
2870                         kvm_mmu_sync_roots(vcpu);
2871                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2872                         kvm_x86_ops->tlb_flush(vcpu);
2873                 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
2874                                        &vcpu->requests)) {
2875                         kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
2876                         r = 0;
2877                         goto out;
2878                 }
2879                 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
2880                         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2881                         r = 0;
2882                         goto out;
2883                 }
2884         }
2885
2886         clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
2887         kvm_inject_pending_timer_irqs(vcpu);
2888
2889         preempt_disable();
2890
2891         kvm_x86_ops->prepare_guest_switch(vcpu);
2892         kvm_load_guest_fpu(vcpu);
2893
2894         local_irq_disable();
2895
2896         if (vcpu->requests || need_resched() || signal_pending(current)) {
2897                 local_irq_enable();
2898                 preempt_enable();
2899                 r = 1;
2900                 goto out;
2901         }
2902
2903         if (vcpu->guest_debug.enabled)
2904                 kvm_x86_ops->guest_debug_pre(vcpu);
2905
2906         vcpu->guest_mode = 1;
2907         /*
2908          * Make sure that guest_mode assignment won't happen after
2909          * testing the pending IRQ vector bitmap.
2910          */
2911         smp_wmb();
2912
2913         if (vcpu->arch.exception.pending)
2914                 __queue_exception(vcpu);
2915         else if (irqchip_in_kernel(vcpu->kvm))
2916                 kvm_x86_ops->inject_pending_irq(vcpu);
2917         else
2918                 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2919
2920         kvm_lapic_sync_to_vapic(vcpu);
2921
2922         up_read(&vcpu->kvm->slots_lock);
2923
2924         kvm_guest_enter();
2925
2927         KVMTRACE_0D(VMENTRY, vcpu, entryexit);
2928         kvm_x86_ops->run(vcpu, kvm_run);
2929
2930         vcpu->guest_mode = 0;
2931         local_irq_enable();
2932
2933         ++vcpu->stat.exits;
2934
2935         /*
2936          * We must have an instruction between local_irq_enable() and
2937          * kvm_guest_exit(), so the timer interrupt isn't delayed by
2938          * the interrupt shadow.  The stat.exits increment will do nicely.
2939          * But we need to prevent reordering, hence this barrier():
2940          */
2941         barrier();
2942
2943         kvm_guest_exit();
2944
2945         preempt_enable();
2946
2947         down_read(&vcpu->kvm->slots_lock);
2948
2949         /*
2950          * Profile KVM exit RIPs:
2951          */
2952         if (unlikely(prof_on == KVM_PROFILING)) {
2953                 unsigned long rip = kvm_rip_read(vcpu);
2954                 profile_hit(KVM_PROFILING, (void *)rip);
2955         }
2956
2957         if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
2958                 vcpu->arch.exception.pending = false;
2959
2960         kvm_lapic_sync_from_vapic(vcpu);
2961
2962         r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2963 out:
2964         return r;
2965 }
2966
2967 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2968 {
2969         int r;
2970
2971         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
2972                 pr_debug("vcpu %d received sipi with vector # %x\n",
2973                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
2974                 kvm_lapic_reset(vcpu);
2975                 r = kvm_x86_ops->vcpu_reset(vcpu);
2976                 if (r)
2977                         return r;
2978                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2979         }
2980
2981         down_read(&vcpu->kvm->slots_lock);
2982         vapic_enter(vcpu);
2983
2984         r = 1;
2985         while (r > 0) {
2986                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
2987                         r = vcpu_enter_guest(vcpu, kvm_run);
2988                 else {
2989                         up_read(&vcpu->kvm->slots_lock);
2990                         kvm_vcpu_block(vcpu);
2991                         down_read(&vcpu->kvm->slots_lock);
2992                         if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
2993                                 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
2994                                         vcpu->arch.mp_state =
2995                                                         KVM_MP_STATE_RUNNABLE;
2996                         if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
2997                                 r = -EINTR;
2998                 }
2999
3000                 if (r > 0) {
3001                         if (dm_request_for_irq_injection(vcpu, kvm_run)) {
3002                                 r = -EINTR;
3003                                 kvm_run->exit_reason = KVM_EXIT_INTR;
3004                                 ++vcpu->stat.request_irq_exits;
3005                         }
3006                         if (signal_pending(current)) {
3007                                 r = -EINTR;
3008                                 kvm_run->exit_reason = KVM_EXIT_INTR;
3009                                 ++vcpu->stat.signal_exits;
3010                         }
3011                         if (need_resched()) {
3012                                 up_read(&vcpu->kvm->slots_lock);
3013                                 kvm_resched(vcpu);
3014                                 down_read(&vcpu->kvm->slots_lock);
3015                         }
3016                 }
3017         }
3018
3019         up_read(&vcpu->kvm->slots_lock);
3020         post_kvm_run_save(vcpu, kvm_run);
3021
3022         vapic_exit(vcpu);
3023
3024         return r;
3025 }
3026
3027 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3028 {
3029         int r;
3030         sigset_t sigsaved;
3031
3032         vcpu_load(vcpu);
3033
3034         if (vcpu->sigset_active)
3035                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3036
3037         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
3038                 kvm_vcpu_block(vcpu);
3039                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
3040                 r = -EAGAIN;
3041                 goto out;
3042         }
3043
3044         /* re-sync apic's tpr */
3045         if (!irqchip_in_kernel(vcpu->kvm))
3046                 kvm_set_cr8(vcpu, kvm_run->cr8);
3047
3048         if (vcpu->arch.pio.cur_count) {
3049                 r = complete_pio(vcpu);
3050                 if (r)
3051                         goto out;
3052         }
3053 #ifdef CONFIG_HAS_IOMEM
3054         if (vcpu->mmio_needed) {
3055                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
3056                 vcpu->mmio_read_completed = 1;
3057                 vcpu->mmio_needed = 0;
3058
3059                 down_read(&vcpu->kvm->slots_lock);
3060                 r = emulate_instruction(vcpu, kvm_run,
3061                                         vcpu->arch.mmio_fault_cr2, 0,
3062                                         EMULTYPE_NO_DECODE);
3063                 up_read(&vcpu->kvm->slots_lock);
3064                 if (r == EMULATE_DO_MMIO) {
3065                         /*
3066                          * Read-modify-write.  Back to userspace.
3067                          */
3068                         r = 0;
3069                         goto out;
3070                 }
3071         }
3072 #endif
3073         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
3074                 kvm_register_write(vcpu, VCPU_REGS_RAX,
3075                                      kvm_run->hypercall.ret);
3076
3077         r = __vcpu_run(vcpu, kvm_run);
3078
3079 out:
3080         if (vcpu->sigset_active)
3081                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3082
3083         vcpu_put(vcpu);
3084         return r;
3085 }
3086
3087 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3088 {
3089         vcpu_load(vcpu);
3090
3091         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3092         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3093         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3094         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3095         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3096         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3097         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3098         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3099 #ifdef CONFIG_X86_64
3100         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
3101         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
3102         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
3103         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
3104         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
3105         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
3106         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
3107         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
3108 #endif
3109
3110         regs->rip = kvm_rip_read(vcpu);
3111         regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3112
3113         /*
3114          * Don't leak debug flags in case they were set for guest debugging
3115          */
3116         if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
3117                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3118
3119         vcpu_put(vcpu);
3120
3121         return 0;
3122 }
3123
3124 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3125 {
3126         vcpu_load(vcpu);
3127
3128         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
3129         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
3130         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
3131         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
3132         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
3133         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
3134         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
3135         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
3136 #ifdef CONFIG_X86_64
3137         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
3138         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
3139         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
3140         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
3141         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
3142         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
3143         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
3144         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
3146 #endif
3147
3148         kvm_rip_write(vcpu, regs->rip);
3149         kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3150
3152         vcpu->arch.exception.pending = false;
3153
3154         vcpu_put(vcpu);
3155
3156         return 0;
3157 }
3158
3159 void kvm_get_segment(struct kvm_vcpu *vcpu,
3160                      struct kvm_segment *var, int seg)
3161 {
3162         kvm_x86_ops->get_segment(vcpu, var, seg);
3163 }
3164
3165 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3166 {
3167         struct kvm_segment cs;
3168
3169         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
3170         *db = cs.db;
3171         *l = cs.l;
3172 }
3173 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3174
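/*
 * Copy out the vcpu's special registers: segments, descriptor tables,
 * control registers, EFER and APIC base, plus the bitmap of pending
 * interrupts (or the single pending vector with an in-kernel irqchip).
 */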
3175 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3176                                   struct kvm_sregs *sregs)
3177 {
3178         struct descriptor_table dt;
3179         int pending_vec;
3180
3181         vcpu_load(vcpu);
3182
3183         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3184         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3185         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3186         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3187         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3188         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3189
3190         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3191         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3192
3193         kvm_x86_ops->get_idt(vcpu, &dt);
3194         sregs->idt.limit = dt.limit;
3195         sregs->idt.base = dt.base;
3196         kvm_x86_ops->get_gdt(vcpu, &dt);
3197         sregs->gdt.limit = dt.limit;
3198         sregs->gdt.base = dt.base;
3199
3200         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3201         sregs->cr0 = vcpu->arch.cr0;
3202         sregs->cr2 = vcpu->arch.cr2;
3203         sregs->cr3 = vcpu->arch.cr3;
3204         sregs->cr4 = vcpu->arch.cr4;
3205         sregs->cr8 = kvm_get_cr8(vcpu);
3206         sregs->efer = vcpu->arch.shadow_efer;
3207         sregs->apic_base = kvm_get_apic_base(vcpu);
3208
3209         if (irqchip_in_kernel(vcpu->kvm)) {
3210                 memset(sregs->interrupt_bitmap, 0,
3211                        sizeof sregs->interrupt_bitmap);
3212                 pending_vec = kvm_x86_ops->get_irq(vcpu);
3213                 if (pending_vec >= 0)
3214                         set_bit(pending_vec,
3215                                 (unsigned long *)sregs->interrupt_bitmap);
3216         } else
3217                 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
3218                        sizeof sregs->interrupt_bitmap);
3219
3220         vcpu_put(vcpu);
3221
3222         return 0;
3223 }
3224
3225 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3226                                     struct kvm_mp_state *mp_state)
3227 {
3228         vcpu_load(vcpu);
3229         mp_state->mp_state = vcpu->arch.mp_state;
3230         vcpu_put(vcpu);
3231         return 0;
3232 }
3233
3234 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3235                                     struct kvm_mp_state *mp_state)
3236 {
3237         vcpu_load(vcpu);
3238         vcpu->arch.mp_state = mp_state->mp_state;
3239         vcpu_put(vcpu);
3240         return 0;
3241 }
3242
3243 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3244                         struct kvm_segment *var, int seg)
3245 {
3246         kvm_x86_ops->set_segment(vcpu, var, seg);
3247 }
3248
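/*
 * Convert a raw architectural segment descriptor into the struct
 * kvm_segment form: reassemble the base from its three split fields
 * (bits 0-15, 16-23, 24-31) and, if the granularity bit is set, scale
 * the 20-bit limit up to 4K pages.
 */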
3249 static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3250                                    struct kvm_segment *kvm_desct)
3251 {
3252         kvm_desct->base = seg_desc->base0;
3253         kvm_desct->base |= seg_desc->base1 << 16;
3254         kvm_desct->base |= seg_desc->base2 << 24;
3255         kvm_desct->limit = seg_desc->limit0;
3256         kvm_desct->limit |= seg_desc->limit << 16;
3257         if (seg_desc->g) {
3258                 kvm_desct->limit <<= 12;
3259                 kvm_desct->limit |= 0xfff;
3260         }
3261         kvm_desct->selector = selector;
3262         kvm_desct->type = seg_desc->type;
3263         kvm_desct->present = seg_desc->p;
3264         kvm_desct->dpl = seg_desc->dpl;
3265         kvm_desct->db = seg_desc->d;
3266         kvm_desct->s = seg_desc->s;
3267         kvm_desct->l = seg_desc->l;
3268         kvm_desct->g = seg_desc->g;
3269         kvm_desct->avl = seg_desc->avl;
3270         if (!selector)
3271                 kvm_desct->unusable = 1;
3272         else
3273                 kvm_desct->unusable = 0;
3274         kvm_desct->padding = 0;
3275 }
3276
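/*
 * Bit 2 of a selector is the table indicator (TI): set means the
 * selector indexes the current LDT, clear means the GDT.
 */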
3277 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3278                                            u16 selector,
3279                                            struct descriptor_table *dtable)
3280 {
3281         if (selector & (1 << 2)) {
3282                 struct kvm_segment kvm_seg;
3283
3284                 kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3285
3286                 if (kvm_seg.unusable)
3287                         dtable->limit = 0;
3288                 else
3289                         dtable->limit = kvm_seg.limit;
3290                 dtable->base = kvm_seg.base;
3291         } else
3293                 kvm_x86_ops->get_gdt(vcpu, dtable);
3294 }
3295
3296 /* allowed just for 8-byte segment descriptors */
3297 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3298                                          struct desc_struct *seg_desc)
3299 {
3300         gpa_t gpa;
3301         struct descriptor_table dtable;
3302         u16 index = selector >> 3;
3303
3304         get_segment_descriptor_dtable(vcpu, selector, &dtable);
3305
3306         if (dtable.limit < index * 8 + 7) {
3307                 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3308                 return 1;
3309         }
3310         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3311         gpa += index * 8;
3312         return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
3313 }
3314
3315 /* allowed just for 8-byte segment descriptors */
3316 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3317                                          struct desc_struct *seg_desc)
3318 {
3319         gpa_t gpa;
3320         struct descriptor_table dtable;
3321         u16 index = selector >> 3;
3322
3323         get_segment_descriptor_dtable(vcpu, selector, &dtable);
3324
3325         if (dtable.limit < index * 8 + 7)
3326                 return 1;
3327         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
3328         gpa += index * 8;
3329         return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
3330 }
3331
3332 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3333                              struct desc_struct *seg_desc)
3334 {
3335         u32 base_addr;
3336
3337         base_addr = seg_desc->base0;
3338         base_addr |= (seg_desc->base1 << 16);
3339         base_addr |= (seg_desc->base2 << 24);
3340
3341         return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
3342 }
3343
3344 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3345 {
3346         struct kvm_segment kvm_seg;
3347
3348         kvm_get_segment(vcpu, &kvm_seg, seg);
3349         return kvm_seg.selector;
3350 }
3351
3352 static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3353                                                 u16 selector,
3354                                                 struct kvm_segment *kvm_seg)
3355 {
3356         struct desc_struct seg_desc;
3357
3358         if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3359                 return 1;
3360         seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3361         return 0;
3362 }
3363
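/*
 * Real mode has no descriptors: synthesize a flat 64K read/write
 * segment whose base is simply selector << 4.
 */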
3364 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
3365 {
3366         struct kvm_segment segvar = {
3367                 .base = selector << 4,
3368                 .limit = 0xffff,
3369                 .selector = selector,
3370                 .type = 3,
3371                 .present = 1,
3372                 .dpl = 3,
3373                 .db = 0,
3374                 .s = 1,
3375                 .l = 0,
3376                 .g = 0,
3377                 .avl = 0,
3378                 .unusable = 0,
3379         };
3380         kvm_x86_ops->set_segment(vcpu, &segvar, seg);
3381         return 0;
3382 }
3383
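/*
 * Load a segment register from a selector.  In real mode this goes
 * through kvm_load_realmode_segment(); in protected mode the
 * descriptor is fetched from the GDT/LDT and type_bits (such as the
 * accessed bit) are OR'd into the segment type.
 */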
3384 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3385                                 int type_bits, int seg)
3386 {
3387         struct kvm_segment kvm_seg;
3388
3389         if (!(vcpu->arch.cr0 & X86_CR0_PE))
3390                 return kvm_load_realmode_segment(vcpu, selector, seg);
3391         if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3392                 return 1;
3393         kvm_seg.type |= type_bits;
3394
3395         if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3396             seg != VCPU_SREG_LDTR)
3397                 if (!kvm_seg.s)
3398                         kvm_seg.unusable = 1;
3399
3400         kvm_set_segment(vcpu, &kvm_seg, seg);
3401         return 0;
3402 }
3403
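/*
 * The TSS helpers below mirror the hardware task switch: the outgoing
 * task's registers and selectors are written to its TSS image in
 * guest memory, and the incoming task's state is read back from the
 * new TSS.
 */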
3404 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
3405                                 struct tss_segment_32 *tss)
3406 {
3407         tss->cr3 = vcpu->arch.cr3;
3408         tss->eip = kvm_rip_read(vcpu);
3409         tss->eflags = kvm_x86_ops->get_rflags(vcpu);
3410         tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3411         tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3412         tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3413         tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3414         tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3415         tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3416         tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
3417         tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
3418         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3419         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3420         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3421         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3422         tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
3423         tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
3424         tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3425         tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3426 }
3427
3428 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
3429                                   struct tss_segment_32 *tss)
3430 {
3431         kvm_set_cr3(vcpu, tss->cr3);
3432
3433         kvm_rip_write(vcpu, tss->eip);
3434         kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
3435
3436         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
3437         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
3438         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
3439         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
3440         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
3441         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
3442         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
3443         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
3444
3445         if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
3446                 return 1;
3447
3448         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3449                 return 1;
3450
3451         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3452                 return 1;
3453
3454         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3455                 return 1;
3456
3457         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3458                 return 1;
3459
3460         if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
3461                 return 1;
3462
3463         if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
3464                 return 1;
3465         return 0;
3466 }
3467
3468 static void save_state_to_tss16(struct kvm_vcpu *vcpu,
3469                                 struct tss_segment_16 *tss)
3470 {
3471         tss->ip = kvm_rip_read(vcpu);
3472         tss->flag = kvm_x86_ops->get_rflags(vcpu);
3473         tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
3474         tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
3475         tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
3476         tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
3477         tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3478         tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
3479         tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
3480         tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);
3481
3482         tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3483         tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3484         tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3485         tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3486         tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3487         tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3488 }
3489
3490 static int load_state_from_tss16(struct kvm_vcpu *vcpu,
3491                                  struct tss_segment_16 *tss)
3492 {
3493         kvm_rip_write(vcpu, tss->ip);
3494         kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
3495         kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
3496         kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
3497         kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
3498         kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
3499         kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
3500         kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
3501         kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
3502         kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
3503
3504         if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
3505                 return 1;
3506
3507         if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3508                 return 1;
3509
3510         if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3511                 return 1;
3512
3513         if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3514                 return 1;
3515
3516         if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3517                 return 1;
3518         return 0;
3519 }
3520
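/*
 * Perform the memory half of a task switch through a 16-bit TSS:
 * save current state into the old TSS, then read the new TSS and
 * load state from it.  Returns 1 on success, 0 if a guest memory
 * access or a segment load failed.
 */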
3521 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
3522                        u32 old_tss_base,
3523                        struct desc_struct *nseg_desc)
3524 {
3525         struct tss_segment_16 tss_segment_16;
3526         int ret = 0;
3527
3528         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3529                            sizeof tss_segment_16))
3530                 goto out;
3531
3532         save_state_to_tss16(vcpu, &tss_segment_16);
3533
3534         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3535                             sizeof tss_segment_16))
3536                 goto out;
3537
3538         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3539                            &tss_segment_16, sizeof tss_segment_16))
3540                 goto out;
3541
3542         if (load_state_from_tss16(vcpu, &tss_segment_16))
3543                 goto out;
3544
3545         ret = 1;
3546 out:
3547         return ret;
3548 }
3549
3550 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
3551                        u32 old_tss_base,
3552                        struct desc_struct *nseg_desc)
3553 {
3554         struct tss_segment_32 tss_segment_32;
3555         int ret = 0;
3556
3557         if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3558                            sizeof tss_segment_32))
3559                 goto out;
3560
3561         save_state_to_tss32(vcpu, &tss_segment_32);
3562
3563         if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3564                             sizeof tss_segment_32))
3565                 goto out;
3566
3567         if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3568                            &tss_segment_32, sizeof tss_segment_32))
3569                 goto out;
3570
3571         if (load_state_from_tss32(vcpu, &tss_segment_32))
3572                 goto out;
3573
3574         ret = 1;
3575 out:
3576         return ret;
3577 }
3578
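/*
 * Emulate a complete task switch (reason is CALL, JMP, IRET or a
 * gate): check privilege and the new TSS descriptor's limit, update
 * the busy and NT flags as the hardware would, move state through
 * the old and new TSS images, set CR0.TS and finally load TR with
 * the new selector.
 */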
3579 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3580 {
3581         struct kvm_segment tr_seg;
3582         struct desc_struct cseg_desc;
3583         struct desc_struct nseg_desc;
3584         int ret = 0;
3585         u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
3586         u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
3587
3588         old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
3589
3590         /* FIXME: Handle errors.  Failure to read either TSS or their
3591          * descriptors should generate a page fault.
3592          */
3593         if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3594                 goto out;
3595
3596         if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
3597                 goto out;
3598
3599         if (reason != TASK_SWITCH_IRET) {
3600                 int cpl;
3601
3602                 cpl = kvm_x86_ops->get_cpl(vcpu);
3603                 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
3604                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3605                         return 1;
3606                 }
3607         }
3608
3609         if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
3610                 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
3611                 return 1;
3612         }
3613
3614         if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3615                 cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
3616                 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
3617         }
3618
3619         if (reason == TASK_SWITCH_IRET) {
3620                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3621                 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
3622         }
3623
3624         kvm_x86_ops->skip_emulated_instruction(vcpu);
3625
3626         if (nseg_desc.type & 8)
3627                 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
3628                                          &nseg_desc);
3629         else
3630                 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
3631                                          &nseg_desc);
3632
3633         if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
3634                 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3635                 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
3636         }
3637
3638         if (reason != TASK_SWITCH_IRET) {
3639                 nseg_desc.type |= (1 << 1);
3640                 save_guest_segment_descriptor(vcpu, tss_selector,
3641                                               &nseg_desc);
3642         }
3643
3644         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
3645         seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
3646         tr_seg.type = 11;
3647         kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3648 out:
3649         return ret;
3650 }
3651 EXPORT_SYMBOL_GPL(kvm_task_switch);
3652
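/*
 * Install the special-register state supplied by userspace.  Any
 * change to cr0, cr3, cr4 or EFER forces kvm_mmu_reset_context() so
 * the MMU is rebuilt for the (possibly new) paging mode.
 */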
3653 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3654                                   struct kvm_sregs *sregs)
3655 {
3656         int mmu_reset_needed = 0;
3657         int i, pending_vec, max_bits;
3658         struct descriptor_table dt;
3659
3660         vcpu_load(vcpu);
3661
3662         dt.limit = sregs->idt.limit;
3663         dt.base = sregs->idt.base;
3664         kvm_x86_ops->set_idt(vcpu, &dt);
3665         dt.limit = sregs->gdt.limit;
3666         dt.base = sregs->gdt.base;
3667         kvm_x86_ops->set_gdt(vcpu, &dt);
3668
3669         vcpu->arch.cr2 = sregs->cr2;
3670         mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
3671         vcpu->arch.cr3 = sregs->cr3;
3672
3673         kvm_set_cr8(vcpu, sregs->cr8);
3674
3675         mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
3676         kvm_x86_ops->set_efer(vcpu, sregs->efer);
3677         kvm_set_apic_base(vcpu, sregs->apic_base);
3678
3679         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3680
3681         mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
3682         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
3683         vcpu->arch.cr0 = sregs->cr0;
3684
3685         mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
3686         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
3687         if (!is_long_mode(vcpu) && is_pae(vcpu))
3688                 load_pdptrs(vcpu, vcpu->arch.cr3);
3689
3690         if (mmu_reset_needed)
3691                 kvm_mmu_reset_context(vcpu);
3692
3693         if (!irqchip_in_kernel(vcpu->kvm)) {
3694                 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
3695                        sizeof vcpu->arch.irq_pending);
3696                 vcpu->arch.irq_summary = 0;
3697                 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
3698                         if (vcpu->arch.irq_pending[i])
3699                                 __set_bit(i, &vcpu->arch.irq_summary);
3700         } else {
3701                 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
3702                 pending_vec = find_first_bit(
3703                         (const unsigned long *)sregs->interrupt_bitmap,
3704                         max_bits);
3705                 /* Only a pending external irq is handled here */
3706                 if (pending_vec < max_bits) {
3707                         kvm_x86_ops->set_irq(vcpu, pending_vec);
3708                         pr_debug("Set back pending irq %d\n",
3709                                  pending_vec);
3710                 }
3711                 kvm_pic_clear_isr_ack(vcpu->kvm);
3712         }
3713
3714         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3715         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3716         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3717         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3718         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3719         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3720
3721         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3722         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3723
3724         /* Older userspace won't unhalt the vcpu on reset. */
3725         if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
3726             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
3727             !(vcpu->arch.cr0 & X86_CR0_PE))
3728                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3729
3730         vcpu_put(vcpu);
3731
3732         return 0;
3733 }
3734
3735 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
3736                                     struct kvm_debug_guest *dbg)
3737 {
3738         int r;
3739
3740         vcpu_load(vcpu);
3741
3742         r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
3743
3744         vcpu_put(vcpu);
3745
3746         return r;
3747 }
3748
3749 /*
3750  * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
3751  * we have asm/x86/processor.h
3752  */
3753 struct fxsave {
3754         u16     cwd;
3755         u16     swd;
3756         u16     twd;
3757         u16     fop;
3758         u64     rip;
3759         u64     rdp;
3760         u32     mxcsr;
3761         u32     mxcsr_mask;
3762         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
3763 #ifdef CONFIG_X86_64
3764         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
3765 #else
3766         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
3767 #endif
3768 };
3769
3770 /*
3771  * Translate a guest virtual address to a guest physical address.
3772  */
3773 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3774                                     struct kvm_translation *tr)
3775 {
3776         unsigned long vaddr = tr->linear_address;
3777         gpa_t gpa;
3778
3779         vcpu_load(vcpu);
3780         down_read(&vcpu->kvm->slots_lock);
3781         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
3782         up_read(&vcpu->kvm->slots_lock);
3783         tr->physical_address = gpa;
3784         tr->valid = gpa != UNMAPPED_GVA;
3785         tr->writeable = 1;
3786         tr->usermode = 0;
3787         vcpu_put(vcpu);
3788
3789         return 0;
3790 }
3791
3792 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3793 {
3794         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
3795
3796         vcpu_load(vcpu);
3797
3798         memcpy(fpu->fpr, fxsave->st_space, 128);
3799         fpu->fcw = fxsave->cwd;
3800         fpu->fsw = fxsave->swd;
3801         fpu->ftwx = fxsave->twd;
3802         fpu->last_opcode = fxsave->fop;
3803         fpu->last_ip = fxsave->rip;
3804         fpu->last_dp = fxsave->rdp;
3805         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
3806
3807         vcpu_put(vcpu);
3808
3809         return 0;
3810 }
3811
3812 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3813 {
3814         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
3815
3816         vcpu_load(vcpu);
3817
3818         memcpy(fxsave->st_space, fpu->fpr, 128);
3819         fxsave->cwd = fpu->fcw;
3820         fxsave->swd = fpu->fsw;
3821         fxsave->twd = fpu->ftwx;
3822         fxsave->fop = fpu->last_opcode;
3823         fxsave->rip = fpu->last_ip;
3824         fxsave->rdp = fpu->last_dp;
3825         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
3826
3827         vcpu_put(vcpu);
3828
3829         return 0;
3830 }
3831
3832 void fx_init(struct kvm_vcpu *vcpu)
3833 {
3834         unsigned after_mxcsr_mask;
3835
3836         /*
3837          * Touch the fpu for the first time in a non-atomic context:
3838          * if this is the first fpu instruction, the exception handler
3839          * will fire before the instruction returns and will have to
3840          * allocate RAM with GFP_KERNEL.
3841          */
3842         if (!used_math())
3843                 kvm_fx_save(&vcpu->arch.host_fx_image);
3844
3845         /* Initialize guest FPU by resetting ours and saving into guest's */
3846         preempt_disable();
3847         kvm_fx_save(&vcpu->arch.host_fx_image);
3848         kvm_fx_finit();
3849         kvm_fx_save(&vcpu->arch.guest_fx_image);
3850         kvm_fx_restore(&vcpu->arch.host_fx_image);
3851         preempt_enable();
3852
3853         vcpu->arch.cr0 |= X86_CR0_ET;
3854         after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
3855         vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
3856         memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
3857                0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
3858 }
3859 EXPORT_SYMBOL_GPL(fx_init);
3860
3861 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
3862 {
3863         if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
3864                 return;
3865
3866         vcpu->guest_fpu_loaded = 1;
3867         kvm_fx_save(&vcpu->arch.host_fx_image);
3868         kvm_fx_restore(&vcpu->arch.guest_fx_image);
3869 }
3870 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
3871
3872 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
3873 {
3874         if (!vcpu->guest_fpu_loaded)
3875                 return;
3876
3877         vcpu->guest_fpu_loaded = 0;
3878         kvm_fx_save(&vcpu->arch.guest_fx_image);
3879         kvm_fx_restore(&vcpu->arch.host_fx_image);
3880         ++vcpu->stat.fpu_reload;
3881 }
3882 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
3883
3884 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
3885 {
3886         kvm_x86_ops->vcpu_free(vcpu);
3887 }
3888
3889 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3890                                                 unsigned int id)
3891 {
3892         return kvm_x86_ops->vcpu_create(kvm, id);
3893 }
3894
3895 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
3896 {
3897         int r;
3898
3899         /* We do fxsave: this must be aligned. */
3900         BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
3901
3902         vcpu_load(vcpu);
3903         r = kvm_arch_vcpu_reset(vcpu);
3904         if (r == 0)
3905                 r = kvm_mmu_setup(vcpu);
3906         vcpu_put(vcpu);
3907         if (r < 0)
3908                 goto free_vcpu;
3909
3910         return 0;
3911 free_vcpu:
3912         kvm_x86_ops->vcpu_free(vcpu);
3913         return r;
3914 }
3915
3916 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3917 {
3918         vcpu_load(vcpu);
3919         kvm_mmu_unload(vcpu);
3920         vcpu_put(vcpu);
3921
3922         kvm_x86_ops->vcpu_free(vcpu);
3923 }
3924
3925 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
3926 {
3927         return kvm_x86_ops->vcpu_reset(vcpu);
3928 }
3929
3930 void kvm_arch_hardware_enable(void *garbage)
3931 {
3932         kvm_x86_ops->hardware_enable(garbage);
3933 }
3934
3935 void kvm_arch_hardware_disable(void *garbage)
3936 {
3937         kvm_x86_ops->hardware_disable(garbage);
3938 }
3939
3940 int kvm_arch_hardware_setup(void)
3941 {
3942         return kvm_x86_ops->hardware_setup();
3943 }
3944
3945 void kvm_arch_hardware_unsetup(void)
3946 {
3947         kvm_x86_ops->hardware_unsetup();
3948 }
3949
3950 void kvm_arch_check_processor_compat(void *rtn)
3951 {
3952         kvm_x86_ops->check_processor_compatibility(rtn);
3953 }
3954
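/*
 * First-time vcpu construction: the BSP (vcpu 0) starts RUNNABLE
 * while APs wait UNINITIALIZED for INIT/SIPI; a zeroed page is
 * allocated for PIO data, the MMU is created, and with an in-kernel
 * irqchip a local APIC as well.
 */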
3955 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3956 {
3957         struct page *page;
3958         struct kvm *kvm;
3959         int r;
3960
3961         BUG_ON(vcpu->kvm == NULL);
3962         kvm = vcpu->kvm;
3963
3964         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3965         if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
3966                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3967         else
3968                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
3969
3970         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3971         if (!page) {
3972                 r = -ENOMEM;
3973                 goto fail;
3974         }
3975         vcpu->arch.pio_data = page_address(page);
3976
3977         r = kvm_mmu_create(vcpu);
3978         if (r < 0)
3979                 goto fail_free_pio_data;
3980
3981         if (irqchip_in_kernel(kvm)) {
3982                 r = kvm_create_lapic(vcpu);
3983                 if (r < 0)
3984                         goto fail_mmu_destroy;
3985         }
3986
3987         return 0;
3988
3989 fail_mmu_destroy:
3990         kvm_mmu_destroy(vcpu);
3991 fail_free_pio_data:
3992         free_page((unsigned long)vcpu->arch.pio_data);
3993 fail:
3994         return r;
3995 }
3996
3997 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3998 {
3999         kvm_free_lapic(vcpu);
4000         down_read(&vcpu->kvm->slots_lock);
4001         kvm_mmu_destroy(vcpu);
4002         up_read(&vcpu->kvm->slots_lock);
4003         free_page((unsigned long)vcpu->arch.pio_data);
4004 }
4005
4006 struct  kvm *kvm_arch_create_vm(void)
4007 {
4008         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
4009
4010         if (!kvm)
4011                 return ERR_PTR(-ENOMEM);
4012
4013         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
4014         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
4015
4016         return kvm;
4017 }
4018
4019 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
4020 {
4021         vcpu_load(vcpu);
4022         kvm_mmu_unload(vcpu);
4023         vcpu_put(vcpu);
4024 }
4025
4026 static void kvm_free_vcpus(struct kvm *kvm)
4027 {
4028         unsigned int i;
4029
4030         /*
4031          * Unpin any mmu pages first.
4032          */
4033         for (i = 0; i < KVM_MAX_VCPUS; ++i)
4034                 if (kvm->vcpus[i])
4035                         kvm_unload_vcpu_mmu(kvm->vcpus[i]);
4036         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
4037                 if (kvm->vcpus[i]) {
4038                         kvm_arch_vcpu_free(kvm->vcpus[i]);
4039                         kvm->vcpus[i] = NULL;
4040                 }
4041         }
4043 }
4044
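/*
 * VM teardown: detach the IOMMU and assigned devices first, then the
 * PIT, PIC and IOAPIC, the vcpus, and finally guest memory and the
 * pages KVM pinned on the guest's behalf.
 */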
4045 void kvm_arch_destroy_vm(struct kvm *kvm)
4046 {
4047         kvm_iommu_unmap_guest(kvm);
4048         kvm_free_all_assigned_devices(kvm);
4049         kvm_free_pit(kvm);
4050         kfree(kvm->arch.vpic);
4051         kfree(kvm->arch.vioapic);
4052         kvm_free_vcpus(kvm);
4053         kvm_free_physmem(kvm);
4054         if (kvm->arch.apic_access_page)
4055                 put_page(kvm->arch.apic_access_page);
4056         if (kvm->arch.ept_identity_pagetable)
4057                 put_page(kvm->arch.ept_identity_pagetable);
4058         kfree(kvm);
4059 }
4060
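/*
 * Arch half of KVM_SET_USER_MEMORY_REGION.  For !user_alloc slots the
 * kernel mmap()s (or munmap()s) anonymous memory on the guest's
 * behalf; afterwards the mmu page budget is recomputed and the slot
 * is write-protected again before the remote TLBs are flushed.
 */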
4061 int kvm_arch_set_memory_region(struct kvm *kvm,
4062                                 struct kvm_userspace_memory_region *mem,
4063                                 struct kvm_memory_slot old,
4064                                 int user_alloc)
4065 {
4066         int npages = mem->memory_size >> PAGE_SHIFT;
4067         struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
4068
4069         /* To keep backward compatibility with older userspace,
4070          * x86 needs to handle the !user_alloc case.
4071          */
4072         if (!user_alloc) {
4073                 if (npages && !old.rmap) {
4074                         unsigned long userspace_addr;
4075
4076                         down_write(&current->mm->mmap_sem);
4077                         userspace_addr = do_mmap(NULL, 0,
4078                                                  npages * PAGE_SIZE,
4079                                                  PROT_READ | PROT_WRITE,
4080                                                  MAP_PRIVATE | MAP_ANONYMOUS,
4081                                                  0);
4082                         up_write(&current->mm->mmap_sem);
4083
4084                         if (IS_ERR((void *)userspace_addr))
4085                                 return PTR_ERR((void *)userspace_addr);
4086
4087                         /* set userspace_addr atomically for kvm_hva_to_rmapp */
4088                         spin_lock(&kvm->mmu_lock);
4089                         memslot->userspace_addr = userspace_addr;
4090                         spin_unlock(&kvm->mmu_lock);
4091                 } else {
4092                         if (!old.user_alloc && old.rmap) {
4093                                 int ret;
4094
4095                                 down_write(&current->mm->mmap_sem);
4096                                 ret = do_munmap(current->mm, old.userspace_addr,
4097                                                 old.npages * PAGE_SIZE);
4098                                 up_write(&current->mm->mmap_sem);
4099                                 if (ret < 0)
4100                                         printk(KERN_WARNING
4101                                        "kvm_vm_ioctl_set_memory_region: "
4102                                        "failed to munmap memory\n");
4103                         }
4104                 }
4105         }
4106
4107         if (!kvm->arch.n_requested_mmu_pages) {
4108                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
4109                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
4110         }
4111
4112         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
4113         kvm_flush_remote_tlbs(kvm);
4114
4115         return 0;
4116 }
4117
4118 void kvm_arch_flush_shadow(struct kvm *kvm)
4119 {
4120         kvm_mmu_zap_all(kvm);
4121 }
4122
4123 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4124 {
4125         return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
4126                || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
4127 }
4128
4129 static void vcpu_kick_intr(void *info)
4130 {
4131 #ifdef DEBUG
4132         struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
4133         printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
4134 #endif
4135 }
4136
4137 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
4138 {
4139         int ipi_pcpu = vcpu->cpu;
4140         int cpu = get_cpu();
4141
4142         if (waitqueue_active(&vcpu->wq)) {
4143                 wake_up_interruptible(&vcpu->wq);
4144                 ++vcpu->stat.halt_wakeup;
4145         }
4146         /*
4147          * We may be called synchronously with irqs disabled in guest mode,
4148          * in which case there is no need for smp_call_function_single().
4149          */
4150         if (vcpu->guest_mode && vcpu->cpu != cpu)
4151                 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
4152         put_cpu();
4153 }