KVM: don't enter guest after SIPI was received by a CPU
[linux-2.6] arch/x86/kvm/svm.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

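/* Feature bits reported in EDX of CPUID 0x8000000A (SVM_CPUID_FUNC). */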
#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static void kvm_reput_irq(struct vcpu_svm *svm);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1 : 8, type : 5, dpl : 2, p : 1;
        unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
        int cpu;
        int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
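/*
 * Each MSR in the permission map takes two bits (one read-intercept bit,
 * one write-intercept bit), so a 2048-byte range covers
 * 2048 * 8 / 2 = 8192 MSRs.
 */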
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
        return svm_features & feat;
}

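/*
 * With a userspace irqchip, pending interrupts live in a per-vcpu bitmap:
 * irq_pending has one bit per vector and irq_summary one bit per
 * irq_pending word, so finding a pending vector stays cheap.
 */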
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
        int word_index = __ffs(vcpu->arch.irq_summary);
        int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
        if (!vcpu->arch.irq_pending[word_index])
                clear_bit(word_index, &vcpu->arch.irq_summary);
        return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
        set_bit(irq, vcpu->arch.irq_pending);
        set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
}

static inline void clgi(void)
{
        asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
        asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
        unsigned long cr2;

        asm volatile ("mov %%cr2, %0" : "=r" (cr2));
        return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
        asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
        unsigned long dr6;

        asm volatile ("mov %%dr6, %0" : "=r" (dr6));
        return dr6;
}

static inline void write_dr6(unsigned long val)
{
        asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
        unsigned long dr7;

        asm volatile ("mov %%dr7, %0" : "=r" (dr7));
        return dr7;
}

static inline void write_dr7(unsigned long val)
{
        asm volatile ("mov %0, %%dr7" :: "r" (val));
}

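/*
 * Dropping the vcpu's ASID generation behind the per-cpu generation makes
 * pre_svm_run() hand out a fresh ASID on the next entry, which flushes the
 * guest's TLB entries without touching host translations.
 */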
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
        to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
        vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!svm->next_rip) {
                printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
                       __func__, kvm_rip_read(vcpu), svm->next_rip);

        kvm_rip_write(vcpu, svm->next_rip);
        svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

        vcpu->arch.interrupt_window_open = 1;
}

static int has_svm(void)
{
        uint32_t eax, ebx, ecx, edx;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                printk(KERN_INFO "has_svm: not amd\n");
                return 0;
        }

        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if (eax < SVM_CPUID_FUNC) {
                printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
                return 0;
        }

        cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
                printk(KERN_DEBUG "has_svm: svm not available\n");
                return 0;
        }
        return 1;
}

static void svm_hardware_disable(void *garbage)
{
        uint64_t efer;

        wrmsrl(MSR_VM_HSAVE_PA, 0);
        rdmsrl(MSR_EFER, efer);
        wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
}

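/*
 * Enable SVM on this cpu: set EFER.SVME and point MSR_VM_HSAVE_PA at a
 * page-aligned host save area the cpu uses across VMRUN/#VMEXIT.
 */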
static void svm_hardware_enable(void *garbage)
{
        struct svm_cpu_data *svm_data;
        uint64_t efer;
        struct desc_ptr gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        if (!has_svm()) {
                printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
                return;
        }
        svm_data = per_cpu(svm_data, me);

        if (!svm_data) {
                printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
                       me);
                return;
        }

        svm_data->asid_generation = 1;
        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        svm_data->next_asid = svm_data->max_asid + 1;

        asm volatile ("sgdt %0" : "=m"(gdt_descr));
        gdt = (struct desc_struct *)gdt_descr.address;
        svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        rdmsrl(MSR_EFER, efer);
        wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

        wrmsrl(MSR_VM_HSAVE_PA,
               page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *svm_data
                = per_cpu(svm_data, raw_smp_processor_id());

        if (!svm_data)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        __free_page(svm_data->save_area);
        kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *svm_data;
        int r;

        svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!svm_data)
                return -ENOMEM;
        svm_data->cpu = cpu;
        svm_data->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
        if (!svm_data->save_area)
                goto err_1;

        per_cpu(svm_data, cpu) = svm_data;

        return 0;

err_1:
        kfree(svm_data);
        return r;
}

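/*
 * Flip the two intercept bits (read, write) for one MSR inside the
 * permission map; the offsets follow the three ranges in msrpm_ranges[].
 * A clear bit means the access is passed through without a VMEXIT.
 */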
static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr >= msrpm_ranges[i] &&
                    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
                        u32 msr_offset = (i * MSRS_IN_RANGE + msr -
                                          msrpm_ranges[i]) * 2;

                        u32 *base = msrpm + (msr_offset / 32);
                        u32 msr_shift = msr_offset % 32;
                        u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
                        *base = (*base & ~(0x3 << msr_shift)) |
                                (mask << msr_shift);
                        return;
                }
        }
        BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
        set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
        set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
        set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
        set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 1;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.lbr_ctl = 0;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        for_each_online_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        svm_features = cpuid_edx(SVM_CPUID_FUNC);

        if (!svm_has(SVM_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt) {
                printk(KERN_INFO "kvm: Nested Paging disabled\n");
                npt_enabled = false;
        }

        if (npt_enabled) {
                printk(KERN_INFO "kvm: Nested Paging enabled\n");
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();

        return 0;

err:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}

static __exit void svm_hardware_unsetup(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

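/*
 * Build the initial VMCB: select which control/debug register accesses,
 * exceptions and instructions to intercept, then load the segment and
 * control register state a cpu presents after INIT/RESET.
 */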
static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK;

        control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
                                        INTERCEPT_CR4_MASK |
                                        INTERCEPT_CR8_MASK;

        control->intercept_dr_read =    INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK;

        control->intercept_dr_write =   INTERCEPT_DR0_MASK |
                                        INTERCEPT_DR1_MASK |
                                        INTERCEPT_DR2_MASK |
                                        INTERCEPT_DR3_MASK |
                                        INTERCEPT_DR5_MASK |
                                        INTERCEPT_DR7_MASK;

        control->intercept_exceptions = (1 << PF_VECTOR) |
                                        (1 << UD_VECTOR) |
                                        (1 << MC_VECTOR);

        control->intercept =    (1ULL << INTERCEPT_INTR) |
                                (1ULL << INTERCEPT_NMI) |
                                (1ULL << INTERCEPT_SMI) |
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_INVD) |
                                (1ULL << INTERCEPT_HLT) |
                                (1ULL << INTERCEPT_INVLPGA) |
                                (1ULL << INTERCEPT_IOIO_PROT) |
                                (1ULL << INTERCEPT_MSR_PROT) |
                                (1ULL << INTERCEPT_TASK_SWITCH) |
                                (1ULL << INTERCEPT_SHUTDOWN) |
                                (1ULL << INTERCEPT_VMRUN) |
                                (1ULL << INTERCEPT_VMMCALL) |
                                (1ULL << INTERCEPT_VMLOAD) |
                                (1ULL << INTERCEPT_VMSAVE) |
                                (1ULL << INTERCEPT_STGI) |
                                (1ULL << INTERCEPT_CLGI) |
                                (1ULL << INTERCEPT_SKINIT) |
                                (1ULL << INTERCEPT_WBINVD) |
                                (1ULL << INTERCEPT_MONITOR) |
                                (1ULL << INTERCEPT_MWAIT);

        control->iopm_base_pa = iopm_base;
        control->msrpm_base_pa = __pa(svm->msrpm);
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;

        init_seg(&save->es);
        init_seg(&save->ss);
        init_seg(&save->ds);
        init_seg(&save->fs);
        init_seg(&save->gs);

        save->cs.selector = 0xf000;
        /* Executable/Readable Code Segment */
        save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
        save->cs.limit = 0xffff;
        /*
         * cs.base should really be 0xffff0000, but vmx can't handle that, so
         * be consistent with it.
         *
         * Replace when we have real mode working for vmx.
         */
        save->cs.base = 0xf0000;

        save->gdtr.limit = 0xffff;
        save->idtr.limit = 0xffff;

        init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
        init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

        save->efer = MSR_EFER_SVME_MASK;
        save->dr6 = 0xffff0ff0;
        save->dr7 = 0x400;
        save->rflags = 2;
        save->rip = 0x0000fff0;
        svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

        /*
         * The architectural cr0 value at cpu init is 0x60000010; we enable
         * the cpu cache by default.  The orderly way would be to enable the
         * cache in the BIOS.
         */
        save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */

        if (npt_enabled) {
                /* Setup VMCB for Nested Paging */
                control->nested_ctl = 1;
                control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH);
                control->intercept_exceptions &= ~(1 << PF_VECTOR);
                control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
                                                INTERCEPT_CR3_MASK);
                control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
                                                 INTERCEPT_CR3_MASK);
                save->g_pat = 0x0007040600070406ULL;
                /* enable caching because the QEMU Bios doesn't enable it */
                save->cr0 = X86_CR0_ET;
                save->cr3 = 0;
                save->cr4 = 0;
        }
        force_new_asid(&svm->vcpu);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        init_vmcb(svm);

        if (vcpu->vcpu_id != 0) {
                kvm_rip_write(vcpu, 0);
                svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
                svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
        }
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;

        return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
        int err;

        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!svm) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;

        page = alloc_page(GFP_KERNEL);
        if (!page) {
                err = -ENOMEM;
                goto uninit;
        }

        err = -ENOMEM;
        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
                goto uninit;
        svm->msrpm = page_address(msrpm_pages);
        svm_vcpu_init_msrpm(svm->msrpm);

        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        memset(svm->db_regs, 0, sizeof(svm->db_regs));
        init_vmcb(svm);

        fx_init(&svm->vcpu);
        svm->vcpu.fpu_active = 1;
        svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (svm->vcpu.vcpu_id == 0)
                svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

        return &svm->vcpu;

uninit:
        kvm_vcpu_uninit(&svm->vcpu);
free_svm:
        kmem_cache_free(kvm_vcpu_cache, svm);
out:
        return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        if (unlikely(cpu != vcpu->cpu)) {
                u64 tsc_this, delta;

                /*
                 * Make sure that the guest sees a monotonically
                 * increasing TSC.
                 */
                rdtscll(tsc_this);
                delta = vcpu->arch.host_tsc - tsc_this;
                svm->vmcb->control.tsc_offset += delta;
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
        }

        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int i;

        ++vcpu->stat.host_state_reload;
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

        rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
        return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        switch (seg) {
        case VCPU_SREG_CS: return &save->cs;
        case VCPU_SREG_DS: return &save->ds;
        case VCPU_SREG_ES: return &save->es;
        case VCPU_SREG_FS: return &save->fs;
        case VCPU_SREG_GS: return &save->gs;
        case VCPU_SREG_SS: return &save->ss;
        case VCPU_SREG_TR: return &save->tr;
        case VCPU_SREG_LDTR: return &save->ldtr;
        }
        BUG();
        return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        var->base = s->base;
        var->limit = s->limit;
        var->selector = s->selector;
        var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
        var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
        var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
        var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
        var->unusable = !var->present;
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
        struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

        return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.idtr.limit;
        dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.idtr.limit = dt->limit;
        svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        dt->limit = svm->vmcb->save.gdtr.limit;
        dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->save.gdtr.limit = dt->limit;
        svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->arch.shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                        vcpu->arch.shadow_efer |= EFER_LMA;
                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                }

                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                        vcpu->arch.shadow_efer &= ~EFER_LMA;
                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                }
        }
#endif
        if (npt_enabled)
                goto set;

        if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
                svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
                vcpu->fpu_active = 1;
        }

        vcpu->arch.cr0 = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
        if (!vcpu->fpu_active) {
                svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
                cr0 |= X86_CR0_TS;
        }
set:
        /*
         * re-enable caching here because the QEMU bios
         * does not do it - this results in some delay at
         * reboot
         */
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

        if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
                force_new_asid(vcpu);

        vcpu->arch.cr4 = cr4;
        if (!npt_enabled)
                cr4 |= X86_CR4_PAE;
        cr4 |= host_cr4_mce;
        to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_seg *s = svm_seg(vcpu, seg);

        s->base = var->base;
        s->limit = var->limit;
        s->selector = var->selector;
        if (var->unusable)
                s->attrib = 0;
        else {
                s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
                s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
                s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
                s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
                s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
                s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
                s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
                s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
        }
        if (seg == VCPU_SREG_CS)
                svm->vmcb->save.cpl
                        = (svm->vmcb->save.cs.attrib
                           >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
        return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 exit_int_info = svm->vmcb->control.exit_int_info;

        if (is_external_interrupt(exit_int_info))
                return exit_int_info & SVM_EVTINJ_VEC_MASK;
        return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

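/*
 * Hand the vcpu the next free ASID on this cpu.  When the ASID space is
 * exhausted, start a new generation at 1 and ask the hardware to flush
 * all ASIDs on the next VMRUN.
 */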
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
        if (svm_data->next_asid > svm_data->max_asid) {
                ++svm_data->asid_generation;
                svm_data->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }

        svm->vcpu.cpu = svm_data->cpu;
        svm->asid_generation = svm_data->asid_generation;
        svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
        unsigned long val = to_svm(vcpu)->db_regs[dr];
        KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
        return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        *exception = 0;

        if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
                svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
                svm->vmcb->save.dr6 |= DR6_BD_MASK;
                *exception = DB_VECTOR;
                return;
        }

        switch (dr) {
        case 0 ... 3:
                svm->db_regs[dr] = value;
                return;
        case 4 ... 5:
                if (vcpu->arch.cr4 & X86_CR4_DE) {
                        *exception = UD_VECTOR;
                        return;
                }
                /* fall through */
        case 7: {
                if (value & ~((1ULL << 32) - 1)) {
                        *exception = GP_VECTOR;
                        return;
                }
                svm->vmcb->save.dr7 = value;
                return;
        }
        default:
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
                       __func__, dr);
                *exception = UD_VECTOR;
                return;
        }
}

static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 exit_int_info = svm->vmcb->control.exit_int_info;
        struct kvm *kvm = svm->vcpu.kvm;
        u64 fault_address;
        u32 error_code;
        bool event_injection = false;

        if (!irqchip_in_kernel(kvm) &&
            is_external_interrupt(exit_int_info)) {
                event_injection = true;
                push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
        }

        fault_address  = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;

        if (!npt_enabled)
                KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
                            (u32)fault_address, (u32)(fault_address >> 32),
                            handler);
        else
                KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
                            (u32)fault_address, (u32)(fault_address >> 32),
                            handler);
        /*
         * FIXME: This shouldn't be necessary here, but there is a flush
         * missing in the MMU code. Until we find this bug, flush the
         * complete TLB here on an NPF
         */
        if (npt_enabled)
                svm_flush_tlb(&svm->vcpu);

        if (!npt_enabled && event_injection)
                kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        int er;

        er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
        if (er != EMULATE_DONE)
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
        if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
                svm->vmcb->save.cr0 &= ~X86_CR0_TS;
        svm->vcpu.fpu_active = 1;

        return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        /*
         * On an #MC intercept the MCE handler is not called automatically in
         * the host. So do it by hand here.
         */
        asm volatile (
                "int $0x12\n");
        /* not sure if we ever come back to this point */

        return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        /*
         * VMCB is undefined after a SHUTDOWN intercept
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
        init_vmcb(svm);

        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
}

static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
        int size, down, in, string, rep;
        unsigned port;

        ++svm->vcpu.stat.io_exits;

        svm->next_rip = svm->vmcb->control.exit_info_2;

        string = (io_info & SVM_IOIO_STR_MASK) != 0;

        if (string) {
                if (emulate_instruction(&svm->vcpu,
                                        kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }

        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
        rep = (io_info & SVM_IOIO_REP_MASK) != 0;
        down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

        return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        KVMTRACE_0D(NMI, &svm->vcpu, handler);
        return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        ++svm->vcpu.stat.irq_exits;
        KVMTRACE_0D(INTR, &svm->vcpu, handler);
        return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
        skip_emulated_instruction(&svm->vcpu);
        kvm_emulate_hypercall(&svm->vcpu);
        return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
{
        kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
                                    struct kvm_run *kvm_run)
{
        u16 tss_selector;

        tss_selector = (u16)svm->vmcb->control.exit_info_1;
        if (svm->vmcb->control.exit_info_2 &
            (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
                return kvm_task_switch(&svm->vcpu, tss_selector,
                                       TASK_SWITCH_IRET);
        if (svm->vmcb->control.exit_info_2 &
            (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
                return kvm_task_switch(&svm->vcpu, tss_selector,
                                       TASK_SWITCH_JMP);
        return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
        kvm_emulate_cpuid(&svm->vcpu);
        return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
{
        if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
                pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
        return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
        if (irqchip_in_kernel(svm->vcpu.kvm))
                return 1;
        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
        return 0;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;

                rdtscll(tsc);
                *data = svm->vmcb->control.tsc_offset + tsc;
                break;
        }
        case MSR_K6_STAR:
                *data = svm->vmcb->save.star;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                *data = svm->vmcb->save.lstar;
                break;
        case MSR_CSTAR:
                *data = svm->vmcb->save.cstar;
                break;
        case MSR_KERNEL_GS_BASE:
                *data = svm->vmcb->save.kernel_gs_base;
                break;
        case MSR_SYSCALL_MASK:
                *data = svm->vmcb->save.sfmask;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                *data = svm->vmcb->save.sysenter_cs;
                break;
        case MSR_IA32_SYSENTER_EIP:
                *data = svm->vmcb->save.sysenter_eip;
                break;
        case MSR_IA32_SYSENTER_ESP:
                *data = svm->vmcb->save.sysenter_esp;
                break;
        /* Nobody will change the following 5 values in the VMCB so
           we can safely return them on rdmsr. They will always be 0
           until LBRV is implemented. */
        case MSR_IA32_DEBUGCTLMSR:
                *data = svm->vmcb->save.dbgctl;
                break;
        case MSR_IA32_LASTBRANCHFROMIP:
                *data = svm->vmcb->save.br_from;
                break;
        case MSR_IA32_LASTBRANCHTOIP:
                *data = svm->vmcb->save.br_to;
                break;
        case MSR_IA32_LASTINTFROMIP:
                *data = svm->vmcb->save.last_excp_from;
                break;
        case MSR_IA32_LASTINTTOIP:
                *data = svm->vmcb->save.last_excp_to;
                break;
        default:
                return kvm_get_msr_common(vcpu, ecx, data);
        }
        return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        u64 data;

        if (svm_get_msr(&svm->vcpu, ecx, &data))
                kvm_inject_gp(&svm->vcpu, 0);
        else {
                KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
                            (u32)(data >> 32), handler);

                svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
                svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
                svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
                skip_emulated_instruction(&svm->vcpu);
        }
        return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (ecx) {
        case MSR_IA32_TIME_STAMP_COUNTER: {
                u64 tsc;

                rdtscll(tsc);
                svm->vmcb->control.tsc_offset = data - tsc;
                break;
        }
        case MSR_K6_STAR:
                svm->vmcb->save.star = data;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                svm->vmcb->save.lstar = data;
                break;
        case MSR_CSTAR:
                svm->vmcb->save.cstar = data;
                break;
        case MSR_KERNEL_GS_BASE:
                svm->vmcb->save.kernel_gs_base = data;
                break;
        case MSR_SYSCALL_MASK:
                svm->vmcb->save.sfmask = data;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                svm->vmcb->save.sysenter_cs = data;
                break;
        case MSR_IA32_SYSENTER_EIP:
                svm->vmcb->save.sysenter_eip = data;
                break;
        case MSR_IA32_SYSENTER_ESP:
                svm->vmcb->save.sysenter_esp = data;
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!svm_has(SVM_FEATURE_LBRV)) {
                        pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
                                        __func__, data);
                        break;
                }
                if (data & DEBUGCTL_RESERVED_BITS)
                        return 1;

                svm->vmcb->save.dbgctl = data;
                if (data & (1ULL<<0))
                        svm_enable_lbrv(svm);
                else
                        svm_disable_lbrv(svm);
                break;
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_EVNTSEL3:
        case MSR_K7_PERFCTR0:
        case MSR_K7_PERFCTR1:
        case MSR_K7_PERFCTR2:
        case MSR_K7_PERFCTR3:
                /*
                 * Just discard all writes to the performance counters; this
                 * should keep both older linux and windows 64-bit guests
                 * happy
                 */
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);

                break;
        default:
                return kvm_set_msr_common(vcpu, ecx, data);
        }
        return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
                | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

        KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
                    handler);

        svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
        if (svm_set_msr(&svm->vcpu, ecx, data))
                kvm_inject_gp(&svm->vcpu, 0);
        else
                skip_emulated_instruction(&svm->vcpu);
        return 1;
}

static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
        if (svm->vmcb->control.exit_info_1)
                return wrmsr_interception(svm, kvm_run);
        else
                return rdmsr_interception(svm, kvm_run);
}

static int interrupt_window_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
{
        KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);

        svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
         */
        if (kvm_run->request_interrupt_window &&
            !svm->vcpu.arch.irq_summary) {
                ++svm->vcpu.stat.irq_window_exits;
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }

        return 1;
}

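/* Exit handler dispatch table, indexed by the VMCB exit code. */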
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
                                      struct kvm_run *kvm_run) = {
        [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
        [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
        [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
        [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
        /* for now: */
        [SVM_EXIT_WRITE_CR0]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
        [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
        [SVM_EXIT_READ_DR1]                     = emulate_on_interception,
        [SVM_EXIT_READ_DR2]                     = emulate_on_interception,
        [SVM_EXIT_READ_DR3]                     = emulate_on_interception,
        [SVM_EXIT_WRITE_DR0]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR1]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR2]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR3]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR5]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR7]                    = emulate_on_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
        [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
        [SVM_EXIT_INTR]                         = intr_interception,
        [SVM_EXIT_NMI]                          = nmi_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
        [SVM_EXIT_INIT]                         = nop_on_interception,
        [SVM_EXIT_VINTR]                        = interrupt_window_interception,
        /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
        [SVM_EXIT_CPUID]                        = cpuid_interception,
        [SVM_EXIT_INVD]                         = emulate_on_interception,
        [SVM_EXIT_HLT]                          = halt_interception,
        [SVM_EXIT_INVLPG]                       = emulate_on_interception,
        [SVM_EXIT_INVLPGA]                      = invalid_op_interception,
        [SVM_EXIT_IOIO]                         = io_interception,
        [SVM_EXIT_MSR]                          = msr_interception,
        [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
        [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
        [SVM_EXIT_VMRUN]                        = invalid_op_interception,
        [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
        [SVM_EXIT_VMLOAD]                       = invalid_op_interception,
        [SVM_EXIT_VMSAVE]                       = invalid_op_interception,
        [SVM_EXIT_STGI]                         = invalid_op_interception,
        [SVM_EXIT_CLGI]                         = invalid_op_interception,
        [SVM_EXIT_SKINIT]                       = invalid_op_interception,
        [SVM_EXIT_WBINVD]                       = emulate_on_interception,
        [SVM_EXIT_MONITOR]                      = invalid_op_interception,
        [SVM_EXIT_MWAIT]                        = invalid_op_interception,
        [SVM_EXIT_NPF]                          = pf_interception,
};

static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 exit_code = svm->vmcb->control.exit_code;

        KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
                    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);

        if (npt_enabled) {
                int mmu_reload = 0;
                if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
                        svm_set_cr0(vcpu, svm->vmcb->save.cr0);
                        mmu_reload = 1;
                }
                vcpu->arch.cr0 = svm->vmcb->save.cr0;
                vcpu->arch.cr3 = svm->vmcb->save.cr3;
                if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
                        if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
                                kvm_inject_gp(vcpu, 0);
                                return 1;
                        }
                }
                if (mmu_reload) {
                        kvm_mmu_reset_context(vcpu);
                        kvm_mmu_load(vcpu);
                }
        }

        kvm_reput_irq(svm);

        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = svm->vmcb->control.exit_code;
                return 0;
        }

        if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
            exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
            exit_code != SVM_EXIT_NPF)
                printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
                       "exit_code 0x%x\n",
                       __func__, svm->vmcb->control.exit_int_info,
                       exit_code);

        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || !svm_exit_handlers[exit_code]) {
                kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
                kvm_run->hw.hardware_exit_reason = exit_code;
                return 0;
        }

        return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
        svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
        load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
        if (svm->vcpu.cpu != cpu ||
            svm->asid_generation != svm_data->asid_generation)
                new_asid(svm, svm_data);
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
        struct vmcb_control_area *control;

        KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);

        ++svm->vcpu.stat.irq_injections;
        control = &svm->vmcb->control;
        control->int_vector = irq;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm_inject_irq(svm, irq);
}

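/*
 * With an in-kernel APIC and no vAPIC page, CR8 writes only need to be
 * intercepted while the TPR would actually mask the highest pending
 * interrupt; otherwise let the guest write CR8 freely.
 */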
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
        int max_irr, tpr;

        if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
                return;

        vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;

        max_irr = kvm_lapic_find_highest_irr(vcpu);
        if (max_irr == -1)
                return;

        tpr = kvm_lapic_get_cr8(vcpu) << 4;

        if (tpr >= (max_irr & 0xf0))
                vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

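/*
 * Interrupt injection on entry: re-inject a vector that the last exit
 * cut short, else deliver the next pending interrupt as a virtual
 * interrupt (V_IRQ); if delivery is blocked, keep V_IRQ pending and
 * intercept VINTR to learn when the window opens.
 */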
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
        int intr_vector = -1;

        if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
            ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
                intr_vector = vmcb->control.exit_int_info &
                              SVM_EVTINJ_VEC_MASK;
                vmcb->control.exit_int_info = 0;
                svm_inject_irq(svm, intr_vector);
                goto out;
        }

        if (vmcb->control.int_ctl & V_IRQ_MASK)
                goto out;

        if (!kvm_cpu_has_interrupt(vcpu))
                goto out;

        if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
            (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
            (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
                /*
                 * Can't deliver the interrupt now: leave it pending and
                 * request a VINTR exit for when the window opens.
                 */
                vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
                svm_inject_irq(svm, 0x0);
                goto out;
        }
        /* Okay, we can deliver the interrupt: grab it and update PIC state. */
        intr_vector = kvm_cpu_get_interrupt(vcpu);
        svm_inject_irq(svm, intr_vector);
        kvm_timer_intr_post(vcpu, intr_vector);
out:
        update_cr8_intercept(vcpu);
}

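/*
 * A V_IRQ that was posted but never taken by the guest would be lost
 * across an exit; with a userspace irqchip, put it back on the
 * pending-irq bitmap.  Also recompute whether the interrupt window is
 * open.
 */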
static void kvm_reput_irq(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;

        if ((control->int_ctl & V_IRQ_MASK)
            && !irqchip_in_kernel(svm->vcpu.kvm)) {
                control->int_ctl &= ~V_IRQ_MASK;
                push_irq(&svm->vcpu, control->int_vector);
        }

        svm->vcpu.arch.interrupt_window_open =
                !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

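/*
 * Pop the lowest pending interrupt from the userspace-irqchip bitmaps
 * and inject it.
 */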
static void svm_do_inject_vector(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        int word_index = __ffs(vcpu->arch.irq_summary);
        int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
        if (!vcpu->arch.irq_pending[word_index])
                clear_bit(word_index, &vcpu->arch.irq_summary);
        svm_inject_irq(svm, irq);
}

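/*
 * Userspace-irqchip injection path: deliver a pending interrupt if the
 * window is open, otherwise arm the VINTR intercept so we get an exit
 * as soon as injection becomes possible again.
 */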
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                       struct kvm_run *kvm_run)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;

        svm->vcpu.arch.interrupt_window_open =
                (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
                 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

        if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
                /*
                 * Interrupts are enabled and not blocked by sti or
                 * mov ss: inject one now.
                 */
                svm_do_inject_vector(svm);

        /*
         * Interrupts blocked.  Wait for unblock.
         */
        if (!svm->vcpu.arch.interrupt_window_open &&
            (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
                control->intercept |= 1ULL << INTERCEPT_VINTR;
        else
                control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

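/*
 * SVM can run real-mode guests directly, so no special TSS area is
 * needed; nothing to set up here.
 */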
static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
        return 0;
}

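/*
 * DR0-DR3 are not switched by VMRUN/#VMEXIT, so save and restore them
 * by hand around guest execution.
 */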
static void save_db_regs(unsigned long *db_regs)
{
        asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
        asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
        asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
        asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
        asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
        asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
        asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
        asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

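/*
 * On SVM a guest TLB flush is just a matter of switching to a fresh
 * ASID.
 */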
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
        force_new_asid(vcpu);
}

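/* Nothing to do: host state is saved in svm_vcpu_run() itself. */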
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

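/*
 * If CR8 writes were not intercepted, the guest may have changed the
 * TPR behind our back; copy V_TPR back into the local APIC.
 */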
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
                int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
                kvm_lapic_set_tpr(vcpu, cr8);
        }
}

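/* Propagate the current APIC TPR into V_TPR before entering the guest. */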
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr8;

        if (!irqchip_in_kernel(vcpu->kvm))
                return;

        cr8 = kvm_get_cr8(vcpu);
        svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
        svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

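/* Word-sized register prefix for the inline assembly below. */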
#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

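/*
 * The main world switch: flush cached guest registers into the VMCB,
 * save the bits of host state that VMRUN does not preserve, execute
 * VMLOAD/VMRUN/VMSAVE, then restore host state and pull the guest
 * register file back out.
 */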
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u16 fs_selector;
        u16 gs_selector;
        u16 ldt_selector;

        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
        svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

        pre_svm_run(svm);

        sync_lapic_to_cr8(vcpu);

        save_host_msrs(vcpu);
        fs_selector = kvm_read_fs();
        gs_selector = kvm_read_gs();
        ldt_selector = kvm_read_ldt();
        svm->host_cr2 = kvm_read_cr2();
        svm->host_dr6 = read_dr6();
        svm->host_dr7 = read_dr7();
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
        /* required for live migration with NPT */
        if (npt_enabled)
                svm->vmcb->save.cr3 = vcpu->arch.cr3;

        if (svm->vmcb->save.dr7 & 0xff) {
                write_dr7(0);
                save_db_regs(svm->host_db_regs);
                load_db_regs(svm->db_regs);
        }

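        /*
         * With GIF clear, host interrupts can be left enabled without
         * being taken before VMRUN; they are held off until stgi().
         */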
        clgi();

        local_irq_enable();

        asm volatile (
                "push %%"R"bp; \n\t"
                "mov %c[rbx](%[svm]), %%"R"bx \n\t"
                "mov %c[rcx](%[svm]), %%"R"cx \n\t"
                "mov %c[rdx](%[svm]), %%"R"dx \n\t"
                "mov %c[rsi](%[svm]), %%"R"si \n\t"
                "mov %c[rdi](%[svm]), %%"R"di \n\t"
                "mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
                "mov %c[r8](%[svm]),  %%r8  \n\t"
                "mov %c[r9](%[svm]),  %%r9  \n\t"
                "mov %c[r10](%[svm]), %%r10 \n\t"
                "mov %c[r11](%[svm]), %%r11 \n\t"
                "mov %c[r12](%[svm]), %%r12 \n\t"
                "mov %c[r13](%[svm]), %%r13 \n\t"
                "mov %c[r14](%[svm]), %%r14 \n\t"
                "mov %c[r15](%[svm]), %%r15 \n\t"
#endif

                /* Enter guest mode */
                "push %%"R"ax \n\t"
                "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
                __ex(SVM_VMLOAD) "\n\t"
                __ex(SVM_VMRUN) "\n\t"
                __ex(SVM_VMSAVE) "\n\t"
                "pop %%"R"ax \n\t"

                /* Save guest registers, load host registers */
                "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
                "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
                "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
                "mov %%"R"si, %c[rsi](%[svm]) \n\t"
                "mov %%"R"di, %c[rdi](%[svm]) \n\t"
                "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
                "mov %%r8,  %c[r8](%[svm]) \n\t"
                "mov %%r9,  %c[r9](%[svm]) \n\t"
                "mov %%r10, %c[r10](%[svm]) \n\t"
                "mov %%r11, %c[r11](%[svm]) \n\t"
                "mov %%r12, %c[r12](%[svm]) \n\t"
                "mov %%r13, %c[r13](%[svm]) \n\t"
                "mov %%r14, %c[r14](%[svm]) \n\t"
                "mov %%r15, %c[r15](%[svm]) \n\t"
#endif
                "pop %%"R"bp"
                :
                : [svm]"a"(svm),
                  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
                  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
                  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
                  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
                  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
                  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
                  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
                  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
                  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
                  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
                  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
                  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
                  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
                  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
                  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
                : "cc", "memory"
                , R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
                , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
                );

        if (svm->vmcb->save.dr7 & 0xff)
                load_db_regs(svm->host_db_regs);

        vcpu->arch.cr2 = svm->vmcb->save.cr2;
        vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

        write_dr6(svm->host_dr6);
        write_dr7(svm->host_dr7);
        kvm_write_cr2(svm->host_cr2);

        kvm_load_fs(fs_selector);
        kvm_load_gs(gs_selector);
        kvm_load_ldt(ldt_selector);
        load_host_msrs(vcpu);

        reload_tss(vcpu);

        local_irq_disable();

        stgi();

        sync_cr8_to_lapic(vcpu);

        svm->next_rip = 0;
}

#undef R

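/*
 * With NPT the root points at the nested page table and the guest owns
 * its own cr3; without it, load the shadow page table root into the
 * VMCB directly.
 */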
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (npt_enabled) {
                svm->vmcb->control.nested_cr3 = root;
                force_new_asid(vcpu);
                return;
        }

        svm->vmcb->save.cr3 = root;
        force_new_asid(vcpu);

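        /*
         * A cr3 load usually means a guest context switch; deactivate
         * the FPU and re-arm the #NM intercept so the next FPU use
         * reloads the right state (lazy FPU switching).
         */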
        if (vcpu->fpu_active) {
                svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
                svm->vmcb->save.cr0 |= X86_CR0_TS;
                vcpu->fpu_active = 0;
        }
}

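/* The BIOS can disable SVM via the VM_CR MSR; honour that setting. */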
static int is_disabled(void)
{
        u64 vm_cr;

        rdmsrl(MSR_VM_CR, vm_cr);
        if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
                return 1;

        return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
        /*
         * Patch in the VMMCALL instruction (0f 01 d9), SVM's native
         * hypercall entry point:
         */
        hypercall[0] = 0x0f;
        hypercall[1] = 0x01;
        hypercall[2] = 0xd9;
}

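/* Any CPU that supports SVM at all is acceptable; always report success. */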
static void svm_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

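/* No VMX-style TPR shadow page here; the TPR is handled via CR8 intercepts. */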
static bool svm_cpu_has_accelerated_tpr(void)
{
        return false;
}

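/*
 * The nested page table uses the host paging mode: four levels on
 * x86-64, three (PAE) on 32-bit.
 */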
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

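/* Hook table that wires this SVM backend into the generic KVM x86 core. */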
static struct kvm_x86_ops svm_x86_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
        .hardware_setup = svm_hardware_setup,
        .hardware_unsetup = svm_hardware_unsetup,
        .check_processor_compatibility = svm_check_processor_compat,
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
        .vcpu_reset = svm_vcpu_reset,

        .prepare_guest_switch = svm_prepare_guest_switch,
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,

        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
        .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
        .set_cr3 = svm_set_cr3,
        .set_cr4 = svm_set_cr4,
        .set_efer = svm_set_efer,
        .get_idt = svm_get_idt,
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
        .get_dr = svm_get_dr,
        .set_dr = svm_set_dr,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,

        .tlb_flush = svm_flush_tlb,

        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
        .skip_emulated_instruction = skip_emulated_instruction,
        .patch_hypercall = svm_patch_hypercall,
        .get_irq = svm_get_irq,
        .set_irq = svm_set_irq,
        .queue_exception = svm_queue_exception,
        .exception_injected = svm_exception_injected,
        .inject_pending_irq = svm_intr_assist,
        .inject_pending_vectors = do_interrupt_requests,

        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
};

static int __init svm_init(void)
{
        return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
                              THIS_MODULE);
}

static void __exit svm_exit(void)
{
        kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)