KVM: Bypass irq_pending get/set when using in kernel irqchip
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86_emulate.h"
#include "segment_descriptor.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kvm_arch_ops *kvm_arch_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        struct dentry *dentry;
} debugfs_entries[] = {
        { "pf_fixed", STAT_OFFSET(pf_fixed) },
        { "pf_guest", STAT_OFFSET(pf_guest) },
        { "tlb_flush", STAT_OFFSET(tlb_flush) },
        { "invlpg", STAT_OFFSET(invlpg) },
        { "exits", STAT_OFFSET(exits) },
        { "io_exits", STAT_OFFSET(io_exits) },
        { "mmio_exits", STAT_OFFSET(mmio_exits) },
        { "signal_exits", STAT_OFFSET(signal_exits) },
        { "irq_window", STAT_OFFSET(irq_window_exits) },
        { "halt_exits", STAT_OFFSET(halt_exits) },
        { "halt_wakeup", STAT_OFFSET(halt_wakeup) },
        { "request_irq", STAT_OFFSET(request_irq_exits) },
        { "irq_exits", STAT_OFFSET(irq_exits) },
        { "light_exits", STAT_OFFSET(light_exits) },
        { "efer_reload", STAT_OFFSET(efer_reload) },
        { NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256

#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct segment_descriptor_64 {
        struct segment_descriptor s;
        u32 base_higher;
        u32 pad_zero;
};

#endif

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

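/*
 * Compute the linear base address of the segment named by @selector by
 * reading its descriptor from the GDT (or, when the TI bit is set, from
 * the LDT).
 */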
unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct segment_descriptor *d;
        unsigned long table_base;
        typedef unsigned long ul;
        unsigned long v;

        if (selector == 0)
                return 0;

        asm ("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;

                asm ("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct segment_descriptor *)(table_base + (selector & ~7));
        v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
        if (d->system == 0
            && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

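/*
 * Lazy FPU switching: save the host FPU state and load the guest's;
 * kvm_put_guest_fpu() reverses it.  guest_fpu_loaded guards against
 * doing the switch twice.
 */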
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 1;
        fx_save(&vcpu->host_fx_image);
        fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 0;
        fx_save(&vcpu->guest_fx_image);
        fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_ops->vcpu_load(vcpu, cpu);
        put_cpu();
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_ops->vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
        atomic_t *completed = _completed;

        atomic_inc(completed);
}

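/*
 * Force a TLB flush on every vcpu of @kvm: set KVM_TLB_FLUSH in each
 * vcpu's request bits, then IPI the physical CPUs currently running a
 * vcpu and wait until all of them have acknowledged.
 */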
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu, needed;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;
        atomic_t completed;

        atomic_set(&completed, 0);
        cpus_clear(cpus);
        needed = 0;
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        if (!cpu_isset(cpu, cpus)) {
                                cpu_set(cpu, cpus);
                                ++needed;
                        }
        }

        /*
         * We really want smp_call_function_mask() here.  But that's not
         * available, so ipi all cpus in parallel and wait for them
         * to complete.
         */
        for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
                smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
        while (atomic_read(&completed) != needed) {
                cpu_relax();
                barrier();
        }
}

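/*
 * Common vcpu setup: initialize the lock and wait queue, and allocate
 * the shared kvm_run page, the PIO transfer page, and the MMU state.
 */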
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->mmu.root_hpa = INVALID_PAGE;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail_free_run;
        }
        vcpu->pio_data = page_address(page);

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;

        return 0;

fail_free_pio_data:
        free_page((unsigned long)vcpu->pio_data);
fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mmu_destroy(vcpu);
        kvm_free_apic(vcpu->apic);
        free_page((unsigned long)vcpu->pio_data);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

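/*
 * Allocate and minimally initialize a new VM, and link it onto the
 * global vm_list under kvm_lock.
 */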
static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        INIT_LIST_HEAD(&kvm->active_mmu_pages);
        kvm_io_bus_init(&kvm->mmio_bus);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        int i;

        if (!dont || free->phys_mem != dont->phys_mem)
                if (free->phys_mem) {
                        for (i = 0; i < free->npages; ++i)
                                if (free->phys_mem[i])
                                        __free_page(free->phys_mem[i]);
                        vfree(free->phys_mem);
                }

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        free->phys_mem = NULL;
        free->npages = 0;
        free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
                if (vcpu->pio.guest_pages[i]) {
                        __free_page(vcpu->pio.guest_pages[i]);
                        vcpu->pio.guest_pages[i] = NULL;
                }
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        /*
         * Unpin any mmu pages first.
         */
        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_ops->vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kfree(kvm->vpic);
        kfree(kvm->vioapic);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_destroy_vm(kvm);
        return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
        kvm_arch_ops->inject_gp(vcpu, 0);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        u64 *pdpt;
        int ret;
        struct page *page;
        u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

        mutex_lock(&vcpu->kvm->lock);
        page = gfn_to_page(vcpu->kvm, pdpt_gfn);
        if (!page) {
                ret = 0;
                goto out;
        }

        pdpt = kmap_atomic(page, KM_USER0);
        memcpy(pdpte, pdpt+offset, sizeof(pdpte));
        kunmap_atomic(pdpt, KM_USER0);

        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
        mutex_unlock(&vcpu->kvm->lock);

        return ret;
}

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->cr0);
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                inject_gp(vcpu);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                inject_gp(vcpu);
                                return;
                        }
                        kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                inject_gp(vcpu);
                                return;

                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }

        }

        kvm_arch_ops->set_cr0(vcpu, cr0);
        vcpu->cr0 = cr0;

        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        mutex_unlock(&vcpu->kvm->lock);
        return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                inject_gp(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        inject_gp(vcpu);
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
                   && !load_pdptrs(vcpu, vcpu->cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                inject_gp(vcpu);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                inject_gp(vcpu);
                return;
        }
        kvm_arch_ops->set_cr4(vcpu, cr4);
        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                inject_gp(vcpu);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                inject_gp(vcpu);
                                return;
                        }
                } else {
                        if (cr3 & CR3_NONPAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                inject_gp(vcpu);
                                return;
                        }
                }
        }

        mutex_lock(&vcpu->kvm->lock);
        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                inject_gp(vcpu);
        else {
                vcpu->cr3 = cr3;
                vcpu->mmu.new_cr3(vcpu);
        }
        mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

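/*
 * With an in-kernel irqchip, CR8 is just a view of the local APIC's TPR,
 * so reads and writes are forwarded to the lapic; otherwise the value is
 * kept in vcpu->cr8 for userspace to manage.
 */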
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                inject_gp(vcpu);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        /* vcpu->apic_base is kept current whether or not the irqchip is
         * in the kernel, so both cases read the same field. */
        return vcpu->apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void fx_init(struct kvm_vcpu *vcpu)
{
        unsigned after_mxcsr_mask;

        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
        fx_save(&vcpu->host_fx_image);
        fpu_init();
        fx_save(&vcpu->guest_fx_image);
        fx_restore(&vcpu->host_fx_image);
        preempt_enable();

        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
        vcpu->guest_fx_image.mxcsr = 0x1f80;
        memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
               0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                          struct kvm_memory_region *mem)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;
        int memory_config_version;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
        mutex_lock(&kvm->lock);

        memory_config_version = kvm->memory_config_version;
        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_unlock;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_unlock;
        }
        /*
         * Do memory allocations outside lock.  memory_config_version will
         * detect any races.
         */
        mutex_unlock(&kvm->lock);

        /* Deallocate if slot is being removed */
        if (!npages)
                new.phys_mem = NULL;

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.phys_mem) {
                new.phys_mem = vmalloc(npages * sizeof(struct page *));

                if (!new.phys_mem)
                        goto out_free;

                memset(new.phys_mem, 0, npages * sizeof(struct page *));
                for (i = 0; i < npages; ++i) {
                        new.phys_mem[i] = alloc_page(GFP_HIGHUSER
                                                     | __GFP_ZERO);
                        if (!new.phys_mem[i])
                                goto out_free;
                        set_page_private(new.phys_mem[i], 0);
                }
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        mutex_lock(&kvm->lock);

        if (memory_config_version != kvm->memory_config_version) {
                mutex_unlock(&kvm->lock);
                kvm_free_physmem_slot(&new, &old);
                goto raced;
        }

        r = -EAGAIN;
        if (kvm->busy)
                goto out_unlock;

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;
        ++kvm->memory_config_version;

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);

        mutex_unlock(&kvm->lock);

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_unlock:
        mutex_unlock(&kvm->lock);
out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        mutex_lock(&kvm->lock);

        /*
         * Prevent changes to guest memory configuration even while the lock
         * is not taken.
         */
        ++kvm->busy;
        mutex_unlock(&kvm->lock);
        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (any) {
                mutex_lock(&kvm->lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                kvm_flush_remote_tlbs(kvm);
                memset(memslot->dirty_bitmap, 0, n);
                mutex_unlock(&kvm->lock);
        }

        r = 0;

out:
        mutex_lock(&kvm->lock);
        --kvm->busy;
        mutex_unlock(&kvm->lock);
        return r;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                                         struct kvm_memory_alias *alias)
{
        int r, n;
        struct kvm_mem_alias *p;

        r = -EINVAL;
        /* General sanity checks */
        if (alias->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (alias->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (alias->slot >= KVM_ALIAS_SLOTS)
                goto out;
        if (alias->guest_phys_addr + alias->memory_size
            < alias->guest_phys_addr)
                goto out;
        if (alias->target_phys_addr + alias->memory_size
            < alias->target_phys_addr)
                goto out;

        mutex_lock(&kvm->lock);

        p = &kvm->aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
        p->npages = alias->memory_size >> PAGE_SHIFT;
        p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

        for (n = KVM_ALIAS_SLOTS; n > 0; --n)
                if (kvm->aliases[n - 1].npages)
                        break;
        kvm->naliases = n;

        kvm_mmu_zap_all(kvm);

        mutex_unlock(&kvm->lock);

        return 0;

out:
        return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1],
                       sizeof(struct kvm_pic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        kvm_pic_update_irq(pic_irqchip(kvm));
        return r;
}

static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_mem_alias *alias;

        for (i = 0; i < kvm->naliases; ++i) {
                alias = &kvm->aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
                return NULL;
        return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

/* WARNING: Does not work on aliased pages. */
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

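/*
 * Read guest-virtual memory for the emulator, translating and copying
 * page by page so that reads crossing page boundaries work.
 */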
int emulator_read_std(unsigned long addr,
                             void *val,
                             unsigned int bytes,
                             struct kvm_vcpu *vcpu)
{
        void *data = val;

        while (bytes) {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
                unsigned long pfn;
                struct page *page;
                void *page_virt;

                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
                pfn = gpa >> PAGE_SHIFT;
                page = gfn_to_page(vcpu->kvm, pfn);
                if (!page)
                        return X86EMUL_UNHANDLEABLE;
                page_virt = kmap_atomic(page, KM_USER0);

                memcpy(data, page_virt + offset, tocopy);

                kunmap_atomic(page_virt, KM_USER0);

                bytes -= tocopy;
                data += tocopy;
                addr += tocopy;
        }

        return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_write_std(unsigned long addr,
                              const void *val,
                              unsigned int bytes,
                              struct kvm_vcpu *vcpu)
{
        pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
        return X86EMUL_UNHANDLEABLE;
}

/*
 * Only the apic needs an MMIO device hook, so take the shortcut now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
                                                gpa_t addr)
{
        struct kvm_io_device *dev;

        if (vcpu->apic) {
                dev = &vcpu->apic->dev;
                if (dev->in_range(dev, addr))
                        return dev;
        }
        return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
                                                gpa_t addr)
{
        struct kvm_io_device *dev;

        dev = vcpu_find_pervcpu_dev(vcpu, addr);
        if (dev == NULL)
                dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
        return dev;
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
                                               gpa_t addr)
{
        return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}

static int emulator_read_emulated(unsigned long addr,
                                  void *val,
                                  unsigned int bytes,
                                  struct kvm_vcpu *vcpu)
{
        struct kvm_io_device *mmio_dev;
        gpa_t                 gpa;

        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
                vcpu->mmio_read_completed = 0;
                return X86EMUL_CONTINUE;
        } else if (emulator_read_std(addr, val, bytes, vcpu)
                   == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;

        gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;

        /*
         * Is this MMIO handled locally?
         */
        mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
        if (mmio_dev) {
                kvm_iodevice_read(mmio_dev, gpa, bytes, val);
                return X86EMUL_CONTINUE;
        }

        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
        vcpu->mmio_is_write = 0;

        return X86EMUL_UNHANDLEABLE;
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                               const void *val, int bytes)
{
        struct page *page;
        void *virt;

        if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
                return 0;
        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
        if (!page)
                return 0;
        mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
        virt = kmap_atomic(page, KM_USER0);
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
        memcpy(virt + offset_in_page(gpa), val, bytes);
        kunmap_atomic(virt, KM_USER0);
        return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
                                           const void *val,
                                           unsigned int bytes,
                                           struct kvm_vcpu *vcpu)
{
        struct kvm_io_device *mmio_dev;
        gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

        if (gpa == UNMAPPED_GVA) {
                kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
                return X86EMUL_PROPAGATE_FAULT;
        }

        if (emulator_write_phys(vcpu, gpa, val, bytes))
                return X86EMUL_CONTINUE;

        /*
         * Is this MMIO handled locally?
         */
        mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
        if (mmio_dev) {
                kvm_iodevice_write(mmio_dev, gpa, bytes, val);
                return X86EMUL_CONTINUE;
        }

        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
        vcpu->mmio_is_write = 1;
        memcpy(vcpu->mmio_data, val, bytes);

        return X86EMUL_CONTINUE;
}

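/*
 * Split writes that cross a page boundary into two single-page writes,
 * since the gva->gpa translation must be redone for the second page.
 */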
int emulator_write_emulated(unsigned long addr,
                                   const void *val,
                                   unsigned int bytes,
                                   struct kvm_vcpu *vcpu)
{
        /* Crossing a page boundary? */
        if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
                int rc, now;

                now = -addr & ~PAGE_MASK;
                rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                addr += now;
                val += now;
                bytes -= now;
        }
        return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

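/*
 * Emulated cmpxchg is implemented as a plain write of the new value, so
 * it is not atomic against other vcpus; the one-time warning below
 * records that.
 */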
static int emulator_cmpxchg_emulated(unsigned long addr,
                                     const void *old,
                                     const void *new,
                                     unsigned int bytes,
                                     struct kvm_vcpu *vcpu)
{
        static int reported;

        if (!reported) {
                reported = 1;
                printk(KERN_WARNING "kvm: emulating exchange as write\n");
        }
        return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        return kvm_arch_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
        return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
        unsigned long cr0;

        cr0 = vcpu->cr0 & ~X86_CR0_TS;
        kvm_arch_ops->set_cr0(vcpu, cr0);
        return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        switch (dr) {
        case 0 ... 3:
                *dest = kvm_arch_ops->get_dr(vcpu, dr);
                return X86EMUL_CONTINUE;
        default:
                pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
                return X86EMUL_UNHANDLEABLE;
        }
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
        int exception;

        kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
        if (exception) {
                /* FIXME: better handling */
                return X86EMUL_UNHANDLEABLE;
        }
        return X86EMUL_CONTINUE;
}

static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
        static int reported;
        u8 opcodes[4];
        unsigned long rip = ctxt->vcpu->rip;
        unsigned long rip_linear;

        rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

        if (reported)
                return;

        emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt->vcpu);

        printk(KERN_ERR "emulation failed but !mmio_needed?"
               " rip %lx %02x %02x %02x %02x\n",
               rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
        reported = 1;
}

struct x86_emulate_ops emulate_ops = {
        .read_std            = emulator_read_std,
        .write_std           = emulator_write_std,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
};

int emulate_instruction(struct kvm_vcpu *vcpu,
                        struct kvm_run *run,
                        unsigned long cr2,
                        u16 error_code)
{
        struct x86_emulate_ctxt emulate_ctxt;
        int r;
        int cs_db, cs_l;

        vcpu->mmio_fault_cr2 = cr2;
        kvm_arch_ops->cache_regs(vcpu);

        kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

        emulate_ctxt.vcpu = vcpu;
        emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
        emulate_ctxt.cr2 = cr2;
        emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
                ? X86EMUL_MODE_REAL : cs_l
                ? X86EMUL_MODE_PROT64 : cs_db
                ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

        if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
                emulate_ctxt.cs_base = 0;
                emulate_ctxt.ds_base = 0;
                emulate_ctxt.es_base = 0;
                emulate_ctxt.ss_base = 0;
        } else {
                emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
                emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
                emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
                emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
        }

        emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
        emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

        vcpu->mmio_is_write = 0;
        vcpu->pio.string = 0;
        r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
        if (vcpu->pio.string)
                return EMULATE_DO_MMIO;

        if ((r || vcpu->mmio_is_write) && run) {
                run->exit_reason = KVM_EXIT_MMIO;
                run->mmio.phys_addr = vcpu->mmio_phys_addr;
                memcpy(run->mmio.data, vcpu->mmio_data, 8);
                run->mmio.len = vcpu->mmio_size;
                run->mmio.is_write = vcpu->mmio_is_write;
        }

        if (r) {
                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
                        return EMULATE_DONE;
                if (!vcpu->mmio_needed) {
                        report_emulation_failure(&emulate_ctxt);
                        return EMULATE_FAIL;
                }
                return EMULATE_DO_MMIO;
        }

        kvm_arch_ops->decache_regs(vcpu);
        kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

        if (vcpu->mmio_is_write) {
                vcpu->mmio_needed = 0;
                return EMULATE_DO_MMIO;
        }

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

/*
 * The vCPU has executed a HLT instruction with the in-kernel irqchip enabled.
 */
static void kvm_vcpu_kernel_halt(struct kvm_vcpu *vcpu)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&vcpu->wq, &wait);

        /*
         * We will block until either an interrupt or a signal wakes us up
         */
        while (!(irqchip_in_kernel(vcpu->kvm) && kvm_cpu_has_interrupt(vcpu))
               && !vcpu->irq_summary
               && !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        remove_wait_queue(&vcpu->wq, &wait);
        set_current_state(TASK_RUNNING);
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.halt_exits;
        if (irqchip_in_kernel(vcpu->kvm)) {
                kvm_vcpu_kernel_halt(vcpu);
                return 1;
        } else {
                vcpu->run->exit_reason = KVM_EXIT_HLT;
                return 0;
        }
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

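/*
 * Hypercall ABI: the hypercall number arrives in rax (rbx for 32-bit
 * guests) with up to six arguments in the remaining general-purpose
 * registers; unknown numbers are forwarded to userspace via kvm_run.
 */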
int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long nr, a0, a1, a2, a3, a4, a5, ret;

        kvm_arch_ops->cache_regs(vcpu);
        ret = -KVM_EINVAL;
#ifdef CONFIG_X86_64
        if (is_long_mode(vcpu)) {
                nr = vcpu->regs[VCPU_REGS_RAX];
                a0 = vcpu->regs[VCPU_REGS_RDI];
                a1 = vcpu->regs[VCPU_REGS_RSI];
                a2 = vcpu->regs[VCPU_REGS_RDX];
                a3 = vcpu->regs[VCPU_REGS_RCX];
                a4 = vcpu->regs[VCPU_REGS_R8];
                a5 = vcpu->regs[VCPU_REGS_R9];
        } else
#endif
        {
                nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
                a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
                a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
                a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
                a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
                a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
                a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
        }
        switch (nr) {
        default:
                run->hypercall.nr = nr;
                run->hypercall.args[0] = a0;
                run->hypercall.args[1] = a1;
                run->hypercall.args[2] = a2;
                run->hypercall.args[3] = a3;
                run->hypercall.args[4] = a4;
                run->hypercall.args[5] = a5;
                run->hypercall.ret = ret;
                run->hypercall.longmode = is_long_mode(vcpu);
                kvm_arch_ops->decache_regs(vcpu);
                return 0;
        }
        vcpu->regs[VCPU_REGS_RAX] = ret;
        kvm_arch_ops->decache_regs(vcpu);
        return 1;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

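/* Replace the low 32 bits of a control register, keeping the high bits. */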
1443 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
1444 {
1445         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
1446 }
1447
1448 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1449 {
1450         struct descriptor_table dt = { limit, base };
1451
1452         kvm_arch_ops->set_gdt(vcpu, &dt);
1453 }
1454
1455 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1456 {
1457         struct descriptor_table dt = { limit, base };
1458
1459         kvm_arch_ops->set_idt(vcpu, &dt);
1460 }
1461
1462 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
1463                    unsigned long *rflags)
1464 {
1465         lmsw(vcpu, msw);
1466         *rflags = kvm_arch_ops->get_rflags(vcpu);
1467 }
1468
1469 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
1470 {
1471         kvm_arch_ops->decache_cr4_guest_bits(vcpu);
1472         switch (cr) {
1473         case 0:
1474                 return vcpu->cr0;
1475         case 2:
1476                 return vcpu->cr2;
1477         case 3:
1478                 return vcpu->cr3;
1479         case 4:
1480                 return vcpu->cr4;
1481         default:
1482                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
1483                 return 0;
1484         }
1485 }
1486
1487 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
1488                      unsigned long *rflags)
1489 {
1490         switch (cr) {
1491         case 0:
1492                 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
1493                 *rflags = kvm_arch_ops->get_rflags(vcpu);
1494                 break;
1495         case 2:
1496                 vcpu->cr2 = val;
1497                 break;
1498         case 3:
1499                 set_cr3(vcpu, val);
1500                 break;
1501         case 4:
1502                 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
1503                 break;
1504         default:
1505                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
1506         }
1507 }
1508
1509 /*
1510  * Register the para guest with the host:
1511  */
1512 static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
1513 {
1514         struct kvm_vcpu_para_state *para_state;
1515         hpa_t para_state_hpa, hypercall_hpa;
1516         struct page *para_state_page;
1517         unsigned char *hypercall;
1518         gpa_t hypercall_gpa;
1519
1520         printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
1521         printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);
1522
1523         /*
1524          * Needs to be page aligned:
1525          */
1526         if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
1527                 goto err_gp;
1528
1529         para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
1530         printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
1531         if (is_error_hpa(para_state_hpa))
1532                 goto err_gp;
1533
1534         mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
1535         para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
1536         para_state = kmap(para_state_page);
1537
1538         printk(KERN_DEBUG "....  guest version: %d\n", para_state->guest_version);
1539         printk(KERN_DEBUG "....           size: %d\n", para_state->size);
1540
1541         para_state->host_version = KVM_PARA_API_VERSION;
1542         /*
1543          * We cannot support guests that try to register themselves
1544          * with a newer API version than the host supports:
1545          */
1546         if (para_state->guest_version > KVM_PARA_API_VERSION) {
1547                 para_state->ret = -KVM_EINVAL;
1548                 goto err_kunmap_skip;
1549         }
1550
1551         hypercall_gpa = para_state->hypercall_gpa;
1552         hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
1553         printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
1554         if (is_error_hpa(hypercall_hpa)) {
1555                 para_state->ret = -KVM_EINVAL;
1556                 goto err_kunmap_skip;
1557         }
1558
1559         printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
1560         vcpu->para_state_page = para_state_page;
1561         vcpu->para_state_gpa = para_state_gpa;
1562         vcpu->hypercall_gpa = hypercall_gpa;
1563
1564         mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
1565         hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
1566                                 KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
1567         kvm_arch_ops->patch_hypercall(vcpu, hypercall);
1568         kunmap_atomic(hypercall, KM_USER1);
1569
1570         para_state->ret = 0;
1571 err_kunmap_skip:
1572         kunmap(para_state_page);
1573         return 0;
1574 err_gp:
1575         return 1;
1576 }
1577
1578 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1579 {
1580         u64 data;
1581
1582         switch (msr) {
1583         case 0xc0010010: /* SYSCFG */
1584         case 0xc0010015: /* HWCR */
1585         case MSR_IA32_PLATFORM_ID:
1586         case MSR_IA32_P5_MC_ADDR:
1587         case MSR_IA32_P5_MC_TYPE:
1588         case MSR_IA32_MC0_CTL:
1589         case MSR_IA32_MCG_STATUS:
1590         case MSR_IA32_MCG_CAP:
1591         case MSR_IA32_MC0_MISC:
1592         case MSR_IA32_MC0_MISC+4:
1593         case MSR_IA32_MC0_MISC+8:
1594         case MSR_IA32_MC0_MISC+12:
1595         case MSR_IA32_MC0_MISC+16:
1596         case MSR_IA32_UCODE_REV:
1597         case MSR_IA32_PERF_STATUS:
1598         case MSR_IA32_EBL_CR_POWERON:
1599                 /* MTRR registers */
1600         case 0xfe:
1601         case 0x200 ... 0x2ff:
1602                 data = 0;
1603                 break;
1604         case 0xcd: /* fsb frequency */
1605                 data = 3;
1606                 break;
1607         case MSR_IA32_APICBASE:
1608                 data = kvm_get_apic_base(vcpu);
1609                 break;
1610         case MSR_IA32_MISC_ENABLE:
1611                 data = vcpu->ia32_misc_enable_msr;
1612                 break;
1613 #ifdef CONFIG_X86_64
1614         case MSR_EFER:
1615                 data = vcpu->shadow_efer;
1616                 break;
1617 #endif
1618         default:
1619                 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1620                 return 1;
1621         }
1622         *pdata = data;
1623         return 0;
1624 }
1625 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1626
1627 /*
1628  * Reads an msr value (of 'msr_index') into 'pdata'.
1629  * Returns 0 on success, non-0 otherwise.
1630  * Assumes vcpu_load() was already called.
1631  */
1632 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1633 {
1634         return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
1635 }
1636
1637 #ifdef CONFIG_X86_64
1638
1639 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1640 {
1641         if (efer & EFER_RESERVED_BITS) {
1642                 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
1643                        efer);
1644                 inject_gp(vcpu);
1645                 return;
1646         }
1647
1648         if (is_paging(vcpu)
1649             && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
1650                 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
1651                 inject_gp(vcpu);
1652                 return;
1653         }
1654
1655         kvm_arch_ops->set_efer(vcpu, efer);
1656
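	/*
	 * EFER.LMA is set by the cpu, not by wrmsr: preserve the current
	 * shadow value rather than taking the bit being written.
	 */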
1657         efer &= ~EFER_LMA;
1658         efer |= vcpu->shadow_efer & EFER_LMA;
1659
1660         vcpu->shadow_efer = efer;
1661 }
1662
1663 #endif
1664
1665 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1666 {
1667         switch (msr) {
1668 #ifdef CONFIG_X86_64
1669         case MSR_EFER:
1670                 set_efer(vcpu, data);
1671                 break;
1672 #endif
1673         case MSR_IA32_MC0_STATUS:
1674                 pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
1675                        __FUNCTION__, data);
1676                 break;
1677         case MSR_IA32_MCG_STATUS:
1678                 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
1679                         __FUNCTION__, data);
1680                 break;
1681         case MSR_IA32_UCODE_REV:
1682         case MSR_IA32_UCODE_WRITE:
1683         case 0x200 ... 0x2ff: /* MTRRs */
1684                 break;
1685         case MSR_IA32_APICBASE:
1686                 kvm_set_apic_base(vcpu, data);
1687                 break;
1688         case MSR_IA32_MISC_ENABLE:
1689                 vcpu->ia32_misc_enable_msr = data;
1690                 break;
1691         /*
1692          * This is the 'probe whether the host is KVM' logic:
1693          */
1694         case MSR_KVM_API_MAGIC:
1695                 return vcpu_register_para(vcpu, data);
1696
1697         default:
1698                 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
1699                 return 1;
1700         }
1701         return 0;
1702 }
1703 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1704
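/*
 * Note on the MSR_KVM_API_MAGIC case above: a paravirtualized guest
 * probes for a KVM host by writing the guest-physical address of its
 * para_state page to this MSR.  On KVM the write reaches
 * vcpu_register_para(); on bare metal the unknown MSR raises #GP, so
 * the guest can tell the two apart.
 */
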
1705 /*
1706  * Writes msr value into the appropriate "register".
1707  * Returns 0 on success, non-0 otherwise.
1708  * Assumes vcpu_load() was already called.
1709  */
1710 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1711 {
1712         return kvm_arch_ops->set_msr(vcpu, msr_index, data);
1713 }
1714
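/*
 * Hedged usage sketch (illustrative only, not code from this file): an
 * arch-level rdmsr exit handler would typically do
 *
 *	u64 data;
 *
 *	if (kvm_get_msr(vcpu, vcpu->regs[VCPU_REGS_RCX], &data))
 *		inject_gp(vcpu);	-- unhandled MSR: fault the guest
 *	else
 *		-- split data across RAX:RDX and skip the instruction
 *
 * and similarly kvm_set_msr() for wrmsr; a non-zero return means the
 * access should raise #GP rather than be emulated.
 */
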
1715 void kvm_resched(struct kvm_vcpu *vcpu)
1716 {
1717         if (!need_resched())
1718                 return;
1719         cond_resched();
1720 }
1721 EXPORT_SYMBOL_GPL(kvm_resched);
1722
1723 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1724 {
1725         int i;
1726         u32 function;
1727         struct kvm_cpuid_entry *e, *best;
1728
1729         kvm_arch_ops->cache_regs(vcpu);
1730         function = vcpu->regs[VCPU_REGS_RAX];
1731         vcpu->regs[VCPU_REGS_RAX] = 0;
1732         vcpu->regs[VCPU_REGS_RBX] = 0;
1733         vcpu->regs[VCPU_REGS_RCX] = 0;
1734         vcpu->regs[VCPU_REGS_RDX] = 0;
1735         best = NULL;
1736         for (i = 0; i < vcpu->cpuid_nent; ++i) {
1737                 e = &vcpu->cpuid_entries[i];
1738                 if (e->function == function) {
1739                         best = e;
1740                         break;
1741                 }
1742                 /*
1743                  * Both basic or both extended?  Keep the highest as fallback.
1744                  */
1745                 if (((e->function ^ function) & 0x80000000) == 0)
1746                         if (!best || e->function > best->function)
1747                                 best = e;
1748         }
1749         if (best) {
1750                 vcpu->regs[VCPU_REGS_RAX] = best->eax;
1751                 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
1752                 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
1753                 vcpu->regs[VCPU_REGS_RDX] = best->edx;
1754         }
1755         kvm_arch_ops->decache_regs(vcpu);
1756         kvm_arch_ops->skip_emulated_instruction(vcpu);
1757 }
1758 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1759
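/*
 * Copy between the vcpu's pio_data scratch page and the pinned guest
 * pages: pio_data -> guest memory for INS, guest memory -> pio_data
 * for OUTS.  The guest pages are unpinned on both the success and the
 * error path.
 */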
1760 static int pio_copy_data(struct kvm_vcpu *vcpu)
1761 {
1762         void *p = vcpu->pio_data;
1763         void *q;
1764         unsigned bytes;
1765         int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
1766
1767         q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1768                  PAGE_KERNEL);
1769         if (!q) {
1770                 free_pio_guest_pages(vcpu);
1771                 return -ENOMEM;
1772         }
1773         q += vcpu->pio.guest_page_offset;
1774         bytes = vcpu->pio.size * vcpu->pio.cur_count;
1775         if (vcpu->pio.in)
1776                 memcpy(q, p, bytes);
1777         else
1778                 memcpy(p, q, bytes);
1779         q -= vcpu->pio.guest_page_offset;
1780         vunmap(q);
1781         free_pio_guest_pages(vcpu);
1782         return 0;
1783 }
1784
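/*
 * Finish a port I/O operation once its data is available: latch IN
 * data into RAX (or into guest memory for string I/O), advance RSI/RDI
 * and decrement RCX for REP string ops, and skip the emulated
 * instruction once the whole count has been transferred.
 */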
1785 static int complete_pio(struct kvm_vcpu *vcpu)
1786 {
1787         struct kvm_pio_request *io = &vcpu->pio;
1788         long delta;
1789         int r;
1790
1791         kvm_arch_ops->cache_regs(vcpu);
1792
1793         if (!io->string) {
1794                 if (io->in)
1795                         memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
1796                                io->size);
1797         } else {
1798                 if (io->in) {
1799                         r = pio_copy_data(vcpu);
1800                         if (r) {
1801                                 kvm_arch_ops->cache_regs(vcpu);
1802                                 return r;
1803                         }
1804                 }
1805
1806                 delta = 1;
1807                 if (io->rep) {
1808                         delta *= io->cur_count;
1809                         /*
1810                          * The size of the register should really depend on
1811                          * current address size.
1812                          */
1813                         vcpu->regs[VCPU_REGS_RCX] -= delta;
1814                 }
1815                 if (io->down)
1816                         delta = -delta;
1817                 delta *= io->size;
1818                 if (io->in)
1819                         vcpu->regs[VCPU_REGS_RDI] += delta;
1820                 else
1821                         vcpu->regs[VCPU_REGS_RSI] += delta;
1822         }
1823
1824         kvm_arch_ops->decache_regs(vcpu);
1825
1826         io->count -= io->cur_count;
1827         io->cur_count = 0;
1828
1829         if (!io->count)
1830                 kvm_arch_ops->skip_emulated_instruction(vcpu);
1831         return 0;
1832 }
1833
1834 static void kernel_pio(struct kvm_io_device *pio_dev,
1835                        struct kvm_vcpu *vcpu,
1836                        void *pd)
1837 {
1838         /* TODO: String I/O for in kernel device */
1839
1840         mutex_lock(&vcpu->kvm->lock);
1841         if (vcpu->pio.in)
1842                 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1843                                   vcpu->pio.size,
1844                                   pd);
1845         else
1846                 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1847                                    vcpu->pio.size,
1848                                    pd);
1849         mutex_unlock(&vcpu->kvm->lock);
1850 }
1851
1852 static void pio_string_write(struct kvm_io_device *pio_dev,
1853                              struct kvm_vcpu *vcpu)
1854 {
1855         struct kvm_pio_request *io = &vcpu->pio;
1856         void *pd = vcpu->pio_data;
1857         int i;
1858
1859         mutex_lock(&vcpu->kvm->lock);
1860         for (i = 0; i < io->cur_count; i++) {
1861                 kvm_iodevice_write(pio_dev, io->port,
1862                                    io->size,
1863                                    pd);
1864                 pd += io->size;
1865         }
1866         mutex_unlock(&vcpu->kvm->lock);
1867 }
1868
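/*
 * Emulate a single (non-string) port access.  Returns 1 if the access
 * was handled by an in-kernel device, 0 if it has been queued in
 * vcpu->run for completion by userspace.
 */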
1869 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1870                   int size, unsigned port)
1871 {
1872         struct kvm_io_device *pio_dev;
1873
1874         vcpu->run->exit_reason = KVM_EXIT_IO;
1875         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1876         vcpu->run->io.size = vcpu->pio.size = size;
1877         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1878         vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
1879         vcpu->run->io.port = vcpu->pio.port = port;
1880         vcpu->pio.in = in;
1881         vcpu->pio.string = 0;
1882         vcpu->pio.down = 0;
1883         vcpu->pio.guest_page_offset = 0;
1884         vcpu->pio.rep = 0;
1885
1886         kvm_arch_ops->cache_regs(vcpu);
1887         memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1888         kvm_arch_ops->decache_regs(vcpu);
1889
1890         pio_dev = vcpu_find_pio_dev(vcpu, port);
1891         if (pio_dev) {
1892                 kernel_pio(pio_dev, vcpu, vcpu->pio_data);
1893                 complete_pio(vcpu);
1894                 return 1;
1895         }
1896         return 0;
1897 }
1898 EXPORT_SYMBOL_GPL(kvm_emulate_pio);
1899
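/*
 * Emulate INS/OUTS.  A transfer is clipped to one page (pinning two
 * guest pages when a single unit straddles a page boundary), so a long
 * REP operation is completed incrementally, re-entering here until the
 * count is exhausted.
 */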
1900 int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1901                   int size, unsigned long count, int down,
1902                   gva_t address, int rep, unsigned port)
1903 {
1904         unsigned now, in_page;
1905         int i, ret = 0;
1906         int nr_pages = 1;
1907         struct page *page;
1908         struct kvm_io_device *pio_dev;
1909
1910         vcpu->run->exit_reason = KVM_EXIT_IO;
1911         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1912         vcpu->run->io.size = vcpu->pio.size = size;
1913         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1914         vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
1915         vcpu->run->io.port = vcpu->pio.port = port;
1916         vcpu->pio.in = in;
1917         vcpu->pio.string = 1;
1918         vcpu->pio.down = down;
1919         vcpu->pio.guest_page_offset = offset_in_page(address);
1920         vcpu->pio.rep = rep;
1921
1922         if (!count) {
1923                 kvm_arch_ops->skip_emulated_instruction(vcpu);
1924                 return 1;
1925         }
1926
1927         if (!down)
1928                 in_page = PAGE_SIZE - offset_in_page(address);
1929         else
1930                 in_page = offset_in_page(address) + size;
1931         now = min(count, (unsigned long)in_page / size);
1932         if (!now) {
1933                 /*
1934                  * String I/O straddles page boundary.  Pin two guest pages
1935                  * so that we satisfy atomicity constraints.  Do just one
1936                  * transaction to avoid complexity.
1937                  */
1938                 nr_pages = 2;
1939                 now = 1;
1940         }
1941         if (down) {
1942                 /*
1943                  * String I/O in reverse.  Yuck.  Kill the guest, fix later.
1944                  */
1945                 pr_unimpl(vcpu, "guest string pio down\n");
1946                 inject_gp(vcpu);
1947                 return 1;
1948         }
1949         vcpu->run->io.count = now;
1950         vcpu->pio.cur_count = now;
1951
1952         for (i = 0; i < nr_pages; ++i) {
1953                 mutex_lock(&vcpu->kvm->lock);
1954                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
1955                 if (page)
1956                         get_page(page);
1957                 vcpu->pio.guest_pages[i] = page;
1958                 mutex_unlock(&vcpu->kvm->lock);
1959                 if (!page) {
1960                         inject_gp(vcpu);
1961                         free_pio_guest_pages(vcpu);
1962                         return 1;
1963                 }
1964         }
1965
1966         pio_dev = vcpu_find_pio_dev(vcpu, port);
1967         if (!vcpu->pio.in) {
1968                 /* string PIO write */
1969                 ret = pio_copy_data(vcpu);
1970                 if (ret >= 0 && pio_dev) {
1971                         pio_string_write(pio_dev, vcpu);
1972                         complete_pio(vcpu);
1973                         if (vcpu->pio.count == 0)
1974                                 ret = 1;
1975                 }
1976         } else if (pio_dev)
1977                 pr_unimpl(vcpu, "no string pio read support yet, "
1978                        "port %x size %d count %ld\n",
1979                         port, size, count);
1980
1981         return ret;
1982 }
1983 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
1984
1985 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1986 {
1987         int r;
1988         sigset_t sigsaved;
1989
1990         vcpu_load(vcpu);
1991
1992         if (vcpu->sigset_active)
1993                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1994
1995         /* re-sync apic's tpr */
1996         set_cr8(vcpu, kvm_run->cr8);
1997
1998         if (vcpu->pio.cur_count) {
1999                 r = complete_pio(vcpu);
2000                 if (r)
2001                         goto out;
2002         }
2003
2004         if (vcpu->mmio_needed) {
2005                 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2006                 vcpu->mmio_read_completed = 1;
2007                 vcpu->mmio_needed = 0;
2008                 r = emulate_instruction(vcpu, kvm_run,
2009                                         vcpu->mmio_fault_cr2, 0);
2010                 if (r == EMULATE_DO_MMIO) {
2011                         /*
2012                          * Read-modify-write.  Back to userspace.
2013                          */
2014                         r = 0;
2015                         goto out;
2016                 }
2017         }
2018
2019         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2020                 kvm_arch_ops->cache_regs(vcpu);
2021                 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
2022                 kvm_arch_ops->decache_regs(vcpu);
2023         }
2024
2025         r = kvm_arch_ops->run(vcpu, kvm_run);
2026
2027 out:
2028         if (vcpu->sigset_active)
2029                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2030
2031         vcpu_put(vcpu);
2032         return r;
2033 }
2034
2035 static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
2036                                    struct kvm_regs *regs)
2037 {
2038         vcpu_load(vcpu);
2039
2040         kvm_arch_ops->cache_regs(vcpu);
2041
2042         regs->rax = vcpu->regs[VCPU_REGS_RAX];
2043         regs->rbx = vcpu->regs[VCPU_REGS_RBX];
2044         regs->rcx = vcpu->regs[VCPU_REGS_RCX];
2045         regs->rdx = vcpu->regs[VCPU_REGS_RDX];
2046         regs->rsi = vcpu->regs[VCPU_REGS_RSI];
2047         regs->rdi = vcpu->regs[VCPU_REGS_RDI];
2048         regs->rsp = vcpu->regs[VCPU_REGS_RSP];
2049         regs->rbp = vcpu->regs[VCPU_REGS_RBP];
2050 #ifdef CONFIG_X86_64
2051         regs->r8 = vcpu->regs[VCPU_REGS_R8];
2052         regs->r9 = vcpu->regs[VCPU_REGS_R9];
2053         regs->r10 = vcpu->regs[VCPU_REGS_R10];
2054         regs->r11 = vcpu->regs[VCPU_REGS_R11];
2055         regs->r12 = vcpu->regs[VCPU_REGS_R12];
2056         regs->r13 = vcpu->regs[VCPU_REGS_R13];
2057         regs->r14 = vcpu->regs[VCPU_REGS_R14];
2058         regs->r15 = vcpu->regs[VCPU_REGS_R15];
2059 #endif
2060
2061         regs->rip = vcpu->rip;
2062         regs->rflags = kvm_arch_ops->get_rflags(vcpu);
2063
2064         /*
2065          * Don't leak debug flags in case they were set for guest debugging
2066          */
2067         if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
2068                 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
2069
2070         vcpu_put(vcpu);
2071
2072         return 0;
2073 }
2074
2075 static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
2076                                    struct kvm_regs *regs)
2077 {
2078         vcpu_load(vcpu);
2079
2080         vcpu->regs[VCPU_REGS_RAX] = regs->rax;
2081         vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
2082         vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
2083         vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
2084         vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
2085         vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
2086         vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
2087         vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
2088 #ifdef CONFIG_X86_64
2089         vcpu->regs[VCPU_REGS_R8] = regs->r8;
2090         vcpu->regs[VCPU_REGS_R9] = regs->r9;
2091         vcpu->regs[VCPU_REGS_R10] = regs->r10;
2092         vcpu->regs[VCPU_REGS_R11] = regs->r11;
2093         vcpu->regs[VCPU_REGS_R12] = regs->r12;
2094         vcpu->regs[VCPU_REGS_R13] = regs->r13;
2095         vcpu->regs[VCPU_REGS_R14] = regs->r14;
2096         vcpu->regs[VCPU_REGS_R15] = regs->r15;
2097 #endif
2098
2099         vcpu->rip = regs->rip;
2100         kvm_arch_ops->set_rflags(vcpu, regs->rflags);
2101
2102         kvm_arch_ops->decache_regs(vcpu);
2103
2104         vcpu_put(vcpu);
2105
2106         return 0;
2107 }
2108
2109 static void get_segment(struct kvm_vcpu *vcpu,
2110                         struct kvm_segment *var, int seg)
2111 {
2112         kvm_arch_ops->get_segment(vcpu, var, seg);
2113 }
2114
2115 static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2116                                     struct kvm_sregs *sregs)
2117 {
2118         struct descriptor_table dt;
2119
2120         vcpu_load(vcpu);
2121
2122         get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2123         get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2124         get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2125         get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2126         get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2127         get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2128
2129         get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2130         get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2131
2132         kvm_arch_ops->get_idt(vcpu, &dt);
2133         sregs->idt.limit = dt.limit;
2134         sregs->idt.base = dt.base;
2135         kvm_arch_ops->get_gdt(vcpu, &dt);
2136         sregs->gdt.limit = dt.limit;
2137         sregs->gdt.base = dt.base;
2138
2139         kvm_arch_ops->decache_cr4_guest_bits(vcpu);
2140         sregs->cr0 = vcpu->cr0;
2141         sregs->cr2 = vcpu->cr2;
2142         sregs->cr3 = vcpu->cr3;
2143         sregs->cr4 = vcpu->cr4;
2144         sregs->cr8 = get_cr8(vcpu);
2145         sregs->efer = vcpu->shadow_efer;
2146         sregs->apic_base = kvm_get_apic_base(vcpu);
2147
2148         if (irqchip_in_kernel(vcpu->kvm))
2149                 memset(sregs->interrupt_bitmap, 0,
2150                        sizeof sregs->interrupt_bitmap);
2151         else
2152                 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2153                        sizeof sregs->interrupt_bitmap);
2154
2155         vcpu_put(vcpu);
2156
2157         return 0;
2158 }
2159
2160 static void set_segment(struct kvm_vcpu *vcpu,
2161                         struct kvm_segment *var, int seg)
2162 {
2163         kvm_arch_ops->set_segment(vcpu, var, seg);
2164 }
2165
2166 static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2167                                     struct kvm_sregs *sregs)
2168 {
2169         int mmu_reset_needed = 0;
2170         int i;
2171         struct descriptor_table dt;
2172
2173         vcpu_load(vcpu);
2174
2175         dt.limit = sregs->idt.limit;
2176         dt.base = sregs->idt.base;
2177         kvm_arch_ops->set_idt(vcpu, &dt);
2178         dt.limit = sregs->gdt.limit;
2179         dt.base = sregs->gdt.base;
2180         kvm_arch_ops->set_gdt(vcpu, &dt);
2181
2182         vcpu->cr2 = sregs->cr2;
2183         mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2184         vcpu->cr3 = sregs->cr3;
2185
2186         set_cr8(vcpu, sregs->cr8);
2187
2188         mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
2189 #ifdef CONFIG_X86_64
2190         kvm_arch_ops->set_efer(vcpu, sregs->efer);
2191 #endif
2192         kvm_set_apic_base(vcpu, sregs->apic_base);
2193
2194         kvm_arch_ops->decache_cr4_guest_bits(vcpu);
2195
2196         mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
2197         kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
2198
2199         mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
2200         kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
2201         if (!is_long_mode(vcpu) && is_pae(vcpu))
2202                 load_pdptrs(vcpu, vcpu->cr3);
2203
2204         if (mmu_reset_needed)
2205                 kvm_mmu_reset_context(vcpu);
2206
2207         if (!irqchip_in_kernel(vcpu->kvm)) {
2208                 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2209                        sizeof vcpu->irq_pending);
2210                 vcpu->irq_summary = 0;
2211                 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
2212                         if (vcpu->irq_pending[i])
2213                                 __set_bit(i, &vcpu->irq_summary);
2214         }
2215
2216         set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2217         set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2218         set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2219         set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2220         set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2221         set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2222
2223         set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2224         set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2225
2226         vcpu_put(vcpu);
2227
2228         return 0;
2229 }
2230
2231 /*
2232  * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
2233  * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
2234  *
2235  * This list is modified at module load time to reflect the
2236  * capabilities of the host cpu.
2237  */
2238 static u32 msrs_to_save[] = {
2239         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
2240         MSR_K6_STAR,
2241 #ifdef CONFIG_X86_64
2242         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
2243 #endif
2244         MSR_IA32_TIME_STAMP_COUNTER,
2245 };
2246
2247 static unsigned num_msrs_to_save;
2248
2249 static u32 emulated_msrs[] = {
2250         MSR_IA32_MISC_ENABLE,
2251 };
2252
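/*
 * Probe each candidate MSR with rdmsr_safe() and compact msrs_to_save
 * in place, so the list ends up holding only MSRs this host actually
 * implements.
 */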
2253 static __init void kvm_init_msr_list(void)
2254 {
2255         u32 dummy[2];
2256         unsigned i, j;
2257
2258         for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2259                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2260                         continue;
2261                 if (j < i)
2262                         msrs_to_save[j] = msrs_to_save[i];
2263                 j++;
2264         }
2265         num_msrs_to_save = j;
2266 }
2267
2268 /*
2269  * Adapt set_msr() to msr_io()'s calling convention
2270  */
2271 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2272 {
2273         return kvm_set_msr(vcpu, index, *data);
2274 }
2275
2276 /*
2277  * Read or write a bunch of msrs. All parameters are kernel addresses.
2278  *
2279  * @return number of msrs set successfully.
2280  */
2281 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2282                     struct kvm_msr_entry *entries,
2283                     int (*do_msr)(struct kvm_vcpu *vcpu,
2284                                   unsigned index, u64 *data))
2285 {
2286         int i;
2287
2288         vcpu_load(vcpu);
2289
2290         for (i = 0; i < msrs->nmsrs; ++i)
2291                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2292                         break;
2293
2294         vcpu_put(vcpu);
2295
2296         return i;
2297 }
2298
2299 /*
2300  * Read or write a bunch of msrs. Parameters are user addresses.
2301  *
2302  * @return number of msrs set successfully.
2303  */
2304 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2305                   int (*do_msr)(struct kvm_vcpu *vcpu,
2306                                 unsigned index, u64 *data),
2307                   int writeback)
2308 {
2309         struct kvm_msrs msrs;
2310         struct kvm_msr_entry *entries;
2311         int r, n;
2312         unsigned size;
2313
2314         r = -EFAULT;
2315         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2316                 goto out;
2317
2318         r = -E2BIG;
2319         if (msrs.nmsrs >= MAX_IO_MSRS)
2320                 goto out;
2321
2322         r = -ENOMEM;
2323         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2324         entries = vmalloc(size);
2325         if (!entries)
2326                 goto out;
2327
2328         r = -EFAULT;
2329         if (copy_from_user(entries, user_msrs->entries, size))
2330                 goto out_free;
2331
2332         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2333         if (r < 0)
2334                 goto out_free;
2335
2336         r = -EFAULT;
2337         if (writeback && copy_to_user(user_msrs->entries, entries, size))
2338                 goto out_free;
2339
2340         r = n;
2341
2342 out_free:
2343         vfree(entries);
2344 out:
2345         return r;
2346 }
2347
2348 /*
2349  * Translate a guest virtual address to a guest physical address.
2350  */
2351 static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2352                                     struct kvm_translation *tr)
2353 {
2354         unsigned long vaddr = tr->linear_address;
2355         gpa_t gpa;
2356
2357         vcpu_load(vcpu);
2358         mutex_lock(&vcpu->kvm->lock);
2359         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2360         tr->physical_address = gpa;
2361         tr->valid = gpa != UNMAPPED_GVA;
2362         tr->writeable = 1;
2363         tr->usermode = 0;
2364         mutex_unlock(&vcpu->kvm->lock);
2365         vcpu_put(vcpu);
2366
2367         return 0;
2368 }
2369
2370 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2371                                     struct kvm_interrupt *irq)
2372 {
2373         if (irq->irq < 0 || irq->irq >= 256)
2374                 return -EINVAL;
2375         if (irqchip_in_kernel(vcpu->kvm))
2376                 return -ENXIO;
2377         vcpu_load(vcpu);
2378
2379         set_bit(irq->irq, vcpu->irq_pending);
2380         set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
2381
2382         vcpu_put(vcpu);
2383
2384         return 0;
2385 }
2386
2387 static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2388                                       struct kvm_debug_guest *dbg)
2389 {
2390         int r;
2391
2392         vcpu_load(vcpu);
2393
2394         r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
2395
2396         vcpu_put(vcpu);
2397
2398         return r;
2399 }
2400
2401 static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
2402                                     unsigned long address,
2403                                     int *type)
2404 {
2405         struct kvm_vcpu *vcpu = vma->vm_file->private_data;
2406         unsigned long pgoff;
2407         struct page *page;
2408
2409         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2410         if (pgoff == 0)
2411                 page = virt_to_page(vcpu->run);
2412         else if (pgoff == KVM_PIO_PAGE_OFFSET)
2413                 page = virt_to_page(vcpu->pio_data);
2414         else
2415                 return NOPAGE_SIGBUS;
2416         get_page(page);
2417         if (type != NULL)
2418                 *type = VM_FAULT_MINOR;
2419
2420         return page;
2421 }
2422
2423 static struct vm_operations_struct kvm_vcpu_vm_ops = {
2424         .nopage = kvm_vcpu_nopage,
2425 };
2426
2427 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
2428 {
2429         vma->vm_ops = &kvm_vcpu_vm_ops;
2430         return 0;
2431 }
2432
2433 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2434 {
2435         struct kvm_vcpu *vcpu = filp->private_data;
2436
2437         fput(vcpu->kvm->filp);
2438         return 0;
2439 }
2440
2441 static struct file_operations kvm_vcpu_fops = {
2442         .release        = kvm_vcpu_release,
2443         .unlocked_ioctl = kvm_vcpu_ioctl,
2444         .compat_ioctl   = kvm_vcpu_ioctl,
2445         .mmap           = kvm_vcpu_mmap,
2446 };
2447
2448 /*
2449  * Allocates an inode for the vcpu.
2450  */
2451 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2452 {
2453         int fd, r;
2454         struct inode *inode;
2455         struct file *file;
2456
2457         r = anon_inode_getfd(&fd, &inode, &file,
2458                              "kvm-vcpu", &kvm_vcpu_fops, vcpu);
2459         if (r)
2460                 return r;
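        /*
         * The vcpu fd pins the VM's file (and therefore the VM itself);
         * the reference is dropped by fput() in kvm_vcpu_release().
         */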
2461         atomic_inc(&vcpu->kvm->filp->f_count);
2462         return fd;
2463 }
2464
2465 /*
2466  * Creates some virtual cpus.  Good luck creating more than one.
2467  */
2468 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2469 {
2470         int r;
2471         struct kvm_vcpu *vcpu;
2472
2473         if (!valid_vcpu(n))
2474                 return -EINVAL;
2475
2476         vcpu = kvm_arch_ops->vcpu_create(kvm, n);
2477         if (IS_ERR(vcpu))
2478                 return PTR_ERR(vcpu);
2479
2480         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2481
2482         /* We do fxsave: this must be aligned. */
2483         BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2484
2485         vcpu_load(vcpu);
2486         r = kvm_mmu_setup(vcpu);
2487         vcpu_put(vcpu);
2488         if (r < 0)
2489                 goto free_vcpu;
2490
2491         mutex_lock(&kvm->lock);
2492         if (kvm->vcpus[n]) {
2493                 r = -EEXIST;
2494                 mutex_unlock(&kvm->lock);
2495                 goto mmu_unload;
2496         }
2497         kvm->vcpus[n] = vcpu;
2498         mutex_unlock(&kvm->lock);
2499
2500         /* Now it's all set up, let userspace reach it */
2501         r = create_vcpu_fd(vcpu);
2502         if (r < 0)
2503                 goto unlink;
2504         return r;
2505
2506 unlink:
2507         mutex_lock(&kvm->lock);
2508         kvm->vcpus[n] = NULL;
2509         mutex_unlock(&kvm->lock);
2510
2511 mmu_unload:
2512         vcpu_load(vcpu);
2513         kvm_mmu_unload(vcpu);
2514         vcpu_put(vcpu);
2515
2516 free_vcpu:
2517         kvm_arch_ops->vcpu_free(vcpu);
2518         return r;
2519 }
2520
2521 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2522 {
2523         u64 efer;
2524         int i;
2525         struct kvm_cpuid_entry *e, *entry;
2526
2527         rdmsrl(MSR_EFER, efer);
2528         entry = NULL;
2529         for (i = 0; i < vcpu->cpuid_nent; ++i) {
2530                 e = &vcpu->cpuid_entries[i];
2531                 if (e->function == 0x80000001) {
2532                         entry = e;
2533                         break;
2534                 }
2535         }
2536         if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
2537                 entry->edx &= ~(1 << 20);
2538                 printk(KERN_INFO "kvm: guest NX capability removed\n");
2539         }
2540 }
2541
2542 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2543                                     struct kvm_cpuid *cpuid,
2544                                     struct kvm_cpuid_entry __user *entries)
2545 {
2546         int r;
2547
2548         r = -E2BIG;
2549         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2550                 goto out;
2551         r = -EFAULT;
2552         if (copy_from_user(&vcpu->cpuid_entries, entries,
2553                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2554                 goto out;
2555         vcpu->cpuid_nent = cpuid->nent;
2556         cpuid_fix_nx_cap(vcpu);
2557         return 0;
2558
2559 out:
2560         return r;
2561 }
2562
2563 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
2564 {
2565         if (sigset) {
2566                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2567                 vcpu->sigset_active = 1;
2568                 vcpu->sigset = *sigset;
2569         } else
2570                 vcpu->sigset_active = 0;
2571         return 0;
2572 }
2573
2574 /*
2575  * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
2576  * we have asm/x86/processor.h
2577  */
2578 struct fxsave {
2579         u16     cwd;
2580         u16     swd;
2581         u16     twd;
2582         u16     fop;
2583         u64     rip;
2584         u64     rdp;
2585         u32     mxcsr;
2586         u32     mxcsr_mask;
2587         u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
2588 #ifdef CONFIG_X86_64
2589         u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
2590 #else
2591         u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
2592 #endif
2593 };
2594
2595 static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2596 {
2597         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2598
2599         vcpu_load(vcpu);
2600
2601         memcpy(fpu->fpr, fxsave->st_space, 128);
2602         fpu->fcw = fxsave->cwd;
2603         fpu->fsw = fxsave->swd;
2604         fpu->ftwx = fxsave->twd;
2605         fpu->last_opcode = fxsave->fop;
2606         fpu->last_ip = fxsave->rip;
2607         fpu->last_dp = fxsave->rdp;
2608         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
2609
2610         vcpu_put(vcpu);
2611
2612         return 0;
2613 }
2614
2615 static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2616 {
2617         struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
2618
2619         vcpu_load(vcpu);
2620
2621         memcpy(fxsave->st_space, fpu->fpr, 128);
2622         fxsave->cwd = fpu->fcw;
2623         fxsave->swd = fpu->fsw;
2624         fxsave->twd = fpu->ftwx;
2625         fxsave->fop = fpu->last_opcode;
2626         fxsave->rip = fpu->last_ip;
2627         fxsave->rdp = fpu->last_dp;
2628         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
2629
2630         vcpu_put(vcpu);
2631
2632         return 0;
2633 }
2634
2635 static long kvm_vcpu_ioctl(struct file *filp,
2636                            unsigned int ioctl, unsigned long arg)
2637 {
2638         struct kvm_vcpu *vcpu = filp->private_data;
2639         void __user *argp = (void __user *)arg;
2640         int r = -EINVAL;
2641
2642         switch (ioctl) {
2643         case KVM_RUN:
2644                 r = -EINVAL;
2645                 if (arg)
2646                         goto out;
2647                 r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
2648                 break;
2649         case KVM_GET_REGS: {
2650                 struct kvm_regs kvm_regs;
2651
2652                 memset(&kvm_regs, 0, sizeof kvm_regs);
2653                 r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
2654                 if (r)
2655                         goto out;
2656                 r = -EFAULT;
2657                 if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
2658                         goto out;
2659                 r = 0;
2660                 break;
2661         }
2662         case KVM_SET_REGS: {
2663                 struct kvm_regs kvm_regs;
2664
2665                 r = -EFAULT;
2666                 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
2667                         goto out;
2668                 r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
2669                 if (r)
2670                         goto out;
2671                 r = 0;
2672                 break;
2673         }
2674         case KVM_GET_SREGS: {
2675                 struct kvm_sregs kvm_sregs;
2676
2677                 memset(&kvm_sregs, 0, sizeof kvm_sregs);
2678                 r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
2679                 if (r)
2680                         goto out;
2681                 r = -EFAULT;
2682                 if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
2683                         goto out;
2684                 r = 0;
2685                 break;
2686         }
2687         case KVM_SET_SREGS: {
2688                 struct kvm_sregs kvm_sregs;
2689
2690                 r = -EFAULT;
2691                 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
2692                         goto out;
2693                 r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
2694                 if (r)
2695                         goto out;
2696                 r = 0;
2697                 break;
2698         }
2699         case KVM_TRANSLATE: {
2700                 struct kvm_translation tr;
2701
2702                 r = -EFAULT;
2703                 if (copy_from_user(&tr, argp, sizeof tr))
2704                         goto out;
2705                 r = kvm_vcpu_ioctl_translate(vcpu, &tr);
2706                 if (r)
2707                         goto out;
2708                 r = -EFAULT;
2709                 if (copy_to_user(argp, &tr, sizeof tr))
2710                         goto out;
2711                 r = 0;
2712                 break;
2713         }
2714         case KVM_INTERRUPT: {
2715                 struct kvm_interrupt irq;
2716
2717                 r = -EFAULT;
2718                 if (copy_from_user(&irq, argp, sizeof irq))
2719                         goto out;
2720                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2721                 if (r)
2722                         goto out;
2723                 r = 0;
2724                 break;
2725         }
2726         case KVM_DEBUG_GUEST: {
2727                 struct kvm_debug_guest dbg;
2728
2729                 r = -EFAULT;
2730                 if (copy_from_user(&dbg, argp, sizeof dbg))
2731                         goto out;
2732                 r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
2733                 if (r)
2734                         goto out;
2735                 r = 0;
2736                 break;
2737         }
2738         case KVM_GET_MSRS:
2739                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2740                 break;
2741         case KVM_SET_MSRS:
2742                 r = msr_io(vcpu, argp, do_set_msr, 0);
2743                 break;
2744         case KVM_SET_CPUID: {
2745                 struct kvm_cpuid __user *cpuid_arg = argp;
2746                 struct kvm_cpuid cpuid;
2747
2748                 r = -EFAULT;
2749                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2750                         goto out;
2751                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2752                 if (r)
2753                         goto out;
2754                 break;
2755         }
2756         case KVM_SET_SIGNAL_MASK: {
2757                 struct kvm_signal_mask __user *sigmask_arg = argp;
2758                 struct kvm_signal_mask kvm_sigmask;
2759                 sigset_t sigset, *p;
2760
2761                 p = NULL;
2762                 if (argp) {
2763                         r = -EFAULT;
2764                         if (copy_from_user(&kvm_sigmask, argp,
2765                                            sizeof kvm_sigmask))
2766                                 goto out;
2767                         r = -EINVAL;
2768                         if (kvm_sigmask.len != sizeof sigset)
2769                                 goto out;
2770                         r = -EFAULT;
2771                         if (copy_from_user(&sigset, sigmask_arg->sigset,
2772                                            sizeof sigset))
2773                                 goto out;
2774                         p = &sigset;
2775                 }
2776                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
2777                 break;
2778         }
2779         case KVM_GET_FPU: {
2780                 struct kvm_fpu fpu;
2781
2782                 memset(&fpu, 0, sizeof fpu);
2783                 r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
2784                 if (r)
2785                         goto out;
2786                 r = -EFAULT;
2787                 if (copy_to_user(argp, &fpu, sizeof fpu))
2788                         goto out;
2789                 r = 0;
2790                 break;
2791         }
2792         case KVM_SET_FPU: {
2793                 struct kvm_fpu fpu;
2794
2795                 r = -EFAULT;
2796                 if (copy_from_user(&fpu, argp, sizeof fpu))
2797                         goto out;
2798                 r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
2799                 if (r)
2800                         goto out;
2801                 r = 0;
2802                 break;
2803         }
2804         default:
2805                 ;
2806         }
2807 out:
2808         return r;
2809 }
2810
2811 static long kvm_vm_ioctl(struct file *filp,
2812                            unsigned int ioctl, unsigned long arg)
2813 {
2814         struct kvm *kvm = filp->private_data;
2815         void __user *argp = (void __user *)arg;
2816         int r = -EINVAL;
2817
2818         switch (ioctl) {
2819         case KVM_CREATE_VCPU:
2820                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2821                 if (r < 0)
2822                         goto out;
2823                 break;
2824         case KVM_SET_MEMORY_REGION: {
2825                 struct kvm_memory_region kvm_mem;
2826
2827                 r = -EFAULT;
2828                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2829                         goto out;
2830                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
2831                 if (r)
2832                         goto out;
2833                 break;
2834         }
2835         case KVM_GET_DIRTY_LOG: {
2836                 struct kvm_dirty_log log;
2837
2838                 r = -EFAULT;
2839                 if (copy_from_user(&log, argp, sizeof log))
2840                         goto out;
2841                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2842                 if (r)
2843                         goto out;
2844                 break;
2845         }
2846         case KVM_SET_MEMORY_ALIAS: {
2847                 struct kvm_memory_alias alias;
2848
2849                 r = -EFAULT;
2850                 if (copy_from_user(&alias, argp, sizeof alias))
2851                         goto out;
2852                 r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
2853                 if (r)
2854                         goto out;
2855                 break;
2856         }
2857         case KVM_CREATE_IRQCHIP:
2858                 r = -ENOMEM;
2859                 kvm->vpic = kvm_create_pic(kvm);
2860                 if (kvm->vpic) {
2861                         r = kvm_ioapic_init(kvm);
2862                         if (r) {
2863                                 kfree(kvm->vpic);
2864                                 kvm->vpic = NULL;
2865                                 goto out;
2866                         }
2867                 } else {
2868                         goto out;
2869                 }
2870                 break;
2871         case KVM_IRQ_LINE: {
2872                 struct kvm_irq_level irq_event;
2873
2874                 r = -EFAULT;
2875                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
2876                         goto out;
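                /*
                 * ISA IRQs 0-15 are visible on both the PIC and the
                 * IOAPIC, so deliver the level change to both; higher
                 * GSIs exist only on the IOAPIC.
                 */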
2877                 if (irqchip_in_kernel(kvm)) {
2878                         mutex_lock(&kvm->lock);
2879                         if (irq_event.irq < 16)
2880                                 kvm_pic_set_irq(pic_irqchip(kvm),
2881                                         irq_event.irq,
2882                                         irq_event.level);
2883                         kvm_ioapic_set_irq(kvm->vioapic,
2884                                         irq_event.irq,
2885                                         irq_event.level);
2886                         mutex_unlock(&kvm->lock);
2887                         r = 0;
2888                 }
2889                 break;
2890         }
2891         case KVM_GET_IRQCHIP: {
2892                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2893                 struct kvm_irqchip chip;
2894
2895                 r = -EFAULT;
2896                 if (copy_from_user(&chip, argp, sizeof chip))
2897                         goto out;
2898                 r = -ENXIO;
2899                 if (!irqchip_in_kernel(kvm))
2900                         goto out;
2901                 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
2902                 if (r)
2903                         goto out;
2904                 r = -EFAULT;
2905                 if (copy_to_user(argp, &chip, sizeof chip))
2906                         goto out;
2907                 r = 0;
2908                 break;
2909         }
2910         case KVM_SET_IRQCHIP: {
2911                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
2912                 struct kvm_irqchip chip;
2913
2914                 r = -EFAULT;
2915                 if (copy_from_user(&chip, argp, sizeof chip))
2916                         goto out;
2917                 r = -ENXIO;
2918                 if (!irqchip_in_kernel(kvm))
2919                         goto out;
2920                 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
2921                 if (r)
2922                         goto out;
2923                 r = 0;
2924                 break;
2925         }
2926         default:
2927                 ;
2928         }
2929 out:
2930         return r;
2931 }
2932
2933 static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
2934                                   unsigned long address,
2935                                   int *type)
2936 {
2937         struct kvm *kvm = vma->vm_file->private_data;
2938         unsigned long pgoff;
2939         struct page *page;
2940
2941         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2942         page = gfn_to_page(kvm, pgoff);
2943         if (!page)
2944                 return NOPAGE_SIGBUS;
2945         get_page(page);
2946         if (type != NULL)
2947                 *type = VM_FAULT_MINOR;
2948
2949         return page;
2950 }
2951
2952 static struct vm_operations_struct kvm_vm_vm_ops = {
2953         .nopage = kvm_vm_nopage,
2954 };
2955
2956 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2957 {
2958         vma->vm_ops = &kvm_vm_vm_ops;
2959         return 0;
2960 }
2961
2962 static struct file_operations kvm_vm_fops = {
2963         .release        = kvm_vm_release,
2964         .unlocked_ioctl = kvm_vm_ioctl,
2965         .compat_ioctl   = kvm_vm_ioctl,
2966         .mmap           = kvm_vm_mmap,
2967 };
2968
2969 static int kvm_dev_ioctl_create_vm(void)
2970 {
2971         int fd, r;
2972         struct inode *inode;
2973         struct file *file;
2974         struct kvm *kvm;
2975
2976         kvm = kvm_create_vm();
2977         if (IS_ERR(kvm))
2978                 return PTR_ERR(kvm);
2979         r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
2980         if (r) {
2981                 kvm_destroy_vm(kvm);
2982                 return r;
2983         }
2984
2985         kvm->filp = file;
2986
2987         return fd;
2988 }
2989
2990 static long kvm_dev_ioctl(struct file *filp,
2991                           unsigned int ioctl, unsigned long arg)
2992 {
2993         void __user *argp = (void __user *)arg;
2994         long r = -EINVAL;
2995
2996         switch (ioctl) {
2997         case KVM_GET_API_VERSION:
2998                 r = -EINVAL;
2999                 if (arg)
3000                         goto out;
3001                 r = KVM_API_VERSION;
3002                 break;
3003         case KVM_CREATE_VM:
3004                 r = -EINVAL;
3005                 if (arg)
3006                         goto out;
3007                 r = kvm_dev_ioctl_create_vm();
3008                 break;
3009         case KVM_GET_MSR_INDEX_LIST: {
3010                 struct kvm_msr_list __user *user_msr_list = argp;
3011                 struct kvm_msr_list msr_list;
3012                 unsigned n;
3013
3014                 r = -EFAULT;
3015                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
3016                         goto out;
3017                 n = msr_list.nmsrs;
3018                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
3019                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
3020                         goto out;
3021                 r = -E2BIG;
3022                 if (n < msr_list.nmsrs)
3023                         goto out;
3024                 r = -EFAULT;
3025                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
3026                                  num_msrs_to_save * sizeof(u32)))
3027                         goto out;
3028                 if (copy_to_user(user_msr_list->indices
3029                                  + num_msrs_to_save,
3030                                  &emulated_msrs,
3031                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
3032                         goto out;
3033                 r = 0;
3034                 break;
3035         }
3036         case KVM_CHECK_EXTENSION: {
3037                 int ext = (long)argp;
3038
3039                 switch (ext) {
3040                 case KVM_CAP_IRQCHIP:
3041                 case KVM_CAP_HLT:
3042                         r = 1;
3043                         break;
3044                 default:
3045                         r = 0;
3046                         break;
3047                 }
3048                 break;
3049         }
3050         case KVM_GET_VCPU_MMAP_SIZE:
3051                 r = -EINVAL;
3052                 if (arg)
3053                         goto out;
3054                 r = 2 * PAGE_SIZE;
3055                 break;
3056         default:
3057                 ;
3058         }
3059 out:
3060         return r;
3061 }
3062
3063 static struct file_operations kvm_chardev_ops = {
3064         .unlocked_ioctl = kvm_dev_ioctl,
3065         .compat_ioctl   = kvm_dev_ioctl,
3066 };
3067
3068 static struct miscdevice kvm_dev = {
3069         .minor = KVM_MINOR,
3070         .name  = "kvm",
3071         .fops  = &kvm_chardev_ops,
3072 };
3073
3074 /*
3075  * Make sure that a cpu that is being hot-unplugged does not have any vcpus
3076  * cached on it.
3077  */
3078 static void decache_vcpus_on_cpu(int cpu)
3079 {
3080         struct kvm *vm;
3081         struct kvm_vcpu *vcpu;
3082         int i;
3083
3084         spin_lock(&kvm_lock);
3085         list_for_each_entry(vm, &vm_list, vm_list)
3086                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3087                         vcpu = vm->vcpus[i];
3088                         if (!vcpu)
3089                                 continue;
3090                         /*
3091                          * If the vcpu is locked, then it is running on some
3092                          * other cpu and therefore it is not cached on the
3093                          * cpu in question.
3094                          *
3095                          * If it's not locked, check the last cpu it executed
3096                          * on.
3097                          */
3098                         if (mutex_trylock(&vcpu->mutex)) {
3099                                 if (vcpu->cpu == cpu) {
3100                                         kvm_arch_ops->vcpu_decache(vcpu);
3101                                         vcpu->cpu = -1;
3102                                 }
3103                                 mutex_unlock(&vcpu->mutex);
3104                         }
3105                 }
3106         spin_unlock(&kvm_lock);
3107 }
3108
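/*
 * cpus_hardware_enabled tracks which cpus currently have virtualization
 * (VMX/SVM) enabled, so that the hotplug, reboot and suspend/resume
 * paths below enable or disable it exactly once per cpu.
 */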
3109 static void hardware_enable(void *junk)
3110 {
3111         int cpu = raw_smp_processor_id();
3112
3113         if (cpu_isset(cpu, cpus_hardware_enabled))
3114                 return;
3115         cpu_set(cpu, cpus_hardware_enabled);
3116         kvm_arch_ops->hardware_enable(NULL);
3117 }
3118
3119 static void hardware_disable(void *junk)
3120 {
3121         int cpu = raw_smp_processor_id();
3122
3123         if (!cpu_isset(cpu, cpus_hardware_enabled))
3124                 return;
3125         cpu_clear(cpu, cpus_hardware_enabled);
3126         decache_vcpus_on_cpu(cpu);
3127         kvm_arch_ops->hardware_disable(NULL);
3128 }
3129
3130 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
3131                            void *v)
3132 {
3133         int cpu = (long)v;
3134
3135         switch (val) {
3136         case CPU_DYING:
3137         case CPU_DYING_FROZEN:
3138                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
3139                        cpu);
3140                 hardware_disable(NULL);
3141                 break;
3142         case CPU_UP_CANCELED:
3143         case CPU_UP_CANCELED_FROZEN:
3144                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
3145                        cpu);
3146                 smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
3147                 break;
3148         case CPU_ONLINE:
3149         case CPU_ONLINE_FROZEN:
3150                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
3151                        cpu);
3152                 smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
3153                 break;
3154         }
3155         return NOTIFY_OK;
3156 }
3157
3158 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
3159                        void *v)
3160 {
3161         if (val == SYS_RESTART) {
3162                 /*
3163                  * Some (well, at least mine) BIOSes hang on reboot if
3164                  * in vmx root mode.
3165                  */
3166                 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
3167                 on_each_cpu(hardware_disable, NULL, 0, 1);
3168         }
3169         return NOTIFY_OK;
3170 }
3171
3172 static struct notifier_block kvm_reboot_notifier = {
3173         .notifier_call = kvm_reboot,
3174         .priority = 0,
3175 };
3176
3177 void kvm_io_bus_init(struct kvm_io_bus *bus)
3178 {
3179         memset(bus, 0, sizeof(*bus));
3180 }
3181
3182 void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3183 {
3184         int i;
3185
3186         for (i = 0; i < bus->dev_count; i++) {
3187                 struct kvm_io_device *pos = bus->devs[i];
3188
3189                 kvm_iodevice_destructor(pos);
3190         }
3191 }
3192
3193 struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
3194 {
3195         int i;
3196
3197         for (i = 0; i < bus->dev_count; i++) {
3198                 struct kvm_io_device *pos = bus->devs[i];
3199
3200                 if (pos->in_range(pos, addr))
3201                         return pos;
3202         }
3203
3204         return NULL;
3205 }
3206
3207 void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
3208 {
3209         BUG_ON(bus->dev_count >= NR_IOBUS_DEVS);
3210
3211         bus->devs[bus->dev_count++] = dev;
3212 }
3213
3214 static struct notifier_block kvm_cpu_notifier = {
3215         .notifier_call = kvm_cpu_hotplug,
3216         .priority = 20, /* must be > scheduler priority */
3217 };
3218
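/*
 * Sum one per-vcpu u32 statistic, located _offset bytes into struct
 * kvm_vcpu, across all vcpus of all VMs; backs the debugfs files
 * created in kvm_init_debug().
 */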
3219 static u64 stat_get(void *_offset)
3220 {
3221         unsigned offset = (long)_offset;
3222         u64 total = 0;
3223         struct kvm *kvm;
3224         struct kvm_vcpu *vcpu;
3225         int i;
3226
3227         spin_lock(&kvm_lock);
3228         list_for_each_entry(kvm, &vm_list, vm_list)
3229                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3230                         vcpu = kvm->vcpus[i];
3231                         if (vcpu)
3232                                 total += *(u32 *)((void *)vcpu + offset);
3233                 }
3234         spin_unlock(&kvm_lock);
3235         return total;
3236 }
3237
3238 DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
3239
3240 static __init void kvm_init_debug(void)
3241 {
3242         struct kvm_stats_debugfs_item *p;
3243
3244         debugfs_dir = debugfs_create_dir("kvm", NULL);
3245         for (p = debugfs_entries; p->name; ++p)
3246                 p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
3247                                                 (void *)(long)p->offset,
3248                                                 &stat_fops);
3249 }
3250
3251 static void kvm_exit_debug(void)
3252 {
3253         struct kvm_stats_debugfs_item *p;
3254
3255         for (p = debugfs_entries; p->name; ++p)
3256                 debugfs_remove(p->dentry);
3257         debugfs_remove(debugfs_dir);
3258 }
3259
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        set_kset_name("kvm"),
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

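/*
 * Physical address of a zeroed page mapped in place of pages the guest
 * must not see, e.g. when a gfn falls outside every memory slot.
 */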
hpa_t bad_page_address;

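/*
 * Preempt notifiers let the vcpu's hardware state be saved when its task
 * is scheduled out and restored when it runs again, possibly on another
 * CPU.
 */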
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_ops->vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_ops->vcpu_put(vcpu);
}

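/*
 * Called by an arch-specific module (kvm-intel or kvm-amd) to register
 * its kvm_arch_ops and bring up hardware virtualization on every online
 * CPU.  Only one arch module can be registered at a time.
 */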
int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
                  struct module *module)
{
        int r;
        int cpu;

        if (kvm_arch_ops) {
                printk(KERN_ERR "kvm: already loaded the other module\n");
                return -EEXIST;
        }

        if (!ops->cpu_has_kvm_support()) {
                printk(KERN_ERR "kvm: no hardware support\n");
                return -EOPNOTSUPP;
        }
        if (ops->disabled_by_bios()) {
                printk(KERN_ERR "kvm: disabled by bios\n");
                return -EOPNOTSUPP;
        }

        kvm_arch_ops = ops;

        r = kvm_arch_ops->hardware_setup();
        if (r < 0)
                goto out;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_ops->check_processor_compatibility,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
        }

        on_each_cpu(hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_1;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_2;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_3;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_4;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
        sysdev_unregister(&kvm_sysdev);
out_free_3:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
        kvm_arch_ops->hardware_unsetup();
out:
        kvm_arch_ops = NULL;
        return r;
}
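
/*
 * Example (sketch): an arch module registers itself from its own
 * module_init.  kvm-intel does roughly:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init_arch(&vmx_arch_ops,
 *				     sizeof(struct vcpu_vmx), THIS_MODULE);
 *	}
 *
 * (vmx_arch_ops and struct vcpu_vmx live in the arch module; the names
 * are shown here for illustration only.)
 */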

/* Undo kvm_init_arch(): called from the arch module's module_exit. */
void kvm_exit_arch(void)
{
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
        kvm_arch_ops->hardware_unsetup();
        kvm_arch_ops = NULL;
}

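/*
 * Generic module initialization: MMU internals, debugfs statistics, the
 * MSR list and the shared bad page.  Hardware setup happens later, when
 * an arch module calls kvm_init_arch().
 */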
static __init int kvm_init(void)
{
        static struct page *bad_page;
        int r;

        r = kvm_mmu_module_init();
        if (r)
                goto out4;

        kvm_init_debug();

        kvm_init_msr_list();

        bad_page = alloc_page(GFP_KERNEL);
        if (!bad_page) {
                r = -ENOMEM;
                goto out;
        }

        bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
        memset(__va(bad_page_address), 0, PAGE_SIZE);

        return 0;

out:
        kvm_exit_debug();
        kvm_mmu_module_exit();
out4:
        return r;
}

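/* Release everything kvm_init() set up. */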
static __exit void kvm_exit(void)
{
        kvm_exit_debug();
        __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
        kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);