[PATCH] KVM: Do not export unsupported msrs to userspace
[linux-2.6] drivers/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <asm/msr.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <linux/reboot.h>
#include <asm/io.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <asm/desc.h>

#include "x86_emulate.h"
#include "segment_descriptor.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

struct kvm_arch_ops *kvm_arch_ops;
struct kvm_stat kvm_stat;
EXPORT_SYMBOL_GPL(kvm_stat);

static struct kvm_stats_debugfs_item {
        const char *name;
        u32 *data;
        struct dentry *dentry;
} debugfs_entries[] = {
        { "pf_fixed", &kvm_stat.pf_fixed },
        { "pf_guest", &kvm_stat.pf_guest },
        { "tlb_flush", &kvm_stat.tlb_flush },
        { "invlpg", &kvm_stat.invlpg },
        { "exits", &kvm_stat.exits },
        { "io_exits", &kvm_stat.io_exits },
        { "mmio_exits", &kvm_stat.mmio_exits },
        { "signal_exits", &kvm_stat.signal_exits },
        { "irq_exits", &kvm_stat.irq_exits },
        { NULL, NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256

#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
#define LMSW_GUEST_MASK 0x0eULL
#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
#define CR8_RESEVED_BITS (~0x0fULL)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT.  16 bytes. */
struct segment_descriptor_64 {
        struct segment_descriptor s;
        u32 base_higher;
        u32 pad_zero;
};

#endif

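/*
 * Decode the base address of a segment from its selector by reading the
 * descriptor straight out of the GDT (or, when the selector's TI bit is
 * set, out of the LDT).
 */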
unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct segment_descriptor *d;
        unsigned long table_base;
        typedef unsigned long ul;
        unsigned long v;

        if (selector == 0)
                return 0;

        asm ("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;

                asm ("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct segment_descriptor *)(table_base + (selector & ~7));
        v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
        if (d->system == 0
            && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

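/*
 * Copy @size bytes from guest virtual address @addr into @dest, one
 * page at a time, stopping at the first unmapped page.  Returns the
 * number of bytes actually copied; kvm_write_guest() below is the
 * mirror image.
 */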
int kvm_read_guest(struct kvm_vcpu *vcpu,
                   gva_t addr,
                   unsigned long size,
                   void *dest)
{
        unsigned char *host_buf = dest;
        unsigned long req_size = size;

        while (size) {
                hpa_t paddr;
                unsigned now;
                unsigned offset;
                hva_t guest_buf;

                paddr = gva_to_hpa(vcpu, addr);

                if (is_error_hpa(paddr))
                        break;

                guest_buf = (hva_t)kmap_atomic(
                                        pfn_to_page(paddr >> PAGE_SHIFT),
                                        KM_USER0);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = min(size, PAGE_SIZE - offset);
                memcpy(host_buf, (void *)guest_buf, now);
                host_buf += now;
                addr += now;
                size -= now;
                kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
        }
        return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
                    gva_t addr,
                    unsigned long size,
                    void *data)
{
        unsigned char *host_buf = data;
        unsigned long req_size = size;

        while (size) {
                hpa_t paddr;
                unsigned now;
                unsigned offset;
                hva_t guest_buf;

                paddr = gva_to_hpa(vcpu, addr);

                if (is_error_hpa(paddr))
                        break;

                guest_buf = (hva_t)kmap_atomic(
                                pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
                offset = addr & ~PAGE_MASK;
                guest_buf |= offset;
                now = min(size, PAGE_SIZE - offset);
                memcpy((void *)guest_buf, host_buf, now);
                host_buf += now;
                addr += now;
                size -= now;
                kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
        }
        return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

static int vcpu_slot(struct kvm_vcpu *vcpu)
{
        return vcpu - vcpu->kvm->vcpus;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
{
        struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];

        mutex_lock(&vcpu->mutex);
        if (unlikely(!vcpu->vmcs)) {
                mutex_unlock(&vcpu->mutex);
                return NULL;
        }
        return kvm_arch_ops->vcpu_load(vcpu);
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_arch_ops->vcpu_put(vcpu);
        mutex_unlock(&vcpu->mutex);
}

static int kvm_dev_open(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        int i;

        if (!kvm)
                return -ENOMEM;

        spin_lock_init(&kvm->lock);
        INIT_LIST_HEAD(&kvm->active_mmu_pages);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                struct kvm_vcpu *vcpu = &kvm->vcpus[i];

                mutex_init(&vcpu->mutex);
                vcpu->mmu.root_hpa = INVALID_PAGE;
                INIT_LIST_HEAD(&vcpu->free_pages);
        }
        filp->private_data = kvm;
        return 0;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        int i;

        if (!dont || free->phys_mem != dont->phys_mem)
                if (free->phys_mem) {
                        for (i = 0; i < free->npages; ++i)
                                __free_page(free->phys_mem[i]);
                        vfree(free->phys_mem);
                }

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        free->phys_mem = NULL;
        free->npages = 0;
        free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_arch_ops->vcpu_free(vcpu);
        kvm_mmu_destroy(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                kvm_free_vcpu(&kvm->vcpus[i]);
}

static int kvm_dev_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        kfree(kvm);
        return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
        kvm_arch_ops->inject_gp(vcpu, 0);
}

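/*
 * Under PAE paging, cr3 points at four page-directory-pointer-table
 * entries.  Returns nonzero if any present PDPTE has a reserved bit
 * set, in which case the attempted cr0/cr3/cr4 load must fail with #GP.
 */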
static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
                                         unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5;
        int i;
        u64 pdpte;
        u64 *pdpt;
        struct kvm_memory_slot *memslot;

        spin_lock(&vcpu->kvm->lock);
        memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
        /* FIXME: !memslot - emulate? 0xff? */
        pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);

        for (i = 0; i < 4; ++i) {
                pdpte = pdpt[offset + i];
                if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
                        break;
        }

        kunmap_atomic(pdpt, KM_USER0);
        spin_unlock(&vcpu->kvm->lock);

        return i != 4;
}

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESEVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->cr0);
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                inject_gp(vcpu);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef CONFIG_X86_64
                if ((vcpu->shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                inject_gp(vcpu);
                                return;
                        }
                        kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                inject_gp(vcpu);
                                return;
                        }
                } else
#endif
                if (is_pae(vcpu) &&
                            pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }

        }

        kvm_arch_ops->set_cr0(vcpu, cr0);
        vcpu->cr0 = cr0;

        spin_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & CR4_RESEVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                inject_gp(vcpu);
                return;
        }

        if (kvm_arch_ops->is_long_mode(vcpu)) {
                if (!(cr4 & CR4_PAE_MASK)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        inject_gp(vcpu);
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
                   && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                inject_gp(vcpu);
                return;
        }

        if (cr4 & CR4_VMXE_MASK) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                inject_gp(vcpu);
                return;
        }
        kvm_arch_ops->set_cr4(vcpu, cr4);
        spin_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (kvm_arch_ops->is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESEVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        } else {
                if (cr3 & CR3_RESEVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
                if (is_paging(vcpu) && is_pae(vcpu) &&
                    pdptrs_have_reserved_bits_set(vcpu, cr3)) {
                        printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        }

        vcpu->cr3 = cr3;
        spin_lock(&vcpu->kvm->lock);
        vcpu->mmu.new_cr3(vcpu);
        spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESEVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                inject_gp(vcpu);
                return;
        }
        vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

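/*
 * Bring the guest FPU/SSE image to its architectural reset state: save
 * the host image, let fpu_init() reinitialize the hardware, capture
 * that clean state as the guest image, then restore the host image.
 * MXCSR is set to its documented reset value of 0x1f80.
 */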
void fx_init(struct kvm_vcpu *vcpu)
{
        struct __attribute__ ((__packed__)) fx_image_s {
                u16 control;    /* fcw */
                u16 status;     /* fsw */
                u16 tag;        /* ftw */
                u16 opcode;     /* fop */
                u64 ip;         /* fpu ip */
                u64 operand;    /* fpu dp */
                u32 mxcsr;
                u32 mxcsr_mask;
        } *fx_image;

        fx_save(vcpu->host_fx_image);
        fpu_init();
        fx_save(vcpu->guest_fx_image);
        fx_restore(vcpu->host_fx_image);

        fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
        fx_image->mxcsr = 0x1f80;
        memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
               0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        r = -EINVAL;
        if (!valid_vcpu(n))
                goto out;

        vcpu = &kvm->vcpus[n];

        mutex_lock(&vcpu->mutex);

        if (vcpu->vmcs) {
                mutex_unlock(&vcpu->mutex);
                return -EEXIST;
        }

        vcpu->host_fx_image = (char *)ALIGN((hva_t)vcpu->fx_buf,
                                            FX_IMAGE_ALIGN);
        vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;

        vcpu->cpu = -1;  /* First load will set up TR */
        vcpu->kvm = kvm;
        r = kvm_arch_ops->vcpu_create(vcpu);
        if (r < 0)
                goto out_free_vcpus;

        kvm_arch_ops->vcpu_load(vcpu);

        r = kvm_arch_ops->vcpu_setup(vcpu);
        if (r >= 0)
                r = kvm_mmu_init(vcpu);

        vcpu_put(vcpu);

        if (r < 0)
                goto out_free_vcpus;

        return 0;

out_free_vcpus:
        kvm_free_vcpu(vcpu);
        mutex_unlock(&vcpu->mutex);
out:
        return r;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
                                           struct kvm_memory_region *mem)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;
        int memory_config_version;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
        spin_lock(&kvm->lock);

        memory_config_version = kvm->memory_config_version;
        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_unlock;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_unlock;
        }
        /*
         * Do memory allocations outside lock.  memory_config_version will
         * detect any races.
         */
        spin_unlock(&kvm->lock);

        /* Deallocate if slot is being removed */
        if (!npages)
                new.phys_mem = NULL;

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.phys_mem) {
                new.phys_mem = vmalloc(npages * sizeof(struct page *));

                if (!new.phys_mem)
                        goto out_free;

                memset(new.phys_mem, 0, npages * sizeof(struct page *));
                for (i = 0; i < npages; ++i) {
                        new.phys_mem[i] = alloc_page(GFP_HIGHUSER
                                                     | __GFP_ZERO);
                        if (!new.phys_mem[i])
                                goto out_free;
                }
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        spin_lock(&kvm->lock);

        if (memory_config_version != kvm->memory_config_version) {
                spin_unlock(&kvm->lock);
                kvm_free_physmem_slot(&new, &old);
                goto raced;
        }

        r = -EAGAIN;
        if (kvm->busy)
                goto out_unlock;

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;
        ++kvm->memory_config_version;

        spin_unlock(&kvm->lock);

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                struct kvm_vcpu *vcpu;

                vcpu = vcpu_load(kvm, i);
                if (!vcpu)
                        continue;
                kvm_mmu_reset_context(vcpu);
                vcpu_put(vcpu);
        }

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_unlock:
        spin_unlock(&kvm->lock);
out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}

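/*
 * For reference, a minimal sketch of how userspace drives the function
 * above through the KVM_SET_MEMORY_REGION ioctl (fd is an open /dev/kvm
 * descriptor; the values are illustrative only):
 *
 *	struct kvm_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 16 << 20,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *	};
 *	ioctl(fd, KVM_SET_MEMORY_REGION, &mem);
 *
 * memory_size and guest_phys_addr must be page aligned, and a slot's
 * size cannot change once populated (see the checks above).
 */
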
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
                                       struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        spin_lock(&kvm->lock);

        /*
         * Prevent changes to guest memory configuration even while the lock
         * is not taken.
         */
        ++kvm->busy;
        spin_unlock(&kvm->lock);
        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, 8) / 8;

        for (i = 0; !any && i < n; ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any) {
                spin_lock(&kvm->lock);
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                spin_unlock(&kvm->lock);
                memset(memslot->dirty_bitmap, 0, n);
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        struct kvm_vcpu *vcpu = vcpu_load(kvm, i);

                        if (!vcpu)
                                continue;
                        kvm_arch_ops->tlb_flush(vcpu);
                        vcpu_put(vcpu);
                }
        }

        r = 0;

out:
        spin_lock(&kvm->lock);
        --kvm->busy;
        spin_unlock(&kvm->lock);
        return r;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_memory_slot *memslot;
        unsigned long rel_gfn;

        for (i = 0; i < kvm->nmemslots; ++i) {
                memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages) {

                        if (!memslot->dirty_bitmap)
                                return;

                        rel_gfn = gfn - memslot->base_gfn;

                        /* avoid RMW */
                        if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                                set_bit(rel_gfn, memslot->dirty_bitmap);
                        return;
                }
        }
}

static int emulator_read_std(unsigned long addr,
                             unsigned long *val,
                             unsigned int bytes,
                             struct x86_emulate_ctxt *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        void *data = val;

        while (bytes) {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
                unsigned long pfn;
                struct kvm_memory_slot *memslot;
                void *page;

                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
                pfn = gpa >> PAGE_SHIFT;
                memslot = gfn_to_memslot(vcpu->kvm, pfn);
                if (!memslot)
                        return X86EMUL_UNHANDLEABLE;
                page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);

                memcpy(data, page + offset, tocopy);

                kunmap_atomic(page, KM_USER0);

                bytes -= tocopy;
                data += tocopy;
                addr += tocopy;
        }

        return X86EMUL_CONTINUE;
}

static int emulator_write_std(unsigned long addr,
                              unsigned long val,
                              unsigned int bytes,
                              struct x86_emulate_ctxt *ctxt)
{
        printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
               addr, bytes);
        return X86EMUL_UNHANDLEABLE;
}

static int emulator_read_emulated(unsigned long addr,
                                  unsigned long *val,
                                  unsigned int bytes,
                                  struct x86_emulate_ctxt *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
                vcpu->mmio_read_completed = 0;
                return X86EMUL_CONTINUE;
        } else if (emulator_read_std(addr, val, bytes, ctxt)
                   == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
        else {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

                if (gpa == UNMAPPED_GVA) {
                        vcpu_printf(vcpu, "not present\n");
                        return X86EMUL_PROPAGATE_FAULT;
                }
                vcpu->mmio_needed = 1;
                vcpu->mmio_phys_addr = gpa;
                vcpu->mmio_size = bytes;
                vcpu->mmio_is_write = 0;

                return X86EMUL_UNHANDLEABLE;
        }
}

static int emulator_write_emulated(unsigned long addr,
                                   unsigned long val,
                                   unsigned int bytes,
                                   struct x86_emulate_ctxt *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;

        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
        vcpu->mmio_is_write = 1;
        memcpy(vcpu->mmio_data, &val, bytes);

        return X86EMUL_CONTINUE;
}

static int emulator_cmpxchg_emulated(unsigned long addr,
                                     unsigned long old,
                                     unsigned long new,
                                     unsigned int bytes,
                                     struct x86_emulate_ctxt *ctxt)
{
        static int reported;

        if (!reported) {
                reported = 1;
                printk(KERN_WARNING "kvm: emulating exchange as write\n");
        }
        return emulator_write_emulated(addr, new, bytes, ctxt);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        return kvm_arch_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
        spin_lock(&vcpu->kvm->lock);
        vcpu->mmu.inval_page(vcpu, address);
        spin_unlock(&vcpu->kvm->lock);
        kvm_arch_ops->invlpg(vcpu, address);
        return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
        unsigned long cr0 = vcpu->cr0;

        cr0 &= ~CR0_TS_MASK;
        kvm_arch_ops->set_cr0(vcpu, cr0);
        return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        switch (dr) {
        case 0 ... 3:
                *dest = kvm_arch_ops->get_dr(vcpu, dr);
                return X86EMUL_CONTINUE;
        default:
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
                       __FUNCTION__, dr);
                return X86EMUL_UNHANDLEABLE;
        }
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
        int exception;

        kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
        if (exception) {
                /* FIXME: better handling */
                return X86EMUL_UNHANDLEABLE;
        }
        return X86EMUL_CONTINUE;
}

static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
        static int reported;
        u8 opcodes[4];
        unsigned long rip = ctxt->vcpu->rip;
        unsigned long rip_linear;

        rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

        if (reported)
                return;

        emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);

        printk(KERN_ERR "emulation failed but !mmio_needed?"
               " rip %lx %02x %02x %02x %02x\n",
               rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
        reported = 1;
}

struct x86_emulate_ops emulate_ops = {
        .read_std            = emulator_read_std,
        .write_std           = emulator_write_std,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
};

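/*
 * Emulate one instruction at the current rip.  Returns EMULATE_DONE if
 * the instruction was handled entirely in the kernel, EMULATE_DO_MMIO
 * if userspace must complete an mmio access first, and EMULATE_FAIL if
 * the instruction could not be emulated.
 */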
int emulate_instruction(struct kvm_vcpu *vcpu,
                        struct kvm_run *run,
                        unsigned long cr2,
                        u16 error_code)
{
        struct x86_emulate_ctxt emulate_ctxt;
        int r;
        int cs_db, cs_l;

        kvm_arch_ops->cache_regs(vcpu);

        kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

        emulate_ctxt.vcpu = vcpu;
        emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
        emulate_ctxt.cr2 = cr2;
        emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
                ? X86EMUL_MODE_REAL : cs_l
                ? X86EMUL_MODE_PROT64 : cs_db
                ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

        if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
                emulate_ctxt.cs_base = 0;
                emulate_ctxt.ds_base = 0;
                emulate_ctxt.es_base = 0;
                emulate_ctxt.ss_base = 0;
        } else {
                emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
                emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
                emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
                emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
        }

        emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
        emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

        vcpu->mmio_is_write = 0;
        r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

        if ((r || vcpu->mmio_is_write) && run) {
                run->mmio.phys_addr = vcpu->mmio_phys_addr;
                memcpy(run->mmio.data, vcpu->mmio_data, 8);
                run->mmio.len = vcpu->mmio_size;
                run->mmio.is_write = vcpu->mmio_is_write;
        }

        if (r) {
                if (!vcpu->mmio_needed) {
                        report_emulation_failure(&emulate_ctxt);
                        return EMULATE_FAIL;
                }
                return EMULATE_DO_MMIO;
        }

        kvm_arch_ops->decache_regs(vcpu);
        kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

        if (vcpu->mmio_is_write)
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

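/*
 * Splice a 32-bit value written by the guest into the low half of a
 * 64-bit control register, preserving the upper 32 bits.
 */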
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
        return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
        struct descriptor_table dt = { limit, base };

        kvm_arch_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
        struct descriptor_table dt = { limit, base };

        kvm_arch_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags)
{
        lmsw(vcpu, msw);
        *rflags = kvm_arch_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
        switch (cr) {
        case 0:
                return vcpu->cr0;
        case 2:
                return vcpu->cr2;
        case 3:
                return vcpu->cr3;
        case 4:
                return vcpu->cr4;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
                return 0;
        }
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
                     unsigned long *rflags)
{
        switch (cr) {
        case 0:
                set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
                *rflags = kvm_arch_ops->get_rflags(vcpu);
                break;
        case 2:
                vcpu->cr2 = val;
                break;
        case 3:
                set_cr3(vcpu, val);
                break;
        case 4:
                set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
                break;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
        }
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}

#ifdef CONFIG_X86_64

void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & EFER_RESERVED_BITS) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                inject_gp(vcpu);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                inject_gp(vcpu);
                return;
        }

        kvm_arch_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->shadow_efer & EFER_LMA;

        vcpu->shadow_efer = efer;
}
EXPORT_SYMBOL_GPL(set_efer);

#endif

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        vcpu_put(vcpu);
        cond_resched();
        /* Cannot fail - no vcpu unplug yet. */
        vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_resched);

void load_msrs(struct vmx_msr_entry *e, int n)
{
        int i;

        for (i = 0; i < n; ++i)
                wrmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(load_msrs);

void save_msrs(struct vmx_msr_entry *e, int n)
{
        int i;

        for (i = 0; i < n; ++i)
                rdmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(save_msrs);

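/*
 * Run a vcpu until it exits to userspace.  An mmio access is a
 * two-step protocol: we exit to userspace with kvm_run->mmio filled
 * in, and userspace re-enters this ioctl with mmio_completed set
 * (and, for reads, the data placed in kvm_run->mmio.data).
 */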
static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
{
        struct kvm_vcpu *vcpu;
        int r;

        if (!valid_vcpu(kvm_run->vcpu))
                return -EINVAL;

        vcpu = vcpu_load(kvm, kvm_run->vcpu);
        if (!vcpu)
                return -ENOENT;

        if (kvm_run->emulated) {
                kvm_arch_ops->skip_emulated_instruction(vcpu);
                kvm_run->emulated = 0;
        }

        if (kvm_run->mmio_completed) {
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                vcpu->mmio_read_completed = 1;
        }

        vcpu->mmio_needed = 0;

        r = kvm_arch_ops->run(vcpu, kvm_run);

        vcpu_put(vcpu);
        return r;
}

static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
{
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(regs->vcpu))
                return -EINVAL;

        vcpu = vcpu_load(kvm, regs->vcpu);
        if (!vcpu)
                return -ENOENT;

        kvm_arch_ops->cache_regs(vcpu);

        regs->rax = vcpu->regs[VCPU_REGS_RAX];
        regs->rbx = vcpu->regs[VCPU_REGS_RBX];
        regs->rcx = vcpu->regs[VCPU_REGS_RCX];
        regs->rdx = vcpu->regs[VCPU_REGS_RDX];
        regs->rsi = vcpu->regs[VCPU_REGS_RSI];
        regs->rdi = vcpu->regs[VCPU_REGS_RDI];
        regs->rsp = vcpu->regs[VCPU_REGS_RSP];
        regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
        regs->r8 = vcpu->regs[VCPU_REGS_R8];
        regs->r9 = vcpu->regs[VCPU_REGS_R9];
        regs->r10 = vcpu->regs[VCPU_REGS_R10];
        regs->r11 = vcpu->regs[VCPU_REGS_R11];
        regs->r12 = vcpu->regs[VCPU_REGS_R12];
        regs->r13 = vcpu->regs[VCPU_REGS_R13];
        regs->r14 = vcpu->regs[VCPU_REGS_R14];
        regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

        regs->rip = vcpu->rip;
        regs->rflags = kvm_arch_ops->get_rflags(vcpu);

        /*
         * Don't leak debug flags in case they were set for guest debugging
         */
        if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
                regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

        vcpu_put(vcpu);

        return 0;
}

static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
{
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(regs->vcpu))
                return -EINVAL;

        vcpu = vcpu_load(kvm, regs->vcpu);
        if (!vcpu)
                return -ENOENT;

        vcpu->regs[VCPU_REGS_RAX] = regs->rax;
        vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
        vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
        vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
        vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
        vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
        vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
        vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
        vcpu->regs[VCPU_REGS_R8] = regs->r8;
        vcpu->regs[VCPU_REGS_R9] = regs->r9;
        vcpu->regs[VCPU_REGS_R10] = regs->r10;
        vcpu->regs[VCPU_REGS_R11] = regs->r11;
        vcpu->regs[VCPU_REGS_R12] = regs->r12;
        vcpu->regs[VCPU_REGS_R13] = regs->r13;
        vcpu->regs[VCPU_REGS_R14] = regs->r14;
        vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

        vcpu->rip = regs->rip;
        kvm_arch_ops->set_rflags(vcpu, regs->rflags);

        kvm_arch_ops->decache_regs(vcpu);

        vcpu_put(vcpu);

        return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_arch_ops->get_segment(vcpu, var, seg);
}

static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
        struct kvm_vcpu *vcpu;
        struct descriptor_table dt;

        if (!valid_vcpu(sregs->vcpu))
                return -EINVAL;
        vcpu = vcpu_load(kvm, sregs->vcpu);
        if (!vcpu)
                return -ENOENT;

        get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        kvm_arch_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
        sregs->idt.base = dt.base;
        kvm_arch_ops->get_gdt(vcpu, &dt);
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;

        sregs->cr0 = vcpu->cr0;
        sregs->cr2 = vcpu->cr2;
        sregs->cr3 = vcpu->cr3;
        sregs->cr4 = vcpu->cr4;
        sregs->cr8 = vcpu->cr8;
        sregs->efer = vcpu->shadow_efer;
        sregs->apic_base = vcpu->apic_base;

        memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
               sizeof sregs->interrupt_bitmap);

        vcpu_put(vcpu);

        return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_arch_ops->set_segment(vcpu, var, seg);
}

static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
{
        struct kvm_vcpu *vcpu;
        int mmu_reset_needed = 0;
        int i;
        struct descriptor_table dt;

        if (!valid_vcpu(sregs->vcpu))
                return -EINVAL;
        vcpu = vcpu_load(kvm, sregs->vcpu);
        if (!vcpu)
                return -ENOENT;

        set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_arch_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_arch_ops->set_gdt(vcpu, &dt);

        vcpu->cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
        vcpu->cr3 = sregs->cr3;

        vcpu->cr8 = sregs->cr8;

        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
        kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
        vcpu->apic_base = sregs->apic_base;

        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
        kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);

        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
        kvm_arch_ops->set_cr4(vcpu, sregs->cr4);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
               sizeof vcpu->irq_pending);
        vcpu->irq_summary = 0;
        for (i = 0; i < NR_IRQ_WORDS; ++i)
                if (vcpu->irq_pending[i])
                        __set_bit(i, &vcpu->irq_summary);

        vcpu_put(vcpu);

        return 0;
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static __init void kvm_init_msr_list(void)
{
        u32 dummy[2];
        unsigned i, j;

        for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
                if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
                        continue;
                if (j < i)
                        msrs_to_save[j] = msrs_to_save[i];
                j++;
        }
        num_msrs_to_save = j;
}

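/*
 * A sketch of the intended userspace pattern for the index list
 * (illustrative only; fd is an open /dev/kvm descriptor).  The first
 * call fails with E2BIG but writes back the real count:
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 };
 *	ioctl(fd, KVM_GET_MSR_INDEX_LIST, &probe);
 *	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(fd, KVM_GET_MSR_INDEX_LIST, list);
 *
 * Only msrs that survived the rdmsr_safe() probe in kvm_init_msr_list()
 * above are reported, which is the point of this patch.
 */
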
1450 /*
1451  * Adapt set_msr() to msr_io()'s calling convention
1452  */
1453 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1454 {
1455         return set_msr(vcpu, index, *data);
1456 }
1457
1458 /*
1459  * Read or write a bunch of msrs. All parameters are kernel addresses.
1460  *
1461  * @return number of msrs set successfully.
1462  */
1463 static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
1464                     struct kvm_msr_entry *entries,
1465                     int (*do_msr)(struct kvm_vcpu *vcpu,
1466                                   unsigned index, u64 *data))
1467 {
1468         struct kvm_vcpu *vcpu;
1469         int i;
1470
1471         if (!valid_vcpu(msrs->vcpu))
1472                 return -EINVAL;
1473
1474         vcpu = vcpu_load(kvm, msrs->vcpu);
1475         if (!vcpu)
1476                 return -ENOENT;
1477
1478         for (i = 0; i < msrs->nmsrs; ++i)
1479                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1480                         break;
1481
1482         vcpu_put(vcpu);
1483
1484         return i;
1485 }
1486
1487 /*
1488  * Read or write a bunch of msrs. Parameters are user addresses.
1489  *
1490  * @return number of msrs set successfully.
1491  */
1492 static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
1493                   int (*do_msr)(struct kvm_vcpu *vcpu,
1494                                 unsigned index, u64 *data),
1495                   int writeback)
1496 {
1497         struct kvm_msrs msrs;
1498         struct kvm_msr_entry *entries;
1499         int r, n;
1500         unsigned size;
1501
1502         r = -EFAULT;
1503         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1504                 goto out;
1505
1506         r = -E2BIG;
1507         if (msrs.nmsrs >= MAX_IO_MSRS)
1508                 goto out;
1509
1510         r = -ENOMEM;
1511         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1512         entries = vmalloc(size);
1513         if (!entries)
1514                 goto out;
1515
1516         r = -EFAULT;
1517         if (copy_from_user(entries, user_msrs->entries, size))
1518                 goto out_free;
1519
1520         r = n = __msr_io(kvm, &msrs, entries, do_msr);
1521         if (r < 0)
1522                 goto out_free;
1523
1524         r = -EFAULT;
1525         if (writeback && copy_to_user(user_msrs->entries, entries, size))
1526                 goto out_free;
1527
1528         r = n;
1529
1530 out_free:
1531         vfree(entries);
1532 out:
1533         return r;
1534 }
1535
1536 /*
1537  * Translate a guest virtual address to a guest physical address.
1538  */
1539 static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
1540 {
1541         unsigned long vaddr = tr->linear_address;
1542         struct kvm_vcpu *vcpu;
1543         gpa_t gpa;
1544
1545         vcpu = vcpu_load(kvm, tr->vcpu);
1546         if (!vcpu)
1547                 return -ENOENT;
1548         spin_lock(&kvm->lock);
1549         gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
1550         tr->physical_address = gpa;
1551         tr->valid = gpa != UNMAPPED_GVA;
1552         tr->writeable = 1;
1553         tr->usermode = 0;
1554         spin_unlock(&kvm->lock);
1555         vcpu_put(vcpu);
1556
1557         return 0;
1558 }
1559
1560 static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
1561 {
1562         struct kvm_vcpu *vcpu;
1563
1564         if (!valid_vcpu(irq->vcpu))
1565                 return -EINVAL;
1566         if (irq->irq < 0 || irq->irq >= 256)
1567                 return -EINVAL;
1568         vcpu = vcpu_load(kvm, irq->vcpu);
1569         if (!vcpu)
1570                 return -ENOENT;
1571
1572         set_bit(irq->irq, vcpu->irq_pending);
1573         set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
1574
1575         vcpu_put(vcpu);
1576
1577         return 0;
1578 }
1579
1580 static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
1581                                      struct kvm_debug_guest *dbg)
1582 {
1583         struct kvm_vcpu *vcpu;
1584         int r;
1585
1586         if (!valid_vcpu(dbg->vcpu))
1587                 return -EINVAL;
1588         vcpu = vcpu_load(kvm, dbg->vcpu);
1589         if (!vcpu)
1590                 return -ENOENT;
1591
1592         r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
1593
1594         vcpu_put(vcpu);
1595
1596         return r;
1597 }
1598
1599 static long kvm_dev_ioctl(struct file *filp,
1600                           unsigned int ioctl, unsigned long arg)
1601 {
1602         struct kvm *kvm = filp->private_data;
1603         int r = -EINVAL;
1604
1605         switch (ioctl) {
1606         case KVM_CREATE_VCPU: {
1607                 r = kvm_dev_ioctl_create_vcpu(kvm, arg);
1608                 if (r)
1609                         goto out;
1610                 break;
1611         }
1612         case KVM_RUN: {
1613                 struct kvm_run kvm_run;
1614
1615                 r = -EFAULT;
1616                 if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
1617                         goto out;
1618                 r = kvm_dev_ioctl_run(kvm, &kvm_run);
1619                 if (r < 0)
1620                         goto out;
1621                 r = -EFAULT;
1622                 if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run))
1623                         goto out;
1624                 r = 0;
1625                 break;
1626         }
1627         case KVM_GET_REGS: {
1628                 struct kvm_regs kvm_regs;
1629
1630                 r = -EFAULT;
1631                 if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
1632                         goto out;
1633                 r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
1634                 if (r)
1635                         goto out;
1636                 r = -EFAULT;
1637                 if (copy_to_user((void *)arg, &kvm_regs, sizeof kvm_regs))
1638                         goto out;
1639                 r = 0;
1640                 break;
1641         }
1642         case KVM_SET_REGS: {
1643                 struct kvm_regs kvm_regs;
1644
1645                 r = -EFAULT;
1646                 if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
1647                         goto out;
1648                 r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
1649                 if (r)
1650                         goto out;
1651                 r = 0;
1652                 break;
1653         }
1654         case KVM_GET_SREGS: {
1655                 struct kvm_sregs kvm_sregs;
1656
1657                 r = -EFAULT;
1658                 if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
1659                         goto out;
1660                 r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
1661                 if (r)
1662                         goto out;
1663                 r = -EFAULT;
1664                 if (copy_to_user((void *)arg, &kvm_sregs, sizeof kvm_sregs))
1665                         goto out;
1666                 r = 0;
1667                 break;
1668         }
1669         case KVM_SET_SREGS: {
1670                 struct kvm_sregs kvm_sregs;
1671
1672                 r = -EFAULT;
1673                 if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
1674                         goto out;
1675                 r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
1676                 if (r)
1677                         goto out;
1678                 r = 0;
1679                 break;
1680         }
1681         case KVM_TRANSLATE: {
1682                 struct kvm_translation tr;
1683
1684                 r = -EFAULT;
1685                 if (copy_from_user(&tr, (void *)arg, sizeof tr))
1686                         goto out;
1687                 r = kvm_dev_ioctl_translate(kvm, &tr);
1688                 if (r)
1689                         goto out;
1690                 r = -EFAULT;
1691                 if (copy_to_user((void *)arg, &tr, sizeof tr))
1692                         goto out;
1693                 r = 0;
1694                 break;
1695         }
1696         case KVM_INTERRUPT: {
1697                 struct kvm_interrupt irq;
1698
1699                 r = -EFAULT;
1700                 if (copy_from_user(&irq, (void __user *)arg, sizeof irq))
1701                         goto out;
1702                 r = kvm_dev_ioctl_interrupt(kvm, &irq);
1703                 if (r)
1704                         goto out;
1705                 r = 0;
1706                 break;
1707         }
1708         case KVM_DEBUG_GUEST: {
1709                 struct kvm_debug_guest dbg;
1710
1711                 r = -EFAULT;
1712                 if (copy_from_user(&dbg, (void __user *)arg, sizeof dbg))
1713                         goto out;
1714                 r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
1715                 if (r)
1716                         goto out;
1717                 r = 0;
1718                 break;
1719         }
1720         case KVM_SET_MEMORY_REGION: {
1721                 struct kvm_memory_region kvm_mem;
1722
1723                 r = -EFAULT;
1724                 if (copy_from_user(&kvm_mem, (void __user *)arg, sizeof kvm_mem))
1725                         goto out;
1726                 r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
1727                 if (r)
1728                         goto out;
1729                 break;
1730         }
1731         case KVM_GET_DIRTY_LOG: {
1732                 struct kvm_dirty_log log;
1733
1734                 r = -EFAULT;
1735                 if (copy_from_user(&log, (void __user *)arg, sizeof log))
1736                         goto out;
1737                 r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
1738                 if (r)
1739                         goto out;
1740                 break;
1741         }
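             /*
              * MSR access: msr_io() shuttles a struct kvm_msrs between
              * userspace and the given accessor; judging by these call
              * sites, the final flag (set for reads) requests that the
              * entries be copied back to userspace afterwards.
              */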
1742         case KVM_GET_MSRS:
1743                 r = msr_io(kvm, (void __user *)arg, get_msr, 1);
1744                 break;
1745         case KVM_SET_MSRS:
1746                 r = msr_io(kvm, (void __user *)arg, do_set_msr, 0);
1747                 break;
1748         case KVM_GET_MSR_INDEX_LIST: {
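                     /*
                      * Two-call protocol: the user's nmsrs count is read
                      * first, and the real count is always written back,
                      * so a too-small buffer fails with E2BIG while still
                      * telling the caller how much space to allocate for
                      * the retry.
                      */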
1749                 struct kvm_msr_list __user *user_msr_list = (void __user *)arg;
1750                 struct kvm_msr_list msr_list;
1751                 unsigned n;
1752
1753                 r = -EFAULT;
1754                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1755                         goto out;
1756                 n = msr_list.nmsrs;
1757                 msr_list.nmsrs = num_msrs_to_save;
1758                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1759                         goto out;
1760                 r = -E2BIG;
1761                 if (n < num_msrs_to_save)
1762                         goto out;
1763                 r = -EFAULT;
1764                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1765                                  num_msrs_to_save * sizeof(u32)))
1766                         goto out;
1767                 r = 0;
                     break;
1768         }
1769         default:
1770                 ;
1771         }
1772 out:
1773         return r;
1774 }
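
     /*
      * Illustrative userspace sketch (not part of this file) of the
      * KVM_GET_MSR_INDEX_LIST handshake implemented above; "fd" stands
      * for an open /dev/kvm file descriptor:
      *
      *	struct kvm_msr_list probe = { .nmsrs = 0 }, *list;
      *
      *	ioctl(fd, KVM_GET_MSR_INDEX_LIST, &probe); // fails with E2BIG,
      *	                                           // but sets probe.nmsrs
      *	list = malloc(sizeof *list + probe.nmsrs * sizeof(__u32));
      *	list->nmsrs = probe.nmsrs;
      *	if (ioctl(fd, KVM_GET_MSR_INDEX_LIST, list) == 0)
      *		... // list->indices[] now holds the exported MSR numbers
      */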
1775
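     /*
      * Back mmap() of the kvm fd with guest memory: the page offset
      * into the mapping is taken as a guest frame number and resolved
      * through the memory slots; unmapped frames raise SIGBUS.
      */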
1776 static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
1777                                    unsigned long address,
1778                                    int *type)
1779 {
1780         struct kvm *kvm = vma->vm_file->private_data;
1781         unsigned long pgoff;
1782         struct kvm_memory_slot *slot;
1783         struct page *page;
1784
1785         if (type)
                     *type = VM_FAULT_MINOR;
1786         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1787         slot = gfn_to_memslot(kvm, pgoff);
1788         if (!slot)
1789                 return NOPAGE_SIGBUS;
1790         page = gfn_to_page(slot, pgoff);
1791         if (!page)
1792                 return NOPAGE_SIGBUS;
1793         get_page(page);
1794         return page;
1795 }
1796
1797 static struct vm_operations_struct kvm_dev_vm_ops = {
1798         .nopage = kvm_dev_nopage,
1799 };
1800
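     /* No pages are set up at mmap() time; they are faulted in via nopage. */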
1801 static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
1802 {
1803         vma->vm_ops = &kvm_dev_vm_ops;
1804         return 0;
1805 }
1806
1807 static struct file_operations kvm_chardev_ops = {
1808         .open           = kvm_dev_open,
1809         .release        = kvm_dev_release,
1810         .unlocked_ioctl = kvm_dev_ioctl,
1811         .compat_ioctl   = kvm_dev_ioctl,
1812         .mmap           = kvm_dev_mmap,
1813 };
1814
1815 static struct miscdevice kvm_dev = {
1816         .minor = MISC_DYNAMIC_MINOR,
1817         .name  = "kvm",
1818         .fops  = &kvm_chardev_ops,
1819 };
1820
1821 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
1822                        void *v)
1823 {
1824         if (val == SYS_RESTART) {
1825                 /*
1826                  * Some (well, at least mine) BIOSes hang on reboot if
1827                  * in vmx root mode.
1828                  */
1829                 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
1830                 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
1831         }
1832         return NOTIFY_OK;
1833 }
1834
1835 static struct notifier_block kvm_reboot_notifier = {
1836         .notifier_call = kvm_reboot,
1837         .priority = 0,
1838 };
1839
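     /*
      * Export the kvm_stat counters read-only through debugfs
      * (typically mounted at /sys/kernel/debug), one file per counter
      * in a "kvm" directory.
      */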
1840 static __init void kvm_init_debug(void)
1841 {
1842         struct kvm_stats_debugfs_item *p;
1843
1844         debugfs_dir = debugfs_create_dir("kvm", NULL);
1845         for (p = debugfs_entries; p->name; ++p)
1846                 p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
1847                                                p->data);
1848 }
1849
1850 static void kvm_exit_debug(void)
1851 {
1852         struct kvm_stats_debugfs_item *p;
1853
1854         for (p = debugfs_entries; p->name; ++p)
1855                 debugfs_remove(p->dentry);
1856         debugfs_remove(debugfs_dir);
1857 }
1858
1859 hpa_t bad_page_address;
1860
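     /*
      * Called by the architecture support module once it has probed the
      * CPU: check for hardware virtualization support, enable it on
      * every CPU, hook reboot so we leave virtualization mode cleanly,
      * and register the /dev/kvm misc device.
      */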
1861 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
1862 {
1863         int r;
1864
1865         kvm_arch_ops = ops;
1866
1867         if (!kvm_arch_ops->cpu_has_kvm_support()) {
1868                 printk(KERN_ERR "kvm: no hardware support\n");
1869                 return -EOPNOTSUPP;
1870         }
1871         if (kvm_arch_ops->disabled_by_bios()) {
1872                 printk(KERN_ERR "kvm: disabled by bios\n");
1873                 return -EOPNOTSUPP;
1874         }
1875
1876         r = kvm_arch_ops->hardware_setup();
1877         if (r < 0)
1878                 return r;
1879
1880         on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
1881         register_reboot_notifier(&kvm_reboot_notifier);
1882
1883         kvm_chardev_ops.owner = module;
1884
1885         r = misc_register(&kvm_dev);
1886         if (r) {
1887                 printk(KERN_ERR "kvm: misc device register failed\n");
1888                 goto out_free;
1889         }
1890
1891         return r;
1892
1893 out_free:
1894         unregister_reboot_notifier(&kvm_reboot_notifier);
1895         on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
1896         kvm_arch_ops->hardware_unsetup();
1897         return r;
1898 }
1899
1900 void kvm_exit_arch(void)
1901 {
1902         misc_deregister(&kvm_dev);
1903
1904         unregister_reboot_notifier(&kvm_reboot_notifier);
1905         on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
1906         kvm_arch_ops->hardware_unsetup();
1907 }
1908
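     /*
      * bad_page is a zeroed catch-all page; its host physical address
      * (bad_page_address) is used as the backing for guest frames that
      * have no memory slot behind them.
      */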
1909 static __init int kvm_init(void)
1910 {
1911         static struct page *bad_page;
1912         int r = 0;
1913
1914         kvm_init_debug();
1915
1916         kvm_init_msr_list();
1917
1918         bad_page = alloc_page(GFP_KERNEL);
             if (bad_page == NULL) {
1919                 r = -ENOMEM;
1920                 goto out;
1921         }
1922
1923         bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
1924         memset(__va(bad_page_address), 0, PAGE_SIZE);
1925
1926         return r;
1927
1928 out:
1929         kvm_exit_debug();
1930         return r;
1931 }
1932
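     /* Free the bad page; only its address was kept, hence pfn_to_page(). */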
1933 static __exit void kvm_exit(void)
1934 {
1935         kvm_exit_debug();
1936         __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
1937 }
1938
1939 module_init(kvm_init)
1940 module_exit(kvm_exit)
1941
1942 EXPORT_SYMBOL_GPL(kvm_init_arch);
1943 EXPORT_SYMBOL_GPL(kvm_exit_arch);