/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "vmx.h"
#include "kvm.h"

#define pgprintk(x...) do { } while (0)

#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}

#define PT64_ENT_PER_PAGE 512
#define PT32_ENT_PER_PAGE 1024

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT32_PTE_COPY_MASK \
	(PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
	PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_PAT_MASK | \
	PT_GLOBAL_MASK)

#define PT32_NON_PTE_COPY_MASK \
	(PT_PRESENT_MASK | PT_PWT_MASK | PT_PCD_MASK | \
	PT_ACCESSED_MASK | PT_DIRTY_MASK)

#define PT64_PTE_COPY_MASK \
	(PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT64_NON_PTE_COPY_MASK \
	(PT64_NX_MASK | PT32_NON_PTE_COPY_MASK)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level) \
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level) \
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_WP_MASK;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

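/*
 * Return a shadow page to the vcpu's free list so that it can be reused
 * for a later shadow page table allocation.
 */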
static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
{
	struct kvm_mmu_page *page_head = page_header(page_hpa);

	list_del(&page_head->link);
	page_head->page_hpa = page_hpa;
	list_add(&page_head->link, &vcpu->free_pages);
}

static int is_empty_shadow_page(hpa_t page_hpa)
{
	u32 *pos;
	u32 *end;

	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u32);
	     pos != end; pos++)
		if (*pos != 0)
			return 0;
	return 1;
}

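/*
 * Take a zeroed page off the vcpu's free list and move it to the kvm's
 * active list; returns INVALID_PAGE if no free shadow pages are left.
 */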
static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (list_empty(&vcpu->free_pages))
		return INVALID_PAGE;

	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
	list_del(&page->link);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->page_hpa));
	page->slot_bitmap = 0;
	page->global = 1;
	page->parent_pte = parent_pte;
	return page->page_hpa;
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(vcpu, gpa);

	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

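/*
 * Translate a guest physical address to a host physical address by looking
 * up the memory slot that backs the gfn; the result has HPA_ERR_MASK set if
 * the gfn is not covered by any slot.
 */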
hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_memory_slot *slot;
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!slot)
		return gpa | HPA_ERR_MASK;
	page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu, gpa);
}

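/*
 * Recursively tear down a shadow page table rooted at page_hpa, releasing
 * every present lower-level page before returning the page itself to the
 * free list.
 */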
static void release_pt_page_64(struct kvm_vcpu *vcpu, hpa_t page_hpa,
			       int level)
{
	ASSERT(vcpu);
	ASSERT(VALID_PAGE(page_hpa));
	ASSERT(level <= PT64_ROOT_LEVEL && level > 0);

	if (level == 1)
		memset(__va(page_hpa), 0, PAGE_SIZE);
	else {
		u64 *pos;
		u64 *end;

		for (pos = __va(page_hpa), end = pos + PT64_ENT_PER_PAGE;
		     pos != end; pos++) {
			u64 current_ent = *pos;

			*pos = 0;
			if (is_present_pte(current_ent))
				release_pt_page_64(vcpu,
						   current_ent &
						   PT64_BASE_ADDR_MASK,
						   level - 1);
		}
	}
	kvm_mmu_free_page(vcpu, page_hpa);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

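/*
 * Install a guest-virtual -> host-physical translation in the shadow page
 * table used while the guest has paging disabled, allocating intermediate
 * shadow pages as needed.
 */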
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
				       PT_USER_MASK;
			return 0;
		}

		if (table[index] == 0) {
			hpa_t new_table = kvm_mmu_alloc_page(vcpu,
							     &table[index]);

			if (!VALID_PAGE(new_table)) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			if (level == PT32E_ROOT_LEVEL)
				table[index] = new_table | PT_PRESENT_MASK;
			else
				table[index] = new_table | PT_PRESENT_MASK |
					       PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

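/*
 * Throw away the whole shadow page table and start over with a fresh root.
 * Used when nonpaging_map() runs out of preallocated shadow pages.
 */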
static void nonpaging_flush(struct kvm_vcpu *vcpu)
{
	hpa_t root = vcpu->mmu.root_hpa;

	++kvm_stat.tlb_flush;
	pgprintk("nonpaging_flush\n");
	ASSERT(VALID_PAGE(root));
	release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
	root = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(root));
	vcpu->mmu.root_hpa = root;
	if (is_paging(vcpu))
		root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK));
	kvm_arch_ops->set_cr3(vcpu, root);
	kvm_arch_ops->tlb_flush(vcpu);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	int ret;
	gpa_t addr = gva;

	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	for (;;) {
		hpa_t paddr;

		paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

		if (is_error_hpa(paddr))
			return 1;

		ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
		if (ret) {
			nonpaging_flush(vcpu);
			continue;
		}
		return ret;
	}
}

static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	hpa_t root;

	root = vcpu->mmu.root_hpa;
	if (VALID_PAGE(root))
		release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->inval_page = nonpaging_inval_page;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->root_level = PT32E_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
	return 0;
}

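/*
 * Emulate a guest TLB flush: zap every non-global shadow page that still
 * has a parent pte pointing at it, then flush the hardware TLB.
 */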
static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page, *npage;

	list_for_each_entry_safe(page, npage, &vcpu->kvm->active_mmu_pages,
				 link) {
		if (page->global)
			continue;

		if (!page->parent_pte)
			continue;

		*page->parent_pte = 0;
		release_pt_page_64(vcpu, page->page_hpa, 1);
	}
	++kvm_stat.tlb_flush;
	kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	kvm_mmu_flush_tlb(vcpu);
}

static void mark_pagetable_nonglobal(void *shadow_pte)
{
	page_header(__pa(shadow_pte))->global = 0;
}

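/*
 * Fill in the bits common to all shadow ptes: stash the guest access bits
 * in the shadow-only bit area, drop write access for clean pages, and point
 * the pte at the host page (or mark it as an I/O pte if the gpa has no
 * backing memory slot).
 */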
static inline void set_pte_common(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  int dirty,
				  u64 access_bits)
{
	hpa_t paddr;

	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	*shadow_pte |= access_bits;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	if (!(*shadow_pte & PT_GLOBAL_MASK))
		mark_pagetable_nonglobal(shadow_pte);

	if (is_error_hpa(paddr)) {
		*shadow_pte |= gaddr;
		*shadow_pte |= PT_SHADOW_IO_MARK;
		*shadow_pte &= ~PT_PRESENT_MASK;
	} else {
		*shadow_pte |= paddr;
		page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	}
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

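/*
 * Handle a read fault on a pte whose user bit was stripped by the shadow
 * code: restore user access while keeping the page write-protected.
 */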
static inline int fix_read_pf(u64 *shadow_ent)
{
	if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
	    !(*shadow_ent & PT_USER_MASK)) {
		/*
		 * If supervisor write protect is disabled, we shadow kernel
		 * pages as user pages so we can trap the write access.
		 */
		*shadow_ent |= PT_USER_MASK;
		*shadow_ent &= ~PT_WRITABLE_MASK;

		return 1;
	}
	return 0;
}

static int may_access(u64 pte, int write, int user)
{
	if (user && !(pte & PT_USER_MASK))
		return 0;
	if (write && !(pte & PT_WRITABLE_MASK))
		return 0;
	return 1;
}

/*
 * Remove a shadow pte.
 */
static void paging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
	hpa_t page_addr = vcpu->mmu.root_hpa;
	int level = vcpu->mmu.shadow_root_level;

	for (; ; level--) {
		u32 index = PT64_INDEX(addr, level);
		u64 *table = __va(page_addr);

		if (level == PT_PAGE_TABLE_LEVEL) {
			table[index] = 0;
			return;
		}

		if (!is_present_pte(table[index]))
			return;

		page_addr = table[index] & PT64_BASE_ADDR_MASK;

		if (level == PT_DIRECTORY_LEVEL &&
		    (table[index] & PT_SHADOW_PS_MARK)) {
			table[index] = 0;
			release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL);

			kvm_arch_ops->tlb_flush(vcpu);
			return;
		}
	}
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

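/*
 * The guest page table walkers are generated from paging_tmpl.h, once for
 * 64-bit (and PAE) guest ptes and once for legacy 32-bit ptes.
 */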
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->inval_page = paging_inval_page;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT64_ROOT_LEVEL;
	context->shadow_root_level = PT64_ROOT_LEVEL;
	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
			      (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->inval_page = paging_inval_page;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
			      (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	int ret;

	if ((ret = paging64_init_context(vcpu)))
		return ret;

	vcpu->mmu.root_level = PT32E_ROOT_LEVEL;
	vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL;
	return 0;
}

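/*
 * Pick the MMU context that matches the guest's current paging mode:
 * no paging, long mode, PAE, or legacy 32-bit paging.
 */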
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (kvm_arch_ops->is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	while (!list_empty(&vcpu->free_pages)) {
		struct kvm_mmu_page *page;

		page = list_entry(vcpu->free_pages.next,
				  struct kvm_mmu_page, link);
		list_del(&page->link);
		__free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
		page->page_hpa = INVALID_PAGE;
	}
}

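/*
 * Preallocate the fixed pool of shadow pages for this vcpu and link them
 * onto its free list.
 */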
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	int i;

	ASSERT(vcpu);

	for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
		struct page *page;
		struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];

		INIT_LIST_HEAD(&page_header->link);
		if ((page = alloc_page(GFP_KVM_MMU)) == NULL)
			goto error_1;
		page->private = (unsigned long)page_header;
		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
		list_add(&page_header->link, &vcpu->free_pages);
	}
	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_init(struct kvm_vcpu *vcpu)
{
	int r;

	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
	ASSERT(list_empty(&vcpu->free_pages));

	if ((r = alloc_mmu_pages(vcpu)))
		return r;

	if ((r = init_kvm_mmu(vcpu))) {
		free_mmu_pages(vcpu);
		return r;
	}
	return 0;
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
}

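/*
 * Write-protect every shadow pte that maps a page belonging to the given
 * memory slot, so that later guest writes fault and can be tracked.
 */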
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = __va(page->page_hpa);
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}