/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
/*
 * Setting this variable to true enables Two-Dimensional-Paging,
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;
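/*
 * Illustrative sketch, not part of the driver: with TDP the hardware
 * effectively composes two walks on every guest memory access,
 *
 *	hpa = host_walk(guest_walk(gva));   <- 1. gva -> gpa, 2. gpa -> hpa
 *
 * while shadow paging must maintain a combined gva -> hpa page table in
 * software and write-protect guest page tables to keep it coherent.
 */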
#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif
#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif
#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);
#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
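/*
 * A sketch of the index math above: each 64-bit paging level selects 9
 * bits of the address, so for any addr
 *
 *	PT64_INDEX(addr, 4) == (addr >> 39) & 511
 *	PT64_INDEX(addr, 3) == (addr >> 30) & 511
 *	PT64_INDEX(addr, 2) == (addr >> 21) & 511
 *	PT64_INDEX(addr, 1) == (addr >> 12) & 511
 */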
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk {
	int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
		     u64 addr, u64 *spte, int level);
};

struct kvm_unsync_walk {
	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
};

typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
	shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & shadow_dirty_mask;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
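/*
 * Worked example (a sketch, assuming PT32_DIR_PSE36_SHIFT == 13):
 * PSE-36 stores physical address bits 32..35 of a 4MB page in PDE bits
 * 13..16, so shift == 32 - 13 - 12 == 7 and a gpte with bit 13 set
 * contributes (1 << 13) << 7 == gfn 0x100000, i.e. physical bit 32.
 */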
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}
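/*
 * Note: set_64bit() gives a single atomic 64-bit store even on 32-bit
 * hosts, so a concurrent hardware walker never observes a torn spte
 * (half old pfn, half new).
 */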
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}
/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}
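/*
 * Sketch of the index math, assuming KVM_PAGES_PER_HPAGE == 512: a slot
 * with base_gfn 0x300 (not 2MB aligned) and gfn 0x801 yields
 *
 *	idx = (0x801 / 512) - (0x300 / 512) = 4 - 1 = 3
 *
 * so all gfns sharing one host huge page share one write_count slot.
 */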
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count += 1;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count -= 1;
	WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int *largepage_idx;

	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot);
		return *largepage_idx;
	}

	return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return ret;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		ret = 1;
	up_read(&current->mm->mmap_sem);

	return ret;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;

	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	if (!host_largepage_backed(vcpu->kvm, large_gfn))
		return 0;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return 0;

	return 1;
}
/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (!lpage)
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);

	return &slot->lpage_info[idx].rmap_pde;
}
/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
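/*
 * Encoding sketch: a gfn mapped by a single spte stores the spte
 * pointer directly (bit zero is clear, since sptes are 8-byte aligned):
 *
 *	*rmapp = (unsigned long)spte;
 *
 * once a second mapping appears, the pointer is replaced by a tagged
 * descriptor holding up to RMAP_EXT sptes plus an overflow chain:
 *
 *	*rmapp = (unsigned long)desc | 1;
 */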
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}
static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	pfn_t pfn;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	pfn = spte_to_pfn(*spte);
	if (*spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (is_writeble_pte(*spte))
		kvm_release_pfn_dirty(pfn);
	else
		kvm_release_pfn_clean(pfn);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}
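/*
 * Canonical iteration over all sptes of a gfn, as used by the
 * write-protect and aging paths below:
 *
 *	spte = rmap_next(kvm, rmapp, NULL);
 *	while (spte) {
 *		...
 *		spte = rmap_next(kvm, rmapp, spte);
 *	}
 */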
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, 0);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected) {
		pfn_t pfn;

		spte = rmap_next(kvm, rmapp, NULL);
		pfn = spte_to_pfn(*spte);
		kvm_set_pfn_dirty(pfn);
	}

	/* check for huge page mappings */
	rmapp = gfn_to_rmap(kvm, gfn, 1);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			spte = NULL;
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	if (write_protected)
		kvm_flush_remote_tlbs(kvm);
}
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		rmap_remove(kvm, spte);
		set_shadow_pte(spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
	int i;
	int retval = 0;

	/*
	 * If mmap_sem isn't taken, we can look at the memslots with only
	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
	 */
	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		/* mmu_lock protects userspace_addr */
		if (!start)
			continue;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
			retval |= handler(kvm,
					  &memslot->lpage_info[
						  gfn_offset /
						  KVM_PAGES_PER_HPAGE].rmap_pde);
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int young = 0;

	/* always return old for EPT */
	if (!shadow_accessed_mask)
		return 0;

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}
#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
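/*
 * A sketch, assuming KVM_MMU_HASH_SHIFT == 10: this is simply the low
 * ten bits of the gfn, e.g. kvm_page_table_hashfn(0x12345) == 0x345,
 * indexing one of 1024 hlist buckets in kvm->arch.mmu_page_hash.
 */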
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}
static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    mmu_parent_walk_fn fn)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	struct kvm_mmu_page *parent_sp;
	int i;

	if (!sp->multimapped && sp->parent_pte) {
		parent_sp = page_header(__pa(sp->parent_pte));
		fn(vcpu, parent_sp);
		mmu_parent_walk(vcpu, parent_sp, fn);
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
			fn(vcpu, parent_sp);
			mmu_parent_walk(vcpu, parent_sp, fn);
		}
}
static void kvm_mmu_update_unsync_bitmap(u64 *spte)
{
	unsigned int index;
	struct kvm_mmu_page *sp = page_header(__pa(spte));

	index = spte - sp->spt;
	__set_bit(index, sp->unsync_child_bitmap);
	sp->unsync_children = 1;
}

static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->parent_pte)
		return;

	if (!sp->multimapped) {
		kvm_mmu_update_unsync_bitmap(sp->parent_pte);
		return;
	}

	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
		}
}

static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	sp->unsync_children = 1;
	kvm_mmu_update_parents_unsync(sp);
	return 1;
}

static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
					struct kvm_mmu_page *sp)
{
	mmu_parent_walk(vcpu, sp, unsync_walk_fn);
	kvm_mmu_update_parents_unsync(sp);
}
static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define for_each_unsync_children(bitmap, idx)		\
	for (idx = find_first_bit(bitmap, 512);		\
	     idx < 512;					\
	     idx = find_next_bit(bitmap, 512, idx+1))
static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_unsync_walk *walker)
{
	int i, ret;

	if (!sp->unsync_children)
		return 0;

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent)) {
			struct kvm_mmu_page *child;
			child = page_header(ent & PT64_BASE_ADDR_MASK);

			if (child->unsync_children) {
				ret = mmu_unsync_walk(child, walker);
				if (ret)
					return ret;
				__clear_bit(i, sp->unsync_child_bitmap);
			}

			if (child->unsync) {
				ret = walker->entry(child, walker);
				__clear_bit(i, sp->unsync_child_bitmap);
				if (ret)
					return ret;
			}
		}
	}

	if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
		sp->unsync_children = 0;

	return 0;
}
static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	if (sp->role.glevels != vcpu->arch.mmu.root_level) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	rmap_write_protect(vcpu->kvm, sp->gfn);
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	kvm_mmu_flush_tlb(vcpu);
	return 0;
}

struct sync_walker {
	struct kvm_vcpu *vcpu;
	struct kvm_unsync_walk walker;
};

static int mmu_sync_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
{
	struct sync_walker *sync_walk = container_of(walk, struct sync_walker,
						     walker);
	struct kvm_vcpu *vcpu = sync_walk->vcpu;

	kvm_sync_page(vcpu, sp);
	return (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock));
}

static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	struct sync_walker walker = {
		.walker = { .entry = mmu_sync_fn, },
		.vcpu = vcpu,
	};

	while (mmu_unsync_walk(sp, &walker.walker))
		cond_resched_lock(&vcpu->kvm->mmu_lock);
}
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *tmp;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __func__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
		if (sp->gfn == gfn) {
			if (sp->unsync)
				if (kvm_sync_page(vcpu, sp))
					continue;

			if (sp->role.word != role.word)
				continue;

			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			if (sp->unsync_children) {
				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
				kvm_mmu_mark_parents_unsync(vcpu, sp);
			}
			pgprintk("%s: found\n", __func__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	if (!metaphysical) {
		rmap_write_protect(vcpu->kvm, gfn);
		account_shadowed(vcpu->kvm, gfn);
	}
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
	return sp;
}
static int walk_shadow(struct kvm_shadow_walk *walker,
		       struct kvm_vcpu *vcpu, u64 addr)
{
	hpa_t shadow_addr;
	int level;
	int r;
	u64 *sptep;
	unsigned index;

	shadow_addr = vcpu->arch.mmu.root_hpa;
	level = vcpu->arch.mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	while (level >= PT_PAGE_TABLE_LEVEL) {
		index = SHADOW_PT_INDEX(addr, level);
		sptep = ((u64 *)__va(shadow_addr)) + index;
		r = walker->entry(walker, vcpu, addr, sptep, level);
		if (r)
			return r;
		shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
		--level;
	}
	return 0;
}
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_large_pte(ent)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}
static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
}
struct zap_walker {
	struct kvm_unsync_walk walker;
	struct kvm *kvm;
	int zapped;
};

static int mmu_zap_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
{
	struct zap_walker *zap_walk = container_of(walk, struct zap_walker,
						   walker);
	kvm_mmu_zap_page(zap_walk->kvm, sp);
	zap_walk->zapped = 1;
	return 0;
}

static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct zap_walker walker = {
		.walker = { .entry = mmu_zap_fn, },
		.kvm = kvm,
		.zapped = 0,
	};

	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
		return 0;
	mmu_unsync_walk(sp, &walker.walker);
	return walker.zapped;
}
static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int ret;

	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	kvm_flush_remote_tlbs(kvm);
	if (!sp->role.invalid && !sp->role.metaphysical)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		sp->role.invalid = 1;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
	return ret;
}
/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}
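/*
 * Example (a sketch): with 1024 pages allocated, 200 free and a new
 * limit of 700, 824 pages are in use, so 124 victims are zapped from
 * the tail of active_mmu_pages before the new limit is recorded.
 */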
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			r = 1;
			if (kvm_mmu_zap_page(kvm, sp))
				n = bucket->first;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
	int i;
	u64 *pt = sp->spt;

	if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (pt[i] == shadow_notrap_nonpresent_pte)
			set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
	}
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return page;
}
static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *s;
	struct hlist_node *node, *n;

	index = kvm_page_table_hashfn(sp->gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	/* don't unsync if pagetable is shadowed with multiple roles */
	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
		if (s->gfn != sp->gfn || s->role.metaphysical)
			continue;
		if (s->role.word != sp->role.word)
			return 1;
	}
	kvm_mmu_mark_parents_unsync(vcpu, sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;
	mmu_convert_notrap(sp);
	return 0;
}

static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *shadow;

	shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
	if (shadow) {
		if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;
		if (shadow->unsync)
			return 0;
		if (can_unsync && oos_shadow)
			return kvm_unsync_page(vcpu, shadow);
		return 1;
	}
	return 0;
}
static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
		    unsigned pte_access, int user_fault,
		    int write_fault, int dirty, int largepage,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync)
{
	u64 spte;
	int ret = 0;
	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte | shadow_dirty_mask;
	if (!speculative)
		spte |= shadow_accessed_mask;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (largepage)
		spte |= PT_PAGE_SIZE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {

		if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
			ret = 1;
			spte = shadow_trap_nonpresent_pte;
			goto set_pte;
		}

		spte |= PT_WRITABLE_MASK;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	set_shadow_pte(shadow_pte, spte);
	return ret;
}
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int largepage, gfn_t gfn,
			 pfn_t pfn, bool speculative)
{
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (largepage && !is_large_pte(*shadow_pte)) {
			struct kvm_mmu_page *child;
			u64 pte = *shadow_pte;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, shadow_pte);
		} else if (pfn != spte_to_pfn(*shadow_pte)) {
			pgprintk("hfn old %lx new %lx\n",
				 spte_to_pfn(*shadow_pte), pfn);
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			if (largepage)
				was_rmapped = is_large_pte(*shadow_pte);
			else
				was_rmapped = 1;
		}
	}
	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
		     dirty, largepage, gfn, pfn, speculative, true)) {
		if (write_fault)
			*ptwrite = 1;
		kvm_x86_ops->tlb_flush(vcpu);
	}

	pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 is_large_pte(*shadow_pte)? "2MB" : "4kB",
		 is_present_pte(*shadow_pte)?"RW":"R", gfn,
		 *shadow_pte, shadow_pte);
	if (!was_rmapped && is_large_pte(*shadow_pte))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn, largepage);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_pfn_clean(pfn);
	} else {
		if (was_writeble)
			kvm_release_pfn_dirty(pfn);
		else
			kvm_release_pfn_clean(pfn);
	}
	if (speculative) {
		vcpu->arch.last_pte_updated = shadow_pte;
		vcpu->arch.last_pte_gfn = gfn;
	}
}
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

struct direct_shadow_walk {
	struct kvm_shadow_walk walker;
	pfn_t pfn;
	int write;
	int largepage;
	int pt_write;
};
static int direct_map_entry(struct kvm_shadow_walk *_walk,
			    struct kvm_vcpu *vcpu,
			    u64 addr, u64 *sptep, int level)
{
	struct direct_shadow_walk *walk =
		container_of(_walk, struct direct_shadow_walk, walker);
	struct kvm_mmu_page *sp;
	gfn_t pseudo_gfn;
	gfn_t gfn = addr >> PAGE_SHIFT;

	if (level == PT_PAGE_TABLE_LEVEL
	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
			     0, walk->write, 1, &walk->pt_write,
			     walk->largepage, gfn, walk->pfn, false);
		++vcpu->stat.pf_fixed;
		return 1;
	}

	if (*sptep == shadow_trap_nonpresent_pte) {
		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1,
				      1, ACC_ALL, sptep);
		if (!sp) {
			pgprintk("nonpaging_map: ENOMEM\n");
			kvm_release_pfn_clean(walk->pfn);
			return -ENOMEM;
		}

		set_shadow_pte(sptep,
			       __pa(sp->spt)
			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
			       | shadow_user_mask | shadow_x_mask);
	}
	return 0;
}
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int largepage, gfn_t gfn, pfn_t pfn)
{
	int r;
	struct direct_shadow_walk walker = {
		.walker = { .entry = direct_map_entry, },
		.pfn = pfn,
		.largepage = largepage,
		.write = write,
		.pt_write = 0,
	};

	r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT);
	if (r < 0)
		return r;
	return walker.pt_write;
}
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int largepage = 0;
	pfn_t pfn;
	unsigned long mmu_seq;

	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int metaphysical = 0;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			metaphysical = 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}
static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
			  u32 error_code)
{
	pfn_t pfn;
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}
static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.update_pte.pfn = bad_pfn;

	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}
static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
		    is_large_pte(pte))
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		if (!vcpu->arch.update_pte.largepage ||
		    sp->role.glevels == PT32_ROOT_LEVEL) {
			++vcpu->kvm->stat.mmu_pde_zapped;
			return;
		}
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
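/*
 * The xor with PT64_NX_MASK flips NX into a positive "execute allowed"
 * bit so that every bit in PT64_PERM_MASK obeys the same rule: a remote
 * flush is needed only when the new spte drops a permission the old one
 * granted (old & ~new).  Permission gains (e.g. RO -> RW) can be picked
 * up lazily by the next fault on each vcpu.
 */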
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & shadow_accessed_mask));
}
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	pfn_t pfn;

	vcpu->arch.update_pte.largepage = 0;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		vcpu->arch.update_pte.largepage = 1;
	}
	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.pfn = pfn;
}
static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_access_page(vcpu, gfn);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			if (kvm_mmu_zap_page(vcpu->kvm, sp))
				n = bucket->first;
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
		kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
		vcpu->arch.update_pte.pfn = bad_pfn;
	}
}
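/*
 * Worked example for the 32-bit doubling above (a sketch): a guest
 * write at offset 0x404 of its page directory (pde 257, covering gva
 * 0x40400000) doubles to page_offset 0x808, then doubles again at the
 * root level to 0x1010 with npte = 2: quadrant 1, sptes 2 and 3 of
 * that quadrant's shadow page are zapped.
 */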
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.invlpg(vcpu, gva);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
	}
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
	else
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
	kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		if (kvm_mmu_zap_page(kvm, sp))
			node = container_of(kvm->arch.active_mmu_pages.next,
					    struct kvm_mmu_page, link);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}
static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
{
	struct kvm_mmu_page *page;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
	kvm_mmu_zap_page(kvm, page);
}

static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;
	int cache_count = 0;

	spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int npages;

		if (!down_read_trylock(&kvm->slots_lock))
			continue;
		spin_lock(&kvm->mmu_lock);
		npages = kvm->arch.n_alloc_mmu_pages -
			 kvm->arch.n_free_mmu_pages;
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
			kvm_mmu_remove_one_alloc_mmu_page(kvm);
			cache_count--;
			kvm_freed = kvm;
		}
		nr_to_scan--;

		spin_unlock(&kvm->mmu_lock);
		up_read(&kvm->slots_lock);
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	spin_unlock(&kvm_lock);

	return cache_count;
}
static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	unregister_shrinker(&mmu_shrinker);
}
int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	register_shrinker(&mmu_shrinker);

	return 0;

nomem:
	mmu_destroy_caches();
	return -ENOMEM;
}
/*
 * Calculate mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
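/*
 * Example (a sketch, assuming KVM_PERMILLE_MMU_PAGES == 20): a guest
 * with 4GB of memory has ~1048576 guest pages, so the budget is
 * 1048576 * 20 / 1000 ~= 20971 shadow pages, comfortably above the
 * KVM_MIN_ALLOC_MMU_PAGES floor.
 */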
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}
static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			    gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->tlb_flush(vcpu);
	set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}
static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default:
		return 0;
	}
}
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
	if (r)
		goto out;

	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer->processed;
	return r;
}
#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			pfn_t pfn = gpa_to_pfn(vcpu, gpa);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_pfn_clean(pfn);
		}
	}
}
static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}
static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}
static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}
2844 int n_rmap = count_rmaps(vcpu);
2845 int n_actual = count_writable_mappings(vcpu);
2847 if (n_rmap != n_actual)
2848 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
2849 __func__, audit_msg, n_rmap, n_actual);
static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __func__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif