/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *
 * Copyright (c) 2007, Intel Corporation.
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include "vcpu.h"

#include <linux/rwsem.h>

#include <asm/tlb.h>
/*
 * Check to see if the address rid:va is translated by the TLB
 */
static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
{
	return ((trp->p) && (trp->rid == rid)
		&& ((va - trp->vadr) < PSIZE(trp->ps)));
}
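
/*
 * Note: the single unsigned compare above covers both bounds: if
 * va < trp->vadr, the subtraction wraps around and the result exceeds
 * PSIZE(trp->ps), so the entry is correctly rejected.
 */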
/*
 * Only for GUEST TR format.
 */
static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
{
	u64 sa1, ea1;

	if (!trp->p || trp->rid != rid)
		return 0;

	sa1 = trp->vadr;
	ea1 = sa1 + PSIZE(trp->ps) - 1;
	eva -= 1;
	if ((sva > ea1) || (sa1 > eva))
		return 0;
	else
		return 1;
}
void machine_tlb_purge(u64 va, u64 ps)
{
	/* ptc.l expects the page size in itir format, i.e. in bits 7:2 */
	ia64_ptcl(va, ps << 2);
}
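
/*
 * Flush the vcpu-local machine TLB by stepping through the ptc.e loop
 * parameters (base address, counts and strides) that firmware reports
 * via PAL_PTCE_INFO and that were cached in the vcpu arch state.
 */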
void local_flush_tlb_all(void)
{
	int i, j;
	unsigned long flags, count0, count1;
	unsigned long stride0, stride1, addr;

	addr = current_vcpu->arch.ptce_base;
	count0 = current_vcpu->arch.ptce_count[0];
	count1 = current_vcpu->arch.ptce_count[1];
	stride0 = current_vcpu->arch.ptce_stride[0];
	stride1 = current_vcpu->arch.ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();	/* srlz.i implies srlz.d */
}
int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
{
	union ia64_rr vrr;
	union ia64_pta vpta;
	struct ia64_psr vpsr;

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vpta.val = vcpu_get_pta(vcpu);

	if (vrr.ve & vpta.ve) {
		switch (ref) {
		case DATA_REF:
		case NA_REF:
			return vpsr.dt;
		case INST_REF:
			return vpsr.dt && vpsr.it && vpsr.ic;
		case RSE_REF:
			return vpsr.dt && vpsr.rt;
		}
	}
	return 0;
}
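
/*
 * In other words, the guest VHPT walker is consulted only when the ve
 * bit is set in both the region register and the guest PTA, and the
 * psr translation bits relevant to the reference type (data,
 * instruction or RSE) are all on.
 */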
struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
{
	u64 index, pfn, rid, pfn_bits;

	pfn_bits = vpta.size - 5 - 8;
	pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
	rid = _REGION_ID(vrr);
	index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
	*tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);

	return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
		(index << 5));
}
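
/*
 * Worked example for the hash above, with hypothetical numbers: for
 * vpta.size == 15 (a 32 KiB hash of 1024 32-byte entries, hence the
 * "- 5"), pfn_bits is 15 - 5 - 8 = 2.  The low 8 RID bits pick a group
 * of four slots, the low 2 pfn bits pick the slot within it, and the
 * remaining pfn bits together with RID bits 8..23 form the tag that
 * must match on lookup.
 */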
struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
{
	struct thash_data *trp;
	int i;
	u64 rid;

	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	if (type == D_TLB) {
		if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
					i < NDTRS; i++, trp++) {
				if (__is_tr_translated(trp, rid, va))
					return trp;
			}
		}
	} else {
		if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
					i < NITRS; i++, trp++) {
				if (__is_tr_translated(trp, rid, va))
					return trp;
			}
		}
	}
	return NULL;
}
static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
{
	union ia64_rr rr;
	struct thash_data *head;
	unsigned long ps, gpaddr;

	ps = itir_ps(itir);

	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
		(ifa & ((1UL << ps) - 1));

	rr.val = ia64_get_rr(ifa);
	head = (struct thash_data *)ia64_thash(ifa);
	head->etag = INVALID_TI_TAG;
	ia64_mf();
	head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
	head->itir = rr.ps << 2;
	head->etag = ia64_ttag(ifa);
	head->gpaddr = gpaddr;
}
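
/*
 * Update protocol above: the tag is first forced to INVALID_TI_TAG so
 * a concurrent hardware VHPT walk cannot match a half-written entry,
 * the payload is filled in behind a memory fence, and only then is the
 * real tag stored.
 */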
void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
	u64 i, dirty_pages = 1;
	u64 base_gfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;

	vmm_spin_lock(lock);
	for (i = 0; i < dirty_pages; i++) {
		/* avoid RAW */
		if (!test_bit(base_gfn + i, dirty_bitmap))
			set_bit(base_gfn + i, dirty_bitmap);
	}
	vmm_spin_unlock(lock);
}
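
/*
 * Example, assuming 16 KiB host pages (PAGE_SHIFT == 14): a 64 KiB
 * guest mapping (ps == 16) dirties 1 << (16 - 14) = 4 consecutive
 * frames starting at base_gfn.
 */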
void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
{
	u64 phy_pte, psr;
	union ia64_rr mrr;

	mrr.val = ia64_get_rr(va);
	phy_pte = translate_phy_pte(&pte, itir, va);

	if (itir_ps(itir) >= mrr.ps) {
		vhpt_insert(phy_pte, itir, va, pte);
	} else {
		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		ia64_itc(type, va, phy_pte, itir_ps(itir));
		ia64_set_psr(psr);
	}

	if (!(pte & VTLB_PTE_IO))
		mark_pages_dirty(v, pte, itir_ps(itir));
}
struct thash_data *vhpt_lookup(u64 va)
{
	struct thash_data *head;
	u64 tag;

	head = (struct thash_data *)ia64_thash(va);
	tag = ia64_ttag(va);
	if (head->etag == tag)
		return head;
	return NULL;
}
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
	u64 ret;
	struct thash_data *data;

	data = __vtr_lookup(current_vcpu, iha, D_TLB);
	if (data != NULL)
		thash_vhpt_insert(current_vcpu, data->page_flags,
			data->itir, iha, D_TLB);

	asm volatile ("rsm psr.ic|psr.i;;"
			"srlz.d;;"
			"ld8.s r9=[%1];;"
			"tnat.nz p6,p7=r9;;"
			"(p6) mov %0=1;"
			"(p6) mov r9=r0;"
			"(p7) extr.u r9=r9,0,53;;"
			"(p7) mov %0=r0;"
			"(p7) st8 [%2]=r9;;"
			"ssm psr.ic;;"
			"srlz.d;;"
			/* "ssm psr.i;;" will be needed once the vmm runs with
			 * interrupts enabled */
			: "=r"(ret) : "r"(iha), "r"(pte) : "memory");

	return ret;
}
/*
 * purge software guest tlb
 */
static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
	struct thash_data *cur;
	u64 start, curadr, size, psbits, tag, rr_ps, num;
	union ia64_rr vrr;
	struct thash_cb *hcb = &v->arch.vtlb;

	vrr.val = vcpu_get_rr(v, va);
	psbits = VMX(v, psbits[(va >> 61)]);
	start = va & ~((1UL << ps) - 1);
	while (psbits) {
		curadr = start;
		rr_ps = __ffs(psbits);
		psbits &= ~(1UL << rr_ps);
		num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
		size = PSIZE(rr_ps);
		vrr.ps = rr_ps;
		while (num) {
			cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
			if (cur->etag == tag && cur->ps == rr_ps)
				cur->etag = INVALID_TI_TAG;
			curadr += size;
			num--;
		}
	}
}
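
/*
 * Example: if both 4 KiB (rr_ps == 12) and 16 KiB (rr_ps == 14)
 * translations were ever inserted into this region, psbits has bits
 * 12 and 14 set, so purging a 16 KiB range (ps == 14) probes the four
 * possible 4 KiB slots and the single 16 KiB slot, invalidating every
 * entry whose tag and page size match.
 */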
/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
	struct thash_data *cur;
	u64 start, size, tag, num;
	union ia64_rr rr;

	start = va & ~((1UL << ps) - 1);
	rr.val = ia64_get_rr(va);
	size = PSIZE(rr.ps);
	num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
	while (num) {
		cur = (struct thash_data *)ia64_thash(start);
		tag = ia64_ttag(start);
		if (cur->etag == tag)
			cur->etag = INVALID_TI_TAG;
		start += size;
		num--;
	}
	machine_tlb_purge(va, ps);
}
/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into the thash, "va" must be an
 *     address covered by the inserted machine VHPT entry.
 *  2: The entry format is always the TLB format.
 *  3: The caller must make sure the new entry does not overlap with
 *     any existing entry.
 */
void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
{
	struct thash_data *head;
	union ia64_rr vrr;
	u64 tag;
	struct thash_cb *hcb = &v->arch.vtlb;

	vrr.val = vcpu_get_rr(v, va);
	vrr.ps = itir_ps(itir);
	VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
	head = vsa_thash(hcb->pta, va, vrr.val, &tag);
	head->page_flags = pte;
	head->itir = itir;
	head->etag = tag;
}
int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
{
	struct thash_data *trp;
	int i;
	u64 end, rid;

	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	end = va + PSIZE(ps);
	if (type == D_TLB) {
		if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
					i < NDTRS; i++, trp++) {
				if (__is_tr_overlap(trp, rid, va, end))
					return i;
			}
		}
	} else {
		if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
			for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
					i < NITRS; i++, trp++) {
				if (__is_tr_overlap(trp, rid, va, end))
					return i;
			}
		}
	}
	return -1;
}
/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
{
	if (vcpu_quick_region_check(v->arch.tc_regions, va))
		vtlb_purge(v, va, ps);
	vhpt_purge(v, va, ps);
}
void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
{
	u64 old_va = va;

	va = REGION_OFFSET(va);
	if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
		vtlb_purge(v, va, ps);
	vhpt_purge(v, va, ps);
}
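
/*
 * Note: the remote purge hashes on the region offset of va, while the
 * quick-region check still uses the original address (old_va) to
 * decide whether the region holds any TC entries at all.
 */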
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
	u64 ps, ps_mask, paddr, maddr, io_mask;
	union pte_flags phy_pte;

	ps = itir_ps(itir);
	ps_mask = ~((1UL << ps) - 1);
	phy_pte.val = *pte;
	paddr = *pte;
	paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
	maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
	io_mask = maddr & GPFN_IO_MASK;
	if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
		*pte |= VTLB_PTE_IO;
		return -1;
	}
	maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
		(paddr & ~PAGE_MASK);
	phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
	return phy_pte.val;
}
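
/*
 * In short, translate_phy_pte() rewrites a guest PTE into a machine
 * PTE: the guest physical frame is looked up in the p2m table and its
 * ppn replaced by the machine ppn.  Frames flagged as device I/O (any
 * GPFN_IO_MASK value other than GPFN_PHYS_MMIO) cannot be mapped
 * directly; the PTE is tagged VTLB_PTE_IO and -1 is returned so the
 * caller falls back to MMIO emulation.
 */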
/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: Only a TC entry can be purged and inserted.
 * A return value of 1 indicates that the mapping is MMIO.
 */
int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
						u64 ifa, int type)
{
	u64 ps;
	u64 phy_pte, io_mask, index;
	union ia64_rr vrr, mrr;
	int ret = 0;

	ps = itir_ps(itir);
	vrr.val = vcpu_get_rr(v, ifa);
	mrr.val = ia64_get_rr(ifa);

	index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
	phy_pte = translate_phy_pte(&pte, itir, ifa);

	/* Ensure the WB attribute if the pte refers to a normal memory page,
	 * which is required by vga acceleration since qemu maps the shared
	 * vram buffer with WB.
	 */
	if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
			io_mask != GPFN_PHYS_MMIO) {
		pte &= ~_PAGE_MA_MASK;
		phy_pte &= ~_PAGE_MA_MASK;
	}

	if (pte & VTLB_PTE_IO)
		ret = 1;

	vtlb_purge(v, ifa, ps);
	vhpt_purge(v, ifa, ps);

	if (ps == mrr.ps) {
		if (!(pte & VTLB_PTE_IO)) {
			vhpt_insert(phy_pte, itir, ifa, pte);
		} else {
			vtlb_insert(v, pte, itir, ifa);
			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
		}
	} else if (ps > mrr.ps) {
		vtlb_insert(v, pte, itir, ifa);
		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
		if (!(pte & VTLB_PTE_IO))
			vhpt_insert(phy_pte, itir, ifa, pte);
	} else {
		u64 psr;

		phy_pte &= ~PAGE_FLAGS_RV_MASK;
		psr = ia64_clear_ic();
		ia64_itc(type, ifa, phy_pte, ps);
		ia64_set_psr(psr);
	}
	if (!(pte & VTLB_PTE_IO))
		mark_pages_dirty(v, pte, ps);

	return ret;
}
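
/*
 * Insert policy above: when the guest page size equals the host
 * region page size, a single entry in the VHPT (memory) or VTLB (MMIO)
 * suffices.  A larger guest page always goes into the VTLB, plus the
 * VHPT for memory, since one machine entry at host granularity cannot
 * cover it.  A smaller guest page is installed straight into the
 * machine TLB via itc with psr.ic temporarily cleared.
 */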
/*
 * Purge all TCs or VHPT entries, including those in the hash table.
 */
void thash_purge_all(struct kvm_vcpu *v)
{
	int i;
	struct thash_data *head;
	struct thash_cb *vtlb, *vhpt;

	vtlb = &v->arch.vtlb;
	vhpt = &v->arch.vhpt;

	for (i = 0; i < 8; i++)
		VMX(v, psbits[i]) = 0;

	head = vtlb->hash;
	for (i = 0; i < vtlb->num; i++) {
		head->page_flags = 0;
		head->etag = INVALID_TI_TAG;
		head->itir = 0;
		head->next = 0;
		head++;
	}

	head = vhpt->hash;
	for (i = 0; i < vhpt->num; i++) {
		head->page_flags = 0;
		head->etag = INVALID_TI_TAG;
		head->itir = 0;
		head->next = 0;
		head++;
	}

	local_flush_tlb_all();
}
/*
 * Lookup the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  in: TLB format for both VHPT & TLB.
 */
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
	struct thash_data *cch;
	u64 psbits, ps, tag;
	union ia64_rr vrr;
	struct thash_cb *hcb = &v->arch.vtlb;

	cch = __vtr_lookup(v, va, is_data);
	if (cch)
		return cch;

	if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
		return NULL;

	psbits = VMX(v, psbits[(va >> 61)]);
	vrr.val = vcpu_get_rr(v, va);
	while (psbits) {
		ps = __ffs(psbits);
		psbits &= ~(1UL << ps);
		vrr.ps = ps;
		cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
		if (cch->etag == tag && cch->ps == ps)
			return cch;
	}

	return NULL;
}
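
/*
 * Lookup order above: pinned TR entries first, then one hash probe per
 * page size ever inserted into this region (tracked in psbits), since
 * entries with different page sizes hash to different slots.
 */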
/*
 * Initialize internal control data before service.
 */
void thash_init(struct thash_cb *hcb, u64 sz)
{
	int i;
	struct thash_data *head;

	hcb->pta.val = (unsigned long)hcb->hash;
	hcb->pta.vf = 1;
	hcb->pta.ve = 1;
	hcb->pta.size = sz;
	head = hcb->hash;
	for (i = 0; i < hcb->num; i++) {
		head->page_flags = 0;
		head->itir = 0;
		head->etag = INVALID_TI_TAG;
		head->next = 0;
		head++;
	}
}
u64 kvm_get_mpt_entry(u64 gpfn)
{
	u64 *base = (u64 *) KVM_P2M_BASE;

	return *(base + gpfn);
}
u64 kvm_lookup_mpa(u64 gpfn)
{
	u64 maddr;

	maddr = kvm_get_mpt_entry(gpfn);
	return maddr & _PAGE_PPN_MASK;
}
u64 kvm_gpa_to_mpa(u64 gpa)
{
	u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);

	return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}
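
/*
 * Example, assuming 16 KiB pages (PAGE_SHIFT == 14): kvm_gpa_to_mpa
 * splits gpa into the frame number gpa >> 14 and the offset
 * gpa & 0x3fff, translates the frame through the p2m table and
 * splices the offset back in.
 */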
/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
	u64 gpip = 0;	/* guest physical IP */
	u64 *vpa;
	struct thash_data *tlb;
	u64 maddr;

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
		/* I-side physical mode */
		gpip = gip;
	} else {
		tlb = vtlb_lookup(vcpu, gip, I_TLB);
		if (tlb)
			gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
				(gip & (PSIZE(tlb->ps) - 1));
	}
	if (gpip) {
		maddr = kvm_gpa_to_mpa(gpip);
	} else {
		tlb = vhpt_lookup(gip);
		if (tlb == NULL) {
			ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
			return IA64_FAULT;
		}
		maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
			| (gip & (PSIZE(tlb->ps) - 1));
	}
	vpa = (u64 *)__kvm_va(maddr);

	pbundle->i64[0] = *vpa++;
	pbundle->i64[1] = *vpa;

	return IA64_NO_FAULT;
}
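
/*
 * The ppn field of an ia64 PTE is always in 4 KiB units regardless of
 * the mapping's page size, hence "ppn >> (ps - 12) << ps" above to
 * recover the page-aligned physical address before the offset within
 * the page is ored back in.
 */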
void kvm_init_vhpt(struct kvm_vcpu *v)
{
	v->arch.vhpt.num = VHPT_NUM_ENTRIES;
	thash_init(&v->arch.vhpt, VHPT_SHIFT);
	ia64_set_pta(v->arch.vhpt.pta.val);
	/* Enable VHPT here? */
}
void kvm_init_vtlb(struct kvm_vcpu *v)
{
	v->arch.vtlb.num = VTLB_NUM_ENTRIES;
	thash_init(&v->arch.vtlb, VTLB_SHIFT);
}