/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
20 #include <linux/types.h>
21 #include <linux/string.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/highmem.h>
25 #include <asm/mmu-44x.h>
26 #include <asm/kvm_ppc.h>
27 #include <asm/kvm_44x.h>
/* Shadow TLB entries are always 4K unless the platform overrides it. */
#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

/* Guest-controlled "user attribute" bits in TLB word2. */
#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

/* Round-robin victim pointer for shadow TLB replacement. */
static unsigned int kvmppc_tlb_44x_pos;
46 void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
48 struct kvmppc_44x_tlbe *tlbe;
51 printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
52 printk("| %2s | %3s | %8s | %8s | %8s |\n",
53 "nr", "tid", "word0", "word1", "word2");
55 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
56 tlbe = &vcpu_44x->guest_tlb[i];
57 if (tlbe->word0 & PPC44x_TLB_VALID)
58 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
59 i, tlbe->tid, tlbe->word0, tlbe->word1,
63 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
64 tlbe = &vcpu_44x->shadow_tlb[i];
65 if (tlbe->word0 & PPC44x_TLB_VALID)
66 printk(" S%2d | %02X | %08X | %08X | %08X |\n",
67 i, tlbe->tid, tlbe->word0, tlbe->word1,
73 static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
75 /* We only care about the guest's permission and user bits. */
76 attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;
79 /* Guest is in supervisor mode, so we need to translate guest
80 * supervisor permissions into user permissions. */
81 attrib &= ~PPC44x_TLB_USER_PERM_MASK;
82 attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
85 /* Make sure host can always access this memory. */
86 attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
89 attrib |= PPC44x_TLB_M;
94 /* Search the guest TLB for a matching entry. */
95 int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
98 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
101 /* XXX Replace loop with fancy data structures. */
102 for (i = 0; i < PPC44x_TLB_SIZE; i++) {
103 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
106 if (eaddr < get_tlb_eaddr(tlbe))
109 if (eaddr > get_tlb_end(tlbe))
112 tid = get_tlb_tid(tlbe);
113 if (tid && (tid != pid))
116 if (!get_tlb_v(tlbe))
119 if (get_tlb_ts(tlbe) != as)
128 struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
131 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
132 unsigned int as = !!(vcpu->arch.msr & MSR_IS);
135 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
138 return &vcpu_44x->guest_tlb[index];
141 struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
144 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
145 unsigned int as = !!(vcpu->arch.msr & MSR_DS);
148 index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
151 return &vcpu_44x->guest_tlb[index];
154 static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
156 return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
159 static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
162 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
163 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
164 struct page *page = vcpu_44x->shadow_pages[index];
166 if (get_tlb_v(stlbe)) {
167 if (kvmppc_44x_tlbe_is_writable(stlbe))
168 kvm_release_page_dirty(page);
170 kvm_release_page_clean(page);
174 void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
178 for (i = 0; i <= tlb_44x_hwater; i++)
179 kvmppc_44x_shadow_release(vcpu, i);
182 void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
184 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
186 vcpu_44x->shadow_tlb_mod[i] = 1;
190 * kvmppc_mmu_map -- create a host mapping for guest memory
192 * If the guest wanted a larger page than the host supports, only the first
193 * host page is mapped here and the rest are demand faulted.
195 * If the guest wanted a smaller page than the host page size, we map only the
196 * guest-size page (i.e. not a full host page mapping).
198 * Caller must ensure that the specified guest TLB entry is safe to insert into
201 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
202 u32 flags, u32 max_bytes)
204 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
205 struct page *new_page;
206 struct kvmppc_44x_tlbe *stlbe;
211 /* Future optimization: don't overwrite the TLB entry containing the
212 * current PC (or stack?). */
213 victim = kvmppc_tlb_44x_pos++;
214 if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
215 kvmppc_tlb_44x_pos = 0;
216 stlbe = &vcpu_44x->shadow_tlb[victim];
218 /* Get reference to new page. */
219 gfn = gpaddr >> PAGE_SHIFT;
220 new_page = gfn_to_page(vcpu->kvm, gfn);
221 if (is_error_page(new_page)) {
222 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
223 kvm_release_page_clean(new_page);
226 hpaddr = page_to_phys(new_page);
228 /* Drop reference to old page. */
229 kvmppc_44x_shadow_release(vcpu, victim);
231 vcpu_44x->shadow_pages[victim] = new_page;
233 /* XXX Make sure (va, size) doesn't overlap any other
234 * entries. 440x6 user manual says the result would be
237 /* XXX what about AS? */
239 stlbe->tid = !(asid & 0xff);
241 /* Force TS=1 for all guest mappings. */
242 stlbe->word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
244 if (max_bytes >= PAGE_SIZE) {
245 /* Guest mapping is larger than or equal to host page size. We can use
246 * a "native" host mapping. */
247 stlbe->word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
249 /* Guest mapping is smaller than host page size. We must restrict the
250 * size of the mapping to be at most the smaller of the two, but for
251 * simplicity we fall back to a 4K mapping (this is probably what the
252 * guest is using anyways). */
253 stlbe->word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
255 /* 'hpaddr' is a host page, which is larger than the mapping we're
256 * inserting here. To compensate, we must add the in-page offset to the
258 hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
261 stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
262 stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
263 vcpu->arch.msr & MSR_PR);
264 kvmppc_tlbe_set_modified(vcpu, victim);
266 KVMTRACE_5D(STLB_WRITE, vcpu, victim,
267 stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
271 static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
272 gva_t eend, u32 asid)
274 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
275 unsigned int pid = !(asid & 0xff);
278 /* XXX Replace loop with fancy data structures. */
279 for (i = 0; i <= tlb_44x_hwater; i++) {
280 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
283 if (!get_tlb_v(stlbe))
286 if (eend < get_tlb_eaddr(stlbe))
289 if (eaddr > get_tlb_end(stlbe))
292 tid = get_tlb_tid(stlbe);
293 if (tid && (tid != pid))
296 kvmppc_44x_shadow_release(vcpu, i);
298 kvmppc_tlbe_set_modified(vcpu, i);
299 KVMTRACE_5D(STLB_INVAL, vcpu, i,
300 stlbe->tid, stlbe->word0, stlbe->word1,
301 stlbe->word2, handler);
305 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
307 vcpu->arch.shadow_pid = !usermode;
310 void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
312 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
315 if (unlikely(vcpu->arch.pid == new_pid))
318 vcpu->arch.pid = new_pid;
320 /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
321 * can't access guest kernel mappings (TID=1). When we switch to a new
322 * guest PID, which will also use host PID=0, we must discard the old guest
323 * userspace mappings. */
324 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_tlb); i++) {
325 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
327 if (get_tlb_tid(stlbe) == 0) {
328 kvmppc_44x_shadow_release(vcpu, i);
330 kvmppc_tlbe_set_modified(vcpu, i);
335 static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
336 const struct kvmppc_44x_tlbe *tlbe)
340 if (!get_tlb_v(tlbe))
343 /* Does it match current guest AS? */
344 /* XXX what about IS != DS? */
345 if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
348 gpa = get_tlb_raddr(tlbe);
349 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
350 /* Mapping is not for RAM. */
356 int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
358 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
361 struct kvmppc_44x_tlbe *tlbe;
364 index = vcpu->arch.gpr[ra];
365 if (index > PPC44x_TLB_SIZE) {
366 printk("%s: index %d\n", __func__, index);
367 kvmppc_dump_vcpu(vcpu);
371 tlbe = &vcpu_44x->guest_tlb[index];
373 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
374 if (tlbe->word0 & PPC44x_TLB_VALID) {
375 eaddr = get_tlb_eaddr(tlbe);
376 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
377 kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
381 case PPC44x_TLB_PAGEID:
382 tlbe->tid = get_mmucr_stid(vcpu);
383 tlbe->word0 = vcpu->arch.gpr[rs];
386 case PPC44x_TLB_XLAT:
387 tlbe->word1 = vcpu->arch.gpr[rs];
390 case PPC44x_TLB_ATTRIB:
391 tlbe->word2 = vcpu->arch.gpr[rs];
398 if (tlbe_is_host_safe(vcpu, tlbe)) {
403 eaddr = get_tlb_eaddr(tlbe);
404 gpaddr = get_tlb_raddr(tlbe);
406 /* Use the advertised page size to mask effective and real addrs. */
407 bytes = get_tlb_bytes(tlbe);
408 eaddr &= ~(bytes - 1);
409 gpaddr &= ~(bytes - 1);
411 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
412 flags = tlbe->word2 & 0xffff;
414 kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes);
417 KVMTRACE_5D(GTLB_WRITE, vcpu, index,
418 tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
424 int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
428 unsigned int as = get_mmucr_sts(vcpu);
429 unsigned int pid = get_mmucr_stid(vcpu);
431 ea = vcpu->arch.gpr[rb];
433 ea += vcpu->arch.gpr[ra];
435 index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
438 vcpu->arch.cr &= ~0x20000000;
440 vcpu->arch.cr |= 0x20000000;
442 vcpu->arch.gpr[rt] = index;