2 * kvm_vcpu.c: handling all virtual cpu related things.
3 * Copyright (c) 2005, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 * Shaofan Li (Susue Li) <susie.li@intel.com>
19 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
20 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21 * Xiantao Zhang <xiantao.zhang@intel.com>
24 #include <linux/kvm_host.h>
25 #include <linux/types.h>
27 #include <asm/processor.h>
28 #include <asm/ia64regs.h>
29 #include <asm/gcc_intrin.h>
30 #include <asm/kregs.h>
31 #include <asm/pgtable.h>
34 #include "asm-offsets.h"
39 * - Index by it/dt/rt sequence
40 * - Only existing mode transitions are allowed in this table
41 * - RSE is placed at lazy mode when emulating guest partial mode
42 * - If gva happens to be rr0 and rr4, only allowed case is identity
43 * mapping (gva=gpa), or panic! (How?)
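/*
 * Illustrative sketch (assumed helper, not code from this file): the row
 * and column index is the (it,dt,rt) triple packed into three bits, so a
 * transition is looked up as
 * mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)], where MODE_IND
 * is assumed to behave like:
 *
 *	static inline int mode_ind(struct ia64_psr psr)
 *	{
 *		return (psr.it << 2) | (psr.dt << 1) | psr.rt;
 *	}
 *
 * For example, (it,dt,rt) = (0,1,1) -> (1,1,1) indexes row 3, column 7,
 * which holds SW_P2V (a physical-to-virtual switch).
 */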
45 int mm_switch_table[8][8] = {
46 /* 2004/09/12(Kevin): Allow switch to self */
48 * (it,dt,rt): (0,0,0) -> (1,1,1)
49 * This kind of transition usually occurs in the very early
50 * stage of Linux boot up procedure. Another case is in efi
51 * and pal calls. (see "arch/ia64/kernel/head.S")
53 * (it,dt,rt): (0,0,0) -> (0,1,1)
54 * This kind of transition is found when OSYa exits efi boot
55 * service. Due to gva = gpa in this case (Same region),
56 * data access can be satisfied even though the itlb entry for physical mode emulation is hit.
59 {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
60 {0, 0, 0, 0, 0, 0, 0, 0},
61 {0, 0, 0, 0, 0, 0, 0, 0},
63 * (it,dt,rt): (0,1,1) -> (1,1,1)
64 * This kind of transition is found in OSYa.
66 * (it,dt,rt): (0,1,1) -> (0,0,0)
67 * This kind of transition is found in OSYa
69 {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
70 /* (1,0,0)->(1,1,1) */
71 {0, 0, 0, 0, 0, 0, 0, SW_P2V},
73 * (it,dt,rt): (1,0,1) -> (1,1,1)
74 * This kind of transition usually occurs when Linux returns
75 * from the low level TLB miss handlers.
76 * (see "arch/ia64/kernel/ivt.S")
78 {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
79 {0, 0, 0, 0, 0, 0, 0, 0},
81 * (it,dt,rt): (1,1,1) -> (1,0,1)
82 * This kind of transition usually occurs in Linux low level
83 * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
85 * (it,dt,rt): (1,1,1) -> (0,0,0)
86 * This kind of transition usually occurs in pal and efi calls,
87 * which requires running in physical mode.
88 * (see "arch/ia64/kernel/head.S")
92 {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
95 void physical_mode_init(struct kvm_vcpu *vcpu)
97 vcpu->arch.mode_flags = GUEST_IN_PHY;
100 void switch_to_physical_rid(struct kvm_vcpu *vcpu)
104 /* Save original virtual mode rr[0] and rr[4] */
105 psr = ia64_clear_ic();
106 ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
108 ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
116 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
120 psr = ia64_clear_ic();
121 ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
123 ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
129 static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
131 return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
134 void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
135 struct ia64_psr new_psr)
138 act = mm_switch_action(old_psr, new_psr);
141 /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
142 old_psr.val, new_psr.val);*/
143 switch_to_physical_rid(vcpu);
145 * Set rse to enforced lazy, to prevent active rse
146 * save/restore while the guest is in physical mode.
148 vcpu->arch.mode_flags |= GUEST_IN_PHY;
151 switch_to_virtual_rid(vcpu);
153 * recover old mode which is saved when entering
154 * guest physical mode
156 vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
172 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
173 * RID[0] or RID[4], which are reserved for physical mode emulation.
174 * However, those inserted tc/tr entries need the rid for
175 * virtual mode, so the original virtual rid must be restored before the insert.
178 * Operations which require such a switch include:
179 * - insertions (itc.*, itr.*)
180 * - purges (ptc.* and ptr.*)
184 * All of the above need the actual virtual rid for the destination entry.
187 void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
188 struct ia64_psr new_psr)
191 if ((old_psr.dt != new_psr.dt)
192 || (old_psr.it != new_psr.it)
193 || (old_psr.rt != new_psr.rt))
194 switch_mm_mode(vcpu, old_psr, new_psr);
201 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
202 * RID[0] or RID[4], which are reserved for physical mode emulation.
203 * However, those inserted tc/tr entries need the rid for
204 * virtual mode, so the original virtual rid must be restored before the insert.
207 * Operations which require such a switch include:
208 * - insertions (itc.*, itr.*)
209 * - purges (ptc.* and ptr.*)
213 * All of the above need the actual virtual rid for the destination entry.
216 void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
218 if (is_physical_mode(vcpu)) {
219 vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
220 switch_to_virtual_rid(vcpu);
225 /* Recover always follows prepare */
226 void recover_if_physical_mode(struct kvm_vcpu *vcpu)
228 if (is_physical_mode(vcpu))
229 switch_to_physical_rid(vcpu);
230 vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
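/*
 * Illustrative usage sketch (assumed call pattern, mirroring how
 * kvm_emulate() below brackets its dispatch): TLB insertion/purge
 * emulation is wrapped by the prepare/recover pair so that rr0/rr4
 * temporarily carry the guest's virtual rids while the entry is handled:
 *
 *	prepare_if_physical_mode(vcpu);
 *	vcpu_itc_d(vcpu, pte, itir, ifa);	<- uses the guest's virtual rid
 *	recover_if_physical_mode(vcpu);
 */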
234 #define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
236 static u16 gr_info[32] = {
237 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
238 RPT(r1), RPT(r2), RPT(r3),
239 RPT(r4), RPT(r5), RPT(r6), RPT(r7),
240 RPT(r8), RPT(r9), RPT(r10), RPT(r11),
241 RPT(r12), RPT(r13), RPT(r14), RPT(r15),
242 RPT(r16), RPT(r17), RPT(r18), RPT(r19),
243 RPT(r20), RPT(r21), RPT(r22), RPT(r23),
244 RPT(r24), RPT(r25), RPT(r26), RPT(r27),
245 RPT(r28), RPT(r29), RPT(r30), RPT(r31)
248 #define IA64_FIRST_STACKED_GR 32
249 #define IA64_FIRST_ROTATING_FR 32
251 static inline unsigned long
252 rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
261 * Return the (rotated) index for floating point register REGNUM
262 * (REGNUM must be in the range 32-127;
263 * the result is in the range 0-95).
265 static inline unsigned long fph_index(struct kvm_pt_regs *regs,
268 unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
269 return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
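/*
 * Worked example (assuming rotate_reg() implements the usual
 * "(reg + rrb) modulo size-of-rotating-region" rule): with cr.ifs.rrb.fr
 * equal to 10, a reference to f34 gives regnum - IA64_FIRST_ROTATING_FR = 2,
 * and fph_index() returns rotate_reg(96, 10, 2) = 12, i.e. the access
 * lands on physical f44 inside the 96-register rotating file.
 */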
274 * The inverse of the above: given bspstore and the number of
275 * registers, calculate ar.bsp.
277 static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
280 long delta = ia64_rse_slot_num(addr) + num_regs;
286 while (delta <= -0x3f) {
291 while (delta >= 0x3f) {
297 return addr + num_regs + i;
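/*
 * Background note (IA-64 RSE convention, not specific to this file):
 * every 64th doubleword of the backing store (slot number 0x3f) is a NaT
 * collection slot rather than a stacked register, which is why the delta
 * above is corrected in steps of 0x3f. For instance, skipping 70
 * registers forward from slot 0 crosses one collection slot, so the
 * returned address is 71 doublewords away.
 */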
300 static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
301 unsigned long *val, int *nat)
303 unsigned long *bsp, *addr, *rnat_addr, *bspstore;
304 unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
305 unsigned long nat_mask;
306 unsigned long old_rsc, new_rsc;
307 long sof = (regs->cr_ifs) & 0x7f;
308 long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
309 long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
313 ridx = rotate_reg(sor, rrb_gr, ridx);
315 old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
316 new_rsc = old_rsc&(~(0x3));
317 ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
319 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
320 bsp = kbs + (regs->loadrs >> 19);
322 addr = kvm_rse_skip_regs(bsp, -sof + ridx);
323 nat_mask = 1UL << ia64_rse_slot_num(addr);
324 rnat_addr = ia64_rse_rnat_addr(addr);
326 if (addr >= bspstore) {
329 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
333 if (bspstore < rnat_addr)
334 *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
337 *nat = (int)!!((*rnat_addr) & nat_mask);
338 ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
342 void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
343 unsigned long val, unsigned long nat)
345 unsigned long *bsp, *bspstore, *addr, *rnat_addr;
346 unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
347 unsigned long nat_mask;
348 unsigned long old_rsc, new_rsc, psr;
350 long sof = (regs->cr_ifs) & 0x7f;
351 long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
352 long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
356 ridx = rotate_reg(sor, rrb_gr, ridx);
358 old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
359 /* put RSC to lazy mode, and set loadrs 0 */
360 new_rsc = old_rsc & (~0x3fff0003);
361 ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
362 bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
364 addr = kvm_rse_skip_regs(bsp, -sof + ridx);
365 nat_mask = 1UL << ia64_rse_slot_num(addr);
366 rnat_addr = ia64_rse_rnat_addr(addr);
369 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
370 if (addr >= bspstore) {
375 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
376 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
377 if (bspstore < rnat_addr)
378 rnat = rnat & (~nat_mask);
380 *rnat_addr = (*rnat_addr)&(~nat_mask);
384 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
386 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
388 if (bspstore < rnat_addr)
389 rnat = rnat&(~nat_mask);
391 *rnat_addr = (*rnat_addr) & (~nat_mask);
393 ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
394 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
396 local_irq_restore(psr);
397 ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
400 void getreg(unsigned long regnum, unsigned long *val,
401 int *nat, struct kvm_pt_regs *regs)
403 unsigned long addr, *unat;
404 if (regnum >= IA64_FIRST_STACKED_GR) {
405 get_rse_reg(regs, regnum, val, nat);
410 * Now look at registers in [0-31] range and init correct UNAT
412 addr = (unsigned long)regs;
413 unat = &regs->eml_unat;
415 addr += gr_info[regnum];
417 *val = *(unsigned long *)addr;
419 * do it only when requested
422 *nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
425 void setreg(unsigned long regnum, unsigned long val,
426 int nat, struct kvm_pt_regs *regs)
429 unsigned long bitmask;
433 * First takes care of stacked registers
435 if (regnum >= IA64_FIRST_STACKED_GR) {
436 set_rse_reg(regs, regnum, val, nat);
441 * Now look at registers in [0-31] range and init correct UNAT
443 addr = (unsigned long)regs;
444 unat = &regs->eml_unat;
446 * add offset from base of struct
449 addr += gr_info[regnum];
451 *(unsigned long *)addr = val;
454 * We need to clear the corresponding UNAT bit to fully emulate the load
455 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
457 bitmask = 1UL << ((addr >> 3) & 0x3f);
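/*
 * Illustrative note: this mirrors what st8.spill does in hardware -- the
 * NaT bit of a spilled register lands in ar.unat at the bit selected by
 * address bits {8:3} of the memory slot. E.g. if the r14 slot of this
 * kvm_pt_regs happened to sit at an address ending in 0x70, the matching
 * UNAT bit would be (0x70 >> 3) & 0x3f = 14 (the real offsets come from
 * asm-offsets.h; the value here is only an example).
 */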
465 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
467 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
472 getreg(reg, &val, 0, regs);
476 void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
478 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
479 long sof = (regs->cr_ifs) & 0x7f;
485 setreg(reg, value, nat, regs); /* FIXME: handle NATs later*/
488 void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
489 struct kvm_pt_regs *regs)
491 /* Take floating register rotation into consideration*/
492 if (regnum >= IA64_FIRST_ROTATING_FR)
493 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
494 #define CASE_FIXED_FP(reg) \
496 ia64_stf_spill(fpval, reg); \
634 void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
635 struct kvm_pt_regs *regs)
637 /* Take floating register rotation into consideration*/
638 if (regnum >= IA64_FIRST_ROTATING_FR)
639 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
641 #define CASE_FIXED_FP(reg) \
643 ia64_ldf_fill(reg, fpval); \
778 void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
779 struct ia64_fpreg *val)
781 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
783 getfpreg(reg, val, regs); /* FIXME: handle NATs later*/
786 void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
787 struct ia64_fpreg *val)
789 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
792 setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
795 /************************************************************************
796 * lsapic emulation
797 ***********************************************************************/
798 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
800 unsigned long guest_itc;
801 guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
803 if (guest_itc >= VMX(vcpu, last_itc)) {
804 VMX(vcpu, last_itc) = guest_itc;
807 return VMX(vcpu, last_itc);
810 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
811 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
815 long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
816 unsigned long vitv = VCPU(vcpu, itv);
818 if (vcpu->vcpu_id == 0) {
819 for (i = 0; i < KVM_MAX_VCPUS; i++) {
820 v = (struct kvm_vcpu *)((char *)vcpu +
821 sizeof(struct kvm_vcpu_data) * i);
822 VMX(v, itc_offset) = itc_offset;
823 VMX(v, last_itc) = 0;
826 VMX(vcpu, last_itc) = 0;
827 if (VCPU(vcpu, itm) <= val) {
828 VMX(vcpu, itc_check) = 0;
829 vcpu_unpend_interrupt(vcpu, vitv);
831 VMX(vcpu, itc_check) = 1;
832 vcpu_set_itm(vcpu, VCPU(vcpu, itm));
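/*
 * Note on the offset scheme above (from vcpu_get_itc()/vcpu_set_itc()):
 * if the guest writes ITC value V while the host ar.itc reads H,
 * itc_offset becomes V - H, so a later read returns V plus the host
 * cycles elapsed since the write; last_itc only keeps the returned value
 * monotonic.
 */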
837 static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
839 return ((u64)VCPU(vcpu, itm));
842 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
844 unsigned long vitv = VCPU(vcpu, itv);
845 VCPU(vcpu, itm) = val;
847 if (val > vcpu_get_itc(vcpu)) {
848 VMX(vcpu, itc_check) = 1;
849 vcpu_unpend_interrupt(vcpu, vitv);
850 VMX(vcpu, timer_pending) = 0;
852 VMX(vcpu, itc_check) = 0;
855 #define ITV_VECTOR(itv) (itv&0xff)
856 #define ITV_IRQ_MASK(itv) (itv&(1<<16))
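/*
 * Example (using the layout defined above): the reset value 0x10000 used
 * in init_vcpu() has bit 16 set, so ITV_IRQ_MASK(0x10000) is non-zero and
 * the timer stays masked until the guest programs cr.itv with a clear
 * mask bit, e.g. 0xef gives ITV_VECTOR() == 0xef and ITV_IRQ_MASK() == 0.
 */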
858 static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
860 VCPU(vcpu, itv) = val;
861 if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
862 vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
863 vcpu->arch.timer_pending = 0;
867 static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
871 vec = highest_inservice_irq(vcpu);
872 if (vec == NULL_VECTOR)
874 VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
876 vcpu->arch.irq_new_pending = 1;
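/*
 * Worked example for the insvc bitmap indexing above: vector 0xd1 (209)
 * lives in insvc[209 >> 6] = insvc[3] at bit position 209 & 63 = 17, so
 * an EOI while that vector is in service clears insvc[3] bit 17.
 */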
880 /* See Table 5-8 in SDM vol2 for the definition */
881 int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
885 vtpr.val = VCPU(vcpu, tpr);
887 if (h_inservice == NMI_VECTOR)
888 return IRQ_MASKED_BY_INSVC;
890 if (h_pending == NMI_VECTOR) {
891 /* Non Maskable Interrupt */
892 return IRQ_NO_MASKED;
895 if (h_inservice == ExtINT_VECTOR)
896 return IRQ_MASKED_BY_INSVC;
898 if (h_pending == ExtINT_VECTOR) {
900 /* mask all external IRQ */
901 return IRQ_MASKED_BY_VTPR;
903 return IRQ_NO_MASKED;
906 if (is_higher_irq(h_pending, h_inservice)) {
907 if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
908 return IRQ_NO_MASKED;
910 return IRQ_MASKED_BY_VTPR;
912 return IRQ_MASKED_BY_INSVC;
916 void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
921 local_irq_save(spsr);
922 ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
923 local_irq_restore(spsr);
925 vcpu->arch.irq_new_pending = 1;
928 void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
933 local_irq_save(spsr);
934 ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
935 local_irq_restore(spsr);
937 vcpu->arch.irq_new_pending = 1;
942 void update_vhpi(struct kvm_vcpu *vcpu, int vec)
946 if (vec == NULL_VECTOR)
948 else if (vec == NMI_VECTOR)
950 else if (vec == ExtINT_VECTOR)
955 VCPU(vcpu, vhpi) = vhpi;
956 if (VCPU(vcpu, vac).a_int)
957 ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
958 (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
961 u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
963 int vec, h_inservice, mask;
965 vec = highest_pending_irq(vcpu);
966 h_inservice = highest_inservice_irq(vcpu);
967 mask = irq_masked(vcpu, vec, h_inservice);
968 if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
969 if (VCPU(vcpu, vhpi))
970 update_vhpi(vcpu, NULL_VECTOR);
971 return IA64_SPURIOUS_INT_VECTOR;
973 if (mask == IRQ_MASKED_BY_VTPR) {
974 update_vhpi(vcpu, vec);
975 return IA64_SPURIOUS_INT_VECTOR;
977 VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
978 vcpu_unpend_interrupt(vcpu, vec);
982 /**************************************************************************
983 Privileged operation emulation routines
984 **************************************************************************/
985 u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
992 vpta.val = vcpu_get_pta(vcpu);
993 vrr.val = vcpu_get_rr(vcpu, vadr);
994 vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
996 pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
997 vpta.val, 0, 0, 0, 0);
999 pval = (vadr & VRN_MASK) | vhpt_offset |
1000 (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
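/*
 * Sketch of the hash computed above (VRN_MASK is assumed to keep the
 * region bits {63:61}): the virtual page number is turned into a byte
 * offset of 8-byte VHPT entries, wrapped to the table size, and combined
 * with the pta.base bits above pta.size. E.g. with pta.size = 15 (a 32KB
 * table) and rr.ps = 13 (8KB pages), vadr = 0x4000000000006000 yields
 * offset ((vadr >> 13) << 3) & 0x7fff = 0x18 into the table.
 */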
1005 u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
1008 union ia64_pta vpta;
1011 vpta.val = vcpu_get_pta(vcpu);
1012 vrr.val = vcpu_get_rr(vcpu, vadr);
1014 pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
1022 u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1024 struct thash_data *data;
1025 union ia64_pta vpta;
1028 vpta.val = vcpu_get_pta(vcpu);
1033 data = vtlb_lookup(vcpu, vadr, D_TLB);
1034 if (!data || !data->p)
1044 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1046 unsigned long thash, vadr;
1048 vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1049 thash = vcpu_thash(vcpu, vadr);
1050 vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1054 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1056 unsigned long tag, vadr;
1058 vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1059 tag = vcpu_ttag(vcpu, vadr);
1060 vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
1063 int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
1065 struct thash_data *data;
1066 union ia64_isr visr, pt_isr;
1067 struct kvm_pt_regs *regs;
1068 struct ia64_psr vpsr;
1070 regs = vcpu_regs(vcpu);
1071 pt_isr.val = VMX(vcpu, cr_isr);
1073 visr.ei = pt_isr.ei;
1074 visr.ir = pt_isr.ir;
1075 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1078 data = vhpt_lookup(vadr);
1081 vcpu_set_isr(vcpu, visr.val);
1082 data_page_not_present(vcpu, vadr);
1084 } else if (data->ma == VA_MATTR_NATPAGE) {
1085 vcpu_set_isr(vcpu, visr.val);
1086 dnat_page_consumption(vcpu, vadr);
1089 *padr = (data->gpaddr >> data->ps << data->ps) |
1090 (vadr & (PSIZE(data->ps) - 1));
1091 return IA64_NO_FAULT;
1095 data = vtlb_lookup(vcpu, vadr, D_TLB);
1098 vcpu_set_isr(vcpu, visr.val);
1099 data_page_not_present(vcpu, vadr);
1101 } else if (data->ma == VA_MATTR_NATPAGE) {
1102 vcpu_set_isr(vcpu, visr.val);
1103 dnat_page_consumption(vcpu, vadr);
1106 *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1107 | (vadr & (PSIZE(data->ps) - 1));
1108 return IA64_NO_FAULT;
1111 if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
1113 vcpu_set_isr(vcpu, visr.val);
1114 alt_dtlb(vcpu, vadr);
1122 vcpu_set_isr(vcpu, visr.val);
1123 dvhpt_fault(vcpu, vadr);
1131 return IA64_NO_FAULT;
1135 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1137 unsigned long r1, r3;
1139 r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1141 if (vcpu_tpa(vcpu, r3, &r1))
1144 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1145 return(IA64_NO_FAULT);
1148 void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1150 unsigned long r1, r3;
1152 r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1153 r1 = vcpu_tak(vcpu, r3);
1154 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1158 /************************************
1159 * Insert/Purge translation register/cache
1160 ************************************/
1161 void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1163 thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
1166 void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1168 thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
1171 void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1174 struct thash_data *p_itr;
1177 va = PAGEALIGN(ifa, ps);
1178 pte &= ~PAGE_FLAGS_RV_MASK;
1179 rid = vcpu_get_rr(vcpu, ifa);
1180 rid = rid & RR_RID_MASK;
1181 p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
1182 vcpu_set_tr(p_itr, pte, itir, va, rid);
1183 vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
1187 void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1191 struct thash_data *p_dtr;
1194 va = PAGEALIGN(ifa, ps);
1195 pte &= ~PAGE_FLAGS_RV_MASK;
1197 if (ps != _PAGE_SIZE_16M)
1198 thash_purge_entries(vcpu, va, ps);
1199 gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
1200 if (__gpfn_is_io(gpfn))
1202 rid = vcpu_get_rr(vcpu, va);
1203 rid = rid & RR_RID_MASK;
1204 p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
1205 vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
1206 pte, itir, va, rid);
1207 vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
1210 void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1215 va = PAGEALIGN(ifa, ps);
1216 while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
1217 vcpu->arch.dtrs[index].page_flags = 0;
1219 thash_purge_entries(vcpu, va, ps);
1222 void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1227 va = PAGEALIGN(ifa, ps);
1228 while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
1229 vcpu->arch.itrs[index].page_flags = 0;
1231 thash_purge_entries(vcpu, va, ps);
1234 void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1236 va = PAGEALIGN(va, ps);
1237 thash_purge_entries(vcpu, va, ps);
1240 void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
1242 thash_purge_all(vcpu);
1245 void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1247 struct exit_ctl_data *p = &vcpu->arch.exit_data;
1249 local_irq_save(psr);
1250 p->exit_reason = EXIT_REASON_PTC_G;
1252 p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
1253 p->u.ptc_g_data.vaddr = va;
1254 p->u.ptc_g_data.ps = ps;
1255 vmm_transition(vcpu);
1256 /* Do Local Purge Here*/
1257 vcpu_ptc_l(vcpu, va, ps);
1258 local_irq_restore(psr);
1262 void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1264 vcpu_ptc_ga(vcpu, va, ps);
1267 void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
1271 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1272 vcpu_ptc_e(vcpu, ifa);
1275 void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
1277 unsigned long ifa, itir;
1279 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1280 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1281 vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
1284 void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
1286 unsigned long ifa, itir;
1288 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1289 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1290 vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
1293 void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
1295 unsigned long ifa, itir;
1297 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1298 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1299 vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
1302 void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
1304 unsigned long ifa, itir;
1306 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1307 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1308 vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
1311 void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
1313 unsigned long ifa, itir;
1315 ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1316 itir = vcpu_get_gr(vcpu, inst.M45.r2);
1317 vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
1320 void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
1322 unsigned long itir, ifa, pte, slot;
1324 slot = vcpu_get_gr(vcpu, inst.M45.r3);
1325 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1326 itir = vcpu_get_itir(vcpu);
1327 ifa = vcpu_get_ifa(vcpu);
1328 vcpu_itr_d(vcpu, slot, pte, itir, ifa);
1333 void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
1335 unsigned long itir, ifa, pte, slot;
1337 slot = vcpu_get_gr(vcpu, inst.M45.r3);
1338 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1339 itir = vcpu_get_itir(vcpu);
1340 ifa = vcpu_get_ifa(vcpu);
1341 vcpu_itr_i(vcpu, slot, pte, itir, ifa);
1344 void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
1346 unsigned long itir, ifa, pte;
1348 itir = vcpu_get_itir(vcpu);
1349 ifa = vcpu_get_ifa(vcpu);
1350 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1351 vcpu_itc_d(vcpu, pte, itir, ifa);
1354 void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
1356 unsigned long itir, ifa, pte;
1358 itir = vcpu_get_itir(vcpu);
1359 ifa = vcpu_get_ifa(vcpu);
1360 pte = vcpu_get_gr(vcpu, inst.M45.r2);
1361 vcpu_itc_i(vcpu, pte, itir, ifa);
1364 /*************************************
1365 * Moves to semi-privileged registers
1366 *************************************/
1368 void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
1373 imm = -inst.M30.imm;
1377 vcpu_set_itc(vcpu, imm);
1380 void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1384 r2 = vcpu_get_gr(vcpu, inst.M29.r2);
1385 vcpu_set_itc(vcpu, r2);
1389 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1393 r1 = vcpu_get_itc(vcpu);
1394 vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1396 /**************************************************************************
1397 struct kvm_vcpu*protection key register access routines
1398 **************************************************************************/
1400 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
1402 return ((unsigned long)ia64_get_pkr(reg));
1405 void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1407 ia64_set_pkr(reg, val);
1411 unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
1413 union ia64_rr rr, rr1;
1415 rr.val = vcpu_get_rr(vcpu, ifa);
1424 /********************************
1425 * Moves to privileged registers
1426 ********************************/
1427 unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1430 union ia64_rr oldrr, newrr;
1431 unsigned long rrval;
1432 struct exit_ctl_data *p = &vcpu->arch.exit_data;
1435 oldrr.val = vcpu_get_rr(vcpu, reg);
1437 vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
1439 switch ((unsigned long)(reg >> VRN_SHIFT)) {
1441 vcpu->arch.vmm_rr = vrrtomrr(val);
1442 local_irq_save(psr);
1443 p->exit_reason = EXIT_REASON_SWITCH_RR6;
1444 vmm_transition(vcpu);
1445 local_irq_restore(psr);
1448 rrval = vrrtomrr(val);
1449 vcpu->arch.metaphysical_saved_rr4 = rrval;
1450 if (!is_physical_mode(vcpu))
1451 ia64_set_rr(reg, rrval);
1454 rrval = vrrtomrr(val);
1455 vcpu->arch.metaphysical_saved_rr0 = rrval;
1456 if (!is_physical_mode(vcpu))
1457 ia64_set_rr(reg, rrval);
1460 ia64_set_rr(reg, vrrtomrr(val));
1464 return (IA64_NO_FAULT);
1469 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1471 unsigned long r3, r2;
1473 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1474 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1475 vcpu_set_rr(vcpu, r3, r2);
1478 void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1482 void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1486 void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1488 unsigned long r3, r2;
1490 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1491 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1492 vcpu_set_pmc(vcpu, r3, r2);
1495 void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
1497 unsigned long r3, r2;
1499 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1500 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1501 vcpu_set_pmd(vcpu, r3, r2);
1504 void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1508 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1509 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1510 vcpu_set_pkr(vcpu, r3, r2);
1515 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1517 unsigned long r3, r1;
1519 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1520 r1 = vcpu_get_rr(vcpu, r3);
1521 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1524 void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1526 unsigned long r3, r1;
1528 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1529 r1 = vcpu_get_pkr(vcpu, r3);
1530 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1533 void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1535 unsigned long r3, r1;
1537 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1538 r1 = vcpu_get_dbr(vcpu, r3);
1539 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1542 void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1544 unsigned long r3, r1;
1546 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1547 r1 = vcpu_get_ibr(vcpu, r3);
1548 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1551 void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1553 unsigned long r3, r1;
1555 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1556 r1 = vcpu_get_pmc(vcpu, r3);
1557 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1561 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1563 /* FIXME: This could get called as a result of a rsvd-reg fault */
1564 if (reg > (ia64_get_cpuid(3) & 0xff))
1567 return ia64_get_cpuid(reg);
1570 void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
1572 unsigned long r3, r1;
1574 r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1575 r1 = vcpu_get_cpuid(vcpu, r3);
1576 vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1579 void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
1581 VCPU(vcpu, tpr) = val;
1582 vcpu->arch.irq_check = 1;
1585 unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1589 r2 = vcpu_get_gr(vcpu, inst.M32.r2);
1590 VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1592 switch (inst.M32.cr3) {
1594 vcpu_set_dcr(vcpu, r2);
1597 vcpu_set_itm(vcpu, r2);
1600 vcpu_set_tpr(vcpu, r2);
1603 vcpu_set_eoi(vcpu, r2);
1613 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1615 unsigned long tgt = inst.M33.r1;
1618 switch (inst.M33.cr3) {
1620 val = vcpu_get_ivr(vcpu);
1621 vcpu_set_gr(vcpu, tgt, val, 0);
1625 vcpu_set_gr(vcpu, tgt, 0L, 0);
1628 val = VCPU(vcpu, vcr[inst.M33.cr3]);
1629 vcpu_set_gr(vcpu, tgt, val, 0);
1638 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1642 struct kvm_pt_regs *regs;
1643 struct ia64_psr old_psr, new_psr;
1645 old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1647 regs = vcpu_regs(vcpu);
1648 /* We only support guests with:
1653 if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1654 panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
1658 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
1659 * Since these bits will become 0 after successful execution of each
1660 * instruction, we do not keep them in vpsr; they are set via mIA64_PSR instead
1662 VCPU(vcpu, vpsr) = val
1663 & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1664 IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1666 if (!old_psr.i && (val & IA64_PSR_I)) {
1668 vcpu->arch.irq_check = 1;
1670 new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1673 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
1674 * except for the following bits:
1675 * ic/i/dt/si/rt/mc/it/bn/vm
1677 mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
1678 IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
1681 regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1683 check_mm_mode_switch(vcpu, old_psr, new_psr);
1688 unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1690 struct ia64_psr vpsr;
1692 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1693 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1696 VCPU(vcpu, ifs) = regs->cr_ifs;
1697 regs->cr_ifs = IA64_IFS_V;
1698 return (IA64_NO_FAULT);
1703 /**************************************************************************
1704 VCPU banked general register access routines
1705 **************************************************************************/
1706 #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
1708 __asm__ __volatile__ ( \
1709 ";;extr.u %0 = %3,%6,16;;\n" \
1710 "dep %1 = %0, %1, 0, 16;;\n" \
1712 "extr.u %0 = %2, 16, 16;;\n" \
1713 "dep %3 = %0, %3, %6, 16;;\n" \
1715 ::"r"(i), "r"(*b1unat), "r"(*b0unat), \
1716 "r"(*runat), "r"(b1unat), "r"(runat), \
1717 "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
1720 void vcpu_bsw0(struct kvm_vcpu *vcpu)
1724 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1725 unsigned long *r = &regs->r16;
1726 unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1727 unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1728 unsigned long *runat = &regs->eml_unat;
1729 unsigned long *b0unat = &VCPU(vcpu, vbnat);
1730 unsigned long *b1unat = &VCPU(vcpu, vnat);
1733 if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1734 for (i = 0; i < 16; i++) {
1738 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1739 VMM_PT_REGS_R16_SLOT);
1740 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1744 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
1746 __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
1747 "dep %1 = %0, %1, 16, 16;;\n" \
1749 "extr.u %0 = %2, 0, 16;;\n" \
1750 "dep %3 = %0, %3, %6, 16;;\n" \
1752 ::"r"(i), "r"(*b0unat), "r"(*b1unat), \
1753 "r"(*runat), "r"(b0unat), "r"(runat), \
1754 "i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
1757 void vcpu_bsw1(struct kvm_vcpu *vcpu)
1760 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1761 unsigned long *r = &regs->r16;
1762 unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1763 unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1764 unsigned long *runat = &regs->eml_unat;
1765 unsigned long *b0unat = &VCPU(vcpu, vbnat);
1766 unsigned long *b1unat = &VCPU(vcpu, vnat);
1768 if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1769 for (i = 0; i < 16; i++) {
1773 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1774 VMM_PT_REGS_R16_SLOT);
1775 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1782 void vcpu_rfi(struct kvm_vcpu *vcpu)
1784 unsigned long ifs, psr;
1785 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1787 psr = VCPU(vcpu, ipsr);
1788 if (psr & IA64_PSR_BN)
1792 vcpu_set_psr(vcpu, psr);
1793 ifs = VCPU(vcpu, ifs);
1796 regs->cr_iip = VCPU(vcpu, iip);
1801 VPSR can't keep track of some bits of the guest PSR (those in the mask below);
1802 this function reconstructs the full guest PSR
1805 unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1808 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1810 mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1811 IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1812 return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1815 void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1818 unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
1821 vpsr = vcpu_get_psr(vcpu);
1823 vcpu_set_psr(vcpu, vpsr);
1826 void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1829 unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1832 vpsr = vcpu_get_psr(vcpu);
1834 vcpu_set_psr(vcpu, vpsr);
1839 * bit -- starting bit
1840 * len -- how many bits
1842 #define MASK(bit,len) \
1846 __asm __volatile("dep %0=-1, r0, %1, %2"\
1853 void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1855 val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1856 vcpu_set_psr(vcpu, val);
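/*
 * Example (assuming MASK(bit, len) expands to len one-bits starting at
 * bit, as the dep instruction above suggests): MASK(0, 32) is
 * 0x00000000ffffffff and MASK(32, 32) is 0xffffffff00000000, so
 * vcpu_set_psr_l() replaces only the lower 32 bits (psr.l) of the guest
 * PSR and keeps the upper half from the current value.
 */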
1859 void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1863 val = vcpu_get_gr(vcpu, inst.M35.r2);
1864 vcpu_set_psr_l(vcpu, val);
1867 void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1871 val = vcpu_get_psr(vcpu);
1872 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1873 vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1876 void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1878 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1879 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1880 if (ipsr->ri == 2) {
1887 void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1889 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1890 struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1892 if (ipsr->ri == 0) {
1899 /** Emulate a privileged operation.
1902 * @param vcpu virtual cpu
1903 * @cause the reason that caused the virtualization fault
1904 * @opcode the instruction code which caused the virtualization fault
1907 void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
1909 unsigned long status, cause, opcode;
1912 status = IA64_NO_FAULT;
1913 cause = VMX(vcpu, cause);
1914 opcode = VMX(vcpu, opcode);
1917 * Switch to actual virtual rid in rr0 and rr4,
1918 * which is required by some tlb related instructions.
1920 prepare_if_physical_mode(vcpu);
1924 kvm_rsm(vcpu, inst);
1927 kvm_ssm(vcpu, inst);
1929 case EVENT_MOV_TO_PSR:
1930 kvm_mov_to_psr(vcpu, inst);
1932 case EVENT_MOV_FROM_PSR:
1933 kvm_mov_from_psr(vcpu, inst);
1935 case EVENT_MOV_FROM_CR:
1936 kvm_mov_from_cr(vcpu, inst);
1938 case EVENT_MOV_TO_CR:
1939 kvm_mov_to_cr(vcpu, inst);
1954 kvm_itr_d(vcpu, inst);
1957 kvm_itr_i(vcpu, inst);
1960 kvm_ptr_d(vcpu, inst);
1963 kvm_ptr_i(vcpu, inst);
1966 kvm_itc_d(vcpu, inst);
1969 kvm_itc_i(vcpu, inst);
1972 kvm_ptc_l(vcpu, inst);
1975 kvm_ptc_g(vcpu, inst);
1978 kvm_ptc_ga(vcpu, inst);
1981 kvm_ptc_e(vcpu, inst);
1983 case EVENT_MOV_TO_RR:
1984 kvm_mov_to_rr(vcpu, inst);
1986 case EVENT_MOV_FROM_RR:
1987 kvm_mov_from_rr(vcpu, inst);
1990 kvm_thash(vcpu, inst);
1993 kvm_ttag(vcpu, inst);
1996 status = kvm_tpa(vcpu, inst);
1999 kvm_tak(vcpu, inst);
2001 case EVENT_MOV_TO_AR_IMM:
2002 kvm_mov_to_ar_imm(vcpu, inst);
2004 case EVENT_MOV_TO_AR:
2005 kvm_mov_to_ar_reg(vcpu, inst);
2007 case EVENT_MOV_FROM_AR:
2008 kvm_mov_from_ar_reg(vcpu, inst);
2010 case EVENT_MOV_TO_DBR:
2011 kvm_mov_to_dbr(vcpu, inst);
2013 case EVENT_MOV_TO_IBR:
2014 kvm_mov_to_ibr(vcpu, inst);
2016 case EVENT_MOV_TO_PMC:
2017 kvm_mov_to_pmc(vcpu, inst);
2019 case EVENT_MOV_TO_PMD:
2020 kvm_mov_to_pmd(vcpu, inst);
2022 case EVENT_MOV_TO_PKR:
2023 kvm_mov_to_pkr(vcpu, inst);
2025 case EVENT_MOV_FROM_DBR:
2026 kvm_mov_from_dbr(vcpu, inst);
2028 case EVENT_MOV_FROM_IBR:
2029 kvm_mov_from_ibr(vcpu, inst);
2031 case EVENT_MOV_FROM_PMC:
2032 kvm_mov_from_pmc(vcpu, inst);
2034 case EVENT_MOV_FROM_PKR:
2035 kvm_mov_from_pkr(vcpu, inst);
2037 case EVENT_MOV_FROM_CPUID:
2038 kvm_mov_from_cpuid(vcpu, inst);
2041 status = IA64_FAULT;
2046 /* Assume all status is NO_FAULT? */
2047 if (status == IA64_NO_FAULT && cause != EVENT_RFI)
2048 vcpu_increment_iip(vcpu);
2050 recover_if_physical_mode(vcpu);
2053 void init_vcpu(struct kvm_vcpu *vcpu)
2057 vcpu->arch.mode_flags = GUEST_IN_PHY;
2058 VMX(vcpu, vrr[0]) = 0x38;
2059 VMX(vcpu, vrr[1]) = 0x38;
2060 VMX(vcpu, vrr[2]) = 0x38;
2061 VMX(vcpu, vrr[3]) = 0x38;
2062 VMX(vcpu, vrr[4]) = 0x38;
2063 VMX(vcpu, vrr[5]) = 0x38;
2064 VMX(vcpu, vrr[6]) = 0x38;
2065 VMX(vcpu, vrr[7]) = 0x38;
2066 VCPU(vcpu, vpsr) = IA64_PSR_BN;
2067 VCPU(vcpu, dcr) = 0;
2068 /* pta.size must not be 0. The minimum is 15 (32k) */
2069 VCPU(vcpu, pta) = 15 << 2;
2070 VCPU(vcpu, itv) = 0x10000;
2071 VCPU(vcpu, itm) = 0;
2072 VMX(vcpu, last_itc) = 0;
2074 VCPU(vcpu, lid) = VCPU_LID(vcpu);
2075 VCPU(vcpu, ivr) = 0;
2076 VCPU(vcpu, tpr) = 0x10000;
2077 VCPU(vcpu, eoi) = 0;
2078 VCPU(vcpu, irr[0]) = 0;
2079 VCPU(vcpu, irr[1]) = 0;
2080 VCPU(vcpu, irr[2]) = 0;
2081 VCPU(vcpu, irr[3]) = 0;
2082 VCPU(vcpu, pmv) = 0x10000;
2083 VCPU(vcpu, cmcv) = 0x10000;
2084 VCPU(vcpu, lrr0) = 0x10000; /* default reset value? */
2085 VCPU(vcpu, lrr1) = 0x10000; /* default reset value? */
2086 update_vhpi(vcpu, NULL_VECTOR);
2087 VLSAPIC_XTP(vcpu) = 0x80; /* disabled */
2089 for (i = 0; i < 4; i++)
2090 VLSAPIC_INSVC(vcpu, i) = 0;
2093 void kvm_init_all_rr(struct kvm_vcpu *vcpu)
2097 local_irq_save(psr);
2099 /* WARNING: do not allow virtual mode and physical mode to
2100 * co-exist in the same region
2103 vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2104 vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2106 if (is_physical_mode(vcpu)) {
2107 if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
2108 panic_vm(vcpu, "Machine Status conflicts!\n");
2110 ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
2111 ia64_dv_serialize_data();
2112 ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
2113 ia64_dv_serialize_data();
2115 ia64_set_rr((VRN0 << VRN_SHIFT),
2116 vcpu->arch.metaphysical_saved_rr0);
2117 ia64_dv_serialize_data();
2118 ia64_set_rr((VRN4 << VRN_SHIFT),
2119 vcpu->arch.metaphysical_saved_rr4);
2120 ia64_dv_serialize_data();
2122 ia64_set_rr((VRN1 << VRN_SHIFT),
2123 vrrtomrr(VMX(vcpu, vrr[VRN1])));
2124 ia64_dv_serialize_data();
2125 ia64_set_rr((VRN2 << VRN_SHIFT),
2126 vrrtomrr(VMX(vcpu, vrr[VRN2])));
2127 ia64_dv_serialize_data();
2128 ia64_set_rr((VRN3 << VRN_SHIFT),
2129 vrrtomrr(VMX(vcpu, vrr[VRN3])));
2130 ia64_dv_serialize_data();
2131 ia64_set_rr((VRN5 << VRN_SHIFT),
2132 vrrtomrr(VMX(vcpu, vrr[VRN5])));
2133 ia64_dv_serialize_data();
2134 ia64_set_rr((VRN7 << VRN_SHIFT),
2135 vrrtomrr(VMX(vcpu, vrr[VRN7])));
2136 ia64_dv_serialize_data();
2146 ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
2157 static void kvm_show_registers(struct kvm_pt_regs *regs)
2159 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
2161 struct kvm_vcpu *vcpu = current_vcpu;
2163 printk("vcpu 0x%p vcpu %d\n",
2164 vcpu, vcpu->vcpu_id);
2166 printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n",
2167 regs->cr_ipsr, regs->cr_ifs, ip);
2169 printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
2170 regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
2171 printk("rnat: %016lx bspstore: %016lx pr : %016lx\n",
2172 regs->ar_rnat, regs->ar_bspstore, regs->pr);
2173 printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
2174 regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
2175 printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
2176 printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0,
2177 regs->b6, regs->b7);
2178 printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
2179 regs->f6.u.bits[1], regs->f6.u.bits[0],
2180 regs->f7.u.bits[1], regs->f7.u.bits[0]);
2181 printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
2182 regs->f8.u.bits[1], regs->f8.u.bits[0],
2183 regs->f9.u.bits[1], regs->f9.u.bits[0]);
2184 printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
2185 regs->f10.u.bits[1], regs->f10.u.bits[0],
2186 regs->f11.u.bits[1], regs->f11.u.bits[0]);
2188 printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1,
2189 regs->r2, regs->r3);
2190 printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8,
2191 regs->r9, regs->r10);
2192 printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
2193 regs->r12, regs->r13);
2194 printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
2195 regs->r15, regs->r16);
2196 printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
2197 regs->r18, regs->r19);
2198 printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
2199 regs->r21, regs->r22);
2200 printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
2201 regs->r24, regs->r25);
2202 printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
2203 regs->r27, regs->r28);
2204 printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
2205 regs->r30, regs->r31);
2209 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
2214 struct kvm_pt_regs *regs = vcpu_regs(v);
2215 struct exit_ctl_data *p = &v->arch.exit_data;
2216 va_start(args, fmt);
2217 vsnprintf(buf, sizeof(buf), fmt, args);
2220 kvm_show_registers(regs);
2221 p->exit_reason = EXIT_REASON_VM_PANIC;