 * kvm_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Shaofan Li (Susue Li) <susie.li@intel.com>
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang <xiantao.zhang@intel.com>
#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>

#include "asm-offsets.h"
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed at lazy mode when emulating guest partial mode
 * - If gva happens to be rr0 and rr4, only allowed case is identity
 *   mapping (gva=gpa), or panic! (How?)
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
	 *  This kind of transition usually occurs in the very early
	 *  stage of Linux boot up procedure. Another case is in efi
	 *  and pal calls. (see "arch/ia64/kernel/head.S")
	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
	 *  This kind of transition is found when OSYa exits efi boot
	 *  service. Due to gva = gpa in this case (Same region),
	 *  data access can be satisfied through itlb entry for physical
	{SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	{0, 0, 0, 0, 0, 0, 0, 0},
	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
	 *  This kind of transition is found in OSYa.
	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
	 *  This kind of transition is found in OSYa
	{SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0, 0, 0, 0, 0, 0, 0, SW_P2V},
	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
	 *  This kind of transition usually occurs when Linux returns
	 *  from the low level TLB miss handlers.
	 *  (see "arch/ia64/kernel/ivt.S")
	{0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
	{0, 0, 0, 0, 0, 0, 0, 0},
	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
	 *  This kind of transition usually occurs in Linux low level
	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
	 *  This kind of transition usually occurs in pal and efi calls,
	 *  which require running in physical mode.
	 *  (see "arch/ia64/kernel/head.S")
	{SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
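
/*
 * Illustration only: the table above is indexed by the (it,dt,rt) triple
 * packed into a 3-bit value, it being the most significant bit.  The real
 * MODE_IND() helper lives in the accompanying headers; the packing below is
 * a hypothetical sketch of that indexing, not the header definition.
 */
static inline int demo_mode_index(int it, int dt, int rt)
{
	return (it << 2) | (dt << 1) | rt;
}
/*
 * Example: the TLB-miss-handler return (1,0,1) -> (1,1,1) maps to
 * mm_switch_table[5][7], the SW_P2V entry in the corresponding row above.
 */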
void physical_mode_init(struct kvm_vcpu *vcpu)
	vcpu->arch.mode_flags = GUEST_IN_PHY;

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);

void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);

static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];

void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
			struct ia64_psr new_psr)
	act = mm_switch_action(old_psr, new_psr);
	/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
	  old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		 * Set rse to enforced lazy, to prevent active rse
		 * save/restore while the guest is in physical mode.
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		switch_to_virtual_rid(vcpu);
		 * recover old mode which is saved when entering
		 * guest physical mode
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;

 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
 * RID[0] and RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries want is the rid for
 * virtual mode, so the original virtual rid needs to be restored before
 * the insert.
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 * All of the above need the actual virtual rid for the destination entry.

void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
			struct ia64_psr new_psr)
	if ((old_psr.dt != new_psr.dt)
			|| (old_psr.it != new_psr.it)
			|| (old_psr.rt != new_psr.rt))
		switch_mm_mode(vcpu, old_psr, new_psr);

 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
 * RID[0] and RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries want is the rid for
 * virtual mode, so the original virtual rid needs to be restored before
 * the insert.
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 * All of the above need the actual virtual rid for the destination entry.

void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
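
/*
 * Illustration only: the two helpers above are meant to bracket emulation of
 * a guest TLB operation, which is exactly how kvm_emulate() uses them near
 * the end of this file.  demo_emulate_tlb_op() is a hypothetical sketch of
 * that calling pattern, not a function used anywhere in the VMM.
 */
static inline void demo_emulate_tlb_op(struct kvm_vcpu *vcpu)
{
	prepare_if_physical_mode(vcpu);	/* temporarily restore virtual rids */
	/* ... perform the itc/itr/ptc/ptr emulation here ... */
	recover_if_physical_mode(vcpu);	/* back to the metaphysical rids */
}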
#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)

#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32
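
/*
 * Illustration only: RPT() computes a field offset by "dereferencing" a NULL
 * struct pointer, the classic pre-offsetof idiom.  An equivalent definition
 * using the standard macro would be
 *
 *	#define RPT(x)	((u16) offsetof(struct kvm_pt_regs, x))
 *
 * so gr_info[n] is simply the byte offset of rN inside struct kvm_pt_regs,
 * which getreg()/setreg() below add to the base address of the trap frame.
 */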
static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)

 * Return the (rotated) index for floating point register REGNUM
 * (REGNUM must be in the range 32-127;
 * the result is in the range 0-95).
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
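
/*
 * Illustration only: the rotate_reg() body is elided in this excerpt.  The
 * conventional IA-64 implementation, as used elsewhere in arch/ia64, adds
 * the rotation base and wraps at the size of the rotating partition;
 * fph_index() applies it with sor = 96 (the rotating FP registers f32-f127)
 * and rrb.fr taken from cr.ifs.  A hypothetical sketch:
 */
static inline unsigned long demo_rotate_reg(unsigned long sor,
				unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}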
 * The inverse of the above: given bspstore and the number of
 * registers, calculate ar.bsp.
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
	long delta = ia64_rse_slot_num(addr) + num_regs;
		while (delta <= -0x3f) {
		while (delta >= 0x3f) {
	return addr + num_regs + i;

static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
		unsigned long *val, int *nat)
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc & (~(0x3));
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (bspstore < rnat_addr)
		*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
		*nat = (int)!!((*rnat_addr) & nat_mask);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
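
/*
 * Illustration only: the backing-store arithmetic above relies on the RSE
 * layout in which every 64th 8-byte slot (the one whose address has bits
 * 3..8 all set) holds a NaT collection word for the preceding 63 registers.
 * The helpers used above come from <asm/rse.h>; the versions below are
 * illustrative equivalents, not the header definitions.
 */
static inline unsigned long demo_rse_slot_num(unsigned long *addr)
{
	return (((unsigned long) addr) >> 3) & 0x3f;	/* slot 0..63 */
}

static inline unsigned long *demo_rse_rnat_addr(unsigned long *slot_addr)
{
	/* address of the RNAT word covering this slot's 64-slot group */
	return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
}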
void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
		unsigned long val, unsigned long nat)
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
			*rnat_addr = (*rnat_addr) & (~nat_mask);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
			*rnat_addr = (*rnat_addr) & (~nat_mask);
		ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);

void getreg(unsigned long regnum, unsigned long *val,
		int *nat, struct kvm_pt_regs *regs)
	unsigned long addr, *unat;
	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);

	 * Now look at registers in [0-31] range and init correct UNAT
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	addr += gr_info[regnum];
	*val = *(unsigned long *)addr;
	 * do it only when requested
	*nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;

void setreg(unsigned long regnum, unsigned long val,
		int nat, struct kvm_pt_regs *regs)
	unsigned long bitmask;
	 * First takes care of stacked registers
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);

	 * Now look at registers in [0-31] range and init correct UNAT
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	 * add offset from base of struct
	addr += gr_info[regnum];
	*(unsigned long *)addr = val;
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	bitmask = 1UL << ((addr >> 3) & 0x3f);
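
/*
 * Illustration only: the UNAT bit owned by a register spilled at address
 * addr is (addr >> 3) & 0x3f, i.e. its 8-byte slot number within the
 * enclosing 512-byte-aligned area.  For a made-up spill address
 * 0xe000000000001018 the bit position is 3 and the mask is 1UL << 3.
 */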
u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	getreg(reg, &val, 0, regs);

void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later */

void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
		struct kvm_pt_regs *regs)
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
#define CASE_FIXED_FP(reg)			\
		ia64_stf_spill(fpval, reg);	\

void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
		struct kvm_pt_regs *regs)
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
#define CASE_FIXED_FP(reg)			\
		ia64_ldf_fill(reg, fpval);	\

void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	getfpreg(reg, val, regs);	/* FIXME: handle NATs later */

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	setfpreg(reg, val, regs);	/* FIXME: handle NATs later */

/************************************************************************
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
	unsigned long guest_itc;
	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
	if (guest_itc >= VMX(vcpu, last_itc)) {
		VMX(vcpu, last_itc) = guest_itc;
	return VMX(vcpu, last_itc);

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
	unsigned long vitv = VCPU(vcpu, itv);
	if (vcpu->vcpu_id == 0) {
		for (i = 0; i < MAX_VCPU_NUM; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
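
/*
 * Illustration only: the guest ITC is modelled as "host ITC + per-VM
 * offset", clamped so it never goes backwards.  The structure and helpers
 * below are hypothetical stand-ins mirroring the itc_offset/last_itc
 * handling above, not code used by the VMM.
 */
struct demo_itc_state {
	unsigned long itc_offset;	/* written when the guest sets AR.ITC */
	unsigned long last_itc;		/* highest value returned so far */
};

static inline unsigned long demo_get_itc(struct demo_itc_state *s,
				unsigned long host_itc)
{
	unsigned long guest_itc = s->itc_offset + host_itc;

	if (guest_itc >= s->last_itc)
		s->last_itc = guest_itc;
	return s->last_itc;		/* monotonic guest view of the ITC */
}

static inline void demo_set_itc(struct demo_itc_state *s, unsigned long val,
				unsigned long host_itc)
{
	s->itc_offset = val - host_itc;	/* subsequent reads start near val */
	s->last_itc = 0;
}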
static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
	return ((u64)VCPU(vcpu, itm));

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
	unsigned long vitv = VCPU(vcpu, itv);
	VCPU(vcpu, itm) = val;
	if (val > vcpu_get_itc(vcpu)) {
		VMX(vcpu, itc_check) = 1;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, timer_pending) = 0;
		VMX(vcpu, itc_check) = 0;

#define ITV_VECTOR(itv)		(itv&0xff)
#define ITV_IRQ_MASK(itv)	(itv&(1<<16))

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
	VCPU(vcpu, itv) = val;
	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
		vcpu->arch.timer_pending = 0;

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
	vec = highest_inservice_irq(vcpu);
	if (vec == NULL_VECTOR)
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	vcpu->arch.irq_new_pending = 1;

/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
	vtpr.val = VCPU(vcpu, tpr);

	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;
	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;
	if (h_pending == ExtINT_VECTOR) {
		/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
			return IRQ_NO_MASKED;
	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
			return IRQ_MASKED_BY_VTPR;
		return IRQ_MASKED_BY_INSVC;
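
/*
 * Illustration only: external interrupt priority is per 16-vector class,
 * so vector >> 4 is compared against TPR.mic, and TPR.mmi masks all
 * external interrupts.  The helpers used above are defined in the
 * accompanying headers; the versions below are assumed equivalents written
 * out for illustration, not the header definitions.
 */
static inline int demo_is_higher_irq(int pending, int inservice, int null_vec)
{
	/* strictly higher vector, or any vector when nothing is in service */
	return pending > inservice ||
		(pending != null_vec && inservice == null_vec);
}

static inline int demo_is_higher_class(int pending, int mic, int mmi)
{
	/* mmi << 4 pushes the threshold above any class, masking everything */
	return (pending >> 4) > (mic + (mmi << 4));
}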
void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
	local_irq_save(spsr);
	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);
	vcpu->arch.irq_new_pending = 1;

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
	local_irq_save(spsr);
	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);
	vcpu->arch.irq_new_pending = 1;

void update_vhpi(struct kvm_vcpu *vcpu, int vec)
	if (vec == NULL_VECTOR)
	else if (vec == NMI_VECTOR)
	else if (vec == ExtINT_VECTOR)
	VCPU(vcpu, vhpi) = vhpi;
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);

u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
	int vec, h_inservice, mask;
	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	if (mask == IRQ_MASKED_BY_VTPR) {
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);

/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
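
/*
 * Worked example (made-up values, for illustration only): the non-VSA
 * branch above builds the region-based short-format VHPT hash by keeping
 * the region bits of vadr, using VPN * 8 modulo the 2^pta.size table size
 * as the offset, and splicing in the table base from pta.  With
 * vadr = 0x2000000000404000, rr.ps = 14 (16KB pages), pta.size = 15
 * (32KB VHPT) and a VHPT base of 0x8000:
 *
 *	vhpt_offset = ((vadr >> 14) << 3) & 0x7fff        = 0x0808
 *	thash       = 0x2000000000000000 | 0x8000 | 0x808 = 0x2000000000008808
 */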
u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
	union ia64_pta vpta;
	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,

u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
	struct thash_data *data;
	union ia64_pta vpta;
	vpta.val = vcpu_get_pta(vcpu);
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!data || !data->p)

void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long thash, vadr;
	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	thash = vcpu_thash(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long tag, vadr;
	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	tag = vcpu_ttag(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);

int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	data = vhpt_lookup(vadr);
		vcpu_set_isr(vcpu, visr.val);
		data_page_not_present(vcpu, vadr);
	} else if (data->ma == VA_MATTR_NATPAGE) {
		vcpu_set_isr(vcpu, visr.val);
		dnat_page_consumption(vcpu, vadr);
		*padr = (data->gpaddr >> data->ps << data->ps) |
			(vadr & (PSIZE(data->ps) - 1));
		return IA64_NO_FAULT;

	data = vtlb_lookup(vcpu, vadr, D_TLB);
		vcpu_set_isr(vcpu, visr.val);
		data_page_not_present(vcpu, vadr);
	} else if (data->ma == VA_MATTR_NATPAGE) {
		vcpu_set_isr(vcpu, visr.val);
		dnat_page_consumption(vcpu, vadr);
		*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
			| (vadr & (PSIZE(data->ps) - 1));
		return IA64_NO_FAULT;

	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		vcpu_set_isr(vcpu, visr.val);
		alt_dtlb(vcpu, vadr);
		vcpu_set_isr(vcpu, visr.val);
		dvhpt_fault(vcpu, vadr);
	return IA64_NO_FAULT;

int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r1, r3;
	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	if (vcpu_tpa(vcpu, r3, &r1))
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
	return (IA64_NO_FAULT);

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r1, r3;
	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	r1 = vcpu_tak(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);

/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);

void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
	struct thash_data *p_itr;
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
	struct thash_data *p_dtr;
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
			pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);

void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[index].page_flags = 0;
	thash_purge_entries(vcpu, va, ps);

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[index].page_flags = 0;
	thash_purge_entries(vcpu, va, ps);

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
	va = PAGEALIGN(va, ps);
	thash_purge_entries(vcpu, va, ps);

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
	thash_purge_all(vcpu);

void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;
	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here */
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
	vcpu_ptc_ga(vcpu, va, ps);

void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	vcpu_ptc_e(vcpu, ifa);

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long ifa, itir;
	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long ifa, itir;
	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long ifa, itir;
	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long ifa, itir;
	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long ifa, itir;
	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));

void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long itir, ifa, pte, slot;
	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_d(vcpu, slot, pte, itir, ifa);

void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long itir, ifa, pte, slot;
	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_i(vcpu, slot, pte, itir, ifa);

void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long itir, ifa, pte;
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_d(vcpu, pte, itir, ifa);

void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long itir, ifa, pte;
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_i(vcpu, pte, itir, ifa);

/*************************************
 * Moves to semi-privileged registers
 *************************************/
void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
		imm = -inst.M30.imm;
	vcpu_set_itc(vcpu, imm);

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
	vcpu_set_itc(vcpu, r2);

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
	r1 = vcpu_get_itc(vcpu);
	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
	return ((unsigned long)ia64_get_pkr(reg));

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
	ia64_set_pkr(reg, val);

unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
	union ia64_rr rr, rr1;
	rr.val = vcpu_get_rr(vcpu, ifa);

/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
	switch ((unsigned long)(reg >> VRN_SHIFT)) {
		vcpu->arch.vmm_rr = vrrtomrr(val);
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		ia64_set_rr(reg, vrrtomrr(val));
	return (IA64_NO_FAULT);

void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r2;
	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_rr(vcpu, r3, r2);

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r2;
	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmc(vcpu, r3, r2);

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r2;
	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmd(vcpu, r3, r2);

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pkr(vcpu, r3, r2);

void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r1;
	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_rr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r1;
	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pkr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r1;
	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_dbr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r1;
	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_ibr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r1;
	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pmc(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);

unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
	return ia64_get_cpuid(reg);

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long r3, r1;
	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_cpuid(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;

unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
	switch (inst.M32.cr3) {
		vcpu_set_dcr(vcpu, r2);
		vcpu_set_itm(vcpu, r2);
		vcpu_set_tpr(vcpu, r2);
		vcpu_set_eoi(vcpu, r2);

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long tgt = inst.M33.r1;
	switch (inst.M33.cr3) {
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);

void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	regs = vcpu_regs(vcpu);
	/* We only support guest as:
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))

	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
	 * Since these bits will become 0 after successful execution of each
	 * instruction, we simply clear them here in the virtual PSR.
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		vcpu->arch.irq_check = 1;
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
	 * except for the following bits:
	 * ic/i/dt/si/rt/mc/it/bn/vm
	mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN + IA64_PSR_VM;
	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
	check_mm_mode_switch(vcpu, old_psr, new_psr);

unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
	struct ia64_psr vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);

/**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	__asm__ __volatile__ (						\
			";;extr.u %0 = %3,%6,16;;\n"			\
			"dep %1 = %0, %1, 0, 16;;\n"			\
			"extr.u %0 = %2, 16, 16;;\n"			\
			"dep %3 = %0, %3, %6, 16;;\n"			\
			::"r"(i), "r"(*b1unat), "r"(*b0unat),		\
			"r"(*runat), "r"(b1unat), "r"(runat),		\
			"i"(VMM_PT_REGS_R16_SLOT) : "memory");		\

void vcpu_bsw0(struct kvm_vcpu *vcpu)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
		for (i = 0; i < 16; i++) {
		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;

#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"		\
			"dep %1 = %0, %1, 16, 16;;\n"			\
			"extr.u %0 = %2, 0, 16;;\n"			\
			"dep %3 = %0, %3, %6, 16;;\n"			\
			::"r"(i), "r"(*b0unat), "r"(*b1unat),		\
			"r"(*runat), "r"(b0unat), "r"(runat),		\
			"i"(VMM_PT_REGS_R16_SLOT) : "memory");		\

void vcpu_bsw1(struct kvm_vcpu *vcpu)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
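
/*
 * Illustration only: vcpu_bsw0()/vcpu_bsw1() emulate the bsw instruction by
 * swapping the 16 banked registers r16-r31 in the trap frame with the save
 * area of the other bank, while the inline asm above moves the matching 16
 * NaT bits between eml_unat and the vbnat/vnat words.  The function below
 * is a conceptual sketch of the register half of the swap (NaT handling
 * omitted), not code used by the VMM.
 */
static inline void demo_swap_bank(unsigned long *frame_r16,
				unsigned long *save_current,
				unsigned long *load_other)
{
	int i;

	for (i = 0; i < 16; i++) {
		save_current[i] = frame_r16[i];	/* stash the bank being left */
		frame_r16[i] = load_other[i];	/* load the bank switched to */
	}
}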
void vcpu_rfi(struct kvm_vcpu *vcpu)
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	regs->cr_iip = VCPU(vcpu, iip);

   VPSR can't keep track of the guest PSR bits below;
   this function reconstructs the full guest PSR.
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
	vpsr = vcpu_get_psr(vcpu);
	vcpu_set_psr(vcpu, vpsr);

void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
	vpsr = vcpu_get_psr(vcpu);
	vcpu_set_psr(vcpu, vpsr);

 * bit -- starting bit
 * len -- how many bits
#define MASK(bit,len)					\
	__asm __volatile("dep %0=-1, r0, %1, %2"	\

void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
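
/*
 * Illustration only: MASK(bit, len) uses the dep instruction to deposit len
 * one-bits starting at bit position bit.  A plain-C equivalent (the
 * len == 64 case needs care because of C shift rules) would be
 *
 *	mask = ((len >= 64) ? ~0UL : (1UL << len) - 1) << bit;
 *
 * so vcpu_set_psr_l() above keeps the caller's low 32 PSR bits,
 * MASK(0, 32) = 0x00000000ffffffff, and the vcpu's current upper 32 bits,
 * MASK(32, 32) = 0xffffffff00000000.
 */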
void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
	val = vcpu_get_gr(vcpu, inst.M35.r2);
	vcpu_set_psr_l(vcpu, val);

void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);

void vcpu_increment_iip(struct kvm_vcpu *vcpu)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	if (ipsr->ri == 2) {

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	if (ipsr->ri == 0) {
/** Emulate a privileged operation.
 * @param vcpu virtual cpu
 * @cause the reason that caused the virtualization fault
 * @opcode the instruction code which caused the virtualization fault
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
	unsigned long status, cause, opcode;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);

	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	prepare_if_physical_mode(vcpu);

		kvm_rsm(vcpu, inst);
		kvm_ssm(vcpu, inst);
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		kvm_itr_d(vcpu, inst);
		kvm_itr_i(vcpu, inst);
		kvm_ptr_d(vcpu, inst);
		kvm_ptr_i(vcpu, inst);
		kvm_itc_d(vcpu, inst);
		kvm_itc_i(vcpu, inst);
		kvm_ptc_l(vcpu, inst);
		kvm_ptc_g(vcpu, inst);
		kvm_ptc_ga(vcpu, inst);
		kvm_ptc_e(vcpu, inst);
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		kvm_thash(vcpu, inst);
		kvm_ttag(vcpu, inst);
		status = kvm_tpa(vcpu, inst);
		kvm_tak(vcpu, inst);
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		status = IA64_FAULT;

	/* Assume all status is NO_FAULT? */
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
void init_vcpu(struct kvm_vcpu *vcpu)
	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;
	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;	/* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;	/* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */
	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
	local_irq_save(psr);

	/* WARNING: virtual mode and physical mode must not be allowed
	 * to co-exist in the same region
	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();

	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,

void panic_vm(struct kvm_vcpu *v)
	struct exit_ctl_data *p = &v->arch.exit_data;
	p->exit_reason = EXIT_REASON_VM_PANIC;