/*
 * arch/ia64/kvm/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 * Copyright (C) 2008 Intel Co
 *	Add the support for Tukwila processors.
 *	Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/kvm_host.h>

#include "asm-offsets.h"

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_MOV_TO_PSR

#define VMX_VPS_SYNC_READ			\
	add r16=VMM_VPD_BASE_OFFSET,r21;	\
	br.sptk.many kvm_vps_sync_read;		\

	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21

/*
 *	r24 : return address
 */
GLOBAL_ENTRY(kvm_vps_sync_read)
	movl r30 = PAL_VPS_SYNC_READ
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

/*
 *	r24 : return address
 */
GLOBAL_ENTRY(kvm_vps_sync_write)
	movl r30 = PAL_VPS_SYNC_WRITE
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)

GLOBAL_ENTRY(kvm_vps_resume_normal)
	movl r30 = PAL_VPS_RESUME_NORMAL
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)

GLOBAL_ENTRY(kvm_vps_resume_handler)
	movl r30 = PAL_VPS_RESUME_HANDLER
	shr r17=r17,IA64_ISR_IR_BIT
	dep r26=r17,r26,63,1	// bit 63 of r26 indicates whether to enable CFLE
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)
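
/*
 * A hedged C sketch (not from the original source) of the dispatch
 * pattern above: each kvm_vps_* stub loads a PAL VPS service offset
 * (PAL_VPS_SYNC_READ, PAL_VPS_SYNC_WRITE, PAL_VPS_RESUME_*) into r30,
 * and kvm_vps_entry branches to the vcpu's vsa_base plus that offset.
 *
 *	typedef void (*vps_handler_t)(void);
 *
 *	static vps_handler_t vps_entry_sketch(unsigned long vsa_base,
 *					      unsigned long service_off)
 *	{
 *		// counterpart of loading VMM_VCPU_VSA_BASE_OFFSET and
 *		// adding the PAL_VPS_* offset selected by the stub
 *		return (vps_handler_t)(vsa_base + service_off);
 *	}
 */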

GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
	br.many kvm_virtualization_fault_back
#endif
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	addl r20=@gprel(asm_mov_to_reg),gp
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
END(kvm_asm_mov_from_ar)

/*
 * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
 * clock as its source for emulating the ITC. This version will be
 * copied on top of the original version if the host is determined to
 * be an SN2.
 */
GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
	movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	addl r20=@gprel(asm_mov_to_reg),gp
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
END(kvm_asm_mov_from_ar_sn2)
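
/*
 * A hedged C sketch (names assumed, not from the original source) of the
 * guest ITC emulation done by kvm_asm_mov_from_ar and the SN2 variant
 * above: add the per-vcpu ITC offset to a host time source (ar.itc, or
 * the SN2 RTC here) and use last_itc so the value the guest reads stays
 * monotonic.
 *
 *	static unsigned long guest_itc_sketch(unsigned long host_ticks,
 *					      unsigned long itc_offset,
 *					      unsigned long *last_itc)
 *	{
 *		unsigned long val = host_ticks + itc_offset;
 *
 *		if (val > *last_itc)
 *			*last_itc = val;	// never report time going backwards
 *		return *last_itc;
 *	}
 */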

GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
	br.many kvm_virtualization_fault_back
#endif
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
	add r27=VMM_VCPU_VRR0_OFFSET,r21
kvm_asm_mov_from_rr_back_1:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
END(kvm_asm_mov_from_rr)

GLOBAL_ENTRY(kvm_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
	br.many kvm_virtualization_fault_back
#endif
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
	add r27=VMM_VCPU_VRR0_OFFSET,r21
kvm_asm_mov_to_rr_back_1:
	adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
(p6) br.cond.dpnt.many kvm_virtualization_fault_back
kvm_asm_mov_to_rr_back_2:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	;;				// vrr.rid << 4 | 0xe
	shladd r16 = r16, 4, r17
(p6) dep r19=r18,r19,2,6
	cmp.eq.or p6,p0=4,r23
	adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
(p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
(p6) shladd r17=r23,1,r17
(p6) tbit.nz p6,p7=r16,0
END(kvm_asm_mov_to_rr)
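
/*
 * A hedged sketch (not from the original source) of the rid mangling
 * noted above ("vrr.rid<<4 | 0xe"): the rid programmed into the machine
 * region register is the guest rid shifted left by 4 with 0xe in the low
 * bits; the remaining rr fields are filled in by the surrounding code.
 *
 *	static unsigned long mangle_rid_sketch(unsigned long guest_rid)
 *	{
 *		return (guest_rid << 4) | 0xe;	// vrr.rid << 4 | 0xe
 *	}
 */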

GLOBAL_ENTRY(kvm_asm_rsm)
	br.many kvm_virtualization_fault_back
	add r17=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
	/* Commented out due to lack of fp lazy algorithm support
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	*/
	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
(p6) br.dptk kvm_resume_to_guest_with_sync
	add r26=VMM_VCPU_META_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	br.many kvm_resume_to_guest_with_sync
END(kvm_asm_rsm)

GLOBAL_ENTRY(kvm_asm_ssm)
	br.many kvm_virtualization_fault_back
	add r27=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	cmp.ne.or p6,p0=r28,r19
(p6) br.dptk kvm_asm_ssm_1
	add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
(p6) br.dptk kvm_resume_to_guest_with_sync
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest_with_sync
END(kvm_asm_ssm)

GLOBAL_ENTRY(kvm_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
	br.many kvm_virtualization_fault_back
#endif
	extr.u r26=r25,13,7		// r2
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
	add r27=VPD_VPSR_START_OFFSET,r16
kvm_asm_mov_to_psr_back:
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
(p5) br.many kvm_asm_mov_to_psr_1
	// virtual to physical
(p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
(p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
(p7) dep r23=-1,r23,0,1
	// physical to virtual
(p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
(p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
(p6) dep r23=0,r23,0,1
kvm_asm_mov_to_psr_1:
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
	/* Commented out due to lack of fp lazy algorithm support
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	*/
	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
(p6) br.dpnt.few kvm_resume_to_guest_with_sync
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest_with_sync
END(kvm_asm_mov_to_psr)
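
/*
 * A hedged C sketch (field names assumed, not from the original source)
 * of the mode switch decided in kvm_asm_rsm, kvm_asm_ssm and
 * kvm_asm_mov_to_psr above: when the new vpsr has dt, rt and it all set
 * the guest runs with its virtual mappings (saved rr0/rr4 pair),
 * otherwise it drops to metaphysical mode (metaphysical rr0/rr4 pair),
 * and bit 0 of the per-vcpu mode flags records physical mode.
 *
 *	static void switch_mode_sketch(unsigned long new_vpsr,
 *				       unsigned long *mode_flags)
 *	{
 *		unsigned long mask = IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;
 *		int physical = (new_vpsr & mask) != mask;
 *
 *		// load META_RR0/META_RR0+8 when physical,
 *		// META_SAVED_RR0/META_SAVED_RR0+8 when virtual
 *		*mode_flags = (*mode_flags & ~1UL) | (physical ? 1UL : 0UL);
 *	}
 */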

ENTRY(kvm_asm_dispatch_vexirq)
	add r25=VMM_VPD_BASE_OFFSET,r21
	br.sptk.many kvm_vps_sync_write
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
(p6) add r18=0x10,r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	br.many kvm_dispatch_vexirq
END(kvm_asm_dispatch_vexirq)
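
/*
 * A hedged C sketch (not from the original source) of the ipsr.ri/iip
 * update done above and in kvm_resume_to_guest: step past the faulting
 * instruction by advancing to the next slot, or to the next 16-byte
 * bundle when the last slot (ri == 2) just executed.
 *
 *	static void advance_ip_sketch(unsigned long *iip, unsigned long *ri)
 *	{
 *		if (*ri == 2) {
 *			*iip += 0x10;	// next bundle
 *			*ri = 0;
 *		} else {
 *			*ri += 1;	// next slot in the same bundle
 *		}
 *	}
 */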

// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(kvm_asm_thash)
	br.many kvm_virtualization_fault_back
	extr.u r17=r25,20,7		// get r3 from opcode in r25
	extr.u r18=r25,6,7		// get r1 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
	shladd r17=r17,4,r20		// get addr of MOVE_FROM_REG(r17)
	adds r16=VMM_VPD_BASE_OFFSET,r21	// get vcpu.arch.priveregs
	ld8 r16=[r16]			// get VPD addr
	br.many b0			// r19 return value
kvm_asm_thash_back1:
	shr.u r23=r19,61		// get RR number
	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
	shladd r27=r23,3,r28		// get vcpu->arch.vrr[r23]'s addr
	ld8 r17=[r16]			// get PTA
	extr.u r29=r17,2,6		// get pta.size
	ld8 r28=[r27]			// get vcpu->arch.vrr[r23]'s value
	// Fall back to C if pta.vf is set
(p6) mov r24=EVENT_THASH
(p6) br.cond.dpnt.many kvm_virtualization_fault_back
	extr.u r28=r28,2,6		// get rr.ps
	shl r22=r26,r29			// 1UL << pta.size
	shr.u r23=r19,r28		// vaddr >> rr.ps
	adds r26=3,r29			// pta.size + 3
	shl r27=r17,3			// pta << 3
	shl r23=r23,3			// (vaddr >> rr.ps) << 3
	shr.u r27=r27,r26		// (pta << 3) >> (pta.size + 3)
	adds r22=-1,r22			// (1UL << pta.size) - 1
	shl r27=r27,r29			// ((pta << 3) >> (pta.size + 3)) << pta.size
	and r19=r19,r16			// vaddr & VRN_MASK
	and r22=r22,r23			// vhpt_offset
	or r19=r19,r27			// (vaddr & VRN_MASK) | (((pta << 3) >> (pta.size + 3)) << pta.size)
	adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
	or r19=r19,r22			// calc pval
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
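
/*
 * For reference, a hedged C sketch (not part of the original source) of
 * the short-format VHPT hash computed above, with pta.size and rr.ps
 * already extracted as in the assembly:
 *
 *	unsigned long thash_sketch(unsigned long vaddr, unsigned long pta,
 *				   unsigned long pta_size, unsigned long rr_ps)
 *	{
 *		unsigned long vrn_mask = 0x7UL << 61;	// vaddr & VRN_MASK
 *		unsigned long base = ((pta << 3) >> (pta_size + 3)) << pta_size;
 *		unsigned long off  = ((vaddr >> rr_ps) << 3) &
 *					((1UL << pta_size) - 1);	// vhpt_offset
 *		return (vaddr & vrn_mask) | base | off;		// pval
 *	}
 */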

#define MOV_TO_REG0				\

#define MOV_TO_REG(n)				\

#define MOV_FROM_REG(n)				\

#define MOV_TO_BANK0_REG(n)			\
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);	\
END(asm_mov_to_bank0_reg##n##)

#define MOV_FROM_BANK0_REG(n)			\
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);	\
END(asm_mov_from_bank0_reg##n##)

#define JMP_TO_MOV_TO_BANK0_REG(n)		\
	br.sptk.many asm_mov_to_bank0_reg##n##;	\

#define JMP_TO_MOV_FROM_BANK0_REG(n)		\
	br.sptk.many asm_mov_from_bank0_reg##n##;	\

MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)

// mov from reg table
ENTRY(asm_mov_from_reg)
	JMP_TO_MOV_FROM_BANK0_REG(16)
	JMP_TO_MOV_FROM_BANK0_REG(17)
	JMP_TO_MOV_FROM_BANK0_REG(18)
	JMP_TO_MOV_FROM_BANK0_REG(19)
	JMP_TO_MOV_FROM_BANK0_REG(20)
	JMP_TO_MOV_FROM_BANK0_REG(21)
	JMP_TO_MOV_FROM_BANK0_REG(22)
	JMP_TO_MOV_FROM_BANK0_REG(23)
	JMP_TO_MOV_FROM_BANK0_REG(24)
	JMP_TO_MOV_FROM_BANK0_REG(25)
	JMP_TO_MOV_FROM_BANK0_REG(26)
	JMP_TO_MOV_FROM_BANK0_REG(27)
	JMP_TO_MOV_FROM_BANK0_REG(28)
	JMP_TO_MOV_FROM_BANK0_REG(29)
	JMP_TO_MOV_FROM_BANK0_REG(30)
	JMP_TO_MOV_FROM_BANK0_REG(31)
END(asm_mov_from_reg)

ENTRY(kvm_resume_to_guest_with_sync)
	adds r19=VMM_VPD_BASE_OFFSET,r21
	br.sptk.many kvm_vps_sync_write
	br.sptk.many kvm_resume_to_guest
END(kvm_resume_to_guest_with_sync)

ENTRY(kvm_resume_to_guest)
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
	adds r19=VMM_VPD_BASE_OFFSET,r21
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
(p6) add r18=0x10,r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	adds r19=VPD_VPSR_START_OFFSET,r25
	add r28=PAL_VPS_RESUME_NORMAL,r20
	add r29=PAL_VPS_RESUME_HANDLER,r20
	tbit.z p6,p7=r19,IA64_PSR_IC_BIT	// p7=vpsr.ic
	shr r27=r27,IA64_ISR_IR_BIT
(p6) dep r26=r27,r26,63,1
	br.sptk.many b0			// call pal service
END(kvm_resume_to_guest)
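
/*
 * A hedged C sketch (not from the original source) of the resume
 * selection above: return through PAL_VPS_RESUME_NORMAL when the guest's
 * vpsr.ic is set, otherwise through PAL_VPS_RESUME_HANDLER with bit 63
 * of the handler argument carrying isr.ir (whether to enable CFLE).
 *
 *	static unsigned long pick_resume_sketch(unsigned long vsa_base,
 *						unsigned long vpsr,
 *						unsigned long isr,
 *						unsigned long *arg)
 *	{
 *		if (vpsr & IA64_PSR_IC)
 *			return vsa_base + PAL_VPS_RESUME_NORMAL;
 *
 *		*arg &= ~(1UL << 63);
 *		*arg |= ((isr >> IA64_ISR_IR_BIT) & 1UL) << 63;
 *		return vsa_base + PAL_VPS_RESUME_HANDLER;
 *	}
 */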

ENTRY(asm_mov_to_reg)
	JMP_TO_MOV_TO_BANK0_REG(16)
	JMP_TO_MOV_TO_BANK0_REG(17)
	JMP_TO_MOV_TO_BANK0_REG(18)
	JMP_TO_MOV_TO_BANK0_REG(19)
	JMP_TO_MOV_TO_BANK0_REG(20)
	JMP_TO_MOV_TO_BANK0_REG(21)
	JMP_TO_MOV_TO_BANK0_REG(22)
	JMP_TO_MOV_TO_BANK0_REG(23)
	JMP_TO_MOV_TO_BANK0_REG(24)
	JMP_TO_MOV_TO_BANK0_REG(25)
	JMP_TO_MOV_TO_BANK0_REG(26)
	JMP_TO_MOV_TO_BANK0_REG(27)
	JMP_TO_MOV_TO_BANK0_REG(28)
	JMP_TO_MOV_TO_BANK0_REG(29)
	JMP_TO_MOV_TO_BANK0_REG(30)
	JMP_TO_MOV_TO_BANK0_REG(31)