 * arch/ia64/kvm/optvfault.S
 * optimize virtualization fault handler
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include "asm-offsets.h"

#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_MOV_TO_PSR
	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21

 * r24 : return address

GLOBAL_ENTRY(kvm_vps_sync_read)
	movl r30 = PAL_VPS_SYNC_READ
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_read)

 * r24 : return address

GLOBAL_ENTRY(kvm_vps_sync_write)
	movl r30 = PAL_VPS_SYNC_WRITE
	br.sptk.many kvm_vps_entry
END(kvm_vps_sync_write)

GLOBAL_ENTRY(kvm_vps_resume_normal)
	movl r30 = PAL_VPS_RESUME_NORMAL
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_normal)
GLOBAL_ENTRY(kvm_vps_resume_handler)
	movl r30 = PAL_VPS_RESUME_HANDLER
	shr r17=r17,IA64_ISR_IR_BIT
	dep r27=r17,r27,63,1	// bit 63 of r27 indicates whether CFLE is enabled
	br.sptk.many kvm_vps_entry
END(kvm_vps_resume_handler)
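/*
 * Accelerated read of ar.itc.  Judging from the per-vcpu fields referenced
 * below (itc offset and last itc), the guest ITC is the machine ITC plus a
 * per-vcpu offset, kept monotonic.  Illustrative sketch only, not the exact
 * implementation:
 *
 *	guest_itc = machine_itc + itc_offset;
 *	if (guest_itc > last_itc)
 *		last_itc = guest_itc;
 *	result = last_itc;
 */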
GLOBAL_ENTRY(kvm_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
	br.many kvm_virtualization_fault_back
	add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
	add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
	addl r20=@gprel(asm_mov_to_reg),gp
	adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
END(kvm_asm_mov_from_ar)
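/*
 * Accelerated emulation of "mov rX = rr[rY]": the address is fetched through
 * the asm_mov_from_reg table, the region number is taken from its top bits,
 * and the result appears to be read from the guest's shadow region registers
 * at vcpu->arch.vrr[] (VMM_VCPU_VRR0_OFFSET) before being written back
 * through asm_mov_to_reg.
 */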
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
#ifndef ACCE_MOV_FROM_RR
	br.many kvm_virtualization_fault_back
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
	add r27=VMM_VCPU_VRR0_OFFSET,r21
kvm_asm_mov_from_rr_back_1:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
END(kvm_asm_mov_from_rr)
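/*
 * Accelerated emulation of "mov rr[rX] = rY".  The new value is kept in
 * vcpu->arch.vrr[], and a machine rr value is derived from it; per the
 * inline comment below the machine rid is built as (vrr.rid << 4) | 0xe.
 * rr0/rr4 additionally go through VMM_VCPU_META_SAVED_RR0, and whether the
 * machine register is updated right away appears to depend on the vcpu's
 * physical/virtual addressing mode (VMM_VCPU_MODE_FLAGS).
 */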
GLOBAL_ENTRY(kvm_asm_mov_to_rr)
#ifndef ACCE_MOV_TO_RR
	br.many kvm_virtualization_fault_back
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
	add r27=VMM_VCPU_VRR0_OFFSET,r21
kvm_asm_mov_to_rr_back_1:
	adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
	(p6) br.cond.dpnt.many kvm_virtualization_fault_back
kvm_asm_mov_to_rr_back_2:
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
	;;	// (vrr.rid << 4) | 0xe
	shladd r16 = r16, 4, r17
	(p6) dep r19=r18,r19,2,6
	cmp.eq.or p6,p0=4,r23
	adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	(p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	(p6) shladd r17=r23,1,r17
	(p6) tbit.nz p6,p7=r16,0
END(kvm_asm_mov_to_rr)
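/*
 * Accelerated emulation of "rsm imm24": the masked bits are cleared in the
 * shadow vpsr held in the VPD.  When the guest turns data translation off,
 * the metaphysical rr0/rr4 values (VMM_VCPU_META_RR0/+8) are loaded so the
 * low region registers map the guest's physical-mode view.
 */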
GLOBAL_ENTRY(kvm_asm_rsm)
	br.many kvm_virtualization_fault_back
	add r16=VMM_VPD_BASE_OFFSET,r21
	add r17=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
	/* Commented out due to lack of fp lazy algorithm support
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
	(p6) br.dptk kvm_resume_to_guest
	add r26=VMM_VCPU_META_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	br.many kvm_resume_to_guest
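/*
 * Accelerated emulation of "ssm imm24": the masked bits are set in the
 * shadow vpsr.  Once psr.dt/rt/it are all set again, the saved virtual
 * rr0/rr4 (VMM_VCPU_META_SAVED_RR0/+8) are restored, and vtpr/vhpi are
 * checked so that an interrupt unmasked by the ssm is forwarded through
 * kvm_asm_dispatch_vexirq.
 */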
GLOBAL_ENTRY(kvm_asm_ssm)
	br.many kvm_virtualization_fault_back
	add r16=VMM_VPD_BASE_OFFSET,r21
	add r27=VPD_VPSR_START_OFFSET,r16
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	cmp.ne.or p6,p0=r28,r19
	(p6) br.dptk kvm_asm_ssm_1
	add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
	(p6) br.dptk kvm_resume_to_guest
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest
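/*
 * Accelerated emulation of "mov psr.l = rX".  The source value comes from
 * the r2 field of the opcode; depending on how the translation bits change,
 * rr0/rr4 are switched between the metaphysical (VMM_VCPU_META_RR0) and the
 * saved virtual (VMM_VCPU_META_SAVED_RR0) values, and a newly unmasked
 * interrupt is forwarded through kvm_asm_dispatch_vexirq, as in kvm_asm_ssm
 * above.
 */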
GLOBAL_ENTRY(kvm_asm_mov_to_psr)
#ifndef ACCE_MOV_TO_PSR
	br.many kvm_virtualization_fault_back
	add r16=VMM_VPD_BASE_OFFSET,r21
	extr.u r26=r25,13,7	// get r2 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
	add r27=VPD_VPSR_START_OFFSET,r16
kvm_asm_mov_to_psr_back:
	add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
	movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
	(p5) br.many kvm_asm_mov_to_psr_1
	// virtual to physical
	(p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
	(p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
	(p7) dep r23=-1,r23,0,1
	// physical to virtual
	(p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
	(p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
	(p6) dep r23=0,r23,0,1
kvm_asm_mov_to_psr_1:
	movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
	/* Commented out due to lack of fp lazy algorithm support
	adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
	tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
	(p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
	(p6) br.dpnt.few kvm_resume_to_guest
	add r29=VPD_VTPR_START_OFFSET,r16
	add r30=VPD_VHPI_START_OFFSET,r16
	(p6) br.dpnt.few kvm_asm_dispatch_vexirq
	br.many kvm_resume_to_guest
END(kvm_asm_mov_to_psr)
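/*
 * Hand a virtual external interrupt to the guest.  The guest IP is first
 * advanced past the current slot: ipsr.ri is incremented, and when the
 * slot was 2 it wraps to 0 and iip is bumped by one bundle (0x10).
 * Control then goes to the C-level handler kvm_dispatch_vexirq.
 */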
ENTRY(kvm_asm_dispatch_vexirq)
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
	(p6) add r18=0x10,r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	br.many kvm_dispatch_vexirq
END(kvm_asm_dispatch_vexirq)
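/*
 * Accelerated emulation of "thash".  For the short-format VHPT (pta.vf == 0,
 * see the TODO below) the inline comments spell out the hash that is
 * computed; roughly, as an illustrative sketch:
 *
 *	vhpt_offset = ((vaddr >> rr.ps) << 3) & ((1UL << pta.size) - 1);
 *	pval = (vaddr & VRN_MASK)
 *	     | (((pta << 3) >> (pta.size + 3)) << pta.size)
 *	     | vhpt_offset;
 */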
// TODO: add support when pta.vf = 1
GLOBAL_ENTRY(kvm_asm_thash)
	br.many kvm_virtualization_fault_back
	extr.u r17=r25,20,7		// get r3 from opcode in r25
	extr.u r18=r25,6,7		// get r1 from opcode in r25
	addl r20=@gprel(asm_mov_from_reg),gp
	adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
	shladd r17=r17,4,r20		// get addr of MOV_FROM_REG(r17)
	adds r16=VMM_VPD_BASE_OFFSET,r21	// get vcpu.arch.priveregs
	ld8 r16=[r16]			// get VPD addr
	br.many b0			// r19 return value
	shr.u r23=r19,61		// get RR number
	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
	adds r16=VMM_VPD_VPTA_OFFSET,r16	// get vpta
	shladd r27=r23,3,r25		// get vcpu->arch.vrr[r23]'s addr
	ld8 r17=[r16]			// get PTA
	extr.u r29=r17,2,6		// get pta.size
	ld8 r25=[r27]			// get vcpu->arch.vrr[r23]'s value
	extr.u r25=r25,2,6		// get rr.ps
	shl r22=r26,r29			// 1UL << pta.size
	shr.u r23=r19,r25		// vaddr >> rr.ps
	adds r26=3,r29			// pta.size + 3
	shl r27=r17,3			// pta << 3
	shl r23=r23,3			// (vaddr >> rr.ps) << 3
	shr.u r27=r27,r26		// (pta << 3) >> (pta.size + 3)
	adds r22=-1,r22			// (1UL << pta.size) - 1
	shl r27=r27,r29			// ((pta << 3) >> (pta.size + 3)) << pta.size
	and r19=r19,r16			// vaddr & VRN_MASK
	and r22=r22,r23			// vhpt_offset
	or r19=r19,r27			// (vaddr & VRN_MASK) | (((pta << 3) >> (pta.size + 3)) << pta.size)
	adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
	or r19=r19,r22			// calc pval
	adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
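/*
 * The macros below generate the register move helpers and the dispatch
 * tables asm_mov_from_reg/asm_mov_to_reg.  Each table entry is a fixed
 * 16 bytes, which is why callers index the tables with
 * "shladd rX = reg, 4, table_base" (see kvm_asm_thash above).  Bank 0
 * registers r16-r31 get dedicated helpers, presumably so the saved bank 0
 * copies in the vcpu state are used instead of the live registers.
 */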
#define MOV_TO_REG0	\
#define MOV_TO_REG(n)	\
#define MOV_FROM_REG(n)	\
#define MOV_TO_BANK0_REG(n)	\
ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);	\
END(asm_mov_to_bank0_reg##n##)
#define MOV_FROM_BANK0_REG(n)	\
ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);	\
END(asm_mov_from_bank0_reg##n##)
#define JMP_TO_MOV_TO_BANK0_REG(n)	\
	br.sptk.many asm_mov_to_bank0_reg##n##;	\
#define JMP_TO_MOV_FROM_BANK0_REG(n)	\
	br.sptk.many asm_mov_from_bank0_reg##n##;	\
MOV_FROM_BANK0_REG(16)
MOV_FROM_BANK0_REG(17)
MOV_FROM_BANK0_REG(18)
MOV_FROM_BANK0_REG(19)
MOV_FROM_BANK0_REG(20)
MOV_FROM_BANK0_REG(21)
MOV_FROM_BANK0_REG(22)
MOV_FROM_BANK0_REG(23)
MOV_FROM_BANK0_REG(24)
MOV_FROM_BANK0_REG(25)
MOV_FROM_BANK0_REG(26)
MOV_FROM_BANK0_REG(27)
MOV_FROM_BANK0_REG(28)
MOV_FROM_BANK0_REG(29)
MOV_FROM_BANK0_REG(30)
MOV_FROM_BANK0_REG(31)
// mov from reg table
ENTRY(asm_mov_from_reg)
	JMP_TO_MOV_FROM_BANK0_REG(16)
	JMP_TO_MOV_FROM_BANK0_REG(17)
	JMP_TO_MOV_FROM_BANK0_REG(18)
	JMP_TO_MOV_FROM_BANK0_REG(19)
	JMP_TO_MOV_FROM_BANK0_REG(20)
	JMP_TO_MOV_FROM_BANK0_REG(21)
	JMP_TO_MOV_FROM_BANK0_REG(22)
	JMP_TO_MOV_FROM_BANK0_REG(23)
	JMP_TO_MOV_FROM_BANK0_REG(24)
	JMP_TO_MOV_FROM_BANK0_REG(25)
	JMP_TO_MOV_FROM_BANK0_REG(26)
	JMP_TO_MOV_FROM_BANK0_REG(27)
	JMP_TO_MOV_FROM_BANK0_REG(28)
	JMP_TO_MOV_FROM_BANK0_REG(29)
	JMP_TO_MOV_FROM_BANK0_REG(30)
	JMP_TO_MOV_FROM_BANK0_REG(31)
END(asm_mov_from_reg)
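/*
 * Common exit path back to the guest: restore the saved gp, advance
 * iip/ipsr.ri past the emulated instruction (the same slot arithmetic as in
 * kvm_asm_dispatch_vexirq), and re-enter the guest through the PAL
 * virtualization services, with vpsr.ic selecting between
 * PAL_VPS_RESUME_NORMAL and PAL_VPS_RESUME_HANDLER.
 */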
ENTRY(kvm_resume_to_guest)
	adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
	adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
	adds r19=VMM_VPD_BASE_OFFSET,r21
	extr.u r17=r16,IA64_PSR_RI_BIT,2
	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
	(p6) add r18=0x10,r18
	dep r16=r17,r16,IA64_PSR_RI_BIT,2
	adds r19= VPD_VPSR_START_OFFSET,r25
	add r28=PAL_VPS_RESUME_NORMAL,r20
	add r29=PAL_VPS_RESUME_HANDLER,r20
	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT	// p7 = vpsr.ic
	br.sptk.many b0			// call pal service
END(kvm_resume_to_guest)
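// mov to reg table: counterpart of asm_mov_from_reg above, writing the
// result into the guest register selected by the same 16-byte indexing.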
ENTRY(asm_mov_to_reg)
	JMP_TO_MOV_TO_BANK0_REG(16)
	JMP_TO_MOV_TO_BANK0_REG(17)
	JMP_TO_MOV_TO_BANK0_REG(18)
	JMP_TO_MOV_TO_BANK0_REG(19)
	JMP_TO_MOV_TO_BANK0_REG(20)
	JMP_TO_MOV_TO_BANK0_REG(21)
	JMP_TO_MOV_TO_BANK0_REG(22)
	JMP_TO_MOV_TO_BANK0_REG(23)
	JMP_TO_MOV_TO_BANK0_REG(24)
	JMP_TO_MOV_TO_BANK0_REG(25)
	JMP_TO_MOV_TO_BANK0_REG(26)
	JMP_TO_MOV_TO_BANK0_REG(27)
	JMP_TO_MOV_TO_BANK0_REG(28)
	JMP_TO_MOV_TO_BANK0_REG(29)
	JMP_TO_MOV_TO_BANK0_REG(30)
	JMP_TO_MOV_TO_BANK0_REG(31)