Like the FPU exception handler, check the LSX capability in the LSX exception handler rather than in kvm_own_lsx(). Signed-off-by: Bibo Mao --- arch/loongarch/kvm/exit.c | 4 +++- arch/loongarch/kvm/vcpu.c | 3 --- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index cb493980d874..76eec3f24953 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -792,8 +792,10 @@ static long kvm_save_notify(struct kvm_vcpu *vcpu) */ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) { - if (kvm_own_lsx(vcpu)) + if (!kvm_guest_has_lsx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); + else + kvm_own_lsx(vcpu); return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 656b954c1134..aff295aa6b0b 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1358,9 +1358,6 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) /* Enable LSX and restore context */ int kvm_own_lsx(struct kvm_vcpu *vcpu) { - if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) - return -EINVAL; - preempt_disable(); /* Enable LSX for guest */ -- 2.39.3 Like the FPU exception handler, check the LASX capability in the LASX exception handler rather than in the function kvm_own_lasx(). 
Signed-off-by: Bibo Mao --- arch/loongarch/kvm/exit.c | 4 +++- arch/loongarch/kvm/vcpu.c | 3 --- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 76eec3f24953..74b427287e96 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -810,8 +810,10 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) */ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) { - if (kvm_own_lasx(vcpu)) + if (!kvm_guest_has_lasx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); + else + kvm_own_lasx(vcpu); return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index aff295aa6b0b..d91a1160a309 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1391,9 +1391,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) /* Enable LASX and restore context */ int kvm_own_lasx(struct kvm_vcpu *vcpu) { - if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) - return -EINVAL; - preempt_disable(); kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); -- 2.39.3 FPU is lazily enabled under the KVM hypervisor. After the FPU is enabled and its context loaded, the vCPU can be preempted and the FPU context will be lost again. Here FPU loading is delayed until the guest-entry path. 
Signed-off-by: Bibo Mao --- arch/loongarch/include/asm/kvm_host.h | 3 +++ arch/loongarch/kvm/exit.c | 6 +++--- arch/loongarch/kvm/vcpu.c | 24 +++++++++--------------- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index e4fe5b8e8149..2ad61f2dc3a9 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -37,6 +37,9 @@ #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1) #define KVM_REQ_PMU KVM_ARCH_REQ(2) +#define KVM_REQ_FPU_LOAD KVM_ARCH_REQ(3) +#define KVM_REQ_LSX_LOAD KVM_ARCH_REQ(4) +#define KVM_REQ_LASX_LOAD KVM_ARCH_REQ(5) #define KVM_GUESTDBG_SW_BP_MASK \ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 74b427287e96..f979e70da7c3 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -754,7 +754,7 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode) return RESUME_HOST; } - kvm_own_fpu(vcpu); + kvm_make_request(KVM_REQ_FPU_LOAD, vcpu); return RESUME_GUEST; } @@ -795,7 +795,7 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) if (!kvm_guest_has_lsx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); else - kvm_own_lsx(vcpu); + kvm_make_request(KVM_REQ_LSX_LOAD, vcpu); return RESUME_GUEST; } @@ -813,7 +813,7 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) if (!kvm_guest_has_lasx(&vcpu->arch)) kvm_queue_exception(vcpu, EXCCODE_INE, 0); else - kvm_own_lasx(vcpu); + kvm_make_request(KVM_REQ_LASX_LOAD, vcpu); return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index d91a1160a309..572cae4f7882 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -232,6 +232,15 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu) kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); 
vcpu->arch.flush_gpa = INVALID_GPA; } + + if (kvm_check_request(KVM_REQ_FPU_LOAD, vcpu)) + kvm_own_fpu(vcpu); + + if (kvm_check_request(KVM_REQ_LSX_LOAD, vcpu)) + kvm_own_lsx(vcpu); + + if (kvm_check_request(KVM_REQ_LASX_LOAD, vcpu)) + kvm_own_lasx(vcpu); } /* @@ -1338,8 +1347,6 @@ static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { } /* Enable FPU and restore context */ void kvm_own_fpu(struct kvm_vcpu *vcpu) { - preempt_disable(); - /* * Enable FPU for guest * Set FR and FRE according to guest context @@ -1349,17 +1356,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) kvm_restore_fpu(&vcpu->arch.fpu); vcpu->arch.aux_inuse |= KVM_LARCH_FPU; - trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); - - preempt_enable(); } #ifdef CONFIG_CPU_HAS_LSX /* Enable LSX and restore context */ int kvm_own_lsx(struct kvm_vcpu *vcpu) { - preempt_disable(); - /* Enable LSX for guest */ kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); @@ -1379,10 +1381,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) break; } - trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; - preempt_enable(); - return 0; } #endif @@ -1391,8 +1390,6 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) /* Enable LASX and restore context */ int kvm_own_lasx(struct kvm_vcpu *vcpu) { - preempt_disable(); - kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { @@ -1412,10 +1409,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu) break; } - trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; - preempt_enable(); - return 0; } #endif -- 2.39.3