While we currently track that we are emulating a nested ERET from L1 to L2, we don't track the reverse direction (an exception going from L2 to L1). Add a new vcpu state flag for this purpose, which will see some use shortly. Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 ++- arch/arm64/kvm/emulate-nested.c | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 65eead8362e0b..c79747d5f4dd1 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1112,7 +1112,8 @@ struct kvm_vcpu_arch { #define IN_NESTED_ERET __vcpu_single_flag(sflags, BIT(7)) /* SError pending for nested guest */ #define NESTED_SERROR_PENDING __vcpu_single_flag(sflags, BIT(8)) - +/* KVM is currently emulating an L2 to L1 exception */ +#define IN_NESTED_EXCEPTION __vcpu_single_flag(sflags, BIT(9)) /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \ diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index dba7ced74ca5e..15c691a6266d5 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -2862,6 +2862,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2, preempt_disable(); + vcpu_set_flag(vcpu, IN_NESTED_EXCEPTION); + /* * We may have an exception or PC update in the EL0/EL1 context. * Commit it before entering EL2. */ @@ -2884,6 +2886,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2, __kvm_adjust_pc(vcpu); kvm_arch_vcpu_load(vcpu, smp_processor_id()); + vcpu_clear_flag(vcpu, IN_NESTED_EXCEPTION); + preempt_enable(); if (kvm_vcpu_has_pmu(vcpu)) -- 2.47.3