When testing for VMLAUNCH vs. VMRESUME, use the copy of @flags from the
stack instead of first moving it to EBX, and then propagating
VMX_RUN_VMRESUME to RFLAGS.CF (because RBX is clobbered with the guest
value prior to the conditional branch to VMLAUNCH).  Stashing information
in RFLAGS is gross, especially with the writer and reader being bifurcated
by yet more gnarly assembly code.

Opportunistically drop the SHIFT macros as they existed purely to allow
the VM-Enter flow to use Bit Test.

Suggested-by: Borislav Petkov
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/run_flags.h | 10 +++-------
 arch/x86/kvm/vmx/vmenter.S   | 13 ++++---------
 2 files changed, 7 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index 2f20fb170def..6a87a12135fb 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,12 +2,8 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME_SHIFT			0
-#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT		1
-#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT 2
-
-#define VMX_RUN_VMRESUME		BIT(VMX_RUN_VMRESUME_SHIFT)
-#define VMX_RUN_SAVE_SPEC_CTRL		BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
-#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO	BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
+#define VMX_RUN_VMRESUME			BIT(0)
+#define VMX_RUN_SAVE_SPEC_CTRL			BIT(1)
+#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO	BIT(2)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 574159a84ee9..93cf2ca7919a 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -92,7 +92,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Save @vmx for SPEC_CTRL handling */
 	push %_ASM_ARG1
 
-	/* Save @flags for SPEC_CTRL handling */
+	/* Save @flags (used for VMLAUNCH vs. VMRESUME and mitigations). */
 	push %_ASM_ARG3
 
 	/*
@@ -101,9 +101,6 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	 */
 	push %_ASM_ARG2
 
-	/* Copy @flags to EBX, _ASM_ARG3 is volatile. */
-	mov %_ASM_ARG3L, %ebx
-
 	lea (%_ASM_SP), %_ASM_ARG2
 	call vmx_update_host_rsp
 
@@ -147,9 +144,6 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Load @regs to RAX. */
 	mov (%_ASM_SP), %_ASM_AX
 
-	/* Check if vmlaunch or vmresume is needed */
-	bt $VMX_RUN_VMRESUME_SHIFT, %ebx
-
 	/* Load guest registers. Don't clobber flags. */
 	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
 	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
@@ -173,8 +167,9 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Clobbers EFLAGS.ZF */
 	CLEAR_CPU_BUFFERS
 
-	/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
-	jnc .Lvmlaunch
+	/* Check @flags to see if vmlaunch or vmresume is needed. */
+	testl $VMX_RUN_VMRESUME, WORD_SIZE(%_ASM_SP)
+	jz .Lvmlaunch
 
 	/*
 	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
-- 
2.52.0.rc1.455.g30608eb744-goog
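
Aside, not part of the patch: a minimal standalone sketch of the
TEST-against-a-stack-slot pattern the new code relies on, assuming a
hypothetical flags_demo.S assembled with a plain GNU toolchain; the
function name and harness are illustrative only, and bare SysV registers
stand in for the kernel's _ASM_* macros.

	/*
	 * flags_demo.S - hypothetical, for illustration only.
	 *
	 * TEST against a stack slot sets ZF at the point of the branch, so
	 * no host GPR (and no early BT staging a bit into RFLAGS.CF) has to
	 * survive while other registers are clobbered in between.
	 */
	#define RUN_VMRESUME	1	/* mirrors VMX_RUN_VMRESUME == BIT(0) */

		.text
		.globl	needs_vmresume
		.type	needs_vmresume, @function
	/* int needs_vmresume(unsigned int flags): returns 1 if VMRESUME is wanted. */
	needs_vmresume:
		push	%rdi			/* stash @flags on the stack */
		xor	%edi, %edi		/* clobber the register copy on purpose */
		testl	$RUN_VMRESUME, (%rsp)	/* ZF = !(flags & RUN_VMRESUME) */
		jz	.Llaunch		/* mirrors "jz .Lvmlaunch" */
		mov	$1, %eax
		jmp	.Lout
	.Llaunch:
		xor	%eax, %eax
	.Lout:
		pop	%rdi
		ret
		.size	needs_vmresume, .-needs_vmresume
		.section .note.GNU-stack,"",@progbits

Calling needs_vmresume(1) from a small C caller would return 1 even though
EDI is wiped before the branch; the point is only that the branch condition
comes from memory via ZF rather than from a CF bit staged long before the
jump.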