Now that VMX encodes its own sequence for clearing CPU buffers, move
VM_CLEAR_CPU_BUFFERS into SVM to minimize the chances of KVM botching a
mitigation in the future, e.g. using VM_CLEAR_CPU_BUFFERS instead of
checking multiple mitigation flags.

No functional change intended.

Reviewed-by: Brendan Jackman
Acked-by: Borislav Petkov (AMD)
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/nospec-branch.h | 3 ---
 arch/x86/kvm/svm/vmenter.S           | 6 ++++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index b1b1811216c5..3a5ac39877a0 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -324,9 +324,6 @@
 #define CLEAR_CPU_BUFFERS \
 	ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF
 
-#define VM_CLEAR_CPU_BUFFERS \
-	ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM
-
 #ifdef CONFIG_X86_64
 .macro CLEAR_BRANCH_HISTORY
 	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 98bfa2e00d88..3392bcadfb89 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -116,6 +116,8 @@
 	jmp 901b
 .endm
 
+#define SVM_CLEAR_CPU_BUFFERS \
+	ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM
 
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
@@ -194,7 +196,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov VCPU_RDI(%_ASM_DI), %_ASM_DI
 
 	/* Clobbers EFLAGS.ZF */
-	VM_CLEAR_CPU_BUFFERS
+	SVM_CLEAR_CPU_BUFFERS
 
 	/* Enter guest mode */
 3:	vmrun %_ASM_AX
@@ -366,7 +368,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov KVM_VMCB_pa(%rax), %rax
 
 	/* Clobbers EFLAGS.ZF */
-	VM_CLEAR_CPU_BUFFERS
+	SVM_CLEAR_CPU_BUFFERS
 
 	/* Enter guest mode */
 1:	vmrun %rax
-- 
2.52.0.rc1.455.g30608eb744-goog
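
For readers less familiar with the alternatives machinery, a minimal sketch of
what the renamed macro amounts to at the vmrun call sites. This is illustrative
only and not part of the patch; the exact __CLEAR_CPU_BUFFERS expansion
(presumably the VERW-based clearing sequence in nospec-branch.h) is assumed
rather than shown in this diff:

	/*
	 * Illustrative sketch, not part of the patch.  ALTERNATIVE "", insn,
	 * feature leaves NOP padding in place by default and patches in
	 * `insn` at boot only when `feature` is set, so guest entry behaves
	 * roughly as follows.
	 */

	/* X86_FEATURE_CLEAR_CPU_BUF_VM not set: NOP padding, then VMRUN */
3:	vmrun %_ASM_AX

	/*
	 * X86_FEATURE_CLEAR_CPU_BUF_VM set: buffer-clearing sequence
	 * (which clobbers EFLAGS.ZF, per the comments above), then VMRUN.
	 */
	__CLEAR_CPU_BUFFERS
3:	vmrun %_ASM_AX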