When testing for VMLAUNCH vs. VMRESUME, use the copy of @flags from the stack instead of first moving it to EBX, and then propagating VMX_RUN_VMRESUME to RFLAGS.CF (because RBX is clobbered with the guest value prior to the conditional branch to VMLAUNCH). Stashing information in RFLAGS is gross, especially with the writer and reader being bifurcated by yet more gnarly assembly code. Opportunistically drop the SHIFT macros as they existed purely to allow the VM-Enter flow to use Bit Test. Suggested-by: Borislav Petkov Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/run_flags.h | 10 +++------- arch/x86/kvm/vmx/vmenter.S | 13 ++++--------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h index 2f20fb170def..6a87a12135fb 100644 --- a/arch/x86/kvm/vmx/run_flags.h +++ b/arch/x86/kvm/vmx/run_flags.h @@ -2,12 +2,8 @@ #ifndef __KVM_X86_VMX_RUN_FLAGS_H #define __KVM_X86_VMX_RUN_FLAGS_H -#define VMX_RUN_VMRESUME_SHIFT 0 -#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1 -#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT 2 - -#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT) -#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT) -#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT) +#define VMX_RUN_VMRESUME BIT(0) +#define VMX_RUN_SAVE_SPEC_CTRL BIT(1) +#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(2) #endif /* __KVM_X86_VMX_RUN_FLAGS_H */ diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 574159a84ee9..93cf2ca7919a 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -92,7 +92,7 @@ SYM_FUNC_START(__vmx_vcpu_run) /* Save @vmx for SPEC_CTRL handling */ push %_ASM_ARG1 - /* Save @flags for SPEC_CTRL handling */ + /* Save @flags (used for VMLAUNCH vs. VMRESUME and mitigations). */ push %_ASM_ARG3 /* @@ -101,9 +101,6 @@ SYM_FUNC_START(__vmx_vcpu_run) */ push %_ASM_ARG2 - /* Copy @flags to EBX, _ASM_ARG3 is volatile. */ - mov %_ASM_ARG3L, %ebx - lea (%_ASM_SP), %_ASM_ARG2 call vmx_update_host_rsp @@ -147,9 +144,6 @@ SYM_FUNC_START(__vmx_vcpu_run) /* Load @regs to RAX. */ mov (%_ASM_SP), %_ASM_AX - /* Check if vmlaunch or vmresume is needed */ - bt $VMX_RUN_VMRESUME_SHIFT, %ebx - /* Load guest registers. Don't clobber flags. */ mov VCPU_RCX(%_ASM_AX), %_ASM_CX mov VCPU_RDX(%_ASM_AX), %_ASM_DX @@ -173,8 +167,9 @@ SYM_FUNC_START(__vmx_vcpu_run) /* Clobbers EFLAGS.ZF */ CLEAR_CPU_BUFFERS - /* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */ - jnc .Lvmlaunch + /* Check @flags to see if vmlaunch or vmresume is needed. */ + testl $VMX_RUN_VMRESUME, WORD_SIZE(%_ASM_SP) + jz .Lvmlaunch /* * After a successful VMRESUME/VMLAUNCH, control flow "magically" -- 2.52.0.rc1.455.g30608eb744-goog From: Pawan Gupta TSA mitigation: d8010d4ba43e ("x86/bugs: Add a Transient Scheduler Attacks mitigation") introduced VM_CLEAR_CPU_BUFFERS for guests on AMD CPUs. Currently on Intel CLEAR_CPU_BUFFERS is being used for guests which has a much broader scope (kernel->user also). Make mitigations on Intel consistent with TSA. This would help handling the guest-only mitigations better in future. 
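For context on the first patch above: the @flags word that the assembly now tests directly from its stack copy is built on the C side by __vmx_vcpu_run_flags(). The abridged sketch below is illustrative only; the SPEC_CTRL check appears in the diffs later in this series, the ->launched check reflects the existing upstream code rather than anything in these patches, and the MMIO flag added later in the series is omitted.

unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
{
	unsigned int flags = 0;

	/* VMRESUME is only legal once this VMCS has been launched. */
	if (vmx->loaded_vmcs->launched)
		flags |= VMX_RUN_VMRESUME;

	/* Save guest SPEC_CTRL if the guest can write the MSR without exiting. */
	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
		flags |= VMX_RUN_SAVE_SPEC_CTRL;

	return flags;
}

On the assembly side, "testl $VMX_RUN_VMRESUME, WORD_SIZE(%_ASM_SP)" is simply "flags & VMX_RUN_VMRESUME" evaluated against the copy of @flags pushed one word above @regs at function entry, so no state needs to survive in RFLAGS across the guest register loads.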
Signed-off-by: Pawan Gupta [sean: make CLEAR_CPU_BUF_VM mutually exclusive with the MMIO mitigation] Acked-by: Borislav Petkov (AMD) Signed-off-by: Sean Christopherson --- arch/x86/kernel/cpu/bugs.c | 13 +++++++++---- arch/x86/kvm/vmx/vmenter.S | 2 +- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d7fa03bf51b4..c3a26532a209 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -194,7 +194,7 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); /* * Controls CPU Fill buffer clear before VMenter. This is a subset of - * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only + * X86_FEATURE_CLEAR_CPU_BUF_VM, and should only be enabled when KVM-only * mitigation is required. */ DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear); @@ -489,8 +489,8 @@ static enum rfds_mitigations rfds_mitigation __ro_after_init = IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF; /* - * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing - * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry. + * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing on exit to + * userspace *and* on entry to KVM guests. */ static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init; @@ -536,6 +536,7 @@ static void __init mds_apply_mitigation(void) if (mds_mitigation == MDS_MITIGATION_FULL || mds_mitigation == MDS_MITIGATION_VMWERV) { setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)) cpu_smt_disable(false); @@ -647,6 +648,7 @@ static void __init taa_apply_mitigation(void) * present on host, enable the mitigation for UCODE_NEEDED as well. */ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) cpu_smt_disable(false); @@ -748,6 +750,7 @@ static void __init mmio_apply_mitigation(void) */ if (verw_clear_cpu_buf_mitigation_selected) { setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); static_branch_disable(&cpu_buf_vm_clear); } else { static_branch_enable(&cpu_buf_vm_clear); @@ -839,8 +842,10 @@ static void __init rfds_update_mitigation(void) static void __init rfds_apply_mitigation(void) { - if (rfds_mitigation == RFDS_MITIGATION_VERW) + if (rfds_mitigation == RFDS_MITIGATION_VERW) { setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); + } } static __init int rfds_parse_cmdline(char *str) diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 93cf2ca7919a..7e7bb9b7162f 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -165,7 +165,7 @@ SYM_FUNC_START(__vmx_vcpu_run) mov VCPU_RAX(%_ASM_AX), %_ASM_AX /* Clobbers EFLAGS.ZF */ - CLEAR_CPU_BUFFERS + VM_CLEAR_CPU_BUFFERS /* Check @flags to see if vmlaunch or vmresume is needed. */ testl $VMX_RUN_VMRESUME, WORD_SIZE(%_ASM_SP) -- 2.52.0.rc1.455.g30608eb744-goog Decouple the use of ALTERNATIVE from the encoding of VERW to clear CPU buffers so that KVM can use ALTERNATIVE_2 to handle "always clear buffers" and "clear if guest can access host MMIO" in a single statement. No functional change intended. 
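The property that makes a single ALTERNATIVE_2 statement workable for this is that the last alternative wins when multiple feature bits are set. The sketch below is a conceptual model only, not the real apply_alternatives() implementation, and patch_alternative_2() is a made-up name used purely for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Two replacements target the same site and are applied in order. */
static void patch_alternative_2(void *site, size_t len,
				const void *repl1, bool feat1,
				const void *repl2, bool feat2)
{
	if (feat1)
		memcpy(site, repl1, len);	/* e.g. MMIO-conditional VERW */
	if (feat2)
		memcpy(site, repl2, len);	/* e.g. unconditional VERW; wins if both are set */
}

This is why the VM-Enter patch later in the series lists the MMIO-conditional clearing first and the unconditional VERW last: if both mitigations are selected, the stricter one is what ends up patched in.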
Reviewed-by: Brendan Jackman Reviewed-by: Pawan Gupta Signed-off-by: Sean Christopherson --- arch/x86/include/asm/nospec-branch.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 08ed5a2e46a5..8b4885a1b2ef 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -308,24 +308,24 @@ * CFLAGS.ZF. * Note: Only the memory operand variant of VERW clears the CPU buffers. */ -.macro __CLEAR_CPU_BUFFERS feature #ifdef CONFIG_X86_64 - ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature +#define VERW verw x86_verw_sel(%rip) #else - /* - * In 32bit mode, the memory operand must be a %cs reference. The data - * segments may not be usable (vm86 mode), and the stack segment may not - * be flat (ESPFIX32). - */ - ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature +/* + * In 32bit mode, the memory operand must be a %cs reference. The data segments + * may not be usable (vm86 mode), and the stack segment may not be flat (ESPFIX32). + */ +#define VERW verw %cs:x86_verw_sel #endif -.endm +#define __CLEAR_CPU_BUFFERS __stringify(VERW) + +/* If necessary, emit VERW on exit-to-userspace to clear CPU buffers. */ #define CLEAR_CPU_BUFFERS \ - __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF + ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF #define VM_CLEAR_CPU_BUFFERS \ - __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM + ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM #ifdef CONFIG_X86_64 .macro CLEAR_BRANCH_HISTORY -- 2.52.0.rc1.455.g30608eb744-goog Convert the MMIO Stale Data mitigation tracking from a static branch into an x86 feature flag so that it can be used via ALTERNATIVE_2 in KVM. No functional change intended. Reviewed-by: Pawan Gupta Reviewed-by: Brendan Jackman Signed-off-by: Sean Christopherson --- arch/x86/include/asm/cpufeatures.h | 5 +++++ arch/x86/include/asm/nospec-branch.h | 2 -- arch/x86/kernel/cpu/bugs.c | 11 +---------- arch/x86/kvm/mmu/spte.c | 2 +- arch/x86/kvm/vmx/vmx.c | 4 ++-- 5 files changed, 9 insertions(+), 15 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 7129eb44adad..646d2a77a2e2 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -501,6 +501,11 @@ #define X86_FEATURE_ABMC (21*32+15) /* Assignable Bandwidth Monitoring Counters */ #define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */ #define X86_FEATURE_X2AVIC_EXT (21*32+17) /* AMD SVM x2AVIC support for 4k vCPUs */ +#define X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO (21*32+18) /* + * Clear CPU buffers before VM-Enter if the vCPU + * can access host MMIO (ignored for all intents + * and purposes if CLEAR_CPU_BUF_VM is set). 
+ */ /* * BUG word(s) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 8b4885a1b2ef..b1b1811216c5 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -580,8 +580,6 @@ DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); -DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear); - extern u16 x86_verw_sel; #include diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c3a26532a209..70bb48ab46d7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -192,14 +192,6 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); */ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); -/* - * Controls CPU Fill buffer clear before VMenter. This is a subset of - * X86_FEATURE_CLEAR_CPU_BUF_VM, and should only be enabled when KVM-only - * mitigation is required. - */ -DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear); -EXPORT_SYMBOL_GPL(cpu_buf_vm_clear); - #undef pr_fmt #define pr_fmt(fmt) "mitigations: " fmt @@ -751,9 +743,8 @@ static void __init mmio_apply_mitigation(void) if (verw_clear_cpu_buf_mitigation_selected) { setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); - static_branch_disable(&cpu_buf_vm_clear); } else { - static_branch_enable(&cpu_buf_vm_clear); + setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO); } /* diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 37647afde7d3..85a0473809b0 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -292,7 +292,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, mark_page_dirty_in_slot(vcpu->kvm, slot, gfn); } - if (static_branch_unlikely(&cpu_buf_vm_clear) && + if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) && !kvm_vcpu_can_access_host_mmio(vcpu) && kvm_is_mmio_pfn(pfn, &is_host_mmio)) kvm_track_host_mmio_mapping(vcpu); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6f374c815ce2..18d9e0eacd0f 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -903,7 +903,7 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx) if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)) flags |= VMX_RUN_SAVE_SPEC_CTRL; - if (static_branch_unlikely(&cpu_buf_vm_clear) && + if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) && kvm_vcpu_can_access_host_mmio(&vmx->vcpu)) flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO; @@ -7352,7 +7352,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, */ if (static_branch_unlikely(&vmx_l1d_should_flush)) vmx_l1d_flush(vcpu); - else if (static_branch_unlikely(&cpu_buf_vm_clear) && + else if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) && (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO)) x86_clear_cpu_buffers(); -- 2.52.0.rc1.455.g30608eb744-goog Rework the handling of the MMIO Stale Data mitigation to clear CPU buffers immediately prior to VM-Enter, i.e. in the same location that KVM emits a VERW for unconditional (at runtime) clearing. Co-locating the code and using a single ALTERNATIVES_2 makes it more obvious how VMX mitigates the various vulnerabilities. Deliberately order the alternatives as: 0. Do nothing 1. Clear if vCPU can access MMIO 2. Clear always since the last alternative wins in ALTERNATIVES_2(), i.e. so that KVM will honor the strictest mitigation (always clear CPU buffers) if multiple mitigations are selected. E.g. 
even if the kernel chooses to mitigate MMIO Stale Data via X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO, another mitigation may enable X86_FEATURE_CLEAR_CPU_BUF_VM, and that other mitigation needs to win. Note, decoupling the MMIO mitigation from the L1TF mitigation also fixes a mostly-benign flaw where KVM wouldn't do any clearing/flushing if the L1TF mitigation is configured to conditionally flush the L1D, and the MMIO mitigation but not any other "clear CPU buffers" mitigation is enabled. For that specific scenario, KVM would skip clearing CPU buffers for the MMIO mitigation even though the kernel requested a clear on every VM-Enter. Note #2, the flaw goes back to the introduction of the MDS mitigation. The MDS mitigation was inadvertently fixed by commit 43fb862de8f6 ("KVM/VMX: Move VERW closer to VMentry for MDS mitigation"), but previous kernels that flush CPU buffers in vmx_vcpu_enter_exit() are affected (though it's unlikely the flaw is meaningfully exploitable even on older kernels). Fixes: 650b68a0622f ("x86/kvm/vmx: Add MDS protection when L1D Flush is not active") Suggested-by: Pawan Gupta Reviewed-by: Pawan Gupta Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmenter.S | 16 ++++++++++++++-- arch/x86/kvm/vmx/vmx.c | 13 ------------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 7e7bb9b7162f..92a85d95b3e4 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -71,6 +71,7 @@ * @regs: unsigned long * (to guest registers) * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH * VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl + * VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO: vCPU can access host MMIO * * Returns: * 0 on VM-Exit, 1 on VM-Fail @@ -164,8 +165,19 @@ SYM_FUNC_START(__vmx_vcpu_run) /* Load guest RAX. This kills the @regs pointer! */ mov VCPU_RAX(%_ASM_AX), %_ASM_AX - /* Clobbers EFLAGS.ZF */ - VM_CLEAR_CPU_BUFFERS + /* + * Note, ALTERNATIVE_2 works in reverse order. If CLEAR_CPU_BUF_VM is + * enabled, do VERW unconditionally. If CPU_BUF_VM_MMIO is enabled, + * check @flags to see if the vCPU has access to host MMIO, and if so, + * do VERW. Else, do nothing (no mitigations needed/enabled). + */ + ALTERNATIVE_2 "", \ + __stringify(testl $VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO, WORD_SIZE(%_ASM_SP); \ + jz .Lskip_mmio_verw; \ + VERW; \ + .Lskip_mmio_verw:), \ + X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO, \ + __stringify(VERW), X86_FEATURE_CLEAR_CPU_BUF_VM /* Check @flags to see if vmlaunch or vmresume is needed. */ testl $VMX_RUN_VMRESUME, WORD_SIZE(%_ASM_SP) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 18d9e0eacd0f..e378bd4d875c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7340,21 +7340,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, guest_state_enter_irqoff(); - /* - * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW - * mitigation for MDS is done late in VMentry and is still - * executed in spite of L1D Flush. This is because an extra VERW - * should not matter much after the big hammer L1D Flush. - * - * cpu_buf_vm_clear is used when system is not vulnerable to MDS/TAA, - * and is affected by MMIO Stale Data. In such cases mitigation in only - * needed against an MMIO capable guest.
- */ if (static_branch_unlikely(&vmx_l1d_should_flush)) vmx_l1d_flush(vcpu); - else if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO) && - (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO)) - x86_clear_cpu_buffers(); vmx_disable_fb_clear(vmx); -- 2.52.0.rc1.455.g30608eb744-goog Now that VMX encodes its own sequence for clearing CPU buffers, move VM_CLEAR_CPU_BUFFERS into SVM to minimize the chances of KVM botching a mitigation in the future, e.g. using VM_CLEAR_CPU_BUFFERS instead of checking multiple mitigation flags. No functional change intended. Reviewed-by: Brendan Jackman Acked-by: Borislav Petkov (AMD) Signed-off-by: Sean Christopherson --- arch/x86/include/asm/nospec-branch.h | 3 --- arch/x86/kvm/svm/vmenter.S | 6 ++++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index b1b1811216c5..3a5ac39877a0 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -324,9 +324,6 @@ #define CLEAR_CPU_BUFFERS \ ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF -#define VM_CLEAR_CPU_BUFFERS \ - ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM - #ifdef CONFIG_X86_64 .macro CLEAR_BRANCH_HISTORY ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 98bfa2e00d88..3392bcadfb89 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -116,6 +116,8 @@ jmp 901b .endm +#define SVM_CLEAR_CPU_BUFFERS \ + ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM /** * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode @@ -194,7 +196,7 @@ SYM_FUNC_START(__svm_vcpu_run) mov VCPU_RDI(%_ASM_DI), %_ASM_DI /* Clobbers EFLAGS.ZF */ - VM_CLEAR_CPU_BUFFERS + SVM_CLEAR_CPU_BUFFERS /* Enter guest mode */ 3: vmrun %_ASM_AX @@ -366,7 +368,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) mov KVM_VMCB_pa(%rax), %rax /* Clobbers EFLAGS.ZF */ - VM_CLEAR_CPU_BUFFERS + SVM_CLEAR_CPU_BUFFERS /* Enter guest mode */ 1: vmrun %rax -- 2.52.0.rc1.455.g30608eb744-goog Move vmx_l1d_flush(), vmx_cleanup_l1d_flush(), and the vmentry_l1d_flush param code up in vmx.c so that all of the L1 data cache flushing code is bundled together. This will allow conditioning the mitigation code on CONFIG_CPU_MITIGATIONS=y with minimal #ifdefs. No functional change intended. Reviewed-by: Brendan Jackman Reviewed-by: Pawan Gupta Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 174 ++++++++++++++++++++--------------------- 1 file changed, 87 insertions(+), 87 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e378bd4d875c..b39e083671bc 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -302,6 +302,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) return 0; } +static void vmx_cleanup_l1d_flush(void) +{ + if (vmx_l1d_flush_pages) { + free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); + vmx_l1d_flush_pages = NULL; + } + /* Restore state so sysfs ignores VMX */ + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; +} + static int vmentry_l1d_flush_parse(const char *s) { unsigned int i; @@ -352,6 +362,83 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); } +/* + * Software based L1D cache flush which is used when microcode providing + * the cache control MSR is not loaded. 
+ * + * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to + * flush it is required to read in 64 KiB because the replacement algorithm + * is not exactly LRU. This could be sized at runtime via topology + * information but as all relevant affected CPUs have 32KiB L1D cache size + * there is no point in doing so. + */ +static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) +{ + int size = PAGE_SIZE << L1D_CACHE_ORDER; + + /* + * This code is only executed when the flush mode is 'cond' or + * 'always' + */ + if (static_branch_likely(&vmx_l1d_flush_cond)) { + bool flush_l1d; + + /* + * Clear the per-vcpu flush bit, it gets set again if the vCPU + * is reloaded, i.e. if the vCPU is scheduled out or if KVM + * exits to userspace, or if KVM reaches one of the unsafe + * VMEXIT handlers, e.g. if KVM calls into the emulator. + */ + flush_l1d = vcpu->arch.l1tf_flush_l1d; + vcpu->arch.l1tf_flush_l1d = false; + + /* + * Clear the per-cpu flush bit, it gets set again from + * the interrupt handlers. + */ + flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); + kvm_clear_cpu_l1tf_flush_l1d(); + + if (!flush_l1d) + return; + } + + vcpu->stat.l1d_flush++; + + if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { + native_wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH); + return; + } + + asm volatile( + /* First ensure the pages are in the TLB */ + "xorl %%eax, %%eax\n" + ".Lpopulate_tlb:\n\t" + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" + "addl $4096, %%eax\n\t" + "cmpl %%eax, %[size]\n\t" + "jne .Lpopulate_tlb\n\t" + "xorl %%eax, %%eax\n\t" + "cpuid\n\t" + /* Now fill the cache */ + "xorl %%eax, %%eax\n" + ".Lfill_cache:\n" + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" + "addl $64, %%eax\n\t" + "cmpl %%eax, %[size]\n\t" + "jne .Lfill_cache\n\t" + "lfence\n" + :: [flush_pages] "r" (vmx_l1d_flush_pages), + [size] "r" (size) + : "eax", "ebx", "ecx", "edx"); +} + +static const struct kernel_param_ops vmentry_l1d_flush_ops = { + .set = vmentry_l1d_flush_set, + .get = vmentry_l1d_flush_get, +}; +module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); + static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) { u64 msr; @@ -404,12 +491,6 @@ static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) vmx->disable_fb_clear = false; } -static const struct kernel_param_ops vmentry_l1d_flush_ops = { - .set = vmentry_l1d_flush_set, - .get = vmentry_l1d_flush_get, -}; -module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); - static u32 vmx_segment_access_rights(struct kvm_segment *var); void vmx_vmexit(void); @@ -6673,77 +6754,6 @@ int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) return ret; } -/* - * Software based L1D cache flush which is used when microcode providing - * the cache control MSR is not loaded. - * - * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to - * flush it is required to read in 64 KiB because the replacement algorithm - * is not exactly LRU. This could be sized at runtime via topology - * information but as all relevant affected CPUs have 32KiB L1D cache size - * there is no point in doing so. - */ -static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) -{ - int size = PAGE_SIZE << L1D_CACHE_ORDER; - - /* - * This code is only executed when the flush mode is 'cond' or - * 'always' - */ - if (static_branch_likely(&vmx_l1d_flush_cond)) { - bool flush_l1d; - - /* - * Clear the per-vcpu flush bit, it gets set again if the vCPU - * is reloaded, i.e. 
if the vCPU is scheduled out or if KVM - * exits to userspace, or if KVM reaches one of the unsafe - * VMEXIT handlers, e.g. if KVM calls into the emulator. - */ - flush_l1d = vcpu->arch.l1tf_flush_l1d; - vcpu->arch.l1tf_flush_l1d = false; - - /* - * Clear the per-cpu flush bit, it gets set again from - * the interrupt handlers. - */ - flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); - kvm_clear_cpu_l1tf_flush_l1d(); - - if (!flush_l1d) - return; - } - - vcpu->stat.l1d_flush++; - - if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { - native_wrmsrq(MSR_IA32_FLUSH_CMD, L1D_FLUSH); - return; - } - - asm volatile( - /* First ensure the pages are in the TLB */ - "xorl %%eax, %%eax\n" - ".Lpopulate_tlb:\n\t" - "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" - "addl $4096, %%eax\n\t" - "cmpl %%eax, %[size]\n\t" - "jne .Lpopulate_tlb\n\t" - "xorl %%eax, %%eax\n\t" - "cpuid\n\t" - /* Now fill the cache */ - "xorl %%eax, %%eax\n" - ".Lfill_cache:\n" - "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" - "addl $64, %%eax\n\t" - "cmpl %%eax, %[size]\n\t" - "jne .Lfill_cache\n\t" - "lfence\n" - :: [flush_pages] "r" (vmx_l1d_flush_pages), - [size] "r" (size) - : "eax", "ebx", "ecx", "edx"); -} - void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); @@ -8671,16 +8681,6 @@ __init int vmx_hardware_setup(void) return r; } -static void vmx_cleanup_l1d_flush(void) -{ - if (vmx_l1d_flush_pages) { - free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); - vmx_l1d_flush_pages = NULL; - } - /* Restore state so sysfs ignores VMX */ - l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; -} - void vmx_exit(void) { allow_smaller_maxphyaddr = false; -- 2.52.0.rc1.455.g30608eb744-goog Disable support for flushing the L1 data cache to mitigate L1TF if CPU mitigations are disabled for the entire kernel. KVM's mitigation of L1TF is in no way special enough to justify ignoring CONFIG_CPU_MITIGATIONS=n. Deliberately use CPU_MITIGATIONS instead of the more precise MITIGATION_L1TF, as MITIGATION_L1TF only controls the default behavior, i.e. CONFIG_MITIGATION_L1TF=n doesn't completely disable L1TF mitigations in the kernel. Keep the vmentry_l1d_flush module param to avoid breaking existing setups, and leverage the .set path to alert the user to the fact that vmentry_l1d_flush will be ignored. Don't bother validating the incoming value; if an admin misconfigures vmentry_l1d_flush, the fact that the bad configuration won't be detected when running with CONFIG_CPU_MITIGATIONS=n is likely the least of their worries. 
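Condensed, the vmx.c portion of the change below boils down to providing the real implementations under CONFIG_CPU_MITIGATIONS and no-op or warning stubs otherwise, so that call sites such as vmx_vcpu_enter_exit() and vmx_init() stay free of #ifdefs. A sketch using only names that appear in the diff:

#ifdef CONFIG_CPU_MITIGATIONS
static int vmx_setup_l1d_flush(void)
{
	/* 'auto' from the pre-module-init parser becomes the default 'cond'. */
	return __vmx_setup_l1d_flush(vmentry_l1d_flush_param);
}

static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&vmx_l1d_should_flush))
		return;
	/* ... MSR-based or software L1D flush ... */
}
#else /* CONFIG_CPU_MITIGATIONS */
static int vmx_setup_l1d_flush(void)
{
	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NEVER;
	return 0;
}

static __always_inline void vmx_l1d_flush(struct kvm_vcpu *vcpu) { }

static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
	pr_warn_once("Kernel compiled without mitigations, ignoring vmentry_l1d_flush\n");
	return 0;
}

static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
	return sysfs_emit(s, "never\n");
}
#endif

The kernel_param_ops wiring is unchanged, so writing vmentry_l1d_flush with mitigations compiled out still succeeds, it just warns once and is otherwise ignored, and reading the parameter reports "never".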
Reviewed-by: Brendan Jackman Signed-off-by: Sean Christopherson --- arch/x86/include/asm/hardirq.h | 4 +-- arch/x86/kvm/vmx/vmx.c | 56 ++++++++++++++++++++++++++-------- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index f00c09ffe6a9..6b6d472baa0b 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -5,7 +5,7 @@ #include typedef struct { -#if IS_ENABLED(CONFIG_KVM_INTEL) +#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL) u8 kvm_cpu_l1tf_flush_l1d; #endif unsigned int __nmi_count; /* arch dependent */ @@ -68,7 +68,7 @@ extern u64 arch_irq_stat(void); DECLARE_PER_CPU_CACHE_HOT(u16, __softirq_pending); #define local_softirq_pending_ref __softirq_pending -#if IS_ENABLED(CONFIG_KVM_INTEL) +#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL) /* * This function is called from noinstr interrupt contexts * and must be inlined to not get instrumentation. diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b39e083671bc..ae6b102b1570 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -203,6 +203,7 @@ module_param(pt_mode, int, S_IRUGO); struct x86_pmu_lbr __ro_after_init vmx_lbr_caps; +#ifdef CONFIG_CPU_MITIGATIONS static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); static DEFINE_MUTEX(vmx_l1d_flush_mutex); @@ -225,7 +226,7 @@ static const struct { #define L1D_CACHE_ORDER 4 static void *vmx_l1d_flush_pages; -static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) +static int __vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) { struct page *page; unsigned int i; @@ -302,6 +303,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) return 0; } +static int vmx_setup_l1d_flush(void) +{ + /* + * Hand the parameter mitigation value in which was stored in the pre + * module init parser. If no parameter was given, it will contain + * 'auto' which will be turned into the default 'cond' mitigation mode. 
+ */ + return __vmx_setup_l1d_flush(vmentry_l1d_flush_param); +} + static void vmx_cleanup_l1d_flush(void) { if (vmx_l1d_flush_pages) { @@ -349,7 +360,7 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) } mutex_lock(&vmx_l1d_flush_mutex); - ret = vmx_setup_l1d_flush(l1tf); + ret = __vmx_setup_l1d_flush(l1tf); mutex_unlock(&vmx_l1d_flush_mutex); return ret; } @@ -376,6 +387,9 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) { int size = PAGE_SIZE << L1D_CACHE_ORDER; + if (!static_branch_unlikely(&vmx_l1d_should_flush)) + return; + /* * This code is only executed when the flush mode is 'cond' or * 'always' @@ -433,6 +447,31 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) : "eax", "ebx", "ecx", "edx"); } +#else /* CONFIG_CPU_MITIGATIONS*/ +static int vmx_setup_l1d_flush(void) +{ + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NEVER; + return 0; +} +static void vmx_cleanup_l1d_flush(void) +{ + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; +} +static __always_inline void vmx_l1d_flush(struct kvm_vcpu *vcpu) +{ + +} +static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) +{ + pr_warn_once("Kernel compiled without mitigations, ignoring vmentry_l1d_flush\n"); + return 0; +} +static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) +{ + return sysfs_emit(s, "never\n"); +} +#endif + static const struct kernel_param_ops vmentry_l1d_flush_ops = { .set = vmentry_l1d_flush_set, .get = vmentry_l1d_flush_get, @@ -7350,8 +7389,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, guest_state_enter_irqoff(); - if (static_branch_unlikely(&vmx_l1d_should_flush)) - vmx_l1d_flush(vcpu); + vmx_l1d_flush(vcpu); vmx_disable_fb_clear(vmx); @@ -8716,14 +8754,8 @@ int __init vmx_init(void) if (r) return r; - /* - * Must be called after common x86 init so enable_ept is properly set - * up. Hand the parameter mitigation value in which was stored in - * the pre module init parser. If no parameter was given, it will - * contain 'auto' which will be turned into the default 'cond' - * mitigation mode. - */ - r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); + /* Must be called after common x86 init so enable_ept is setup. */ + r = vmx_setup_l1d_flush(); if (r) goto err_l1d_flush; -- 2.52.0.rc1.455.g30608eb744-goog From: Brendan Jackman Currently, the need to flush L1D for L1TF is tracked by two bits: one per-CPU and one per-vCPU. The per-vCPU bit is always set when the vCPU shows up on a core, so there is no interesting state that's truly per-vCPU. Indeed, this is a requirement, since L1D is a part of the physical CPU. So simplify this by combining the two bits. The vCPU bit was being written from preemption-enabled regions. To play nice with those cases, wrap all writes from KVM in a helper and use a raw write so that requesting a flush with preemption enabled doesn't trigger what would effectively be DEBUG_PREEMPT false positives. Preemption doesn't need to be disabled, as kvm_arch_vcpu_load() will mark the new CPU as needing a flush if the vCPU task is migrated, or if userspace runs the vCPU on a different task.
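Condensed from the diff below, the request side of the new scheme is a single raw per-CPU write; the consumer is vmx_l1d_flush() in 'cond' mode, which reads and clears that same per-CPU flag with IRQs already disabled right before VM-Enter.

/*
 * New helper in x86.h (names as in the diff).  A raw write suffices: if the
 * vCPU is migrated, or userspace runs it from another task, before the next
 * VM-Enter, kvm_arch_vcpu_load() requests a flush on the new CPU.
 */
static __always_inline void kvm_request_l1tf_flush_l1d(void)
{
#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL)
	raw_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
#endif
}

The raw accessor avoids what would effectively be DEBUG_PREEMPT false positives from preemptible callers such as the emulator and page-fault paths, without actually needing preemption disabled for correctness.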
Signed-off-by: Brendan Jackman [sean: put raw write in KVM instead of in a hardirq.h variant] Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 3 --- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 20 +++++--------------- arch/x86/kvm/x86.c | 6 +++--- arch/x86/kvm/x86.h | 14 ++++++++++++++ 6 files changed, 24 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1a8ea5dc6699..9f9839bbce13 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1055,9 +1055,6 @@ struct kvm_vcpu_arch { /* be preempted when it's in kernel-mode(cpl=0) */ bool preempted_in_kernel; - /* Flush the L1 Data cache for L1TF mitigation on VMENTER */ - bool l1tf_flush_l1d; - /* Host CPU on which VM-entry was most recently attempted */ int last_vmentry_cpu; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3cfabfbdd843..02c450686b4a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4859,7 +4859,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, */ BUILD_BUG_ON(lower_32_bits(PFERR_SYNTHETIC_MASK)); - vcpu->arch.l1tf_flush_l1d = true; + kvm_request_l1tf_flush_l1d(); if (!flags) { trace_kvm_page_fault(vcpu, fault_address, error_code); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 564f5af5ae86..40777278eabb 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -3828,7 +3828,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) goto vmentry_failed; /* Hide L1D cache contents from the nested guest. */ - vcpu->arch.l1tf_flush_l1d = true; + kvm_request_l1tf_flush_l1d(); /* * Must happen outside of nested_vmx_enter_non_root_mode() as it will diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ae6b102b1570..df4cfcc6591a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -395,26 +395,16 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu) * 'always' */ if (static_branch_likely(&vmx_l1d_flush_cond)) { - bool flush_l1d; - /* - * Clear the per-vcpu flush bit, it gets set again if the vCPU + * Clear the per-cpu flush bit, it gets set again if the vCPU * is reloaded, i.e. if the vCPU is scheduled out or if KVM * exits to userspace, or if KVM reaches one of the unsafe - * VMEXIT handlers, e.g. if KVM calls into the emulator. + * VMEXIT handlers, e.g. if KVM calls into the emulator, + * or from the interrupt handlers. */ - flush_l1d = vcpu->arch.l1tf_flush_l1d; - vcpu->arch.l1tf_flush_l1d = false; - - /* - * Clear the per-cpu flush bit, it gets set again from - * the interrupt handlers. - */ - flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); + if (!kvm_get_cpu_l1tf_flush_l1d()) + return; kvm_clear_cpu_l1tf_flush_l1d(); - - if (!flush_l1d) - return; } vcpu->stat.l1d_flush++; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9c2e28028c2b..ae774519b701 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5209,7 +5209,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); - vcpu->arch.l1tf_flush_l1d = true; + kvm_request_l1tf_flush_l1d(); if (vcpu->scheduled_out && pmu->version && pmu->event_count) { pmu->need_cleanup = true; @@ -8032,7 +8032,7 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { /* kvm_write_guest_virt_system can pull in tons of pages. 
*/ - vcpu->arch.l1tf_flush_l1d = true; + kvm_request_l1tf_flush_l1d(); return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, PFERR_WRITE_MASK, exception); @@ -9440,7 +9440,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, return handle_emulation_failure(vcpu, emulation_type); } - vcpu->arch.l1tf_flush_l1d = true; + kvm_request_l1tf_flush_l1d(); if (!(emulation_type & EMULTYPE_NO_DECODE)) { kvm_clear_exception_queue(vcpu); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 24c754b0db2e..fdab0ad49098 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -420,6 +420,20 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) return !(kvm->arch.disabled_quirks & quirk); } +static __always_inline void kvm_request_l1tf_flush_l1d(void) +{ +#if IS_ENABLED(CONFIG_CPU_MITIGATIONS) && IS_ENABLED(CONFIG_KVM_INTEL) + /* + * Use a raw write to set the per-CPU flag, as KVM will ensure a flush + * even if preemption is currently enabled.. If the current vCPU task + * is migrated to a different CPU (or userspace runs the vCPU on a + * different task) before the next VM-Entry, then kvm_arch_vcpu_load() + * will request a flush on the new CPU. + */ + raw_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); +#endif +} + void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); u64 get_kvmclock_ns(struct kvm *kvm); -- 2.52.0.rc1.455.g30608eb744-goog
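For reference, the per-CPU accessors that the consolidated tracking relies on live in arch/x86/include/asm/hardirq.h. They are not part of these diffs; the sketch below reflects my reading of the upstream header and should be treated as illustrative rather than authoritative.

/* Set from (noinstr) interrupt handlers, hence __always_inline. */
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}

static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
}

static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
{
	return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}

kvm_request_l1tf_flush_l1d() deliberately bypasses the __this_cpu variants and uses raw_cpu_write() so that it can be called with preemption enabled.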