Commit d8010d4ba43e ("x86/bugs: Add a Transient Scheduler Attacks
mitigation") introduced VM_CLEAR_CPU_BUFFERS for guests on AMD CPUs as
part of the TSA mitigation. On Intel, CLEAR_CPU_BUFFERS is currently used
for guests as well, even though it has a much broader scope (it also
covers kernel->user transitions). Make the mitigations on Intel
consistent with TSA. This will help handle guest-only mitigations better
in the future.

Signed-off-by: Pawan Gupta
---
 arch/x86/kernel/cpu/bugs.c | 9 +++++++--
 arch/x86/kvm/vmx/vmenter.S | 3 ++-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index d7fa03bf51b4517c12cc68e7c441f7589a4983d1..6d00a9ea7b4f28da291114a7a096b26cc129b57e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -194,7 +194,7 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
 /*
  * Controls CPU Fill buffer clear before VMenter. This is a subset of
- * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
+ * X86_FEATURE_CLEAR_CPU_BUF_VM, and should only be enabled when KVM-only
  * mitigation is required.
  */
 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
@@ -536,6 +536,7 @@ static void __init mds_apply_mitigation(void)
 	if (mds_mitigation == MDS_MITIGATION_FULL ||
 	    mds_mitigation == MDS_MITIGATION_VMWERV) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 		    (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
 			cpu_smt_disable(false);
@@ -647,6 +648,7 @@ static void __init taa_apply_mitigation(void)
 	 * present on host, enable the mitigation for UCODE_NEEDED as well.
 	 */
 	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 
 	if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
 		cpu_smt_disable(false);
@@ -752,6 +754,7 @@ static void __init mmio_apply_mitigation(void)
 	} else {
 		static_branch_enable(&cpu_buf_vm_clear);
 	}
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 
 	/*
 	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
@@ -839,8 +842,10 @@ static void __init rfds_update_mitigation(void)
 
 static void __init rfds_apply_mitigation(void)
 {
-	if (rfds_mitigation == RFDS_MITIGATION_VERW)
+	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+	}
 }
 
 static __init int rfds_parse_cmdline(char *str)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index bc255d709d8a16ae22b5bc401965d209a89a8692..0dd23beae207795484150698d1674dc4044cc520 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -161,7 +161,8 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
 	/* Clobbers EFLAGS.ZF */
-	CLEAR_CPU_BUFFERS
+	VM_CLEAR_CPU_BUFFERS
+.Lskip_clear_cpu_buffers:
 
 	/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
 	jnc .Lvmlaunch
-- 
2.34.1
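To make the split concrete, here is a minimal standalone C sketch (not
kernel code) of the capability pattern this patch establishes: host-wide
VERW mitigations now force both capability bits, while a TSA-style
guest-only mitigation would force only the _VM one. The FAKE_* names and
the trivial setup_force_cpu_cap() here are stand-ins, not the kernel's
implementations.

/* Standalone model of the two VERW capability flavors. */
#include <stdio.h>
#include <stdbool.h>

enum fake_cap {
	FAKE_CLEAR_CPU_BUF,	/* VERW on kernel->user and at VMentry */
	FAKE_CLEAR_CPU_BUF_VM,	/* VERW at VMentry only */
	FAKE_NR_CAPS,
};

static bool caps[FAKE_NR_CAPS];

static void setup_force_cpu_cap(enum fake_cap cap)
{
	caps[cap] = true;
}

/* TSA-style guest-only mitigation: VERW at VMentry is sufficient. */
static void guest_only_apply_mitigation(void)
{
	setup_force_cpu_cap(FAKE_CLEAR_CPU_BUF_VM);
}

/* MDS-style mitigation: data can also leak across kernel->user, so
 * both flavors are forced, as this patch now does on Intel. */
static void mds_apply_mitigation(void)
{
	setup_force_cpu_cap(FAKE_CLEAR_CPU_BUF);
	setup_force_cpu_cap(FAKE_CLEAR_CPU_BUF_VM);
}

int main(void)
{
	guest_only_apply_mitigation();
	printf("kernel->user VERW: %d, VMentry VERW: %d\n",
	       caps[FAKE_CLEAR_CPU_BUF], caps[FAKE_CLEAR_CPU_BUF_VM]);
	/* prints: kernel->user VERW: 0, VMentry VERW: 1 */

	mds_apply_mitigation();
	printf("kernel->user VERW: %d, VMentry VERW: %d\n",
	       caps[FAKE_CLEAR_CPU_BUF], caps[FAKE_CLEAR_CPU_BUF_VM]);
	/* prints: kernel->user VERW: 1, VMentry VERW: 1 */
	return 0;
}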
The cpu_buf_vm_clear static key is only used by the MMIO Stale Data
mitigation. Rename it to avoid mixing it up with
X86_FEATURE_CLEAR_CPU_BUF_VM.

Signed-off-by: Pawan Gupta
---
 arch/x86/include/asm/nospec-branch.h | 2 +-
 arch/x86/kernel/cpu/bugs.c           | 8 ++++----
 arch/x86/kvm/mmu/spte.c              | 2 +-
 arch/x86/kvm/vmx/vmx.c               | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 08ed5a2e46a5fd790bcb1b73feb6469518809c06..cb46f5d188de47834466474ec8030bb2a2e4fdf3 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -580,7 +580,7 @@
 DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
-DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear_mmio_only);
 
 extern u16 x86_verw_sel;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6d00a9ea7b4f28da291114a7a096b26cc129b57e..e7c31c23fbeeb1aba4f538934c1e8a997adff522 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -197,8 +197,8 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
  * X86_FEATURE_CLEAR_CPU_BUF_VM, and should only be enabled when KVM-only
  * mitigation is required.
  */
-DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
-EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
+DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear_mmio_only);
+EXPORT_SYMBOL_GPL(cpu_buf_vm_clear_mmio_only);
 
 #undef pr_fmt
 #define pr_fmt(fmt) "mitigations: " fmt
@@ -750,9 +750,9 @@ static void __init mmio_apply_mitigation(void)
 	 */
 	if (verw_clear_cpu_buf_mitigation_selected) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
-		static_branch_disable(&cpu_buf_vm_clear);
+		static_branch_disable(&cpu_buf_vm_clear_mmio_only);
 	} else {
-		static_branch_enable(&cpu_buf_vm_clear);
+		static_branch_enable(&cpu_buf_vm_clear_mmio_only);
 	}
 	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 37647afde7d3acfa1301a771ac44792eab879495..380d6675027499715e49e5b35ef76e17451fd77b 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -292,7 +292,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
 	}
 
-	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	if (static_branch_unlikely(&cpu_buf_vm_clear_mmio_only) &&
 	    !kvm_vcpu_can_access_host_mmio(vcpu) &&
 	    kvm_is_mmio_pfn(pfn, &is_host_mmio))
 		kvm_track_host_mmio_mapping(vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f87c216d976d7d344c924aa4cc18fe1bf8f9b731..451be757b3d1b2fec6b2b79157f26dd43bc368b8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -903,7 +903,7 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
 	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
 
-	if (static_branch_unlikely(&cpu_buf_vm_clear) &&
+	if (static_branch_unlikely(&cpu_buf_vm_clear_mmio_only) &&
 	    kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
 		flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
 
-- 
2.34.1
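For clarity on what the renamed key now means, a small standalone C
sketch (not kernel code) of the selection logic in
mmio_apply_mitigation(); a plain bool stands in for the static key, and
the parameter name mirrors the kernel variable:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the cpu_buf_vm_clear_mmio_only static key. */
static bool cpu_buf_vm_clear_mmio_only;

/*
 * Mirrors mmio_apply_mitigation(): the MMIO-only key is enabled only
 * when no full VERW mitigation (MDS/TAA/RFDS) already covers VMentry.
 */
static void mmio_apply_mitigation(bool verw_clear_cpu_buf_mitigation_selected)
{
	cpu_buf_vm_clear_mmio_only = !verw_clear_cpu_buf_mitigation_selected;
}

int main(void)
{
	mmio_apply_mitigation(true);	/* MDS/TAA/RFDS also mitigated */
	printf("mmio_only key: %d\n", cpu_buf_vm_clear_mmio_only); /* 0 */

	mmio_apply_mitigation(false);	/* affected by MMIO Stale Data only */
	printf("mmio_only key: %d\n", cpu_buf_vm_clear_mmio_only); /* 1 */
	return 0;
}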
When a system is only affected by MMIO Stale Data, the VERW mitigation is
currently handled differently from the other data sampling attacks
(MDS/TAA/RFDS), which execute the VERW in asm. This is because for MMIO
Stale Data, VERW is needed only when the guest can access host MMIO, and
that was tricky to check in asm. The refactoring done by 83ebe7157483
("KVM: VMX: Apply MMIO Stale Data mitigation if KVM maps MMIO into the
guest") now makes it easier to execute the VERW conditionally in asm,
based on VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO.

Unify the MMIO Stale Data mitigation with the other VERW-based
mitigations and have only a single VERW call site in __vmx_vcpu_run().
Remove the now-unnecessary call to x86_clear_cpu_buffers() in
vmx_vcpu_enter_exit().

This also untangles the L1D Flush and MMIO Stale Data mitigations.
Earlier, an L1D Flush would skip the VERW for MMIO Stale Data; now the
two mitigations are independent of each other. This has little practical
implication, since no CPU that is affected by L1TF is *only* affected by
MMIO Stale Data (i.e. not affected by MDS/TAA/RFDS), but it makes the
code cleaner and easier to maintain.

Signed-off-by: Pawan Gupta
---
 arch/x86/kvm/vmx/run_flags.h | 12 ++++++------
 arch/x86/kvm/vmx/vmenter.S   |  5 +++++
 arch/x86/kvm/vmx/vmx.c       | 26 ++++++++++----------------
 3 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index 2f20fb170def8b10c8c0c46f7ba751f845c19e2c..004fe1ca89f05524bf3986540056de2caf0abbad 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,12 +2,12 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME_SHIFT				0
-#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT			1
-#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT	2
+#define VMX_RUN_VMRESUME_SHIFT			0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT		1
+#define VMX_RUN_CLEAR_CPU_BUFFERS_SHIFT		2
 
-#define VMX_RUN_VMRESUME			BIT(VMX_RUN_VMRESUME_SHIFT)
-#define VMX_RUN_SAVE_SPEC_CTRL			BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
-#define VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO	BIT(VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO_SHIFT)
+#define VMX_RUN_VMRESUME		BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL		BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
+#define VMX_RUN_CLEAR_CPU_BUFFERS	BIT(VMX_RUN_CLEAR_CPU_BUFFERS_SHIFT)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 0dd23beae207795484150698d1674dc4044cc520..ec91f4267eca319ffa8e6079887e8dfecc7f96d8 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -137,6 +137,9 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Load @regs to RAX. */
 	mov (%_ASM_SP), %_ASM_AX
 
+	/* jz .Lskip_clear_cpu_buffers below relies on this */
+	test $VMX_RUN_CLEAR_CPU_BUFFERS, %ebx
+
 	/* Check if vmlaunch or vmresume is needed */
 	bt   $VMX_RUN_VMRESUME_SHIFT, %ebx
 
@@ -160,6 +163,8 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Load guest RAX. This kills the @regs pointer! */
 	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
+	/* Check EFLAGS.ZF from the VMX_RUN_CLEAR_CPU_BUFFERS bit test above */
+	jz .Lskip_clear_cpu_buffers
 	/* Clobbers EFLAGS.ZF */
 	VM_CLEAR_CPU_BUFFERS
 .Lskip_clear_cpu_buffers:
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 451be757b3d1b2fec6b2b79157f26dd43bc368b8..303935882a9f8d1d8f81a499cdce1fdc8dad62f0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -903,9 +903,16 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
 	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
 		flags |= VMX_RUN_SAVE_SPEC_CTRL;
 
-	if (static_branch_unlikely(&cpu_buf_vm_clear_mmio_only) &&
-	    kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
-		flags |= VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO;
+	/*
+	 * When affected by MMIO Stale Data only (and not other data sampling
+	 * attacks), clear CPU buffers only for MMIO-capable guests.
+	 */
+	if (static_branch_unlikely(&cpu_buf_vm_clear_mmio_only)) {
+		if (kvm_vcpu_can_access_host_mmio(&vmx->vcpu))
+			flags |= VMX_RUN_CLEAR_CPU_BUFFERS;
+	} else {
+		flags |= VMX_RUN_CLEAR_CPU_BUFFERS;
+	}
 
 	return flags;
 }
@@ -7320,21 +7327,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 	guest_state_enter_irqoff();
 
-	/*
-	 * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
-	 * mitigation for MDS is done late in VMentry and is still
-	 * executed in spite of L1D Flush. This is because an extra VERW
-	 * should not matter much after the big hammer L1D Flush.
-	 *
-	 * cpu_buf_vm_clear is used when system is not vulnerable to MDS/TAA,
-	 * and is affected by MMIO Stale Data. In such cases mitigation in only
-	 * needed against an MMIO capable guest.
-	 */
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
-	else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
-		 (flags & VMX_RUN_CLEAR_CPU_BUFFERS_FOR_MMIO))
-		x86_clear_cpu_buffers();
 
 	vmx_disable_fb_clear(vmx);
 
-- 
2.34.1
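Putting the series together, a minimal standalone C sketch (not kernel
code) of the unified run-flags decision; the two bool parameters stand in
for the cpu_buf_vm_clear_mmio_only static key and for
kvm_vcpu_can_access_host_mmio():

#include <stdio.h>
#include <stdbool.h>

#define BIT(n)	(1u << (n))

#define VMX_RUN_VMRESUME		BIT(0)
#define VMX_RUN_SAVE_SPEC_CTRL		BIT(1)
#define VMX_RUN_CLEAR_CPU_BUFFERS	BIT(2)

/* Models the VMX_RUN_CLEAR_CPU_BUFFERS part of __vmx_vcpu_run_flags(). */
static unsigned int run_flags(bool mmio_only_key, bool guest_has_host_mmio)
{
	unsigned int flags = 0;

	if (mmio_only_key) {
		/* Only MMIO Stale Data: VERW needed just for
		 * MMIO-capable guests. */
		if (guest_has_host_mmio)
			flags |= VMX_RUN_CLEAR_CPU_BUFFERS;
	} else {
		/* Request the clear unconditionally; the VERW inside
		 * VM_CLEAR_CPU_BUFFERS is still an alternative gated on
		 * X86_FEATURE_CLEAR_CPU_BUF_VM, so unaffected systems
		 * fall through to a nop. */
		flags |= VMX_RUN_CLEAR_CPU_BUFFERS;
	}
	return flags;
}

int main(void)
{
	/* The asm side then does: test $VMX_RUN_CLEAR_CPU_BUFFERS, %ebx;
	 * jz .Lskip_clear_cpu_buffers -- VERW runs iff the bit is set. */
	printf("%u %u %u\n",
	       run_flags(true, false),	/* 0: MMIO-only, no host MMIO */
	       run_flags(true, true),	/* 4: MMIO-only, MMIO-capable */
	       run_flags(false, false));/* 4: full VERW mitigation */
	return 0;
}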