Adding more mitigation options at exit-to-userspace for VMSCAPE would
usually require a series of checks to decide which mitigation to use.
In this case, the mitigation is performed by calling a function that is
selected at boot, so adding more feature flags and more checks can be
avoided by invoking the mitigating function through a static_call().

Replace the flag-based mitigation selector with a static_call(). This
also frees up the existing X86_FEATURE_IBPB_EXIT_TO_USER feature bit.

Suggested-by: Dave Hansen
Tested-by: Jon Kohler
Signed-off-by: Pawan Gupta
---
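Notes (not part of the changelog): below is a minimal sketch of the
static_call() pattern this patch relies on, for reviewers who have not
used the API. The demo_* names are hypothetical and purely
illustrative; the patch wires up vmscape_predictor_flush in the same
way, with write_ibpb() as the boot-selected target.

  #include <linux/init.h>
  #include <linux/types.h>
  #include <linux/static_call.h>

  /* Stand-in for write_ibpb() or a future BHB-clearing sequence. */
  static void demo_flush(void)
  {
  }

  /*
   * NULL-initialized static call: the second argument only provides
   * the function type. Until a target is installed, invoking it via
   * static_call_cond() is a NOP, so the unmitigated fast path is free.
   */
  DEFINE_STATIC_CALL_NULL(demo_predictor_flush, demo_flush);

  static void __init demo_select_mitigation(void)
  {
          /* One-time boot decision, patched in as a direct call. */
          static_call_update(demo_predictor_flush, demo_flush);
  }

  static void demo_exit_to_user(void)
  {
          /* Direct call if a target is set, NOP otherwise. */
          static_call_cond(demo_predictor_flush)();
  }

  static bool demo_mitigation_enabled(void)
  {
          /* static_call_query() returns the current target, if any. */
          return static_call_query(demo_predictor_flush) != NULL;
  }

Compared to the feature-flag approach, the exit-to-userspace path then
needs neither a cpu_feature_enabled() check nor an indirect branch, and
adding a new mitigation only means passing a different function to
static_call_update().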
 arch/x86/Kconfig                     | 1 +
 arch/x86/include/asm/cpufeatures.h   | 2 +-
 arch/x86/include/asm/entry-common.h  | 7 +++----
 arch/x86/include/asm/nospec-branch.h | 3 +++
 arch/x86/kernel/cpu/bugs.c           | 9 ++++++++-
 arch/x86/kvm/x86.c                   | 2 +-
 6 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e2df1b147184..5b8def9ddb98 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2720,6 +2720,7 @@ config MITIGATION_TSA
 config MITIGATION_VMSCAPE
 	bool "Mitigate VMSCAPE"
 	depends on KVM
+	depends on HAVE_STATIC_CALL
 	default y
 	help
 	  Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index dbe104df339b..b4d529dd6d30 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -503,7 +503,7 @@
 #define X86_FEATURE_TSA_SQ_NO		(21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
 #define X86_FEATURE_TSA_L1_NO		(21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
 #define X86_FEATURE_CLEAR_CPU_BUF_VM	(21*32+13) /* Clear CPU buffers using VERW before VMRUN */
-#define X86_FEATURE_IBPB_EXIT_TO_USER	(21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
+/* Free */
 #define X86_FEATURE_ABMC		(21*32+15) /* Assignable Bandwidth Monitoring Counters */
 #define X86_FEATURE_MSR_IMM		(21*32+16) /* MSR immediate form instructions */
 #define X86_FEATURE_SGX_EUPDATESVN	(21*32+17) /* Support for ENCLS[EUPDATESVN] instruction */
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 78b143673ca7..783e7cb50cae 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -4,6 +4,7 @@
 
 #include <linux/randomize_kstack.h>
 #include <linux/user-return-notifier.h>
+#include <linux/static_call.h>
 
 #include <asm/nospec-branch.h>
 #include <asm/io_bitmap.h>
@@ -94,10 +95,8 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 	 */
 	choose_random_kstack_offset(rdtsc());
 
-	/* Avoid unnecessary reads of 'x86_predictor_flush_exit_to_user' */
-	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
-	    this_cpu_read(x86_predictor_flush_exit_to_user)) {
-		write_ibpb();
+	if (unlikely(this_cpu_read(x86_predictor_flush_exit_to_user))) {
+		static_call_cond(vmscape_predictor_flush)();
 		this_cpu_write(x86_predictor_flush_exit_to_user, false);
 	}
 }
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 0381db59c39d..066fd8095200 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -542,6 +542,9 @@ static inline void indirect_branch_prediction_barrier(void)
 		:: "rax", "rcx", "rdx", "memory");
 }
 
+#include <linux/static_call_types.h>
+DECLARE_STATIC_CALL(vmscape_predictor_flush, write_ibpb);
+
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
 DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 636280c612f0..bfc0e41697f6 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -144,6 +144,13 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
  */
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
+/*
+ * Controls how vmscape is mitigated e.g. via IBPB or BHB-clear
+ * sequence. This defaults to no mitigation.
+ */
+DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
+EXPORT_STATIC_CALL_FOR_KVM(vmscape_predictor_flush);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"mitigations: " fmt
 
@@ -3133,7 +3140,7 @@ static void __init vmscape_update_mitigation(void)
 static void __init vmscape_apply_mitigation(void)
 {
 	if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
-		setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+		static_call_update(vmscape_predictor_flush, write_ibpb);
 }
 
 #undef pr_fmt
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 45d7cfedc507..5582056b2fa1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11463,7 +11463,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * set for the CPU that actually ran the guest, and not the CPU that it
 	 * may migrate to.
 	 */
-	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
+	if (static_call_query(vmscape_predictor_flush))
 		this_cpu_write(x86_predictor_flush_exit_to_user, true);
 
 	/*
-- 
2.34.1