From: Santosh Shukla

IBS virtualization (VIBS) allows a guest to collect Instruction-Based
Sampling (IBS) data using hardware-assisted virtualization. With VIBS
enabled, the hardware automatically saves and restores guest IBS state
during VM-Entry and VM-Exit via the VMCB State Save Area, and
IBS-generated interrupts are delivered directly to the guest without
causing a VMEXIT.

VIBS depends on mediated PMU mode and requires either AVIC or NMI
virtualization for interrupt delivery. However, since AVIC can be
dynamically inhibited, VIBS requires VNMI to be enabled to ensure
reliable interrupt delivery. If AVIC is inhibited and VNMI is disabled
while IBS virtualization is enabled for the guest, the guest can
encounter a VMEXIT_INVALID.

Because IBS state is classified as swap type C, the hypervisor must save
its own IBS state before VMRUN and restore it after VMEXIT. It must also
disable IBS before VMRUN and re-enable it afterward. This will be handled
by the mediated PMU support in subsequent patches, which enables the
mediated PMU capability for IBS PMUs.
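To make the swap type C requirement more concrete, the host-side sequence
around VMRUN is sketched below. This is illustrative only: struct
host_ibs_state, host_ibs_save_and_disable() and host_ibs_restore() are
placeholder names, not code added by this patch, and the real handling
will be provided by the mediated PMU integration. The MSR and bit
definitions used are the existing ones from <asm/msr-index.h> and
<asm/perf_event.h>:

  #include <linux/types.h>
  #include <asm/msr.h>          /* rdmsrl()/wrmsrl() */
  #include <asm/perf_event.h>   /* IBS_FETCH_ENABLE, IBS_OP_ENABLE */

  /* Illustrative placeholder for the host's saved IBS control state. */
  struct host_ibs_state {
          u64 fetch_ctl;
          u64 op_ctl;
  };

  /* Before VMRUN: save the host IBS control MSRs and stop sampling. */
  static void host_ibs_save_and_disable(struct host_ibs_state *state)
  {
          rdmsrl(MSR_AMD64_IBSFETCHCTL, state->fetch_ctl);
          rdmsrl(MSR_AMD64_IBSOPCTL, state->op_ctl);
          wrmsrl(MSR_AMD64_IBSFETCHCTL, state->fetch_ctl & ~IBS_FETCH_ENABLE);
          wrmsrl(MSR_AMD64_IBSOPCTL, state->op_ctl & ~IBS_OP_ENABLE);
  }

  /* After VMEXIT: restore the host IBS control MSRs as they were. */
  static void host_ibs_restore(struct host_ibs_state *state)
  {
          wrmsrl(MSR_AMD64_IBSFETCHCTL, state->fetch_ctl);
          wrmsrl(MSR_AMD64_IBSOPCTL, state->op_ctl);
  }

In the actual series this logic is expected to live in the mediated PMU
context-switch path rather than in standalone helpers like these.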
More details about IBS virtualization can be found in [1].

[1] AMD64 Architecture Programmer's Manual, Vol. 2, Section 15.38,
    "Instruction-Based Sampling Virtualization":
    https://bugzilla.kernel.org/attachment.cgi?id=306250

Signed-off-by: Santosh Shukla
Co-developed-by: Manali Shukla
Signed-off-by: Manali Shukla
---
 arch/x86/include/asm/svm.h |  2 +
 arch/x86/kvm/svm/svm.c     | 94 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 96 insertions(+)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 269a8327ab2a..9416a20bf4d3 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -222,6 +222,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define LBR_CTL_ENABLE_MASK BIT_ULL(0)
 #define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
 
+#define VIRTUAL_IBS_ENABLE_MASK BIT_ULL(2)
+
 #define SVM_INTERRUPT_SHADOW_MASK BIT_ULL(0)
 #define SVM_GUEST_INTERRUPT_MASK BIT_ULL(1)
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0471d72a7382..0be24cf03675 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -155,6 +155,10 @@ module_param(vgif, int, 0444);
 int lbrv = true;
 module_param(lbrv, int, 0444);
 
+/* enable/disable IBS virtualization */
+static int vibs = true;
+module_param(vibs, int, 0444);
+
 static int tsc_scaling = true;
 module_param(tsc_scaling, int, 0444);
 
@@ -977,6 +981,20 @@ void disable_nmi_singlestep(struct vcpu_svm *svm)
 	}
 }
 
+static void svm_ibs_msr_interception(struct vcpu_svm *svm, bool intercept)
+{
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSFETCHCTL, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSFETCHLINAD, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSOPCTL, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSOPRIP, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSOPDATA, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSOPDATA2, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSOPDATA3, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSDCLINAD, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_IBSBRTARGET, MSR_TYPE_RW, intercept);
+	svm_set_intercept_for_msr(&svm->vcpu, MSR_AMD64_ICIBSEXTDCTL, MSR_TYPE_RW, intercept);
+}
+
 static void grow_ple_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1118,6 +1136,20 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
 			svm_clr_intercept(svm, INTERCEPT_VMSAVE);
 			svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
 		}
+
+		/*
+		 * If hardware supports VIBS, there is no need to intercept
+		 * the IBS MSRs when VIBS is enabled for the guest.
+		 *
+		 * Enable VIBS by setting bit 2 at offset 0xb8 in the VMCB.
+		 */
+		if (vibs) {
+			if (guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_IBS) &&
+			    kvm_vcpu_has_mediated_pmu(vcpu)) {
+				svm_ibs_msr_interception(svm, false);
+				svm->vmcb->control.virt_ext |= VIRTUAL_IBS_ENABLE_MASK;
+			}
+		}
 	}
 
 	if (kvm_need_rdpmc_intercept(vcpu))
@@ -2894,6 +2926,27 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_DE_CFG:
 		msr_info->data = svm->msr_decfg;
 		break;
+
+	case MSR_AMD64_IBSCTL:
+		if (guest_cpu_cap_has(vcpu, X86_FEATURE_IBS))
+			msr_info->data = IBSCTL_LVT_OFFSET_VALID;
+		else
+			msr_info->data = 0;
+		break;
+
+
+	/*
+	 * When IBS virtualization is enabled, guest reads from
+	 * MSR_AMD64_IBSFETCHPHYSAD and MSR_AMD64_IBSDCPHYSAD must return 0.
+	 * This is done for security reasons, as guests should not be allowed to
+	 * access or infer any information about the system's physical
+	 * addresses.
+	 */
+	case MSR_AMD64_IBSDCPHYSAD:
+	case MSR_AMD64_IBSFETCHPHYSAD:
+		msr_info->data = 0;
+		break;
+
 	default:
 		return kvm_get_msr_common(vcpu, msr_info);
 	}
@@ -3138,6 +3191,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->msr_decfg = data;
 		break;
 	}
+	/*
+	 * When IBS virtualization is enabled, guest writes to
+	 * MSR_AMD64_IBSFETCHPHYSAD and MSR_AMD64_IBSDCPHYSAD are rejected.
+	 * This is done for security reasons, as guests should not be allowed to
+	 * access or infer any information about the system's physical
+	 * addresses.
+	 */
+	case MSR_AMD64_IBSDCPHYSAD:
+	case MSR_AMD64_IBSFETCHPHYSAD:
+		return 1;
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -5284,6 +5347,28 @@ static __init void svm_adjust_mmio_mask(void)
 	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
+static void svm_ibs_set_cpu_caps(void)
+{
+	kvm_cpu_cap_check_and_set(X86_FEATURE_IBS);
+	kvm_cpu_cap_check_and_set(X86_FEATURE_EXTLVT);
+	kvm_cpu_cap_check_and_set(X86_FEATURE_EXTAPIC);
+	if (kvm_cpu_cap_has(X86_FEATURE_IBS)) {
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_AVAIL);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_FETCHSAM);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_OPSAM);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_RDWROPCNT);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_OPCNT);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_BRNTRGT);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_OPCNTEXT);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_RIPINVALIDCHK);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_OPBRNFUSE);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_FETCHCTLEXTD);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_ZEN4_EXT);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_LOADLATFIL);
+		kvm_cpu_cap_check_and_set(X86_FEATURE_IBS_DTLBSTAT);
+	}
+}
+
 static __init void svm_set_cpu_caps(void)
 {
 	kvm_set_cpu_caps();
@@ -5336,6 +5421,9 @@ static __init void svm_set_cpu_caps(void)
 	if (cpu_feature_enabled(X86_FEATURE_BUS_LOCK_THRESHOLD))
 		kvm_caps.has_bus_lock_exit = true;
 
+	if (vibs)
+		svm_ibs_set_cpu_caps();
+
 	/* CPUID 0x80000008 */
 	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
 	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
@@ -5509,6 +5597,12 @@ static __init int svm_hardware_setup(void)
 		svm_x86_ops.set_vnmi_pending = NULL;
 	}
 
+	vibs = enable_mediated_pmu && vnmi && vibs &&
+	       boot_cpu_has(X86_FEATURE_VIBS);
+
+	if (vibs)
+		pr_info("IBS virtualization supported\n");
+
 	if (!enable_pmu)
 		pr_info("PMU virtualization is disabled\n");
 
-- 
2.43.0