Consider the following scenario: while scheduling out an IBS event from perf's core scheduling path, event_sched_out() disables the IBS event by clearing the IBS enable bit in perf_ibs_disable_event(). However, if a delayed IBS NMI is delivered after the IBS enable bit is cleared, the IBS NMI handler may still observe the valid bit set and incorrectly treat the sample as valid. As a result, it re-enables IBS by setting the enable bit, even though the event has already been scheduled out. This leads to a situation where IBS is re-enabled after being explicitly disabled, which is incorrect. Although this race does not have visible side effects, it violates the expected behavior of the perf subsystem. The race is particularly noticeable when userspace repeatedly disables and re-enables IBS using PERF_EVENT_IOC_DISABLE and PERF_EVENT_IOC_ENABLE ioctls in a loop. Fix this by checking the IBS_STOPPING bit in the IBS NMI handler before re-enabling the IBS event. If the IBS_STOPPING bit is set, it indicates that the event is either disabled or in the process of being disabled, and the NMI handler should not re-enable it. Signed-off-by: Manali Shukla --- arch/x86/events/amd/ibs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index eeb607b84dda..09b56bab510a 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -1582,7 +1582,8 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) } new_config |= period >> 4; - perf_ibs_enable_event(perf_ibs, hwc, new_config); + if (!test_bit(IBS_STOPPING, pcpu->state)) + perf_ibs_enable_event(perf_ibs, hwc, new_config); } perf_event_update_userpage(event); -- 2.43.0 From: Santosh Shukla The virtualized IBS (VIBS) feature allows the guest to collect IBS samples without exiting the guest. Presence of the VIBS feature is indicated via CPUID function 0x8000000A_EDX[26]. 
Signed-off-by: Santosh Shukla Signed-off-by: Manali Shukla --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index b1631eb15e74..a1cd6437a052 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -382,6 +382,7 @@ #define X86_FEATURE_X2AVIC (15*32+18) /* "x2avic" Virtual x2apic */ #define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */ #define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */ +#define X86_FEATURE_VIBS (15*32+26) /* Virtual IBS */ #define X86_FEATURE_AVIC_EXTLVT (15*32+27) /* Extended LVT AVIC acceleration support */ #define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */ #define X86_FEATURE_BUS_LOCK_THRESHOLD (15*32+29) /* Bus lock threshold */ -- 2.43.0 Add a KVM-only leaf for AMD's Instruction Based Sampling capabilities. Multiple IBS related capabilities are added to KVM-only leaf, so that KVM can set these capabilities for the guest, when IBS feature bit is enabled on the guest. 
Signed-off-by: Manali Shukla --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/reverse_cpuid.h | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 32dd2d55e6f0..01abdf7f112b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -782,6 +782,7 @@ enum kvm_only_cpuid_leafs { CPUID_12_EAX = NCAPINTS, CPUID_7_1_EDX, CPUID_8000_0007_EDX, + CPUID_8000_001B_EAX, CPUID_8000_0022_EAX, CPUID_7_2_EDX, CPUID_24_0_EBX, diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 657f5f743ed9..22cfdb331e9e 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -76,6 +76,21 @@ #define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1) #define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2) +/* AMD defined Instruction-base Sampling capabilities. CPUID level 0x8000001B (EAX). */ +#define X86_FEATURE_IBS_AVAIL KVM_X86_FEATURE(CPUID_8000_001B_EAX, 0) +#define X86_FEATURE_IBS_FETCHSAM KVM_X86_FEATURE(CPUID_8000_001B_EAX, 1) +#define X86_FEATURE_IBS_OPSAM KVM_X86_FEATURE(CPUID_8000_001B_EAX, 2) +#define X86_FEATURE_IBS_RDWROPCNT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 3) +#define X86_FEATURE_IBS_OPCNT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 4) +#define X86_FEATURE_IBS_BRNTRGT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 5) +#define X86_FEATURE_IBS_OPCNTEXT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 6) +#define X86_FEATURE_IBS_RIPINVALIDCHK KVM_X86_FEATURE(CPUID_8000_001B_EAX, 7) +#define X86_FEATURE_IBS_OPBRNFUSE KVM_X86_FEATURE(CPUID_8000_001B_EAX, 8) +#define X86_FEATURE_IBS_FETCHCTLEXTD KVM_X86_FEATURE(CPUID_8000_001B_EAX, 9) +#define X86_FEATURE_IBS_ZEN4_EXT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 11) +#define X86_FEATURE_IBS_LOADLATFIL KVM_X86_FEATURE(CPUID_8000_001B_EAX, 12) +#define X86_FEATURE_IBS_ZEN4_DTLBSTAT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 19) + struct cpuid_reg { u32 function; u32 index; @@ 
-105,6 +120,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX}, + [CPUID_8000_001B_EAX] = {0x8000001b, 0, CPUID_EAX}, [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX}, [CPUID_7_1_ECX] = { 7, 1, CPUID_ECX}, [CPUID_1E_1_EAX] = { 0x1e, 1, CPUID_EAX}, -- 2.43.0 CPUID leaf 0x8000001b (EAX) provides information about Instruction-Based Sampling capabilities on AMD platforms. Add the new leaf to kvm_cpu_cap_init() using F() macros, which automatically gate each capability bit against raw hardware CPUID via raw_cpuid_get(). This allows vendor code to simply clear the entire leaf when VIBS is not enabled, rather than reading hardware CPUID and calling kvm_cpu_cap_set() for each capability bit individually in later patches. Signed-off-by: Manali Shukla --- arch/x86/kvm/cpuid.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 96a08a556543..4e626e77e6a6 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1226,6 +1226,22 @@ void kvm_initialize_cpu_caps(void) VENDOR_F(SVME_ADDR_CHK), ); + kvm_cpu_cap_init(CPUID_8000_001B_EAX, + F(IBS_AVAIL), + F(IBS_FETCHSAM), + F(IBS_OPSAM), + F(IBS_RDWROPCNT), + F(IBS_OPCNT), + F(IBS_BRNTRGT), + F(IBS_OPCNTEXT), + F(IBS_RIPINVALIDCHK), + F(IBS_OPBRNFUSE), + F(IBS_FETCHCTLEXTD), + F(IBS_ZEN4_EXT), + F(IBS_LOADLATFIL), + F(IBS_ZEN4_DTLBSTAT), + ); + kvm_cpu_cap_init(CPUID_8000_001F_EAX, VENDOR_F(SME), VENDOR_F(SEV), @@ -1848,6 +1864,15 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = entry->ebx = entry->ecx = 0; entry->edx = 0; /* reserved */ break; + /* AMD IBS capability */ + case 0x8000001B: + if (!kvm_cpu_cap_has(X86_FEATURE_IBS)) + entry->eax = 0; + else + cpuid_entry_override(entry, CPUID_8000_001B_EAX); + + entry->ebx = entry->ecx = entry->edx = 0; + break; case 
0x8000001F: if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; -- 2.43.0 From: Santosh Shukla Define the new VMCB fields that will be used to save and restore the state of the following fetch and op IBS related MSRs. * MSRC001_1030 [IBS Fetch Control] * MSRC001_1031 [IBS Fetch Linear Address] * MSRC001_1033 [IBS Execution Control] * MSRC001_1034 [IBS Op Logical Address] * MSRC001_1035 [IBS Op Data] * MSRC001_1036 [IBS Op Data 2] * MSRC001_1037 [IBS Op Data 3] * MSRC001_1038 [IBS DC Linear Address] * MSRC001_103B [IBS Branch Target Address] * MSRC001_103C [IBS Fetch Control Extended] Signed-off-by: Santosh Shukla Signed-off-by: Manali Shukla --- arch/x86/include/asm/svm.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index bcfeb5e7c0ed..4296efc1dafe 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -369,6 +369,17 @@ struct vmcb_save_area { u64 last_excp_to; u8 reserved_0x298[72]; u64 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */ + u8 reserved_0x2e8[1168]; + u64 ibs_fetch_ctl; + u64 ibs_fetch_linear_addr; + u64 ibs_op_ctl; + u64 ibs_op_rip; + u64 ibs_op_data; + u64 ibs_op_data2; + u64 ibs_op_data3; + u64 ibs_dc_linear_addr; + u64 ibs_br_target; + u64 ibs_fetch_extd_ctl; } __packed; /* Save area definition for SEV-ES and SEV-SNP guests */ @@ -551,7 +562,7 @@ struct vmcb { }; } __packed; -#define EXPECTED_VMCB_SAVE_AREA_SIZE 744 +#define EXPECTED_VMCB_SAVE_AREA_SIZE 1992 #define EXPECTED_GHCB_SAVE_AREA_SIZE 1032 #define EXPECTED_SEV_ES_SAVE_AREA_SIZE 1648 #define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024 @@ -577,6 +588,7 @@ static inline void __unused_size_checks(void) BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x180); BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x248); BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x298); + BUILD_BUG_RESERVED_OFFSET(vmcb_save_area, 0x2e8); 
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xc8); BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0xcc); -- 2.43.0 From: Santosh Shukla IBS virtualization (VIBS) allows a guest to collect Instruction-Based Sampling (IBS) data using hardware-assisted virtualization. With VIBS enabled, the hardware automatically saves and restores guest IBS state during VM-Entry and VM-Exit via the VMCB State Save Area. IBS-generated interrupts are delivered directly to the guest without causing a VMEXIT. VIBS depends on mediated PMU mode and requires either AVIC or NMI virtualization for interrupt delivery. However, since AVIC can be dynamically inhibited, VIBS requires VNMI to be enabled to ensure reliable interrupt delivery. If AVIC is inhibited and VNMI is disabled, the guest can encounter a VMEXIT_INVALID when IBS virtualization is enabled for the guest. Because IBS state is classified as swap type C, the hypervisor must save its own IBS state before VMRUN and restore it after VMEXIT. It must also disable IBS before VMRUN and re-enable it afterward. This will be handled using mediated PMU support in subsequent patches by enabling mediated PMU capability for IBS PMUs. More details about IBS virtualization can be found at [1]. [1]: https://bugzilla.kernel.org/attachment.cgi?id=306250 AMD64 Architecture Programmer’s Manual, Vol 2, Section 15.38 Instruction-Based Sampling Virtualization. 
Signed-off-by: Santosh Shukla Co-developed-by: Manali Shukla Signed-off-by: Manali Shukla --- arch/x86/include/asm/svm.h | 2 ++ arch/x86/kvm/svm/svm.c | 73 +++++++++++++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 4296efc1dafe..17aa6bf76bce 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -226,6 +226,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define SVM_INT_VECTOR_MASK GENMASK(7, 0) +#define SVM_MISC_ENABLE_V_IBS BIT_ULL(2) + #define SVM_INTERRUPT_SHADOW_MASK BIT_ULL(0) #define SVM_GUEST_INTERRUPT_MASK BIT_ULL(1) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5af3479cd264..421a929398da 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -161,12 +161,15 @@ module_param(lbrv, int, 0444); static int __ro_after_init tsc_scaling = true; module_param(tsc_scaling, int, 0444); +/* enable/disable IBS virtualization */ +static bool __ro_after_init vibs = true; +module_param(vibs, bool, 0444); + module_param(enable_device_posted_irqs, bool, 0444); bool __read_mostly dump_invalid_vmcb; module_param(dump_invalid_vmcb, bool, 0644); - bool __ro_after_init intercept_smi = true; module_param(intercept_smi, bool, 0444); @@ -779,6 +782,26 @@ static void svm_recalc_pmu_msr_intercepts(struct kvm_vcpu *vcpu) MSR_TYPE_RW, intercept); } +static void svm_recalc_ibs_msr_intercepts(struct kvm_vcpu *vcpu) +{ + bool intercept = !(guest_cpu_cap_has(vcpu, X86_FEATURE_IBS) && + kvm_vcpu_has_mediated_pmu(vcpu)); + + if (!enable_mediated_pmu || !vibs) + return; + + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSFETCHCTL, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSFETCHLINAD, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSOPCTL, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSOPRIP, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, 
MSR_AMD64_IBSOPDATA, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSOPDATA2, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSOPDATA3, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSDCLINAD, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSBRTARGET, MSR_TYPE_RW, intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_ICIBSEXTDCTL, MSR_TYPE_RW, intercept); +} + static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -848,6 +871,7 @@ static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu) sev_es_recalc_msr_intercepts(vcpu); svm_recalc_pmu_msr_intercepts(vcpu); + svm_recalc_ibs_msr_intercepts(vcpu); /* * x2APIC intercepts are modified on-demand and cannot be filtered by @@ -2880,6 +2904,27 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break; + + case MSR_AMD64_IBSCTL: + if (guest_cpu_cap_has(vcpu, X86_FEATURE_IBS)) + msr_info->data = IBSCTL_LVT_OFFSET_VALID; + else + msr_info->data = 0; + break; + + + /* + * When IBS virtualization is enabled, guest reads from + * MSR_AMD64_IBSFETCHPHYSAD and MSR_AMD64_IBSDCPHYSAD must return 0. + * This is done for security reasons, as guests should not be allowed to + * access or infer any information about the system's physical + * addresses. + */ + case MSR_AMD64_IBSDCPHYSAD: + case MSR_AMD64_IBSFETCHPHYSAD: + msr_info->data = 0; + break; + default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3171,6 +3216,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->msr_decfg = data; break; } + /* + * When IBS virtualization is enabled, guest writes to + * MSR_AMD64_IBSFETCHPHYSAD and MSR_AMD64_IBSDCPHYSAD must be ignored. + * This is done for security reasons, as guests should not be allowed to + * access or infer any information about the system's physical + * addresses. 
+ */ + case MSR_AMD64_IBSDCPHYSAD: + case MSR_AMD64_IBSFETCHPHYSAD: + return 1; default: return kvm_set_msr_common(vcpu, msr); } @@ -4678,6 +4733,11 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) if (guest_cpuid_is_intel_compatible(vcpu)) guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); + if (guest_cpu_cap_has(vcpu, X86_FEATURE_IBS)) + svm->vmcb->control.misc_ctl2 |= SVM_MISC_ENABLE_V_IBS; + else + svm->vmcb->control.misc_ctl2 &= ~SVM_MISC_ENABLE_V_IBS; + if (sev_guest(vcpu->kvm)) sev_vcpu_after_set_cpuid(svm); } @@ -5510,6 +5570,11 @@ static __init void svm_set_cpu_caps(void) if (cpu_feature_enabled(X86_FEATURE_EXTAPIC)) kvm_caps.has_extapic = true; + if (vibs) + kvm_cpu_cap_check_and_set(X86_FEATURE_IBS); + else + kvm_cpu_caps[CPUID_8000_001B_EAX] = 0; + /* CPUID 0x80000008 */ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || boot_cpu_has(X86_FEATURE_AMD_SSBD)) @@ -5698,6 +5763,12 @@ static __init int svm_hardware_setup(void) svm_x86_ops.set_vnmi_pending = NULL; } + vibs = enable_mediated_pmu && vnmi && vibs + && boot_cpu_has(X86_FEATURE_VIBS); + + if (vibs) + pr_info("IBS virtualization supported\n"); + if (!enable_pmu) pr_info("PMU virtualization is disabled\n"); -- 2.43.0 IBS MSRs are classified as Swap Type C, which requires the hypervisor to save and restore its own IBS state before VMENTRY and after VMEXIT. To support this, set the ibs_op and ibs_fetch PMUs with the PERF_PMU_CAP_MEDIATED_VPMU capability. This ensures that these PMUs are exclusively owned by the guest while it is running, allowing the hypervisor to manage IBS state transitions correctly. 
Signed-off-by: Manali Shukla --- arch/x86/events/amd/ibs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 09b56bab510a..034a992bbfe3 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -971,6 +971,7 @@ static struct perf_ibs perf_ibs_fetch = { .stop = perf_ibs_stop, .read = perf_ibs_read, .check_period = perf_ibs_check_period, + .capabilities = PERF_PMU_CAP_MEDIATED_VPMU, }, .msr = MSR_AMD64_IBSFETCHCTL, .msr2 = MSR_AMD64_IBSFETCHCTL2, @@ -997,6 +998,7 @@ static struct perf_ibs perf_ibs_op = { .stop = perf_ibs_stop, .read = perf_ibs_read, .check_period = perf_ibs_check_period, + .capabilities = PERF_PMU_CAP_MEDIATED_VPMU, }, .msr = MSR_AMD64_IBSOPCTL, .msr2 = MSR_AMD64_IBSOPCTL2, -- 2.43.0 Currently, the IBS driver doesn't allow the creation of an IBS event with exclude_guest set. As a result, amd_ibs_init() returns -EINVAL if an IBS event is created with exclude_guest set. With the introduction of mediated PMU support, software-based handling of exclude_guest is permitted for PMUs that have the PERF_PMU_CAP_MEDIATED_VPMU capability. Since the ibs_op and ibs_fetch PMUs have the PERF_PMU_CAP_MEDIATED_VPMU capability set, update perf_ibs_init() to remove the exclude_guest check. Signed-off-by: Manali Shukla --- arch/x86/events/amd/ibs.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 034a992bbfe3..7da06c143b32 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -327,8 +327,7 @@ static int perf_ibs_init(struct perf_event *event) return -EOPNOTSUPP; /* handle exclude_{user,kernel} in the IRQ handler */ - if (event->attr.exclude_host || event->attr.exclude_guest || - event->attr.exclude_idle) + if (event->attr.exclude_host || event->attr.exclude_idle) return -EINVAL; ret = validate_group(event); -- 2.43.0 IBS on upcoming microarch introduced two new control MSRs and a couple of new features. 
Define macros for them. Add these newly added IBS capabilities to KVM-only leaf 0x8000001b, so that when IBS feature bit is enabled on the guest, these newly added features can be used by guests if the hardware and guest os supports it. - X86_FEATURE_IBS_DISABLE: Independent IBS disable capability to avoid RMW race - X86_FEATURE_IBS_FETCHLATFIL: Fetch Latency filtering - X86_FEATURE_IBS_ADDRFILTER: Address Bit 63 based filtering - X86_FEATURE_IBS_STRMST_RMTSOCKET: Streaming store filter and indicator. Remote socket indicator. - X86_FEATURE_IBS_BUFFER1: IBS buffering v1 - X86_FEATURE_IBS_MEMPROFILER: IBS memory profiler Extend VMCB save area to include to the newly added MSRs: MSR_AMD64_IBSFETCHCTL2 and MSR_AMD64_IBSOPCTL2. Signed-off-by: Manali Shukla --- arch/x86/include/asm/svm.h | 4 +++- arch/x86/kvm/cpuid.c | 6 ++++++ arch/x86/kvm/reverse_cpuid.h | 6 ++++++ arch/x86/kvm/svm/svm.c | 7 +++++++ 4 files changed, 22 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 17aa6bf76bce..88833db2e739 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -382,6 +382,8 @@ struct vmcb_save_area { u64 ibs_dc_linear_addr; u64 ibs_br_target; u64 ibs_fetch_extd_ctl; + u64 ibs_fetch_ctl2; + u64 ibs_op_ctl2; } __packed; /* Save area definition for SEV-ES and SEV-SNP guests */ @@ -564,7 +566,7 @@ struct vmcb { }; } __packed; -#define EXPECTED_VMCB_SAVE_AREA_SIZE 1992 +#define EXPECTED_VMCB_SAVE_AREA_SIZE 2008 #define EXPECTED_GHCB_SAVE_AREA_SIZE 1032 #define EXPECTED_SEV_ES_SAVE_AREA_SIZE 1648 #define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 4e626e77e6a6..e8a664cb0bb8 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1239,6 +1239,12 @@ void kvm_initialize_cpu_caps(void) F(IBS_FETCHCTLEXTD), F(IBS_ZEN4_EXT), F(IBS_LOADLATFIL), + F(IBS_DISABLE), + F(IBS_FETCHLATFIL), + F(IBS_ADDRFILTER), + F(IBS_STRMST_RMTSOCKET), + F(IBS_BUFFER1), + 
F(IBS_MEMPROFILER), F(IBS_ZEN4_DTLBSTAT), ); diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 22cfdb331e9e..1af2ba207b8a 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -89,6 +89,12 @@ #define X86_FEATURE_IBS_FETCHCTLEXTD KVM_X86_FEATURE(CPUID_8000_001B_EAX, 9) #define X86_FEATURE_IBS_ZEN4_EXT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 11) #define X86_FEATURE_IBS_LOADLATFIL KVM_X86_FEATURE(CPUID_8000_001B_EAX, 12) +#define X86_FEATURE_IBS_DISABLE KVM_X86_FEATURE(CPUID_8000_001B_EAX, 13) +#define X86_FEATURE_IBS_FETCHLATFIL KVM_X86_FEATURE(CPUID_8000_001B_EAX, 14) +#define X86_FEATURE_IBS_ADDRFILTER KVM_X86_FEATURE(CPUID_8000_001B_EAX, 15) +#define X86_FEATURE_IBS_STRMST_RMTSOCKET KVM_X86_FEATURE(CPUID_8000_001B_EAX, 16) +#define X86_FEATURE_IBS_BUFFER1 KVM_X86_FEATURE(CPUID_8000_001B_EAX, 17) +#define X86_FEATURE_IBS_MEMPROFILER KVM_X86_FEATURE(CPUID_8000_001B_EAX, 18) #define X86_FEATURE_IBS_ZEN4_DTLBSTAT KVM_X86_FEATURE(CPUID_8000_001B_EAX, 19) struct cpuid_reg { diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 421a929398da..9bf0d5f66239 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -800,6 +800,13 @@ static void svm_recalc_ibs_msr_intercepts(struct kvm_vcpu *vcpu) svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSDCLINAD, MSR_TYPE_RW, intercept); svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSBRTARGET, MSR_TYPE_RW, intercept); svm_set_intercept_for_msr(vcpu, MSR_AMD64_ICIBSEXTDCTL, MSR_TYPE_RW, intercept); + + if (guest_cpu_cap_has(vcpu, X86_FEATURE_IBS_DISABLE)) { + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSFETCHCTL2, MSR_TYPE_RW, + intercept); + svm_set_intercept_for_msr(vcpu, MSR_AMD64_IBSOPCTL2, MSR_TYPE_RW, + intercept); + } } static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu) -- 2.43.0