BTS (Branch Trace Store), enumerated by IA32_MISC_ENABLE.BTS_UNAVAILABLE (bit 11), is deprecated and has been superseded by LBR and Intel PT. KVM has yielded control of the above-mentioned bit to host userspace since KVM commit 9fc222967a39 ("KVM: x86: Give host userspace full control of MSR_IA32_MISC_ENABLES"). However, QEMU does not set this bit, so guests assume BTS is available and may write the BTS and BTINT bits in IA32_DEBUGCTL. Since KVM doesn't support BTS, this may lead to unexpected MSR access errors. Signed-off-by: Zide Chen --- V2: - Address Dapeng's comments. - Remove mention of VMState version_id from the commit message. target/i386/cpu.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 2bbc977d9088..f02812bfd19f 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -474,8 +474,11 @@ typedef enum X86Seg { #define MSR_IA32_MISC_ENABLE 0x1a0 /* Indicates good rep/movs microcode on some processors: */ -#define MSR_IA32_MISC_ENABLE_DEFAULT 1 +#define MSR_IA32_MISC_ENABLE_FASTSTRING (1ULL << 0) +#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11) #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) +#define MSR_IA32_MISC_ENABLE_DEFAULT (MSR_IA32_MISC_ENABLE_FASTSTRING |\ + MSR_IA32_MISC_ENABLE_BTS_UNAVAIL) #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) -- 2.52.0 From: Dapeng Mi MSR_CORE_PERF_GLOBAL_OVF_CTRL is a write-only MSR and reads always return zero. Saving and restoring this MSR is therefore unnecessary. Replace VMSTATE_UINT64 with VMSTATE_UNUSED in the VMStateDescription to ignore env.msr_global_ovf_ctrl during migration. This avoids the need to bump version_id and does not introduce any migration incompatibility. Signed-off-by: Dapeng Mi Signed-off-by: Zide Chen --- V2: - No changes. target/i386/cpu.h | 1 - target/i386/kvm/kvm.c | 6 ------ target/i386/machine.c | 4 ++-- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index f02812bfd19f..f6e9b274e2ff 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -2086,7 +2086,6 @@ typedef struct CPUArchState { uint64_t msr_fixed_ctr_ctrl; uint64_t msr_global_ctrl; uint64_t msr_global_status; - uint64_t msr_global_ovf_ctrl; uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; uint64_t msr_gp_counters[MAX_GP_COUNTERS]; uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 7b9b740a8e5a..cffbc90d1c50 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -4069,8 +4069,6 @@ static int kvm_put_msrs(X86CPU *cpu, KvmPutState level) if (has_architectural_pmu_version > 1) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, env->msr_global_status); - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, - env->msr_global_ovf_ctrl); /* Now start the PMU.
*/ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, @@ -4588,7 +4586,6 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); - kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); } for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); @@ -4917,9 +4914,6 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_CORE_PERF_GLOBAL_STATUS: env->msr_global_status = msrs[i].data; break; - case MSR_CORE_PERF_GLOBAL_OVF_CTRL: - env->msr_global_ovf_ctrl = msrs[i].data; - break; case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; break; diff --git a/target/i386/machine.c b/target/i386/machine.c index c9139612813b..1125c8a64ec5 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -666,7 +666,7 @@ static bool pmu_enable_needed(void *opaque) int i; if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl || - env->msr_global_status || env->msr_global_ovf_ctrl) { + env->msr_global_status) { return true; } for (i = 0; i < MAX_FIXED_COUNTERS; i++) { @@ -692,7 +692,7 @@ static const VMStateDescription vmstate_msr_architectural_pmu = { VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU), VMSTATE_UINT64(env.msr_global_ctrl, X86CPU), VMSTATE_UINT64(env.msr_global_status, X86CPU), - VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU), + VMSTATE_UNUSED(sizeof(uint64_t)), VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS), VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS), VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS), -- 2.52.0 Guest PMU support requires KVM. Clear cpu->enable_pmu when KVM is not enabled, so PMU-related code can rely solely on cpu->enable_pmu. This reduces duplication and avoids bugs where one of the checks is missed. For example, cpu_x86_cpuid() enables CPUID.0AH when cpu->enable_pmu is set but does not check kvm_enabled(). This is implicitly fixed by this patch: if (cpu->enable_pmu) { x86_cpu_get_supported_cpuid(0xA, count, eax, ebx, ecx, edx); } Also fix two places that check kvm_enabled() but not cpu->enable_pmu. Reviewed-by: Dapeng Mi Signed-off-by: Zide Chen --- V2: - Replace a tab with spaces. target/i386/cpu.c | 9 ++++++--- target/i386/kvm/kvm.c | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 37803cd72490..d3e9d3c40b0a 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -8671,7 +8671,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx = 0; *edx = 0; if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || - !kvm_enabled()) { + !cpu->enable_pmu) { break; } @@ -9018,7 +9018,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, case 0x80000022: *eax = *ebx = *ecx = *edx = 0; /* AMD Extended Performance Monitoring and Debug */ - if (kvm_enabled() && cpu->enable_pmu && + if (cpu->enable_pmu && (env->features[FEAT_8000_0022_EAX] & CPUID_8000_0022_EAX_PERFMON_V2)) { *eax |= CPUID_8000_0022_EAX_PERFMON_V2; *ebx |= kvm_arch_get_supported_cpuid(cs->kvm_state, index, count, @@ -9642,7 +9642,7 @@ static bool x86_cpu_filter_features(X86CPU *cpu, bool verbose) * are advertised by cpu_x86_cpuid(). Keep these two in sync. 
*/ if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && - kvm_enabled()) { + cpu->enable_pmu) { x86_cpu_get_supported_cpuid(0x14, 0, &eax_0, &ebx_0, &ecx_0, &edx_0); x86_cpu_get_supported_cpuid(0x14, 1, @@ -9790,6 +9790,9 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) Error *local_err = NULL; unsigned requested_lbr_fmt; + if (!kvm_enabled()) + cpu->enable_pmu = false; + #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) /* Use pc-relative instructions in system-mode */ tcg_cflags_set(cs, CF_PCREL); diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index cffbc90d1c50..e81fa46ed66c 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -4222,7 +4222,7 @@ static int kvm_put_msrs(X86CPU *cpu, KvmPutState level) env->msr_xfd_err); } - if (kvm_enabled() && cpu->enable_pmu && + if (cpu->enable_pmu && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { uint64_t depth; int ret; @@ -4698,7 +4698,7 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0); } - if (kvm_enabled() && cpu->enable_pmu && + if (cpu->enable_pmu && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) { uint64_t depth; -- 2.52.0 From: Dapeng Mi If IA32_PERF_CAPABILITIES.FW_WRITE (bit 13) is set, each general-purpose counter IA32_PMCi (starting at 0xc1) is accompanied by a corresponding 64-bit alias MSR starting at 0x4c1 (IA32_A_PMC0). The legacy IA32_PMCi MSRs are not full-width and their effective width is determined by CPUID.0AH:EAX[23:16]. Since these MSRs are architectural aliases, it is safe to use IA32_A_PMCi for save/restore instead of the legacy IA32_PMCi MSRs whenever it is supported. Full-width write is a user-visible feature and can be disabled individually. Reduce MAX_GP_COUNTERS from 18 to 15 to avoid a conflict between the full-width MSR range and MSR_MCG_EXT_CTL. Current CPUs support at most 10 general-purpose counters, so 15 is sufficient for now and leaves room for future expansion. Bump minimum_version_id to refuse migration from older QEMU, whose streams carry 18-element counter arrays that would overflow the new 15-element VMState arrays. This also requires bumping version_id, which prevents migration to older QEMU as well. Signed-off-by: Dapeng Mi Signed-off-by: Zide Chen --- V2: - Slightly improve the commit message wording. - Update the comment for MSR_IA32_PMC0 definition.
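A quick arithmetic check of the new limit (illustrative only, not part of the patch; it assumes the architectural addresses of IA32_A_PMC0 (0x4c1) and MSR_MCG_EXT_CTL (0x4d0)):

    /* With MAX_GP_COUNTERS = 15, the highest full-width alias used is
     *   0x4c1 + 15 - 1 = 0x4cf,
     * which stays below MSR_MCG_EXT_CTL (0x4d0). The old value of 18
     * would have reached 0x4c1 + 18 - 1 = 0x4d2 and overlapped it.
     */
    QEMU_BUILD_BUG_ON(MSR_IA32_PMC0 + MAX_GP_COUNTERS - 1 >= 0x4d0);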
target/i386/cpu.h | 5 ++++- target/i386/kvm/kvm.c | 19 +++++++++++++++++-- target/i386/machine.c | 4 ++-- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index f6e9b274e2ff..812d53e22c41 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -421,6 +421,7 @@ typedef enum X86Seg { #define MSR_IA32_PERF_CAPABILITIES 0x345 #define PERF_CAP_LBR_FMT 0x3f +#define PERF_CAP_FULL_WRITE (1U << 13) #define MSR_IA32_TSX_CTRL 0x122 #define MSR_IA32_TSCDEADLINE 0x6e0 @@ -448,6 +449,8 @@ typedef enum X86Seg { #define MSR_IA32_SGXLEPUBKEYHASH3 0x8f #define MSR_P6_PERFCTR0 0xc1 +/* Alias MSR range for full-width general-purpose performance counters */ +#define MSR_IA32_PMC0 0x4c1 #define MSR_IA32_SMBASE 0x9e #define MSR_SMI_COUNT 0x34 @@ -1740,7 +1743,7 @@ typedef struct { #endif #define MAX_FIXED_COUNTERS 3 -#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) +#define MAX_GP_COUNTERS 15 #define NB_OPMASK_REGS 8 diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index e81fa46ed66c..530f50e4b218 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -4049,6 +4049,12 @@ static int kvm_put_msrs(X86CPU *cpu, KvmPutState level) } if (has_architectural_pmu_version > 0) { + uint32_t perf_cntr_base = MSR_P6_PERFCTR0; + + if (env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_FULL_WRITE) { + perf_cntr_base = MSR_IA32_PMC0; + } + if (has_architectural_pmu_version > 1) { /* Stop the counter. */ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); @@ -4061,7 +4067,7 @@ static int kvm_put_msrs(X86CPU *cpu, KvmPutState level) env->msr_fixed_counters[i]); } for (i = 0; i < num_architectural_pmu_gp_counters; i++) { - kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, + kvm_msr_entry_add(cpu, perf_cntr_base + i, env->msr_gp_counters[i]); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, env->msr_gp_evtsel[i]); @@ -4582,6 +4588,12 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); } if (has_architectural_pmu_version > 0) { + uint32_t perf_cntr_base = MSR_P6_PERFCTR0; + + if (env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_FULL_WRITE) { + perf_cntr_base = MSR_IA32_PMC0; + } + if (has_architectural_pmu_version > 1) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); @@ -4591,7 +4603,7 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); } for (i = 0; i < num_architectural_pmu_gp_counters; i++) { - kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); + kvm_msr_entry_add(cpu, perf_cntr_base + i, 0); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); } } @@ -4920,6 +4932,9 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; break; + case MSR_IA32_PMC0 ... MSR_IA32_PMC0 + MAX_GP_COUNTERS - 1: + env->msr_gp_counters[index - MSR_IA32_PMC0] = msrs[i].data; + break; case MSR_P6_EVNTSEL0 ... 
MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; break; diff --git a/target/i386/machine.c b/target/i386/machine.c index 1125c8a64ec5..7d08a05835fc 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -685,8 +685,8 @@ static bool pmu_enable_needed(void *opaque) static const VMStateDescription vmstate_msr_architectural_pmu = { .name = "cpu/msr_architectural_pmu", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .needed = pmu_enable_needed, .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU), -- 2.52.0 Newer Intel server CPUs support a large number of PMU MSRs. Currently, QEMU allocates cpu->kvm_msr_buf as a single-page buffer, which is not sufficient to hold all possible MSRs. Increase MSR_BUF_SIZE to 8192 bytes, providing space for up to 511 MSRs. This is sufficient even for the theoretical worst case, such as architectural LBR with a depth of 64. KVM_[GET/SET]_MSRS is limited to 255 MSRs per call. Raising this limit to 511 would require changes in KVM and would introduce backward compatibility issues. Instead, split requests into multiple KVM_[GET/SET]_MSRS calls when the number of MSRs exceeds the API limit. Signed-off-by: Zide Chen --- V2: - No changes. target/i386/kvm/kvm.c | 114 +++++++++++++++++++++++++++++++++++------- 1 file changed, 97 insertions(+), 17 deletions(-) diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 530f50e4b218..a2cf9b5df35d 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -98,9 +98,12 @@ #define KVM_APIC_BUS_CYCLE_NS 1 #define KVM_APIC_BUS_FREQUENCY (1000000000ULL / KVM_APIC_BUS_CYCLE_NS) -/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus - * 255 kvm_msr_entry structs */ -#define MSR_BUF_SIZE 4096 +/* An 8192-byte buffer can hold the 8-byte kvm_msrs header, plus + * 511 kvm_msr_entry structs */ +#define MSR_BUF_SIZE 8192 + +/* Maximum number of MSRs in a single KVM_[GET/SET]_MSRS call.
*/ +#define KVM_MAX_IO_MSRS 255 typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val); typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val); @@ -3878,23 +3881,107 @@ static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f) } } -static int kvm_buf_set_msrs(X86CPU *cpu) +static int __kvm_buf_set_msrs(X86CPU *cpu, struct kvm_msrs *msrs) { - int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); + int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, msrs); if (ret < 0) { return ret; } - if (ret < cpu->kvm_msr_buf->nmsrs) { - struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; + if (ret < msrs->nmsrs) { + struct kvm_msr_entry *e = &msrs->entries[ret]; error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, (uint32_t)e->index, (uint64_t)e->data); } - assert(ret == cpu->kvm_msr_buf->nmsrs); + assert(ret == msrs->nmsrs); + return ret; +} + +static int __kvm_buf_get_msrs(X86CPU *cpu, struct kvm_msrs *msrs) +{ + int ret; + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, msrs); + if (ret < 0) { + return ret; + } + + if (ret < msrs->nmsrs) { + struct kvm_msr_entry *e = &msrs->entries[ret]; + error_report("error: failed to get MSR 0x%" PRIx32, + (uint32_t)e->index); + } + + assert(ret == msrs->nmsrs); + return ret; +} + +static int kvm_buf_set_or_get_msrs(X86CPU *cpu, bool is_write) +{ + struct kvm_msr_entry *entries = cpu->kvm_msr_buf->entries; + struct kvm_msrs *buf = NULL; + int current, remaining, ret = 0; + size_t buf_size; + + buf_size = KVM_MAX_IO_MSRS * sizeof(struct kvm_msr_entry) + + sizeof(struct kvm_msrs); + buf = g_malloc(buf_size); + + remaining = cpu->kvm_msr_buf->nmsrs; + current = 0; + while (remaining) { + size_t size; + + memset(buf, 0, buf_size); + + if (remaining > KVM_MAX_IO_MSRS) { + buf->nmsrs = KVM_MAX_IO_MSRS; + } else { + buf->nmsrs = remaining; + } + + size = buf->nmsrs * sizeof(entries[0]); + memcpy(buf->entries, &entries[current], size); + + if (is_write) { + ret = __kvm_buf_set_msrs(cpu, buf); + } else { + ret = __kvm_buf_get_msrs(cpu, buf); + } + + if (ret < 0) { + goto out; + } + + if (!is_write) { + memcpy(&entries[current], buf->entries, size); + } + + current += buf->nmsrs; + remaining -= buf->nmsrs; + } + +out: + g_free(buf); + return ret < 0 ? ret : cpu->kvm_msr_buf->nmsrs; +} + +static int kvm_buf_set_msrs(X86CPU *cpu) +{ + int ret = kvm_buf_set_or_get_msrs(cpu, true); + + if (ret < 0) { + return ret; + } return 0; } +static int kvm_buf_get_msrs(X86CPU *cpu) +{ + return kvm_buf_set_or_get_msrs(cpu, false); +} + static void kvm_init_msrs(X86CPU *cpu) { CPUX86State *env = &cpu->env; @@ -3928,7 +4015,7 @@ static void kvm_init_msrs(X86CPU *cpu) if (has_msr_ucode_rev) { kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev); } - assert(kvm_buf_set_msrs(cpu) == 0); + kvm_buf_set_msrs(cpu); } static int kvm_put_msrs(X86CPU *cpu, KvmPutState level) @@ -4746,18 +4833,11 @@ static int kvm_get_msrs(X86CPU *cpu) } } - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); + ret = kvm_buf_get_msrs(cpu); if (ret < 0) { return ret; } - if (ret < cpu->kvm_msr_buf->nmsrs) { - struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; - error_report("error: failed to get MSR 0x%" PRIx32, - (uint32_t)e->index); - } - - assert(ret == cpu->kvm_msr_buf->nmsrs); /* * MTRR masks: Each mask consists of 5 parts * a 10..0: must be zero -- 2.52.0 From: Dapeng Mi DS-based PEBS introduces three MSRs: MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG, and MSR_IA32_PEBS_ENABLE. Save and restore these MSRs when legacy DS PEBS is enabled.
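For context, a rough sketch of how a guest arms DS-based PEBS with these three MSRs (hypothetical guest-side pseudo-code; wrmsr() stands in for the WRMSR instruction and the values are simplified):

    wrmsr(MSR_IA32_DS_AREA, ds_area_base);     /* locate the DS save area */
    wrmsr(MSR_PEBS_DATA_CFG, data_cfg);        /* select PEBS record groups */
    wrmsr(MSR_IA32_PEBS_ENABLE, 1ULL << 0);    /* arm PEBS on the first GP counter */

All three registers hold guest-writable state, which is why they need to be saved and restored across migration.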
Signed-off-by: Dapeng Mi Signed-off-by: Zide Chen --- V2: - No changes. target/i386/cpu.h | 9 +++++++++ target/i386/kvm/kvm.c | 25 +++++++++++++++++++++++++ target/i386/machine.c | 27 ++++++++++++++++++++++++++- 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 812d53e22c41..3e2222e105bc 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -422,6 +422,7 @@ typedef enum X86Seg { #define MSR_IA32_PERF_CAPABILITIES 0x345 #define PERF_CAP_LBR_FMT 0x3f #define PERF_CAP_FULL_WRITE (1U << 13) +#define PERF_CAP_PEBS_BASELINE (1U << 14) #define MSR_IA32_TSX_CTRL 0x122 #define MSR_IA32_TSCDEADLINE 0x6e0 @@ -512,6 +513,11 @@ typedef enum X86Seg { #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 +/* Legacy DS based PEBS MSRs */ +#define MSR_IA32_PEBS_ENABLE 0x3f1 +#define MSR_PEBS_DATA_CFG 0x3f2 +#define MSR_IA32_DS_AREA 0x600 + #define MSR_MC0_CTL 0x400 #define MSR_MC0_STATUS 0x401 #define MSR_MC0_ADDR 0x402 @@ -2089,6 +2095,9 @@ typedef struct CPUArchState { uint64_t msr_fixed_ctr_ctrl; uint64_t msr_global_ctrl; uint64_t msr_global_status; + uint64_t msr_ds_area; + uint64_t msr_pebs_data_cfg; + uint64_t msr_pebs_enable; uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; uint64_t msr_gp_counters[MAX_GP_COUNTERS]; uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index a2cf9b5df35d..a72e4d60dfa2 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -4143,6 +4143,15 @@ static int kvm_put_msrs(X86CPU *cpu, KvmPutState level) kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); } + if (env->features[FEAT_1_EDX] & CPUID_DTS) { + kvm_msr_entry_add(cpu, MSR_IA32_DS_AREA, env->msr_ds_area); + } + + if (env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_PEBS_BASELINE) { + kvm_msr_entry_add(cpu, MSR_IA32_PEBS_ENABLE, env->msr_pebs_enable); + kvm_msr_entry_add(cpu, MSR_PEBS_DATA_CFG, env->msr_pebs_data_cfg); + } + /* Set the counter values. */ for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, @@ -4688,6 +4697,13 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, perf_cntr_base + i, 0); kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); } + if (env->features[FEAT_1_EDX] & CPUID_DTS) { + kvm_msr_entry_add(cpu, MSR_IA32_DS_AREA, 0); + } + if (env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_PEBS_BASELINE) { + kvm_msr_entry_add(cpu, MSR_IA32_PEBS_ENABLE, 0); + kvm_msr_entry_add(cpu, MSR_PEBS_DATA_CFG, 0); + } } if (env->mcg_cap) { @@ -5013,6 +5029,15 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_P6_EVNTSEL0 ... 
MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; break; + case MSR_IA32_DS_AREA: + env->msr_ds_area = msrs[i].data; + break; + case MSR_PEBS_DATA_CFG: + env->msr_pebs_data_cfg = msrs[i].data; + break; + case MSR_IA32_PEBS_ENABLE: + env->msr_pebs_enable = msrs[i].data; + break; case HV_X64_MSR_HYPERCALL: env->msr_hv_hypercall = msrs[i].data; break; diff --git a/target/i386/machine.c b/target/i386/machine.c index 7d08a05835fc..7f45db1247b1 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -659,6 +659,27 @@ static const VMStateDescription vmstate_msr_ia32_feature_control = { } }; +static bool ds_pebs_enabled(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return (env->msr_ds_area || env->msr_pebs_enable || + env->msr_pebs_data_cfg); +} + +static const VMStateDescription vmstate_msr_ds_pebs = { + .name = "cpu/msr_ds_pebs", + .version_id = 1, + .minimum_version_id = 1, + .needed = ds_pebs_enabled, + .fields = (const VMStateField[]){ + VMSTATE_UINT64(env.msr_ds_area, X86CPU), + VMSTATE_UINT64(env.msr_pebs_data_cfg, X86CPU), + VMSTATE_UINT64(env.msr_pebs_enable, X86CPU), + VMSTATE_END_OF_LIST()} +}; + static bool pmu_enable_needed(void *opaque) { X86CPU *cpu = opaque; @@ -697,7 +718,11 @@ static const VMStateDescription vmstate_msr_architectural_pmu = { VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS), VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS), VMSTATE_END_OF_LIST() - } + }, + .subsections = (const VMStateDescription * const []) { + &vmstate_msr_ds_pebs, + NULL, + }, }; static bool mpx_needed(void *opaque) -- 2.52.0 Populate selected PEBS feature names in FEAT_PERF_CAPABILITIES to make the corresponding bits user-visible CPU feature knobs, allowing them to be explicitly enabled or disabled via the -cpu option (+feature/-feature). Once named, these bits become part of the guest CPU configuration contract. If a VM is configured with such a feature enabled, migration to a destination that does not support the feature may fail, as the destination cannot honor the guest-visible CPU model. The PEBS_FMT bits are not exposed, as target/i386 currently has no mechanism for multi-bit CPU feature flags. Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi Signed-off-by: Zide Chen --- V2: - Add the missing comma after "pebs-arch-reg". - Simplify the PEBS_FMT description in the commit message. target/i386/cpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index d3e9d3c40b0a..f2c83b4f259c 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -1618,10 +1618,10 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .type = MSR_FEATURE_WORD, .feat_names = { NULL, NULL, NULL, NULL, + NULL, NULL, "pebs-trap", "pebs-arch-reg", NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, "full-width-write", NULL, NULL, - NULL, NULL, NULL, NULL, + NULL, "full-width-write", "pebs-baseline", NULL, + NULL, "pebs-timing-info", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, -- 2.52.0 Since DEFINE_PROP_UINT64_CHECKMASK() already checks the lbr-fmt property against the PERF_CAP_LBR_FMT mask at property-set time, there is no need to validate user-requested lbr-fmt values again at realize time. The PMU feature is only supported when running under KVM, so initialize cpu->lbr_fmt in kvm_cpu_instance_init(). Use -1 as the default lbr-fmt, rather than initializing it with ~PERF_CAP_LBR_FMT, which is misleading as it suggests a semantic relationship that does not exist.
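For illustration, with CHECKMASK the property layer itself rejects out-of-range values at set time (example command lines; 'host' is just an arbitrary CPU model):

    -cpu host,pmu=on,lbr-fmt=0x3f    # accepted: value lies within the 0x3f mask
    -cpu host,pmu=on,lbr-fmt=0x40    # rejected by DEFINE_PROP_UINT64_CHECKMASK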
Rename requested_lbr_fmt to a more generic guest_fmt. When lbr-fmt is not specified and cpu->migratable is false, the guest lbr_fmt value is not user-requested. Signed-off-by: Zide Chen --- V2: - New patch. target/i386/cpu.c | 18 ++++++------------ target/i386/kvm/kvm-cpu.c | 2 ++ 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index f2c83b4f259c..09180c718d58 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -9788,7 +9788,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); CPUX86State *env = &cpu->env; Error *local_err = NULL; - unsigned requested_lbr_fmt; + unsigned guest_fmt; if (!kvm_enabled()) cpu->enable_pmu = false; @@ -9828,11 +9828,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) * Override env->features[FEAT_PERF_CAPABILITIES].LBR_FMT * with user-provided setting. */ - if (cpu->lbr_fmt != ~PERF_CAP_LBR_FMT) { - if ((cpu->lbr_fmt & PERF_CAP_LBR_FMT) != cpu->lbr_fmt) { - error_setg(errp, "invalid lbr-fmt"); - return; - } + if (cpu->lbr_fmt != -1) { env->features[FEAT_PERF_CAPABILITIES] &= ~PERF_CAP_LBR_FMT; env->features[FEAT_PERF_CAPABILITIES] |= cpu->lbr_fmt; } @@ -9841,9 +9837,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) * vPMU LBR is supported when 1) KVM is enabled 2) Option pmu=on and * 3)vPMU LBR format matches that of host setting. */ - requested_lbr_fmt = - env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT; - if (requested_lbr_fmt && kvm_enabled()) { + guest_fmt = env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT; + if (guest_fmt) { uint64_t host_perf_cap = x86_cpu_get_supported_feature_word(NULL, FEAT_PERF_CAPABILITIES); unsigned host_lbr_fmt = host_perf_cap & PERF_CAP_LBR_FMT; @@ -9852,10 +9847,10 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) error_setg(errp, "vPMU: LBR is unsupported without pmu=on"); return; } - if (requested_lbr_fmt != host_lbr_fmt) { + if (guest_fmt != host_lbr_fmt) { error_setg(errp, "vPMU: the lbr-fmt value (0x%x) does not match " "the host value (0x%x).", - requested_lbr_fmt, host_lbr_fmt); + guest_fmt, host_lbr_fmt); return; } } @@ -10279,7 +10274,6 @@ static void x86_cpu_initfn(Object *obj) object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); object_property_add_alias(obj, "hv-apicv", obj, "hv-avic"); - cpu->lbr_fmt = ~PERF_CAP_LBR_FMT; object_property_add_alias(obj, "lbr_fmt", obj, "lbr-fmt"); if (xcc->model) { diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index 33a8c26bc27c..b4500ab69f82 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -231,6 +231,8 @@ static void kvm_cpu_instance_init(CPUState *cs) kvm_cpu_max_instance_init(cpu); } + cpu->lbr_fmt = -1; + kvm_cpu_xsave_init(); } -- 2.52.0 Factor x86_cpu_pmu_realize() out of x86_cpu_realizefn() to keep the latter focused and easier to follow. Introduce a dedicated helper, x86_cpu_apply_lbr_pebs_fmt(), in preparation for adding PEBS format support without duplicating code. Convert PERF_CAP_LBR_FMT into separate mask and shift macros to allow x86_cpu_apply_lbr_pebs_fmt() to be shared with PEBS format handling. No functional change intended. Signed-off-by: Zide Chen --- V2: - New patch.
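For reviewers, the extraction pattern this refactoring enables looks roughly like the following (illustrative only; the PERF_CAP_PEBS_FMT_* macros arrive in a later patch):

    uint64_t lbr_fmt = (perf_cap >> PERF_CAP_LBR_FMT_SHIFT) &
                       PERF_CAP_LBR_FMT_MASK;    /* bits 5:0 */
    uint64_t pebs_fmt = (perf_cap >> PERF_CAP_PEBS_FMT_SHIFT) &
                        PERF_CAP_PEBS_FMT_MASK;  /* bits 11:8 */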
target/i386/cpu.c | 94 +++++++++++++++++++++++++++++++---------------- target/i386/cpu.h | 3 +- 2 files changed, 65 insertions(+), 32 deletions(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 09180c718d58..54f04adb0b48 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -9781,6 +9781,66 @@ static bool x86_cpu_update_smp_cache_topo(MachineState *ms, X86CPU *cpu, } #endif +static bool x86_cpu_apply_lbr_pebs_fmt(X86CPU *cpu, uint64_t host_perf_cap, + uint64_t user_req, bool is_lbr_fmt, + Error **errp) +{ + CPUX86State *env = &cpu->env; + uint64_t mask; + unsigned shift; + unsigned user_fmt; + const char *name; + + if (is_lbr_fmt) { + mask = PERF_CAP_LBR_FMT_MASK; + shift = PERF_CAP_LBR_FMT_SHIFT; + name = "lbr"; + } else { + return false; + } + + if (user_req != -1) { + env->features[FEAT_PERF_CAPABILITIES] &= ~(mask << shift); + env->features[FEAT_PERF_CAPABILITIES] |= (user_req << shift); + } + + user_fmt = (env->features[FEAT_PERF_CAPABILITIES] >> shift) & mask; + + if (user_fmt) { + unsigned host_fmt = (host_perf_cap >> shift) & mask; + + if (!cpu->enable_pmu) { + error_setg(errp, "vPMU: %s is unsupported without pmu=on", name); + return false; + } + if (user_fmt != host_fmt) { + error_setg(errp, "vPMU: the %s-fmt value (0x%x) does not match " + "the host value (0x%x).", + name, user_fmt, host_fmt); + return false; + } + } + + return true; +} + +static int x86_cpu_pmu_realize(X86CPU *cpu, Error **errp) +{ + uint64_t host_perf_cap = + x86_cpu_get_supported_feature_word(NULL, FEAT_PERF_CAPABILITIES); + + /* + * Override env->features[FEAT_PERF_CAPABILITIES].LBR_FMT + * with user-provided setting. + */ + if (!x86_cpu_apply_lbr_pebs_fmt(cpu, host_perf_cap, + cpu->lbr_fmt, true, errp)) { + return -EINVAL; + } + + return 0; +} + static void x86_cpu_realizefn(DeviceState *dev, Error **errp) { CPUState *cs = CPU(dev); @@ -9788,7 +9848,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); CPUX86State *env = &cpu->env; Error *local_err = NULL; - unsigned guest_fmt; if (!kvm_enabled()) cpu->enable_pmu = false; @@ -9824,35 +9883,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) goto out; } - /* - * Override env->features[FEAT_PERF_CAPABILITIES].LBR_FMT - * with user-provided setting. - */ - if (cpu->lbr_fmt != -1) { - env->features[FEAT_PERF_CAPABILITIES] &= ~PERF_CAP_LBR_FMT; - env->features[FEAT_PERF_CAPABILITIES] |= cpu->lbr_fmt; - } - - /* - * vPMU LBR is supported when 1) KVM is enabled 2) Option pmu=on and - * 3)vPMU LBR format matches that of host setting. 
- */ - guest_fmt = env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT; - if (guest_fmt) { - uint64_t host_perf_cap = - x86_cpu_get_supported_feature_word(NULL, FEAT_PERF_CAPABILITIES); - unsigned host_lbr_fmt = host_perf_cap & PERF_CAP_LBR_FMT; - - if (!cpu->enable_pmu) { - error_setg(errp, "vPMU: LBR is unsupported without pmu=on"); - return; - } - if (guest_fmt != host_lbr_fmt) { - error_setg(errp, "vPMU: the lbr-fmt value (0x%x) does not match " - "the host value (0x%x).", - guest_fmt, host_lbr_fmt); - return; - } + if (x86_cpu_pmu_realize(cpu, errp)) { + return; } if (x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid)) { @@ -10445,7 +10477,7 @@ static const Property x86_cpu_properties[] = { #endif DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), - DEFINE_PROP_UINT64_CHECKMASK("lbr-fmt", X86CPU, lbr_fmt, PERF_CAP_LBR_FMT), + DEFINE_PROP_UINT64_CHECKMASK("lbr-fmt", X86CPU, lbr_fmt, PERF_CAP_LBR_FMT_MASK), DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, HYPERV_SPINLOCK_NEVER_NOTIFY), diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 3e2222e105bc..aa3c24e0ba13 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -420,7 +420,8 @@ typedef enum X86Seg { #define ARCH_CAP_TSX_CTRL_MSR (1<<7) #define MSR_IA32_PERF_CAPABILITIES 0x345 -#define PERF_CAP_LBR_FMT 0x3f +#define PERF_CAP_LBR_FMT_MASK 0x3f +#define PERF_CAP_LBR_FMT_SHIFT 0x0 #define PERF_CAP_FULL_WRITE (1U << 13) #define PERF_CAP_PEBS_BASELINE (1U << 14) -- 2.52.0 As with lbr-fmt, the PEBS record format is a multi-bit field, and target/i386 has no way to expose multi-bit fields as named CPU features. Add a pebs-fmt option to allow users to specify the PEBS format via the command line instead. Since the PEBS state is migrated as part of the vmstate, this option is considered migratable: with it, PEBS can be enabled even when migratable=on.
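Example usage (illustrative; 0x4 is a hypothetical record-format value and must match what the host reports in IA32_PERF_CAPABILITIES[11:8]):

    qemu-system-x86_64 -accel kvm -cpu host,migratable=on,pmu=on,pebs-fmt=0x4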
Signed-off-by: Zide Chen --- V2: - New patch. target/i386/cpu.c | 11 ++++++++++- target/i386/cpu.h | 5 +++++ target/i386/kvm/kvm-cpu.c | 1 + 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 54f04adb0b48..ec6f49916de3 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -9796,7 +9796,9 @@ static bool x86_cpu_apply_lbr_pebs_fmt(X86CPU *cpu, uint64_t host_perf_cap, shift = PERF_CAP_LBR_FMT_SHIFT; name = "lbr"; } else { - return false; + mask = PERF_CAP_PEBS_FMT_MASK; + shift = PERF_CAP_PEBS_FMT_SHIFT; + name = "pebs"; } if (user_req != -1) { @@ -9838,6 +9840,11 @@ static int x86_cpu_pmu_realize(X86CPU *cpu, Error **errp) return -EINVAL; } + if (!x86_cpu_apply_lbr_pebs_fmt(cpu, host_perf_cap, + cpu->pebs_fmt, false, errp)) { + return -EINVAL; + } + return 0; } @@ -10307,6 +10314,7 @@ static void x86_cpu_initfn(Object *obj) object_property_add_alias(obj, "hv-apicv", obj, "hv-avic"); object_property_add_alias(obj, "lbr_fmt", obj, "lbr-fmt"); + object_property_add_alias(obj, "pebs_fmt", obj, "pebs-fmt"); if (xcc->model) { x86_cpu_load_model(cpu, xcc->model); @@ -10478,6 +10486,7 @@ static const Property x86_cpu_properties[] = { DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), DEFINE_PROP_UINT64_CHECKMASK("lbr-fmt", X86CPU, lbr_fmt, PERF_CAP_LBR_FMT_MASK), + DEFINE_PROP_UINT64_CHECKMASK("pebs-fmt", X86CPU, pebs_fmt, PERF_CAP_PEBS_FMT_MASK), DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, HYPERV_SPINLOCK_NEVER_NOTIFY), diff --git a/target/i386/cpu.h b/target/i386/cpu.h index aa3c24e0ba13..5ab107dfa29f 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -422,6 +422,8 @@ typedef enum X86Seg { #define MSR_IA32_PERF_CAPABILITIES 0x345 #define PERF_CAP_LBR_FMT_MASK 0x3f #define PERF_CAP_LBR_FMT_SHIFT 0x0 +#define PERF_CAP_PEBS_FMT_MASK 0xf +#define PERF_CAP_PEBS_FMT_SHIFT 0x8 #define PERF_CAP_FULL_WRITE (1U << 13) #define PERF_CAP_PEBS_BASELINE (1U << 14) @@ -2399,6 +2401,9 @@ struct ArchCPU { */ uint64_t lbr_fmt; + /* PEBS_FMT bits in IA32_PERF_CAPABILITIES MSR. */ + uint64_t pebs_fmt; + /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is * disabled by default to avoid breaking migration between QEMU with * different LMCE configurations. diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index b4500ab69f82..7029629a9d09 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -231,6 +231,7 @@ static void kvm_cpu_instance_init(CPUState *cs) } cpu->lbr_fmt = -1; + cpu->pebs_fmt = -1; kvm_cpu_xsave_init(); } -- 2.52.0 When PMU is disabled, guest CPUID must not advertise Debug Store support. Clear both CPUID.01H:EDX[21] (DS) and CPUID.01H:ECX[2] (DTES64) in this case. Set IA32_MISC_ENABLE[12] (PEBS_UNAVAILABLE) when Debug Store is not exposed to the guest. Note: Do not infer that PEBS is unsupported from IA32_PERF_CAPABILITIES[11:8] (PEBS_FMT) being 0. A value of 0 is a valid PEBS record format on some CPUs. Signed-off-by: Zide Chen --- V2: - New patch.
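To summarize the intended guest-visible state when pmu=off (a sketch for reviewers, bit names per the SDM):

    /* CPUID.01H:EDX[21] (DS)                  = 0  Debug Store hidden
     * CPUID.01H:ECX[2]  (DTES64)              = 0  64-bit DS area hidden
     * IA32_MISC_ENABLE[12] (PEBS unavailable) = 1
     * IA32_MISC_ENABLE[11] (BTS unavailable)  = 1  (set by the first patch)
     */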
target/i386/cpu.c | 6 ++++++ target/i386/cpu.h | 1 + 2 files changed, 7 insertions(+) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index ec6f49916de3..445361ab7a06 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -9180,6 +9180,10 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type) env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; } + if (!(env->features[FEAT_1_EDX] & CPUID_DTS)) { + env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL; + } + memset(env->dr, 0, sizeof(env->dr)); env->dr[6] = DR6_FIXED_1; env->dr[7] = DR7_FIXED_1; @@ -9474,6 +9478,8 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM; } + env->features[FEAT_1_ECX] &= ~CPUID_EXT_DTES64; + env->features[FEAT_1_EDX] &= ~CPUID_DTS; env->features[FEAT_7_0_EDX] &= ~CPUID_7_0_EDX_ARCH_LBR; } diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 5ab107dfa29f..0fecf561173e 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -483,6 +483,7 @@ typedef enum X86Seg { /* Indicates good rep/movs microcode on some processors: */ #define MSR_IA32_MISC_ENABLE_FASTSTRING (1ULL << 0) #define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11) +#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12) #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) #define MSR_IA32_MISC_ENABLE_DEFAULT (MSR_IA32_MISC_ENABLE_FASTSTRING |\ MSR_IA32_MISC_ENABLE_BTS_UNAVAIL) -- 2.52.0