For legacy guests, host-initiated accesses to PMC-related MSRs always use MSR_F15H_PERF_CTLx and MSR_F15H_PERF_CTRx rather than MSR_K7_EVNTSELx and MSR_K7_PERFCTRx because of how GP_EVENTSEL_BASE and GP_COUNTER_BASE are set in the PMU ops. In such cases, translate the MSR index to the equivalent legacy MSR, since get_gp_pmc_amd() would otherwise return NULL.

Signed-off-by: Sandipan Das
---
 arch/x86/kvm/svm/pmu.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 63d177df4daf..c893c1bef131 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -124,6 +124,16 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	return amd_msr_idx_to_pmc(vcpu, msr);
 }
 
+static inline u32 amd_pmu_adjust_msr_idx(struct kvm_vcpu *vcpu, u32 msr)
+{
+	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE) &&
+	    msr >= MSR_F15H_PERF_CTL0 && msr <= MSR_F15H_PERF_CTR5)
+		msr = ((msr & 0x1) ? MSR_K7_PERFCTR0 : MSR_K7_EVNTSEL0) +
+		      ((msr - MSR_F15H_PERF_CTL0) / 2);
+
+	return msr;
+}
+
 static int amd_virtualized_pmu_get_msr(struct kvm_vcpu *vcpu,
 				       struct msr_data *msr_info)
 {
@@ -142,6 +152,8 @@ static int amd_virtualized_pmu_get_msr(struct kvm_vcpu *vcpu,
 		return 0;
 	}
 
+	msr = amd_pmu_adjust_msr_idx(vcpu, msr);
+
 	/* MSR_PERFCTRn */
 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
 	if (pmc) {
@@ -203,6 +215,8 @@ static int amd_virtualized_pmu_set_msr(struct kvm_vcpu *vcpu,
 		return 0;
 	}
 
+	msr = amd_pmu_adjust_msr_idx(vcpu, msr);
+
 	/* MSR_PERFCTRn */
 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
 	if (pmc) {
-- 
2.43.0
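
Not part of the patch, but a quick sanity check of the index arithmetic above: the F15H event-select/counter MSRs are interleaved (CTL0 at 0xc0010200, CTR0 at 0xc0010201, and so on), while the legacy K7 MSRs sit in two contiguous blocks (EVNTSEL0-3 at 0xc0010000-3, PERFCTR0-3 at 0xc0010004-7). A minimal userspace sketch, assuming only those architectural MSR addresses and replicating the same translation (without the guest CPUID check), is:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural MSR addresses, as defined in arch/x86/include/asm/msr-index.h. */
#define MSR_K7_EVNTSEL0		0xc0010000
#define MSR_K7_PERFCTR0		0xc0010004
#define MSR_F15H_PERF_CTL0	0xc0010200
#define MSR_F15H_PERF_CTR0	0xc0010201
#define MSR_F15H_PERF_CTR5	0xc001020b

/* Same mapping as amd_pmu_adjust_msr_idx(), minus the PERFCTR_CORE check. */
static uint32_t adjust_msr_idx(uint32_t msr)
{
	if (msr >= MSR_F15H_PERF_CTL0 && msr <= MSR_F15H_PERF_CTR5)
		msr = ((msr & 0x1) ? MSR_K7_PERFCTR0 : MSR_K7_EVNTSEL0) +
		      ((msr - MSR_F15H_PERF_CTL0) / 2);

	return msr;
}

int main(void)
{
	unsigned int i;

	/* CTLn/CTRn for n = 0..3 must land on EVNTSELn/PERFCTRn. */
	for (i = 0; i < 4; i++) {
		assert(adjust_msr_idx(MSR_F15H_PERF_CTL0 + 2 * i) == MSR_K7_EVNTSEL0 + i);
		assert(adjust_msr_idx(MSR_F15H_PERF_CTR0 + 2 * i) == MSR_K7_PERFCTR0 + i);
	}

	printf("translation OK\n");
	return 0;
}

Indices 4 and 5 intentionally fall outside the legacy range; for a legacy guest get_gp_pmc_amd() will still return NULL for those, so only counters 0-3 are reachable.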