Introduce a per-vendor PMU callback for reprogramming counters with a mediated PMU, and register a callback on AMD to disable a counter based on the vCPU's setting of the Host-Only or Guest-Only EVENT_SELECT bits (if EFER.SVME is set). In other words, disable the counter if Host-Only is set and is_guest_mode(), or Guest-Only is set and !is_guest_mode(). kvm_mediated_pmu_refresh_event_filter() ensures that ARCH_PERFMON_EVENTSEL_ENABLE is set for any enabled counters before the mediated_reprogram_counter() callback runs, and kvm_mediated_pmu_load() writes the updated value of eventsel_hw to the appropriate MSR after the counters are reprogrammed through KVM_REQ_PMU. Note that the behavior is equivalent if both bits are cleared or if both bits are set: events are counted regardless of host/guest state (from L1's perspective), so KVM should always keep the counter enabled unless exactly one of the bits is set. It's a bit unnatural to check whether both bits are set or cleared, then check EFER.SVME, then go back to checking which bit is set, but this ordering will be needed by following changes that will track counters with only one Host-Only/Guest-Only bit set, regardless of EFER.SVME. The Host-Only and Guest-Only bits are currently reserved, so this change is a no-op, but the bits will be allowed with the mediated PMU in a following change once fully supported. 
Originally-by: Jim Mattson Signed-off-by: Yosry Ahmed --- arch/x86/include/asm/kvm-x86-pmu-ops.h | 1 + arch/x86/include/asm/perf_event.h | 2 ++ arch/x86/kvm/pmu.c | 1 + arch/x86/kvm/pmu.h | 1 + arch/x86/kvm/svm/pmu.c | 29 ++++++++++++++++++++++++++ 5 files changed, 34 insertions(+) diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h index d5452b3433b7d..11ce0012b8301 100644 --- a/arch/x86/include/asm/kvm-x86-pmu-ops.h +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h @@ -27,6 +27,7 @@ KVM_X86_PMU_OP_OPTIONAL(cleanup) KVM_X86_PMU_OP_OPTIONAL(write_global_ctrl) KVM_X86_PMU_OP(mediated_load) KVM_X86_PMU_OP(mediated_put) +KVM_X86_PMU_OP_OPTIONAL(mediated_reprogram_counter) #endif #undef KVM_X86_PMU_OP diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ff5acb8b199b0..5961c002b28eb 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -60,6 +60,8 @@ #define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36) #define AMD64_EVENTSEL_GUESTONLY (1ULL << 40) #define AMD64_EVENTSEL_HOSTONLY (1ULL << 41) +#define AMD64_EVENTSEL_HOST_GUEST_MASK \ + (AMD64_EVENTSEL_HOSTONLY | AMD64_EVENTSEL_GUESTONLY) #define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37 #define AMD64_EVENTSEL_INT_CORE_SEL_MASK \ diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index d6ac3c55fce55..e35d598f809a2 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -559,6 +559,7 @@ static int reprogram_counter(struct kvm_pmc *pmc) if (kvm_vcpu_has_mediated_pmu(pmu_to_vcpu(pmu))) { kvm_mediated_pmu_refresh_event_filter(pmc); + kvm_pmu_call(mediated_reprogram_counter)(pmc); return 0; } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index b1f2418e960ac..bdbe0456049d0 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -40,6 +40,7 @@ struct kvm_pmu_ops { bool (*is_mediated_pmu_supported)(struct x86_pmu_capability *host_pmu); void (*mediated_load)(struct kvm_vcpu *vcpu); void (*mediated_put)(struct 
kvm_vcpu *vcpu); + void (*mediated_reprogram_counter)(struct kvm_pmc *pmc); void (*write_global_ctrl)(u64 global_ctrl); const u64 EVENTSEL_EVENT; diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 7aa298eeb0721..60931dfd624b2 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -260,6 +260,34 @@ static void amd_mediated_pmu_put(struct kvm_vcpu *vcpu) wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, pmu->global_status); } +static void amd_mediated_pmu_handle_host_guest_bits(struct kvm_pmc *pmc) +{ + struct kvm_vcpu *vcpu = pmc->vcpu; + u64 host_guest_bits; + + if (!(pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE)) + return; + + /* Count all events if both bits are cleared or both bits are set */ + host_guest_bits = pmc->eventsel & AMD64_EVENTSEL_HOST_GUEST_MASK; + if (hweight64(host_guest_bits) != 1) + return; + + /* Host-Only and Guest-Only are ignored if EFER.SVME == 0 */ + if (!(vcpu->arch.efer & EFER_SVME)) + return; + + if (!!(host_guest_bits & AMD64_EVENTSEL_GUESTONLY) == is_guest_mode(vcpu)) + return; + + pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE; +} + +static void amd_mediated_pmu_reprogram_counter(struct kvm_pmc *pmc) +{ + amd_mediated_pmu_handle_host_guest_bits(pmc); +} + struct kvm_pmu_ops amd_pmu_ops __initdata = { .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc, .msr_idx_to_pmc = amd_msr_idx_to_pmc, @@ -273,6 +301,7 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = { .is_mediated_pmu_supported = amd_pmu_is_mediated_pmu_supported, .mediated_load = amd_mediated_pmu_load, .mediated_put = amd_mediated_pmu_put, + .mediated_reprogram_counter = amd_mediated_pmu_reprogram_counter, .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT, .MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS, -- 2.53.0.1018.g2bb0e51243-goog