In order to gain the best performance benefit from partitioning the
PMU, use fine-grained traps (FEAT_FGT and FEAT_FGT2) so the guest's
most common PMU register accesses are no longer trapped, removing
that overhead.

Untrapped:
* PMCR_EL0
* PMUSERENR_EL0
* PMSELR_EL0
* PMCCNTR_EL0
* PMCNTEN_EL0
* PMINTEN_EL1
* PMEVCNTRn_EL0

These are safe to untrap because writing MDCR_EL2.HPMN, as this
series does, limits the effect of writes to any of these registers to
the partition of counters 0..HPMN-1. Reads from these registers will
not leak information between guests because all of these registers
are context swapped by a later patch in this series. Reads from these
registers also do not leak any information about the host's hardware
beyond what is promised by PMUv3.

Trapped:
* PMOVS_EL0
* PMEVTYPERn_EL0
* PMCCFILTR_EL0
* PMICNTR_EL0
* PMICFILTR_EL0
* PMCEIDn_EL0
* PMMIR_EL1

PMOVS remains trapped so KVM can track the overflow IRQs that will
need to be injected into the guest.

PMICNTR and PMICFILTR remain trapped because KVM does not handle them
yet.

PMEVTYPERn remains trapped so KVM can limit which events guests can
count, such as disallowing counting at EL2. PMCCFILTR and PMICFILTR
are special cases of the same.

PMCEIDn and PMMIR remain trapped because they can leak information
specific to the host hardware implementation.

NOTE: This patch temporarily forces kvm_vcpu_pmu_is_partitioned() to
be false to prevent partial feature activation and make debugging
easier.

Signed-off-by: Colton Lewis
---
 arch/arm64/include/asm/kvm_pmu.h | 33 ++++++++++++++++++++++
 arch/arm64/kvm/config.c          | 34 ++++++++++++++++++++--
 arch/arm64/kvm/pmu-direct.c     | 48 ++++++++++++++++++++++++++++++++
 3 files changed, 112 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_pmu.h b/arch/arm64/include/asm/kvm_pmu.h
index 8887f39c25e60..7297a697a4a62 100644
--- a/arch/arm64/include/asm/kvm_pmu.h
+++ b/arch/arm64/include/asm/kvm_pmu.h
@@ -96,6 +96,23 @@ u64 kvm_pmu_guest_counter_mask(struct arm_pmu *pmu);
 void kvm_pmu_host_counters_enable(void);
 void kvm_pmu_host_counters_disable(void);
 
+#if !defined(__KVM_NVHE_HYPERVISOR__)
+bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu);
+#else
+static inline bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu)
+{
+        return false;
+}
+
+static inline bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
+{
+        return false;
+}
+#endif
+u64 kvm_pmu_fgt_bits(void);
+u64 kvm_pmu_fgt2_bits(void);
+
 /*
  * Updates the vcpu's view of the pmu events for this cpu.
  * Must be called before every vcpu run after disabling interrupts, to ensure
@@ -135,6 +152,22 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 {
         return 0;
 }
+static inline bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu)
+{
+        return false;
+}
+static inline bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
+{
+        return false;
+}
+static inline u64 kvm_pmu_fgt_bits(void)
+{
+        return 0;
+}
+static inline u64 kvm_pmu_fgt2_bits(void)
+{
+        return 0;
+}
 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                              u64 select_idx, u64 val) {}
 static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index 24bb3f36e9d59..064dc6aa06f76 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -6,6 +6,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_pmu.h>
 #include <asm/kvm_nested.h>
 #include <asm/sysreg.h>
 
@@ -1489,12 +1490,39 @@ static void __compute_hfgwtr(struct kvm_vcpu *vcpu)
                 *vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1;
 }
 
+static void __compute_hdfgrtr(struct kvm_vcpu *vcpu)
+{
+        __compute_fgt(vcpu, HDFGRTR_EL2);
+
+        if (kvm_vcpu_pmu_use_fgt(vcpu))
+                *vcpu_fgt(vcpu, HDFGRTR_EL2) |= kvm_pmu_fgt_bits();
+}
+
 static void __compute_hdfgwtr(struct kvm_vcpu *vcpu)
 {
         __compute_fgt(vcpu, HDFGWTR_EL2);
 
         if (is_hyp_ctxt(vcpu))
                 *vcpu_fgt(vcpu, HDFGWTR_EL2) |= HDFGWTR_EL2_MDSCR_EL1;
+
+        if (kvm_vcpu_pmu_use_fgt(vcpu))
+                *vcpu_fgt(vcpu, HDFGWTR_EL2) |= kvm_pmu_fgt_bits();
+}
+
+static void __compute_hdfgrtr2(struct kvm_vcpu *vcpu)
+{
+        __compute_fgt(vcpu, HDFGRTR2_EL2);
+
+        if (kvm_vcpu_pmu_use_fgt(vcpu))
+                *vcpu_fgt(vcpu, HDFGRTR2_EL2) |= kvm_pmu_fgt2_bits();
+}
+
+static void __compute_hdfgwtr2(struct kvm_vcpu *vcpu)
+{
+        __compute_fgt(vcpu, HDFGWTR2_EL2);
+
+        if (kvm_vcpu_pmu_use_fgt(vcpu))
+                *vcpu_fgt(vcpu, HDFGWTR2_EL2) |= kvm_pmu_fgt2_bits();
 }
 
 void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
@@ -1505,7 +1533,7 @@ void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
         __compute_fgt(vcpu, HFGRTR_EL2);
         __compute_hfgwtr(vcpu);
         __compute_fgt(vcpu, HFGITR_EL2);
-        __compute_fgt(vcpu, HDFGRTR_EL2);
+        __compute_hdfgrtr(vcpu);
         __compute_hdfgwtr(vcpu);
         __compute_fgt(vcpu, HAFGRTR_EL2);
 
@@ -1515,6 +1543,6 @@ void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu)
         __compute_fgt(vcpu, HFGRTR2_EL2);
         __compute_fgt(vcpu, HFGWTR2_EL2);
         __compute_fgt(vcpu, HFGITR2_EL2);
-        __compute_fgt(vcpu, HDFGRTR2_EL2);
-        __compute_fgt(vcpu, HDFGWTR2_EL2);
+        __compute_hdfgrtr2(vcpu);
+        __compute_hdfgwtr2(vcpu);
 }
diff --git a/arch/arm64/kvm/pmu-direct.c b/arch/arm64/kvm/pmu-direct.c
index d5de7fdd059f4..4dd160c878862 100644
--- a/arch/arm64/kvm/pmu-direct.c
+++ b/arch/arm64/kvm/pmu-direct.c
@@ -43,6 +43,54 @@ bool kvm_pmu_is_partitioned(struct arm_pmu *pmu)
                 pmu->hpmn_max <= *host_data_ptr(nr_event_counters);
 }
 
+/**
+ * kvm_vcpu_pmu_is_partitioned() - Determine if given VCPU has a partitioned PMU
+ * @vcpu: Pointer to kvm_vcpu struct
+ *
+ * Determine if given VCPU has a partitioned PMU by extracting that
+ * field and passing it to :c:func:`kvm_pmu_is_partitioned`
+ *
+ * Return: True if the VCPU PMU is partitioned, false otherwise
+ */
+bool kvm_vcpu_pmu_is_partitioned(struct kvm_vcpu *vcpu)
+{
+        return kvm_pmu_is_partitioned(vcpu->kvm->arch.arm_pmu) &&
+                false;
+}
+
+/**
+ * kvm_vcpu_pmu_use_fgt() - Determine if we can use FGT
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Determine if we can use FGT for direct access to registers. We can
+ * if capabilities permit the number of guest counters requested.
+ *
+ * Return: True if we can use FGT, false otherwise
+ */
+bool kvm_vcpu_pmu_use_fgt(struct kvm_vcpu *vcpu)
+{
+        u8 hpmn = vcpu->kvm->arch.nr_pmu_counters;
+
+        return kvm_vcpu_pmu_is_partitioned(vcpu) &&
+                cpus_have_final_cap(ARM64_HAS_FGT) &&
+                (hpmn != 0 || cpus_have_final_cap(ARM64_HAS_HPMN0));
+}
+
+u64 kvm_pmu_fgt_bits(void)
+{
+        return HDFGRTR_EL2_PMOVS
+                | HDFGRTR_EL2_PMCCFILTR_EL0
+                | HDFGRTR_EL2_PMEVTYPERn_EL0
+                | HDFGRTR_EL2_PMCEIDn_EL0
+                | HDFGRTR_EL2_PMMIR_EL1;
+}
+
+u64 kvm_pmu_fgt2_bits(void)
+{
+        return HDFGRTR2_EL2_nPMICFILTR_EL0
+                | HDFGRTR2_EL2_nPMICNTR_EL0;
+}
+
 /**
  * kvm_pmu_host_counter_mask() - Compute bitmask of host-reserved counters
  * @pmu: Pointer to arm_pmu struct
-- 
2.52.0.239.gd5f0c6e74e-goog
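
P.S. For anyone who wants to poke at the HPMN arithmetic outside the
kernel, here is a minimal user-space sketch of the invariant the
"Untrapped" list relies on: with MDCR_EL2.HPMN = h, an untrapped
guest write through a PMCNTEN/PMOVS-style mask register can only
touch counters 0..h-1, while the host keeps h..N-1 plus the fixed
cycle counter. The helper names and the bit-31 cycle-counter position
below are illustrative stand-ins, not the series' actual
kvm_pmu_*_counter_mask() implementations.

/* hpmn.c - illustrative only, not part of the patch */
#include <stdint.h>
#include <stdio.h>

/* PMCCNTR_EL0 occupies bit 31 in PMCNTENSET-style mask registers. */
#define CYCLE_COUNTER_BIT ((uint64_t)1 << 31)

/* Guest partition: event counters 0..HPMN-1. */
static uint64_t guest_counter_mask(unsigned int hpmn)
{
        return hpmn ? (((uint64_t)1 << hpmn) - 1) : 0;
}

/* Host partition: counters HPMN..N-1 plus the fixed cycle counter. */
static uint64_t host_counter_mask(unsigned int nr_counters, unsigned int hpmn)
{
        uint64_t all = ((uint64_t)1 << nr_counters) - 1;

        return (all & ~guest_counter_mask(hpmn)) | CYCLE_COUNTER_BIT;
}

int main(void)
{
        /* e.g. 6 event counters on the PMU, 4 given to the guest */
        unsigned int nr = 6, hpmn = 4;

        printf("guest mask: 0x%08llx\n",
               (unsigned long long)guest_counter_mask(hpmn));
        printf("host mask:  0x%08llx\n",
               (unsigned long long)host_counter_mask(nr, hpmn));
        return 0;
}

Build with "cc -o hpmn hpmn.c" and vary hpmn: every bit a guest write
can set stays inside guest_counter_mask(), which is why the registers
above are safe to leave untrapped once HPMN is in place.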