Set the guest's view of the GCIE field to IMP when running a GICv5 VM,
NI otherwise. Reject any writes to the register that try to do anything
but set GCIE to IMP when running a GICv5 VM.

As part of this change, we also introduce vgic_is_v5(kvm), in order to
check if the guest is a GICv5-native VM. We're also required to extend
vgic_is_v3_compat to check for the actual vgic_model. This has one
potential issue - if any of the vgic_is_v* checks are used prior to
setting the vgic_model (that is, before kvm_vgic_create) then
vgic_model will be set to 0, which can result in a false positive.

Co-authored-by: Timothy Hayes
Signed-off-by: Timothy Hayes
Signed-off-by: Sascha Bischoff
---
 arch/arm64/kvm/sys_regs.c  | 39 ++++++++++++++++++++++++++++++--------
 arch/arm64/kvm/vgic/vgic.h | 15 ++++++++++++++-
 2 files changed, 45 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c8fd7c6a12a13..a065f8939bc8f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1758,6 +1758,7 @@ static u8 pmuver_to_perfmon(u8 pmuver)
 
 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
 static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
+static u64 sanitise_id_aa64pfr2_el1(const struct kvm_vcpu *vcpu, u64 val);
 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
 
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
@@ -1783,10 +1784,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 		val = sanitise_id_aa64pfr1_el1(vcpu, val);
 		break;
 	case SYS_ID_AA64PFR2_EL1:
-		val &= ID_AA64PFR2_EL1_FPMR |
-		       (kvm_has_mte(vcpu->kvm) ?
-			ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
-			0);
+		val = sanitise_id_aa64pfr2_el1(vcpu, val);
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
@@ -2024,6 +2022,20 @@ static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
 	return val;
 }
 
+static u64 sanitise_id_aa64pfr2_el1(const struct kvm_vcpu *vcpu, u64 val)
+{
+	val &= ID_AA64PFR2_EL1_FPMR |
+	       (kvm_has_mte(vcpu->kvm) ?
+		ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY : 0);
+
+	if (vgic_is_v5(vcpu->kvm)) {
+		val &= ~ID_AA64PFR2_EL1_GCIE_MASK;
+		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR2_EL1, GCIE, IMP);
+	}
+
+	return val;
+}
+
 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
 {
 	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
@@ -2221,6 +2233,16 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
 	return set_id_reg(vcpu, rd, user_val);
 }
 
+static int set_id_aa64pfr2_el1(struct kvm_vcpu *vcpu,
+			       const struct sys_reg_desc *rd, u64 user_val)
+{
+	if (vgic_is_v5(vcpu->kvm) &&
+	    FIELD_GET(ID_AA64PFR2_EL1_GCIE_MASK, user_val) != ID_AA64PFR2_EL1_GCIE_IMP)
+		return -EINVAL;
+
+	return set_id_reg(vcpu, rd, user_val);
+}
+
 /*
  * Allow userspace to de-feature a stage-2 translation granule but prevent it
  * from claiming the impossible.
@@ -3202,10 +3224,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 				       ID_AA64PFR1_EL1_RES0 |
 				       ID_AA64PFR1_EL1_MPAM_frac |
 				       ID_AA64PFR1_EL1_MTE)),
-	ID_WRITABLE(ID_AA64PFR2_EL1,
-		    ID_AA64PFR2_EL1_FPMR |
-		    ID_AA64PFR2_EL1_MTEFAR |
-		    ID_AA64PFR2_EL1_MTESTOREONLY),
+	ID_FILTERED(ID_AA64PFR2_EL1, id_aa64pfr2_el1,
+		    ~(ID_AA64PFR2_EL1_FPMR |
+		      ID_AA64PFR2_EL1_MTEFAR |
+		      ID_AA64PFR2_EL1_MTESTOREONLY |
+		      ID_AA64PFR2_EL1_GCIE)),
 	ID_UNALLOCATED(4,3),
 	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
 	ID_HIDDEN(ID_AA64SMFR0_EL1),
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 5f0fc96b4dc29..bf5bae023751b 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -455,8 +455,16 @@ void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);
 
 static inline bool vgic_is_v3_compat(struct kvm *kvm)
 {
+	/*
+	 * We need to be careful here. This could be called early,
+	 * which means that there is no vgic_model set. For the time
+	 * being, fall back to assuming that we're trying to run a
+	 * legacy VM in that case, which keeps existing software
+	 * happy. Long term, this will need to be revisited a little.
+	 */
 	return cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF) &&
-		kvm_vgic_global_state.has_gcie_v3_compat;
+		kvm_vgic_global_state.has_gcie_v3_compat &&
+		kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V5;
 }
 
 static inline bool vgic_is_v3(struct kvm *kvm)
@@ -464,6 +472,11 @@ static inline bool vgic_is_v3(struct kvm *kvm)
 	return kvm_vgic_global_state.type == VGIC_V3 || vgic_is_v3_compat(kvm);
 }
 
+static inline bool vgic_is_v5(struct kvm *kvm)
+{
+	return kvm_vgic_global_state.type == VGIC_V5 && !vgic_is_v3_compat(kvm);
+}
+
 int vgic_its_debug_init(struct kvm_device *dev);
 void vgic_its_debug_destroy(struct kvm_device *dev);
 
-- 
2.34.1
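
Note for review (not part of the patch): a minimal, hedged userspace sketch of what the
change means for a VMM. On a GICv5 VM the sanitised ID_AA64PFR2_EL1 value reports GCIE
as IMP, and set_id_aa64pfr2_el1() makes KVM_SET_ONE_REG fail with -EINVAL for any value
whose GCIE field is not IMP. The sketch assumes an already-initialised vcpu fd; the
GCIE_SHIFT/GCIE_MASK constants and the check_gcie() helper are illustrative placeholders
(the authoritative field definition is the kernel's generated ID_AA64PFR2_EL1_GCIE),
while ARM64_SYS_REG(), struct kvm_one_reg and the KVM_{GET,SET}_ONE_REG ioctls are
existing KVM UAPI.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64PFR2_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=4, op2=2 */
#define ID_AA64PFR2_EL1_ID	ARM64_SYS_REG(3, 0, 0, 4, 2)

/* Illustrative placeholders; take the real field position from the Arm ARM. */
#define GCIE_SHIFT	28
#define GCIE_MASK	(UINT64_C(0xf) << GCIE_SHIFT)

static int check_gcie(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = ID_AA64PFR2_EL1_ID,
		.addr = (uintptr_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -errno;

	/* On a GICv5 VM the sanitised view reports GCIE as IMP (nonzero). */
	printf("ID_AA64PFR2_EL1.GCIE = %llu\n",
	       (unsigned long long)((val & GCIE_MASK) >> GCIE_SHIFT));

	/* Trying to downgrade GCIE to NI is expected to fail with -EINVAL. */
	val &= ~GCIE_MASK;
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) && errno == EINVAL)
		printf("GCIE downgrade rejected, as expected\n");

	return 0;
}

For a non-GICv5 VM the sanitised value leaves GCIE as NI, matching the commit message.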