Update kvm_vgic_create to create a vgic_v5 device. When creating a vgic,
FEAT_GCIE in ID_AA64PFR2_EL1 is only exposed to vgic_v5-based guests, and
is hidden otherwise. GIC in ID_AA64PFR0_EL1 is never exposed for a
vgic_v5 guest.

When initialising a vgic_v5, skip kvm_vgic_dist_init as GICv5 doesn't
have a distributor. The current vgic_v5 implementation only supports
PPIs, so no SPIs are initialised either.

The current vgic_v5 support doesn't extend to nested guests, so
vgic_v5_init fails the init for a nested guest.

As the current vgic_v5 doesn't require any resources to be mapped,
vgic_v5_map_resources simply checks that the vgic has indeed been
initialised. Again, this will change as more GICv5 support is merged in.

Signed-off-by: Sascha Bischoff
---
(A minimal userspace sketch of creating the new device type follows
after the patch, for illustration only.)

 arch/arm64/kvm/vgic/vgic-init.c | 51 ++++++++++++++++++++++-----------
 arch/arm64/kvm/vgic/vgic-v5.c   | 26 +++++++++++++++++
 arch/arm64/kvm/vgic/vgic.h      |  2 ++
 include/kvm/arm_vgic.h          |  1 +
 4 files changed, 63 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 51f4443cebcef..69e8746516799 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -66,12 +66,12 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);
  * or through the generic KVM_CREATE_DEVICE API ioctl.
  * irqchip_in_kernel() tells you if this function succeeded or not.
  * @kvm: kvm struct pointer
- * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
+ * @type: KVM_DEV_TYPE_ARM_VGIC_V[235]
  */
 int kvm_vgic_create(struct kvm *kvm, u32 type)
 {
 	struct kvm_vcpu *vcpu;
-	u64 aa64pfr0, pfr1;
+	u64 aa64pfr0, aa64pfr2, pfr1;
 	unsigned long i;
 	int ret;
 
@@ -132,8 +132,11 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 
 	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
 		kvm->max_vcpus = VGIC_V2_MAX_CPUS;
-	else
+	else if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
 		kvm->max_vcpus = VGIC_V3_MAX_CPUS;
+	else if (type == KVM_DEV_TYPE_ARM_VGIC_V5)
+		kvm->max_vcpus = min(VGIC_V5_MAX_CPUS,
+				     kvm_vgic_global_state.max_gic_vcpus);
 
 	if (atomic_read(&kvm->online_vcpus) > kvm->max_vcpus) {
 		ret = -E2BIG;
@@ -163,17 +166,21 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	}
 
 	aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
+	aa64pfr2 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR2_EL1) & ~ID_AA64PFR2_EL1_GCIE;
 	pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
 
 	if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
 		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
-	} else {
+	} else if (type == KVM_DEV_TYPE_ARM_VGIC_V3) {
 		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 		aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
 		pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
+	} else {
+		aa64pfr2 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR2_EL1, GCIE, IMP);
 	}
 
 	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
+	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR2_EL1, aa64pfr2);
 	kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);
 
 	if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
@@ -420,20 +427,26 @@ int vgic_init(struct kvm *kvm)
 	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
 		return -EBUSY;
 
-	/* freeze the number of spis */
-	if (!dist->nr_spis)
-		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;
+	if (!vgic_is_v5(kvm)) {
+		/* freeze the number of spis */
+		if (!dist->nr_spis)
+			dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;
 
-	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
-	if (ret)
-		goto out;
+		ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
+		if (ret)
+			goto out;
 
-	/*
-	 * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs,
-	 * vLPIs) is supported.
-	 */
-	if (vgic_supports_direct_irqs(kvm)) {
-		ret = vgic_v4_init(kvm);
+		/*
+		 * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs,
+		 * vLPIs) is supported.
+		 */
+		if (vgic_supports_direct_irqs(kvm)) {
+			ret = vgic_v4_init(kvm);
+			if (ret)
+				goto out;
+		}
+	} else {
+		ret = vgic_v5_init(kvm);
 		if (ret)
 			goto out;
 	}
@@ -610,9 +623,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
 		ret = vgic_v2_map_resources(kvm);
 		type = VGIC_V2;
-	} else {
+	} else if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
 		ret = vgic_v3_map_resources(kvm);
 		type = VGIC_V3;
+	} else {
+		ret = vgic_v5_map_resources(kvm);
+		type = VGIC_V5;
+		goto out;
 	}
 
 	if (ret)
diff --git a/arch/arm64/kvm/vgic/vgic-v5.c b/arch/arm64/kvm/vgic/vgic-v5.c
index 35740e88b3591..3567754ae1459 100644
--- a/arch/arm64/kvm/vgic/vgic-v5.c
+++ b/arch/arm64/kvm/vgic/vgic-v5.c
@@ -54,6 +54,32 @@ int vgic_v5_probe(const struct gic_kvm_info *info)
 	return 0;
 }
 
+int vgic_v5_init(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long idx;
+
+	if (vgic_initialized(kvm))
+		return 0;
+
+	kvm_for_each_vcpu(idx, vcpu, kvm) {
+		if (vcpu_has_nv(vcpu)) {
+			kvm_err("Nested GICv5 VMs are currently unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int vgic_v5_map_resources(struct kvm *kvm)
+{
+	if (!vgic_initialized(kvm))
+		return -EBUSY;
+
+	return 0;
+}
+
 static u32 vgic_v5_get_effective_priority_mask(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 4b3a1e7ca3fb4..66698973b2872 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -385,6 +385,8 @@ void vgic_debug_init(struct kvm *kvm);
 void vgic_debug_destroy(struct kvm *kvm);
 
 int vgic_v5_probe(const struct gic_kvm_info *info);
+int vgic_v5_init(struct kvm *kvm);
+int vgic_v5_map_resources(struct kvm *kvm);
 void vgic_v5_set_ppi_ops(struct vgic_irq *irq);
 int vgic_v5_set_ppi_dvi(struct kvm_vcpu *vcpu, u32 irq, bool dvi);
 bool vgic_v5_has_pending_ppi(struct kvm_vcpu *vcpu);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 5a46fe3c35b5c..099b8ac02999e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -21,6 +21,7 @@
 #include
 #include
 
+#define VGIC_V5_MAX_CPUS	512
 #define VGIC_V3_MAX_CPUS	512
 #define VGIC_V2_MAX_CPUS	8
 #define VGIC_NR_IRQS_LEGACY	256
-- 
2.34.1
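
For illustration, a minimal userspace sketch of the flow this patch enables:
a VMM asks for the new device type via KVM_CREATE_DEVICE, which lands in
kvm_vgic_create(). This is not part of the patch; it assumes a <linux/kvm.h>
built from this series (which provides KVM_DEV_TYPE_ARM_VGIC_V5) and skips
vCPU setup and most error handling.

/*
 * Userspace sketch only, not kernel code. Requires uapi headers from this
 * series; released kernels do not define KVM_DEV_TYPE_ARM_VGIC_V5.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd, vm_fd;
	struct kvm_create_device gic = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V5,	/* new device type */
	};

	kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm_fd < 0)
		return 1;

	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0)
		return 1;

	/*
	 * This reaches kvm_vgic_create(): GCIE is set in the VM's view of
	 * ID_AA64PFR2_EL1 and max_vcpus is capped for GICv5.
	 */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &gic) < 0) {
		perror("KVM_CREATE_DEVICE(ARM_VGIC_V5)");
		return 1;
	}

	printf("vgic-v5 device fd = %d\n", gic.fd);
	return 0;
}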