We only want to expose a subset of the PPIs to a guest. If a PPI does
not have an owner, it is not being actively driven by a device. The
SW_PPI is a special case, as userspace is likely to want to inject it.
Therefore, just prior to running the guest for the first time, we need
to finalize the PPIs. A mask is generated which, combined with trapping
the guest's PPI accesses, allows the guest's view of the PPIs to be
filtered.

Signed-off-by: Sascha Bischoff
---
 arch/arm64/kvm/arm.c          |  4 +++
 arch/arm64/kvm/vgic/vgic-v5.c | 60 +++++++++++++++++++++++++++++++++++
 include/kvm/arm_vgic.h        |  9 ++++++
 3 files changed, 73 insertions(+)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index b7cf9d86aabb7..94f8d13ab3b58 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -888,6 +888,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		return ret;
 	}
 
+	ret = vgic_v5_finalize_ppi_state(kvm);
+	if (ret)
+		return ret;
+
 	if (is_protected_kvm_enabled()) {
 		ret = pkvm_create_hyp_vm(kvm);
 		if (ret)
diff --git a/arch/arm64/kvm/vgic/vgic-v5.c b/arch/arm64/kvm/vgic/vgic-v5.c
index c7ecc4f40b1e5..f1fa63e67c1f6 100644
--- a/arch/arm64/kvm/vgic/vgic-v5.c
+++ b/arch/arm64/kvm/vgic/vgic-v5.c
@@ -81,6 +81,66 @@ static u32 vgic_v5_get_effective_priority_mask(struct kvm_vcpu *vcpu)
 	return priority_mask;
 }
 
+static int vgic_v5_finalize_state(struct kvm_vcpu *vcpu)
+{
+	if (!ppi_caps)
+		return -ENXIO;
+
+	vcpu->arch.vgic_cpu.vgic_v5.vgic_ppi_mask[0] = 0;
+	vcpu->arch.vgic_cpu.vgic_v5.vgic_ppi_mask[1] = 0;
+	vcpu->arch.vgic_cpu.vgic_v5.vgic_ppi_hmr[0] = 0;
+	vcpu->arch.vgic_cpu.vgic_v5.vgic_ppi_hmr[1] = 0;
+	for (int i = 0; i < VGIC_V5_NR_PRIVATE_IRQS; ++i) {
+		int reg = i / 64;
+		u64 bit = BIT_ULL(i % 64);
+		struct vgic_irq *irq = &vcpu->arch.vgic_cpu.private_irqs[i];
+
+		raw_spin_lock(&irq->irq_lock);
+
+		/*
+		 * We only expose PPIs with an owner or the SW_PPI to
+		 * the guest.
+		 */
+		if (!irq->owner && irq->intid != GICV5_SW_PPI)
+			goto unlock;
+
+		/*
+		 * If the PPI isn't implemented, we can't pass it
+		 * through to a guest anyhow.
+		 */
+		if (!(ppi_caps->impl_ppi_mask[reg] & bit))
+			goto unlock;
+
+		vcpu->arch.vgic_cpu.vgic_v5.vgic_ppi_mask[reg] |= bit;
+
+		if (irq->config == VGIC_CONFIG_LEVEL)
+			vcpu->arch.vgic_cpu.vgic_v5.vgic_ppi_hmr[reg] |= bit;
+
+unlock:
+		raw_spin_unlock(&irq->irq_lock);
+	}
+
+	return 0;
+}
+
+int vgic_v5_finalize_ppi_state(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long c;
+	int ret;
+
+	if (!vgic_is_v5(kvm))
+		return 0;
+
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		ret = vgic_v5_finalize_state(vcpu);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static bool vgic_v5_ppi_set_pending_state(struct kvm_vcpu *vcpu,
 					  struct vgic_irq *irq)
 {
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index b5180edbd1165..dc7bac0226b3c 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -455,6 +455,13 @@ struct vgic_v5_cpu_if {
 	u64 vgic_ich_ppi_enabler_exit[2];
 	u64 vgic_ppi_pendr_exit[2];
 
+	/*
+	 * We only expose a subset of PPIs to the guest. This subset
+	 * is the intersection of the PPIs that are actually
+	 * implemented and those we choose to expose.
+	 */
+	u64 vgic_ppi_mask[2];
+
 	/*
 	 * The ICSR is re-used across host and guest, and hence it needs to be
 	 * saved/restored. Only one copy is required as the host should block
@@ -592,6 +599,8 @@ int vgic_v4_load(struct kvm_vcpu *vcpu);
 void vgic_v4_commit(struct kvm_vcpu *vcpu);
 int vgic_v4_put(struct kvm_vcpu *vcpu);
 
+int vgic_v5_finalize_ppi_state(struct kvm *kvm);
+
 bool vgic_state_is_nested(struct kvm_vcpu *vcpu);
 
 /* CPU HP callbacks */
-- 
2.34.1
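Not part of the patch: for reviewers who want to see the intent of the
exposure mask in isolation, below is a minimal, stand-alone user-space
sketch of how a mask generated at finalization time could be combined
with trapped PPI register accesses to filter the guest's view, as the
commit message describes. It only models the idea; all names in it
(ppi_state, finalize_ppi_mask, guest_read_ppi_reg, guest_write_ppi_reg)
are hypothetical and do not exist in KVM.

/*
 * Stand-alone model of the trap-and-filter scheme described in the
 * commit message. Build with: cc -std=c11 -Wall ppi_mask_demo.c
 * All names below are illustrative; they are not part of the patch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NR_PRIVATE_IRQS	128	/* two 64-bit mask words, as in the patch */

struct ppi_state {
	uint64_t impl[2];	/* PPIs implemented in hardware */
	uint64_t owned[2];	/* PPIs with an owner (or the SW_PPI) */
	uint64_t mask[2];	/* exposure mask, mirrors vgic_ppi_mask[] */
	uint64_t shadow[2];	/* guest-visible state (e.g. enable bits) */
};

/* Mirror of the finalize step: expose only owned *and* implemented PPIs. */
static void finalize_ppi_mask(struct ppi_state *s)
{
	for (int reg = 0; reg < NR_PRIVATE_IRQS / 64; reg++)
		s->mask[reg] = s->owned[reg] & s->impl[reg];
}

/* Trapped guest read: hide everything outside the exposure mask. */
static uint64_t guest_read_ppi_reg(const struct ppi_state *s, int reg)
{
	return s->shadow[reg] & s->mask[reg];
}

/* Trapped guest write: drop bits for PPIs the guest must not touch. */
static void guest_write_ppi_reg(struct ppi_state *s, int reg, uint64_t val)
{
	s->shadow[reg] = (s->shadow[reg] & ~s->mask[reg]) |
			 (val & s->mask[reg]);
}

int main(void)
{
	struct ppi_state s = {
		.impl  = { 0x00000000000000ffULL, 0 },	/* 8 PPIs implemented */
		.owned = { 0x0000000000000055ULL, 0 },	/* 4 of them exposed */
	};

	finalize_ppi_mask(&s);

	guest_write_ppi_reg(&s, 0, ~0ULL);	/* guest enables everything */
	printf("guest sees: 0x%016" PRIx64 "\n",
	       guest_read_ppi_reg(&s, 0));	/* -> 0x0000000000000055 */
	return 0;
}

The property being modelled is that bits outside the mask are invisible
on reads and preserved across guest writes, which is presumably what the
per-vCPU vgic_ppi_mask[] is meant to provide once the PPI trap handlers
consult it.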