Currently the number of list registers available is stored in a global
(kvm_vgic_global_state.nr_lr). With Arm CCA the RMM is permitted to
reserve list registers for its own use, so fewer list registers may be
available to a realm VM. Provide wrapper functions to fetch the global
in preparation for restricting nr_lr when dealing with a realm VM.

Signed-off-by: Steven Price
---
Changes in v12:
 * Upstream changes mean the vcpu isn't available everywhere we need
   it, so update the helpers to take the vcpu explicitly.
 * Note that the VGIC handling will be reworked for the RMM 2.0 spec.

New patch for v6
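
For reviewers, some context on where this is heading: kvm_vcpu_vgic_nr_lr()
is intended to be the single place where a realm-specific limit gets
applied later. A minimal sketch of that follow-up, assuming a
vcpu_is_rec()-style predicate and an RMM-reported limit stored in a
hypothetical kvm->arch.realm_nr_lrs field (both names are illustrative
and not part of this series):

  static inline int kvm_vcpu_vgic_nr_lr(struct kvm_vcpu *vcpu)
  {
  	/*
  	 * Sketch only: a realm VM can only use the list registers that
  	 * the RMM has not reserved for its own use.
  	 */
  	if (vcpu_is_rec(vcpu))
  		return min(kvm_vgic_global_state.nr_lr,
  			   vcpu->kvm->arch.realm_nr_lrs);

  	return kvm_vgic_global_state.nr_lr;
  }

Since all users go through this helper (directly or via the
*_outside_lrs() macros) after this patch, such a change would stay
contained to vgic.h.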
---
 arch/arm64/kvm/vgic/vgic-v2.c |  6 +++---
 arch/arm64/kvm/vgic/vgic-v3.c |  8 ++++----
 arch/arm64/kvm/vgic/vgic.c    |  6 +++---
 arch/arm64/kvm/vgic/vgic.h    | 18 ++++++++++++------
 4 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 585491fbda80..990bf693f65d 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -34,11 +34,11 @@ void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu,
 
 	cpuif->vgic_hcr = GICH_HCR_EN;
 
-	if (irqs_pending_outside_lrs(als))
+	if (irqs_pending_outside_lrs(als, vcpu))
 		cpuif->vgic_hcr |= GICH_HCR_NPIE;
-	if (irqs_active_outside_lrs(als))
+	if (irqs_active_outside_lrs(als, vcpu))
 		cpuif->vgic_hcr |= GICH_HCR_LRENPIE;
-	if (irqs_outside_lrs(als))
+	if (irqs_outside_lrs(als, vcpu))
 		cpuif->vgic_hcr |= GICH_HCR_UIE;
 
 	cpuif->vgic_hcr |= (cpuif->vgic_vmcr & GICH_VMCR_ENABLE_GRP0_MASK) ?
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 1d6dd1b545bd..c9ff4f90c975 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -31,11 +31,11 @@ void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
 
 	cpuif->vgic_hcr = ICH_HCR_EL2_En;
 
-	if (irqs_pending_outside_lrs(als))
+	if (irqs_pending_outside_lrs(als, vcpu))
 		cpuif->vgic_hcr |= ICH_HCR_EL2_NPIE;
-	if (irqs_active_outside_lrs(als))
+	if (irqs_active_outside_lrs(als, vcpu))
 		cpuif->vgic_hcr |= ICH_HCR_EL2_LRENPIE;
-	if (irqs_outside_lrs(als))
+	if (irqs_outside_lrs(als, vcpu))
 		cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
 
 	if (!als->nr_sgi)
@@ -60,7 +60,7 @@ void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
 	 * can change behind our back without any warning...
 	 */
 	if (!cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR) ||
-	    irqs_active_outside_lrs(als) ||
+	    irqs_active_outside_lrs(als, vcpu) ||
 	    atomic_read(&vcpu->kvm->arch.vgic.active_spis))
 		cpuif->vgic_hcr |= ICH_HCR_EL2_TDIR;
 }
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 430aa98888fd..2fdcef3d28d1 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -957,7 +957,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 
 	summarize_ap_list(vcpu, &als);
 
-	if (irqs_outside_lrs(&als))
+	if (irqs_outside_lrs(&als, vcpu))
 		vgic_sort_ap_list(vcpu);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
@@ -967,12 +967,12 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 			}
 		}
 
-		if (count == kvm_vgic_global_state.nr_lr)
+		if (count == kvm_vcpu_vgic_nr_lr(vcpu))
 			break;
 	}
 
 	/* Nuke remaining LRs */
-	for (int i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+	for (int i = count ; i < kvm_vcpu_vgic_nr_lr(vcpu); i++)
 		vgic_clear_lr(vcpu, i);
 
 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 5f0fc96b4dc2..55a1142efc6f 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -7,6 +7,7 @@
 
 #include
 #include
+#include
 
 #define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
 #define IMPLEMENTER_ARM		0x43b
@@ -242,14 +243,19 @@ struct ap_list_summary {
 	unsigned int nr_sgi;	/* any SGI */
 };
 
-#define irqs_outside_lrs(s) \
-	(((s)->nr_pend + (s)->nr_act) > kvm_vgic_global_state.nr_lr)
+static inline int kvm_vcpu_vgic_nr_lr(struct kvm_vcpu *vcpu)
+{
+	return kvm_vgic_global_state.nr_lr;
+}
+
+#define irqs_outside_lrs(s, vcpu) \
+	(((s)->nr_pend + (s)->nr_act) > kvm_vcpu_vgic_nr_lr(vcpu))
 
-#define irqs_pending_outside_lrs(s) \
-	((s)->nr_pend > kvm_vgic_global_state.nr_lr)
+#define irqs_pending_outside_lrs(s, vcpu) \
+	((s)->nr_pend > kvm_vcpu_vgic_nr_lr(vcpu))
 
-#define irqs_active_outside_lrs(s) \
-	((s)->nr_act && irqs_outside_lrs(s))
+#define irqs_active_outside_lrs(s, vcpu) \
+	((s)->nr_act && irqs_outside_lrs(s, vcpu))
 
 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
 		       struct vgic_reg_attr *reg_attr);
-- 
2.43.0