When a VM is migrating, interrupt status is stored in the software CSR
ESTAT register, and newly injected interrupts are cached in
vcpu::arch::irq_pending. In the interrupt status acquire interface there
are expensive vcpu_load() and vcpu_put() calls to sync the cached
vcpu::arch::irq_pending. Here a new internal API, kvm_vcpu_sync_intr(),
is added to sync the cached pending irq to the software CSR ESTAT
register.

Signed-off-by: Bibo Mao
---
 arch/loongarch/include/asm/kvm_vcpu.h |  1 +
 arch/loongarch/kvm/interrupt.c        | 42 +++++++++++++++++++++++++++
 arch/loongarch/kvm/vcpu.c             | 15 ++++++----
 3 files changed, 52 insertions(+), 6 deletions(-)

diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index efe26b04b35f..e78bb2527329 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -64,6 +64,7 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu);
 int kvm_pending_timer(struct kvm_vcpu *vcpu);
 int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
 void kvm_deliver_intr(struct kvm_vcpu *vcpu);
+void kvm_vcpu_sync_intr(struct kvm_vcpu *vcpu);
 void kvm_deliver_exception(struct kvm_vcpu *vcpu);
 
 void kvm_own_fpu(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c
index 380aabb3d4d0..24925c238a65 100644
--- a/arch/loongarch/kvm/interrupt.c
+++ b/arch/loongarch/kvm/interrupt.c
@@ -71,6 +71,48 @@ void kvm_deliver_intr(struct kvm_vcpu *vcpu)
 	}
 }
 
+void kvm_vcpu_sync_intr(struct kvm_vcpu *vcpu)
+{
+	struct loongarch_csrs *csr = vcpu->arch.csr;
+	unsigned long mask, val;
+
+	if (!csr)
+		return;
+
+	mask = READ_ONCE(vcpu->arch.irq_clear);
+	if (mask) {
+		mask = xchg_relaxed(&vcpu->arch.irq_clear, 0);
+
+		/*
+		 * sync cached irq_clear to sw state
+		 *
+		 * When VM is migrated to other physical machines or
+		 * snapshot is created, cached irq pending state should
+		 * be synced
+		 */
+		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+		val &= ~(mask & KVM_ESTAT_IRQ_MASK);
+		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, val);
+
+		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC);
+		val &= ~((mask >> 2) & KVM_GINTC_IRQ_MASK);
+		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_GINTC, val);
+	}
+
+	mask = READ_ONCE(vcpu->arch.irq_pending);
+	if (mask) {
+		mask = xchg_relaxed(&vcpu->arch.irq_pending, 0);
+		/* sync cached irq_pending to sw state */
+		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+		val |= (mask & KVM_ESTAT_IRQ_MASK);
+		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, val);
+
+		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC);
+		val |= (mask >> 2) & KVM_GINTC_IRQ_MASK;
+		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_GINTC, val);
+	}
+}
+
 int kvm_pending_timer(struct kvm_vcpu *vcpu)
 {
 	return test_bit(INT_TI, &vcpu->arch.irq_pending);
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 2f4fd6fa5b0e..5f94360e8a4b 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -610,14 +610,11 @@ static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
 
 	if (id == LOONGARCH_CSR_ESTAT) {
 		preempt_disable();
-		vcpu_load(vcpu);
 		/*
 		 * Sync pending interrupts into ESTAT so that interrupt
 		 * remains during VM migration stage
 		 */
-		kvm_deliver_intr(vcpu);
-		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
-		vcpu_put(vcpu);
+		kvm_vcpu_sync_intr(vcpu);
 		preempt_enable();
 
 		/* ESTAT IP0~IP7 get from GINTC */
@@ -1649,6 +1646,14 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	/* Restore timer state regardless */
 	kvm_restore_timer(vcpu);
+
+	/*
+	 * Restore Root.GINTC from unused Guest.GINTC register
+	 *
+	 * SW state about LOONGARCH_CSR_GINTC is updated with get_csr()
+	 * ioctl command only. Update HW state from changed SW state.
+	 */
+	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
 	/* Don't bother restoring registers multiple times unless necessary */
@@ -1711,8 +1716,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR3);
 	}
 
-	/* Restore Root.GINTC from unused Guest.GINTC register */
-	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
 	write_csr_gstat(csr->csrs[LOONGARCH_CSR_GSTAT]);
 
 	/*
-- 
2.39.3