From: David Woodhouse

Now that the hardirq path (xen_timer_callback and set_evtchn_fast) uses
read_trylock() instead of read_lock_irqsave(), the remaining GPC lock
users in xen.c are only called from process context (vcpu_run, ioctls).
Since the hardirq path never spins on the lock, there is no need for
these users to disable interrupts to guard against it.

Convert read_lock_irq() and read_lock_irqsave()/read_unlock_irqrestore()
to plain read_lock()/read_unlock(), and drop the local_irq_save() around
the remaining read_trylock() calls, in:

 - kvm_xen_update_runstate_guest()
 - kvm_xen_shared_info_init()
 - xen_get_guest_pvclock()
 - kvm_xen_inject_pending_events()
 - __kvm_xen_has_interrupt()
 - wait_pending_event()

Signed-off-by: David Woodhouse
---
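As an aside for review (not part of the commit message): the resulting
locking discipline can be sketched with a minimal userspace analogue,
where pthread rwlocks stand in for the kernel rwlock_t. Every name below
(cache_lock, cache_valid, the two path functions) is hypothetical and
only illustrates the trylock-in-"hardirq" / plain-lock-in-process-context
split this series relies on:

/*
 * Minimal userspace sketch, not kernel code. The "hardirq" side only
 * ever trylocks and tolerates failure, so the process-context side can
 * take a plain read lock without disabling anything.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cache_valid;

/* Analogue of the timer/evtchn delivery path: never blocks. */
static void hardirq_like_path(void)
{
	if (pthread_rwlock_tryrdlock(&cache_lock) != 0)
		return;	/* contended: defer the work instead of spinning */

	if (cache_valid) {
		/* ... deliver the event via the cached mapping ... */
	}
	pthread_rwlock_unlock(&cache_lock);
}

/* Analogue of the process-context users converted by this patch. */
static void process_context_path(void)
{
	pthread_rwlock_rdlock(&cache_lock);	/* plain lock, may block */
	while (!cache_valid) {
		pthread_rwlock_unlock(&cache_lock);
		/*
		 * ... refresh the cache; this side may sleep. (Demo is
		 * single-threaded; a real refresh would retake the lock
		 * as a writer before flipping cache_valid.)
		 */
		cache_valid = 1;
		pthread_rwlock_rdlock(&cache_lock);
	}
	/* ... read the cached data under the lock ... */
	pthread_rwlock_unlock(&cache_lock);
}

int main(void)
{
	process_context_path();
	hardirq_like_path();
	puts("done");
	return 0;
}

The point is only that a trylock side which can always bail out imposes
no IRQ-disabling requirement on the sleepable side.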
 arch/x86/kvm/xen.c | 60 +++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 35 deletions(-)

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 9bdb8e3cad58..b1fae42bf295 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -45,15 +45,15 @@ static int kvm_xen_shared_info_init(struct kvm *kvm)
 	int ret = 0;
 	int idx = srcu_read_lock(&kvm->srcu);
 
-	read_lock_irq(&gpc->lock);
+	read_lock(&gpc->lock);
 	while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
-		read_unlock_irq(&gpc->lock);
+		read_unlock(&gpc->lock);
 
 		ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
 		if (ret)
 			goto out;
 
-		read_lock_irq(&gpc->lock);
+		read_lock(&gpc->lock);
 	}
 
 	/*
@@ -96,7 +96,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm)
 	smp_wmb();
 
 	wc->version = wc_version + 1;
-	read_unlock_irq(&gpc->lock);
+	read_unlock(&gpc->lock);
 
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
 
@@ -155,22 +155,21 @@ static int xen_get_guest_pvclock(struct kvm_vcpu *vcpu,
 				 struct gfn_to_pfn_cache *gpc,
 				 unsigned int offset)
 {
-	unsigned long flags;
 	int r;
 
-	read_lock_irqsave(&gpc->lock, flags);
+	read_lock(&gpc->lock);
 	while (!kvm_gpc_check(gpc, offset + sizeof(*hv_clock))) {
-		read_unlock_irqrestore(&gpc->lock, flags);
+		read_unlock(&gpc->lock);
 
 		r = kvm_gpc_refresh(gpc, offset + sizeof(*hv_clock));
 		if (r)
 			return r;
 
-		read_lock_irqsave(&gpc->lock, flags);
+		read_lock(&gpc->lock);
 	}
 
 	memcpy(hv_clock, gpc->khva + offset, sizeof(*hv_clock));
-	read_unlock_irqrestore(&gpc->lock, flags);
+	read_unlock(&gpc->lock);
 
 	/*
 	 * Sanity check TSC shift+multiplier to verify the guest's view of time
@@ -325,7 +324,6 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
 	size_t user_len, user_len1, user_len2;
 	struct vcpu_runstate_info rs;
-	unsigned long flags;
 	size_t times_ofs;
 	uint8_t *update_bit = NULL;
 	uint64_t entry_time;
@@ -421,16 +419,14 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 * gfn_to_pfn caches that cover the region.
 	 */
 	if (atomic) {
-		local_irq_save(flags);
 		if (!read_trylock(&gpc1->lock)) {
-			local_irq_restore(flags);
 			return;
 		}
 	} else {
-		read_lock_irqsave(&gpc1->lock, flags);
+		read_lock(&gpc1->lock);
 	}
 	while (!kvm_gpc_check(gpc1, user_len1)) {
-		read_unlock_irqrestore(&gpc1->lock, flags);
+		read_unlock(&gpc1->lock);
 
 		/* When invoked from kvm_sched_out() we cannot sleep */
 		if (atomic)
@@ -439,7 +435,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		if (kvm_gpc_refresh(gpc1, user_len1))
 			return;
 
-		read_lock_irqsave(&gpc1->lock, flags);
+		read_lock(&gpc1->lock);
 	}
 
 	if (likely(!user_len2)) {
@@ -467,7 +463,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
 		if (atomic) {
 			if (!read_trylock(&gpc2->lock)) {
-				read_unlock_irqrestore(&gpc1->lock, flags);
+				read_unlock(&gpc1->lock);
 				return;
 			}
 		} else {
@@ -476,7 +472,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 
 	if (!kvm_gpc_check(gpc2, user_len2)) {
 		read_unlock(&gpc2->lock);
-		read_unlock_irqrestore(&gpc1->lock, flags);
+		read_unlock(&gpc1->lock);
 
 		/* When invoked from kvm_sched_out() we cannot sleep */
 		if (atomic)
@@ -581,7 +577,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	}
 
 	kvm_gpc_mark_dirty_in_slot(gpc1);
-	read_unlock_irqrestore(&gpc1->lock, flags);
+	read_unlock(&gpc1->lock);
 }
 
 void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -640,7 +636,6 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 {
 	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
 	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
-	unsigned long flags;
 
 	if (!evtchn_pending_sel)
 		return;
@@ -650,14 +645,14 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	 * does anyway. Page it in and retry the instruction. We're just a
 	 * little more honest about it.
	 */
-	read_lock_irqsave(&gpc->lock, flags);
+	read_lock(&gpc->lock);
 	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
-		read_unlock_irqrestore(&gpc->lock, flags);
+		read_unlock(&gpc->lock);
 
 		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
 			return;
 
-		read_lock_irqsave(&gpc->lock, flags);
+		read_lock(&gpc->lock);
 	}
 
 	/* Now gpc->khva is a valid kernel address for the vcpu_info */
@@ -687,7 +682,7 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	}
 
 	kvm_gpc_mark_dirty_in_slot(gpc);
-	read_unlock_irqrestore(&gpc->lock, flags);
+	read_unlock(&gpc->lock);
 
 	/* For the per-vCPU lapic vector, deliver it as MSI. */
 	if (v->arch.xen.upcall_vector)
@@ -698,7 +693,6 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 {
 	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
 	bool atomic = in_atomic() || !task_is_running(current);
-	unsigned long flags;
 	u8 rc = 0;
 
 	/*
@@ -715,16 +709,13 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
 	if (atomic) {
-		local_irq_save(flags);
-		if (!read_trylock(&gpc->lock)) {
-			local_irq_restore(flags);
+		if (!read_trylock(&gpc->lock))
 			return 1;
-		}
 	} else {
-		read_lock_irqsave(&gpc->lock, flags);
+		read_lock(&gpc->lock);
 	}
 	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
-		read_unlock_irqrestore(&gpc->lock, flags);
+		read_unlock(&gpc->lock);
 
 		/*
 		 * This function gets called from kvm_vcpu_block() after setting the
@@ -744,11 +735,11 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 			 */
 			return 0;
 		}
-		read_lock_irqsave(&gpc->lock, flags);
+		read_lock(&gpc->lock);
 	}
 
 	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
-	read_unlock_irqrestore(&gpc->lock, flags);
+	read_unlock(&gpc->lock);
 
 	return rc;
 }
@@ -1445,12 +1436,11 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 	struct kvm *kvm = vcpu->kvm;
 	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
 	unsigned long *pending_bits;
-	unsigned long flags;
 	bool ret = true;
 	int idx, i;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	read_lock_irqsave(&gpc->lock, flags);
+	read_lock(&gpc->lock);
 	if (!kvm_gpc_check(gpc, PAGE_SIZE))
 		goto out_rcu;
 
@@ -1471,7 +1461,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 	}
 
  out_rcu:
-	read_unlock_irqrestore(&gpc->lock, flags);
+	read_unlock(&gpc->lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
 	return ret;
-- 
2.51.0