Feature KVM_FEATURE_PREEMPT_HINT is added to show whether a vCPU is
preempted or not, which helps the guest OS with scheduling and lock
checking. Here, add the KVM_FEATURE_PREEMPT_HINT feature and use one
byte of the steal time structure as the preempted flag.

Signed-off-by: Bibo Mao
---
 arch/loongarch/include/asm/kvm_host.h      |  2 +
 arch/loongarch/include/asm/kvm_para.h      |  5 +-
 arch/loongarch/include/uapi/asm/kvm.h      |  1 +
 arch/loongarch/include/uapi/asm/kvm_para.h |  1 +
 arch/loongarch/kvm/vcpu.c                  | 54 +++++++++++++++++++++-
 arch/loongarch/kvm/vm.c                    |  5 +-
 6 files changed, 65 insertions(+), 3 deletions(-)

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 0cecbd038bb3..04c6dd171877 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -163,6 +163,7 @@ enum emulation_result {
 #define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
 #define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |		\
 					 BIT(KVM_FEATURE_STEAL_TIME) |	\
+					 BIT(KVM_FEATURE_PREEMPT_HINT) |\
 					 BIT(KVM_FEATURE_USER_HCALL) |	\
 					 BIT(KVM_FEATURE_VIRT_EXTIOI))
 
@@ -250,6 +251,7 @@ struct kvm_vcpu_arch {
 		u64 guest_addr;
 		u64 last_steal;
 		struct gfn_to_hva_cache cache;
+		u8 preempted;
 	} st;
 };
 
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 3e4b397f423f..d8592a7f5922 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -37,8 +37,11 @@ struct kvm_steal_time {
 	__u64 steal;
 	__u32 version;
 	__u32 flags;
-	__u32 pad[12];
+	__u8 preempted;
+	__u8 u8_pad[3];
+	__u32 pad[11];
 };
+#define KVM_VCPU_PREEMPTED	(1 << 0)
 
 /*
  * Hypercall interface for KVM hypervisor
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index 57ba1a563bb1..bca7154aa651 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -104,6 +104,7 @@ struct kvm_fpu {
 #define KVM_LOONGARCH_VM_FEAT_PV_IPI		6
 #define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME	7
 #define KVM_LOONGARCH_VM_FEAT_PTW		8
+#define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT_HINT	10
 
 /* Device Control API on vcpu fd */
 #define KVM_LOONGARCH_VCPU_CPUCFG	0
diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h
index 76d802ef01ce..fe4107869ce6 100644
--- a/arch/loongarch/include/uapi/asm/kvm_para.h
+++ b/arch/loongarch/include/uapi/asm/kvm_para.h
@@ -15,6 +15,7 @@
 #define CPUCFG_KVM_FEATURE	(CPUCFG_KVM_BASE + 4)
 #define KVM_FEATURE_IPI		1
 #define KVM_FEATURE_STEAL_TIME	2
+#define KVM_FEATURE_PREEMPT_HINT	3
 /* BIT 24 - 31 are features configurable by user space vmm */
 #define KVM_FEATURE_VIRT_EXTIOI	24
 #define KVM_FEATURE_USER_HCALL	25
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 1245a6b35896..33a94b191b5d 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -180,6 +180,11 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 	}
 
 	st = (struct kvm_steal_time __user *)ghc->hva;
+	if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT_HINT)) {
+		unsafe_put_user(0, &st->preempted, out);
+		vcpu->arch.st.preempted = 0;
+	}
+
 	unsafe_get_user(version, &st->version, out);
 	if (version & 1)
 		version += 1; /* first time write, random junk */
@@ -1757,11 +1762,58 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 	return 0;
 }
 
+static void _kvm_set_vcpu_preempted(struct kvm_vcpu *vcpu)
+{
+	struct gfn_to_hva_cache *ghc;
+	struct kvm_steal_time __user *st;
+	struct kvm_memslots *slots;
+	static const u8 preempted = KVM_VCPU_PREEMPTED;
+	gpa_t gpa;
+
+	gpa = vcpu->arch.st.guest_addr;
+	if (!(gpa & KVM_STEAL_PHYS_VALID))
+		return;
+
+	/* A vCPU may be preempted many times; only record it once */
+	if (vcpu->arch.st.preempted)
+		return;
+
+	/* This happens on process exit */
+	if (unlikely(current->mm != vcpu->kvm->mm))
+		return;
+
+	gpa &= KVM_STEAL_PHYS_MASK;
+	ghc = &vcpu->arch.st.cache;
+	slots = kvm_memslots(vcpu->kvm);
+	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+			ghc->gpa = INVALID_GPA;
+			return;
+		}
+	}
+
+	st = (struct kvm_steal_time __user *)ghc->hva;
+	unsafe_put_user(preempted, &st->preempted, out);
+	vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+out:
+	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	int cpu;
+	int cpu, idx;
 	unsigned long flags;
 
+	if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT_HINT)) {
+		/*
+		 * Take the srcu lock as memslots will be accessed to check the gfn
+		 * cache generation against the memslots generation.
+		 */
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		_kvm_set_vcpu_preempted(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	}
+
 	local_irq_save(flags);
 	cpu = smp_processor_id();
 	vcpu->arch.last_sched_cpu = cpu;
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index a49b1c1a3dd1..b8879110a0a1 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -45,8 +45,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	/* Enable all PV features by default */
 	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
-	if (kvm_pvtime_supported())
+	if (kvm_pvtime_supported()) {
 		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+		kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT_HINT);
+	}
 
 	/*
 	 * cpu_vabits means user address space only (a half of total).
@@ -143,6 +145,7 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
 	case KVM_LOONGARCH_VM_FEAT_PV_IPI:
		return 0;
 	case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
+	case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT_HINT:
 		if (kvm_pvtime_supported())
 			return 0;
 		return -ENXIO;
--
2.39.3
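
On the userspace side, the new KVM_LOONGARCH_VM_FEAT_PV_PREEMPT_HINT
attribute can be probed with the generic KVM_HAS_DEVICE_ATTR ioctl on
the VM fd before relying on the feature. A minimal sketch, assuming the
attribute group used for the existing KVM_LOONGARCH_VM_FEAT_* checks
(KVM_LOONGARCH_VM_FEAT_CTRL, not shown in the diff above) and eliding
error handling:

    #include <stdbool.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Return true when the VM fd reports the PV preempt hint feature. */
    static bool vm_has_pv_preempt_hint(int vm_fd)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_LOONGARCH_VM_FEAT_CTRL,  /* assumed group */
                    .attr  = KVM_LOONGARCH_VM_FEAT_PV_PREEMPT_HINT,
            };

            /* KVM_HAS_DEVICE_ATTR returns 0 if the attribute exists. */
            return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
    }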
Function vcpu_is_preempted() is used to check whether a vCPU is
preempted or not. Here, add an implementation of vcpu_is_preempted()
for the case where the option CONFIG_PARAVIRT is enabled.
Signed-off-by: Bibo Mao
---
 arch/loongarch/include/asm/smp.h      |  1 +
 arch/loongarch/include/asm/spinlock.h |  5 +++++
 arch/loongarch/kernel/paravirt.c      | 16 ++++++++++++++++
 arch/loongarch/kernel/smp.c           |  6 ++++++
 4 files changed, 28 insertions(+)

diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 3a47f52959a8..5b37f7bf2060 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -18,6 +18,7 @@ struct smp_ops {
 	void (*init_ipi)(void);
 	void (*send_ipi_single)(int cpu, unsigned int action);
 	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
+	bool (*vcpu_is_preempted)(int cpu);
 };
 
 extern struct smp_ops mp_ops;
diff --git a/arch/loongarch/include/asm/spinlock.h b/arch/loongarch/include/asm/spinlock.h
index 7cb3476999be..c001cef893aa 100644
--- a/arch/loongarch/include/asm/spinlock.h
+++ b/arch/loongarch/include/asm/spinlock.h
@@ -5,6 +5,11 @@
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
+#ifdef CONFIG_PARAVIRT
+#define vcpu_is_preempted	vcpu_is_preempted
+bool vcpu_is_preempted(int cpu);
+#endif
+
 #include <asm/processor.h>
 #include <asm/qspinlock.h>
 #include <asm/qrwlock.h>
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index b1b51f920b23..b99404b6b13f 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -52,6 +52,13 @@ static u64 paravt_steal_clock(int cpu)
 #ifdef CONFIG_SMP
 static struct smp_ops native_ops;
 
+static bool pv_vcpu_is_preempted(int cpu)
+{
+	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+
+	return !!(src->preempted & KVM_VCPU_PREEMPTED);
+}
+
 static void pv_send_ipi_single(int cpu, unsigned int action)
 {
 	int min, old;
@@ -308,6 +315,9 @@ int __init pv_time_init(void)
 		pr_err("Failed to install cpu hotplug callbacks\n");
 		return r;
 	}
+
+	if (kvm_para_has_feature(KVM_FEATURE_PREEMPT_HINT))
+		mp_ops.vcpu_is_preempted = pv_vcpu_is_preempted;
 #endif
 
 	static_call_update(pv_steal_clock, paravt_steal_clock);
@@ -332,3 +342,9 @@ int __init pv_spinlock_init(void)
 
 	return 0;
 }
+
+bool notrace vcpu_is_preempted(int cpu)
+{
+	return mp_ops.vcpu_is_preempted(cpu);
+}
+EXPORT_SYMBOL(vcpu_is_preempted);
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 46036d98da75..f04192fedf8d 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -307,10 +307,16 @@ static void loongson_init_ipi(void)
 		panic("IPI IRQ request failed\n");
 }
 
+static bool loongson_vcpu_is_preempted(int cpu)
+{
+	return false;
+}
+
 struct smp_ops mp_ops = {
 	.init_ipi		= loongson_init_ipi,
 	.send_ipi_single	= loongson_send_ipi_single,
 	.send_ipi_mask		= loongson_send_ipi_mask,
+	.vcpu_is_preempted	= loongson_vcpu_is_preempted,
 };
 
 static void __init fdt_smp_setup(void)
--
2.39.3
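
With vcpu_is_preempted() wired up, spin-wait loops can stop burning
cycles on a lock owner whose vCPU is not actually running. A minimal
usage sketch (not part of this series; lock_is_free() is a hypothetical
stand-in for the real availability check):

    /*
     * Spin waiting for a lock, but give up early when the owner's vCPU
     * has been preempted by the host -- the same idea the core mutex/
     * rwsem optimistic-spinning code applies via vcpu_is_preempted().
     */
    static bool spin_on_owner(int owner_cpu)
    {
            while (!lock_is_free()) {               /* hypothetical check */
                    if (vcpu_is_preempted(owner_cpu))
                            return false;           /* owner not running */
                    cpu_relax();
            }

            return true;
    }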
Print the paravirt preempt hint state together with the steal time
information, so that it is easy to check whether the paravirt preempt
hint feature is enabled or not.

Signed-off-by: Bibo Mao
---
 arch/loongarch/kernel/paravirt.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index b99404b6b13f..b7ea511c288b 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -294,6 +294,7 @@ static struct notifier_block pv_reboot_nb = {
 int __init pv_time_init(void)
 {
 	int r;
+	bool pv_preempted = false;
 
 	if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
 		return 0;
@@ -316,8 +317,10 @@ int __init pv_time_init(void)
 		return r;
 	}
 
-	if (kvm_para_has_feature(KVM_FEATURE_PREEMPT_HINT))
+	if (kvm_para_has_feature(KVM_FEATURE_PREEMPT_HINT)) {
 		mp_ops.vcpu_is_preempted = pv_vcpu_is_preempted;
+		pv_preempted = true;
+	}
 #endif
 
 	static_call_update(pv_steal_clock, paravt_steal_clock);
@@ -328,7 +331,10 @@ int __init pv_time_init(void)
 	static_key_slow_inc(&paravirt_steal_rq_enabled);
 #endif
 
-	pr_info("Using paravirt steal-time\n");
+	if (pv_preempted)
+		pr_info("Using paravirt steal-time with preempt hint enabled\n");
+	else
+		pr_info("Using paravirt steal-time with preempt hint disabled\n");
 
 	return 0;
 }
--
2.39.3
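
Taken together, a guest can verify the negotiated state in two ways. In
code, the existing kvm_para_has_feature() helper (already used by
pv_time_init() above) reports the bit; a minimal probe sketch, assuming
the existing kvm_para plumbing:

    /* Guest-side probe for the preempt hint paravirt feature. */
    static bool __init have_preempt_hint(void)
    {
            return kvm_para_available() &&
                   kvm_para_has_feature(KVM_FEATURE_PREEMPT_HINT);
    }

And at boot, given the pr_info() strings above, dmesg should show
"Using paravirt steal-time with preempt hint enabled" when both
features were negotiated, or the "disabled" variant when only steal
time is available.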