From: Kai Huang

Move the nested PML dirty logging update logic from VMX-specific code to
common x86 infrastructure.  Both VMX and SVM share identical logic: defer
the CPU dirty logging update while the vCPU is running in L2, then process
the pending update when exiting to L1.

No functional change intended.

Signed-off-by: Kai Huang
Co-developed-by: Nikunj A Dadhania
Signed-off-by: Nikunj A Dadhania
---
 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/kvm_cache_regs.h   |  7 +++++++
 arch/x86/kvm/vmx/main.c         |  4 ++--
 arch/x86/kvm/vmx/nested.c       |  5 -----
 arch/x86/kvm/vmx/vmx.c          | 23 ++++-------------------
 arch/x86/kvm/vmx/vmx.h          |  3 +--
 arch/x86/kvm/vmx/x86_ops.h      |  2 +-
 arch/x86/kvm/x86.c              | 22 +++++++++++++++++++++-
 8 files changed, 38 insertions(+), 31 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 73b16cecc06d..ca5def4f3585 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -862,6 +862,7 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_external_spt_cache;
 
 	struct page *pml_page;
+	bool update_cpu_dirty_logging_pending;
 
 	/*
 	 * QEMU userspace and the guest each have their own FPU state.
@@ -1884,7 +1885,7 @@ struct kvm_x86_ops {
 			       struct x86_exception *exception);
 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
-	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
+	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu, bool enable);
 
 	const struct kvm_x86_nested_ops *nested_ops;
 
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 8ddb01191d6f..0c4a832a9dab 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -238,6 +238,13 @@ static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
 		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
 	}
 
+	/* Also see kvm_vcpu_update_cpu_dirty_logging() */
+	if (vcpu->arch.update_cpu_dirty_logging_pending) {
+		vcpu->arch.update_cpu_dirty_logging_pending = false;
+		kvm_x86_call(update_cpu_dirty_logging)(vcpu,
+			atomic_read(&vcpu->kvm->nr_memslots_dirty_logging));
+	}
+
 	vcpu->stat.guest_mode = 0;
 }
 
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 0eb2773b2ae2..6fb97f6ce48e 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -103,7 +103,7 @@ static void vt_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vmx_vcpu_load(vcpu, cpu);
 }
 
-static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
+static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable)
 {
 	/*
 	 * Basic TDX does not support feature PML. KVM does not enable PML in
@@ -112,7 +112,7 @@ static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
 	if (WARN_ON_ONCE(is_td_vcpu(vcpu)))
 		return;
 
-	vmx_update_cpu_dirty_logging(vcpu);
+	vmx_update_cpu_dirty_logging(vcpu, enable);
 }
 
 static void vt_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 76271962cb70..0093fc389eae 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5202,11 +5202,6 @@ void __nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 		vmx_set_virtual_apic_mode(vcpu);
 	}
 
-	if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
-		vmx->nested.update_vmcs01_cpu_dirty_logging = false;
-		vmx_update_cpu_dirty_logging(vcpu);
-	}
-
 	nested_put_vmcs12_pages(vcpu);
 
 	if (vmx->nested.reload_vmcs01_apic_access_page) {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 81216deb3959..ede5aaf24278 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8194,27 +8194,12 @@ void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
 }
 #endif
 
-void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
+void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-	if (WARN_ON_ONCE(!enable_pml))
-		return;
-
-	if (is_guest_mode(vcpu)) {
-		vmx->nested.update_vmcs01_cpu_dirty_logging = true;
-		return;
-	}
-
-	/*
-	 * Note, nr_memslots_dirty_logging can be changed concurrent with this
-	 * code, but in that case another update request will be made and so
-	 * the guest will never run with a stale PML value.
-	 */
-	if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
-		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
+	if (enable)
+		secondary_exec_controls_setbit(to_vmx(vcpu), SECONDARY_EXEC_ENABLE_PML);
 	else
-		secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
+		secondary_exec_controls_clearbit(to_vmx(vcpu), SECONDARY_EXEC_ENABLE_PML);
 }
 
 void vmx_setup_mce(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d2dd63194ee2..22bf8860add4 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -133,7 +133,6 @@ struct nested_vmx {
 
 	bool change_vmcs01_virtual_apic_mode;
 	bool reload_vmcs01_apic_access_page;
-	bool update_vmcs01_cpu_dirty_logging;
 	bool update_vmcs01_apicv_status;
 	bool update_vmcs01_hwapic_isr;
 
@@ -401,7 +400,7 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
 
-void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable);
 
 u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated);
 bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated);
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 9697368d65b3..1ae01fa592cd 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -113,7 +113,7 @@ u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
 u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
 void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
-void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu, bool enable);
 #ifdef CONFIG_X86_64
 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
 		     bool *expired);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b23d7721444..42479fcda688 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -149,6 +149,7 @@ struct kvm_x86_ops kvm_x86_ops __read_mostly;
 #include <asm/kvm-x86-ops.h>
 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
+EXPORT_STATIC_CALL_GPL(kvm_x86_update_cpu_dirty_logging);
 
 static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, 0644);
@@ -11055,6 +11056,25 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 	kvm_x86_call(set_apic_access_page_addr)(vcpu);
 }
 
+static void kvm_vcpu_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
+{
+	if (WARN_ON_ONCE(!enable_pml))
+		return;
+
+	if (is_guest_mode(vcpu)) {
+		vcpu->arch.update_cpu_dirty_logging_pending = true;
+		return;
+	}
+
+	/*
+	 * Note, nr_memslots_dirty_logging can be changed concurrently with this
+	 * code, but in that case another update request will be made and so the
+	 * guest will never run with a stale PML value.
+	 */
+	kvm_x86_call(update_cpu_dirty_logging)(vcpu,
+		atomic_read(&vcpu->kvm->nr_memslots_dirty_logging));
+}
+
 /*
  * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
@@ -11221,7 +11241,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_x86_call(recalc_intercepts)(vcpu);
 
 		if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
-			kvm_x86_call(update_cpu_dirty_logging)(vcpu);
+			kvm_vcpu_update_cpu_dirty_logging(vcpu);
 
 		if (kvm_check_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) {
 			kvm_vcpu_reset(vcpu, true);
-- 
2.48.1
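
P.S. For anyone reviewing the interaction between
kvm_vcpu_update_cpu_dirty_logging() and leave_guest_mode(), here is a
condensed, standalone sketch of the defer/replay flow. It is plain C with
simplified stand-ins for the KVM types (struct vcpu and its helpers below
are hypothetical, not the kernel's struct kvm_vcpu), intended only to
illustrate the pattern:

#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool guest_mode;                       /* vCPU currently in L2 */
	bool update_cpu_dirty_logging_pending; /* deferred update flag */
	int  nr_memslots_dirty_logging;        /* stand-in for the atomic */
};

/* Stand-in for toggling SECONDARY_EXEC_ENABLE_PML in vmcs01. */
static void update_cpu_dirty_logging(struct vcpu *v, bool enable)
{
	printf("PML %s\n", enable ? "enabled" : "disabled");
}

/* Mirrors kvm_vcpu_update_cpu_dirty_logging(): defer while in L2. */
static void request_update(struct vcpu *v)
{
	if (v->guest_mode) {
		v->update_cpu_dirty_logging_pending = true;
		return;
	}
	update_cpu_dirty_logging(v, v->nr_memslots_dirty_logging != 0);
}

/* Mirrors leave_guest_mode(): replay the deferred update on exit to L1. */
static void leave_guest_mode(struct vcpu *v)
{
	v->guest_mode = false;
	if (v->update_cpu_dirty_logging_pending) {
		v->update_cpu_dirty_logging_pending = false;
		update_cpu_dirty_logging(v, v->nr_memslots_dirty_logging != 0);
	}
}

int main(void)
{
	struct vcpu v = { .guest_mode = true, .nr_memslots_dirty_logging = 1 };

	request_update(&v);   /* deferred: the vCPU is in L2 */
	leave_guest_mode(&v); /* replayed: prints "PML enabled" */
	return 0;
}

Note that the enable value is recomputed from nr_memslots_dirty_logging at
replay time rather than latched when the update was deferred, matching the
patch: a racing memslot change simply raises another update request, so
vmcs01 never keeps a stale PML setting.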