Currently, dirty logging relies on write-protecting guest memory and marking
dirty GFNs during subsequent write faults. This works, but it incurs the
overhead of an additional write fault for each dirty GFN.

Implement support for the Page Modification Logging (PML) feature, a
hardware-assisted method for efficient dirty logging. PML automatically logs
dirty GPA[51:12] to a 4K buffer when the CPU sets NPT D-bits. Two new VMCB
fields are used: PML_ADDR and PML_INDEX. PML_INDEX is initialized to 511 (the
4K buffer holds 512 8-byte GPA entries), and the CPU decrements it after
logging each GPA. When the PML buffer is full, a VMEXIT(PML_FULL) with exit
code 0x407 is generated.

Disable PML for nested guests and defer L1 dirty-logging updates until the L2
guest exits. PML is enabled by default when supported and can be disabled via
the 'pml' module parameter.

Signed-off-by: Nikunj A Dadhania
---
 arch/x86/include/asm/svm.h      |   6 +-
 arch/x86/include/uapi/asm/svm.h |   2 +
 arch/x86/kvm/svm/nested.c       |  13 ++++-
 arch/x86/kvm/svm/sev.c          |   2 +-
 arch/x86/kvm/svm/svm.c          | 100 +++++++++++++++++++++++++++++++-
 arch/x86/kvm/svm/svm.h          |   5 ++
 6 files changed, 121 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index e2c28884ff32..6be641210469 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -165,7 +165,10 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u8 reserved_9[22];
 	u64 allowed_sev_features;	/* Offset 0x138 */
 	u64 guest_sev_features;		/* Offset 0x140 */
-	u8 reserved_10[664];
+	u8 reserved_10[128];
+	u64 pml_addr;			/* Offset 0x1c8 */
+	u16 pml_index;			/* Offset 0x1d0 */
+	u8 reserved_11[526];
 	/*
 	 * Offset 0x3e0, 32 bytes reserved
 	 * for use by hypervisor/software.
@@ -239,6 +242,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_NESTED_CTL_NP_ENABLE	BIT_ULL(0)
 #define SVM_NESTED_CTL_SEV_ENABLE	BIT_ULL(1)
 #define SVM_NESTED_CTL_SEV_ES_ENABLE	BIT_ULL(2)
+#define SVM_NESTED_CTL_PML_ENABLE	BIT_ULL(11)
 
 #define SVM_TSC_RATIO_RSVD	0xffffff0000000000ULL
 
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 9c640a521a67..f329dca167de 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -101,6 +101,7 @@
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI		0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS	0x402
 #define SVM_EXIT_VMGEXIT			0x403
+#define SVM_EXIT_PML_FULL			0x407
 
 /* SEV-ES software-defined VMGEXIT events */
 #define SVM_VMGEXIT_MMIO_READ			0x80000001
@@ -232,6 +233,7 @@
 	{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
 	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \
 	{ SVM_EXIT_VMGEXIT,		"vmgexit" }, \
+	{ SVM_EXIT_PML_FULL,		"pml_full" }, \
 	{ SVM_VMGEXIT_MMIO_READ,	"vmgexit_mmio_read" }, \
 	{ SVM_VMGEXIT_MMIO_WRITE,	"vmgexit_mmio_write" }, \
 	{ SVM_VMGEXIT_NMI_COMPLETE,	"vmgexit_nmi_complete" }, \
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b7fd2e869998..b37a1bb938e0 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -740,8 +740,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 					  V_NMI_BLOCKING_MASK);
 	}
 
-	/* Copied from vmcb01. msrpm_base can be overwritten later. */
-	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
+	/*
+	 * Copied from vmcb01. msrpm_base can be overwritten later.
+	 * Disable PML for nested guest.
+	 */
+	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl & ~SVM_NESTED_CTL_PML_ENABLE;
 	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
 	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
 
@@ -1177,6 +1180,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 		svm_update_lbrv(vcpu);
 	}
 
+	/* Update dirty logging that might have changed while L2 ran */
+	if (svm->nested.update_vmcb01_cpu_dirty_logging) {
+		svm->nested.update_vmcb01_cpu_dirty_logging = false;
+		svm_update_cpu_dirty_logging(vcpu);
+	}
+
 	if (vnmi) {
 		if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
 			vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 5bac4d20aec0..b179a0a2581a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4669,7 +4669,7 @@ struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
 	 * Allocate an SNP-safe page to workaround the SNP erratum where
 	 * the CPU will incorrectly signal an RMP violation #PF if a
 	 * hugepage (2MB or 1GB) collides with the RMP entry of a
-	 * 2MB-aligned VMCB, VMSA, or AVIC backing page.
+	 * 2MB-aligned VMCB, VMSA, PML or AVIC backing page.
 	 *
 	 * Allocate one extra page, choose a page which is not
 	 * 2MB-aligned, and free the other.
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8a66e2e985a4..042fca4dc0f8 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -178,6 +178,9 @@ module_param(intercept_smi, bool, 0444);
 bool vnmi = true;
 module_param(vnmi, bool, 0444);
 
+bool pml = true;
+module_param(pml, bool, 0444);
+
 static bool svm_gp_erratum_intercept = true;
 
 static u8 rsm_ins_bytes[] = "\x0f\xaa";
@@ -1220,6 +1223,16 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 	if (vcpu->kvm->arch.bus_lock_detection_enabled)
 		svm_set_intercept(svm, INTERCEPT_BUSLOCK);
 
+	if (pml) {
+		/*
+		 * Populate the page address and index here, PML is enabled
+		 * when dirty logging is enabled on the memslot through
+		 * svm_update_cpu_dirty_logging()
+		 */
+		control->pml_addr = (u64)__sme_set(page_to_phys(vcpu->arch.pml_page));
+		control->pml_index = PML_HEAD_INDEX;
+	}
+
 	if (sev_guest(vcpu->kvm))
 		sev_init_vmcb(svm);
 
@@ -1296,14 +1309,20 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 			goto error_free_vmcb_page;
 	}
 
+	if (pml) {
+		vcpu->arch.pml_page = snp_safe_alloc_page();
+		if (!vcpu->arch.pml_page)
+			goto error_free_vmsa_page;
+	}
+
 	err = avic_init_vcpu(svm);
 	if (err)
-		goto error_free_vmsa_page;
+		goto error_free_pml_page;
 
 	svm->msrpm = svm_vcpu_alloc_msrpm();
 	if (!svm->msrpm) {
 		err = -ENOMEM;
-		goto error_free_vmsa_page;
+		goto error_free_pml_page;
 	}
 
 	svm->x2avic_msrs_intercepted = true;
@@ -1319,6 +1338,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 
 	return 0;
 
+error_free_pml_page:
+	if (vcpu->arch.pml_page)
+		__free_page(vcpu->arch.pml_page);
 error_free_vmsa_page:
 	if (vmsa_page)
 		__free_page(vmsa_page);
@@ -1339,6 +1361,9 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 
 	sev_free_vcpu(vcpu);
 
+	if (pml)
+		__free_page(vcpu->arch.pml_page);
+
 	__free_page(__sme_pa_to_page(svm->vmcb01.pa));
 	svm_vcpu_free_msrpm(svm->msrpm);
 }
@@ -3206,6 +3231,55 @@ static int bus_lock_exit(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void svm_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (WARN_ON_ONCE(!pml))
+		return;
+
+	if (is_guest_mode(vcpu)) {
+		svm->nested.update_vmcb01_cpu_dirty_logging = true;
+		return;
+	}
+
+	/*
+	 * Note, nr_memslots_dirty_logging can be changed concurrently with this
+	 * code, but in that case another update request will be made and so the
+	 * guest will never run with a stale PML value.
+	 */
+	if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
+		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_PML_ENABLE;
+	else
+		svm->vmcb->control.nested_ctl &= ~SVM_NESTED_CTL_PML_ENABLE;
+}
+
+static void svm_flush_pml_buffer(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb_control_area *control = &svm->vmcb->control;
+
+	/* Do nothing if PML buffer is empty */
+	if (control->pml_index == PML_HEAD_INDEX)
+		return;
+
+	kvm_flush_pml_buffer(vcpu, control->pml_index);
+
+	/* Reset the PML index */
+	control->pml_index = PML_HEAD_INDEX;
+}
+
+static int pml_full_interception(struct kvm_vcpu *vcpu)
+{
+	trace_kvm_pml_full(vcpu->vcpu_id);
+
+	/*
+	 * PML buffer is already flushed at the beginning of svm_handle_exit().
+	 * Nothing to do here.
+	 */
+	return 1;
+}
+
 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[SVM_EXIT_READ_CR0]			= cr_interception,
 	[SVM_EXIT_READ_CR3]			= cr_interception,
@@ -3282,6 +3356,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 #ifdef CONFIG_KVM_AMD_SEV
 	[SVM_EXIT_VMGEXIT]			= sev_handle_vmgexit,
 #endif
+	[SVM_EXIT_PML_FULL]			= pml_full_interception,
 };
 
 static void dump_vmcb(struct kvm_vcpu *vcpu)
@@ -3330,8 +3405,10 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
 	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
 	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
 	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
-	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
+	pr_err("%-20s%llx\n", "nested_ctl:", control->nested_ctl);
 	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
+	pr_err("%-20s%016llx\n", "pml_addr:", control->pml_addr);
+	pr_err("%-20s%04x\n", "pml_index:", control->pml_index);
 	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
 	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
 	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
@@ -3562,6 +3639,14 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
+	/*
+	 * Opportunistically flush the PML buffer on VM exit. This keeps the
+	 * dirty bitmap current by processing logged GPAs rather than waiting for
+	 * PML_FULL exit.
+	 */
+	if (pml && !is_guest_mode(vcpu))
+		svm_flush_pml_buffer(vcpu);
+
 	/* SEV-ES guests must use the CR write traps to track CR registers. */
 	if (!sev_es_guest(vcpu->kvm)) {
 		if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
@@ -5028,6 +5113,9 @@ static int svm_vm_init(struct kvm *kvm)
 			return ret;
 	}
 
+	if (pml)
+		kvm->arch.cpu_dirty_log_size = PML_LOG_NR_ENTRIES;
+
 	svm_srso_vm_init();
 	return 0;
 }
@@ -5181,6 +5269,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.gmem_prepare = sev_gmem_prepare,
 	.gmem_invalidate = sev_gmem_invalidate,
 	.gmem_max_mapping_level = sev_gmem_max_mapping_level,
+
+	.update_cpu_dirty_logging = svm_update_cpu_dirty_logging,
 };
 
 /*
@@ -5382,6 +5472,10 @@ static __init int svm_hardware_setup(void)
 	nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
 
+	pml = pml && npt_enabled && cpu_feature_enabled(X86_FEATURE_PML);
+	if (pml)
+		pr_info("Page modification logging supported\n");
+
 	if (lbrv) {
 		if (!boot_cpu_has(X86_FEATURE_LBRV))
 			lbrv = false;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 70df7c6413cf..ce38f4a885d3 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -216,6 +216,9 @@ struct svm_nested_state {
 	 * on its side.
 	 */
 	bool force_msr_bitmap_recalc;
+
+	/* Indicates whether dirty logging changed while nested guest ran */
+	bool update_vmcb01_cpu_dirty_logging;
 };
 
 struct vcpu_sev_es_state {
@@ -717,6 +720,8 @@ static inline void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
 	svm_set_intercept_for_msr(vcpu, msr, type, true);
 }
 
+void svm_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
+
 /* nested.c */
 
 #define NESTED_EXIT_HOST	0	/* Exit handled on host level */
-- 
2.48.1
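
For reference, the drain that svm_flush_pml_buffer() delegates to the common
kvm_flush_pml_buffer() helper conceptually looks like the sketch below. This
is only an illustrative approximation, not code from the patch: the function
name example_drain_pml(), the open-coded 511/512 constants, and the assumption
that a completely full buffer leaves PML_INDEX wrapped above 511 are all
placeholders. It shows how entries written downward from index 511 end up as
kvm_vcpu_mark_page_dirty() calls.

#include <linux/kvm_host.h>
#include <linux/mm.h>

/*
 * Illustrative sketch: drain a PML page whose entries were written by
 * hardware starting at index 511 and moving downward. pml_index is the
 * value read from the VMCB PML_INDEX field and points at the next free
 * slot, so valid GPAs live at indices pml_index+1 .. 511.
 */
static void example_drain_pml(struct kvm_vcpu *vcpu, struct page *pml_page,
			      u16 pml_index)
{
	u64 *pml_buf = page_address(pml_page);
	unsigned int i;

	/* Index still at the head (511) means nothing was logged. */
	if (pml_index == 511)
		return;

	/* Assumption: a completely full buffer leaves the index wrapped past 0. */
	i = (pml_index >= 512) ? 0 : pml_index + 1;

	for (; i < 512; i++) {
		u64 gpa = pml_buf[i];

		/* Each entry is a page-aligned GPA; mark the corresponding GFN dirty. */
		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	}
}

With the patch applied, this draining happens via kvm_flush_pml_buffer() at
the start of svm_handle_exit() (the PML_FULL handler itself does nothing
more), after which PML_INDEX is reset to PML_HEAD_INDEX. The new module
parameter is read-only at runtime (0444), so disabling PML has to be done at
load time, e.g. "modprobe kvm_amd pml=0".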