Move the PML (Page Modification Logging) buffer flushing logic from
VMX-specific code to common x86 KVM code to enable reuse by SVM and
avoid code duplication. The PML constants (PML_LOG_NR_ENTRIES and
PML_HEAD_INDEX) are moved from vmx.h to x86.h to make them available
to both VMX and SVM.

No functional change intended for VMX, except that the WARN_ON() for
the page alignment check is toned down to WARN_ON_ONCE(). If hardware
exhibits this behavior once, it's likely to occur repeatedly, so use
WARN_ON_ONCE() to avoid log flooding while still capturing the
unexpected condition.

The refactoring prepares for SVM to leverage the same PML flushing
implementation.

Signed-off-by: Nikunj A Dadhania
---
 arch/x86/kvm/vmx/vmx.c | 26 ++------------------------
 arch/x86/kvm/vmx/vmx.h |  5 -----
 arch/x86/kvm/x86.c     | 31 +++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.h     |  7 +++++++
 4 files changed, 40 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index aa157fe5b7b3..a0955155d7ca 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6107,37 +6107,15 @@ static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u16 pml_idx, pml_tail_index;
-	u64 *pml_buf;
-	int i;
+	u16 pml_idx;
 
 	pml_idx = vmcs_read16(GUEST_PML_INDEX);
 
 	/* Do nothing if PML buffer is empty */
 	if (pml_idx == PML_HEAD_INDEX)
 		return;
 
-	/*
-	 * PML index always points to the next available PML buffer entity
-	 * unless PML log has just overflowed.
-	 */
-	pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;
-	/*
-	 * PML log is written backwards: the CPU first writes the entry 511
-	 * then the entry 510, and so on.
-	 *
-	 * Read the entries in the same order they were written, to ensure that
-	 * the dirty ring is filled in the same order the CPU wrote them.
-	 */
-	pml_buf = page_address(vmx->pml_pg);
-
-	for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
-		u64 gpa;
-
-		gpa = pml_buf[i];
-		WARN_ON(gpa & (PAGE_SIZE - 1));
-		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
-	}
+	kvm_flush_pml_buffer(vcpu, vmx->pml_pg, pml_idx);
 
 	/* reset PML index */
 	vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d3389baf3ab3..4494c253727f 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -269,11 +269,6 @@ struct vcpu_vmx {
 	unsigned int ple_window;
 	bool ple_window_dirty;
 
-	/* Support for PML */
-#define PML_LOG_NR_ENTRIES 512
-	/* PML is written backwards: this is the first entry written by the CPU */
-#define PML_HEAD_INDEX	(PML_LOG_NR_ENTRIES-1)
-
 	struct page *pml_pg;
 
 	/* apic deadline value in host tsc */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1c49bc681c4..054ba09d3737 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6417,6 +6417,37 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 		kvm_vcpu_kick(vcpu);
 }
 
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_page, u16 pml_idx)
+{
+	u16 pml_tail_index;
+	u64 *pml_buf;
+	int i;
+
+	/*
+	 * PML index always points to the next available PML buffer entity
+	 * unless PML log has just overflowed.
+	 */
+	pml_tail_index = (pml_idx >= PML_LOG_NR_ENTRIES) ? 0 : pml_idx + 1;
+
+	/*
+	 * PML log is written backwards: the CPU first writes the entry 511
+	 * then the entry 510, and so on.
+	 *
+	 * Read the entries in the same order they were written, to ensure that
+	 * the dirty ring is filled in the same order the CPU wrote them.
+	 */
+	pml_buf = page_address(pml_page);
+
+	for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
+		u64 gpa;
+
+		gpa = pml_buf[i];
+		WARN_ON_ONCE(gpa & (PAGE_SIZE - 1));
+		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_flush_pml_buffer);
+
 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 			    struct kvm_enable_cap *cap)
 {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index bcfd9b719ada..23c188c0a24b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -699,4 +699,11 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
+/* Support for PML */
+#define PML_LOG_NR_ENTRIES 512
+/* PML is written backwards: this is the first entry written by the CPU */
+#define PML_HEAD_INDEX	(PML_LOG_NR_ENTRIES-1)
+
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_pg, u16 pml_idx);
+
 #endif
-- 
2.43.0
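
As an illustration of the intended reuse (not part of this patch), here is a
minimal sketch of how an SVM-side flush path could call the new common helper,
mirroring the VMX path above. The svm->pml_pg field and the
vmcb->control.pml_index location are assumed names used only for this example,
not actual SVM definitions.

/*
 * Hypothetical sketch, not part of this patch: an SVM-side caller of the
 * common helper.  svm->pml_pg and vmcb->control.pml_index are assumed
 * names used purely for illustration.
 */
static void svm_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 pml_idx = svm->vmcb->control.pml_index;	/* assumed location */

	/* Do nothing if the PML buffer is empty. */
	if (pml_idx == PML_HEAD_INDEX)
		return;

	/* Common code walks the log backwards and marks the pages dirty. */
	kvm_flush_pml_buffer(vcpu, svm->pml_pg, pml_idx);

	/* Reset the index so the CPU starts logging from the head again. */
	svm->vmcb->control.pml_index = PML_HEAD_INDEX;
}

As with the VMX side, the vendor code would only read and reset its own PML
index; the backwards walk and the dirty-ring ordering live entirely in the
common kvm_flush_pml_buffer().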