Move the PML page pointer from the VMX-specific vcpu_vmx structure to
the common kvm_vcpu_arch structure so that it can be shared between the
VMX and SVM implementations.

Only the page pointer moves to x86 common code; the allocation logic
stays vendor-specific, since AMD requires snp_safe_alloc_page() for the
PML buffer allocation.

Update all VMX references accordingly, and simplify the
kvm_flush_pml_buffer() interface by removing the page parameter, since
the page can now be reached directly through the vcpu structure.

No functional change intended; this is purely restructuring in
preparation for SVM PML support.

Suggested-by: Kai Huang
Signed-off-by: Nikunj A Dadhania
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/vmx/vmx.c          | 24 ++++++++++++------------
 arch/x86/kvm/vmx/vmx.h          |  2 --
 arch/x86/kvm/x86.c              |  4 ++--
 arch/x86/kvm/x86.h              |  2 +-
 5 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 48598d017d6f..7e5dceb4530e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -861,6 +861,8 @@ struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu_memory_cache mmu_external_spt_cache;
 
+	struct page *pml_page;
+
 	/*
 	 * QEMU userspace and the guest each have their own FPU state.
 	 * In vcpu_run, we switch between the user and guest FPU contexts.
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index db1379cffbcb..aa1ba8db6392 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4677,7 +4677,8 @@ int vmx_vcpu_precreate(struct kvm *kvm)
 
 static void init_vmcs(struct vcpu_vmx *vmx)
 {
-	struct kvm *kvm = vmx->vcpu.kvm;
+	struct kvm_vcpu *vcpu = &vmx->vcpu;
+	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
 
 	if (nested)
@@ -4768,7 +4769,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
 	if (enable_pml) {
-		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write64(PML_ADDRESS, page_to_phys(vcpu->arch.pml_page));
 		vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
 	}
 
@@ -6195,17 +6196,16 @@ void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
 		*error_code = 0;
 }
 
-static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
+static void vmx_destroy_pml_buffer(struct kvm_vcpu *vcpu)
 {
-	if (vmx->pml_pg) {
-		__free_page(vmx->pml_pg);
-		vmx->pml_pg = NULL;
+	if (vcpu->arch.pml_page) {
+		__free_page(vcpu->arch.pml_page);
+		vcpu->arch.pml_page = NULL;
 	}
 }
 
 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u16 pml_idx;
 
 	pml_idx = vmcs_read16(GUEST_PML_INDEX);
@@ -6214,7 +6214,7 @@ static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
 	if (pml_idx == PML_HEAD_INDEX)
 		return;
 
-	kvm_flush_pml_buffer(vcpu, vmx->pml_pg, pml_idx);
+	kvm_flush_pml_buffer(vcpu, pml_idx);
 
 	/* reset PML index */
 	vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
@@ -7502,7 +7502,7 @@ void vmx_vcpu_free(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (enable_pml)
-		vmx_destroy_pml_buffer(vmx);
+		vmx_destroy_pml_buffer(vcpu);
 	free_vpid(vmx->vpid);
 	nested_vmx_free_vcpu(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
@@ -7531,8 +7531,8 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 	 * for the guest), etc.
 	 */
 	if (enable_pml) {
-		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
-		if (!vmx->pml_pg)
+		vcpu->arch.pml_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+		if (!vcpu->arch.pml_page)
 			goto free_vpid;
 	}
 
@@ -7603,7 +7603,7 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 free_vmcs:
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_pml:
-	vmx_destroy_pml_buffer(vmx);
+	vmx_destroy_pml_buffer(vcpu);
 free_vpid:
 	free_vpid(vmx->vpid);
 	return err;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index fe9d2b10f4be..d2dd63194ee2 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -272,8 +272,6 @@ struct vcpu_vmx {
 	unsigned int ple_window;
 	bool ple_window_dirty;
 
-	struct page *pml_pg;
-
 	/* apic deadline value in host tsc */
 	u64 hv_deadline_tsc;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 732d8a4b7dff..be8483d20fbc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6737,7 +6737,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 		kvm_vcpu_kick(vcpu);
 }
 
-void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_page, u16 pml_idx)
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, u16 pml_idx)
 {
 	u16 pml_tail_index;
 	u64 *pml_buf;
@@ -6756,7 +6756,7 @@ void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_
 	 * Read the entries in the same order they were written, to ensure that
 	 * the dirty ring is filled in the same order the CPU wrote them.
 	 */
-	pml_buf = page_address(pml_page);
+	pml_buf = page_address(vcpu->arch.pml_page);
 	for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
 		u64 gpa;
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 199d39492df8..6bf6645c4fe4 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -743,6 +743,6 @@ static inline bool kvm_is_valid_u_s_cet(struct kvm_vcpu *vcpu, u64 data)
 /* PML is written backwards: this is the first entry written by the CPU */
 #define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)
 
-void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_pg, u16 pml_idx);
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, u16 pml_idx);
 
 #endif
-- 
2.48.1
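
For context, and not part of the patch above: the reason only the pointer
moves into common code is that the SVM side is expected to need
snp_safe_alloc_page() (an RMP-safe allocation, as the commit message notes)
rather than a plain alloc_page() for the PML buffer. A minimal sketch of
what the vendor-specific SVM helpers could look like follows; the helper
names are hypothetical, the no-argument snp_safe_alloc_page() call is an
assumption based on the current SVM code, and the fragment would live in
arch/x86/kvm/svm/svm.c, so treat it as illustrative only, not the actual
SVM PML enablement.

/* Hypothetical SVM-side allocation of the now-common vcpu->arch.pml_page. */
static int svm_alloc_pml_page(struct kvm_vcpu *vcpu)
{
	/* RMP-safe allocation, per the rationale in the commit message. */
	struct page *page = snp_safe_alloc_page();

	if (!page)
		return -ENOMEM;

	vcpu->arch.pml_page = page;
	return 0;
}

/* Hypothetical SVM-side teardown, mirroring vmx_destroy_pml_buffer(). */
static void svm_free_pml_page(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.pml_page) {
		__free_page(vcpu->arch.pml_page);
		vcpu->arch.pml_page = NULL;
	}
}

Presumably such helpers would be wired up from svm_vcpu_create() and
svm_vcpu_free() once SVM PML support lands, but that is out of scope for
this restructuring patch.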