Move the PML page from the VMX-specific vcpu_vmx structure to the common
kvm_vcpu_arch structure so that it can be shared between the VMX and SVM
implementations. Update all VMX references accordingly, and simplify the
kvm_flush_pml_buffer() interface by dropping the page parameter, since the
page can now be reached directly through the vcpu structure.

No functional change intended; this is purely a restructuring to prepare
for SVM PML support.

Suggested-by: Kai Huang
Signed-off-by: Nikunj A Dadhania
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/vmx/vmx.c          | 24 ++++++++++++------------
 arch/x86/kvm/vmx/vmx.h          |  2 --
 arch/x86/kvm/x86.c              |  4 ++--
 arch/x86/kvm/x86.h              |  2 +-
 5 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c56cc54d682a..62a7d519fbaf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -857,6 +857,8 @@ struct kvm_vcpu_arch {
 	 */
 	struct kvm_mmu_memory_cache mmu_external_spt_cache;
 
+	struct page *pml_page;
+
 	/*
 	 * QEMU userspace and the guest each have their own FPU state.
 	 * In vcpu_run, we switch between the user and guest FPU contexts.
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a0955155d7ca..9520e11b08d0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4612,7 +4612,8 @@ int vmx_vcpu_precreate(struct kvm *kvm)
 
 static void init_vmcs(struct vcpu_vmx *vmx)
 {
-	struct kvm *kvm = vmx->vcpu.kvm;
+	struct kvm_vcpu *vcpu = &vmx->vcpu;
+	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
 
 	if (nested)
@@ -4703,7 +4704,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
 	if (enable_pml) {
-		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write64(PML_ADDRESS, page_to_phys(vcpu->arch.pml_page));
 		vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
 	}
 
@@ -6096,17 +6097,16 @@ void vmx_get_entry_info(struct kvm_vcpu *vcpu, u32 *intr_info, u32 *error_code)
 		*error_code = 0;
 }
 
-static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
+static void vmx_destroy_pml_buffer(struct kvm_vcpu *vcpu)
 {
-	if (vmx->pml_pg) {
-		__free_page(vmx->pml_pg);
-		vmx->pml_pg = NULL;
+	if (vcpu->arch.pml_page) {
+		__free_page(vcpu->arch.pml_page);
+		vcpu->arch.pml_page = NULL;
 	}
 }
 
 static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u16 pml_idx;
 
 	pml_idx = vmcs_read16(GUEST_PML_INDEX);
@@ -6115,7 +6115,7 @@ static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
 	if (pml_idx == PML_HEAD_INDEX)
 		return;
 
-	kvm_flush_pml_buffer(vcpu, vmx->pml_pg, pml_idx);
+	kvm_flush_pml_buffer(vcpu, pml_idx);
 
 	/* reset PML index */
 	vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
@@ -7388,7 +7388,7 @@ void vmx_vcpu_free(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (enable_pml)
-		vmx_destroy_pml_buffer(vmx);
+		vmx_destroy_pml_buffer(vcpu);
 	free_vpid(vmx->vpid);
 	nested_vmx_free_vcpu(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
@@ -7417,8 +7417,8 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 	 * for the guest), etc.
 	 */
 	if (enable_pml) {
-		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
-		if (!vmx->pml_pg)
+		vcpu->arch.pml_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+		if (!vcpu->arch.pml_page)
 			goto free_vpid;
 	}
 
@@ -7489,7 +7489,7 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 free_vmcs:
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_pml:
-	vmx_destroy_pml_buffer(vmx);
+	vmx_destroy_pml_buffer(vcpu);
 free_vpid:
 	free_vpid(vmx->vpid);
 	return err;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 4494c253727f..6fafb6228c17 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -269,8 +269,6 @@ struct vcpu_vmx {
 	unsigned int ple_window;
 	bool ple_window_dirty;
 
-	struct page *pml_pg;
-
 	/* apic deadline value in host tsc */
 	u64 hv_deadline_tsc;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 123ebe7be184..afa7f8b46416 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6417,7 +6417,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 		kvm_vcpu_kick(vcpu);
 }
 
-void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_page, u16 pml_idx)
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, u16 pml_idx)
 {
 	u16 pml_tail_index;
 	u64 *pml_buf;
@@ -6436,7 +6436,7 @@ void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, u16 pml_idx)
 	 * Read the entries in the same order they were written, to ensure that
 	 * the dirty ring is filled in the same order the CPU wrote them.
 	 */
-	pml_buf = page_address(pml_page);
+	pml_buf = page_address(vcpu->arch.pml_page);
 	for (i = PML_HEAD_INDEX; i >= pml_tail_index; i--) {
 		u64 gpa;
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 23c188c0a24b..92016081a7e7 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -704,6 +704,6 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 /* PML is written backwards: this is the first entry written by the CPU */
 #define PML_HEAD_INDEX (PML_LOG_NR_ENTRIES-1)
-void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, struct page *pml_pg, u16 pml_idx);
+void kvm_flush_pml_buffer(struct kvm_vcpu *vcpu, u16 pml_idx);
 
 #endif
-- 
2.48.1