Instead of passing in a pointer to struct vmx_pages, pass in the GPA of
the EPT root, as that is the only member being used. Furthermore, use
ept_pte_masks only for VMX, and x86_pte_masks otherwise (which is what
NPT uses). This is in preparation for supporting NPT as well.

No functional change intended.

Signed-off-by: Yosry Ahmed
---
 tools/testing/selftests/kvm/include/x86/vmx.h |  6 +++---
 .../testing/selftests/kvm/lib/x86/memstress.c |  4 ++--
 tools/testing/selftests/kvm/lib/x86/vmx.c     | 20 +++++++++++---------
 .../selftests/kvm/x86/vmx_dirty_log_test.c    |  6 +++---
 4 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86/vmx.h b/tools/testing/selftests/kvm/include/x86/vmx.h
index 5aa14ceed050a..4429e83e1f52c 100644
--- a/tools/testing/selftests/kvm/include/x86/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86/vmx.h
@@ -561,11 +561,11 @@ bool load_vmcs(struct vmx_pages *vmx);
 
 bool ept_1g_pages_supported(void);
 
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+void nested_map(struct kvm_vm *vm, vm_paddr_t root_gpa,
 		uint64_t nested_paddr, uint64_t paddr, uint64_t size);
-void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+void nested_map_memslot(struct kvm_vm *vm, vm_paddr_t root_gpa,
 			uint32_t memslot);
-void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+void nested_identity_map_1g(struct kvm_vm *vm, vm_paddr_t root_gpa,
 			    uint64_t addr, uint64_t size);
 bool kvm_cpu_has_ept(void);
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/x86/memstress.c b/tools/testing/selftests/kvm/lib/x86/memstress.c
index 0b1f288ad5564..5ca970a8a5c14 100644
--- a/tools/testing/selftests/kvm/lib/x86/memstress.c
+++ b/tools/testing/selftests/kvm/lib/x86/memstress.c
@@ -70,11 +70,11 @@ void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
 	 * KVM can shadow the EPT12 with the maximum huge page size supported
 	 * by the backing source.
 	 */
-	nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
+	nested_identity_map_1g(vm, vmx->eptp_gpa, 0, 0x100000000ULL);
 
 	start = align_down(memstress_args.gpa, PG_SIZE_1G);
 	end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
-	nested_identity_map_1g(vmx, vm, start, end - start);
+	nested_identity_map_1g(vm, vmx->eptp_gpa, start, end - start);
 }
 
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
diff --git a/tools/testing/selftests/kvm/lib/x86/vmx.c b/tools/testing/selftests/kvm/lib/x86/vmx.c
index 75996fc00501e..0573b3ea717cb 100644
--- a/tools/testing/selftests/kvm/lib/x86/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86/vmx.c
@@ -378,34 +378,36 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
  * Within the VM given by vm, creates a nested guest translation for the
  * page range starting at nested_paddr to the page range starting at paddr.
  */
-void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+void __nested_map(struct kvm_vm *vm, vm_paddr_t root_gpa,
 		  uint64_t nested_paddr, uint64_t paddr, uint64_t size,
 		  int level)
 {
 	size_t page_size = PG_LEVEL_SIZE(level);
 	size_t npages = size / page_size;
+	const struct pte_masks *masks;
+
+	masks = kvm_cpu_has(X86_FEATURE_VMX) ? &ept_pte_masks : &x86_pte_masks;
 
 	TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 
 	while (npages--) {
-		__virt_pg_map(vm, vmx->eptp_gpa, nested_paddr, paddr,
-			      level, &ept_pte_masks);
+		__virt_pg_map(vm, root_gpa, nested_paddr, paddr, level, masks);
 		nested_paddr += page_size;
 		paddr += page_size;
 	}
 }
 
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+void nested_map(struct kvm_vm *vm, vm_paddr_t root_gpa,
 		uint64_t nested_paddr, uint64_t paddr, uint64_t size)
 {
-	__nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+	__nested_map(vm, root_gpa, nested_paddr, paddr, size, PG_LEVEL_4K);
 }
 
 /* Prepare an identity extended page table that maps all the
  * physical pages in VM.
  */
-void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+void nested_map_memslot(struct kvm_vm *vm, vm_paddr_t root_gpa,
 			uint32_t memslot)
 {
 	sparsebit_idx_t i, last;
@@ -419,7 +421,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
 		if (i > last)
 			break;
 
-		nested_map(vmx, vm,
+		nested_map(vm, root_gpa,
 			   (uint64_t)i << vm->page_shift,
 			   (uint64_t)i << vm->page_shift,
 			   1 << vm->page_shift);
@@ -427,10 +429,10 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
 }
 
 /* Identity map a region with 1GiB Pages. */
-void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+void nested_identity_map_1g(struct kvm_vm *vm, vm_paddr_t root_gpa,
 			    uint64_t addr, uint64_t size)
 {
-	__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
+	__nested_map(vm, root_gpa, addr, addr, size, PG_LEVEL_1G);
 }
 
 bool kvm_cpu_has_ept(void)
diff --git a/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
index 98cb6bdab3e6d..e54e6111164e7 100644
--- a/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86/vmx_dirty_log_test.c
@@ -121,9 +121,9 @@ static void test_vmx_dirty_log(bool enable_ept)
 	 */
 	if (enable_ept) {
 		prepare_eptp(vmx, vm);
-		nested_map_memslot(vmx, vm, 0);
-		nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
-		nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
+		nested_map_memslot(vm, vmx->eptp_gpa, 0);
+		nested_map(vm, vmx->eptp_gpa, NESTED_TEST_MEM1, GUEST_TEST_MEM, PAGE_SIZE);
+		nested_map(vm, vmx->eptp_gpa, NESTED_TEST_MEM2, GUEST_TEST_MEM, PAGE_SIZE);
 	}
 
 	bmap = bitmap_zalloc(TEST_MEM_PAGES);
-- 
2.51.0.869.ge66316f041-goog
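
[Editor's note, not part of the patch: a sketch of the resulting API. Each
caller now resolves the nested root GPA itself, so the mapping helpers stay
architecture-neutral. The VMX call below is taken from the patch; the NPT
call is hypothetical, assuming a future SVM counterpart whose pages struct
exposes the nCR3 root — the "svm" and "ncr3_gpa" names are illustrative,
not from this series.]

	/* VMX: the nested root is the EPTP page set up by prepare_eptp(). */
	prepare_eptp(vmx, vm);
	nested_map_memslot(vm, vmx->eptp_gpa, 0);

	/*
	 * NPT (hypothetical future caller): the nested root would be the
	 * nCR3 table instead; svm->ncr3_gpa is an assumed field name.
	 */
	nested_map_memslot(vm, svm->ncr3_gpa, 0);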