Similar to KVM's shadow MMU (in most cases), set the user bit on nested PTEs. This is in preparation for supporting NPT mappings, which require the user bit to be set. This should be a no-op for VMX. Signed-off-by: Yosry Ahmed --- tools/testing/selftests/kvm/lib/x86/processor.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c index 958389ec1722d..c2912b0a49e90 100644 --- a/tools/testing/selftests/kvm/lib/x86/processor.c +++ b/tools/testing/selftests/kvm/lib/x86/processor.c @@ -211,7 +211,7 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, paddr = vm_untag_gpa(vm, paddr); if (!(*pte & masks->present)) { - *pte = masks->present | masks->writeable | masks->x; + *pte = masks->present | masks->writeable | masks->x | masks->user; if (current_level == target_level) *pte |= masks->large | (paddr & PHYSICAL_PAGE_MASK); else @@ -276,7 +276,7 @@ void __virt_pg_map(struct kvm_vm *vm, vm_paddr_t root_gpa, uint64_t vaddr, pte = virt_get_pte(vm, root_gpa, pte, vaddr, PG_LEVEL_4K, masks); TEST_ASSERT(!(*pte & masks->present), "PTE already present for 4k page at vaddr: 0x%lx", vaddr); - *pte = masks->present | masks->writeable | masks->x + *pte = masks->present | masks->writeable | masks->x | masks->user | (paddr & PHYSICAL_PAGE_MASK); /* -- 2.51.0.869.ge66316f041-goog