From: Yosry Ahmed

According to the APM, NPT walks are treated as user accesses. In
preparation for supporting NPT mappings, set the 'user' bit on NPTs by
adding a mask of bits to always be set on PTEs in kvm_mmu.

Signed-off-by: Yosry Ahmed
Signed-off-by: Sean Christopherson
---
 tools/testing/selftests/kvm/include/x86/kvm_util_arch.h | 2 ++
 tools/testing/selftests/kvm/include/x86/processor.h     | 1 +
 tools/testing/selftests/kvm/lib/x86/processor.c         | 5 +++--
 tools/testing/selftests/kvm/lib/x86/svm.c               | 3 +++
 4 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
index 1cf84b8212c6..be35d26bb320 100644
--- a/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
+++ b/tools/testing/selftests/kvm/include/x86/kvm_util_arch.h
@@ -22,6 +22,8 @@ struct pte_masks {
 	uint64_t nx;
 	uint64_t c;
 	uint64_t s;
+
+	uint64_t always_set;
 };
 
 struct kvm_mmu_arch {
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index deb471fb9b51..7b7d962244d6 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -1450,6 +1450,7 @@ enum pg_level {
 #define PTE_NX_MASK(mmu)		((mmu)->arch.pte_masks.nx)
 #define PTE_C_BIT_MASK(mmu)		((mmu)->arch.pte_masks.c)
 #define PTE_S_BIT_MASK(mmu)		((mmu)->arch.pte_masks.s)
+#define PTE_ALWAYS_SET_MASK(mmu)	((mmu)->arch.pte_masks.always_set)
 
 /*
  * For PTEs without a PRESENT bit (i.e. EPT entries), treat the PTE as present
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index a3a4c9a4cbcb..5a3385d48902 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -231,7 +231,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
 
 	if (!is_present_pte(mmu, pte)) {
 		*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
-		       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu);
+		       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
+		       PTE_ALWAYS_SET_MASK(mmu);
 		if (current_level == target_level)
 			*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
 		else
@@ -299,7 +300,7 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
 		    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
 	*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
 	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
-	       (paddr & PHYSICAL_PAGE_MASK);
+	       PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
 
 	/*
 	 * Neither SEV nor TDX supports shared page tables, so only the final
diff --git a/tools/testing/selftests/kvm/lib/x86/svm.c b/tools/testing/selftests/kvm/lib/x86/svm.c
index 8e4795225595..18e9e9089643 100644
--- a/tools/testing/selftests/kvm/lib/x86/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86/svm.c
@@ -72,6 +72,9 @@ void vm_enable_npt(struct kvm_vm *vm)
 
 	pte_masks = vm->mmu.arch.pte_masks;
 	pte_masks.c = 0;
 
+	/* NPT walks are treated as user accesses, so set the 'user' bit. */
+	pte_masks.always_set = pte_masks.user;
+
 	tdp_mmu_init(vm, vm->mmu.pgtable_levels, &pte_masks);
 }
-- 
2.52.0.351.gbe84eed79e-goog
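
As a side note for readers less familiar with the selftests MMU code, below is a
minimal, self-contained C sketch of the idea the patch implements: an "always set"
mask that is OR'ed into every PTE the mapping code builds, which NPT setup then
points at the 'user' bit. The struct layout, bit positions, and helper names in
the sketch are illustrative assumptions only, not the selftests' actual
definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative x86 PTE bit positions (architectural, per the SDM/APM). */
#define PTE_PRESENT	(1ULL << 0)
#define PTE_WRITABLE	(1ULL << 1)
#define PTE_USER	(1ULL << 2)

/* Simplified stand-in for the selftests' struct pte_masks (assumed shape). */
struct pte_masks_sketch {
	uint64_t present;
	uint64_t writable;
	uint64_t user;
	uint64_t always_set;	/* OR'ed into every PTE that gets created */
};

/* Build a leaf PTE: permission bits, the always-set mask, then the address. */
static uint64_t make_leaf_pte(const struct pte_masks_sketch *m, uint64_t paddr)
{
	return m->present | m->writable | m->always_set | (paddr & ~0xfffULL);
}

int main(void)
{
	struct pte_masks_sketch m = {
		.present  = PTE_PRESENT,
		.writable = PTE_WRITABLE,
		.user     = PTE_USER,
	};

	/* NPT walks are user accesses, so force the 'user' bit everywhere. */
	m.always_set = m.user;

	uint64_t pte = make_leaf_pte(&m, 0x1234000);

	printf("pte = 0x%llx, user bit %s\n", (unsigned long long)pte,
	       (pte & PTE_USER) ? "set" : "clear");
	return 0;
}

The real change spreads the same idea across kvm_util_arch.h (the new always_set
mask), processor.c (OR'ing it into newly created PTEs), and svm.c (pointing
always_set at the user bit for NPT), exactly as in the diff above.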