Extend "struct kvm_mmu_memory_cache" to support a custom page allocator so that x86's TDX can update per-page metadata on allocation and free(). Name the allocator page_get() to align with __get_free_page(), i.e. to communicate that it returns an "unsigned long", not a "struct page", and to avoid collisions with macros, e.g. with alloc_page. Suggested-by: Kai Huang Signed-off-by: Sean Christopherson --- include/linux/kvm_types.h | 2 ++ virt/kvm/kvm_main.c | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index a568d8e6f4e8..87fa9deffdb7 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -112,6 +112,8 @@ struct kvm_mmu_memory_cache { gfp_t gfp_custom; u64 init_value; struct kmem_cache *kmem_cache; + unsigned long (*page_get)(gfp_t gfp); + void (*page_free)(unsigned long addr); int capacity; int nobjs; void **objects; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 571cf0d6ec01..7015edce5bd8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -356,7 +356,10 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, if (mc->kmem_cache) return kmem_cache_alloc(mc->kmem_cache, gfp_flags); - page = (void *)__get_free_page(gfp_flags); + if (mc->page_get) + page = (void *)mc->page_get(gfp_flags); + else + page = (void *)__get_free_page(gfp_flags); if (page && mc->init_value) memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64)); return page; @@ -416,6 +419,8 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) while (mc->nobjs) { if (mc->kmem_cache) kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]); + else if (mc->page_free) + mc->page_free((unsigned long)mc->objects[--mc->nobjs]); else free_page((unsigned long)mc->objects[--mc->nobjs]); } -- 2.53.0.rc1.217.geba53bf80e-goog