IOMMU_MT is another TDX Module defined structure, similar to HPA_ARRAY_T
and HPA_LIST_INFO. The difference is that it supports multi-order
contiguous pages: an additional NUM_PAGES field is added for every
multi-order page entry.

Add a dedicated allocation helper for IOMMU_MT. A general allocation
helper for multi-order pages may be better, but that can be postponed
until another user appears.

Signed-off-by: Xu Yilun
Signed-off-by: Dan Williams
---
 arch/x86/include/asm/tdx.h  |  2 ++
 arch/x86/virt/vmx/tdx/tdx.c | 71 +++++++++++++++++++++++++++++++++++--
 2 files changed, 70 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 1eeb77a6790a..4078fc497779 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -157,6 +157,8 @@ void tdx_page_array_ctrl_leak(struct tdx_page_array *array);
 
 int tdx_page_array_ctrl_release(struct tdx_page_array *array,
 				unsigned int nr_released, u64 released_hpa);
+struct tdx_page_array *
+tdx_page_array_create_iommu_mt(unsigned int iq_order, unsigned int nr_mt_pages);
 
 struct tdx_td {
 	/* TD root structure: */
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index bbf93cad5bf2..46cdb5aaaf68 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -316,8 +316,15 @@ static int tdx_page_array_fill_root(struct tdx_page_array *array,
 			      TDX_PAGE_ARRAY_MAX_NENTS);
 
 	entries = (u64 *)page_address(array->root);
-	for (i = 0; i < array->nents; i++)
-		entries[i] = page_to_phys(array->pages[offset + i]);
+	for (i = 0; i < array->nents; i++) {
+		struct page *page = array->pages[offset + i];
+
+		entries[i] = page_to_phys(page);
+
+		/* Now only for iommu_mt */
+		if (compound_nr(page) > 1)
+			entries[i] |= compound_nr(page);
+	}
 
 	return array->nents;
 }
@@ -327,7 +334,7 @@ static void tdx_free_pages_bulk(unsigned int nr_pages, struct page **pages)
 	unsigned long i;
 
 	for (i = 0; i < nr_pages; i++)
-		__free_page(pages[i]);
+		put_page(pages[i]);
 }
 
 static int tdx_alloc_pages_bulk(unsigned int nr_pages, struct page **pages)
@@ -466,6 +473,10 @@ static bool tdx_page_array_validate_release(struct tdx_page_array *array,
 		struct page *page = array->pages[offset + i];
 		u64 val = page_to_phys(page);
 
+		/* Now only for iommu_mt */
+		if (compound_nr(page) > 1)
+			val |= compound_nr(page);
+
 		if (val != entries[i]) {
 			pr_err("%s entry[%d] [0x%llx] doesn't match page hpa [0x%llx]\n",
 			       __func__, i, entries[i], val);
@@ -516,6 +527,60 @@ int tdx_page_array_ctrl_release(struct tdx_page_array *array,
 }
 EXPORT_SYMBOL_GPL(tdx_page_array_ctrl_release);
 
+struct tdx_page_array *
+tdx_page_array_create_iommu_mt(unsigned int iq_order, unsigned int nr_mt_pages)
+{
+	unsigned int nr_entries = 2 + nr_mt_pages;
+	int ret;
+
+	if (nr_entries > TDX_PAGE_ARRAY_MAX_NENTS)
+		return NULL;
+
+	struct tdx_page_array *array __free(kfree) = kzalloc(sizeof(*array),
+							     GFP_KERNEL);
+	if (!array)
+		return NULL;
+
+	struct page *root __free(__free_page) = alloc_page(GFP_KERNEL |
+							   __GFP_ZERO);
+	if (!root)
+		return NULL;
+
+	struct page **pages __free(kfree) = kcalloc(nr_entries, sizeof(*pages),
+						    GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	/* TODO: folio_alloc_node() is preferred, but need numa info */
+	struct folio *t_iq __free(folio_put) = folio_alloc(GFP_KERNEL |
+							   __GFP_ZERO,
+							   iq_order);
+	if (!t_iq)
+		return NULL;
+
+	struct folio *t_ctxiq __free(folio_put) = folio_alloc(GFP_KERNEL |
+							      __GFP_ZERO,
+							      iq_order);
+	if (!t_ctxiq)
+		return NULL;
+
+	ret = tdx_alloc_pages_bulk(nr_mt_pages, pages + 2);
+	if (ret)
+		return NULL;
+
+	pages[0] = folio_page(no_free_ptr(t_iq), 0);
+	pages[1] = folio_page(no_free_ptr(t_ctxiq), 0);
+
+	array->nr_pages = nr_entries;
+	array->pages = no_free_ptr(pages);
+	array->root = no_free_ptr(root);
+
+	tdx_page_array_fill_root(array, 0);
+
+	return no_free_ptr(array);
+}
+EXPORT_SYMBOL_GPL(tdx_page_array_create_iommu_mt);
+
 #define HPA_LIST_INFO_FIRST_ENTRY	GENMASK_U64(11, 3)
 #define HPA_LIST_INFO_PFN		GENMASK_U64(51, 12)
 #define HPA_LIST_INFO_LAST_ENTRY	GENMASK_U64(63, 55)
-- 
2.25.1
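
For reference, the root-entry encoding used by tdx_page_array_fill_root()
above packs NUM_PAGES into the low, otherwise-zero bits of a page-aligned
HPA. A minimal sketch of that encoding follows; the helper name is
illustrative only and not part of this patch:

	/*
	 * Illustrative sketch (not part of the patch): encode one IOMMU_MT
	 * root entry.  The HPA is page aligned, so its low bits are zero
	 * and can carry the NUM_PAGES count for a multi-order (compound)
	 * page, matching the tdx_page_array_fill_root() change above.
	 */
	static u64 iommu_mt_encode_entry(struct page *page)
	{
		u64 entry = page_to_phys(page);

		if (compound_nr(page) > 1)
			entry |= compound_nr(page);

		return entry;
	}

A caller would then build the whole IOMMU_MT with
tdx_page_array_create_iommu_mt(iq_order, nr_mt_pages) and, presumably,
tear it down through the existing page-array release path, which the
tdx_page_array_validate_release() change above teaches about multi-order
entries.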