For each memory region that the TDX module might use (TDMR), three separate PAMT allocations are needed, one for each supported page size (1GB, 2MB, 4KB). These store information on each page in the TDMR. In Linux, they are allocated out of one physically contiguous block, in order to more efficiently use some internal TDX module bookkeeping resources. So some simple math is needed to break the single large allocation into three smaller allocations, one for each page size. There are some commonalities in the math needed to calculate the base and size for each smaller allocation, and so an effort was made to share logic across the three. Unfortunately, doing this turned out to be rather tortured, with a loop iterating over the three page sizes, only to call into a function with a case statement for each page size. In the future, Dynamic PAMT will add more logic that is special to the 4KB page size, making the benefit of the shared math even more questionable. Three is not a very high number, so get rid of the loop and just duplicate the small calculation three times. In doing so, set up for future Dynamic PAMT changes and drop a net 33 lines of code. Since the loop that iterates over it is gone, further simplify the code by dropping the array of intermediate size and base storage. Just store the values to their final locations. Accept the small complication of having to clear tdmr->pamt_4k_base in the error path, so that tdmr_do_pamt_func() will not try to operate on the TDMR struct when attempting to free it. 
Signed-off-by: Rick Edgecombe --- v3: - New patch --- arch/x86/virt/vmx/tdx/tdx.c | 69 ++++++++++--------------------------- 1 file changed, 18 insertions(+), 51 deletions(-) diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c index e962fffa73a6..38dae825bbb9 100644 --- a/arch/x86/virt/vmx/tdx/tdx.c +++ b/arch/x86/virt/vmx/tdx/tdx.c @@ -445,30 +445,16 @@ static int fill_out_tdmrs(struct list_head *tmb_list, * PAMT size is always aligned up to 4K page boundary. */ static unsigned long tdmr_get_pamt_sz(struct tdmr_info *tdmr, int pgsz, - u16 pamt_entry_size) + u16 pamt_entry_size[]) { unsigned long pamt_sz, nr_pamt_entries; + const int tdx_pg_size_shift[] = { PAGE_SHIFT, PMD_SHIFT, PUD_SHIFT }; - switch (pgsz) { - case TDX_PS_4K: - nr_pamt_entries = tdmr->size >> PAGE_SHIFT; - break; - case TDX_PS_2M: - nr_pamt_entries = tdmr->size >> PMD_SHIFT; - break; - case TDX_PS_1G: - nr_pamt_entries = tdmr->size >> PUD_SHIFT; - break; - default: - WARN_ON_ONCE(1); - return 0; - } + nr_pamt_entries = tdmr->size >> tdx_pg_size_shift[pgsz]; + pamt_sz = nr_pamt_entries * pamt_entry_size[pgsz]; - pamt_sz = nr_pamt_entries * pamt_entry_size; /* TDX requires PAMT size must be 4K aligned */ - pamt_sz = ALIGN(pamt_sz, PAGE_SIZE); - - return pamt_sz; + return PAGE_ALIGN(pamt_sz); } /* @@ -509,25 +495,19 @@ static int tdmr_set_up_pamt(struct tdmr_info *tdmr, struct list_head *tmb_list, u16 pamt_entry_size[]) { - unsigned long pamt_base[TDX_PS_NR]; - unsigned long pamt_size[TDX_PS_NR]; - unsigned long tdmr_pamt_base; unsigned long tdmr_pamt_size; struct page *pamt; - int pgsz, nid; - + int nid; nid = tdmr_get_nid(tdmr, tmb_list); /* * Calculate the PAMT size for each TDX supported page size * and the total PAMT size. 
*/ - tdmr_pamt_size = 0; - for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) { - pamt_size[pgsz] = tdmr_get_pamt_sz(tdmr, pgsz, - pamt_entry_size[pgsz]); - tdmr_pamt_size += pamt_size[pgsz]; - } + tdmr->pamt_4k_size = tdmr_get_pamt_sz(tdmr, TDX_PS_4K, pamt_entry_size); + tdmr->pamt_2m_size = tdmr_get_pamt_sz(tdmr, TDX_PS_2M, pamt_entry_size); + tdmr->pamt_1g_size = tdmr_get_pamt_sz(tdmr, TDX_PS_1G, pamt_entry_size); + tdmr_pamt_size = tdmr->pamt_4k_size + tdmr->pamt_2m_size + tdmr->pamt_1g_size; /* * Allocate one chunk of physically contiguous memory for all @@ -535,26 +515,16 @@ static int tdmr_set_up_pamt(struct tdmr_info *tdmr, * in overlapped TDMRs. */ pamt = alloc_contig_pages(tdmr_pamt_size >> PAGE_SHIFT, GFP_KERNEL, - nid, &node_online_map); - if (!pamt) + nid, &node_online_map); + if (!pamt) { + /* Zero base so that the error path will skip freeing. */ + tdmr->pamt_4k_base = 0; return -ENOMEM; - - /* - * Break the contiguous allocation back up into the - * individual PAMTs for each page size. - */ - tdmr_pamt_base = page_to_pfn(pamt) << PAGE_SHIFT; - for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) { - pamt_base[pgsz] = tdmr_pamt_base; - tdmr_pamt_base += pamt_size[pgsz]; } - tdmr->pamt_4k_base = pamt_base[TDX_PS_4K]; - tdmr->pamt_4k_size = pamt_size[TDX_PS_4K]; - tdmr->pamt_2m_base = pamt_base[TDX_PS_2M]; - tdmr->pamt_2m_size = pamt_size[TDX_PS_2M]; - tdmr->pamt_1g_base = pamt_base[TDX_PS_1G]; - tdmr->pamt_1g_size = pamt_size[TDX_PS_1G]; + tdmr->pamt_4k_base = page_to_phys(pamt); + tdmr->pamt_2m_base = tdmr->pamt_4k_base + tdmr->pamt_4k_size; + tdmr->pamt_1g_base = tdmr->pamt_2m_base + tdmr->pamt_2m_size; return 0; } @@ -585,10 +555,7 @@ static void tdmr_do_pamt_func(struct tdmr_info *tdmr, tdmr_get_pamt(tdmr, &pamt_base, &pamt_size); /* Do nothing if PAMT hasn't been allocated for this TDMR */ - if (!pamt_size) - return; - - if (WARN_ON_ONCE(!pamt_base)) + if (!pamt_base) return; pamt_func(pamt_base, pamt_size); -- 2.51.0