In order to separately allocate ptdescs from pages, we need all
allocation and free sites to use the appropriate functions. Convert
these pte allocation/free sites to use ptdescs.

Signed-off-by: Vishal Moola (Oracle)
---
 arch/x86/mm/pat/set_memory.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 6c6eb486f7a6..f9f9d4ca8e71 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
 		if (!pte_none(pte[i]))
 			return false;
 
-	free_page((unsigned long)pte);
+	pagetable_free(virt_to_ptdesc((void *)pte));
 	return true;
 }
 
@@ -1537,12 +1537,15 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 	 */
 }
 
-static int alloc_pte_page(pmd_t *pmd)
+static int alloc_pte_ptdesc(pmd_t *pmd)
 {
-	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
-	if (!pte)
+	pte_t *pte;
+	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
+
+	if (!ptdesc)
 		return -1;
 
+	pte = (pte_t *) ptdesc_address(ptdesc);
 	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
 	return 0;
 }
@@ -1600,7 +1603,7 @@ static long populate_pmd(struct cpa_data *cpa,
 		 */
 		pmd = pmd_offset(pud, start);
 		if (pmd_none(*pmd))
-			if (alloc_pte_page(pmd))
+			if (alloc_pte_ptdesc(pmd))
 				return -1;
 
 		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
@@ -1641,7 +1644,7 @@ static long populate_pmd(struct cpa_data *cpa,
 	if (start < end) {
 		pmd = pmd_offset(pud, start);
 		if (pmd_none(*pmd))
-			if (alloc_pte_page(pmd))
+			if (alloc_pte_ptdesc(pmd))
 				return -1;
 
 		populate_pte(cpa, start, end, num_pages - cur_pages,
-- 
2.52.0
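
For reference, the conversion pairs the two sides of the ptdesc API:
memory obtained with pagetable_alloc() must be handed back via
pagetable_free(), with ptdesc_address() and virt_to_ptdesc()
translating between the page table's kernel virtual address and its
ptdesc. A minimal sketch of the pattern the converted sites follow,
assuming the ptdesc helpers from include/linux/mm.h introduced earlier
in the series (the example_* function names below are hypothetical,
for illustration only):

#include <linux/mm.h>

/* Allocate one zeroed page-table page; return its virtual address. */
static pte_t *example_alloc_pte(void)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

	if (!ptdesc)
		return NULL;

	/* Convert the ptdesc to the page's kernel virtual address. */
	return (pte_t *)ptdesc_address(ptdesc);
}

/* Free a page-table page that was allocated with pagetable_alloc(). */
static void example_free_pte(pte_t *pte)
{
	pagetable_free(virt_to_ptdesc((void *)pte));
}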