In order to separately allocate ptdescs from pages, we need all
allocation and free sites to use the appropriate functions. Convert
these pte allocation/free sites to use ptdescs.

Note that the allocation failure check must be made on the ptdesc
itself before calling ptdesc_address(): ptdesc_address() of a NULL
ptdesc does not return NULL (it performs address arithmetic on the
underlying page pointer), so checking the derived pte pointer would
silently miss a failed allocation.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/x86/mm/pat/set_memory.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 6c6eb486f7a6..2dcb565d8f9b 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
 		if (!pte_none(pte[i]))
 			return false;
 
-	free_page((unsigned long)pte);
+	pagetable_free(virt_to_ptdesc((void *)pte));
 	return true;
 }
 
@@ -1537,9 +1537,15 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 	 */
 }
 
-static int alloc_pte_page(pmd_t *pmd)
+static int alloc_pte_ptdesc(pmd_t *pmd)
 {
-	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
+	pte_t *pte;
 
-	if (!pte)
+	/*
+	 * Check the ptdesc itself: ptdesc_address() of a NULL ptdesc
+	 * does not return NULL, so testing pte would miss the failure.
+	 */
+	if (!ptdesc)
 		return -1;
+	pte = (pte_t *)ptdesc_address(ptdesc);
@@ -1600,7 +1606,7 @@ static long populate_pmd(struct cpa_data *cpa,
 	 */
 	pmd = pmd_offset(pud, start);
 	if (pmd_none(*pmd))
-		if (alloc_pte_page(pmd))
+		if (alloc_pte_ptdesc(pmd))
 			return -1;
 
 	populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
@@ -1641,7 +1647,7 @@ static long populate_pmd(struct cpa_data *cpa,
 	if (start < end) {
 		pmd = pmd_offset(pud, start);
 		if (pmd_none(*pmd))
-			if (alloc_pte_page(pmd))
+			if (alloc_pte_ptdesc(pmd))
 				return -1;
 
 		populate_pte(cpa, start, end, num_pages - cur_pages,
-- 
2.52.0