Use the ptdesc APIs for all page table allocation and free sites to
allow their separate allocation from struct page in the future.

Convert the PTE allocation and free sites to use the generic page table
APIs, as they already use ptdescs. Pass through init_mm since these are
kernel page tables; otherwise, pte_alloc_one_kernel() becomes a no-op.

Signed-off-by: Vishal Moola (Oracle)
Acked-by: Mike Rapoport (Microsoft)
---
 arch/x86/mm/pat/set_memory.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 40581a720fe8..a4b1b329c23d 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1408,7 +1408,7 @@ static bool try_to_free_pte_page(pte_t *pte)
 		if (!pte_none(pte[i]))
 			return false;
 
-	free_page((unsigned long)pte);
+	pte_free_kernel(&init_mm, pte);
 	return true;
 }
@@ -1539,7 +1539,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 static int alloc_pte_page(pmd_t *pmd)
 {
-	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+	pte_t *pte = pte_alloc_one_kernel(&init_mm);
 	if (!pte)
 		return -1;
-- 
2.53.0

Use the ptdesc APIs for all page table allocation and free sites to
allow their separate allocation from struct page in the future.

Convert the PMD allocation and free sites to use the generic page table
APIs, as they already use ptdescs. Pass through init_mm since these are
kernel page tables, as pmd_alloc_one() requires it to identify kernel
page tables. Because the generic implementation does not use the second
argument, pass a placeholder to avoid reimplementing it or risking
breakage on other architectures.
Signed-off-by: Vishal Moola (Oracle)
Acked-by: Mike Rapoport (Microsoft)
---
 arch/x86/mm/pat/set_memory.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index a4b1b329c23d..72a260007996 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1420,7 +1420,7 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
 		if (!pmd_none(pmd[i]))
 			return false;
 
-	free_page((unsigned long)pmd);
+	pmd_free(&init_mm, pmd);
 	return true;
 }
@@ -1549,7 +1549,11 @@ static int alloc_pte_page(pmd_t *pmd)
 static int alloc_pmd_page(pud_t *pud)
 {
-	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+	/*
+	 * Pass 0 as a placeholder for the second argument, since the
+	 * generic implementation of pmd_alloc_one() does not use it.
+	 */
+	pmd_t *pmd = pmd_alloc_one(&init_mm, 0);
 	if (!pmd)
 		return -1;
-- 
2.53.0

Use the ptdesc APIs for all page table allocation and free sites to
allow their separate allocation from struct page in the future.

Convert the remaining get_zeroed_page() calls to the generic page table
APIs, as they already use ptdescs. Pass through init_mm since these are
kernel page tables, as both functions require it to identify kernel page
tables. Because the generic implementations do not use the second
argument, pass a placeholder to avoid reimplementing them or risking
breakage on other architectures.

It is not obvious whether these pages are freed. Regardless, convert the
remaining free paths as needed, noting that the only other possible free
paths have already been converted and that a frozen page table test
kernel has not reported any issues.
Signed-off-by: Vishal Moola (Oracle)
Acked-by: Mike Rapoport (Microsoft)
---
 arch/x86/mm/pat/set_memory.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 72a260007996..17c1c28e3491 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1747,7 +1747,11 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 	pgd_entry = cpa->pgd + pgd_index(addr);
 
 	if (pgd_none(*pgd_entry)) {
-		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
+		/*
+		 * Pass 0 as a placeholder for the second argument, since the
+		 * generic implementation of p4d_alloc_one() does not use it.
+		 */
+		p4d = p4d_alloc_one(&init_mm, 0);
 		if (!p4d)
 			return -1;
@@ -1759,7 +1763,11 @@
 	 */
 	p4d = p4d_offset(pgd_entry, addr);
 	if (p4d_none(*p4d)) {
-		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
+		/*
+		 * Pass 0 as a placeholder for the second argument, since the
+		 * generic implementation of pud_alloc_one() does not use it.
+		 */
+		pud = pud_alloc_one(&init_mm, 0);
 		if (!pud)
 			return -1;
-- 
2.53.0

Use the ptdesc APIs for all page table allocation and free sites to
allow their separate allocation from struct page in the future.

Update split_large_page() to allocate a ptdesc instead of allocating a
page for use as a page table.
Signed-off-by: Vishal Moola (Oracle)
Acked-by: Mike Rapoport (Microsoft)
---
 arch/x86/mm/pat/set_memory.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 17c1c28e3491..cba907c39718 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1119,9 +1119,10 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
 static int
 __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
-		   struct page *base)
+		   struct ptdesc *ptdesc)
 {
 	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
+	struct page *base = ptdesc_page(ptdesc);
 	pte_t *pbase = (pte_t *)page_address(base);
 	unsigned int i, level;
 	pgprot_t ref_prot;
@@ -1226,18 +1227,18 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
 			    unsigned long address)
 {
-	struct page *base;
+	struct ptdesc *ptdesc;
 
 	if (!debug_pagealloc_enabled())
 		spin_unlock(&cpa_lock);
-	base = alloc_pages(GFP_KERNEL, 0);
+	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
 	if (!debug_pagealloc_enabled())
 		spin_lock(&cpa_lock);
-	if (!base)
+	if (!ptdesc)
 		return -ENOMEM;
-	if (__split_large_page(cpa, kpte, address, base))
-		__free_page(base);
+	if (__split_large_page(cpa, kpte, address, ptdesc))
+		pagetable_free(ptdesc);
 	return 0;
 }
-- 
2.53.0