From: Chengkaitao

1. Add the vmemmap_pte_fallback_allowed() hook so that architectures
   that cannot map the vmemmap with base pages can opt out of the PTE
   fallback in vmemmap_populate_hugepages().
2. On sparc64, reimplement vmemmap_populate() on top of the generic
   vmemmap_populate_hugepages().

Signed-off-by: Chengkaitao
---
v2:
1. Revert the whitespace deletions
2. Rename vmemmap_false_pmd() to vmemmap_pte_fallback_allowed()

Link to V1: https://lore.kernel.org/all/20251217120858.18713-1-pilgrimtao@gmail.com/

 arch/sparc/mm/init_64.c | 50 +++++++++++++++--------------------------
 include/linux/mm.h      |  1 +
 mm/sparse-vmemmap.c     |  7 +++++-
 3 files changed, 25 insertions(+), 33 deletions(-)

diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index df9f7c444c39..86b11150e701 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2581,8 +2581,8 @@ unsigned long _PAGE_CACHE __read_mostly;
 EXPORT_SYMBOL(_PAGE_CACHE);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
-			       int node, struct vmem_altmap *altmap)
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+			       unsigned long addr, unsigned long next)
 {
 	unsigned long pte_base;
 
@@ -2595,39 +2595,25 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 
 	pte_base |= _PAGE_PMD_HUGE;
 
-	vstart = vstart & PMD_MASK;
-	vend = ALIGN(vend, PMD_SIZE);
-	for (; vstart < vend; vstart += PMD_SIZE) {
-		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
-		unsigned long pte;
-		p4d_t *p4d;
-		pud_t *pud;
-		pmd_t *pmd;
-
-		if (!pgd)
-			return -ENOMEM;
-
-		p4d = vmemmap_p4d_populate(pgd, vstart, node);
-		if (!p4d)
-			return -ENOMEM;
-
-		pud = vmemmap_pud_populate(p4d, vstart, node);
-		if (!pud)
-			return -ENOMEM;
-
-		pmd = pmd_offset(pud, vstart);
-		pte = pmd_val(*pmd);
-		if (!(pte & _PAGE_VALID)) {
-			void *block = vmemmap_alloc_block(PMD_SIZE, node);
+	pmd_val(*pmd) = pte_base | __pa(p);
+}
 
-			if (!block)
-				return -ENOMEM;
+bool __meminit vmemmap_pte_fallback_allowed(void)
+{
+	return false;
+}
 
-			pmd_val(*pmd) = pte_base | __pa(block);
-		}
-	}
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+				unsigned long addr, unsigned long next)
+{
+	vmemmap_verify((pte_t *)pmdp, node, addr, next);
+	return 1;
+}
 
-	return 0;
+int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+			       int node, struct vmem_altmap *altmap)
+{
+	return vmemmap_populate_hugepages(vstart, vend, node, altmap);
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15076261d0c2..ca159b029a5d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4248,6 +4248,7 @@ void *vmemmap_alloc_block_buf(unsigned long size, int node,
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
 		     unsigned long addr, unsigned long next);
+bool vmemmap_pte_fallback_allowed(void);
 int vmemmap_check_pmd(pmd_t *pmd, int node,
 		      unsigned long addr, unsigned long next);
 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 37522d6cb398..45eb38048949 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -407,6 +407,11 @@ void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
 {
 }
 
+bool __weak __meminit vmemmap_pte_fallback_allowed(void)
+{
+	return true;
+}
+
 int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
 				       unsigned long addr, unsigned long next)
 {
@@ -446,7 +451,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
 			if (p) {
 				vmemmap_set_pmd(pmd, p, node, addr, next);
 				continue;
-			} else if (altmap) {
+			} else if (altmap || !vmemmap_pte_fallback_allowed()) {
 				/*
 				 * No fallback: In any case we care about, the
 				 * altmap should be reasonably sized and aligned
-- 
2.50.1 (Apple Git-155)
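
For reference, a standalone sketch of the weak-default pattern the patch
relies on. It builds with gcc outside the kernel; only the hook name is
taken from the patch, while the decision helper and the output are
simplified stand-ins, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Generic default, as in mm/sparse-vmemmap.c: PTE fallback is allowed.
 * An architecture that cannot map the vmemmap with base pages links in
 * a strong definition returning false (as the sparc64 hunk above does),
 * and the linker prefers it over this weak one. */
__attribute__((weak)) bool vmemmap_pte_fallback_allowed(void)
{
	return true;
}

/* Condensed form of the decision vmemmap_populate_hugepages() takes
 * when a PMD-sized block cannot be allocated. */
static const char *pmd_alloc_failure_action(bool have_altmap)
{
	if (have_altmap || !vmemmap_pte_fallback_allowed())
		return "fail with -ENOMEM (no fallback)";
	return "fall back to vmemmap_populate_basepages()";
}

int main(void)
{
	/* No altmap, generic weak default: the base-page fallback is taken. */
	printf("%s\n", pmd_alloc_failure_action(false));
	return 0;
}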