The sparse_buffer was originally introduced in commit 9bdac9142407 ("sparsemem: Put mem map for one node together.") to allocate a contiguous block of memory for all memmaps of a NUMA node. However, the original commit message did not clearly state the actual benefits or the necessity of keeping all memmap areas strictly contiguous for a given node. With the evolution of memory management over the years, the current code only requires a 2MB contiguous allocation to support huge page mappings for CONFIG_SPARSEMEM_VMEMMAP. Thus, it seems we no longer need such complex logic to keep all memmap allocations completely contiguous across the entire node. Since the original commit was merged 16 years ago and no additional context regarding its original intention could be found, this patch proposes removing this mechanism to reduce the maintenance burden. If anyone knows the historical background, or of specific architectures or edge cases that still rely on this, sharing that context would be highly appreciated. (Note that the mechanism implemented in 9bdac9142407 was restricted to x86_64, so I doubt there are any functional dependencies on other architectures.)
Signed-off-by: Muchun Song --- include/linux/mm.h | 1 - mm/sparse-vmemmap.c | 7 +----- mm/sparse.c | 58 +-------------------------------------------- 3 files changed, 2 insertions(+), 64 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 0b776907152e..1d676fef4303 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4855,7 +4855,6 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) } #endif -void *sparse_buffer_alloc(unsigned long size); unsigned long section_map_size(void); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 6eadb9d116e4..aca1b00e86dd 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -87,15 +87,10 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size, void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node, struct vmem_altmap *altmap) { - void *ptr; - if (altmap) return altmap_alloc_block_buf(size, altmap); - ptr = sparse_buffer_alloc(size); - if (!ptr) - ptr = vmemmap_alloc_block(size, node); - return ptr; + return vmemmap_alloc_block(size, node); } static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap) diff --git a/mm/sparse.c b/mm/sparse.c index effdac6b0ab1..672e2ad396a8 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -241,12 +241,9 @@ struct page __init *__populate_section_memmap(unsigned long pfn, struct dev_pagemap *pgmap) { unsigned long size = section_map_size(); - struct page *map = sparse_buffer_alloc(size); + struct page *map; phys_addr_t addr = __pa(MAX_DMA_ADDRESS); - if (map) - return map; - map = memmap_alloc(size, size, addr, nid, false); if (!map) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n", @@ -256,55 +253,6 @@ struct page __init *__populate_section_memmap(unsigned long pfn, } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ -static void *sparsemap_buf __meminitdata; -static 
void *sparsemap_buf_end __meminitdata; - -static inline void __meminit sparse_buffer_free(unsigned long size) -{ - WARN_ON(!sparsemap_buf || size == 0); - memblock_free(sparsemap_buf, size); -} - -static void __init sparse_buffer_init(unsigned long size, int nid) -{ - phys_addr_t addr = __pa(MAX_DMA_ADDRESS); - WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */ - /* - * Pre-allocated buffer is mainly used by __populate_section_memmap - * and we want it to be properly aligned to the section size - this is - * especially the case for VMEMMAP which maps memmap to PMDs - */ - sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true); - sparsemap_buf_end = sparsemap_buf + size; -} - -static void __init sparse_buffer_fini(void) -{ - unsigned long size = sparsemap_buf_end - sparsemap_buf; - - if (sparsemap_buf && size > 0) - sparse_buffer_free(size); - sparsemap_buf = NULL; -} - -void * __meminit sparse_buffer_alloc(unsigned long size) -{ - void *ptr = NULL; - - if (sparsemap_buf) { - ptr = (void *) roundup((unsigned long)sparsemap_buf, size); - if (ptr + size > sparsemap_buf_end) - ptr = NULL; - else { - /* Free redundant aligned space */ - if ((unsigned long)(ptr - sparsemap_buf) > 0) - sparse_buffer_free((unsigned long)(ptr - sparsemap_buf)); - sparsemap_buf = ptr + size; - } - } - return ptr; -} - void __weak __meminit vmemmap_populate_print_last(void) { } @@ -362,8 +310,6 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin, goto failed; } - sparse_buffer_init(map_count * section_map_size(), nid); - sparse_vmemmap_init_nid_early(nid); for_each_present_section_nr(pnum_begin, pnum) { @@ -381,7 +327,6 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin, __func__, nid); pnum_begin = pnum; sparse_usage_fini(); - sparse_buffer_fini(); goto failed; } memmap_boot_pages_add(DIV_ROUND_UP(PAGES_PER_SECTION * sizeof(struct page), @@ -390,7 +335,6 @@ static void __init sparse_init_nid(int nid, unsigned long 
pnum_begin, } } sparse_usage_fini(); - sparse_buffer_fini(); return; failed: /* -- 2.20.1