In many cases, the pages passed to vmap() may include high-order pages
allocated with __GFP_COMP flags. For example, the system heap often
allocates pages in descending order: order 8, then 4, then 0.

Currently, vmap() iterates over every page individually—even pages
inside a high-order block are handled one by one. This patch detects
high-order pages and maps them as a single contiguous block whenever
possible.

An alternative would be to implement a new API, vmap_sg(), but that
change seems to be large in scope.

Signed-off-by: Barry Song (Xiaomi)
---
 mm/vmalloc.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 63 insertions(+), 2 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index eba436386929..e8dbfada42bc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3529,6 +3529,69 @@ void vunmap(const void *addr)
 }
 EXPORT_SYMBOL(vunmap);
 
+/*
+ * get_vmap_batch_order - largest batch order mappable at pages[idx]
+ * @pages: array of pages backing the mapping
+ * @max_steps: number of array entries remaining from @idx onwards
+ * @idx: current index into @pages
+ *
+ * Returns the compound order of pages[idx] when the whole compound page
+ * fits within the remaining array entries and those entries are
+ * physically contiguous; otherwise returns 0 to map a single base page.
+ */
+static inline int get_vmap_batch_order(struct page **pages,
+		unsigned int max_steps, unsigned int idx)
+{
+	unsigned int nr_pages;
+
+	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP) ||
+	    ioremap_max_page_shift == PAGE_SHIFT)
+		return 0;
+
+	nr_pages = compound_nr(pages[idx]);
+	if (nr_pages == 1 || max_steps < nr_pages)
+		return 0;
+
+	if (num_pages_contiguous(&pages[idx], nr_pages) == nr_pages)
+		return compound_order(pages[idx]);
+	return 0;
+}
+
+/*
+ * Map @pages into [@addr, @end), batching physically contiguous
+ * high-order pages into single huge mappings where supported.
+ */
+static int vmap_contig_pages_range(unsigned long addr, unsigned long end,
+		pgprot_t prot, struct page **pages)
+{
+	unsigned int count = (end - addr) >> PAGE_SHIFT;
+	unsigned long start = addr;
+	int err;
+
+	err = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
+			PAGE_SHIFT, GFP_KERNEL);
+	if (err)
+		goto out;
+
+	for (unsigned int i = 0; i < count; ) {
+		unsigned int shift = PAGE_SHIFT +
+			get_vmap_batch_order(pages, count - i, i);
+
+		err = vmap_range_noflush(addr, addr + (1UL << shift),
+				page_to_phys(pages[i]), prot, shift);
+		if (err)
+			goto out;
+
+		addr += 1UL << shift;
+		i += 1U << (shift - PAGE_SHIFT);
+	}
+
+out:
+	/* @addr has been advanced past mapped chunks (== @end on success);
+	 * flush the whole requested range, not the empty tail. */
+	flush_cache_vmap(start, end);
+	return err;
+}
+
 /**
  * vmap - map an array of pages into virtually contiguous space
  * @pages: array of page pointers
@@ -3572,8 +3635,8 @@ void *vmap(struct page **pages, unsigned int count,
 		return NULL;
 
 	addr = (unsigned long)area->addr;
-	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
-			pages, PAGE_SHIFT) < 0) {
+	if (vmap_contig_pages_range(addr, addr + size, pgprot_nx(prot),
+			pages) < 0) {
 		vunmap(area->addr);
 		return NULL;
 	}
-- 
2.39.3 (Apple Git-146)