Try to align the vmap virtual address to PMD_SHIFT or a larger PTE
mapping size hinted by the architecture, so contiguous pages can be
batch-mapped when setting PMD or PTE entries.

Signed-off-by: Barry Song (Xiaomi)
---
 mm/vmalloc.c | 41 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e8dbfada42bc..6643ec0288cd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3576,6 +3576,45 @@ static int vmap_contig_pages_range(unsigned long addr, unsigned long end,
 	return err;
 }
 
+/*
+ * Allocate a vm_struct whose base address is aligned to the largest
+ * page-table mapping size usable for @size, so contiguous pages can
+ * later be batch-mapped at PMD or contiguous-PTE granularity.
+ */
+static struct vm_struct *get_aligned_vm_area(unsigned long size, unsigned long flags)
+{
+	unsigned int shift = (size >= PMD_SIZE) ? PMD_SHIFT :
+			arch_vmap_pte_supported_shift(size);
+	struct vm_struct *vm_area = NULL;
+
+	/*
+	 * Retry with progressively smaller alignments until plain
+	 * PAGE_SHIFT alignment is reached; forcing the shift to shrink
+	 * on every failure guarantees the loop terminates even if the
+	 * arch hint does not.
+	 */
+	while (1) {
+		unsigned long align = 1UL << shift;
+		unsigned int next_shift;
+
+		vm_area = __get_vm_area_node(size, align, PAGE_SHIFT, flags,
+				VMALLOC_START, VMALLOC_END,
+				NUMA_NO_NODE, GFP_KERNEL,
+				__builtin_return_address(0));
+		if (vm_area || shift <= PAGE_SHIFT)
+			break;
+
+		/* Fall back: PMD_SHIFT -> arch PTE hint -> PAGE_SHIFT. */
+		next_shift = (shift == PMD_SHIFT) ?
+				arch_vmap_pte_supported_shift(size) : PAGE_SHIFT;
+		if (next_shift >= shift)	/* ensure forward progress */
+			next_shift = PAGE_SHIFT;
+		shift = next_shift;
+	}
+
+	return vm_area;
+}
+
 /**
  * vmap - map an array of pages into virtually contiguous space
  * @pages: array of page pointers
@@ -3614,7 +3653,7 @@ void *vmap(struct page **pages, unsigned int count,
 		return NULL;
 
 	size = (unsigned long)count << PAGE_SHIFT;
-	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
+	area = get_aligned_vm_area(size, flags);
 	if (!area)
 		return NULL;
-- 
2.39.3 (Apple Git-146)