Vmalloc explicitly supports a list of gfp flags, but we never enforce
that list. vmalloc has been trying to handle unsupported flags by
clearing and setting flags wherever necessary. This is messy and makes
the code harder to understand, when we could simply check for supported
input up front instead.

Define a helper mask and function that warn callers when they have
passed in invalid flags, and clear those unsupported vmalloc flags.

Suggested-by: Christoph Hellwig
Signed-off-by: Vishal Moola (Oracle)
Reviewed-by: Christoph Hellwig
---
 mm/vmalloc.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0832f944544c..40d4bdcadb6d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3911,6 +3911,24 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	return NULL;
 }
 
+/*
+ * See __vmalloc_node_range() for a clear list of supported vmalloc flags.
+ * This mask lists all flags currently passed through vmalloc. Currently,
+ * __GFP_ZERO is used by BPF and __GFP_NORETRY is used by percpu.
+ */
+#define GFP_VMALLOC_SUPPORTED	(GFP_KERNEL | GFP_ATOMIC | GFP_NOWAIT |\
+		__GFP_NOFAIL | __GFP_ZERO | __GFP_NORETRY)
+
+static gfp_t vmalloc_fix_flags(gfp_t flags)
+{
+	gfp_t invalid_mask = flags & ~GFP_VMALLOC_SUPPORTED;
+
+	flags &= GFP_VMALLOC_SUPPORTED;
+	WARN(1, "Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
+	     invalid_mask, &invalid_mask, flags, &flags);
+	return flags;
+}
+
 /**
  * __vmalloc_node_range - allocate virtually contiguous memory
  * @size:		  allocation size
@@ -4092,6 +4110,8 @@ EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
 
 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 {
+	if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED))
+		gfp_mask = vmalloc_fix_flags(gfp_mask);
 	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
@@ -4131,6 +4151,8 @@ EXPORT_SYMBOL(vmalloc_noprof);
  */
 void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
 {
+	if (unlikely(gfp_mask & ~GFP_VMALLOC_SUPPORTED))
+		gfp_mask = vmalloc_fix_flags(gfp_mask);
 	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
 					   gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 					   node, __builtin_return_address(0));
-- 
2.51.1


vm_area_alloc_pages() attempts to use different gfp flags as a way to
optimize allocations. This has been done inline, which makes the code
harder to read. Add a helper function to make it more readable.

Signed-off-by: Vishal Moola (Oracle)
---
 mm/vmalloc.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 40d4bdcadb6d..8fbf78e8eb67 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3614,6 +3614,17 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
 EXPORT_SYMBOL_GPL(vmap_pfn);
 #endif /* CONFIG_VMAP_PFN */
 
+/*
+ * Helper for vmalloc to adjust the gfp flags for certain allocations.
+ */
+static inline gfp_t vmalloc_gfp_adjust(gfp_t flags, const bool large)
+{
+	flags |= __GFP_NOWARN;
+	if (large)
+		flags &= ~__GFP_NOFAIL;
+	return flags;
+}
+
 static inline unsigned int
 vm_area_alloc_pages(gfp_t gfp, int nid,
 		unsigned int order, unsigned int nr_pages, struct page **pages)
@@ -3852,9 +3863,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	 * Please note, the __vmalloc_node_range_noprof() falls-back
 	 * to order-0 pages if high-order attempt is unsuccessful.
 	 */
-	area->nr_pages = vm_area_alloc_pages((page_order ?
-			gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
-			node, page_order, nr_small_pages, area->pages);
+	area->nr_pages = vm_area_alloc_pages(
+			vmalloc_gfp_adjust(gfp_mask, page_order), node,
+			page_order, nr_small_pages, area->pages);
 
 	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 	/* All pages of vm should be charged to same memcg, so use first one. */
-- 
2.51.1


Now that we have already checked for unsupported flags, we can use the
helper function to set the necessary gfp flags for the large-order
allocation optimization.

Signed-off-by: Vishal Moola (Oracle)
---
 mm/vmalloc.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8fbf78e8eb67..970ee6756909 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3634,10 +3634,8 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
 	unsigned int max_attempt_order = MAX_PAGE_ORDER;
 	struct page *page;
 	int i;
-	gfp_t large_gfp = (gfp &
-			~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL | __GFP_COMP))
-			| __GFP_NOWARN;
 	unsigned int large_order = ilog2(nr_remaining);
+	gfp_t large_gfp = vmalloc_gfp_adjust(gfp, large_order) & ~__GFP_DIRECT_RECLAIM;
 
 	large_order = min(max_attempt_order, large_order);
 
-- 
2.51.1


The only caller, vb_alloc(), passes GFP_KERNEL, which is a subset of
GFP_RECLAIM_MASK, into new_vmap_block(). Since there's no reason to
apply this mask here, remove it.

Signed-off-by: Vishal Moola (Oracle)
Reviewed-by: Uladzislau Rezki (Sony)
---
 mm/vmalloc.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 970ee6756909..eb4e68985190 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2699,8 +2699,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 
 	node = numa_node_id();
 
-	vb = kmalloc_node(sizeof(struct vmap_block),
-			gfp_mask & GFP_RECLAIM_MASK, node);
+	vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask, node);
 	if (unlikely(!vb))
 		return ERR_PTR(-ENOMEM);
-- 
2.51.1
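
As an illustration of the check added in patch 1 (the caller below is
hypothetical and not part of the series):

	/* Hypothetical caller, for illustration only: __GFP_HIGHMEM is
	 * not in GFP_VMALLOC_SUPPORTED, so this call would go through
	 * vmalloc_fix_flags(), which prints the "Fix your code!"
	 * warning, strips __GFP_HIGHMEM, and lets the allocation
	 * proceed with plain GFP_KERNEL.
	 */
	void *buf = __vmalloc(64 * PAGE_SIZE, GFP_KERNEL | __GFP_HIGHMEM);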
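
And a sketch of the flag arithmetic that patches 2 and 3 centralize in
vmalloc_gfp_adjust(), written as a hypothetical self-check; this is my
reading of the helper, for illustration only:

	/* For a high-order attempt, the helper suppresses failure
	 * warnings and drops __GFP_NOFAIL, since an unsuccessful
	 * high-order attempt falls back to order-0 pages anyway.
	 */
	gfp_t adjusted = vmalloc_gfp_adjust(GFP_KERNEL | __GFP_NOFAIL, true);
	WARN_ON(adjusted != (GFP_KERNEL | __GFP_NOWARN));

The large-order path in vm_area_alloc_pages() additionally clears
__GFP_DIRECT_RECLAIM so the opportunistic attempt cannot stall, and, as
I read patch 1, the old explicit __GFP_COMP clearing can go away because
__GFP_COMP is already filtered out as unsupported input.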