Commit 1ebbb21811b7 ("mm/page_alloc: explicitly define how __GFP_HIGH
non-blocking allocations accesses reserves") renamed ALLOC_HARDER to
ALLOC_NON_BLOCK because the former is "a vague description". However,
the vagueness is accurate here: this is a vague flag. It is not set for
__GFP_NOMEMALLOC. It doesn't really mean "allocate without blocking"
but rather "allow dipping into atomic reserves, _because_ of the need
not to block".

A later commit will need an alloc flag that really means "don't block
here", so go back to the flag's old name and update the commentary to
try and give it a slightly clearer meaning.

Signed-off-by: Brendan Jackman
---
 mm/internal.h   | 9 +++++----
 mm/page_alloc.c | 8 ++++----
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 6006cfb2b9c7e771a0c647c471901dc7fcdad242..513aba6c00bed813c9e38464aec5a15e65edaa58 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1297,9 +1297,10 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_OOM		ALLOC_NO_WATERMARKS
 #endif
 
-#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
-					 * to 25% of the min watermark or
-					 * 62.5% if __GFP_HIGH is set.
+#define ALLOC_HARDER		 0x10 /* Because the caller cannot block,
+					 * allow access to 25% of the min
+					 * watermark or 62.5% if __GFP_HIGH is
+					 * set.
 					 */
 #define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
 				       * of the min watermark.
@@ -1316,7 +1317,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
 
 /* Flags that allow allocations below the min watermark. */
-#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
+#define ALLOC_RESERVES (ALLOC_HARDER|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0b205aefd27e188c492c32754db08a4488317bd8..cd47cfaae820ce696d2e6e0c47436e00d3feef60 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3295,7 +3295,7 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 			 * reserves as failing now is worse than failing a
 			 * high-order atomic allocation in the future.
 			 */
-			if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
+			if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_HARDER)))
 				page = __rmqueue_smallest(zone, order, ft_high);
 
 			if (!page) {
@@ -3662,7 +3662,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 			 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
 			 * access to the min reserve.
 			 */
-			if (alloc_flags & ALLOC_NON_BLOCK)
+			if (alloc_flags & ALLOC_HARDER)
 				min -= min / 4;
 		}
 
@@ -4546,7 +4546,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 	 * The caller may dip into page reserves a bit more if the caller
 	 * cannot run direct reclaim, or if the caller has realtime scheduling
 	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
-	 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
+	 * set both ALLOC_HARDER and ALLOC_MIN_RESERVE(__GFP_HIGH).
 	 */
 	alloc_flags |= (__force int)
 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
@@ -4557,7 +4557,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 		 * if it can't schedule.
 		 */
 		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
-			alloc_flags |= ALLOC_NON_BLOCK;
+			alloc_flags |= ALLOC_HARDER;
 
 			if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
 				alloc_flags |= ALLOC_HIGHATOMIC;
-- 
2.50.1
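
[Editor's note, not part of the patch: the sketch below is a simplified
illustration of the flag semantics the commit message describes. It mirrors
only the reserve-related part of gfp_to_alloc_flags(); the real code folds
__GFP_HIGH in by value (it equals ALLOC_MIN_RESERVE) and also handles
realtime tasks, highatomic reserves and kswapd waking. The function name
sketch_reserve_flags() is made up for the example.]

/*
 * Illustrative sketch only: how a request would end up with ALLOC_HARDER
 * after this rename. Explicit checks are used purely for readability.
 */
static unsigned int sketch_reserve_flags(gfp_t gfp_mask)
{
	unsigned int alloc_flags = 0;

	/* __GFP_HIGH (e.g. GFP_ATOMIC) may use 50% of the min watermark. */
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_MIN_RESERVE;

	/*
	 * The caller cannot run direct reclaim, so allow dipping further
	 * into the reserves -- unless __GFP_NOMEMALLOC explicitly opted
	 * out. The flag does not literally mean "will not block".
	 */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM) &&
	    !(gfp_mask & __GFP_NOMEMALLOC))
		alloc_flags |= ALLOC_HARDER;

	return alloc_flags;
}

With this, GFP_ATOMIC ends up with both ALLOC_MIN_RESERVE and ALLOC_HARDER,
while GFP_ATOMIC | __GFP_NOMEMALLOC keeps only ALLOC_MIN_RESERVE even though
the caller still cannot block -- which is the distinction the commit message
is getting at.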