When free_pages becomes critically low, the kernel prevents other tasks from entering the slow path to ensure that reclaiming tasks can successfully allocate memory. This blocking is important to avoid memory contention with reclaiming tasks. However, in some cases it is unnecessary because the PCP list may already contain sufficient pages, as freed pages are first placed there and are not immediately visible to the buddy system. By accounting PCP pages as part of free_pages when it is compared against pfmemalloc_reserve, we can reduce unnecessary blocking and improve system responsiveness under low-memory conditions. Signed-off-by: zhongjinji --- mm/internal.h | 1 + mm/page_alloc.c | 14 ++++++++++++++ mm/vmscan.c | 1 + 3 files changed, 16 insertions(+) diff --git a/mm/internal.h b/mm/internal.h index 45b725c3dc03..c8fcee51d662 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -842,6 +842,7 @@ static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int ord #define alloc_frozen_pages(...) \ alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__)) +extern int zone_pcp_pages_count(struct zone *zone); extern void zone_pcp_reset(struct zone *zone); extern void zone_pcp_disable(struct zone *zone); extern void zone_pcp_enable(struct zone *zone); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1999eb7e7c14..e34031946adb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7134,6 +7134,20 @@ void zone_pcp_reset(struct zone *zone) } } +int zone_pcp_pages_count(struct zone *zone) +{ + struct per_cpu_pages *pcp; + int total_pcp_pages = 0; + int cpu; + + for_each_online_cpu(cpu) { + pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); + total_pcp_pages += pcp->count; + } + + return total_pcp_pages; +} + #ifdef CONFIG_MEMORY_HOTREMOVE /* * All pages in the range must be in a single zone, must not contain holes, diff --git a/mm/vmscan.c b/mm/vmscan.c index 674999999cd0..148f452d9cf5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6498,6 +6498,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) 
pfmemalloc_reserve += min_wmark_pages(zone); free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); + free_pages += zone_pcp_pages_count(zone); } /* If there are no reserves (unexpected config) then do not throttle */ -- 2.17.1