From: liuqiqi

In zone_reclaimable_pages(), if the page counts for NR_ZONE_INACTIVE_FILE,
NR_ZONE_ACTIVE_FILE, NR_ZONE_INACTIVE_ANON and NR_ZONE_ACTIVE_ANON are all
zero, the function falls back to returning the number of free pages. When
that happens, should_reclaim_retry() ends up counting the free pages twice
while estimating how much memory can still be reclaimed:

static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
		     struct alloc_context *ac, int alloc_flags,
		     bool did_some_progress, int *no_progress_loops)
{
	...
		available = reclaimable = zone_reclaimable_pages(zone);
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);

Drop the free-page fallback from zone_reclaimable_pages() so free pages
are no longer double-counted, and instead check NR_FREE_PAGES explicitly
in allow_direct_reclaim(), so that zones such as DMA32 which hold only
free pages are still taken into account there.

Signed-off-by: liuqiqi
---
 mm/vmscan.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 34410d24dc15..a9aaefdba7a2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -393,14 +393,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
 	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
 		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
 			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
-	/*
-	 * If there are no reclaimable file-backed or anonymous pages,
-	 * ensure zones with sufficient free pages are not skipped.
-	 * This prevents zones like DMA32 from being ignored in reclaim
-	 * scenarios where they can still help alleviate memory pressure.
-	 */
-	if (nr == 0)
-		nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
 	return nr;
 }
 
@@ -6417,7 +6410,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
 		return true;
 
 	for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
-		if (!zone_reclaimable_pages(zone))
+		if (!zone_reclaimable_pages(zone) && !zone_page_state_snapshot(zone, NR_FREE_PAGES))
 			continue;
 
 		pfmemalloc_reserve += min_wmark_pages(zone);
-- 
2.25.1
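
For readers who want to see the double counting in isolation, below is a
minimal user-space sketch (not kernel code; the counter values are made-up
numbers chosen only for illustration) of how should_reclaim_retry()'s
"available" estimate changes once the fallback is removed:

	/*
	 * Illustrative sketch only: models a zone with no file/anon LRU
	 * pages and 1024 free pages, and shows the "available" estimate
	 * computed the way should_reclaim_retry() does.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long lru_pages = 0;      /* no reclaimable LRU pages */
		unsigned long free_pages = 1024;  /* assumed NR_FREE_PAGES    */

		/* old behaviour: fallback makes "reclaimable" = free pages */
		unsigned long reclaimable_old = lru_pages ? lru_pages : free_pages;
		unsigned long available_old = reclaimable_old + free_pages;

		/* behaviour after this patch: no fallback, no double count */
		unsigned long reclaimable_new = lru_pages;
		unsigned long available_new = reclaimable_new + free_pages;

		printf("available (old): %lu\n", available_old); /* 2048 */
		printf("available (new): %lu\n", available_new); /* 1024 */
		return 0;
	}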