From: Chen Ridong

The previous patch extended shrink_one() to support both lrugen and
non-lrugen reclaim. shrink_many() and shrink_node_memcgs() are now
almost identical; the only difference is that shrink_many() also calls
should_abort_scan() for lrugen root reclaim.

Add the should_abort_scan() check to shrink_node_memcgs(); the check is
only meaningful for gen-LRU root reclaim, and a !CONFIG_LRU_GEN stub
that returns false keeps it a no-op everywhere else. After this change,
shrink_node_memcgs() can be used directly instead of shrink_many(),
allowing shrink_many() to be safely removed.

Suggested-by: Shakeel Butt
Signed-off-by: Chen Ridong
---
 mm/vmscan.c | 67 ++++++++++++-----------------------------------
 1 file changed, 15 insertions(+), 52 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 795f5ebd9341..dbf2cfbe3243 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4758,57 +4758,6 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	return nr_to_scan < 0;
 }
 
-static void shrink_one(struct lruvec *lruvec, struct scan_control *sc);
-
-static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
-{
-	struct mem_cgroup *target = sc->target_mem_cgroup;
-	struct mem_cgroup_reclaim_cookie reclaim = {
-		.pgdat = pgdat,
-	};
-	struct mem_cgroup_reclaim_cookie *cookie = &reclaim;
-	struct mem_cgroup *memcg;
-
-	if (current_is_kswapd() || sc->memcg_full_walk)
-		cookie = NULL;
-
-	memcg = mem_cgroup_iter(target, NULL, cookie);
-	while (memcg) {
-		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
-
-		cond_resched();
-
-		mem_cgroup_calculate_protection(target, memcg);
-
-		if (mem_cgroup_below_min(target, memcg))
-			goto next;
-
-		if (mem_cgroup_below_low(target, memcg)) {
-			if (!sc->memcg_low_reclaim) {
-				sc->memcg_low_skipped = 1;
-				goto next;
-			}
-			memcg_memory_event(memcg, MEMCG_LOW);
-		}
-
-		shrink_one(lruvec, sc);
-
-		if (should_abort_scan(lruvec, sc)) {
-			if (cookie)
-				mem_cgroup_iter_break(target, memcg);
-			break;
-		}
-
-next:
-		if (cookie && sc->nr_reclaimed >= sc->nr_to_reclaim) {
-			mem_cgroup_iter_break(target, memcg);
-			break;
-		}
-
-		memcg = mem_cgroup_iter(target, memcg, cookie);
-	}
-}
-
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	struct blk_plug plug;
@@ -4829,6 +4778,9 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
 	blk_finish_plug(&plug);
 }
 
+static void shrink_one(struct lruvec *lruvec, struct scan_control *sc);
+static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc);
+
 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	struct blk_plug plug;
@@ -4858,7 +4810,7 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *
 	if (mem_cgroup_disabled())
 		shrink_one(&pgdat->__lruvec, sc);
 	else
-		shrink_many(pgdat, sc);
+		shrink_node_memcgs(pgdat, sc);
 
 	if (current_is_kswapd())
 		sc->nr_reclaimed += reclaimed;
@@ -5554,6 +5506,11 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *
 	BUILD_BUG();
 }
 
+static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
+{
+	return false;
+}
+
 #endif /* CONFIG_LRU_GEN */
 
 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
@@ -5822,6 +5779,12 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 
 		shrink_one(lruvec, sc);
 
+		if (should_abort_scan(lruvec, sc)) {
+			if (partial)
+				mem_cgroup_iter_break(target_memcg, memcg);
+			break;
+		}
+
 		/* If partial walks are allowed, bail once goal is reached */
 		if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) {
 			mem_cgroup_iter_break(target_memcg, memcg);
-- 
2.34.1