From: Chen Ridong

Currently, flush_reclaim_state() is placed differently in
shrink_node_memcgs() and shrink_many(). shrink_many() (only used by
MGLRU) calls it after each lruvec is shrunk, while shrink_node_memcgs()
calls it only after all lruvecs have been shrunk.

Move flush_reclaim_state() into shrink_node_memcgs() and call it after
shrinking each lruvec. This unifies the behavior and is reasonable
because:

1. flush_reclaim_state() adds current->reclaim_state->reclaimed to
   sc->nr_reclaimed.
2. For non-MGLRU root reclaim, this can help stop the iteration earlier
   once nr_to_reclaim is reached.
3. For non-root reclaim, the effect is negligible since
   flush_reclaim_state() does nothing in that case.

With flush_reclaim_state() moved into shrink_node_memcgs(), shrink_one()
can be extended to cover both the lrugen and non-lrugen paths: it calls
try_to_shrink_lruvec() for lrugen root reclaim and shrink_lruvec()
otherwise.
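For reference, the flush_reclaim_state() behavior that points 1-3 rely
on boils down to roughly the following (a simplified sketch, not the
verbatim helper; see mm/vmscan.c for the actual definition):

/*
 * Sketch of flush_reclaim_state(): fold the pages recorded in
 * current->reclaim_state (e.g. slab reclaim byproduct) into
 * sc->nr_reclaimed, but only for root reclaim; for non-root
 * reclaim it is a no-op.
 */
static void flush_reclaim_state(struct scan_control *sc)
{
	if (current->reclaim_state && root_reclaim(sc)) {
		sc->nr_reclaimed += current->reclaim_state->reclaimed;
		current->reclaim_state->reclaimed = 0;
	}
}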
Signed-off-by: Chen Ridong
---
 mm/vmscan.c | 57 +++++++++++++++++++++--------------------------------
 1 file changed, 23 insertions(+), 34 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 584f41eb4c14..795f5ebd9341 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4758,23 +4758,7 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 	return nr_to_scan < 0;
 }
 
-static void shrink_one(struct lruvec *lruvec, struct scan_control *sc)
-{
-	unsigned long scanned = sc->nr_scanned;
-	unsigned long reclaimed = sc->nr_reclaimed;
-	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-
-	try_to_shrink_lruvec(lruvec, sc);
-
-	shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
-
-	if (!sc->proactive)
-		vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
-			   sc->nr_reclaimed - reclaimed);
-
-	flush_reclaim_state(sc);
-}
+static void shrink_one(struct lruvec *lruvec, struct scan_control *sc);
 
 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
 {
@@ -5760,6 +5744,27 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 	return inactive_lru_pages > pages_for_compaction;
 }
 
+static void shrink_one(struct lruvec *lruvec, struct scan_control *sc)
+{
+	unsigned long scanned = sc->nr_scanned;
+	unsigned long reclaimed = sc->nr_reclaimed;
+	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+
+	if (lru_gen_enabled() && root_reclaim(sc))
+		try_to_shrink_lruvec(lruvec, sc);
+	else
+		shrink_lruvec(lruvec, sc);
+
+	shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
+
+	if (!sc->proactive)
+		vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
+			   sc->nr_reclaimed - reclaimed);
+
+	flush_reclaim_state(sc);
+}
+
 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 {
 	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
@@ -5784,8 +5789,6 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 	memcg = mem_cgroup_iter(target_memcg, NULL, partial);
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
-		unsigned long reclaimed;
-		unsigned long scanned;
 
 		/*
 		 * This loop can become CPU-bound when target memcgs
@@ -5817,19 +5820,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 			memcg_memory_event(memcg, MEMCG_LOW);
 		}
 
-		reclaimed = sc->nr_reclaimed;
-		scanned = sc->nr_scanned;
-
-		shrink_lruvec(lruvec, sc);
-
-		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
-			    sc->priority);
-
-		/* Record the group's reclaim efficiency */
-		if (!sc->proactive)
-			vmpressure(sc->gfp_mask, memcg, false,
-				   sc->nr_scanned - scanned,
-				   sc->nr_reclaimed - reclaimed);
+		shrink_one(lruvec, sc);
 
 		/* If partial walks are allowed, bail once goal is reached */
 		if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) {
@@ -5863,8 +5854,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 	shrink_node_memcgs(pgdat, sc);
 
-	flush_reclaim_state(sc);
-
 	nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed;
 
 	/* Record the subtree's reclaim efficiency */
-- 
2.34.1