From: Chen Ridong Currently, mem_cgroup_usage() is only used by cgroup v1, so move it to memcontrol-v1.c. Signed-off-by: Chen Ridong --- mm/memcontrol-v1.c | 22 ++++++++++++++++++++++ mm/memcontrol-v1.h | 2 -- mm/memcontrol.c | 22 ---------------------- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index 6eed14bff742..0b50cb122ff3 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -427,6 +427,28 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, } #endif +static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) +{ + unsigned long val; + + if (mem_cgroup_is_root(memcg)) { + /* + * Approximate root's usage from global state. This isn't + * perfect, but the root usage was always an approximation. + */ + val = global_node_page_state(NR_FILE_PAGES) + + global_node_page_state(NR_ANON_MAPPED); + if (swap) + val += total_swap_pages - get_nr_swap_pages(); + } else { + if (!swap) + val = page_counter_read(&memcg->memory); + else + val = page_counter_read(&memcg->memsw); + } + return val; +} + static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index 6358464bb416..e92b21af92b1 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -22,8 +22,6 @@ iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL)) -unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap); - void drain_all_stock(struct mem_cgroup *root_memcg); unsigned long memcg_events(struct mem_cgroup *memcg, int event); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e2e49f4ec9e0..dbe7d8f93072 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3272,28 +3272,6 @@ void folio_split_memcg_refs(struct folio *folio, unsigned old_order, css_get_many(&__folio_memcg(folio)->css, new_refs); } -unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) -{ - unsigned long val; - - if 
(mem_cgroup_is_root(memcg)) { - /* - * Approximate root's usage from global state. This isn't - * perfect, but the root usage was always an approximation. - */ - val = global_node_page_state(NR_FILE_PAGES) + - global_node_page_state(NR_ANON_MAPPED); - if (swap) - val += total_swap_pages - get_nr_swap_pages(); - } else { - if (!swap) - val = page_counter_read(&memcg->memory); - else - val = page_counter_read(&memcg->memsw); - } - return val; -} - static int memcg_online_kmem(struct mem_cgroup *memcg) { struct obj_cgroup *objcg; -- 2.34.1 From: Chen Ridong The mem_cgroup_size helper is used only in apply_proportional_protection to read the current memory usage. Its semantics are unclear and inconsistent with other sites, which directly call page_counter_read for the same purpose. Remove this helper and replace its usage with page_counter_read for clarity. Additionally, rename the local variable 'cgroup_size' to 'usage' to better reflect its meaning. This change is safe because page_counter_read() is only called when memcg is enabled in apply_proportional_protection(). No functional changes intended. 
Signed-off-by: Chen Ridong --- include/linux/memcontrol.h | 7 ------- mm/memcontrol.c | 5 ----- mm/vmscan.c | 8 +++++--- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6a48398a1f4e..bedeb606c691 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -919,8 +919,6 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); -unsigned long mem_cgroup_size(struct mem_cgroup *memcg); - void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p); @@ -1328,11 +1326,6 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) return 0; } -static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg) -{ - return 0; -} - static inline void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index dbe7d8f93072..659ce171b1b3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1621,11 +1621,6 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) return max; } -unsigned long mem_cgroup_size(struct mem_cgroup *memcg) -{ - return page_counter_read(&memcg->memory); -} - void __memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event, bool allow_spinning) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 670fe9fae5ba..fe48d0376e7c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2451,6 +2451,7 @@ static inline void calculate_pressure_balance(struct scan_control *sc, static unsigned long apply_proportional_protection(struct mem_cgroup *memcg, struct scan_control *sc, unsigned long scan) { +#ifdef CONFIG_MEMCG unsigned long min, low; mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low); @@ -2485,7 +2486,7 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg, * again by how much of the total memory used is under * hard protection. 
*/ - unsigned long cgroup_size = mem_cgroup_size(memcg); + unsigned long usage = page_counter_read(&memcg->memory); unsigned long protection; /* memory.low scaling, make sure we retry before OOM */ @@ -2497,9 +2498,9 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg, } /* Avoid TOCTOU with earlier protection check */ - cgroup_size = max(cgroup_size, protection); + usage = max(usage, protection); - scan -= scan * protection / (cgroup_size + 1); + scan -= scan * protection / (usage + 1); /* * Minimally target SWAP_CLUSTER_MAX pages to keep @@ -2508,6 +2509,7 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg, */ scan = max(scan, SWAP_CLUSTER_MAX); } +#endif return scan; } -- 2.34.1