From: Chen Ridong Currently, mem_cgroup_usage() is only used by cgroup v1, so move it to memcontrol-v1.c. Signed-off-by: Chen Ridong Acked-by: Michal Hocko Acked-by: Johannes Weiner --- mm/memcontrol-v1.c | 22 ++++++++++++++++++++++ mm/memcontrol-v1.h | 2 -- mm/memcontrol.c | 22 ---------------------- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index 6eed14bff742..0b50cb122ff3 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -427,6 +427,28 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, } #endif +static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) +{ + unsigned long val; + + if (mem_cgroup_is_root(memcg)) { + /* + * Approximate root's usage from global state. This isn't + * perfect, but the root usage was always an approximation. + */ + val = global_node_page_state(NR_FILE_PAGES) + + global_node_page_state(NR_ANON_MAPPED); + if (swap) + val += total_swap_pages - get_nr_swap_pages(); + } else { + if (!swap) + val = page_counter_read(&memcg->memory); + else + val = page_counter_read(&memcg->memsw); + } + return val; +} + static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h index 6358464bb416..e92b21af92b1 100644 --- a/mm/memcontrol-v1.h +++ b/mm/memcontrol-v1.h @@ -22,8 +22,6 @@ iter != NULL; \ iter = mem_cgroup_iter(NULL, iter, NULL)) -unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap); - void drain_all_stock(struct mem_cgroup *root_memcg); unsigned long memcg_events(struct mem_cgroup *memcg, int event); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e2e49f4ec9e0..dbe7d8f93072 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3272,28 +3272,6 @@ void folio_split_memcg_refs(struct folio *folio, unsigned old_order, css_get_many(&__folio_memcg(folio)->css, new_refs); } -unsigned long mem_cgroup_usage(struct mem_cgroup 
*memcg, bool swap) -{ - unsigned long val; - - if (mem_cgroup_is_root(memcg)) { - /* - * Approximate root's usage from global state. This isn't - * perfect, but the root usage was always an approximation. - */ - val = global_node_page_state(NR_FILE_PAGES) + - global_node_page_state(NR_ANON_MAPPED); - if (swap) - val += total_swap_pages - get_nr_swap_pages(); - } else { - if (!swap) - val = page_counter_read(&memcg->memory); - else - val = page_counter_read(&memcg->memsw); - } - return val; -} - static int memcg_online_kmem(struct mem_cgroup *memcg) { struct obj_cgroup *objcg; -- 2.34.1 From: Chen Ridong The mem_cgroup_size helper is used only in apply_proportional_protection to read the current memory usage. Its semantics are unclear and inconsistent with other sites, which directly call page_counter_read for the same purpose. Remove this helper and get its usage via mem_cgroup_protection for clarity. Additionally, rename the local variable 'cgroup_size' to 'usage' to better reflect its meaning. No functional changes intended. Signed-off-by: Chen Ridong --- include/linux/memcontrol.h | 18 +++++++----------- mm/memcontrol.c | 5 ----- mm/vmscan.c | 9 ++++----- 3 files changed, 11 insertions(+), 21 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6a48398a1f4e..603252e3169c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -557,13 +557,15 @@ static inline bool mem_cgroup_disabled(void) static inline void mem_cgroup_protection(struct mem_cgroup *root, struct mem_cgroup *memcg, unsigned long *min, - unsigned long *low) + unsigned long *low, + unsigned long *usage) { - *min = *low = 0; + *min = *low = *usage = 0; if (mem_cgroup_disabled()) return; + *usage = page_counter_read(&memcg->memory); /* * There is no reclaim protection applied to a targeted reclaim. 
* We are special casing this specific case here because @@ -919,8 +921,6 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); -unsigned long mem_cgroup_size(struct mem_cgroup *memcg); - void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p); @@ -1102,9 +1102,10 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, static inline void mem_cgroup_protection(struct mem_cgroup *root, struct mem_cgroup *memcg, unsigned long *min, - unsigned long *low) + unsigned long *low, + unsigned long *usage) { - *min = *low = 0; + *min = *low = *usage = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1328,11 +1329,6 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) return 0; } -static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg) -{ - return 0; -} - static inline void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index dbe7d8f93072..659ce171b1b3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1621,11 +1621,6 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) return max; } -unsigned long mem_cgroup_size(struct mem_cgroup *memcg) -{ - return page_counter_read(&memcg->memory); -} - void __memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event, bool allow_spinning) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 670fe9fae5ba..9a6ee80275fc 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2451,9 +2451,9 @@ static inline void calculate_pressure_balance(struct scan_control *sc, static unsigned long apply_proportional_protection(struct mem_cgroup *memcg, struct scan_control *sc, unsigned long scan) { - unsigned long min, low; + unsigned long min, low, usage; - mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low); + mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low, 
&usage); if (min || low) { /* @@ -2485,7 +2485,6 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg, * again by how much of the total memory used is under * hard protection. */ - unsigned long cgroup_size = mem_cgroup_size(memcg); unsigned long protection; /* memory.low scaling, make sure we retry before OOM */ @@ -2497,9 +2496,9 @@ static unsigned long apply_proportional_protection(struct mem_cgroup *memcg, } /* Avoid TOCTOU with earlier protection check */ - cgroup_size = max(cgroup_size, protection); + usage = max(usage, protection); - scan -= scan * protection / (cgroup_size + 1); + scan -= scan * protection / (usage + 1); /* * Minimally target SWAP_CLUSTER_MAX pages to keep -- 2.34.1