Use get_shrinker_id() to retrieve the id of shrinker. No functional change. Signed-off-by: Haifeng Xu --- mm/shrinker.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/mm/shrinker.c b/mm/shrinker.c index 7b61fc0ee78f..f0c6dfa026b0 100644 --- a/mm/shrinker.c +++ b/mm/shrinker.c @@ -193,6 +193,11 @@ static inline int calc_shrinker_id(int index, int offset) return index * SHRINKER_UNIT_BITS + offset; } +static inline int get_shrinker_id(struct mem_cgroup *memcg, struct shrinker *shrinker) +{ + return shrinker->id; +} + void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) { if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { @@ -255,11 +260,13 @@ static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, struct shrinker_info *info; struct shrinker_info_unit *unit; long nr_deferred; + int id; rcu_read_lock(); + id = get_shrinker_id(memcg, shrinker); info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); - unit = info->unit[shrinker_id_to_index(shrinker->id)]; - nr_deferred = atomic_long_xchg(&unit->nr_deferred[shrinker_id_to_offset(shrinker->id)], 0); + unit = info->unit[shrinker_id_to_index(id)]; + nr_deferred = atomic_long_xchg(&unit->nr_deferred[shrinker_id_to_offset(id)], 0); rcu_read_unlock(); return nr_deferred; @@ -271,12 +278,14 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, struct shrinker_info *info; struct shrinker_info_unit *unit; long nr_deferred; + int id; rcu_read_lock(); + id = get_shrinker_id(memcg, shrinker); info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); - unit = info->unit[shrinker_id_to_index(shrinker->id)]; + unit = info->unit[shrinker_id_to_index(id)]; nr_deferred = - atomic_long_add_return(nr, &unit->nr_deferred[shrinker_id_to_offset(shrinker->id)]); + atomic_long_add_return(nr, &unit->nr_deferred[shrinker_id_to_offset(id)]); rcu_read_unlock(); return nr_deferred; -- 2.43.0 When kmem is disabled, memcg slab 
shrink only call non-slab shrinkers, so just allocates shrinker info for non-slab shrinkers to non-root memcgs. Therefore, if memcg_kmem_online is true, all things keep same as before. Otherwise, root memcg allocates id from shrinker_idr to identify each shrinker and non-root memcgs use nonslab_id to identify non-slab shrinkers. The size of shrinkers_info in non-root memcgs can be very low because the number of shrinkers marked as SHRINKER_NONSLAB | SHRINKER_MEMCG_AWARE is few. Also, the time spending in expand_shrinker_info() can reduce a lot. When setting shrinker bit or updating nr_deferred, use nonslab_id for non-root memcgs if the shrinker is marked as SHRINKER_NONSLAB. Signed-off-by: Haifeng Xu --- include/linux/shrinker.h | 3 + mm/huge_memory.c | 21 ++++-- mm/shrinker.c | 135 ++++++++++++++++++++++++++++++++++----- 3 files changed, 138 insertions(+), 21 deletions(-) diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 1a00be90d93a..df53008ed8b5 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -107,6 +107,9 @@ struct shrinker { #ifdef CONFIG_MEMCG /* ID in shrinker_idr */ int id; + + /* ID in shrinker_nonslab_idr */ + int nonslab_id; #endif #ifdef CONFIG_SHRINKER_DEBUG int debugfs_id; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 8e2746ea74ad..319349b5da5d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -4351,9 +4351,14 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped) memcg = folio_split_queue_memcg(folio, ds_queue); list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); ds_queue->split_queue_len++; - if (memcg) - set_shrinker_bit(memcg, folio_nid(folio), - shrinker_id(deferred_split_shrinker)); + if (memcg) { + int id = deferred_split_shrinker->id; + + if (!memcg_kmem_online() && memcg != root_mem_cgroup) + id = deferred_split_shrinker->nonslab_id; + + set_shrinker_bit(memcg, folio_nid(folio), id); + } } split_queue_unlock_irqrestore(ds_queue, flags); } @@ -4508,8 
+4513,14 @@ void reparent_deferred_split_queue(struct mem_cgroup *memcg) parent_ds_queue->split_queue_len += ds_queue->split_queue_len; ds_queue->split_queue_len = 0; - for_each_node(nid) - set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker)); + for_each_node(nid) { + int id = deferred_split_shrinker->id; + + if (!memcg_kmem_online() && parent != root_mem_cgroup) + id = deferred_split_shrinker->nonslab_id; + + set_shrinker_bit(parent, nid, id); + } unlock: spin_unlock(&parent_ds_queue->split_queue_lock); diff --git a/mm/shrinker.c b/mm/shrinker.c index f0c6dfa026b0..52ea0e6391af 100644 --- a/mm/shrinker.c +++ b/mm/shrinker.c @@ -12,6 +12,7 @@ DEFINE_MUTEX(shrinker_mutex); #ifdef CONFIG_MEMCG static int shrinker_nr_max; +static int shrinker_nonslab_nr_max; static inline int shrinker_unit_size(int nr_items) { @@ -78,15 +79,25 @@ int alloc_shrinker_info(struct mem_cgroup *memcg) { int nid, ret = 0; int array_size = 0; + int alloc_nr_max; + + if (memcg_kmem_online()) { + alloc_nr_max = shrinker_nr_max; + } else { + if (memcg == root_mem_cgroup) + alloc_nr_max = shrinker_nr_max; + else + alloc_nr_max = shrinker_nonslab_nr_max; + } mutex_lock(&shrinker_mutex); - array_size = shrinker_unit_size(shrinker_nr_max); + array_size = shrinker_unit_size(alloc_nr_max); for_each_node(nid) { struct shrinker_info *info = kvzalloc_node(sizeof(*info) + array_size, GFP_KERNEL, nid); if (!info) goto err; - info->map_nr_max = shrinker_nr_max; + info->map_nr_max = alloc_nr_max; if (shrinker_unit_alloc(info, NULL, nid)) { kvfree(info); goto err; @@ -147,33 +158,47 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size, return 0; } -static int expand_shrinker_info(int new_id) +static int expand_shrinker_info(int new_id, bool full, bool root) { int ret = 0; int new_nr_max = round_up(new_id + 1, SHRINKER_UNIT_BITS); int new_size, old_size = 0; struct mem_cgroup *memcg; + struct mem_cgroup *start = NULL; + int old_nr_max = shrinker_nr_max; if 
(!root_mem_cgroup) goto out; lockdep_assert_held(&shrinker_mutex); + if (!full && !root) { + start = root_mem_cgroup; + old_nr_max = shrinker_nonslab_nr_max; + } + new_size = shrinker_unit_size(new_nr_max); - old_size = shrinker_unit_size(shrinker_nr_max); + old_size = shrinker_unit_size(old_nr_max); + + memcg = mem_cgroup_iter(NULL, start, NULL); + if (!memcg) + goto out; - memcg = mem_cgroup_iter(NULL, NULL, NULL); do { ret = expand_one_shrinker_info(memcg, new_size, old_size, new_nr_max); - if (ret) { + if (ret || (root && memcg == root_mem_cgroup)) { mem_cgroup_iter_break(NULL, memcg); goto out; } } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); out: - if (!ret) - shrinker_nr_max = new_nr_max; + if (!ret) { + if (!full && !root) + shrinker_nonslab_nr_max = new_nr_max; + else + shrinker_nr_max = new_nr_max; + } return ret; } @@ -195,7 +220,13 @@ static inline int calc_shrinker_id(int index, int offset) static inline int get_shrinker_id(struct mem_cgroup *memcg, struct shrinker *shrinker) { - return shrinker->id; + int id = shrinker->id; + + if (!memcg_kmem_online() && (shrinker->flags & SHRINKER_NONSLAB) && + memcg != root_mem_cgroup) + id = shrinker->nonslab_id; + + return id; } void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) @@ -217,6 +248,8 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) } static DEFINE_IDR(shrinker_idr); +static DEFINE_IDR(shrinker_nonslab_idr); + static int shrinker_memcg_alloc(struct shrinker *shrinker) { @@ -230,10 +263,46 @@ static int shrinker_memcg_alloc(struct shrinker *shrinker) if (id < 0) goto unlock; - if (id >= shrinker_nr_max) { - if (expand_shrinker_info(id)) { - idr_remove(&shrinker_idr, id); - goto unlock; + shrinker->nonslab_id = -1; + + if (!mem_cgroup_kmem_disabled()) { + if (id >= shrinker_nr_max) { + /* expand shrinker info for all memory cgroups */ + if (expand_shrinker_info(id, true, false)) { + idr_remove(&shrinker_idr, id); + goto unlock; + } + } + 
} else { + /* + * If cgroup_memory_nokmem is set, every shrinker needs to be recorded in + * root memory cgroup because global slab shrink traverses all shrinkers. For + * non-root memcgs, record shrinkers with SHRINKER_NONSLAB because memcg + * slab shrink only calls non-slab shrinkers. + */ + if (id >= shrinker_nr_max) { + /* expand shrinker info for root memory cgroup only */ + if (expand_shrinker_info(id, false, true)) { + idr_remove(&shrinker_idr, id); + goto unlock; + } + } + + if (shrinker->flags & SHRINKER_NONSLAB) { + int nonslab_id; + + nonslab_id = idr_alloc(&shrinker_nonslab_idr, shrinker, 0, 0, GFP_KERNEL); + if (nonslab_id < 0) + goto unlock; + + if (nonslab_id >= shrinker_nonslab_nr_max) { + /* expand shrinker info for non-root memory cgroups */ + if (expand_shrinker_info(nonslab_id, false, false)) { + idr_remove(&shrinker_nonslab_idr, nonslab_id); + goto unlock; + } + } + shrinker->nonslab_id = nonslab_id; } } shrinker->id = id; @@ -252,6 +321,12 @@ static void shrinker_memcg_remove(struct shrinker *shrinker) lockdep_assert_held(&shrinker_mutex); idr_remove(&shrinker_idr, id); + + if (shrinker->flags & SHRINKER_NONSLAB) { + id = shrinker->nonslab_id; + if (id >= 0) + idr_remove(&shrinker_nonslab_idr, id); + } } static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, @@ -310,10 +385,33 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) parent_info = shrinker_info_protected(parent, nid); for (index = 0; index < shrinker_id_to_index(child_info->map_nr_max); index++) { child_unit = child_info->unit[index]; - parent_unit = parent_info->unit[index]; for (offset = 0; offset < SHRINKER_UNIT_BITS; offset++) { nr = atomic_long_read(&child_unit->nr_deferred[offset]); + + /* + * If memcg_kmem_online() is false, non-root memcgs use + * nonslab_id but the root memory cgroup uses id. When reparenting + * shrinker info to it, we must convert the nonslab_id to id. 
+ */ + if (!memcg_kmem_online() && parent == root_mem_cgroup) { + int id, p_index, p_off; + struct shrinker *shrinker; + + id = calc_shrinker_id(index, offset); + shrinker = idr_find(&shrinker_nonslab_idr, id); + if (shrinker) { + id = shrinker->id; + p_index = shrinker_id_to_index(id); + p_off = shrinker_id_to_offset(id); + + parent_unit = parent_info->unit[p_index]; + atomic_long_add(nr, + &parent_unit->nr_deferred[p_off]); + } + } else { + parent_unit = parent_info->unit[index]; + atomic_long_add(nr, &parent_unit->nr_deferred[offset]); + } } } } @@ -543,7 +641,12 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, int shrinker_id = calc_shrinker_id(index, offset); rcu_read_lock(); - shrinker = idr_find(&shrinker_idr, shrinker_id); + + if (memcg_kmem_online()) + shrinker = idr_find(&shrinker_idr, shrinker_id); + else + shrinker = idr_find(&shrinker_nonslab_idr, shrinker_id); + if (unlikely(!shrinker || !shrinker_try_get(shrinker))) { clear_bit(offset, unit->map); rcu_read_unlock(); -- 2.43.0 If memcg_kmem_online() is false, only non-slab shrinkers are recorded in the map, so remove the check. Signed-off-by: Haifeng Xu --- mm/shrinker.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/mm/shrinker.c b/mm/shrinker.c index 52ea0e6391af..c8b090e8a972 100644 --- a/mm/shrinker.c +++ b/mm/shrinker.c @@ -654,11 +654,6 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, } rcu_read_unlock(); - /* Call non-slab shrinkers even though kmem is disabled */ - if (!memcg_kmem_online() && - !(shrinker->flags & SHRINKER_NONSLAB)) - continue; - ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) { clear_bit(offset, unit->map); -- 2.43.0