The cpu slabs and their deactivations were removed, so remove the
unused stat items. Weirdly enough the values were also used to control
__add_partial() adding to head or tail of the list, so replace that
with a new enum add_mode, which is cleaner.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 5b2d7c387646..a473fa29a905 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -324,6 +324,11 @@ static void debugfs_slab_add(struct kmem_cache *);
 static inline void debugfs_slab_add(struct kmem_cache *s) { }
 #endif
 
+enum add_mode {
+	ADD_TO_HEAD,
+	ADD_TO_TAIL,
+};
+
 enum stat_item {
 	ALLOC_PCS,		/* Allocation from percpu sheaf */
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -343,8 +348,6 @@ enum stat_item {
 	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
 	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
 	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
-	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
-	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
 	DEACTIVATE_BYPASS,	/* Implicit deactivation */
 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
@@ -3253,10 +3256,10 @@ static inline void slab_clear_node_partial(struct slab *slab)
  * Management of partially allocated slabs.
  */
 static inline void
-__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
+__add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
 {
 	n->nr_partial++;
-	if (tail == DEACTIVATE_TO_TAIL)
+	if (mode == ADD_TO_TAIL)
 		list_add_tail(&slab->slab_list, &n->partial);
 	else
 		list_add(&slab->slab_list, &n->partial);
@@ -3264,10 +3267,10 @@ __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
-				struct slab *slab, int tail)
+				struct slab *slab, enum add_mode mode)
 {
 	lockdep_assert_held(&n->list_lock);
-	__add_partial(n, slab, tail);
+	__add_partial(n, slab, mode);
 }
 
 static inline void remove_partial(struct kmem_cache_node *n,
@@ -3360,7 +3363,7 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
 	if (slab->inuse == slab->objects)
 		add_full(s, n, slab);
 	else
-		add_partial(n, slab, DEACTIVATE_TO_HEAD);
+		add_partial(n, slab, ADD_TO_HEAD);
 
 	inc_slabs_node(s, nid, slab->objects);
 	spin_unlock_irqrestore(&n->list_lock, flags);
@@ -3979,7 +3982,7 @@ static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
 			n = get_node(s, slab_nid(slab));
 			spin_lock_irqsave(&n->list_lock, flags);
 		}
-		add_partial(n, slab, DEACTIVATE_TO_HEAD);
+		add_partial(n, slab, ADD_TO_HEAD);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
@@ -5054,7 +5057,7 @@ static noinline void free_to_partial_list(
 			/* was on full list */
 			remove_full(s, n, slab);
 			if (!slab_free) {
-				add_partial(n, slab, DEACTIVATE_TO_TAIL);
+				add_partial(n, slab, ADD_TO_TAIL);
 				stat(s, FREE_ADD_PARTIAL);
 			}
 		} else if (slab_free) {
@@ -5174,7 +5177,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 	 * then add it.
 	 */
 	if (unlikely(was_full)) {
-		add_partial(n, slab, DEACTIVATE_TO_TAIL);
+		add_partial(n, slab, ADD_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
@@ -6557,7 +6560,7 @@ __refill_objects_node(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int mi
 			continue;
 
 		list_del(&slab->slab_list);
-		add_partial(n, slab, DEACTIVATE_TO_HEAD);
+		add_partial(n, slab, ADD_TO_HEAD);
 	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
@@ -7025,7 +7028,7 @@ static void early_kmem_cache_node_alloc(int node)
 	 * No locks need to be taken here as it has just been
 	 * initialized and there is no concurrent access.
 	 */
-	__add_partial(n, slab, DEACTIVATE_TO_HEAD);
+	__add_partial(n, slab, ADD_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -8713,8 +8716,6 @@ STAT_ATTR(FREE_SLAB, free_slab);
 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
-STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
-STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
@@ -8817,8 +8818,6 @@ static struct attribute *slab_attrs[] = {
 	&cpuslab_flush_attr.attr,
 	&deactivate_full_attr.attr,
 	&deactivate_empty_attr.attr,
-	&deactivate_to_head_attr.attr,
-	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
 	&deactivate_bypass_attr.attr,
 	&order_fallback_attr.attr,
-- 
2.52.0
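
For readers less familiar with the head/tail distinction the new enum encodes:
below is a minimal, standalone C sketch of the pattern. It is not kernel code.
The list_head type, the list_add()/list_add_tail() helpers, and the struct
item / add_item() names are simplified stand-ins written just for this
example; only enum add_mode, ADD_TO_HEAD/ADD_TO_TAIL, and the head-vs-tail
insertion semantics come from the patch above.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's circular doubly linked list. */
struct list_head {
	struct list_head *next, *prev;
};

enum add_mode {
	ADD_TO_HEAD,
	ADD_TO_TAIL,
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void __list_add(struct list_head *new, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* Insert right after the head: the entry is picked first (LIFO). */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* Insert right before the head: the entry is picked last (FIFO). */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
	int id;
	struct list_head link;
};

/* The shape of __add_partial(): the mode picks head or tail insertion. */
static void add_item(struct list_head *list, struct item *it,
		     enum add_mode mode)
{
	if (mode == ADD_TO_TAIL)
		list_add_tail(&it->link, list);
	else
		list_add(&it->link, list);
}

int main(void)
{
	struct list_head list;
	struct item a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct list_head *pos;

	INIT_LIST_HEAD(&list);
	add_item(&list, &a, ADD_TO_TAIL);
	add_item(&list, &b, ADD_TO_TAIL);
	add_item(&list, &c, ADD_TO_HEAD);	/* c jumps ahead of a and b */

	for (pos = list.next; pos != &list; pos = pos->next)
		printf("%d\n", container_of(pos, struct item, link)->id);
	/* prints 3, 1, 2 */
	return 0;
}

The point of the enum over the old int is just readability at the call sites:
add_partial(n, slab, ADD_TO_HEAD) says what happens to the slab, whereas
reusing DEACTIVATE_TO_HEAD/DEACTIVATE_TO_TAIL overloaded stat-item values as
a positional flag.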