Currently slabs are only frozen after consistency checks failed. This can
happen only in caches with debugging enabled, and those use
free_to_partial_list() for freeing. The non-debug operation of __slab_free()
can thus stop considering the frozen field, and we can remove the FREE_FROZEN
stat.

Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 515a2b59cb52..9b551c48c2eb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -336,7 +336,6 @@ enum stat_item {
 	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
 	FREE_FASTPATH,		/* Free to cpu slab */
 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
-	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
@@ -5036,7 +5035,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 
 {
 	void *prior;
-	int was_frozen;
 	struct slab new;
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
@@ -5059,9 +5057,8 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		counters = slab->counters;
 		set_freepointer(s, tail, prior);
 		new.counters = counters;
-		was_frozen = new.frozen;
 		new.inuse -= cnt;
-		if ((!new.inuse || !prior) && !was_frozen) {
+		if (!new.inuse || !prior) {
 			/* Needs to be taken off a list */
 			n = get_node(s, slab_nid(slab));
 			/*
@@ -5083,15 +5080,10 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		"__slab_free"));
 
 	if (likely(!n)) {
-
-		if (likely(was_frozen)) {
-			/*
-			 * The list lock was not taken therefore no list
-			 * activity can be necessary.
-			 */
-			stat(s, FREE_FROZEN);
-		}
-
+		/*
+		 * The list lock was not taken therefore no list activity can be
+		 * necessary.
+		 */
 		return;
 	}
 
@@ -8648,7 +8640,6 @@ STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
 STAT_ATTR(FREE_FASTPATH, free_fastpath);
 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
-STAT_ATTR(FREE_FROZEN, free_frozen);
 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
@@ -8753,7 +8744,6 @@ static struct attribute *slab_attrs[] = {
 	&free_rcu_sheaf_fail_attr.attr,
 	&free_fastpath_attr.attr,
 	&free_slowpath_attr.attr,
-	&free_frozen_attr.attr,
 	&free_add_partial_attr.attr,
 	&free_remove_partial_attr.attr,
 	&alloc_from_partial_attr.attr,
-- 
2.51.1
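
[Illustrative note, not part of the patch]

A minimal userspace sketch of the decision this patch simplifies, using made-up
names (slab_state, needs_list_lock) rather than the real slub.c internals:
after the change, __slab_free() needs the node list_lock exactly when the slab
becomes empty or previously had no free objects (prior == NULL); the
was_frozen term is dropped because, per the changelog, frozen slabs can only
result from failed consistency checks and those caches free via
free_to_partial_list(), so they never reach this path.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the fields __slab_free() tests. */
	struct slab_state {
		unsigned int inuse;	/* objects still allocated after this free */
		bool had_free_objects;	/* prior != NULL: slab already had free objects */
	};

	/*
	 * Before the patch: (!new.inuse || !prior) && !was_frozen
	 * After the patch:   !new.inuse || !prior
	 */
	static bool needs_list_lock(const struct slab_state *st)
	{
		return st->inuse == 0 || !st->had_free_objects;
	}

	int main(void)
	{
		/* A previously full slab must be moved to the partial list. */
		struct slab_state was_full = { .inuse = 3, .had_free_objects = false };
		/* A slab that stays partially used needs no list manipulation. */
		struct slab_state stays_partial = { .inuse = 2, .had_free_objects = true };

		printf("was full:      %d\n", needs_list_lock(&was_full));
		printf("stays partial: %d\n", needs_list_lock(&stays_partial));
		return 0;
	}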