Currently slabs are only frozen after consistency checks failed. This
can happen only in caches with debugging enabled, and those use
free_to_partial_list() for freeing. The non-debug operation of
__slab_free() can thus stop considering the frozen field, and we can
remove the FREE_FROZEN stat.

Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 22 ++++------------------
 1 file changed, 4 insertions(+), 18 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 7f675659d93b..5b2d7c387646 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -333,7 +333,6 @@ enum stat_item {
 	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
 	FREE_FASTPATH,		/* Free to cpu slab */
 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
-	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
@@ -5093,7 +5092,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 			unsigned long addr)
 
 {
-	bool was_frozen, was_full;
+	bool was_full;
 	struct freelist_counters old, new;
 	struct kmem_cache_node *n = NULL;
 	unsigned long flags;
@@ -5116,7 +5115,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		old.counters = slab->counters;
 
 		was_full = (old.freelist == NULL);
-		was_frozen = old.frozen;
 
 		set_freepointer(s, tail, old.freelist);
 
@@ -5129,7 +5127,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 		 * to (due to not being full anymore) the partial list.
 		 * Unless it's frozen.
 		 */
-		if ((!new.inuse || was_full) && !was_frozen) {
+		if (!new.inuse || was_full) {
 
 			n = get_node(s, slab_nid(slab));
 			/*
@@ -5148,20 +5146,10 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 	} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
 
 	if (likely(!n)) {
-
-		if (likely(was_frozen)) {
-			/*
-			 * The list lock was not taken therefore no list
-			 * activity can be necessary.
-			 */
-			stat(s, FREE_FROZEN);
-		}
-
 		/*
-		 * In other cases we didn't take the list_lock because the slab
-		 * was already on the partial list and will remain there.
+		 * We didn't take the list_lock because the slab was already on
+		 * the partial list and will remain there.
 		 */
-
 		return;
 	}
 
@@ -8715,7 +8703,6 @@ STAT_ATTR(FREE_RCU_SHEAF, free_rcu_sheaf);
 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
 STAT_ATTR(FREE_FASTPATH, free_fastpath);
 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
-STAT_ATTR(FREE_FROZEN, free_frozen);
 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
@@ -8820,7 +8807,6 @@ static struct attribute *slab_attrs[] = {
 	&free_rcu_sheaf_fail_attr.attr,
 	&free_fastpath_attr.attr,
 	&free_slowpath_attr.attr,
-	&free_frozen_attr.attr,
 	&free_add_partial_attr.attr,
 	&free_remove_partial_attr.attr,
 	&alloc_from_partial_attr.attr,
-- 
2.52.0