While SLAB_OBJ_EXT_IN_OBJ reduces the memory overhead of accounting
slab objects, it prevents slab merging, because merging can change the
metadata layout. As pointed out by Vlastimil Babka, disabling merging
solely for this memory optimization may not be a net win, since
disabling slab merging tends to increase overall memory usage.

Restrict SLAB_OBJ_EXT_IN_OBJ to caches that are already unmergeable
for other reasons (e.g., those with constructors or
SLAB_TYPESAFE_BY_RCU).

Suggested-by: Vlastimil Babka
Signed-off-by: Harry Yoo
---
 mm/slab.h        | 1 +
 mm/slab_common.c | 3 +--
 mm/slub.c        | 3 ++-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 8593c506cbf1..a5c4f981ee8b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -388,6 +388,7 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
 			unsigned int useroffset, unsigned int usersize);
 
 int slab_unmergeable(struct kmem_cache *s);
+bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags);
 
 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 904414c3ebb8..d5a70a831a2a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -174,8 +174,7 @@ int slab_unmergeable(struct kmem_cache *s)
 	return 0;
 }
 
-static bool slab_args_unmergeable(struct kmem_cache_args *args,
-				  slab_flags_t flags)
+bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags)
 {
 	if (slab_nomerge)
 		return true;
diff --git a/mm/slub.c b/mm/slub.c
index ae9af184a18b..0581847e7dac 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -7676,7 +7676,8 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
 	 */
 	aligned_size = ALIGN(size, s->align);
 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
-	if (aligned_size - size >= sizeof(struct slabobj_ext))
+	if (slab_args_unmergeable(args, s->flags) &&
+	    (aligned_size - size >= sizeof(struct slabobj_ext)))
 		s->flags |= SLAB_OBJ_EXT_IN_OBJ;
 #endif
 	size = aligned_size;
-- 
2.43.0
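
For context, the decision the patch builds on can be illustrated with a
minimal userspace C sketch. This is not kernel code: the types, flag
values, and helper names below (cache_args, args_unmergeable,
maybe_embed_obj_ext) are simplified stand-ins invented for illustration,
and the real slab_args_unmergeable() checks more conditions than shown
here. The sketch only demonstrates the shape of the logic: a cache
counts as unmergeable when merging is globally disabled or when it has
properties that pin its layout, and only such caches may opt in to
storing metadata in their alignment padding.

	/*
	 * Simplified stand-in for the kernel's mergeability check.
	 * Types and flag values are illustrative, not the kernel's.
	 */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	typedef unsigned int slab_flags_t;

	#define SLAB_TYPESAFE_BY_RCU 0x1u /* layout must stay stable */
	#define SLAB_OBJ_EXT_IN_OBJ  0x2u /* metadata lives in padding */

	struct cache_args {
		void (*ctor)(void *);	/* constructor, if any */
		size_t usersize;	/* usercopy window, if any */
	};

	/* Models the effect of the slab_nomerge boot option. */
	static bool merging_globally_disabled;

	/* A cache is unmergeable if merging could break its layout. */
	static bool args_unmergeable(const struct cache_args *args,
				     slab_flags_t flags)
	{
		if (merging_globally_disabled)
			return true;
		if (flags & SLAB_TYPESAFE_BY_RCU)
			return true;
		if (args->ctor || args->usersize)
			return true;
		return false;
	}

	/*
	 * Mirrors the shape of the patched calculate_sizes() logic:
	 * embed the metadata only when the cache is already
	 * unmergeable anyway, and only when the alignment padding
	 * has room for it.
	 */
	static slab_flags_t maybe_embed_obj_ext(const struct cache_args *args,
						slab_flags_t flags,
						size_t size,
						size_t aligned_size,
						size_t ext_size)
	{
		if (args_unmergeable(args, flags) &&
		    aligned_size - size >= ext_size)
			flags |= SLAB_OBJ_EXT_IN_OBJ;
		return flags;
	}

	int main(void)
	{
		struct cache_args plain = { 0 };
		struct cache_args rcu = { 0 };

		/* Mergeable cache: padding exists, but no embedding. */
		printf("plain: %s\n",
		       maybe_embed_obj_ext(&plain, 0, 56, 64, 8) &
		       SLAB_OBJ_EXT_IN_OBJ ? "embedded" : "not embedded");

		/* Already-unmergeable cache with room: embedding on. */
		printf("rcu:   %s\n",
		       maybe_embed_obj_ext(&rcu, SLAB_TYPESAFE_BY_RCU,
					   56, 64, 8) &
		       SLAB_OBJ_EXT_IN_OBJ ? "embedded" : "not embedded");
		return 0;
	}

Running the sketch prints "not embedded" for the plain cache and
"embedded" for the RCU cache, matching the patch's intent: in-object
metadata never becomes the reason a cache is excluded from merging.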