slab_unmergeable() determines whether an existing slab cache can be
merged, but it must not be used before the cache has been fully
created. Extract the pre-creation mergeability checks into
slab_args_unmergeable(), which evaluates the kmem_cache_args, the slab
flags, and slab_nomerge to decide whether a cache will be mergeable
before it is created.

Signed-off-by: Harry Yoo
---
 mm/slab_common.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2e80d323f550..904414c3ebb8 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -174,24 +174,32 @@ int slab_unmergeable(struct kmem_cache *s)
 	return 0;
 }
 
-static struct kmem_cache *find_mergeable(unsigned int size, slab_flags_t flags,
-		const char *name, struct kmem_cache_args *args)
+static bool slab_args_unmergeable(struct kmem_cache_args *args,
+				  slab_flags_t flags)
 {
-	struct kmem_cache *s;
-	unsigned int align;
-
 	if (slab_nomerge)
-		return NULL;
+		return true;
 
 	if (args->ctor)
-		return NULL;
+		return true;
 
 	if (IS_ENABLED(CONFIG_HARDENED_USERCOPY) && args->usersize)
-		return NULL;
-
-	flags = kmem_cache_flags(flags, name);
+		return true;
 
 	if (flags & SLAB_NEVER_MERGE)
+		return true;
+
+	return false;
+}
+
+static struct kmem_cache *find_mergeable(unsigned int size, slab_flags_t flags,
+		const char *name, struct kmem_cache_args *args)
+{
+	struct kmem_cache *s;
+	unsigned int align;
+
+	flags = kmem_cache_flags(flags, name);
+	if (slab_args_unmergeable(args, flags))
 		return NULL;
 
 	size = ALIGN(size, sizeof(void *));
-- 
2.43.0
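
For reference, the extracted helper as it should read after this patch,
assembled from the hunks above (the comments are my annotation, not part
of the patch):

/*
 * Pre-creation mergeability checks: these need only the requested
 * args and flags, so they can run before the kmem_cache exists.
 */
static bool slab_args_unmergeable(struct kmem_cache_args *args,
				  slab_flags_t flags)
{
	if (slab_nomerge)	/* global "slab_nomerge" boot parameter */
		return true;

	if (args->ctor)		/* caches with constructors never merge */
		return true;

	if (IS_ENABLED(CONFIG_HARDENED_USERCOPY) && args->usersize)
		return true;	/* usercopy regions are cache-specific */

	if (flags & SLAB_NEVER_MERGE)
		return true;

	return false;
}
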
While SLAB_OBJ_EXT_IN_OBJ reduces the memory overhead of accounting
slab objects, it prevents slab merging, because merging can change the
metadata layout. As pointed out by Vlastimil Babka, disabling merging
solely for this memory optimization may not be a net win: disabling
slab merging tends to increase overall memory usage. Restrict
SLAB_OBJ_EXT_IN_OBJ to caches that are already unmergeable for other
reasons (e.g., those with constructors or SLAB_TYPESAFE_BY_RCU).

Suggested-by: Vlastimil Babka
Signed-off-by: Harry Yoo
---
 mm/slab.h        | 1 +
 mm/slab_common.c | 3 +--
 mm/slub.c        | 3 ++-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 8593c506cbf1..a5c4f981ee8b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -388,6 +388,7 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
 			unsigned int useroffset, unsigned int usersize);
 
 int slab_unmergeable(struct kmem_cache *s);
+bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags);
 
 slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 904414c3ebb8..d5a70a831a2a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -174,8 +174,7 @@ int slab_unmergeable(struct kmem_cache *s)
 	return 0;
 }
 
-static bool slab_args_unmergeable(struct kmem_cache_args *args,
-				  slab_flags_t flags)
+bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags)
 {
 	if (slab_nomerge)
 		return true;
diff --git a/mm/slub.c b/mm/slub.c
index ae9af184a18b..0581847e7dac 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -7676,7 +7676,8 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
 	 */
 	aligned_size = ALIGN(size, s->align);
 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
-	if (aligned_size - size >= sizeof(struct slabobj_ext))
+	if (slab_args_unmergeable(args, s->flags) &&
+	    (aligned_size - size >= sizeof(struct slabobj_ext)))
 		s->flags |= SLAB_OBJ_EXT_IN_OBJ;
 #endif
 	size = aligned_size;
-- 
2.43.0
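
As a hypothetical illustration of what still qualifies after this patch
(the demo_* names are mine, not from the patch, and this assumes the
struct kmem_cache_args flavor of kmem_cache_create()): a cache with a
constructor is unmergeable anyway, so calculate_sizes() may still set
SLAB_OBJ_EXT_IN_OBJ for it when the alignment padding leaves room for a
struct slabobj_ext.

#include <linux/slab.h>
#include <linux/string.h>

struct demo_obj {
	unsigned long payload[5];
};

static void demo_ctor(void *obj)
{
	memset(obj, 0, sizeof(struct demo_obj));
}

/*
 * args.ctor makes this cache unmergeable, so with this patch applied
 * calculate_sizes() can still place slabobj_ext metadata in the
 * object's alignment padding, provided the padding is large enough.
 */
static struct kmem_cache *demo_cache_create(void)
{
	struct kmem_cache_args args = {
		.ctor = demo_ctor,
	};

	return kmem_cache_create("demo_obj", sizeof(struct demo_obj),
				 &args, SLAB_HWCACHE_ALIGN);
}

A plain cache created without a constructor, usercopy region, or any
SLAB_NEVER_MERGE flag remains mergeable and, after this patch, no
longer gets SLAB_OBJ_EXT_IN_OBJ.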