Hi Venkat, could you please help test this patch and check if it hits
any warnings? It's based on the v7.0-rc1 tag. This (hopefully) should
give us more information that would help debug the issue.

1. set stride early in alloc_slab_obj_exts_early()
2. move some obj_exts helpers to slab.h
3. in slab_obj_ext(), check three things:
  3-1. is the obj_ext address the right one for this object?
  3-2. does the obj_ext address change after smp_rmb()?
  3-3. does obj_ext->objcg change after smp_rmb()?

No smp_wmb() is used, intentionally.
It is expected that the issue will still reproduce.

Signed-off-by: Harry Yoo
---
 mm/slab.h | 131 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 mm/slub.c | 100 ++---------------------------------------
 2 files changed, 130 insertions(+), 101 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 71c7261bf822..d1e44cd01ea1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -578,6 +578,101 @@ static inline unsigned short slab_get_stride(struct slab *slab)
 }
 #endif
 
+#ifdef CONFIG_SLAB_OBJ_EXT
+
+/*
+ * Check if memory cgroup or memory allocation profiling is enabled.
+ * If enabled, SLUB tries to reduce memory overhead of accounting
+ * slab objects. If neither is enabled when this function is called,
+ * the optimization is simply skipped to avoid affecting caches that do not
+ * need slabobj_ext metadata.
+ *
+ * However, this may disable optimization when memory cgroup or memory
+ * allocation profiling is used, but slabs are created too early
+ * even before those subsystems are initialized.
+ */
+static inline bool need_slab_obj_exts(struct kmem_cache *s)
+{
+	if (s->flags & SLAB_NO_OBJ_EXT)
+		return false;
+
+	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
+		return true;
+
+	if (mem_alloc_profiling_enabled())
+		return true;
+
+	return false;
+}
+
+static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
+{
+	return sizeof(struct slabobj_ext) * slab->objects;
+}
+
+static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
+						    struct slab *slab)
+{
+	unsigned long objext_offset;
+
+	objext_offset = s->size * slab->objects;
+	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
+	return objext_offset;
+}
+
+static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
+						     struct slab *slab)
+{
+	unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
+	unsigned long objext_size = obj_exts_size_in_slab(slab);
+
+	return objext_offset + objext_size <= slab_size(slab);
+}
+
+static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
+{
+	unsigned long obj_exts;
+	unsigned long start;
+	unsigned long end;
+
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts)
+		return false;
+
+	start = (unsigned long)slab_address(slab);
+	end = start + slab_size(slab);
+	return (obj_exts >= start) && (obj_exts < end);
+}
+#else
+static inline bool need_slab_obj_exts(struct kmem_cache *s)
+{
+	return false;
+}
+
+static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
+{
+	return 0;
+}
+
+static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
+						    struct slab *slab)
+{
+	return 0;
+}
+
+static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
+						     struct slab *slab)
+{
+	return false;
+}
+
+static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
+{
+	return false;
+}
+
+#endif
+
 /*
  * slab_obj_ext - get the pointer to the slab object extension metadata
  * associated with an object in a slab.
@@ -592,13 +687,41 @@ static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
 						       unsigned long obj_exts,
 						       unsigned int index)
 {
-	struct slabobj_ext *obj_ext;
+	struct slabobj_ext *ext_before;
+	struct slabobj_ext *ext_after;
+	struct obj_cgroup *objcg_before;
+	struct obj_cgroup *objcg_after;
 
 	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
 
-	obj_ext = (struct slabobj_ext *)(obj_exts +
-					 slab_get_stride(slab) * index);
-	return kasan_reset_tag(obj_ext);
+	ext_before = (struct slabobj_ext *)(obj_exts +
+					    slab_get_stride(slab) * index);
+	objcg_before = ext_before->objcg;
+	// re-read things after rmb
+	smp_rmb();
+	// is ext_before the right obj_ext for this object?
+	if (obj_exts_in_slab(slab->slab_cache, slab)) {
+		struct kmem_cache *s = slab->slab_cache;
+
+		if (obj_exts_fit_within_slab_leftover(s, slab))
+			WARN(ext_before != (struct slabobj_ext *)(obj_exts + sizeof(struct slabobj_ext) * index),
+			     "obj_exts array in leftover");
+		else
+			WARN(ext_before != (struct slabobj_ext *)(obj_exts + s->size * index),
+			     "obj_ext in object");
+
+	} else {
+		WARN(ext_before != (struct slabobj_ext *)(obj_exts + sizeof(struct slabobj_ext) * index),
+		     "obj_exts array allocated from slab");
+	}
+
+	ext_after = (struct slabobj_ext *)(obj_exts +
+					   slab_get_stride(slab) * index);
+	objcg_after = ext_after->objcg;
+
+	WARN(ext_before != ext_after, "obj_ext pointer has changed");
+	WARN(objcg_before != objcg_after, "obj_ext->objcg has changed");
+	return kasan_reset_tag(ext_before);
 }
 
 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
diff --git a/mm/slub.c b/mm/slub.c
index 862642c165ed..8eb64534370e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -757,101 +757,6 @@ static inline unsigned long get_orig_size(struct kmem_cache *s, void *object)
 	return *(unsigned long *)p;
 }
 
-#ifdef CONFIG_SLAB_OBJ_EXT
-
-/*
- * Check if memory cgroup or memory allocation profiling is enabled.
- * If enabled, SLUB tries to reduce memory overhead of accounting
- * slab objects. 
If neither is enabled when this function is called,
- * the optimization is simply skipped to avoid affecting caches that do not
- * need slabobj_ext metadata.
- *
- * However, this may disable optimization when memory cgroup or memory
- * allocation profiling is used, but slabs are created too early
- * even before those subsystems are initialized.
- */
-static inline bool need_slab_obj_exts(struct kmem_cache *s)
-{
-	if (s->flags & SLAB_NO_OBJ_EXT)
-		return false;
-
-	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
-		return true;
-
-	if (mem_alloc_profiling_enabled())
-		return true;
-
-	return false;
-}
-
-static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
-{
-	return sizeof(struct slabobj_ext) * slab->objects;
-}
-
-static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
-						    struct slab *slab)
-{
-	unsigned long objext_offset;
-
-	objext_offset = s->size * slab->objects;
-	objext_offset = ALIGN(objext_offset, sizeof(struct slabobj_ext));
-	return objext_offset;
-}
-
-static inline bool obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
-						     struct slab *slab)
-{
-	unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
-	unsigned long objext_size = obj_exts_size_in_slab(slab);
-
-	return objext_offset + objext_size <= slab_size(slab);
-}
-
-static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
-{
-	unsigned long obj_exts;
-	unsigned long start;
-	unsigned long end;
-
-	obj_exts = slab_obj_exts(slab);
-	if (!obj_exts)
-		return false;
-
-	start = (unsigned long)slab_address(slab);
-	end = start + slab_size(slab);
-	return (obj_exts >= start) && (obj_exts < end);
-}
-#else
-static inline bool need_slab_obj_exts(struct kmem_cache *s)
-{
-	return false;
-}
-
-static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
-{
-	return 0;
-}
-
-static inline unsigned long obj_exts_offset_in_slab(struct kmem_cache *s,
-						    struct slab *slab)
-{
-	return 0;
-}
-
-static inline bool 
obj_exts_fit_within_slab_leftover(struct kmem_cache *s,
-						     struct slab *slab)
-{
-	return false;
-}
-
-static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
-{
-	return false;
-}
-
-#endif
-
 #if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
 static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
 {
@@ -2196,7 +2101,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 retry:
 	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
-	slab_set_stride(slab, sizeof(struct slabobj_ext));
 
 	if (new_slab) {
 		/*
@@ -2272,6 +2176,9 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 	void *addr;
 	unsigned long obj_exts;
 
+	/* Initialize stride early to avoid memory ordering issues */
+	slab_set_stride(slab, sizeof(struct slabobj_ext));
+
 	if (!need_slab_obj_exts(s))
 		return;
 
@@ -2288,7 +2195,6 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 		obj_exts |= MEMCG_DATA_OBJEXTS;
 #endif
 		slab->obj_exts = obj_exts;
-		slab_set_stride(slab, sizeof(struct slabobj_ext));
 	} else if (s->flags & SLAB_OBJ_EXT_IN_OBJ) {
 		unsigned int offset = obj_exts_offset_in_object(s);
 
-- 
2.43.0