Currently, the slab allocator assumes that slab->obj_exts is a pointer to
an array of struct slabobj_ext objects. However, to support storage
methods where struct slabobj_ext is embedded within objects, the slab
allocator should not make this assumption.

Instead of directly dereferencing the slabobj_ext array, abstract access
to struct slabobj_ext via helper functions. Introduce a new API for
slabobj_ext metadata access:

  slab_obj_ext(slab, obj_exts, index) - returns a pointer to the
  struct slabobj_ext element at the given index.

Directly dereferencing the return value of slab_obj_exts() is no longer
allowed. Instead, slab_obj_ext() must always be used to access individual
struct slabobj_ext objects. Convert all users to these APIs.

No functional changes intended.

Signed-off-by: Harry Yoo
---
A sketch of the conversion pattern is appended after the diff for reviewers.

 mm/memcontrol.c | 23 +++++++++++++++-------
 mm/slab.h       | 43 +++++++++++++++++++++++++++++++++++------
 mm/slub.c       | 51 ++++++++++++++++++++++++++++---------------------
 3 files changed, 82 insertions(+), 35 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index be810c1fbfc3..fd9105a953b0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2596,7 +2596,8 @@ struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 	 * Memcg membership data for each individual object is saved in
 	 * slab->obj_exts.
 	 */
-	struct slabobj_ext *obj_exts;
+	unsigned long obj_exts;
+	struct slabobj_ext *obj_ext;
 	unsigned int off;
 
 	obj_exts = slab_obj_exts(slab);
@@ -2604,8 +2605,9 @@ struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
 		return NULL;
 
 	off = obj_to_index(slab->slab_cache, slab, p);
-	if (obj_exts[off].objcg)
-		return obj_cgroup_memcg(obj_exts[off].objcg);
+	obj_ext = slab_obj_ext(slab, obj_exts, off);
+	if (obj_ext->objcg)
+		return obj_cgroup_memcg(obj_ext->objcg);
 
 	return NULL;
 }
@@ -3191,6 +3193,9 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 	}
 
 	for (i = 0; i < size; i++) {
+		unsigned long obj_exts;
+		struct slabobj_ext *obj_ext;
+
 		slab = virt_to_slab(p[i]);
 
 		if (!slab_obj_exts(slab) &&
@@ -3213,29 +3218,33 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 					     slab_pgdat(slab), cache_vmstat_idx(s)))
 			return false;
 
+		obj_exts = slab_obj_exts(slab);
 		off = obj_to_index(s, slab, p[i]);
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
 		obj_cgroup_get(objcg);
-		slab_obj_exts(slab)[off].objcg = objcg;
+		obj_ext->objcg = objcg;
 	}
 
 	return true;
 }
 
 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-			    void **p, int objects, struct slabobj_ext *obj_exts)
+			    void **p, int objects, unsigned long obj_exts)
 {
 	size_t obj_size = obj_full_size(s);
 
 	for (int i = 0; i < objects; i++) {
 		struct obj_cgroup *objcg;
+		struct slabobj_ext *obj_ext;
 		unsigned int off;
 
 		off = obj_to_index(s, slab, p[i]);
-		objcg = obj_exts[off].objcg;
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
+		objcg = obj_ext->objcg;
 		if (!objcg)
 			continue;
 
-		obj_exts[off].objcg = NULL;
+		obj_ext->objcg = NULL;
 		refill_obj_stock(objcg, obj_size, true, -obj_size,
 				 slab_pgdat(slab), cache_vmstat_idx(s));
 		obj_cgroup_put(objcg);
diff --git a/mm/slab.h b/mm/slab.h
index e767aa7e91b0..5c75ef3d1823 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -509,10 +509,12 @@ static inline bool slab_in_kunit_test(void) { return false; }
  *                associated with a slab.
  * @slab: a pointer to the slab struct
  *
- * Returns a pointer to the object extension vector associated with the slab,
- * or NULL if no such vector has been associated yet.
+ * Returns the address of the object extension vector associated with the slab,
+ * or zero if no such vector has been associated yet.
+ * Do not dereference the return value directly; use slab_obj_ext() to access
+ * its elements.
  */
-static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
+static inline unsigned long slab_obj_exts(struct slab *slab)
 {
 	unsigned long obj_exts = READ_ONCE(slab->obj_exts);
 
@@ -525,7 +527,30 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 							obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
 	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
 #endif
-	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
+
+	return obj_exts & ~OBJEXTS_FLAGS_MASK;
+}
+
+/*
+ * slab_obj_ext - get the pointer to the slab object extension metadata
+ *                associated with an object in a slab.
+ * @slab: a pointer to the slab struct
+ * @obj_exts: the address of the object extension vector
+ * @index: an index of the object
+ *
+ * Returns a pointer to the object extension associated with the object.
+ */
+static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
+					       unsigned long obj_exts,
+					       unsigned int index)
+{
+	struct slabobj_ext *obj_ext;
+
+	VM_WARN_ON_ONCE(!slab_obj_exts(slab));
+	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
+
+	obj_ext = (struct slabobj_ext *)obj_exts;
+	return &obj_ext[index];
 }
 
 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -533,7 +558,13 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 
 #else /* CONFIG_SLAB_OBJ_EXT */
 
-static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
+static inline unsigned long slab_obj_exts(struct slab *slab)
+{
+	return 0;
+}
+
+static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
+					       unsigned long obj_exts, unsigned int index)
 {
 	return NULL;
 }
@@ -550,7 +581,7 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 				  gfp_t flags, size_t size, void **p);
 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-			    void **p, int objects, struct slabobj_ext *obj_exts);
+			    void **p, int objects, unsigned long obj_exts);
 #endif
 
 void kvfree_rcu_cb(struct rcu_head *head);
diff --git a/mm/slub.c b/mm/slub.c
index 0e32f6420a8a..84bd4f23dc4a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2042,7 +2042,7 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 {
-	struct slabobj_ext *slab_exts;
+	unsigned long slab_exts;
 	struct slab *obj_exts_slab;
 
 	obj_exts_slab = virt_to_slab(obj_exts);
@@ -2050,13 +2050,15 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 	if (slab_exts) {
 		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
 						 obj_exts_slab, obj_exts);
+		struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+						       slab_exts, offs);
 
-		if (unlikely(is_codetag_empty(&slab_exts[offs].ref)))
+		if (unlikely(is_codetag_empty(&ext->ref)))
 			return;
 
 		/* codetag should be NULL here */
-		WARN_ON(slab_exts[offs].ref.ct);
-		set_codetag_empty(&slab_exts[offs].ref);
+		WARN_ON(ext->ref.ct);
+		set_codetag_empty(&ext->ref);
 	}
 }
 
@@ -2176,7 +2178,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 
 static inline void free_slab_obj_exts(struct slab *slab)
 {
-	struct slabobj_ext *obj_exts;
+	unsigned long obj_exts;
 
 	obj_exts = slab_obj_exts(slab);
 	if (!obj_exts) {
@@ -2196,11 +2198,11 @@ static inline void free_slab_obj_exts(struct slab *slab)
 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
 	 * the extension for obj_exts is expected to be NULL.
 	 */
-	mark_objexts_empty(obj_exts);
+	mark_objexts_empty((struct slabobj_ext *)obj_exts);
 	if (unlikely(READ_ONCE(slab->obj_exts) & OBJEXTS_NOSPIN_ALLOC))
-		kfree_nolock(obj_exts);
+		kfree_nolock((void *)obj_exts);
 	else
-		kfree(obj_exts);
+		kfree((void *)obj_exts);
 	slab->obj_exts = 0;
 }
 
@@ -2225,26 +2227,29 @@ static inline void free_slab_obj_exts(struct slab *slab)
 #ifdef CONFIG_MEM_ALLOC_PROFILING
 
 static inline struct slabobj_ext *
-prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+prepare_slab_obj_ext_hook(struct kmem_cache *s, gfp_t flags, void *p)
 {
 	struct slab *slab;
+	unsigned long obj_exts;
 
 	slab = virt_to_slab(p);
-	if (!slab_obj_exts(slab) &&
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts &&
 	    alloc_slab_obj_exts(slab, s, flags, false)) {
 		pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
 			     __func__, s->name);
 		return NULL;
 	}
 
-	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
+	obj_exts = slab_obj_exts(slab);
+	return slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, p));
 }
 
 /* Should be called only if mem_alloc_profiling_enabled() */
 static noinline void
 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	struct slabobj_ext *obj_exts;
+	struct slabobj_ext *obj_ext;
 
 	if (!object)
 		return;
@@ -2255,14 +2260,14 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 	if (flags & __GFP_NO_OBJ_EXT)
 		return;
 
-	obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+	obj_ext = prepare_slab_obj_ext_hook(s, flags, object);
 	/*
 	 * Currently obj_exts is used only for allocation profiling.
 	 * If other users appear then mem_alloc_profiling_enabled()
 	 * check should be added before alloc_tag_add().
 	 */
-	if (likely(obj_exts))
-		alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+	if (likely(obj_ext))
+		alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
 	else
 		alloc_tag_set_inaccurate(current->alloc_tag);
 }
@@ -2279,8 +2284,8 @@ static noinline void
 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			       int objects)
 {
-	struct slabobj_ext *obj_exts;
 	int i;
+	unsigned long obj_exts;
 
 	/* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
@@ -2293,7 +2298,7 @@ __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p
 
 	for (i = 0; i < objects; i++) {
 		unsigned int off = obj_to_index(s, slab, p[i]);
 
-		alloc_tag_sub(&obj_exts[off].ref, s->size);
+		alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
 	}
 }
 
@@ -2352,7 +2357,7 @@ static __fastpath_inline
 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			  int objects)
 {
-	struct slabobj_ext *obj_exts;
+	unsigned long obj_exts;
 
 	if (!memcg_kmem_online())
 		return;
@@ -2367,7 +2372,8 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 static __fastpath_inline
 bool memcg_slab_post_charge(void *p, gfp_t flags)
 {
-	struct slabobj_ext *slab_exts;
+	unsigned long obj_exts;
+	struct slabobj_ext *obj_ext;
 	struct kmem_cache *s;
 	struct page *page;
 	struct slab *slab;
@@ -2408,10 +2414,11 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
 		return true;
 
 	/* Ignore already charged objects. */
-	slab_exts = slab_obj_exts(slab);
-	if (slab_exts) {
+	obj_exts = slab_obj_exts(slab);
+	if (obj_exts) {
 		off = obj_to_index(s, slab, p);
-		if (unlikely(slab_exts[off].objcg))
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
+		if (unlikely(obj_ext->objcg))
 			return true;
 	}
 
-- 
2.43.0
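
P.S. For reviewers (and out-of-tree callers), a minimal sketch of the
conversion pattern this patch applies at every slab_obj_exts() call site.
It assumes s, slab and p are in scope as in the hooks above; process_ext()
is a hypothetical placeholder, not a function from this series:

	/* Before: slab_obj_exts() returned struct slabobj_ext * and callers
	 * indexed the vector directly.
	 */
	struct slabobj_ext *vec = slab_obj_exts(slab);

	if (vec)
		process_ext(&vec[obj_to_index(s, slab, p)]);

	/* After: slab_obj_exts() returns an opaque unsigned long. Fetch it
	 * once, then resolve each element through slab_obj_ext(), which can
	 * later hide layouts where struct slabobj_ext is embedded in the
	 * object itself.
	 */
	unsigned long obj_exts = slab_obj_exts(slab);

	if (obj_exts) {
		unsigned int off = obj_to_index(s, slab, p);
		struct slabobj_ext *obj_ext = slab_obj_ext(slab, obj_exts, off);

		process_ext(obj_ext);
	}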