From: "Matthew Wilcox (Oracle)" We already know that slab is a valid slab as that's checked by the caller. In the future, we won't be able to get to a slab pointer from a non-slab page. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Harry Yoo Signed-off-by: Vlastimil Babka --- mm/slub.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 3062f56bf49882538ba5af407de9f69c451f2e29..56143bfd1ae319d384981c810a5ed84af00f4afa 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1684,10 +1684,7 @@ static inline int free_consistency_checks(struct kmem_cache *s, return 0; if (unlikely(s != slab->slab_cache)) { - if (!folio_test_slab(slab_folio(slab))) { - slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", - object); - } else if (!slab->slab_cache) { + if (!slab->slab_cache) { slab_err(NULL, slab, "No slab cache for object 0x%p", object); } else { -- 2.51.0 This will make it clear where we currently cast struct slab to folio only to check the slab type, and allow to change the implementation later with memdesc conversion. For now use a struct page based implementation instead of struct folio to be compatible with further upcoming changes. Signed-off-by: Vlastimil Babka --- mm/slub.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 56143bfd1ae319d384981c810a5ed84af00f4afa..5bbfe4ee8d9846ec9a34584c10750388849da3b9 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -821,6 +821,15 @@ static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) return *(unsigned int *)p; } +/* + * For debugging context when we want to check if the struct slab pointer + * appears to be valid. + */ +static bool validate_slab_ptr(struct slab *slab) +{ + return PageSlab(slab_page(slab)); +} + #ifdef CONFIG_SLUB_DEBUG static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; static DEFINE_SPINLOCK(object_map_lock); @@ -1453,7 +1462,7 @@ static int check_slab(struct kmem_cache *s, struct slab *slab) { int maxobj; - if (!folio_test_slab(slab_folio(slab))) { + if (!validate_slab_ptr(slab)) { slab_err(s, slab, "Not a valid slab page"); return 0; } @@ -1653,7 +1662,7 @@ static noinline bool alloc_debug_processing(struct kmem_cache *s, return true; bad: - if (folio_test_slab(slab_folio(slab))) { + if (validate_slab_ptr(slab)) { /* * If this is a slab page then lets do the best we can * to avoid issues in the future. Marking all objects @@ -2818,7 +2827,7 @@ static void *alloc_single_from_partial(struct kmem_cache *s, slab->inuse++; if (!alloc_debug_processing(s, slab, object, orig_size)) { - if (folio_test_slab(slab_folio(slab))) + if (validate_slab_ptr(slab)) remove_partial(n, slab); return NULL; } -- 2.51.0 We will want to do the validation earlier in some callers or remove it completely, so extract it from check_slab() first. No functional change. Signed-off-by: Vlastimil Babka --- mm/slub.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 5bbfe4ee8d9846ec9a34584c10750388849da3b9..94a089205a86f0667444484e158d307e72cd96e1 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1458,15 +1458,15 @@ static int check_object(struct kmem_cache *s, struct slab *slab, return ret; } +/* + * Checks if the slab state looks sane. 
We will want to do the validation earlier in some callers or remove it
completely, so extract it from check_slab() first. No functional
change.

Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 5bbfe4ee8d9846ec9a34584c10750388849da3b9..94a089205a86f0667444484e158d307e72cd96e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1458,15 +1458,15 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 	return ret;
 }
 
+/*
+ * Checks if the slab state looks sane. Assumes the struct slab pointer
+ * was either obtained in a way that ensures it's valid, or validated
+ * by validate_slab_ptr()
+ */
 static int check_slab(struct kmem_cache *s, struct slab *slab)
 {
 	int maxobj;
 
-	if (!validate_slab_ptr(slab)) {
-		slab_err(s, slab, "Not a valid slab page");
-		return 0;
-	}
-
 	maxobj = order_objects(slab_order(slab), s->size);
 	if (slab->objects > maxobj) {
 		slab_err(s, slab, "objects %u > max %u",
@@ -1633,6 +1633,11 @@ void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
 static inline int alloc_consistency_checks(struct kmem_cache *s,
 					struct slab *slab, void *object)
 {
+	if (!validate_slab_ptr(slab)) {
+		slab_err(s, slab, "Not a valid slab page");
+		return 0;
+	}
+
 	if (!check_slab(s, slab))
 		return 0;
 
@@ -3485,6 +3490,11 @@ static inline bool free_debug_processing(struct kmem_cache *s,
 	int cnt = 0;
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+		if (!validate_slab_ptr(slab)) {
+			slab_err(s, slab, "Not a valid slab page");
+			goto out;
+		}
+
 		if (!check_slab(s, slab))
 			goto out;
 	}
@@ -6519,6 +6529,11 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
 	void *p;
 	void *addr = slab_address(slab);
 
+	if (!validate_slab_ptr(slab)) {
+		slab_err(s, slab, "Not a valid slab page");
+		return;
+	}
+
 	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
 		return;

-- 
2.51.0


In alloc_debug_processing() we can call validate_slab_ptr() upfront,
and then we don't need to recheck when alloc_consistency_checks() fails
for other reasons.

Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 30 ++++++++++++++----------------
 1 file changed, 14 insertions(+), 16 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 94a089205a86f0667444484e158d307e72cd96e1..909c71372a2f542b6e0d67c12ea683133b246b66 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1633,11 +1633,6 @@ void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
 static inline int alloc_consistency_checks(struct kmem_cache *s,
 					struct slab *slab, void *object)
 {
-	if (!validate_slab_ptr(slab)) {
-		slab_err(s, slab, "Not a valid slab page");
-		return 0;
-	}
-
 	if (!check_slab(s, slab))
 		return 0;
 
@@ -1656,6 +1651,11 @@ static noinline bool alloc_debug_processing(struct kmem_cache *s,
 			struct slab *slab, void *object, int orig_size)
 {
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+		if (!validate_slab_ptr(slab)) {
+			slab_err(s, slab, "Not a valid slab page");
+			return false;
+		}
+
 		if (!alloc_consistency_checks(s, slab, object))
 			goto bad;
 	}
@@ -1667,17 +1667,15 @@ static noinline bool alloc_debug_processing(struct kmem_cache *s,
 	return true;
 
 bad:
-	if (validate_slab_ptr(slab)) {
-		/*
-		 * If this is a slab page then lets do the best we can
-		 * to avoid issues in the future. Marking all objects
-		 * as used avoids touching the remaining objects.
-		 */
-		slab_fix(s, "Marking all objects used");
-		slab->inuse = slab->objects;
-		slab->freelist = NULL;
-		slab->frozen = 1; /* mark consistency-failed slab as frozen */
-	}
+	/*
+	 * Let's do the best we can to avoid issues in the future. Marking all
+	 * objects as used avoids touching the remaining objects.
+	 */
+	slab_fix(s, "Marking all objects used");
+	slab->inuse = slab->objects;
+	slab->freelist = NULL;
+	slab->frozen = 1; /* mark consistency-failed slab as frozen */
+
 	return false;
 }

-- 
2.51.0
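With the two patches above applied, the allocation-side entry point
validates the struct slab pointer exactly once, up front. For
reference, the resulting shape of alloc_debug_processing() is roughly
as follows (a sketch assembled from the hunks above; the tracking and
redzone setup between the checks and the success return is elided):

static noinline bool alloc_debug_processing(struct kmem_cache *s,
			struct slab *slab, void *object, int orig_size)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		/* Validate the pointer itself before the deeper checks. */
		if (!validate_slab_ptr(slab)) {
			slab_err(s, slab, "Not a valid slab page");
			return false;
		}

		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* ... redzone/tracking setup elided ... */
	return true;

bad:
	/*
	 * The slab pointer is known valid here, so it is safe to mark
	 * all objects used and freeze the slab.
	 */
	slab_fix(s, "Marking all objects used");
	slab->inuse = slab->objects;
	slab->freelist = NULL;
	slab->frozen = 1;
	return false;
}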
We touch slab->freelist and slab->inuse before checking that the slab
pointer is actually sane. Do that validation first, which will be
safer. We can thus also remove the check from alloc_debug_processing().

This adds a new "s->flags & SLAB_CONSISTENCY_CHECKS" test, but
alloc_single_from_partial() is only called for caches with debugging
enabled, so it's acceptable.

In alloc_single_from_new_slab() we just created the struct slab and
call alloc_debug_processing() mainly to set up redzones, tracking etc.,
while not really expecting the consistency checks to fail. Thus don't
validate it there.

Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 909c71372a2f542b6e0d67c12ea683133b246b66..93df6e82af37c798c3fa5574c9d825f0f4a83013 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1651,11 +1651,6 @@ static noinline bool alloc_debug_processing(struct kmem_cache *s,
 			struct slab *slab, void *object, int orig_size)
 {
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-		if (!validate_slab_ptr(slab)) {
-			slab_err(s, slab, "Not a valid slab page");
-			return false;
-		}
-
 		if (!alloc_consistency_checks(s, slab, object))
 			goto bad;
 	}
@@ -2825,15 +2820,19 @@ static void *alloc_single_from_partial(struct kmem_cache *s,
 
 	lockdep_assert_held(&n->list_lock);
 
+	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+		if (!validate_slab_ptr(slab)) {
+			slab_err(s, slab, "Not a valid slab page");
+			return NULL;
+		}
+	}
+
 	object = slab->freelist;
 	slab->freelist = get_freepointer(s, object);
 	slab->inuse++;
 
-	if (!alloc_debug_processing(s, slab, object, orig_size)) {
-		if (validate_slab_ptr(slab))
-			remove_partial(n, slab);
+	if (!alloc_debug_processing(s, slab, object, orig_size))
 		return NULL;
-	}
 
 	if (slab->inuse == slab->objects) {
 		remove_partial(n, slab);

-- 
2.51.0


The struct slab pointer has been obtained from the object being freed
on all the paths that lead to this function. In all cases this already
includes the test for the slab type of the struct page that struct slab
is overlaying. Thus we would not reach this function if it was not a
valid slab pointer in the first place.

One less obvious case is that kmem_cache_free() trusts virt_to_slab()
blindly, so it may be NULL if the slab type check is false. But with
SLAB_CONSISTENCY_CHECKS, cache_from_obj(), also called from
kmem_cache_free(), catches this and returns NULL, which terminates
freeing immediately.

Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 93df6e82af37c798c3fa5574c9d825f0f4a83013..106dbce64acdf32c1d271ec130c35c0ec0e15630 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3487,11 +3487,6 @@ static inline bool free_debug_processing(struct kmem_cache *s,
 	int cnt = 0;
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-		if (!validate_slab_ptr(slab)) {
-			slab_err(s, slab, "Not a valid slab page");
-			goto out;
-		}
-
 		if (!check_slab(s, slab))
 			goto out;
 	}

-- 
2.51.0
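To make the virt_to_slab() argument concrete: the helper already
returns NULL rather than a garbage pointer when the address does not
belong to a slab page. It is roughly the following (a sketch of the
mm/slab.h helper, not verbatim, and subject to change with the memdesc
work):

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	/* Not a slab page: there is no valid struct slab to return. */
	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

So on the free path a bogus pointer either never produces a struct slab
at all (NULL from virt_to_slab(), caught by cache_from_obj() under
SLAB_CONSISTENCY_CHECKS), or was already type-checked when the struct
slab was derived, which is what makes the validate_slab_ptr() call
removed above redundant in free_debug_processing().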