When a bio is allocated from the mempool with REQ_ALLOC_CACHE set and
later completed, bio_put() places it into the per-cpu bio_alloc_cache
via bio_put_percpu_cache() instead of freeing it back to the
mempool/slab. The slab allocation remains tracked by kmemleak, but the
only reference to the bio is through the percpu cache's free_list,
which kmemleak fails to trace through percpu memory. This causes
kmemleak to report the cached bios as unreferenced objects.

Use symmetric kmemleak_free()/kmemleak_alloc() calls to properly track
bios across percpu cache transitions:

- bio_put_percpu_cache: call kmemleak_free() when a bio enters the
  cache, unregistering it from kmemleak tracking.

- bio_alloc_percpu_cache: call kmemleak_alloc() when a bio is taken
  from the cache for reuse, re-registering it so that genuine leaks of
  reused bios remain detectable.

- __bio_alloc_cache_prune: call kmemleak_alloc() before bio_free() so
  that kmem_cache_free()'s internal kmemleak_free() has a matching
  allocation to pair with.
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
V2:
	- rebase on for-7.1/block
	- add helper of bio_slab_addr()

 block/bio.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/block/bio.c b/block/bio.c
index 77067fa346d3..c8234d347fc5 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <linux/kmemleak.h>
 #include

 #include "blk.h"
@@ -116,6 +117,11 @@ static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
 	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
 }

+static inline void *bio_slab_addr(struct bio *bio)
+{
+	return (void *)bio - bio->bi_pool->front_pad;
+}
+
 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
 {
 	unsigned int size = bs_bio_slab_size(bs);
@@ -486,6 +492,9 @@ static struct bio *bio_alloc_percpu_cache(struct bio_set *bs)
 	cache->nr--;
 	put_cpu();
 	bio->bi_pool = bs;
+
+	kmemleak_alloc(bio_slab_addr(bio),
+			kmem_cache_size(bs->bio_slab), 1, GFP_NOIO);
 	return bio;
 }
@@ -728,6 +737,9 @@ static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
 	while ((bio = cache->free_list) != NULL) {
 		cache->free_list = bio->bi_next;
 		cache->nr--;
+		kmemleak_alloc(bio_slab_addr(bio),
+				kmem_cache_size(bio->bi_pool->bio_slab),
+				1, GFP_KERNEL);
 		bio_free(bio);
 		if (++i == nr)
 			break;
@@ -791,6 +803,7 @@ static inline void bio_put_percpu_cache(struct bio *bio)
 		bio->bi_bdev = NULL;
 		cache->free_list = bio;
 		cache->nr++;
+		kmemleak_free(bio_slab_addr(bio));
 	} else if (in_hardirq()) {
 		lockdep_assert_irqs_disabled();

@@ -798,6 +811,7 @@
 		bio->bi_next = cache->free_list_irq;
 		cache->free_list_irq = bio;
 		cache->nr_irq++;
+		kmemleak_free(bio_slab_addr(bio));
 	} else {
 		goto out_free;
 	}
-- 
2.53.0