This adds a basic double free check by validating that the first bits of the allocation in alloc_map and bound_map are set. If the alloc_map bit is not set, then this means the area is currently unallocated. If the bound_map bit is not set, then we are not freeing from the beginning of the allocation. This is a respin of [1] adding the requested changes from me and Christoph. [1] https://lore.kernel.org/linux-mm/20250904143514.Yk6Ap-jy@linutronix.de/ Signed-off-by: Dennis Zhou Cc: Sebastian Andrzej Siewior --- v2: - moved pcpu_stats_area_dealloc() - added additional check for bit_off out of bounds mm/percpu.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/mm/percpu.c b/mm/percpu.c index 81462ce5866e..2f1ac2059a15 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -1276,18 +1277,24 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, static int pcpu_free_area(struct pcpu_chunk *chunk, int off) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; + int region_bits = pcpu_chunk_map_bits(chunk); int bit_off, bits, end, oslot, freed; lockdep_assert_held(&pcpu_lock); - pcpu_stats_area_dealloc(chunk); oslot = pcpu_chunk_slot(chunk); bit_off = off / PCPU_MIN_ALLOC_SIZE; + if (unlikely(bit_off < 0 || bit_off >= region_bits)) + return 0; + + /* check double free */ + if (!test_bit(bit_off, chunk->alloc_map) || + !test_bit(bit_off, chunk->bound_map)) + return 0; /* find end index */ - end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), - bit_off + 1); + end = find_next_bit(chunk->bound_map, region_bits, bit_off + 1); bits = end - bit_off; bitmap_clear(chunk->alloc_map, bit_off, bits); @@ -1303,6 +1310,8 @@ static int pcpu_free_area(struct pcpu_chunk *chunk, int off) pcpu_chunk_relocate(chunk, oslot); + pcpu_stats_area_dealloc(chunk); + return freed; } @@ -2225,6 +2234,7 @@ static void pcpu_balance_workfn(struct work_struct *work) 
*/ void free_percpu(void __percpu *ptr) { + static DEFINE_RATELIMIT_STATE(_rs, 60 * HZ, DEFAULT_RATELIMIT_BURST); void *addr; struct pcpu_chunk *chunk; unsigned long flags; @@ -2242,6 +2252,13 @@ void free_percpu(void __percpu *ptr) spin_lock_irqsave(&pcpu_lock, flags); size = pcpu_free_area(chunk, off); + if (size == 0) { + spin_unlock_irqrestore(&pcpu_lock, flags); + + if (__ratelimit(&_rs)) + WARN(1, "percpu double free or bad ptr\n"); + return; + } pcpu_alloc_tag_free_hook(chunk, off, size); -- 2.43.0