This adds a basic double free check by validating that the first bit of
the allocation is set in both alloc_map and bound_map. If the alloc_map
bit is not set, the area is currently unallocated. If the bound_map bit
is not set, we are not freeing from the beginning of the allocation.

This is a respin of [1] with the changes requested by Christoph and
myself.

[1] https://lore.kernel.org/linux-mm/20250904143514.Yk6Ap-jy@linutronix.de/

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 mm/percpu.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/mm/percpu.c b/mm/percpu.c
index 81462ce5866e..c9f8df6c34c3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -79,6 +79,7 @@
 #include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/pfn.h>
+#include <linux/ratelimit.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
@@ -1285,6 +1286,11 @@ static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
 
 	bit_off = off / PCPU_MIN_ALLOC_SIZE;
 
+	/* check double free */
+	if (!test_bit(bit_off, chunk->alloc_map) ||
+	    !test_bit(bit_off, chunk->bound_map))
+		return 0;
+
 	/* find end index */
 	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
 			    bit_off + 1);
@@ -2225,6 +2231,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
  */
 void free_percpu(void __percpu *ptr)
 {
+	static DEFINE_RATELIMIT_STATE(_rs, 60 * HZ, DEFAULT_RATELIMIT_BURST);
 	void *addr;
 	struct pcpu_chunk *chunk;
 	unsigned long flags;
@@ -2242,6 +2249,13 @@ void free_percpu(void __percpu *ptr)
 	spin_lock_irqsave(&pcpu_lock, flags);
 
 	size = pcpu_free_area(chunk, off);
 
+	if (size == 0) {
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+
+		if (__ratelimit(&_rs))
+			WARN(1, "percpu double free\n");
+		return;
+	}
 
 	pcpu_alloc_tag_free_hook(chunk, off, size);
 
-- 
2.43.0
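
For readers who want to see the two-bitmap idea in isolation, here is a
minimal userspace sketch of the check the patch adds. It is not kernel
code: all names here (toy_alloc, toy_free, MAP_BITS) are made up for
illustration, the maps are single words rather than proper bitmaps, and
clearing the start boundary bit on free is a simplification. The kernel
side operates on chunk->alloc_map and chunk->bound_map with
test_bit()/find_next_bit() as in the diff above.

#include <stdbool.h>
#include <stdio.h>

#define MAP_BITS 64

static unsigned long alloc_map;	/* bit set => unit is occupied */
static unsigned long bound_map;	/* bit set => allocation boundary */

static void toy_alloc(int off, int len)
{
	for (int i = off; i < off + len; i++)
		alloc_map |= 1UL << i;		/* mark every unit occupied */
	bound_map |= 1UL << off;		/* start boundary */
	bound_map |= 1UL << (off + len);	/* end boundary */
}

/* Returns false on a double free or a free not at an allocation start. */
static bool toy_free(int off)
{
	/* the check this patch adds: both bits must be set */
	if (!(alloc_map & (1UL << off)) || !(bound_map & (1UL << off)))
		return false;

	/* the end of the allocation is the next boundary bit */
	int end = off + 1;
	while (end < MAP_BITS && !(bound_map & (1UL << end)))
		end++;
	for (int i = off; i < end; i++)
		alloc_map &= ~(1UL << i);
	bound_map &= ~(1UL << off);
	return true;
}

int main(void)
{
	toy_alloc(4, 3);	/* units 4-6, boundaries at 4 and 7 */
	printf("free(4): %s\n", toy_free(4) ? "ok" : "rejected");
	printf("free(4): %s\n", toy_free(4) ? "ok" : "rejected"); /* double free */
	toy_alloc(8, 2);
	printf("free(9): %s\n", toy_free(9) ? "ok" : "rejected"); /* mid-allocation */
	return 0;
}

The second condition is why bound_map is consulted at all: a clear
alloc_map bit only catches a free of an unallocated area, while a set
alloc_map bit with a clear bound_map bit catches a pointer into the
middle of a live allocation, which would otherwise make the free span
from the wrong start to the next boundary.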