From: Mykyta Yatsenko Add bpf_prog_run_array_sleepable() for running BPF program arrays on faultable tracepoints. Unlike bpf_prog_run_array_uprobe(), it includes per-program recursion checking for private stack safety and hardcodes is_uprobe to false. Guard the recursion check with a prog->active NULL check. When bpf_prog_array_delete_safe() replaces a detached program with dummy_bpf_prog (on allocation failure), the dummy's active field is NULL because dummy_bpf_prog is statically allocated and never goes through bpf_prog_alloc(). Without the guard, bpf_prog_get_recursion_context() would dereference a NULL per-CPU pointer, corrupting per-CPU memory. This follows the same pattern used for prog->stats in commit 7dc211c1159d ("bpf: Fix invalid prog->stats access when update_effective_progs fails"). Keep bpf_prog_run_array_uprobe() unchanged for uprobe callers. Acked-by: Kumar Kartikeya Dwivedi Signed-off-by: Mykyta Yatsenko --- include/linux/bpf.h | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0136a108d083..f2c9520e11a8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -3077,6 +3077,64 @@ void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); void bpf_prog_report_arena_violation(bool write, unsigned long addr, unsigned long fault_ip);
/*
 * Run all programs in @array on a faultable (sleepable) tracepoint,
 * invoking each via @run_prog with @ctx.
 *
 * Caller must be inside an rcu_read_lock_trace() section (enforced by
 * the RCU_LOCKDEP_WARN below).  May sleep (might_fault()), so it must
 * not be called from non-sleepable context.
 *
 * Returns the bitwise AND of all program return values; 1 if @array is
 * NULL or empty.
 */
static __always_inline u32
bpf_prog_run_array_sleepable(const struct bpf_prog_array *array,
			     const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	struct bpf_prog *prog;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	might_fault();
	RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");

	/* No programs attached: report success (1) without touching run_ctx. */
	if (unlikely(!array))
		return ret;

	/* Pin to this CPU; per-CPU recursion state below relies on it. */
	migrate_disable();

	/* Unlike bpf_prog_run_array_uprobe(), this path is never a uprobe. */
	run_ctx.is_uprobe = false;

	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		/*
		 * Non-sleepable programs are protected by classic RCU,
		 * not RCU-tasks-trace, so take rcu_read_lock() around
		 * each such program's execution.
		 */
		if (!prog->sleepable)
			rcu_read_lock();

		/*
		 * Per-prog recursion check to enable private stack.
		 * Skip if prog->active is not allocated, e.g.
		 * for dummy_bpf_prog used as a fallback placeholder
		 * by bpf_prog_array_delete_safe().
		 */
		if (likely(prog->active)) {
			if (unlikely(!bpf_prog_get_recursion_context(prog))) {
				/*
				 * Recursion detected: count the miss and
				 * skip this program.  The put below balances
				 * the increment done by the failed get.
				 * NOTE(review): assumes get increments
				 * prog->active even on failure — confirm
				 * against its definition.
				 */
				bpf_prog_inc_misses_counter(prog);
				bpf_prog_put_recursion_context(prog);
				if (!prog->sleepable)
					rcu_read_unlock();
				item++;
				continue;
			}
		}

		/* Per-program cookie must be set before each invocation. */
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);

		/* Balance the successful get above; same prog->active guard. */
		if (likely(prog->active))
			bpf_prog_put_recursion_context(prog);
		item++;

		if (!prog->sleepable)
			rcu_read_unlock();
	}
	bpf_reset_run_ctx(old_run_ctx);
	migrate_enable();
	return ret;
}

#else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { -- 2.52.0