From: Alexei Starovoitov

call_rcu_tasks_trace() is not safe from in_nmi() and is not reentrant.
To prevent deadlock on raw_spin_lock_rcu_node(rtpcp) or memory
corruption, defer to irq_work when IRQs are disabled.
call_rcu_tasks_generic() protects itself with local_irq_save().

Note that when bpf_async_cb->refcnt drops to zero it's safe to reuse
bpf_async_cb->worker for a different irq_work callback, since
bpf_async_schedule_op() -> irq_work_queue(&cb->worker) is only called
when refcnt >= 1.

Fixes: 1bfbc267ec91 ("bpf: Enable bpf_timer and bpf_wq in any context")
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/helpers.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a4f039cee88b..0458597134da 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1276,12 +1276,24 @@ static void bpf_async_cb_rcu_tasks_trace_free(struct rcu_head *rcu)
 	bpf_async_cb_rcu_free(rcu);
 }
 
+static void worker_for_call_rcu(struct irq_work *work)
+{
+	struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
+
+	call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
+}
+
 static void bpf_async_refcount_put(struct bpf_async_cb *cb)
 {
 	if (!refcount_dec_and_test(&cb->refcnt))
 		return;
 
-	call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
+	if (irqs_disabled()) {
+		cb->worker = IRQ_WORK_INIT(worker_for_call_rcu);
+		irq_work_queue(&cb->worker);
+	} else {
+		call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
+	}
 }
 
 static void bpf_async_cancel_and_free(struct bpf_async_kern *async);
-- 
2.47.3