From: Mykyta Yatsenko

Remove preempt_disable_notrace()/preempt_enable_notrace() from
__BPF_DECLARE_TRACE_SYSCALL, the BPF probe callback wrapper for faultable
(syscall) tracepoints. Preemption management is now handled inside
__bpf_trace_run() on a per-program basis: migrate_disable() for sleepable
programs, rcu_read_lock() (which implies preempt-off in non-PREEMPT_RCU
configs) for non-sleepable programs. This allows sleepable BPF programs to
actually sleep when attached to faultable tracepoints.

Signed-off-by: Mykyta Yatsenko
---
 include/trace/bpf_probe.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 9391d54d3f124ab0d56ec57445cfc79baeffc28c..d1de8f9aa07fb76e9ee8037ce43099efb95b05d5 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -58,9 +58,7 @@ static notrace void						\
 __bpf_trace_##call(void *__data, proto)				\
 {									\
 	might_fault();							\
-	preempt_disable_notrace();					\
 	CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args));	\
-	preempt_enable_notrace();					\
 }
 
 #undef DECLARE_EVENT_SYSCALL_CLASS
--
2.53.0
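
For context, below is a simplified sketch of the per-program dispatch the
commit message refers to. It is illustrative only, not the in-tree
__bpf_trace_run() from kernel/trace/bpf_trace.c (which also handles
recursion counters, run contexts and BPF cookies); the
rcu_read_lock_trace()/rcu_read_unlock_trace() pairing for sleepable
programs and the prog->sleepable field name are assumptions based on how
sleepable BPF programs are commonly protected, not something this patch
changes.

#include <linux/bpf.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>

/* Sketch only: the real helper takes a bpf_raw_tp_link and does more. */
static __always_inline void
__bpf_trace_run_sketch(struct bpf_prog *prog, u64 *args)
{
	if (prog->sleepable) {
		/*
		 * Sleepable programs may fault and sleep, so preemption
		 * stays enabled; only pin the task to this CPU and rely
		 * on RCU Tasks Trace for protection (assumed here).
		 */
		rcu_read_lock_trace();
		migrate_disable();
		(void) bpf_prog_run(prog, args);
		migrate_enable();
		rcu_read_unlock_trace();
	} else {
		/*
		 * Non-sleepable programs keep the classic contract:
		 * rcu_read_lock() implies preempt-off on !PREEMPT_RCU
		 * kernels, so the tracepoint wrapper no longer needs its
		 * own preempt_disable_notrace() pair.
		 */
		rcu_read_lock();
		(void) bpf_prog_run(prog, args);
		rcu_read_unlock();
	}
}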