As Alexei noted, the get_perf_callchain() return value may be reused if a
task is preempted after the BPF program enters migrate-disable mode. The
perf_callchain_entries pool only has a small stack of entries, and we can
reuse it as follows:

1. get the perf callchain entry
2. the BPF program uses the entry
3. put the perf callchain entry

And Peter suggested that get_recursion_context() should be used with
preemption disabled, so we disable preemption on the BPF side.

Acked-by: Yonghong Song
Signed-off-by: Tao Chen
---
 kernel/bpf/stackmap.c | 55 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 47 insertions(+), 8 deletions(-)

diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index e77dcdc2164..6bdee6cc05f 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -215,7 +215,9 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
 #ifdef CONFIG_STACKTRACE
         struct perf_callchain_entry *entry;
 
+        preempt_disable();
         entry = get_callchain_entry();
+        preempt_enable();
         if (!entry)
                 return NULL;
 
@@ -237,14 +239,40 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
                 to[i] = (u64)(from[i]);
         }
 
-        put_callchain_entry(entry);
-
         return entry;
 #else /* CONFIG_STACKTRACE */
         return NULL;
 #endif
 }
 
+static struct perf_callchain_entry *
+bpf_get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, int max_stack,
+                       bool crosstask)
+{
+        struct perf_callchain_entry *entry;
+        int ret;
+
+        preempt_disable();
+        entry = get_callchain_entry();
+        preempt_enable();
+
+        if (unlikely(!entry))
+                return NULL;
+
+        ret = __get_perf_callchain(entry, regs, kernel, user, max_stack, crosstask, false, 0);
+        if (ret) {
+                put_callchain_entry(entry);
+                return NULL;
+        }
+
+        return entry;
+}
+
+static void bpf_put_perf_callchain(struct perf_callchain_entry *entry)
+{
+        put_callchain_entry(entry);
+}
+
 static long __bpf_get_stackid(struct bpf_map *map,
                               struct perf_callchain_entry *trace, u64 flags)
 {
@@ -327,20 +355,23 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
         struct perf_callchain_entry *trace;
         bool kernel = !user;
         u32 max_depth;
+        int ret;
 
         if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                                BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                 return -EINVAL;
 
         max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
-        trace = get_perf_callchain(regs, kernel, user, max_depth,
-                                   false, false, 0);
+        trace = bpf_get_perf_callchain(regs, kernel, user, max_depth, false);
 
         if (unlikely(!trace))
                 /* couldn't fetch the stack trace */
                 return -EFAULT;
 
-        return __bpf_get_stackid(map, trace, flags);
+        ret = __bpf_get_stackid(map, trace, flags);
+        bpf_put_perf_callchain(trace);
+
+        return ret;
 }
 
 const struct bpf_func_proto bpf_get_stackid_proto = {
@@ -468,13 +499,19 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
         } else if (kernel && task) {
                 trace = get_callchain_entry_for_task(task, max_depth);
         } else {
-                trace = get_perf_callchain(regs, kernel, user, max_depth,
-                                           crosstask, false, 0);
+                trace = bpf_get_perf_callchain(regs, kernel, user, max_depth, crosstask);
         }
 
-        if (unlikely(!trace) || trace->nr < skip) {
+        if (unlikely(!trace)) {
+                if (may_fault)
+                        rcu_read_unlock();
+                goto err_fault;
+        }
+        if (trace->nr < skip) {
                 if (may_fault)
                         rcu_read_unlock();
+                if (!trace_in)
+                        bpf_put_perf_callchain(trace);
                 goto err_fault;
         }
 
@@ -495,6 +532,8 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
         /* trace/ips should not be dereferenced after this point */
         if (may_fault)
                 rcu_read_unlock();
+        if (!trace_in)
+                bpf_put_perf_callchain(trace);
 
         if (user_build_id)
                 stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
-- 
2.48.1
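
[Reviewer's note, not part of the patch: a minimal sketch of the get/use/put
flow the new helpers are meant to enforce. The function name stackid_example()
and the argument values passed to bpf_get_perf_callchain() are made up for
illustration; bpf_get_perf_callchain(), bpf_put_perf_callchain() and
__bpf_get_stackid() are the stackmap.c-local helpers added/used in the diff
above, so this would only build inside that file.]

/*
 * Illustrative sketch only: mirrors the updated bpf_get_stackid() path.
 * The callchain entry is held across the whole BPF use and released only
 * afterwards, instead of being put right after the get.
 */
static long stackid_example(struct pt_regs *regs, struct bpf_map *map, u64 flags)
{
        struct perf_callchain_entry *trace;
        long ret;

        /* 1. get: reserve a per-CPU callchain entry; the helper disables
         *    preemption around get_callchain_entry()
         */
        trace = bpf_get_perf_callchain(regs, true, false, 127, false);
        if (unlikely(!trace))
                return -EFAULT;

        /* 2. use: the entry stays held while BPF consumes it */
        ret = __bpf_get_stackid(map, trace, flags);

        /* 3. put: release the entry only after BPF is done with it */
        bpf_put_perf_callchain(trace);

        return ret;
}

Holding the entry until after __bpf_get_stackid() returns is what prevents the
entry from being handed out again and overwritten while a preempted BPF program
is still reading it.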