From: Arnaud Lecomte

Clean up the bounds checking for trace->nr in __bpf_get_stack() by
limiting it only to max_depth.

Signed-off-by: Arnaud Lecomte
Acked-by: Song Liu
Cc: Song Liu
---
 kernel/bpf/stackmap.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index ed707bc07173..9f3ae426ddc3 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -462,13 +462,15 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	if (may_fault)
 		rcu_read_lock(); /* need RCU for perf's callchain below */
 
-	if (trace_in)
+	if (trace_in) {
 		trace = trace_in;
-	else if (kernel && task)
+		trace->nr = min_t(u32, trace->nr, max_depth);
+	} else if (kernel && task) {
 		trace = get_callchain_entry_for_task(task, max_depth);
-	else
+	} else {
 		trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
 					   crosstask, false);
+	}
 
 	if (unlikely(!trace) || trace->nr < skip) {
 		if (may_fault)
@@ -477,7 +479,6 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	}
 
 	trace_nr = trace->nr - skip;
-	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
 	copy_len = trace_nr * elem_size;
 
 	ips = trace->ip + skip;
-- 
2.47.3
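
For readers outside the kernel tree, here is a minimal userspace sketch of
the clamp this patch introduces for the caller-supplied trace. The names
struct callchain and clamp_trace() below are illustrative stand-ins, not
kernel API; min_t() is the kernel's type-checked min macro, approximated
here with a plain comparison.

	/* Illustrative sketch only; simplified stand-in for the kernel's
	 * perf_callchain_entry handling in __bpf_get_stack().
	 */
	#include <stdio.h>

	struct callchain {
		unsigned int nr;        /* number of valid entries in ip[] */
		unsigned long ip[127];  /* instruction pointers */
	};

	/* Clamp a caller-supplied trace to the depth we can actually use,
	 * mirroring: trace->nr = min_t(u32, trace->nr, max_depth);
	 */
	static void clamp_trace(struct callchain *trace, unsigned int max_depth)
	{
		if (trace->nr > max_depth)
			trace->nr = max_depth;
	}

	int main(void)
	{
		struct callchain trace = { .nr = 100 };

		clamp_trace(&trace, 64);
		printf("clamped nr = %u\n", trace.nr); /* prints 64 */
		return 0;
	}

The design intent, as the second hunk suggests: assuming max_depth is
computed so that it already accounts for num_elem and skip, clamping
trace->nr once at the source makes the later cap of trace_nr against
num_elem redundant, so it can be dropped.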