From the bpf stack map, we want to use our own buffer to avoid an
unnecessary copy, so let us pass it directly. BPF will use this in
the next patch.

Signed-off-by: Tao Chen
---
 include/linux/perf_event.h |  4 ++--
 kernel/bpf/stackmap.c      |  4 ++--
 kernel/events/callchain.c  | 13 +++++++++----
 kernel/events/core.c       |  2 +-
 4 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd1d91017b9..b144da7d803 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1719,8 +1719,8 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark);
+get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
+		   bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4d53cdd1374..94e46b7f340 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -314,7 +314,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	if (max_depth > sysctl_perf_event_max_stack)
 		max_depth = sysctl_perf_event_max_stack;
 
-	trace = get_perf_callchain(regs, kernel, user, max_depth,
+	trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
 				   false, false);
 
 	if (unlikely(!trace))
@@ -451,7 +451,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	else if (kernel && task)
 		trace = get_callchain_entry_for_task(task, max_depth);
 	else
-		trace = get_perf_callchain(regs, kernel, user, max_depth,
+		trace = get_perf_callchain(regs, NULL, kernel, user, max_depth,
 					   crosstask, false);
 
 	if (unlikely(!trace) || trace->nr < skip) {
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 808c0d7a31f..851e8f9d026 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -217,8 +217,8 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
 }
 
 struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark)
+get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
+		   bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
@@ -228,7 +228,11 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 	if (crosstask && user && !kernel)
 		return NULL;
 
-	entry = get_callchain_entry(&rctx);
+	if (external_entry)
+		entry = external_entry;
+	else
+		entry = get_callchain_entry(&rctx);
+
 	if (!entry)
 		return NULL;
 
@@ -260,7 +264,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
 	}
 
 exit_put:
-	put_callchain_entry(rctx);
+	if (!external_entry)
+		put_callchain_entry(rctx);
 
 	return entry;
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7541f6f85fc..5d8e146003a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8217,7 +8217,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return &__empty_callchain;
 
-	callchain = get_perf_callchain(regs, kernel, user,
+	callchain = get_perf_callchain(regs, NULL, kernel, user,
 				       max_stack, crosstask, true);
 	return callchain ?: &__empty_callchain;
 }
-- 
2.48.1
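
Note for reviewers: below is a minimal sketch of how a follow-up caller
might use the new external_entry argument. It is illustrative only; the
names bpf_perf_callchain_entry, bpf_stack_entry and
bpf_get_perf_callchain are assumptions, not taken from this series, and
the actual BPF user lands in the next patch.

/* Illustrative only: a caller-owned per-CPU buffer, sized up front
 * since struct perf_callchain_entry ends in a flexible array.
 */
struct bpf_perf_callchain_entry {
	u64 nr;
	u64 ip[PERF_MAX_STACK_DEPTH];
};

static DEFINE_PER_CPU(struct bpf_perf_callchain_entry, bpf_stack_entry);

static struct perf_callchain_entry *
bpf_get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
		       u32 max_depth)
{
	/* Caller must not migrate CPUs while the buffer is in use,
	 * e.g. run with preemption or migration disabled.
	 */
	struct bpf_perf_callchain_entry *entry = this_cpu_ptr(&bpf_stack_entry);

	/* A non-NULL external_entry makes get_perf_callchain() fill the
	 * caller's buffer in place and skip get/put_callchain_entry(),
	 * so the trace never has to be copied out of the shared pool.
	 */
	return get_perf_callchain(regs, (struct perf_callchain_entry *)entry,
				  kernel, user, max_depth, false, false);
}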