Record rctx inside the perf_callchain_entry itself, so that callers of get_callchain_entry no longer need to care about the assignment of rctx; it will be used in the next patch. Suggested-by: Andrii Nakryiko Signed-off-by: Tao Chen --- include/linux/perf_event.h | 5 +++-- kernel/bpf/stackmap.c | 5 ++--- kernel/events/callchain.c | 27 ++++++++++++++++----------- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9870d768db4..f0489843ebc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -57,6 +57,7 @@ #include struct perf_callchain_entry { + int rctx; u64 nr; u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ }; @@ -1723,8 +1724,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); -extern struct perf_callchain_entry *get_callchain_entry(int *rctx); -extern void put_callchain_entry(int rctx); +extern struct perf_callchain_entry *get_callchain_entry(void); +extern void put_callchain_entry(struct perf_callchain_entry *entry); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index da3d328f5c1..e77dcdc2164 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -214,9 +214,8 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth) { #ifdef CONFIG_STACKTRACE struct perf_callchain_entry *entry; - int rctx; - entry = get_callchain_entry(&rctx); + entry = get_callchain_entry(); if (!entry) return NULL; @@ -238,7 +237,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth) to[i] = (u64)(from[i]); } - put_callchain_entry(rctx); + put_callchain_entry(entry); return entry; #else /* CONFIG_STACKTRACE */ diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c 
index b9c7e00725d..6cdbc5937b1 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -151,31 +151,36 @@ void put_callchain_buffers(void) } } -struct perf_callchain_entry *get_callchain_entry(int *rctx) +struct perf_callchain_entry *get_callchain_entry(void) { int cpu; + int rctx; struct callchain_cpus_entries *entries; + struct perf_callchain_entry *entry; - *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion)); - if (*rctx == -1) + rctx = get_recursion_context(this_cpu_ptr(callchain_recursion)); + if (rctx == -1) return NULL; entries = rcu_dereference(callchain_cpus_entries); if (!entries) { - put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx); + put_recursion_context(this_cpu_ptr(callchain_recursion), rctx); return NULL; } cpu = smp_processor_id(); - return (((void *)entries->cpu_entries[cpu]) + - (*rctx * perf_callchain_entry__sizeof())); + entry = ((void *)entries->cpu_entries[cpu]) + + (rctx * perf_callchain_entry__sizeof()); + entry->rctx = rctx; + + return entry; } void -put_callchain_entry(int rctx) +put_callchain_entry(struct perf_callchain_entry *entry) { - put_recursion_context(this_cpu_ptr(callchain_recursion), rctx); + put_recursion_context(this_cpu_ptr(callchain_recursion), entry->rctx); } static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entry, @@ -222,13 +227,13 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, { struct perf_callchain_entry *entry; struct perf_callchain_entry_ctx ctx; - int rctx, start_entry_idx; + int start_entry_idx; /* crosstask is not supported for user stacks */ if (crosstask && user && !kernel) return NULL; - entry = get_callchain_entry(&rctx); + entry = get_callchain_entry(); if (!entry) return NULL; @@ -272,7 +277,7 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, } exit_put: - put_callchain_entry(rctx); + put_callchain_entry(entry); return entry; } -- 2.48.1