Currently, the top-level Makefile.perf defines a massive global bpf-skel umbrella target that pre-compiles all 12+ BPF skeletons (%.skel.h) upfront before launching sub-makes. This forces unrelated sub-makes to serialize behind bpftool and clang BPF target evaluations, causing parallel build bottlenecks. Furthermore, bench_uprobe.bpf.c lives inside util/bpf_skel/, breaking conceptual directory encapsulation since it is consumed purely by bench/uprobe.c. Refactor the BPF skeletons to better achieve directory isolation: 1. Move tools/perf/util/bpf_skel/bench_uprobe.bpf.c directly into tools/perf/bench/bpf_skel/. 2. Extract the skeleton generation infrastructure out of Makefile.perf into a shared inclusion file tools/perf/bpf_skel.mak. 3. Include bpf_skel.mak locally inside tools/perf/util/Build and tools/perf/bench/Build and bind precise local prerequisites. 4. Safely synchronize the shared bpftool bootstrap and vmlinux.h targets via the conditional prepare: umbrella to avoid parallel sub-make races, while evaluating the actual skeletons completely locally on demand. A later patch will move these targets into bpf_skel.mak. 5. Export CLANG and LIBBPF from Makefile.perf to ensure accurate tool propagation to sub-makes. 6. Clean up Makefile.perf by stripping the global bpf-skel umbrella target and its SKELETONS list. While removing code from Makefile.perf generally helps build performance, the impact here is minimal. The main motivation for the change is to better encapsulate things in the build and to simplify Makefile.perf, which loses around 50 lines. 
Tested-by: James Clark Assisted-by: Gemini:gemini-3.1-pro-preview Signed-off-by: Ian Rogers --- tools/perf/Makefile.perf | 59 ++----------------- tools/perf/bench/Build | 6 ++ .../bpf_skel/bench_uprobe.bpf.c | 0 tools/perf/bench/uprobe.c | 2 +- tools/perf/bpf_skel.mak | 55 +++++++++++++++++ tools/perf/util/Build | 15 ++++- 6 files changed, 80 insertions(+), 57 deletions(-) rename tools/perf/{util => bench}/bpf_skel/bench_uprobe.bpf.c (100%) create mode 100644 tools/perf/bpf_skel.mak diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 24581941e912..d4fc10f36781 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -274,7 +274,7 @@ ifeq ($(PYLINT),1) PYLINT := $(shell which pylint 2> /dev/null) endif -export srctree OUTPUT RM CC CXX RUSTC LD AR CFLAGS CXXFLAGS RUST_FLAGS V BISON FLEX AWK +export srctree OUTPUT RM CC CXX RUSTC CLANG LD AR CFLAGS CXXFLAGS RUST_FLAGS V BISON FLEX AWK LIBBPF export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK MYPY PYLINT include $(srctree)/tools/build/Makefile.include @@ -632,8 +632,7 @@ prepare: $(OUTPUT)PERF-VERSION-FILE \ $(LIBAPI) \ $(LIBPERF) \ $(LIBSUBCMD) \ - $(LIBSYMBOL) \ - bpf-skel + $(LIBSYMBOL) ifdef LIBBPF_STATIC prepare: $(LIBBPF) @@ -914,44 +913,13 @@ python-clean: SKEL_OUT := $(abspath $(OUTPUT)util/bpf_skel) SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp) -SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h -SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h -SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h -SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h -SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h $(SKEL_OUT)/sample_filter.skel.h -SKELETONS += $(SKEL_OUT)/kwork_top.skel.h $(SKEL_OUT)/syscall_summary.skel.h -SKELETONS += $(SKEL_OUT)/bench_uprobe.skel.h -SKELETONS += $(SKEL_OUT)/augmented_raw_syscalls.skel.h $(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_OUTPUT) 
$(LIBSYMBOL_OUTPUT): $(Q)$(MKDIR) -p $@ ifeq ($(CONFIG_PERF_BPF_SKEL),y) +prepare: $(BPFTOOL) $(SKEL_OUT)/vmlinux.h BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool -# Get Clang's default includes on this system, as opposed to those seen by -# '--target=bpf'. This fixes "missing" files on some architectures/distros, -# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc. -# -# Use '-idirafter': Don't interfere with include mechanics except where the -# build would have failed anyways. -define get_sys_includes -$(shell $(1) $(2) -v -E - &1 \ - | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ -$(shell $(1) $(2) -dM -E - $@ - -bpf-skel: $(SKELETONS) - -.PRECIOUS: $(SKEL_TMP_OUT)/%.bpf.o - -else # CONFIG_PERF_BPF_SKEL - -bpf-skel: - endif # CONFIG_PERF_BPF_SKEL bpf-skel-clean: - $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) $(SKEL_OUT)/vmlinux.h + $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKEL_OUT)/*.skel.h $(SKEL_OUT)/vmlinux.h $(OUTPUT)bench/bpf_skel/*.skel.h pmu-events-clean: ifeq ($(OUTPUT),) diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build index b558ab98719f..67b76fe20ba6 100644 --- a/tools/perf/bench/Build +++ b/tools/perf/bench/Build @@ -24,3 +24,9 @@ perf-bench-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o perf-bench-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o perf-bench-$(CONFIG_NUMA) += numa.o + +ifeq ($(CONFIG_PERF_BPF_SKEL),y) +include $(srctree)/tools/perf/bpf_skel.mak + +$(OUTPUT)bench/uprobe.o: $(SKEL_OUT)/bench_uprobe.skel.h +endif diff --git a/tools/perf/util/bpf_skel/bench_uprobe.bpf.c b/tools/perf/bench/bpf_skel/bench_uprobe.bpf.c similarity index 100% rename from tools/perf/util/bpf_skel/bench_uprobe.bpf.c rename to tools/perf/bench/bpf_skel/bench_uprobe.bpf.c diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c index 89697ff788ef..616873bca243 100644 --- a/tools/perf/bench/uprobe.c +++ b/tools/perf/bench/uprobe.c @@ -44,7 +44,7 @@ 
static const char * const bench_uprobe_usage[] = { }; #ifdef HAVE_BPF_SKEL -#include "bpf_skel/bench_uprobe.skel.h" +#include "bench/bpf_skel/bench_uprobe.skel.h" #define bench_uprobe__attach_uprobe(prog) \ skel->links.prog = bpf_program__attach_uprobe_opts(/*prog=*/skel->progs.prog, \ diff --git a/tools/perf/bpf_skel.mak b/tools/perf/bpf_skel.mak new file mode 100644 index 000000000000..bcb704d5af32 --- /dev/null +++ b/tools/perf/bpf_skel.mak @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0 +# Shared BPF Skeleton Generator Rules + +include $(srctree)/tools/scripts/Makefile.include + +# Shared foundational tooling always lives in util/bpf_skel +SKEL_TOOL_OUT := $(abspath $(OUTPUT)util/bpf_skel) +SKEL_TOOL_TMP_OUT := $(abspath $(SKEL_TOOL_OUT)/.tmp) + +# Component specific output lives in $(dir)/bpf_skel +SKEL_OUT := $(abspath $(OUTPUT)$(dir)/bpf_skel) +SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp) + +ifeq ($(CONFIG_PERF_BPF_SKEL),y) +BPFTOOL := $(SKEL_TOOL_TMP_OUT)/bootstrap/bpftool +VMLINUX_H := $(SKEL_TOOL_OUT)/vmlinux.h +bpf_skel_deps := $(BPFTOOL) $(VMLINUX_H) + +define get_sys_includes +$(shell $(1) $(2) -v -E - &1 \ + | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ +$(shell $(1) $(2) -dM -E - $@ + +.PRECIOUS: $(SKEL_TMP_OUT)/%.bpf.o +endif # CONFIG_PERF_BPF_SKEL diff --git a/tools/perf/util/Build b/tools/perf/util/Build index abc9a2926e85..40c258ac99b9 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -442,4 +442,17 @@ $(OUTPUT)%.pylint_log: % perf-util-y += $(PYLINT_TEST_LOGS) - +ifeq ($(CONFIG_PERF_BPF_SKEL),y) +include $(srctree)/tools/perf/bpf_skel.mak + +$(OUTPUT)util/bpf_ftrace.o: $(SKEL_OUT)/func_latency.skel.h +$(OUTPUT)util/bpf-filter.o: $(SKEL_OUT)/sample_filter.skel.h +$(OUTPUT)util/bpf_kwork_top.o: $(SKEL_OUT)/kwork_top.skel.h +$(OUTPUT)util/bpf_off_cpu.o: $(SKEL_OUT)/off_cpu.skel.h +$(OUTPUT)util/bpf-trace-summary.o: $(SKEL_OUT)/syscall_summary.skel.h 
+$(OUTPUT)util/bpf_counter_cgroup.o: $(SKEL_OUT)/bperf_cgroup.skel.h +$(OUTPUT)util/bpf_trace_augment.o: $(SKEL_OUT)/augmented_raw_syscalls.skel.h +$(OUTPUT)util/bpf_counter.o: $(SKEL_OUT)/bpf_prog_profiler.skel.h $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h +$(OUTPUT)util/bpf_lock_contention.o: $(SKEL_OUT)/lock_contention.skel.h +$(OUTPUT)util/bpf_kwork.o: $(SKEL_OUT)/kwork_trace.skel.h +endif -- 2.54.0.563.g4f69b47b94-goog