bpf_program__attach_kprobe_opts() documents single-kprobe attach through func_name, with an optional offset. For the PMU-based non-legacy path, func_name = NULL with an absolute address in offset already works as well, but that raw-address form is not described in the API. Document this existing non-legacy behavior (func_name = NULL with an absolute address in offset), and make the legacy tracefs/debugfs kprobe path reject this form explicitly, since legacy kprobes cannot take a raw address. Signed-off-by: Hoyeon Lee --- tools/lib/bpf/libbpf.c | 21 ++++++++++++--------- tools/lib/bpf/libbpf.h | 3 ++- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 9ea41f40dc82..9083f542a3b0 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -11523,7 +11523,8 @@ static int determine_uprobe_retprobe_bit(void) #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, - uint64_t offset, int pid, size_t ref_ctr_off) + uint64_t offset_or_addr, int pid, + size_t ref_ctr_off) { const size_t attr_sz = sizeof(struct perf_event_attr); struct perf_event_attr attr; @@ -11558,7 +11559,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name, attr.type = type; attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT; attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */ - attr.config2 = offset; /* kprobe_addr or probe_offset */ + attr.config2 = offset_or_addr; /* kprobe_addr or probe_offset */ /* pid filter is meaningful only for uprobes */ pfd = syscall(__NR_perf_event_open, &attr, @@ -11816,6 +11817,8 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, default: return libbpf_err_ptr(-EINVAL); } + if (!func_name && legacy) + return libbpf_err_ptr(-ENOTSUP); if (!legacy) { pfd = perf_event_open_probe(false /* uprobe */, retprobe, @@ -11835,21 +11838,21 @@ 
bpf_program__attach_kprobe_opts(const struct bpf_program *prog, offset, -1 /* pid */); } if (pfd < 0) { - err = -errno; - pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", + err = pfd; + pr_warn("prog '%s': failed to create %s '%s%s0x%zx' perf event: %s\n", prog->name, retprobe ? "kretprobe" : "kprobe", - func_name, offset, - errstr(err)); + func_name ?: "", func_name ? "+" : "", + offset, errstr(err)); goto err_out; } link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); err = libbpf_get_error(link); if (err) { close(pfd); - pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", + pr_warn("prog '%s': failed to attach to %s '%s%s0x%zx': %s\n", prog->name, retprobe ? "kretprobe" : "kprobe", - func_name, offset, - errstr(err)); + func_name ?: "", func_name ? "+" : "", + offset, errstr(err)); goto err_clean_legacy; } if (legacy) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 0be34852350f..f75f3ab0f20c 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -557,7 +557,7 @@ struct bpf_kprobe_opts { size_t sz; /* custom user-provided value fetchable through bpf_get_attach_cookie() */ __u64 bpf_cookie; - /* function's offset to install kprobe to */ + /* function offset, or raw address if func_name == NULL (non-legacy) */ size_t offset; /* kprobe is return probe */ bool retprobe; @@ -565,6 +565,7 @@ struct bpf_kprobe_opts { enum probe_attach_mode attach_mode; size_t :0; }; + #define bpf_kprobe_opts__last_field attach_mode LIBBPF_API struct bpf_link * -- 2.52.0 Currently, attach_probe covers manual single-kprobe attaches by func_name, but not the raw-address form that the PMU-based single-kprobe path can accept. This commit adds PERF and LINK raw-address subtests by resolving SYS_NANOSLEEP_KPROBE_NAME through kallsyms, passing the absolute address in bpf_kprobe_opts.offset with func_name = NULL, and verifying that kprobe and kretprobe are still triggered. 
It also verifies that LEGACY rejects the same form. Signed-off-by: Hoyeon Lee --- .../selftests/bpf/prog_tests/attach_probe.c | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 9e77e5da7097..817c4794d54e 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -123,6 +123,82 @@ static void test_attach_probe_manual(enum probe_attach_mode attach_mode) test_attach_probe_manual__destroy(skel); } +/* manual attach address-based kprobe/kretprobe testings */ +static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); + struct test_attach_probe_manual *skel; + unsigned long func_addr; + + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + return; + + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME); + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr")) + return; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + kprobe_opts.attach_mode = attach_mode; + kprobe_opts.retprobe = false; + kprobe_opts.offset = func_addr; + skel->links.handle_kprobe = + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + NULL, &kprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr")) + goto cleanup; + + kprobe_opts.retprobe = true; + skel->links.handle_kretprobe = + bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, + NULL, &kprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_kretprobe, + "attach_kretprobe_by_addr")) + goto cleanup; + + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); + +cleanup: + test_attach_probe_manual__destroy(skel); +} + +/* reject 
legacy address-based kprobe attach */ +static void test_attach_kprobe_legacy_by_addr_reject(void) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); + struct test_attach_probe_manual *skel; + unsigned long func_addr; + + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + return; + + func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME); + if (!ASSERT_NEQ(func_addr, 0UL, "func_addr")) + return; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY; + kprobe_opts.offset = func_addr; + skel->links.handle_kprobe = + bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + NULL, &kprobe_opts); + if (ASSERT_ERR_PTR(skel->links.handle_kprobe, + "attach_kprobe_legacy_by_addr")) + ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe), -ENOTSUP, + "attach_kprobe_legacy_by_addr_err"); + + test_attach_probe_manual__destroy(skel); +} + /* attach uprobe/uretprobe long event name testings */ static void test_attach_uprobe_long_event_name(void) { @@ -416,6 +492,12 @@ void test_attach_probe(void) test_attach_probe_manual(PROBE_ATTACH_MODE_PERF); if (test__start_subtest("manual-link")) test_attach_probe_manual(PROBE_ATTACH_MODE_LINK); + if (test__start_subtest("kprobe-perf-by-addr")) + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF); + if (test__start_subtest("kprobe-link-by-addr")) + test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK); + if (test__start_subtest("kprobe-legacy-by-addr-reject")) + test_attach_kprobe_legacy_by_addr_reject(); if (test__start_subtest("auto")) test_attach_probe_auto(skel); -- 2.52.0