From: Mykyta Yatsenko Add two subtests: - success: Attach a sleepable BPF program to the faultable sys_enter tracepoint (tp_btf.s/sys_enter). Verify the program is triggered by a syscall. - reject_non_faultable: Attempt to attach a sleepable BPF program to a non-faultable tracepoint (tp_btf.s/sched_switch). Verify that attachment is rejected. Signed-off-by: Mykyta Yatsenko --- .../selftests/bpf/prog_tests/sleepable_raw_tp.c | 56 ++++++++++++++++++++++ .../selftests/bpf/progs/test_sleepable_raw_tp.c | 43 +++++++++++++++++ .../bpf/progs/test_sleepable_raw_tp_fail.c | 16 +++++++ tools/testing/selftests/bpf/verifier/sleepable.c | 5 +- 4 files changed, 117 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sleepable_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/sleepable_raw_tp.c new file mode 100644 index 0000000000000000000000000000000000000000..9b0ec7cc4cacf6ee3d2e0cdc23f63388c9613384 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sleepable_raw_tp.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "test_sleepable_raw_tp.skel.h"
+#include "test_sleepable_raw_tp_fail.skel.h"
+
+static void test_sleepable_raw_tp_success(void)
+{
+	struct test_sleepable_raw_tp *skel;
+	int err;
+
+	skel = test_sleepable_raw_tp__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_load"))
+		return;
+
+	skel->bss->target_pid = getpid();
+
+	err = test_sleepable_raw_tp__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto cleanup;
+
+	syscall(__NR_nanosleep, &(struct timespec){ .tv_nsec = 555 }, NULL);
+
+	ASSERT_EQ(skel->bss->triggered, 1, "triggered");
+	ASSERT_EQ(skel->bss->err, 0, "err");
+	ASSERT_EQ(skel->bss->copied_tv_nsec, 555, "copied_tv_nsec");
+
+cleanup:
+	test_sleepable_raw_tp__destroy(skel);
+}
+
+static void test_sleepable_raw_tp_reject(void)
+{
+	struct test_sleepable_raw_tp_fail *skel;
+	int err;
+
+	skel = test_sleepable_raw_tp_fail__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_load"))
+		goto cleanup;
+
+	err = test_sleepable_raw_tp_fail__attach(skel);
+	ASSERT_ERR(err, "skel_attach_should_fail");
+
+cleanup:
+	test_sleepable_raw_tp_fail__destroy(skel);
+}
+
+void test_sleepable_raw_tp(void)
+{
+	if (test__start_subtest("success"))
+		test_sleepable_raw_tp_success();
+	if (test__start_subtest("reject_non_faultable"))
+		test_sleepable_raw_tp_reject();
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sleepable_raw_tp.c b/tools/testing/selftests/bpf/progs/test_sleepable_raw_tp.c
new file mode 100644
index 0000000000000000000000000000000000000000..ebacc766df573c4ab725202b90c0a9b6d32970a1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sleepable_raw_tp.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
*/
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include <asm/unistd.h>
+
+char _license[] SEC("license") = "GPL";
+
+int target_pid;
+int triggered;
+long err;
+long copied_tv_nsec;
+
+SEC("tp_btf.s/sys_enter")
+int BPF_PROG(test_sleepable_sys_enter, struct pt_regs *regs, long id)
+{
+	struct task_struct *task = bpf_get_current_task_btf();
+	struct __kernel_timespec *ts;
+	long tv_nsec;
+
+	if (task->pid != target_pid)
+		return 0;
+
+	if (id != __NR_nanosleep)
+		return 0;
+
+	ts = (void *)PT_REGS_PARM1_CORE_SYSCALL(regs);
+
+	/*
+	 * Use bpf_copy_from_user() - a sleepable helper - to read user memory.
+	 * This exercises the sleepable execution path of raw tracepoints.
+	 */
+	err = bpf_copy_from_user(&tv_nsec, sizeof(tv_nsec), &ts->tv_nsec);
+	if (err)
+		return err;
+
+	copied_tv_nsec = tv_nsec;
+	triggered = 1;
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sleepable_raw_tp_fail.c b/tools/testing/selftests/bpf/progs/test_sleepable_raw_tp_fail.c
new file mode 100644
index 0000000000000000000000000000000000000000..ef5dc3888df6d826f6b1d1adb211b439b71d6322
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sleepable_raw_tp_fail.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
*/
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Sleepable program on a non-faultable tracepoint should fail at attach */
+SEC("tp_btf.s/sched_switch")
+int BPF_PROG(test_sleepable_sched_switch, bool preempt,
+	     struct task_struct *prev, struct task_struct *next)
+{
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/verifier/sleepable.c b/tools/testing/selftests/bpf/verifier/sleepable.c
index 1f0d2bdc673f6e84e8e44be96c72977da0f73ab7..39522b7cd317080de42233afc180347b49fdff34 100644
--- a/tools/testing/selftests/bpf/verifier/sleepable.c
+++ b/tools/testing/selftests/bpf/verifier/sleepable.c
@@ -76,7 +76,7 @@
 	.runs = -1,
 },
 {
-	"sleepable raw tracepoint reject",
+	"sleepable raw tracepoint accept",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
@@ -84,8 +84,7 @@
 	.prog_type = BPF_PROG_TYPE_TRACING,
 	.expected_attach_type = BPF_TRACE_RAW_TP,
 	.kfunc = "sched_switch",
-	.result = REJECT,
-	.errstr = "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable",
+	.result = ACCEPT,
 	.flags = BPF_F_SLEEPABLE,
 	.runs = -1,
 },
-- 
2.53.0