If an fsession program is attached, the bit (1 << BPF_TRAMP_M_IS_RETURN) in
ctx[-1] is used to store the "is_return" flag. Introduce the function
bpf_fsession_is_return(), which tells whether the program is currently
running on function exit (fexit), and inline it in the verifier: for
fsession programs, the verifier rewrites calls to bpf_session_is_return()
into bpf_fsession_is_return().

Signed-off-by: Menglong Dong
Co-developed-by: Leon Hwang
Signed-off-by: Leon Hwang
---
v7:
- reuse the kfunc bpf_session_is_return() instead of introducing a new kfunc

v4:
- split out bpf_fsession_cookie() into a separate patch

v3:
- merge bpf_tracing_is_exit and bpf_fsession_cookie into a single patch

v2:
- store the session flags after the return value, instead of before nr_args
- inline bpf_tracing_is_exit, as Jiri suggested
---
 include/linux/bpf.h      |  5 +++++
 kernel/bpf/verifier.c    | 15 ++++++++++++++-
 kernel/trace/bpf_trace.c | 34 +++++++++++++++++++++++-----------
 3 files changed, 42 insertions(+), 12 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 565ca7052518..d996dd390681 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1229,6 +1229,9 @@ enum {
 #endif
 };
 
+#define BPF_TRAMP_M_NR_ARGS	0
+#define BPF_TRAMP_M_IS_RETURN	8
+
 struct bpf_tramp_links {
 	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
 	int nr_links;
@@ -3945,4 +3948,6 @@ static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 all
 	return 0;
 }
 
+bool bpf_fsession_is_return(void *ctx);
+
 #endif /* _LINUX_BPF_H */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bfff3f84fd91..d3709edd0e51 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12374,6 +12374,7 @@ enum special_kfunc_type {
 	KF_bpf_arena_alloc_pages,
 	KF_bpf_arena_free_pages,
 	KF_bpf_arena_reserve_pages,
+	KF_bpf_session_is_return,
 };
 
 BTF_ID_LIST(special_kfunc_list)
@@ -12451,6 +12452,7 @@ BTF_ID(func, bpf_task_work_schedule_resume_impl)
 BTF_ID(func, bpf_arena_alloc_pages)
 BTF_ID(func, bpf_arena_free_pages)
 BTF_ID(func, bpf_arena_reserve_pages)
+BTF_ID(func, bpf_session_is_return)
 
 static bool is_task_work_add_kfunc(u32 func_id)
 {
@@ -12505,7 +12507,8 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
 	struct bpf_reg_state *reg = &regs[regno];
 	bool arg_mem_size = false;
 
-	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
+	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+	    meta->func_id == special_kfunc_list[KF_bpf_session_is_return])
 		return KF_ARG_PTR_TO_CTX;
 
 	if (argno + 1 < nargs &&
@@ -22440,6 +22443,9 @@ static int specialize_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_desc
 	} else if (func_id == special_kfunc_list[KF_bpf_arena_free_pages]) {
 		if (env->insn_aux_data[insn_idx].non_sleepable)
 			addr = (unsigned long)bpf_arena_free_pages_non_sleepable;
+	} else if (func_id == special_kfunc_list[KF_bpf_session_is_return]) {
+		if (prog->expected_attach_type == BPF_TRACE_FSESSION)
+			addr = (unsigned long)bpf_fsession_is_return;
 	}
 	desc->addr = addr;
 	return 0;
@@ -22558,6 +22564,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
 		*cnt = 1;
+	} else if (desc->func_id == special_kfunc_list[KF_bpf_session_is_return] &&
+		   env->prog->expected_attach_type == BPF_TRACE_FSESSION) {
+		/* Extract the is_return bit from the flags word at ctx - 8 */
+		insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
+		insn_buf[1] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, BPF_TRAMP_M_IS_RETURN);
+		insn_buf[2] = BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1);
+		*cnt = 3;
 	}
 
 	if (env->insn_aux_data[insn_idx].arg_prog) {
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 736b32cf2195..9d3bf3bbe8f6 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3314,6 +3314,12 @@ static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
 }
 #endif /* CONFIG_UPROBES */
 
+bool bpf_fsession_is_return(void *ctx)
+{
+	/* This function call is inlined by the verifier. */
+	return !!(((u64 *)ctx)[-1] & (1 << BPF_TRAMP_M_IS_RETURN));
+}
+
 __bpf_kfunc_start_defs();
 
 __bpf_kfunc bool bpf_session_is_return(void *ctx)
@@ -3334,34 +3340,40 @@ __bpf_kfunc __u64 *bpf_session_cookie(void *ctx)
 
 __bpf_kfunc_end_defs();
 
-BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
+BTF_KFUNCS_START(session_kfunc_set_ids)
 BTF_ID_FLAGS(func, bpf_session_is_return)
 BTF_ID_FLAGS(func, bpf_session_cookie)
-BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
+BTF_KFUNCS_END(session_kfunc_set_ids)
 
-static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
+static int bpf_session_filter(const struct bpf_prog *prog, u32 kfunc_id)
 {
-	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
+	if (!btf_id_set8_contains(&session_kfunc_set_ids, kfunc_id))
 		return 0;
 
-	if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
+	if (!is_kprobe_session(prog) && !is_uprobe_session(prog) &&
+	    prog->expected_attach_type != BPF_TRACE_FSESSION)
 		return -EACCES;
 
 	return 0;
 }
 
-static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
+static const struct btf_kfunc_id_set bpf_session_kfunc_set = {
 	.owner = THIS_MODULE,
-	.set = &kprobe_multi_kfunc_set_ids,
-	.filter = bpf_kprobe_multi_filter,
+	.set = &session_kfunc_set_ids,
+	.filter = bpf_session_filter,
 };
 
-static int __init bpf_kprobe_multi_kfuncs_init(void)
+static int __init bpf_trace_kfuncs_init(void)
 {
-	return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+	int err = 0;
+
+	err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_session_kfunc_set);
+	err = err ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_session_kfunc_set);
+
+	return err;
 }
-late_initcall(bpf_kprobe_multi_kfuncs_init);
+late_initcall(bpf_trace_kfuncs_init);
 
 typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk);
-- 
2.52.0
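
For illustration only (not part of the patch), a minimal sketch of the
intended BPF-side usage. The "fsession/" section name, the traced function
do_sys_openat2, and the program name are assumptions based on this series;
the extern declaration mirrors the void *ctx kfunc signature shown in the
diff above. For a BPF_TRACE_FSESSION program, the verifier is expected to
replace the bpf_session_is_return() call with the three-instruction
sequence added to fixup_kfunc_call():

  // SPDX-License-Identifier: GPL-2.0
  /* Hypothetical fsession program (sketch); section name and target
   * function are assumptions, not defined by this patch.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  /* Kfunc signature as used in this series (takes the program ctx). */
  extern bool bpf_session_is_return(void *ctx) __ksym;

  SEC("fsession/do_sys_openat2")
  int BPF_PROG(on_openat2)
  {
  	/* Inlined by the verifier into:
  	 *   r0 = *(u64 *)(ctx - 8);
  	 *   r0 >>= BPF_TRAMP_M_IS_RETURN;
  	 *   r0 &= 1;
  	 */
  	if (bpf_session_is_return(ctx))
  		bpf_printk("fexit: do_sys_openat2");
  	else
  		bpf_printk("fentry: do_sys_openat2");
  	return 0;
  }

One program attached at both entry and exit avoids loading two separate
fentry/fexit programs, which is the point of reusing the session kfunc here.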