btf_kfunc_id_set_contains() is called by fetch_kfunc_meta() in the BPF verifier to get the kfunc flags stored in the .BTF_ids ELF section. If it returns NULL instead of a valid pointer, it's interpreted as illegal kfunc usage, failing the verification. There are two potential reasons for btf_kfunc_id_set_contains() to return NULL: 1. The provided kfunc BTF id is not present in the relevant kfunc id sets. 2. The kfunc is not allowed, as determined by the program-type-specific filter [1]. The filter functions accept a pointer to `struct bpf_prog`, so they might implicitly depend on earlier stages of verification, when bpf_prog members are set. For example, bpf_qdisc_kfunc_filter() in linux/net/sched/bpf_qdisc.c inspects prog->aux->st_ops [2], which is initialized in: check_attach_btf_id() -> check_struct_ops_btf_id() So far this hasn't been an issue, because fetch_kfunc_meta() is the only caller of btf_kfunc_id_set_contains(). However, in subsequent patches of this series it is necessary to inspect kfunc flags earlier in the BPF verifier, in add_kfunc_call(). To resolve this, refactor btf_kfunc_id_set_contains() into two interface functions: * btf_kfunc_flags(), which simply returns a pointer to kfunc_flags without applying the filters * btf_kfunc_is_allowed(), which both checks for kfunc_flags existence (a requirement for a kfunc to be allowed) and applies the prog filters See [3] for the previous version of this patch. [1] https://lore.kernel.org/all/20230519225157.760788-7-aditi.ghag@isovalent.com/ [2] https://lore.kernel.org/all/20250409214606.2000194-4-ameryhung@gmail.com/ [3] https://lore.kernel.org/bpf/20251029190113.3323406-3-ihor.solodrai@linux.dev/ Signed-off-by: Ihor Solodrai --- include/linux/btf.h | 4 +-- kernel/bpf/btf.c | 70 ++++++++++++++++++++++++++++++++----------- kernel/bpf/verifier.c | 6 ++-- 3 files changed, 58 insertions(+), 22 deletions(-) diff --git a/include/linux/btf.h b/include/linux/btf.h index 691f09784933..bd261015c4bc 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -574,8 +574,8 @@ const char *btf_name_by_offset(const struct btf *btf, u32 offset); const char *btf_str_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); -u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id, - const struct bpf_prog *prog); +u32 *btf_kfunc_flags(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog); +bool btf_kfunc_is_allowed(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog); u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog); int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 539c9fdea41d..32974c11664d 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8640,24 +8640,17 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, return ret; } -static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, - enum btf_kfunc_hook hook, - u32 kfunc_btf_id, - const struct bpf_prog *prog) +static u32 *btf_kfunc_id_set_contains(const struct btf *btf, + enum btf_kfunc_hook hook, + u32 kfunc_btf_id) { - struct btf_kfunc_hook_filter *hook_filter; struct btf_id_set8 *set; - u32 *id, i; + u32 *id; if (hook >= BTF_KFUNC_HOOK_MAX) return NULL; if (!btf->kfunc_set_tab) return NULL; - hook_filter = &btf->kfunc_set_tab->hook_filters[hook]; - for (i = 0; i < hook_filter->nr_filters; i++) { - if 
(hook_filter->filters[i](prog, kfunc_btf_id)) - return NULL; - } set = btf->kfunc_set_tab->sets[hook]; if (!set) return NULL; @@ -8668,6 +8661,28 @@ static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, return id + 1; } +static bool __btf_kfunc_is_allowed(const struct btf *btf, + enum btf_kfunc_hook hook, + u32 kfunc_btf_id, + const struct bpf_prog *prog) +{ + struct btf_kfunc_hook_filter *hook_filter; + int i; + + if (hook >= BTF_KFUNC_HOOK_MAX) + return false; + if (!btf->kfunc_set_tab) + return false; + + hook_filter = &btf->kfunc_set_tab->hook_filters[hook]; + for (i = 0; i < hook_filter->nr_filters; i++) { + if (hook_filter->filters[i](prog, kfunc_btf_id)) + return false; + } + + return true; +} + static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) { switch (prog_type) { @@ -8715,6 +8730,26 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) } } +bool btf_kfunc_is_allowed(const struct btf *btf, + u32 kfunc_btf_id, + const struct bpf_prog *prog) +{ + enum bpf_prog_type prog_type = resolve_prog_type(prog); + enum btf_kfunc_hook hook; + u32 *kfunc_flags; + + kfunc_flags = btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id); + if (kfunc_flags && __btf_kfunc_is_allowed(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog)) + return true; + + hook = bpf_prog_type_to_kfunc_hook(prog_type); + kfunc_flags = btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id); + if (kfunc_flags && __btf_kfunc_is_allowed(btf, hook, kfunc_btf_id, prog)) + return true; + + return false; +} + /* Caution: * Reference to the module (obtained using btf_try_get_module) corresponding to * the struct btf *MUST* be held when calling this function from verifier @@ -8722,26 +8757,27 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) * keeping the reference for the duration of the call provides the necessary * protection for looking up a well-formed btf->kfunc_set_tab. 
*/ -u32 *btf_kfunc_id_set_contains(const struct btf *btf, - u32 kfunc_btf_id, - const struct bpf_prog *prog) +u32 *btf_kfunc_flags(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog) { enum bpf_prog_type prog_type = resolve_prog_type(prog); enum btf_kfunc_hook hook; u32 *kfunc_flags; - kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog); + kfunc_flags = btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id); if (kfunc_flags) return kfunc_flags; hook = bpf_prog_type_to_kfunc_hook(prog_type); - return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog); + return btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id); } u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id, const struct bpf_prog *prog) { - return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog); + if (!__btf_kfunc_is_allowed(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog)) + return NULL; + + return btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id); } static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 53635ea2e41b..bee49c9cde21 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13722,10 +13722,10 @@ static int fetch_kfunc_meta(struct bpf_verifier_env *env, *kfunc_name = func_name; func_proto = btf_type_by_id(desc_btf, func->type); - kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); - if (!kfunc_flags) { + if (!btf_kfunc_is_allowed(desc_btf, func_id, env->prog)) return -EACCES; - } + + kfunc_flags = btf_kfunc_flags(desc_btf, func_id, env->prog); memset(meta, 0, sizeof(*meta)); meta->btf = desc_btf; -- 2.52.0 There is code duplication between add_kfunc_call() and fetch_kfunc_meta() collecting information about a kfunc from BTF. Introduce struct bpf_kfunc_meta to hold common kfunc BTF data and implement fetch_kfunc_meta() to fill it in, instead of struct bpf_kfunc_call_arg_meta directly. Then use these in add_kfunc_call() and (new) fetch_kfunc_arg_meta() functions, and fixup previous usages of fetch_kfunc_meta() to fetch_kfunc_arg_meta(). Besides the code dedup, this change enables add_kfunc_call() to access kfunc->flags. 
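To illustrate the intended call pattern, here is a minimal sketch of a caller of the new helper (example_caller() is hypothetical and not part of this patch; it only uses the interfaces introduced below):

static int example_caller(struct bpf_verifier_env *env, s32 func_id, s16 offset)
{
	struct bpf_kfunc_meta kfunc;
	int err;

	/* one helper collects btf, id, name, proto and flags for a kfunc call */
	err = fetch_kfunc_meta(env, func_id, offset, &kfunc);
	if (err)
		return err;

	/* kfunc.flags is NULL if func_id is not in any relevant kfunc id set */
	if (!kfunc.flags)
		return -EACCES;

	/* callers such as add_kfunc_call() may now branch on *kfunc.flags */
	return 0;
}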
Signed-off-by: Ihor Solodrai --- kernel/bpf/verifier.c | 156 ++++++++++++++++++++++++------------------ 1 file changed, 91 insertions(+), 65 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bee49c9cde21..9618e4c37fce 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -294,6 +294,14 @@ struct bpf_call_arg_meta { s64 const_map_key; }; +struct bpf_kfunc_meta { + struct btf *btf; + const struct btf_type *proto; + const char *name; + const u32 *flags; + s32 id; +}; + struct bpf_kfunc_call_arg_meta { /* In parameters */ struct btf *btf; @@ -3263,16 +3271,68 @@ static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) return btf_vmlinux ?: ERR_PTR(-ENOENT); } -static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) +static int fetch_kfunc_meta(struct bpf_verifier_env *env, + s32 func_id, + s16 offset, + struct bpf_kfunc_meta *kfunc) { const struct btf_type *func, *func_proto; + const char *func_name; + u32 *kfunc_flags; + struct btf *btf; + + if (func_id <= 0) { + verbose(env, "invalid kernel function btf_id %d\n", func_id); + return -EINVAL; + } + + btf = find_kfunc_desc_btf(env, offset); + if (IS_ERR(btf)) { + verbose(env, "failed to find BTF for kernel function\n"); + return PTR_ERR(btf); + } + + /* + * Note that kfunc_flags may be NULL at this point, which + * means that we couldn't find func_id in any relevant + * kfunc_id_set. This most likely indicates an invalid kfunc + * call. However we don't fail with an error here, + * and let the caller decide what to do with NULL kfunc->flags. + */ + kfunc_flags = btf_kfunc_flags(btf, func_id, env->prog); + + func = btf_type_by_id(btf, func_id); + if (!func || !btf_type_is_func(func)) { + verbose(env, "kernel btf_id %d is not a function\n", func_id); + return -EINVAL; + } + + func_name = btf_name_by_offset(btf, func->name_off); + func_proto = btf_type_by_id(btf, func->type); + if (!func_proto || !btf_type_is_func_proto(func_proto)) { + verbose(env, "kernel function btf_id %d does not have a valid func_proto\n", + func_id); + return -EINVAL; + } + + memset(kfunc, 0, sizeof(*kfunc)); + kfunc->btf = btf; + kfunc->id = func_id; + kfunc->name = func_name; + kfunc->proto = func_proto; + kfunc->flags = kfunc_flags; + + return 0; +} + +static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) +{ struct bpf_kfunc_btf_tab *btf_tab; struct btf_func_model func_model; struct bpf_kfunc_desc_tab *tab; struct bpf_prog_aux *prog_aux; + struct bpf_kfunc_meta kfunc; struct bpf_kfunc_desc *desc; - const char *func_name; - struct btf *desc_btf; unsigned long addr; int err; @@ -3322,12 +3382,6 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) prog_aux->kfunc_btf_tab = btf_tab; } - desc_btf = find_kfunc_desc_btf(env, offset); - if (IS_ERR(desc_btf)) { - verbose(env, "failed to find BTF for kernel function\n"); - return PTR_ERR(desc_btf); - } - if (find_kfunc_desc(env->prog, func_id, offset)) return 0; @@ -3336,24 +3390,13 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) return -E2BIG; } - func = btf_type_by_id(desc_btf, func_id); - if (!func || !btf_type_is_func(func)) { - verbose(env, "kernel btf_id %u is not a function\n", - func_id); - return -EINVAL; - } - func_proto = btf_type_by_id(desc_btf, func->type); - if (!func_proto || !btf_type_is_func_proto(func_proto)) { - verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", - func_id); - return -EINVAL; - } + err = 
fetch_kfunc_meta(env, func_id, offset, &kfunc); + if (err) + return err; - func_name = btf_name_by_offset(desc_btf, func->name_off); - addr = kallsyms_lookup_name(func_name); + addr = kallsyms_lookup_name(kfunc.name); if (!addr) { - verbose(env, "cannot find address for kernel function %s\n", - func_name); + verbose(env, "cannot find address for kernel function %s\n", kfunc.name); return -EINVAL; } @@ -3363,9 +3406,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) return err; } - err = btf_distill_func_proto(&env->log, desc_btf, - func_proto, func_name, - &func_model); + err = btf_distill_func_proto(&env->log, kfunc.btf, kfunc.proto, kfunc.name, &func_model); if (err) return err; @@ -13695,44 +13736,28 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return 0; } -static int fetch_kfunc_meta(struct bpf_verifier_env *env, - struct bpf_insn *insn, - struct bpf_kfunc_call_arg_meta *meta, - const char **kfunc_name) +static int fetch_kfunc_arg_meta(struct bpf_verifier_env *env, + s32 func_id, + s16 offset, + struct bpf_kfunc_call_arg_meta *meta) { - const struct btf_type *func, *func_proto; - u32 func_id, *kfunc_flags; - const char *func_name; - struct btf *desc_btf; - - if (kfunc_name) - *kfunc_name = NULL; - - if (!insn->imm) - return -EINVAL; + struct bpf_kfunc_meta kfunc; + int err; - desc_btf = find_kfunc_desc_btf(env, insn->off); - if (IS_ERR(desc_btf)) - return PTR_ERR(desc_btf); + err = fetch_kfunc_meta(env, func_id, offset, &kfunc); + if (err) + return err; - func_id = insn->imm; - func = btf_type_by_id(desc_btf, func_id); - func_name = btf_name_by_offset(desc_btf, func->name_off); - if (kfunc_name) - *kfunc_name = func_name; - func_proto = btf_type_by_id(desc_btf, func->type); + memset(meta, 0, sizeof(*meta)); + meta->btf = kfunc.btf; + meta->func_id = kfunc.id; + meta->func_proto = kfunc.proto; + meta->func_name = kfunc.name; - if (!btf_kfunc_is_allowed(desc_btf, func_id, env->prog)) + if (!kfunc.flags || !btf_kfunc_is_allowed(kfunc.btf, kfunc.id, env->prog)) return -EACCES; - kfunc_flags = btf_kfunc_flags(desc_btf, func_id, env->prog); - - memset(meta, 0, sizeof(*meta)); - meta->btf = desc_btf; - meta->func_id = func_id; - meta->kfunc_flags = *kfunc_flags; - meta->func_proto = func_proto; - meta->func_name = func_name; + meta->kfunc_flags = *kfunc.flags; return 0; } @@ -13937,12 +13962,13 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (!insn->imm) return 0; - err = fetch_kfunc_meta(env, insn, &meta, &func_name); - if (err == -EACCES && func_name) - verbose(env, "calling kernel function %s is not allowed\n", func_name); + err = fetch_kfunc_arg_meta(env, insn->imm, insn->off, &meta); + if (err == -EACCES && meta.func_name) + verbose(env, "calling kernel function %s is not allowed\n", meta.func_name); if (err) return err; desc_btf = meta.btf; + func_name = meta.func_name; insn_aux = &env->insn_aux_data[insn_idx]; insn_aux->is_iter_next = is_iter_next_kfunc(&meta); @@ -17729,7 +17755,7 @@ static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call if (bpf_pseudo_kfunc_call(call)) { int err; - err = fetch_kfunc_meta(env, call, &meta, NULL); + err = fetch_kfunc_arg_meta(env, call->imm, call->off, &meta); if (err < 0) /* error would be reported later */ return false; @@ -18237,7 +18263,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env) } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { struct bpf_kfunc_call_arg_meta meta; - ret = 
fetch_kfunc_meta(env, insn, &meta, NULL); + ret = fetch_kfunc_arg_meta(env, insn->imm, insn->off, &meta); if (ret == 0 && is_iter_next_kfunc(&meta)) { mark_prune_point(env, t); /* Checking and saving state checkpoints at iter_next() call -- 2.52.0 A kernel function bpf_foo marked with the KF_IMPLICIT_ARGS flag is expected to have two associated types in BTF: * `bpf_foo` with a function prototype that omits implicit arguments * `bpf_foo_impl` with a function prototype that matches the kernel declaration of `bpf_foo`, but doesn't have a ksym associated with its name In order to support kfuncs with implicit arguments, the verifier has to know how to resolve a call of `bpf_foo` to the correct BTF function prototype and address. To implement this, kfunc flags are checked for KF_IMPLICIT_ARGS in add_kfunc_call(). For such kfuncs the BTF func prototype is adjusted to the one found for the `bpf_foo_impl` function (func_name + "_impl" suffix, by convention) in BTF. This effectively changes the signature of the `bpf_foo` kfunc in the context of verification: from one without implicit args to the one with the full argument list. Whether a kfunc argument is implicit or not is determined by is_kfunc_arg_implicit(). The values of implicit arguments are by design provided by the verifier, and so they can only be of particular types. In this patch the only allowed implicit arg type is a pointer to struct bpf_prog_aux. The __prog args (usually void *) are also considered implicit for backwards compatibility. In order to enable the verifier to correctly set an implicit bpf_prog_aux arg value at runtime, is_kfunc_arg_prog() is extended to check for the arg type. At the point when the prog arg is determined in check_kfunc_args(), the kfunc with implicit args already has a prototype with the full argument list, so the existing value patching mechanism just works. If a new kfunc with KF_IMPLICIT_ARGS is declared for an existing kfunc that uses a __prog argument (a legacy case), the prototype substitution works in exactly the same way, assuming the kfunc follows the _impl naming convention. The difference is only in how the _impl prototype is added to the BTF, which is not the verifier's concern. See a subsequent resolve_btfids patch for details. In check_kfunc_call(), reset the subreg_def of registers holding implicit arguments to correctly track zero extensions. Signed-off-by: Ihor Solodrai --- include/linux/btf.h | 1 + kernel/bpf/verifier.c | 70 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 2 deletions(-) diff --git a/include/linux/btf.h b/include/linux/btf.h index bd261015c4bc..f64cc40ab96f 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -78,6 +78,7 @@ #define KF_ARENA_RET (1 << 13) /* kfunc returns an arena pointer */ #define KF_ARENA_ARG1 (1 << 14) /* kfunc takes an arena pointer as its first argument */ #define KF_ARENA_ARG2 (1 << 15) /* kfunc takes an arena pointer as its second argument */ +#define KF_IMPLICIT_ARGS (1 << 16) /* kfunc has implicit arguments supplied by the verifier */ /* * Tag marking a kernel function as a kfunc. 
This is meant to minimize the diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9618e4c37fce..5541ab674e30 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3271,6 +3271,38 @@ static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) return btf_vmlinux ?: ERR_PTR(-ENOENT); } +#define KF_IMPL_SUFFIX "_impl" + +static const struct btf_type *find_kfunc_impl_proto(struct bpf_verifier_env *env, + struct btf *btf, + const char *func_name) +{ + char impl_name[KSYM_SYMBOL_LEN]; + const struct btf_type *func; + s32 impl_id; + int len; + + len = snprintf(impl_name, sizeof(impl_name), "%s%s", func_name, KF_IMPL_SUFFIX); + if (len < 0 || len >= sizeof(impl_name)) { + verbose(env, "function name %s%s is too long\n", func_name, KF_IMPL_SUFFIX); + return NULL; + } + + impl_id = btf_find_by_name_kind(btf, impl_name, BTF_KIND_FUNC); + if (impl_id <= 0) { + verbose(env, "cannot find function %s in BTF\n", impl_name); + return NULL; + } + + func = btf_type_by_id(btf, impl_id); + if (!func || !btf_type_is_func(func)) { + verbose(env, "%s (btf_id %d) is not a function\n", impl_name, impl_id); + return NULL; + } + + return btf_type_by_id(btf, func->type); +} + static int fetch_kfunc_meta(struct bpf_verifier_env *env, s32 func_id, s16 offset, @@ -3308,7 +3340,16 @@ static int fetch_kfunc_meta(struct bpf_verifier_env *env, } func_name = btf_name_by_offset(btf, func->name_off); - func_proto = btf_type_by_id(btf, func->type); + + /* + * An actual prototype of a kfunc with KF_IMPLICIT_ARGS flag + * can be found through the counterpart _impl kfunc. + */ + if (unlikely(kfunc_flags && KF_IMPLICIT_ARGS & *kfunc_flags)) + func_proto = find_kfunc_impl_proto(env, btf, func_name); + else + func_proto = btf_type_by_id(btf, func->type); + if (!func_proto || !btf_type_is_func_proto(func_proto)) { verbose(env, "kernel function btf_id %d does not have a valid func_proto\n", func_id); @@ -12173,9 +12214,16 @@ static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param return btf_param_match_suffix(btf, arg, "__irq_flag"); } +static bool is_kfunc_arg_prog_aux(const struct btf *btf, const struct btf_param *arg); + static bool is_kfunc_arg_prog(const struct btf *btf, const struct btf_param *arg) { - return btf_param_match_suffix(btf, arg, "__prog"); + return btf_param_match_suffix(btf, arg, "__prog") || is_kfunc_arg_prog_aux(btf, arg); +} + +static bool is_kfunc_arg_implicit(const struct btf *btf, const struct btf_param *arg) +{ + return is_kfunc_arg_prog(btf, arg); } static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, @@ -12206,6 +12254,7 @@ enum { KF_ARG_WORKQUEUE_ID, KF_ARG_RES_SPIN_LOCK_ID, KF_ARG_TASK_WORK_ID, + KF_ARG_PROG_AUX_ID }; BTF_ID_LIST(kf_arg_btf_ids) @@ -12217,6 +12266,7 @@ BTF_ID(struct, bpf_rb_node) BTF_ID(struct, bpf_wq) BTF_ID(struct, bpf_res_spin_lock) BTF_ID(struct, bpf_task_work) +BTF_ID(struct, bpf_prog_aux) static bool __is_kfunc_ptr_arg_type(const struct btf *btf, const struct btf_param *arg, int type) @@ -12297,6 +12347,11 @@ static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf return true; } +static bool is_kfunc_arg_prog_aux(const struct btf *btf, const struct btf_param *arg) +{ + return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_PROG_AUX_ID); +} + /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env, const struct btf *btf, @@ -14303,6 +14358,17 @@ static int check_kfunc_call(struct 
bpf_verifier_env *env, struct bpf_insn *insn, for (i = 0; i < nargs; i++) { u32 regno = i + 1; + /* + * Implicit kfunc arguments are set after main verification pass. + * For correct tracking of zero-extensions we have to reset subreg_def for such + * args. Otherwise mark_btf_func_reg_size() will be inspecting subreg_def of regs + * from an earlier (irrelevant) point in the program, which may lead to an error + * in opt_subreg_zext_lo32_rnd_hi32(). + */ + if (unlikely(KF_IMPLICIT_ARGS & meta.kfunc_flags + && is_kfunc_arg_implicit(desc_btf, &args[i]))) + regs[regno].subreg_def = DEF_NOT_SUBREG; + t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); if (btf_type_is_ptr(t)) mark_btf_func_reg_size(env, regno, sizeof(void *)); -- 2.52.0 Implement BTF modifications in resolve_btfids to support BPF kernel functions with implicit arguments. For a kfunc marked with the KF_IMPLICIT_ARGS flag, a new function prototype that does not have the implicit arguments is added to BTF. The kfunc's prototype is then updated to the new one in BTF. This prototype is the intended interface for BPF programs. An _impl function is added to BTF to make the original kfunc prototype searchable for the BPF verifier. If an _impl function already exists in BTF, it's interpreted as a legacy case, and this step is skipped. Whether an argument is implicit is determined by its type: currently only `struct bpf_prog_aux *` is supported. As a result, the BTF associated with the kfunc is changed from __bpf_kfunc bpf_foo(int arg1, struct bpf_prog_aux *aux); into bpf_foo_impl(int arg1, struct bpf_prog_aux *aux); __bpf_kfunc bpf_foo(int arg1); For more context see previous discussions and patches [1][2]. [1] https://lore.kernel.org/dwarves/ba1650aa-fafd-49a8-bea4-bdddee7c38c9@linux.dev/ [2] https://lore.kernel.org/bpf/20251029190113.3323406-1-ihor.solodrai@linux.dev/ Signed-off-by: Ihor Solodrai --- tools/bpf/resolve_btfids/main.c | 282 ++++++++++++++++++++++++++++++++ 1 file changed, 282 insertions(+) diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index df39982f51df..b361e726fa36 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -152,6 +152,18 @@ struct object { int nr_typedefs; }; +#define KF_IMPLICIT_ARGS (1 << 16) +#define KF_IMPL_SUFFIX "_impl" +#define MAX_BPF_FUNC_REG_ARGS 5 +#define MAX_KFUNCS 256 +#define MAX_DECL_TAGS (MAX_KFUNCS * 4) + +struct btf2btf_context { + struct btf *btf; + u32 nr_decl_tags; + s32 decl_tags[MAX_DECL_TAGS]; +}; + static int verbose; static int warnings; @@ -972,6 +984,273 @@ static int patch_btfids(const char *btfids_path, const char *elf_path) return err; } +static s64 collect_kfunc_ids_by_flags(struct object *obj, + u32 flags, + s32 kfunc_ids[], + const u32 kfunc_ids_sz) +{ + Elf_Data *data = obj->efile.idlist; + struct rb_node *next; + s64 nr_kfuncs = 0; + int i; + + next = rb_first(&obj->sets); + while (next) { + struct btf_id_set8 *set8 = NULL; + unsigned long addr, off; + struct btf_id *id; + + id = rb_entry(next, struct btf_id, rb_node); + + if (id->kind != BTF_ID_KIND_SET8) + goto skip; + + addr = id->addr[0]; + off = addr - obj->efile.idlist_addr; + set8 = data->d_buf + off; + + for (i = 0; i < set8->cnt; i++) { + if (set8->pairs[i].flags & flags) { + if (nr_kfuncs >= kfunc_ids_sz) { + pr_err("ERROR: resolve_btfids: too many kfuncs with flags %u - limit %d\n", + flags, kfunc_ids_sz); + return -E2BIG; + } + kfunc_ids[nr_kfuncs++] = set8->pairs[i].id; + } + } +skip: + next = rb_next(next); + } + + return nr_kfuncs; +} + +static 
const struct btf_type *btf__unqualified_type_by_id(const struct btf *btf, s32 type_id) +{ + const struct btf_type *t = btf__type_by_id(btf, type_id); + + while (btf_is_mod(t)) + t = btf__type_by_id(btf, t->type); + + return t; +} + +/* Implicit BPF kfunc arguments can only be of particular types */ +static bool btf__is_kf_implicit_arg(const struct btf *btf, const struct btf_param *p) +{ + static const char *const kf_implicit_arg_types[] = { + "bpf_prog_aux", + }; + const struct btf_type *t; + const char *name; + + t = btf__unqualified_type_by_id(btf, p->type); + if (!btf_is_ptr(t)) + return false; + + t = btf__unqualified_type_by_id(btf, t->type); + if (!btf_is_struct(t)) + return false; + + name = btf__name_by_offset(btf, t->name_off); + if (!name) + return false; + + for (int i = 0; i < ARRAY_SIZE(kf_implicit_arg_types); i++) + if (strcmp(name, kf_implicit_arg_types[i]) == 0) + return true; + + return false; +} + +/* + * For a kfunc with KF_IMPLICIT_ARGS we do the following: + * 1. Add a new function with _impl suffix in the name, with the prototype + * of the original kfunc. + * 2. Add all decl tags except "bpf_kfunc" for the _impl func. + * 3. Add a new function prototype with modified list of arguments: + * omitting implicit args. + * 4. Change the prototype of the original kfunc to the new one. + * + * This way we transform the BTF associated with the kfunc from + * __bpf_kfunc bpf_foo(int arg1, void *implicit_arg); + * into + * bpf_foo_impl(int arg1, void *implicit_arg); + * __bpf_kfunc bpf_foo(int arg1); + * + * If a kfunc with KF_IMPLICIT_ARGS already has an _impl counterpart + * in BTF, then it's a legacy case: an _impl function is declared in the + * source code. In this case, we can skip adding an _impl function, but we + * still have to add a func prototype that omits implicit args. 
+ */ +static s64 process_kfunc_with_implicit_args(struct btf2btf_context *ctx, s32 kfunc_id) +{ + struct btf_param new_params[MAX_BPF_FUNC_REG_ARGS]; + const char *kfunc_name, *param_name, *tag_name; + s32 proto_id, new_proto_id, new_func_id; + int err, len, name_len, nr_params; + const struct btf_param *params; + enum btf_func_linkage linkage; + char tmp_name[KSYM_NAME_LEN]; + struct btf *btf = ctx->btf; + struct btf_type *t; + + t = (struct btf_type *)btf__type_by_id(btf, kfunc_id); + if (!t || !btf_is_func(t)) { + pr_err("WARN: resolve_btfids: btf id %d is not a function\n", kfunc_id); + return -EINVAL; + } + + kfunc_name = btf__name_by_offset(btf, t->name_off); + name_len = strlen(kfunc_name); + linkage = btf_vlen(t); + + proto_id = t->type; + t = (struct btf_type *)btf__type_by_id(btf, proto_id); + if (!t || !btf_is_func_proto(t)) { + pr_err("ERROR: resolve_btfids: btf id %d is not a function prototype\n", proto_id); + return -EINVAL; + } + + len = snprintf(tmp_name, sizeof(tmp_name), "%s%s", kfunc_name, KF_IMPL_SUFFIX); + if (len < 0 || len >= sizeof(tmp_name)) { + pr_err("ERROR: function name is too long: %s%s\n", kfunc_name, KF_IMPL_SUFFIX); + return -E2BIG; + } + + if (btf__find_by_name_kind(btf, tmp_name, BTF_KIND_FUNC) > 0) { + pr_debug("resolve_btfids: function %s already exists in BTF\n", tmp_name); + goto add_new_proto; + } + + /* Add a new function with _impl suffix and original prototype */ + new_func_id = btf__add_func(btf, tmp_name, linkage, proto_id); + if (new_func_id < 0) { + pr_err("ERROR: resolve_btfids: failed to add func %s to BTF\n", tmp_name); + return new_func_id; + } + + /* Copy all decl tags except "bpf_kfunc" from the original kfunc to the new one */ + for (int i = 0; i < ctx->nr_decl_tags; i++) { + t = (struct btf_type *)btf__type_by_id(btf, ctx->decl_tags[i]); + if (t->type != kfunc_id) + continue; + + tag_name = btf__name_by_offset(btf, t->name_off); + if (strcmp(tag_name, "bpf_kfunc") == 0) + continue; + + err = btf__add_decl_tag(btf, tag_name, new_func_id, -1); + if (err < 0) { + pr_err("ERROR: resolve_btfids: failed to add decl tag %s for %s\n", + tag_name, tmp_name); + return -EINVAL; + } + } + +add_new_proto: + /* + * Drop the _impl suffix and point kfunc_name to the local buffer for later use. + * When BTF is modified the original pointer is invalidated. 
+ */ + tmp_name[name_len] = '\0'; + kfunc_name = tmp_name; + + /* Load non-implicit args from the original prototype */ + t = (struct btf_type *)btf__type_by_id(btf, proto_id); + params = btf_params(t); + nr_params = 0; + for (int i = 0; i < btf_vlen(t); i++) { + if (btf__is_kf_implicit_arg(btf, &params[i])) + break; + new_params[nr_params++] = params[i]; + } + + new_proto_id = btf__add_func_proto(btf, t->type); + if (new_proto_id < 0) { + pr_err("ERROR: resolve_btfids: failed to add func proto for %s\n", kfunc_name); + return new_proto_id; + } + + /* Add non-implicit args to the new prototype */ + for (int i = 0; i < nr_params; i++) { + param_name = btf__name_by_offset(btf, new_params[i].name_off); + err = btf__add_func_param(btf, param_name, new_params[i].type); + if (err < 0) { + pr_err("ERROR: resolve_btfids: failed to add param %s for %s\n", + param_name, kfunc_name); + return err; + } + } + + /* Finally change the prototype of the original kfunc to the new one */ + t = (struct btf_type *)btf__type_by_id(btf, kfunc_id); + t->type = new_proto_id; + + pr_debug("resolve_btfids: updated BTF for kfunc with implicit args %s\n", kfunc_name); + + return 0; +} + +static s64 btf__collect_decl_tags(const struct btf *btf, s32 *decl_tags, u32 decl_tags_sz) +{ + const u32 type_cnt = btf__type_cnt(btf); + const struct btf_type *t; + s64 nr_decl_tags = 0; + + for (u32 id = 1; id < type_cnt; id++) { + t = btf__type_by_id(btf, id); + if (!btf_is_decl_tag(t)) + continue; + if (nr_decl_tags >= decl_tags_sz) { + pr_err("ERROR: resolve_btfids: too many decl tags in BTF - limit %u\n", + decl_tags_sz); + return -E2BIG; + } + decl_tags[nr_decl_tags++] = id; + } + + return nr_decl_tags; +} + +static s64 build_btf2btf_context(struct object *obj, struct btf2btf_context *ctx) +{ + s64 nr_decl_tags; + + nr_decl_tags = btf__collect_decl_tags(obj->btf, ctx->decl_tags, ARRAY_SIZE(ctx->decl_tags)); + if (nr_decl_tags < 0) + return nr_decl_tags; + + ctx->btf = obj->btf; + ctx->nr_decl_tags = nr_decl_tags; + + return 0; +} + +static s64 finalize_btf(struct object *obj) +{ + struct btf2btf_context ctx = {}; + s32 kfuncs[MAX_KFUNCS]; + s64 err, nr_kfuncs; + + err = build_btf2btf_context(obj, &ctx); + if (err < 0) + return err; + + nr_kfuncs = collect_kfunc_ids_by_flags(obj, KF_IMPLICIT_ARGS, kfuncs, ARRAY_SIZE(kfuncs)); + if (nr_kfuncs < 0) + return nr_kfuncs; + + for (u32 i = 0; i < nr_kfuncs; i++) { + err = process_kfunc_with_implicit_args(&ctx, kfuncs[i]); + if (err < 0) + return err; + } + + return 0; + } + static const char * const resolve_btfids_usage[] = { "resolve_btfids [] ", "resolve_btfids --patch_btfids <.BTF_ids file> ", @@ -1047,6 +1326,9 @@ int main(int argc, const char **argv) if (symbols_patch(&obj)) goto out; + if (finalize_btf(&obj)) + goto out; + err = make_out_path(out_path, sizeof(out_path), obj.path, BTF_IDS_SECTION); err = err ?: dump_raw_btf_ids(&obj, out_path); if (err) -- 2.52.0 Add trivial end-to-end tests to validate that the KF_IMPLICIT_ARGS flag is properly handled by both resolve_btfids and the verifier. Declare kfuncs in bpf_testmod. Check that the bpf_prog_aux pointer is set in the kfunc implementation. Verify that calls with implicit args and a legacy case all work. 
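For reference, the pairing that the tests exercise is the following (a condensed view of the declarations from the hunks below, not an additional change):

	/* bpf_testmod (kernel side), registered with KF_IMPLICIT_ARGS */
	__bpf_kfunc int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux);

	/* BPF program side: the implicit bpf_prog_aux argument is omitted */
	extern int bpf_kfunc_implicit_arg(int a) __weak __ksym;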
Signed-off-by: Ihor Solodrai --- .../bpf/prog_tests/kfunc_implicit_args.c | 10 +++++ .../selftests/bpf/progs/kfunc_implicit_args.c | 41 +++++++++++++++++++ .../selftests/bpf/test_kmods/bpf_testmod.c | 26 ++++++++++++ 3 files changed, 77 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/kfunc_implicit_args.c create mode 100644 tools/testing/selftests/bpf/progs/kfunc_implicit_args.c diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_implicit_args.c b/tools/testing/selftests/bpf/prog_tests/kfunc_implicit_args.c new file mode 100644 index 000000000000..5e4793c9c29a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_implicit_args.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */ + +#include +#include "kfunc_implicit_args.skel.h" + +void test_kfunc_implicit_args(void) +{ + RUN_TESTS(kfunc_implicit_args); +} diff --git a/tools/testing/selftests/bpf/progs/kfunc_implicit_args.c b/tools/testing/selftests/bpf/progs/kfunc_implicit_args.c new file mode 100644 index 000000000000..a1e456500442 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kfunc_implicit_args.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include "bpf_misc.h" + +extern int bpf_kfunc_implicit_arg(int a) __weak __ksym; +extern int bpf_kfunc_implicit_arg_impl(int a, void *aux__prog) __weak __ksym; // illegal +extern int bpf_kfunc_implicit_arg_legacy(int a, int b) __weak __ksym; +extern int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, void *aux__prog) __weak __ksym; + +char _license[] SEC("license") = "GPL"; + +SEC("syscall") +__retval(5) +int test_kfunc_implicit_arg(void *ctx) +{ + return bpf_kfunc_implicit_arg(5); +} + +SEC("syscall") +__failure +int test_kfunc_implicit_arg_impl_illegal(void *ctx) +{ + return bpf_kfunc_implicit_arg_impl(5, NULL); +} + +SEC("syscall") +__retval(7) +int test_kfunc_implicit_arg_legacy(void *ctx) +{ + return bpf_kfunc_implicit_arg_legacy(3, 4); +} + +SEC("syscall") +__retval(11) +int test_kfunc_implicit_arg_legacy_impl(void *ctx) +{ + return bpf_kfunc_implicit_arg_legacy_impl(5, 6, NULL); +} diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c index 1c41d03bd5a1..503451875d33 100644 --- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c @@ -1136,6 +1136,10 @@ __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id); __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1_impl(struct st_ops_args *args, void *aux_prog); +__bpf_kfunc int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux); +__bpf_kfunc int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux); +__bpf_kfunc int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, void *aux__prog); + BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) BTF_ID_FLAGS(func, bpf_kfunc_call_test1) @@ -1178,6 +1182,9 @@ BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10) BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1) BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1_impl) +BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg, KF_IMPLICIT_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy, KF_IMPLICIT_ARGS) +BTF_ID_FLAGS(func, 
bpf_kfunc_implicit_arg_legacy_impl) BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) static int bpf_testmod_ops_init(struct btf *btf) @@ -1669,6 +1676,25 @@ int bpf_kfunc_multi_st_ops_test_1_impl(struct st_ops_args *args, void *aux__prog return ret; } +int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux) +{ + if (aux && a > 0) + return a; + return -EINVAL; +} + +int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux) +{ + if (aux) + return a + b; + return -EINVAL; +} + +int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, void *aux__prog) +{ + return bpf_kfunc_implicit_arg_legacy(a, b, aux__prog); +} + static int multi_st_ops_reg(void *kdata, struct bpf_link *link) { struct bpf_testmod_multi_st_ops *st_ops = -- 2.52.0 Implement bpf_wq_set_callback() with an implicit bpf_prog_aux argument, and change bpf_wq_set_callback_impl() to call the new kfunc. Update special kfunc checks in the verifier to accept both versions of the function. In the selftests, a bpf_wq_set_callback_impl() call is intentionally introduced to confirm that both signatures are handled correctly. Signed-off-by: Ihor Solodrai --- kernel/bpf/helpers.c | 18 +++++++++++++----- kernel/bpf/verifier.c | 15 +++++++++------ tools/testing/selftests/bpf/bpf_experimental.h | 5 ----- tools/testing/selftests/bpf/progs/wq.c | 2 +- .../testing/selftests/bpf/progs/wq_failures.c | 4 ++-- 5 files changed, 25 insertions(+), 19 deletions(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 9eaa4185e0a7..23aa785c0f99 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -3120,12 +3120,11 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) return 0; } -__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, void *value), - unsigned int flags, - void *aux__prog) +__bpf_kfunc int bpf_wq_set_callback(struct bpf_wq *wq, + int (callback_fn)(void *map, int *key, void *value), + unsigned int flags, + struct bpf_prog_aux *aux) { - struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog; struct bpf_async_kern *async = (struct bpf_async_kern *)wq; if (flags) @@ -3134,6 +3133,14 @@ __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); } +__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, + int (callback_fn)(void *map, int *key, void *value), + unsigned int flags, + void *aux__prog) +{ + return bpf_wq_set_callback(wq, callback_fn, flags, aux__prog); +} + __bpf_kfunc void bpf_preempt_disable(void) { preempt_disable(); @@ -4488,6 +4495,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_memset) BTF_ID_FLAGS(func, bpf_modify_return_test_tp) #endif BTF_ID_FLAGS(func, bpf_wq_init) +BTF_ID_FLAGS(func, bpf_wq_set_callback, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) BTF_ID_FLAGS(func, bpf_wq_start) BTF_ID_FLAGS(func, bpf_preempt_disable) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5541ab674e30..e5aeee554377 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -520,7 +520,7 @@ static bool is_async_callback_calling_kfunc(u32 btf_id); static bool is_callback_calling_kfunc(u32 btf_id); static bool is_bpf_throw_kfunc(struct bpf_insn *insn); -static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id); +static bool is_bpf_wq_set_callback_kfunc(u32 btf_id); static bool is_task_work_add_kfunc(u32 func_id); static bool is_sync_callback_calling_function(enum bpf_func_id func_id) @@ -562,7 +562,7 @@ static bool 
is_async_cb_sleepable(struct bpf_verifier_env *env, struct bpf_insn /* bpf_wq and bpf_task_work callbacks are always sleepable. */ if (bpf_pseudo_kfunc_call(insn) && insn->off == 0 && - (is_bpf_wq_set_callback_impl_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm))) + (is_bpf_wq_set_callback_kfunc(insn->imm) || is_task_work_add_kfunc(insn->imm))) return true; verifier_bug(env, "unhandled async callback in is_async_cb_sleepable"); @@ -12470,6 +12470,7 @@ enum special_kfunc_type { KF_bpf_arena_alloc_pages, KF_bpf_arena_free_pages, KF_bpf_arena_reserve_pages, + KF_bpf_wq_set_callback, }; BTF_ID_LIST(special_kfunc_list) @@ -12547,6 +12548,7 @@ BTF_ID(func, bpf_task_work_schedule_resume_impl) BTF_ID(func, bpf_arena_alloc_pages) BTF_ID(func, bpf_arena_free_pages) BTF_ID(func, bpf_arena_reserve_pages) +BTF_ID(func, bpf_wq_set_callback) static bool is_task_work_add_kfunc(u32 func_id) { @@ -13002,7 +13004,7 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id) static bool is_async_callback_calling_kfunc(u32 btf_id) { - return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl] || + return is_bpf_wq_set_callback_kfunc(btf_id) || is_task_work_add_kfunc(btf_id); } @@ -13012,9 +13014,10 @@ static bool is_bpf_throw_kfunc(struct bpf_insn *insn) insn->imm == special_kfunc_list[KF_bpf_throw]; } -static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id) +static bool is_bpf_wq_set_callback_kfunc(u32 btf_id) { - return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl]; + return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl] || + btf_id == special_kfunc_list[KF_bpf_wq_set_callback]; } static bool is_callback_calling_kfunc(u32 btf_id) @@ -14093,7 +14096,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, meta.r0_rdonly = false; } - if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) { + if (is_bpf_wq_set_callback_kfunc(meta.func_id)) { err = push_callback_call(env, insn, insn_idx, meta.subprogno, set_timer_callback_state); if (err) { diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 2cd9165c7348..68a49b1f77ae 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -580,11 +580,6 @@ extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym; extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; -extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, void *value), - unsigned int flags__k, void *aux__ign) __ksym; -#define bpf_wq_set_callback(timer, cb, flags) \ - bpf_wq_set_callback_impl(timer, cb, flags, NULL) struct bpf_iter_kmem_cache; extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym; diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c index 25be2cd9d42c..f265242b954d 100644 --- a/tools/testing/selftests/bpf/progs/wq.c +++ b/tools/testing/selftests/bpf/progs/wq.c @@ -107,7 +107,7 @@ static int test_hmap_elem_callback(void *map, int *key, if (bpf_wq_init(wq, map, 0) != 0) return -3; - if (bpf_wq_set_callback(wq, callback_fn, 0)) + if (bpf_wq_set_callback_impl(wq, callback_fn, 0, NULL)) return -4; if (bpf_wq_start(wq, 0)) diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c index d06f6d40594a..3767f5595bbc 100644 --- 
a/tools/testing/selftests/bpf/progs/wq_failures.c +++ b/tools/testing/selftests/bpf/progs/wq_failures.c @@ -97,7 +97,7 @@ __failure /* check that the first argument of bpf_wq_set_callback() * is a correct bpf_wq pointer. */ -__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */ +__msg(": (85) call bpf_wq_set_callback#") /* anchor message */ __msg("arg#0 doesn't point to a map value") long test_wrong_wq_pointer(void *ctx) { @@ -123,7 +123,7 @@ __failure /* check that the first argument of bpf_wq_set_callback() * is a correct bpf_wq pointer. */ -__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */ +__msg(": (85) call bpf_wq_set_callback#") /* anchor message */ __msg("off 1 doesn't point to 'struct bpf_wq' that is at 0") long test_wrong_wq_pointer_offset(void *ctx) { -- 2.52.0 Remove extern declaration of bpf_wq_set_callback_impl() from hid_bpf_helpers.h and replace bpf_wq_set_callback macro with a corresponding new declaration. Fix selftests/hid build failure caused by missing BPF_CFLAGS. Tested with: # append tools/testing/selftests/hid/config and build the kernel $ make -C tools/testing/selftests/hid # in built kernel $ ./tools/testing/selftests/hid/hid_bpf -t test_multiply_events_wq TAP version 13 1..1 # Starting 1 tests from 1 test cases. # RUN hid_bpf.test_multiply_events_wq ... [ 2.575520] hid-generic 0003:0001:0A36.0001: hidraw0: USB HID v0.00 Device [test-uhid-device-138] on 138 # OK hid_bpf.test_multiply_events_wq ok 1 hid_bpf.test_multiply_events_wq # PASSED: 1 / 1 tests passed. # Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0 PASS Signed-off-by: Ihor Solodrai --- drivers/hid/bpf/progs/hid_bpf_helpers.h | 8 +++----- tools/testing/selftests/hid/Makefile | 4 +++- tools/testing/selftests/hid/progs/hid_bpf_helpers.h | 8 +++----- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/hid/bpf/progs/hid_bpf_helpers.h b/drivers/hid/bpf/progs/hid_bpf_helpers.h index bf19785a6b06..228f8d787567 100644 --- a/drivers/hid/bpf/progs/hid_bpf_helpers.h +++ b/drivers/hid/bpf/progs/hid_bpf_helpers.h @@ -33,11 +33,9 @@ extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx, /* bpf_wq implementation */ extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; -extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, void *value), - unsigned int flags__k, void *aux__ign) __ksym; -#define bpf_wq_set_callback(wq, cb, flags) \ - bpf_wq_set_callback_impl(wq, cb, flags, NULL) +extern int bpf_wq_set_callback(struct bpf_wq *wq, + int (*callback_fn)(void *, int *, void *), + unsigned int flags) __weak __ksym; #define HID_MAX_DESCRIPTOR_SIZE 4096 #define HID_IGNORE_EVENT -1 diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile index 2839d2612ce3..4c01bb649913 100644 --- a/tools/testing/selftests/hid/Makefile +++ b/tools/testing/selftests/hid/Makefile @@ -184,7 +184,9 @@ MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG)) BPF_CFLAGS = -g -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ - -I$(INCLUDE_DIR) + -I$(INCLUDE_DIR) \ + -Wno-microsoft-anon-tag \ + -fms-extensions CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ -Wno-compare-distinct-pointer-types diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h index 531228b849da..80ab60905865 100644 --- 
a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h +++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h @@ -116,10 +116,8 @@ extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx, /* bpf_wq implementation */ extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; -extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, void *wq), - unsigned int flags__k, void *aux__ign) __weak __ksym; -#define bpf_wq_set_callback(timer, cb, flags) \ - bpf_wq_set_callback_impl(timer, cb, flags, NULL) +extern int bpf_wq_set_callback(struct bpf_wq *wq, + int (*callback_fn)(void *, int *, void *), + unsigned int flags) __weak __ksym; #endif /* __HID_BPF_HELPERS_H */ -- 2.52.0 Implement bpf_task_work_schedule_* with an implicit bpf_prog_aux argument, and change corresponding _impl funcs to call the new kfunc. Update special kfunc checks in the verifier to accept both new and old variants of the functions. Update the selftests to use the new API with implicit argument. Signed-off-by: Ihor Solodrai --- kernel/bpf/helpers.c | 28 +++++++++++++++---- kernel/bpf/verifier.c | 8 +++++- .../testing/selftests/bpf/progs/file_reader.c | 4 ++- tools/testing/selftests/bpf/progs/task_work.c | 11 ++++++-- .../selftests/bpf/progs/task_work_fail.c | 16 ++++++++--- .../selftests/bpf/progs/task_work_stress.c | 5 ++-- .../bpf/progs/verifier_async_cb_context.c | 6 ++-- 7 files changed, 59 insertions(+), 19 deletions(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 23aa785c0f99..2e01973f2c18 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -4282,41 +4282,55 @@ static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work } /** - * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL + * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL * mode * @task: Task struct for which callback should be scheduled * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping * @map__map: bpf_map that embeds struct bpf_task_work in the values * @callback: pointer to BPF subprogram to call - * @aux__prog: user should pass NULL + * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier * * Return: 0 if task work has been scheduled successfully, negative error code otherwise */ +__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw, + void *map__map, bpf_task_work_callback_t callback, + struct bpf_prog_aux *aux) +{ + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL); +} + __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task, struct bpf_task_work *tw, void *map__map, bpf_task_work_callback_t callback, void *aux__prog) { - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL); + return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog); } /** - * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME + * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME * mode * @task: Task struct for which callback should be scheduled * @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping * @map__map: bpf_map that embeds struct bpf_task_work in the values * @callback: pointer to 
BPF subprogram to call - * @aux__prog: user should pass NULL + * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier * * Return: 0 if task work has been scheduled successfully, negative error code otherwise */ +__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw, + void *map__map, bpf_task_work_callback_t callback, + struct bpf_prog_aux *aux) +{ + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_RESUME); +} + __bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task, struct bpf_task_work *tw, void *map__map, bpf_task_work_callback_t callback, void *aux__prog) { - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME); + return bpf_task_work_schedule_resume(task, tw, map__map, callback, aux__prog); } static int make_file_dynptr(struct file *file, u32 flags, bool may_sleep, @@ -4545,7 +4559,9 @@ BTF_ID_FLAGS(func, bpf_strncasestr); BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU) #endif BTF_ID_FLAGS(func, bpf_stream_vprintk_impl) +BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl) +BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl) BTF_ID_FLAGS(func, bpf_dynptr_from_file) BTF_ID_FLAGS(func, bpf_dynptr_file_discard) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e5aeee554377..fdd17d19d5be 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -12471,6 +12471,8 @@ enum special_kfunc_type { KF_bpf_arena_free_pages, KF_bpf_arena_reserve_pages, KF_bpf_wq_set_callback, + KF_bpf_task_work_schedule_signal, + KF_bpf_task_work_schedule_resume, }; BTF_ID_LIST(special_kfunc_list) @@ -12549,10 +12551,14 @@ BTF_ID(func, bpf_arena_alloc_pages) BTF_ID(func, bpf_arena_free_pages) BTF_ID(func, bpf_arena_reserve_pages) BTF_ID(func, bpf_wq_set_callback) +BTF_ID(func, bpf_task_work_schedule_signal) +BTF_ID(func, bpf_task_work_schedule_resume) static bool is_task_work_add_kfunc(u32 func_id) { - return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] || + return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] || + func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] || + func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume] || func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl]; } diff --git a/tools/testing/selftests/bpf/progs/file_reader.c b/tools/testing/selftests/bpf/progs/file_reader.c index 4d756b623557..ff3270a0cb9b 100644 --- a/tools/testing/selftests/bpf/progs/file_reader.c +++ b/tools/testing/selftests/bpf/progs/file_reader.c @@ -77,7 +77,9 @@ int on_open_validate_file_read(void *c) err = 1; return 0; } - bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, task_work_callback, NULL); + + bpf_task_work_schedule_signal(task, &work->tw, &arrmap, task_work_callback); + return 0; } diff --git a/tools/testing/selftests/bpf/progs/task_work.c b/tools/testing/selftests/bpf/progs/task_work.c index 663a80990f8f..eec422af20b8 100644 --- a/tools/testing/selftests/bpf/progs/task_work.c +++ b/tools/testing/selftests/bpf/progs/task_work.c @@ -66,7 +66,8 @@ int oncpu_hash_map(struct pt_regs *args) if (!work) return 0; - bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL); + bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work); + return 0; } @@ -80,7 +81,9 @@ int 
oncpu_array_map(struct pt_regs *args) work = bpf_map_lookup_elem(&arrmap, &key); if (!work) return 0; - bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL); + + bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work); + return 0; } @@ -102,6 +105,8 @@ int oncpu_lru_map(struct pt_regs *args) work = bpf_map_lookup_elem(&lrumap, &key); if (!work || work->data[0]) return 0; - bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL); + + bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work); + return 0; } diff --git a/tools/testing/selftests/bpf/progs/task_work_fail.c b/tools/testing/selftests/bpf/progs/task_work_fail.c index 1270953fd092..557bdf9eb0fc 100644 --- a/tools/testing/selftests/bpf/progs/task_work_fail.c +++ b/tools/testing/selftests/bpf/progs/task_work_fail.c @@ -53,7 +53,9 @@ int mismatch_map(struct pt_regs *args) work = bpf_map_lookup_elem(&arrmap, &key); if (!work) return 0; - bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL); + + bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work); + return 0; } @@ -65,7 +67,9 @@ int no_map_task_work(struct pt_regs *args) struct bpf_task_work tw; task = bpf_get_current_task_btf(); - bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL); + + bpf_task_work_schedule_resume(task, &tw, &hmap, process_work); + return 0; } @@ -76,7 +80,9 @@ int task_work_null(struct pt_regs *args) struct task_struct *task; task = bpf_get_current_task_btf(); - bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL); + + bpf_task_work_schedule_resume(task, NULL, &hmap, process_work); + return 0; } @@ -91,6 +97,8 @@ int map_null(struct pt_regs *args) work = bpf_map_lookup_elem(&arrmap, &key); if (!work) return 0; - bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL); + + bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work); + return 0; } diff --git a/tools/testing/selftests/bpf/progs/task_work_stress.c b/tools/testing/selftests/bpf/progs/task_work_stress.c index 55e555f7f41b..0cba36569714 100644 --- a/tools/testing/selftests/bpf/progs/task_work_stress.c +++ b/tools/testing/selftests/bpf/progs/task_work_stress.c @@ -51,8 +51,9 @@ int schedule_task_work(void *ctx) if (!work) return 0; } - err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap, - process_work, NULL); + err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap, + process_work); + if (err) __sync_fetch_and_add(&schedule_error, 1); else diff --git a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c index 7efa9521105e..6c50aff03baa 100644 --- a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c +++ b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c @@ -156,7 +156,8 @@ int task_work_non_sleepable_prog(void *ctx) if (!task) return 0; - bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL); + bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb); + return 0; } @@ -176,6 +177,7 @@ int task_work_sleepable_prog(void *ctx) if (!task) return 0; - bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL); + bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb); + return 0; } -- 2.52.0 Implement bpf_stream_vprintk with an implicit bpf_prog_aux argument, and change 
bpf_stream_vprintk_impl to call the new kfunc. Update the selftests to use the new API with the implicit argument. The bpf_stream_printk macro is changed to use the new bpf_stream_vprintk kfunc, and the extern declaration of bpf_stream_vprintk_impl is replaced accordingly. Signed-off-by: Ihor Solodrai --- kernel/bpf/helpers.c | 1 + kernel/bpf/stream.c | 11 ++++++++--- tools/lib/bpf/bpf_helpers.h | 6 +++--- tools/testing/selftests/bpf/progs/stream_fail.c | 6 +++--- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 2e01973f2c18..b35b78c08583 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -4558,6 +4558,7 @@ BTF_ID_FLAGS(func, bpf_strncasestr); #if defined(CONFIG_BPF_LSM) && defined(CONFIG_CGROUPS) BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU) #endif +BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, bpf_stream_vprintk_impl) BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS) BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl) diff --git a/kernel/bpf/stream.c b/kernel/bpf/stream.c index 0b6bc3f30335..4e4713d9c771 100644 --- a/kernel/bpf/stream.c +++ b/kernel/bpf/stream.c @@ -212,14 +212,13 @@ __bpf_kfunc_start_defs(); * Avoid using enum bpf_stream_id so that kfunc users don't have to pull in the * enum in headers. */ -__bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args, - u32 len__sz, void *aux__prog) +__bpf_kfunc int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args, + u32 len__sz, struct bpf_prog_aux *aux) { struct bpf_bprintf_data data = { .get_bin_args = true, .get_buf = true, }; - struct bpf_prog_aux *aux = aux__prog; u32 fmt_size = strlen(fmt__str) + 1; struct bpf_stream *stream; u32 data_len = len__sz; @@ -246,6 +245,12 @@ __bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, con return ret; } +__bpf_kfunc int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args, + u32 len__sz, void *aux__prog) +{ + return bpf_stream_vprintk(stream_id, fmt__str, args, len__sz, aux__prog); +} + __bpf_kfunc_end_defs(); /* Added kfunc to common_btf_ids */ diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index d4e4e388e625..c145da05a67c 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -315,8 +315,8 @@ enum libbpf_tristate { ___param, sizeof(___param)); \ }) -extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const void *args, - __u32 len__sz, void *aux__prog) __weak __ksym; +extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args, + __u32 len__sz) __weak __ksym; #define bpf_stream_printk(stream_id, fmt, args...) 
\ ({ \ @@ -328,7 +328,7 @@ extern int bpf_stream_vprintk_impl(int stream_id, const char *fmt__str, const vo ___bpf_fill(___param, args); \ _Pragma("GCC diagnostic pop") \ \ - bpf_stream_vprintk_impl(stream_id, ___fmt, ___param, sizeof(___param), NULL); \ + bpf_stream_vprintk(stream_id, ___fmt, ___param, sizeof(___param)); \ }) /* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args diff --git a/tools/testing/selftests/bpf/progs/stream_fail.c b/tools/testing/selftests/bpf/progs/stream_fail.c index 3662515f0107..8e8249f3521c 100644 --- a/tools/testing/selftests/bpf/progs/stream_fail.c +++ b/tools/testing/selftests/bpf/progs/stream_fail.c @@ -10,7 +10,7 @@ SEC("syscall") __failure __msg("Possibly NULL pointer passed") int stream_vprintk_null_arg(void *ctx) { - bpf_stream_vprintk_impl(BPF_STDOUT, "", NULL, 0, NULL); + bpf_stream_vprintk(BPF_STDOUT, "", NULL, 0); return 0; } @@ -18,7 +18,7 @@ SEC("syscall") __failure __msg("R3 type=scalar expected=") int stream_vprintk_scalar_arg(void *ctx) { - bpf_stream_vprintk_impl(BPF_STDOUT, "", (void *)46, 0, NULL); + bpf_stream_vprintk(BPF_STDOUT, "", (void *)46, 0); return 0; } @@ -26,7 +26,7 @@ SEC("syscall") __failure __msg("arg#1 doesn't point to a const string") int stream_vprintk_string_arg(void *ctx) { - bpf_stream_vprintk_impl(BPF_STDOUT, ctx, NULL, 0, NULL); + bpf_stream_vprintk(BPF_STDOUT, ctx, NULL, 0); return 0; } -- 2.52.0 Add sections explaining the KF_IMPLICIT_ARGS kfunc flag. Mark the __prog annotation as deprecated. Signed-off-by: Ihor Solodrai --- Documentation/bpf/kfuncs.rst | 44 +++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index 3eb59a8f9f34..b849598271d2 100644 --- a/Documentation/bpf/kfuncs.rst +++ b/Documentation/bpf/kfuncs.rst @@ -232,7 +232,7 @@ Or:: ... } -2.3.6 __prog Annotation +2.3.6 __prog Annotation (deprecated, use KF_IMPLICIT_ARGS instead) --------------------------- This annotation is used to indicate that the argument needs to be fixed up to the bpf_prog_aux of the caller BPF program. Any value passed into this argument @@ -381,6 +381,48 @@ encouraged to make their use-cases known as early as possible, and participate in upstream discussions regarding whether to keep, change, deprecate, or remove those kfuncs if and when such discussions occur. +2.5.9 KF_IMPLICIT_ARGS flag +------------------------------------ + +The KF_IMPLICIT_ARGS flag is used to indicate that the BPF signature +of the kfunc is different from its kernel signature, and the values +for implicit arguments are provided at load time by the verifier. + +Only arguments of specific types are implicit. +Currently only the ``struct bpf_prog_aux *`` type is supported. + +A kfunc with the KF_IMPLICIT_ARGS flag therefore has two types in BTF: one +function matching the kernel declaration (with an _impl suffix in the +name by convention), and another matching the intended BPF API. + +The verifier only allows calls to the non-_impl version of a kfunc, which +uses a signature without the implicit arguments. + +Example declaration: + +.. code-block:: c + + __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw, + void *map__map, bpf_task_work_callback_t callback, + struct bpf_prog_aux *aux) { ... } + +Example usage in BPF program: + +.. 
code-block:: c + + /* note that the last argument is omitted */ + bpf_task_work_schedule_signal(task, &work->tw, &arrmap, task_work_callback); + +The exceptions to this are kfuncs that use the __prog argument and were +implemented before the KF_IMPLICIT_ARGS mechanism was introduced: + * bpf_stream_vprintk_impl + * bpf_task_work_schedule_resume_impl + * bpf_task_work_schedule_signal_impl + * bpf_wq_set_callback_impl + +These are allowed for backwards compatibility; however, BPF programs +should use the newer API that omits the implicit arguments. + 2.6 Registering the kfuncs -------------------------- -- 2.52.0