Prepare to remove support for calling kfuncs from prologue & epilogue. Instead allow direct helper calls using BPF_EMIT_CALL. Such calls already contain helper offset relative to __bpf_call_base and must bypass the verifier's patch_call_imm fixup, which expects BPF helper IDs rather than pre-resolved offsets. Add a finalized_call flag to bpf_insn_aux_data to mark call instructions with resolved offsets so the verifier can skip patch_call_imm fixup for these calls. Note that the target of BPF_EMIT_CALL should be wrapped with BPF_CALL_x to prevent an ABI mismatch between BPF and C on 32-bit architectures. Suggested-by: Alexei Starovoitov Signed-off-by: Jakub Sitnicki --- include/linux/bpf_verifier.h | 1 + kernel/bpf/verifier.c | 29 ++++++++++++++++++++++++++--- net/core/filter.c | 3 +-- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 8355b585cd18..1ccbb47aca2d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -561,6 +561,7 @@ struct bpf_insn_aux_data { bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */ bool is_iter_next; /* bpf_iter__next() kfunc call */ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */ + bool finalized_call; /* call holds resolved helper offset relative to __bpf_call_base */ u8 alu_state; /* used in combination with alu_limit */ /* true if STX or LDX instruction is a part of a spill/fill * pattern for a bpf_fastcall call. diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c7f5234d5fd2..0b4feedc14c9 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -22201,6 +22201,19 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, return 0; } +/* Mark helper calls within prog->insns[off ... off+cnt-1] range as resolved, + * meaning imm contains the helper offset. Used for prologue & epilogue. 
+ */ +static void mark_helper_calls_finalized(struct bpf_verifier_env *env, int off, int cnt) +{ + int i; + + for (i = 0; i < cnt; i++) { + if (bpf_helper_call(&env->prog->insnsi[i + off])) + env->insn_aux_data[i + off].finalized_call = true; + } +} + /* convert load instructions that access fields of a context type into a * sequence of instructions that access fields of the underlying structure: * struct __sk_buff -> struct sk_buff @@ -22267,6 +22280,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) ret = add_kfunc_in_insns(env, insn_buf, cnt - 1); if (ret < 0) return ret; + + mark_helper_calls_finalized(env, 0, cnt - 1); } } @@ -22280,6 +22295,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) for (i = 0; i < insn_cnt; i++, insn++) { bpf_convert_ctx_access_t convert_ctx_access; + bool finalize_helper_calls = false; u8 mode; if (env->insn_aux_data[i + delta].nospec) { @@ -22346,6 +22362,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) * epilogue. 
*/ epilogue_idx = i + delta; + finalize_helper_calls = true; } goto patch_insn_buf; } else { @@ -22495,12 +22512,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; + env->prog = new_prog; - delta += cnt - 1; + if (finalize_helper_calls) + mark_helper_calls_finalized(env, i + delta, cnt - 1); /* keep walking new program and skip insns we just inserted */ - env->prog = new_prog; - insn = new_prog->insnsi + i + delta; + delta += cnt - 1; + insn = new_prog->insnsi + i + delta; } return 0; @@ -23909,6 +23928,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env) goto next_insn; } patch_call_imm: + if (env->insn_aux_data[i + delta].finalized_call) + goto next_insn; + fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions @@ -23920,6 +23942,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) return -EFAULT; } insn->imm = fn->func - __bpf_call_base; + env->insn_aux_data[i + delta].finalized_call = true; next_insn: if (subprogs[cur_subprog + 1].start == i + delta + 1) { subprogs[cur_subprog].stack_depth += stack_depth_extra; diff --git a/net/core/filter.c b/net/core/filter.c index d14401193b01..cb39388f69a9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9082,8 +9082,7 @@ static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, /* ret = bpf_skb_pull_data(skb, 0); */ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); - *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_pull_data); + *insn++ = BPF_EMIT_CALL(bpf_skb_pull_data); /* if (!ret) * goto restore; * return TC_ACT_SHOT; -- 2.43.0