From: Xu Kuohai JIT for single-subprog programs is done after the verification stage. This prevents the JIT stage from accessing the verifier's internal data, like env->insn_aux_data. So move it to the verifier. After the movement, all bpf progs loaded with bpf_prog_load() are JITed in the verifier. The JIT in bpf_prog_select_runtime() is preserved for bpf_migrate_filter() and test cases. Signed-off-by: Xu Kuohai --- include/linux/filter.h | 2 ++ kernel/bpf/core.c | 51 +++++++++++++++++++++++++++--------------- kernel/bpf/syscall.c | 2 +- kernel/bpf/verifier.c | 7 +++++- 4 files changed, 42 insertions(+), 20 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 44d7ae95ddbc..632c03e126d9 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1108,6 +1108,8 @@ static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb, return sk_filter_trim_cap(sk, skb, 1, reason); } +struct bpf_prog *bpf_prog_select_jit(struct bpf_prog *fp, int *err); +struct bpf_prog *__bpf_prog_select_runtime(struct bpf_prog *fp, bool jit_attempted, int *err); struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 229c74f3d6ae..00be578a438d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2505,18 +2505,18 @@ static bool bpf_prog_select_interpreter(struct bpf_prog *fp) return select_interpreter; } -/** - * bpf_prog_select_runtime - select exec runtime for BPF program - * @fp: bpf_prog populated with BPF program - * @err: pointer to error variable - * - * Try to JIT eBPF program, if JIT is not available, use interpreter. - * The BPF program will be executed via bpf_prog_run() function. 
- * - * Return: the &fp argument along with &err set to 0 for success or - * a negative errno code on failure - */ -struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) +struct bpf_prog *bpf_prog_select_jit(struct bpf_prog *fp, int *err) +{ + *err = bpf_prog_alloc_jited_linfo(fp); + if (*err) + return fp; + + fp = bpf_int_jit_compile(fp); + bpf_prog_jit_attempt_done(fp); + return fp; +} + +struct bpf_prog *__bpf_prog_select_runtime(struct bpf_prog *fp, bool jit_attempted, int *err) { /* In case of BPF to BPF calls, verifier did all the prep * work with regards to JITing, etc. @@ -2540,12 +2540,11 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) * be JITed, but falls back to the interpreter. */ if (!bpf_prog_is_offloaded(fp->aux)) { - *err = bpf_prog_alloc_jited_linfo(fp); - if (*err) - return fp; - - fp = bpf_int_jit_compile(fp); - bpf_prog_jit_attempt_done(fp); + if (!jit_attempted) { + fp = bpf_prog_select_jit(fp, err); + if (*err) + return fp; + } if (!fp->jited && jit_needed) { *err = -ENOTSUPP; return fp; @@ -2570,6 +2569,22 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) return fp; } + +/** + * bpf_prog_select_runtime - select exec runtime for BPF program + * @fp: bpf_prog populated with BPF program + * @err: pointer to error variable + * + * Try to JIT eBPF program, if JIT is not available, use interpreter. + * The BPF program will be executed via bpf_prog_run() function. 
+ * + * Return: the &fp argument along with &err set to 0 for success or + * a negative errno code on failure + */ +struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) +{ + return __bpf_prog_select_runtime(fp, false, err); +} EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); static unsigned int __bpf_prog_ret1(const void *ctx, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 274039e36465..d6982107ba80 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3090,7 +3090,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) if (err < 0) goto free_used_maps; - prog = bpf_prog_select_runtime(prog, &err); + prog = __bpf_prog_select_runtime(prog, true, &err); if (err < 0) goto free_used_maps; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fc4ccd1de569..ab2bc0850770 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -26086,6 +26086,11 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 convert_pseudo_ld_imm64(env); } + /* constants blinding in the JIT may increase prog->len */ + len = env->prog->len; + if (env->subprog_cnt == 1) + env->prog = bpf_prog_select_jit(env->prog, &ret); + adjust_btf_func(env); err_release_maps: @@ -26111,7 +26116,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 err_unlock: if (!is_priv) mutex_unlock(&bpf_verifier_lock); - clear_insn_aux_data(env, 0, env->prog->len); + clear_insn_aux_data(env, 0, len); vfree(env->insn_aux_data); err_free_env: bpf_stack_liveness_free(env); -- 2.47.3