In order to prepare to emit KASAN checks in JITed programs, JIT compilers need to be aware of whether some load/store instructions are targeting the bpf program stack, as those should not be monitored (we already have guard pages for that, and it is difficult anyway to correctly monitor any kind of data passed on the stack). To support this need, make the BPF verifier mark the instructions that access the program stack: - add a setter that allows the verifier to mark instructions accessing the program stack - add a getter that allows JIT compilers to check whether instructions being JITed are accessing the stack Signed-off-by: Alexis Lothoré (eBPF Foundation) --- include/linux/bpf.h | 2 ++ include/linux/bpf_verifier.h | 2 ++ kernel/bpf/core.c | 10 ++++++++++ kernel/bpf/verifier.c | 7 +++++++ 4 files changed, 21 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b4b703c90ca9..774a0395c498 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1543,6 +1543,8 @@ void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog, int insn_idx); +bool bpf_insn_accesses_stack(const struct bpf_verifier_env *env, + const struct bpf_prog *prog, int insn_idx); #else static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr, diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index b148f816f25b..ab99ed4c4227 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -660,6 +660,8 @@ struct bpf_insn_aux_data { u16 const_reg_map_mask; u16 const_reg_subprog_mask; u32 const_reg_vals[10]; + /* instruction accesses stack */ + bool accesses_stack; }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 8b018ff48875..340abfdadbed 100644 --- a/kernel/bpf/core.c +++ 
b/kernel/bpf/core.c @@ -1582,6 +1582,16 @@ bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struc insn_idx += prog->aux->subprog_start; return env->insn_aux_data[insn_idx].indirect_target; } + +bool bpf_insn_accesses_stack(const struct bpf_verifier_env *env, + const struct bpf_prog *prog, int insn_idx) +{ + if (!env) + return false; + insn_idx += prog->aux->subprog_start; + return env->insn_aux_data[insn_idx].accesses_stack; +} + #endif /* CONFIG_BPF_JIT */ /* Base function for offset calculation. Needs to go into .text section, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1e36b9e91277..7bce4fb4e540 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3502,6 +3502,11 @@ static void mark_indirect_target(struct bpf_verifier_env *env, int idx) env->insn_aux_data[idx].indirect_target = true; } +static void mark_insn_accesses_stack(struct bpf_verifier_env *env, int idx) +{ + env->insn_aux_data[idx].accesses_stack = true; +} + #define LR_FRAMENO_BITS 3 #define LR_SPI_BITS 6 #define LR_ENTRY_BITS (LR_SPI_BITS + LR_FRAMENO_BITS + 1) @@ -6490,6 +6495,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn else err = check_stack_write(env, regno, off, size, value_regno, insn_idx); + + mark_insn_accesses_stack(env, insn_idx); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); -- 2.53.0