From: Xu Kuohai

When BTI is enabled, the indirect jump selftest triggers a BTI exception:

  Internal error: Oops - BTI: 0000000036000003 [#1] SMP
  ...
  Call trace:
   bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
   bpf_prog_run_pin_on_cpu+0x140/0x468
   bpf_prog_test_run_syscall+0x280/0x3b8
   bpf_prog_test_run+0x22c/0x2c0
   __sys_bpf+0x4d8/0x5c8
   __arm64_sys_bpf+0x88/0xa8
   invoke_syscall+0x80/0x220
   el0_svc_common+0x160/0x1d0
   do_el0_svc+0x54/0x70
   el0_svc+0x54/0x188
   el0t_64_sync_handler+0x84/0x130
   el0t_64_sync+0x198/0x1a0

This happens because no BTI instruction is generated by the JIT for
indirect jump targets. Fix it by emitting a BTI instruction for every
possible indirect jump target when BTI is enabled. The targets are
identified by traversing all instruction arrays used by the BPF program,
since indirect jump targets can only be read from instruction arrays.

Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
Signed-off-by: Xu Kuohai
---
 arch/arm64/net/bpf_jit_comp.c | 20 ++++++++++++++++
 include/linux/bpf.h           | 12 ++++++++++
 kernel/bpf/bpf_insn_array.c   | 43 +++++++++++++++++++++++++++++++++++
 3 files changed, 75 insertions(+)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 929123a5431a..f546df886049 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -78,6 +78,7 @@ static const int bpf2a64[] = {
 
 struct jit_ctx {
 	const struct bpf_prog *prog;
+	unsigned long *indirect_targets;
 	int idx;
 	int epilogue_offset;
 	int *offset;
@@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
 	return 0;
 }
 
+static bool maybe_indirect_target(int insn_off, unsigned long *targets_bitmap)
+{
+	return targets_bitmap && test_bit(insn_off, targets_bitmap);
+}
+
 /* JITs an eBPF instruction.
 * Returns:
 * 0 - successfully JITed an 8-byte eBPF instruction.
@@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	int ret;
 	bool sign_extend;
 
+	if (maybe_indirect_target(i, ctx->indirect_targets))
+		emit_bti(A64_BTI_J, ctx);
+
 	switch (code) {
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
@@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
+	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_insn_array(prog)) {
+		ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
+						GFP_KERNEL);
+		if (ctx.indirect_targets == NULL) {
+			prog = orig_prog;
+			goto out_off;
+		}
+		bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
+	}
+
 	ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
@@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog->aux->priv_stack_ptr = NULL;
 	}
 	kvfree(ctx.offset);
+	kvfree(ctx.indirect_targets);
 out_priv_stack:
 	kfree(jit_data);
 	prog->aux->jit_data = NULL;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a9b788c7b4aa..c81eb54f7b26 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3822,11 +3822,23 @@ void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
 
 #ifdef CONFIG_BPF_SYSCALL
 void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
+bool bpf_prog_has_insn_array(const struct bpf_prog *prog);
 #else
 static inline void
 bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
 {
 }
+
+static inline bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
+{
+	return false;
+}
+
+static inline void
+bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+}
 #endif
 
 #endif /* _LINUX_BPF_H */
diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
index 61ce52882632..ed20b186a1f5 100644
--- a/kernel/bpf/bpf_insn_array.c
+++ b/kernel/bpf/bpf_insn_array.c
@@ -299,3 +299,46 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
 		}
 	}
 }
+
+bool bpf_prog_has_insn_array(const struct bpf_prog *prog)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++) {
+		if (is_insn_array(prog->aux->used_maps[i]))
+			return true;
+	}
+	return false;
+}
+
+/*
+ * This function collects possible indirect jump targets in a BPF program. Since indirect jump
+ * targets can only be read from instruction arrays, it traverses all instruction arrays used
+ * by @prog. For each instruction in the arrays, it sets the corresponding bit in @bitmap.
+ */
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+	struct bpf_insn_array *insn_array;
+	struct bpf_map *map;
+	u32 xlated_off;
+	int i, j;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++) {
+		map = prog->aux->used_maps[i];
+		if (!is_insn_array(map))
+			continue;
+
+		insn_array = cast_insn_array(map);
+		for (j = 0; j < map->max_entries; j++) {
+			xlated_off = insn_array->values[j].xlated_off;
+			if (xlated_off == INSN_DELETED)
+				continue;
+			if (xlated_off < prog->aux->subprog_start)
+				continue;
+			xlated_off -= prog->aux->subprog_start;
+			if (xlated_off >= prog->len)
+				continue;
+			__set_bit(xlated_off, bitmap);
+		}
+	}
+}
-- 
2.47.3
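
A note on the mechanism (illustrative only, not part of the patch): assuming
the JIT lowers a BPF indirect jump to a plain BR through the address loaded
from the instruction array, the generated code around a possible target looks
roughly like this (register names and surrounding code are hypothetical):

	ldr	x7, [...]	// target address read from the insn array
	br	x7		// indirect jump
  target:
	bti	j		// landing pad, emitted by build_insn() when
				// maybe_indirect_target() returns true
	...			// JITed code for the target BPF instruction

With CONFIG_ARM64_BTI_KERNEL, JITed images run from guarded pages, so a BR
that lands on anything other than a valid landing pad such as "bti j" raises
the Branch Target exception shown in the oops above.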