From: Abhishek Dubey

In the conventional stack frame, tail_call_cnt is placed after the NVR save
area (BPF_PPC_STACK_SAVE). In the trampoline frame, however, the offset of
tail_call_cnt is after the stack alignment padding. The BPF JIT logic could
become complex when dealing with a frame-sensitive offset calculation for
tail_call_cnt. Having the same offset in both frames is the desired objective.

The trampoline frame does not have a BPF_PPC_STACK_SAVE area. Introducing one
would under-utilize extra memory meant only to align the offset of
tail_call_cnt. Another challenge is the variable alignment padding sitting at
the bottom of the trampoline frame, which requires additional handling to
compute the tail_call_cnt offset.

This patch addresses the above issues by moving tail_call_cnt to the bottom of
the stack frame, at offset 0, for both types of frames. This saves the
additional bytes BPF_PPC_STACK_SAVE would require in the trampoline frame, and
a common offset computation for tail_call_cnt serves both frames.

The changes in this patch are required by the third patch in the series, where
the reference to the main frame's tail_call_info is copied into the trampoline
frame from the previous frame.

Signed-off-by: Abhishek Dubey
---
 arch/powerpc/net/bpf_jit.h        |  4 ++++
 arch/powerpc/net/bpf_jit_comp64.c | 31 ++++++++++++++++++++-----------
 2 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 8334cd667bba..45d419c0ee73 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -72,6 +72,10 @@
 	} } while (0)
 
 #ifdef CONFIG_PPC64
+
+/* for tailcall counter */
+#define BPF_PPC_TAILCALL	8
+
 /* If dummy pass (!image), account for maximum possible instructions */
 #define PPC_LI64(d, i)	do {		\
 	if (!image)			\

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 1fe37128c876..39061cd742c1 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -20,13 +20,15 @@
 #include "bpf_jit.h"
 
 /*
- * Stack layout:
+ * Stack layout 1:
+ * Layout when setting up our own stack frame.
+ * Note: r1 at bottom, component offsets positive wrt r1.
  * Ensure the top half (upto local_tmp_var) stays consistent
  * with our redzone usage.
  *
  *		[	prev sp		] <-------------
- *		[   nv gpr save area	] 6*8		|
  *		[    tail_call_cnt	] 8		|
+ *		[   nv gpr save area	] 6*8		|
  *		[    local_tmp_var	] 24		|
  * fp (r31) -->	[   ebpf stack space	] upto 512	|
  *		[     frame header	] 32/112	|
@@ -36,10 +38,12 @@
 
 /* for gpr non volatile registers BPG_REG_6 to 10 */
 #define BPF_PPC_STACK_SAVE	(6*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS	32
+#define BPF_PPC_STACK_LOCALS	24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
-				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
+				 BPF_PPC_STACK_LOCALS + \
+				 BPF_PPC_STACK_SAVE + \
+				 BPF_PPC_TAILCALL)
 
 /* BPF register usage */
 #define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
@@ -87,27 +91,32 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 }
 
 /*
+ * Stack layout 2:
  * When not setting up our own stackframe, the redzone (288 bytes) usage is:
+ * Note: r1 from prev frame. Component offset negative wrt r1.
  *
  *		[	prev sp		] <-------------
  *		[	  ...		]		|
  * sp (r1) --->	[	stack pointer	] --------------
- *		[   nv gpr save area	] 6*8
  *		[    tail_call_cnt	] 8
+ *		[   nv gpr save area	] 6*8
  *		[    local_tmp_var	] 24
  *		[   unused red zone	] 224
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
-	if (bpf_has_stack_frame(ctx))
+	if (bpf_has_stack_frame(ctx)) {
+		/* Stack layout 1 */
 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
-	else
-		return -(BPF_PPC_STACK_SAVE + 32);
+	} else {
+		/* Stack layout 2 */
+		return -(BPF_PPC_TAILCALL + BPF_PPC_STACK_SAVE + BPF_PPC_STACK_LOCALS);
+	}
 }
 
 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-	return bpf_jit_stack_local(ctx) + 24;
+	return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
 }
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -115,7 +124,7 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
 		return (bpf_has_stack_frame(ctx) ?
 			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
-				- (8 * (32 - reg));
+				- (8 * (32 - reg)) - BPF_PPC_TAILCALL;
 
 	pr_err("BPF JIT is asking about unknown registers");
 	BUG();
@@ -145,7 +154,7 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 	if (ctx->seen & SEEN_TAILCALL) {
 		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
 		/* this goes in the redzone */
-		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
+		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
 	} else {
 		EMIT(PPC_RAW_NOP());
 		EMIT(PPC_RAW_NOP());
-- 
2.48.1

From: Abhishek Dubey

Enable tailcalls in combination with subprogs using a referencing scheme. The
actual tailcall count is always maintained in the tail_call_info variable in
the frame of the main function (also called the entry function). The
tail_call_info variable in the frame of every other subprog contains a
reference to the tail_call_info in the main function's frame. Dynamic
resolution interprets tail_call_info either as a value or as a reference,
depending on the context of the active frame when a tailcall is invoked.

Signed-off-by: Abhishek Dubey
---
 arch/powerpc/net/bpf_jit.h        | 12 +++++-
 arch/powerpc/net/bpf_jit_comp.c   | 10 ++++-
 arch/powerpc/net/bpf_jit_comp64.c | 68 +++++++++++++++++++++++--------
 3 files changed, 70 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 45d419c0ee73..5d735bc5e6bd 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -51,6 +51,12 @@
 		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
 	} while (0)
 
+/* Same as PPC_BCC_SHORT, except the valid dest is known prior to the call. */
+#define PPC_COND_BRANCH(cond, dest)					\
+	do {								\
+		long offset = (long)(dest) - CTX_NIA(ctx);		\
+		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+	} while (0)
 /*
  * Sign-extended 32-bit immediate load
  *
@@ -75,6 +81,8 @@
 
 /* for tailcall counter */
 #define BPF_PPC_TAILCALL	8
+/* for gpr non volatile registers BPG_REG_6 to 10 */
+#define BPF_PPC_STACK_SAVE	(6*8)
 
 /* If dummy pass (!image), account for maximum possible instructions */
 #define PPC_LI64(d, i)	do { \
@@ -170,6 +178,7 @@ struct codegen_context {
 	unsigned int alt_exit_addr;
 	u64 arena_vm_start;
 	u64 user_vm_start;
+	bool is_subprog;
 };
 
 #define bpf_to_ppc(r)	(ctx->b2p[r])
@@ -204,11 +213,10 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
 void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx);
 void bpf_jit_realloc_regs(struct codegen_context *ctx);
 int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
-
 int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
 			  struct codegen_context *ctx, int insn_idx,
 			  int jmp_off, int dst_reg, u32 code);
-
+int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx);
 #endif
 
 #endif

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 5e976730b2f5..069a8822c30d 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -206,6 +206,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
 	cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
 	cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
+	cgctx.is_subprog = bpf_is_subprog(fp);
 
 	/* Scouting faux-generate pass 0 */
 	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -435,6 +436,11 @@ void bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+bool bpf_jit_supports_subprog_tailcalls(void)
+{
+	return IS_ENABLED(CONFIG_PPC64);
+}
+
 bool bpf_jit_supports_kfunc_call(void)
 {
 	return true;
@@ -604,7 +610,7 @@ static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx,
 					       int func_frame_offset, int r4_off)
 {
 	if (IS_ENABLED(CONFIG_PPC64)) {
-		/* See bpf_jit_stack_tailcallcnt() */
+		/* See bpf_jit_stack_tailcallinfo_offset() */
 		int tailcallcnt_offset = 7 * 8;
 
 		EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
@@ -619,7 +625,7 @@ static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx,
 						 int func_frame_offset, int r4_off)
 {
 	if (IS_ENABLED(CONFIG_PPC64)) {
-		/* See bpf_jit_stack_tailcallcnt() */
+		/* See bpf_jit_stack_tailcallinfo_offset() */
 		int tailcallcnt_offset = 7 * 8;
 
 		EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset));

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 39061cd742c1..cebf81fbd59f 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -26,8 +26,12 @@
  * Ensure the top half (upto local_tmp_var) stays consistent
  * with our redzone usage.
  *
+ * tail_call_info - stores the tailcall count value in the main program's
+ *		    frame; stores a reference to the main frame's
+ *		    tail_call_info in a subprog's frame.
+ *
  *		[	prev sp		] <-------------
- *		[    tail_call_cnt	] 8		|
+ *		[    tail_call_info	] 8		|
  *		[   nv gpr save area	] 6*8		|
  *		[    local_tmp_var	] 24		|
  * fp (r31) -->	[   ebpf stack space	] upto 512	|
  *		[     frame header	] 32/112	|
  * sp (r1) --->	[	stack pointer	] --------------
@@ -35,8 +39,6 @@
  */
 
-/* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE	(6*8)
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS	24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
@@ -98,7 +100,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *		[	prev sp		] <-------------
  *		[	  ...		]		|
  * sp (r1) --->	[	stack pointer	] --------------
- *		[    tail_call_cnt	] 8
+ *		[    tail_call_info	] 8
  *		[   nv gpr save area	] 6*8
  *		[    local_tmp_var	] 24
  *		[   unused red zone	] 224
@@ -114,7 +116,7 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
 	}
 }
 
-static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
+int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
 {
 	return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
 }
@@ -147,17 +149,32 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 #endif
 
 	/*
-	 * Initialize tail_call_cnt if we do tail calls.
-	 * Otherwise, put in NOPs so that it can be skipped when we are
-	 * invoked through a tail call.
+	 * The tail call count (tcc) is saved and updated only in the main
+	 * program's frame, and the address of the tcc in the main program's
+	 * frame (tcc_ptr) is saved in a subprog's frame.
+	 *
+	 * The tail_call_info slot of any frame is interpreted as either
+	 * tcc_ptr or the tcc value, depending on whether it is greater
+	 * than MAX_TAIL_CALL_CNT or not.
 	 */
-	if (ctx->seen & SEEN_TAILCALL) {
+	if (!ctx->is_subprog) {
 		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
 		/* this goes in the redzone */
 		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
 	} else {
-		EMIT(PPC_RAW_NOP());
-		EMIT(PPC_RAW_NOP());
+		/*
+		 * if tail_call_info < MAX_TAIL_CALL_CNT
+		 *	main prog calling first subprog -> copy reference
+		 * else
+		 *	subsequent subprog calling another subprog -> directly copy content
+		 */
+		EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, 0));
+		EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), -(BPF_PPC_TAILCALL)));
+		EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+		PPC_COND_BRANCH(COND_GT, CTX_NIA(ctx) + 8);
+		EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2),
+				  -(BPF_PPC_TAILCALL)));
+		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_TAILCALL)));
 	}
 
 	if (bpf_has_stack_frame(ctx)) {
@@ -352,19 +369,38 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
 	PPC_BCC_SHORT(COND_GE, out);
 
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+	PPC_COND_BRANCH(COND_LE, CTX_NIA(ctx) + 8);
+
+	/* dereference TMP_REG_1 */
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 0));
+
 	/*
-	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
+	 * if (tail_call_info == MAX_TAIL_CALL_CNT)
 	 *   goto out;
 	 */
-	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
-	PPC_BCC_SHORT(COND_GE, out);
+	PPC_COND_BRANCH(COND_EQ, out);
 
 	/*
-	 * tail_call_cnt++;
+	 * tail_call_info++; <- Actual value of tcc here
 	 */
 	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
-	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+
+	/*
+	 * Before writing the updated tail_call_info, distinguish whether the
+	 * current frame stores a reference to tail_call_info or the actual
+	 * tcc value in tail_call_info.
+	 */
+	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_2), MAX_TAIL_CALL_CNT));
+	PPC_COND_BRANCH(COND_GT, CTX_NIA(ctx) + 8);
+
+	/* First get the address of tail_call_info */
+	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx)));
+	/* Write back the updated value to tail_call_info */
+	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
 
 	/* prog = array->ptrs[index]; */
 	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
-- 
2.48.1

From: Abhishek Dubey

The trampoline mechanism sets up its own stack frame and an additional dummy
frame. Additional JIT instructions are needed to handle tail_call_info
dereferencing in the trampoline's context.

We do not add tail_call_info handling to both of the frames noted above;
rather, space for tail_call_info is added at the bottom of the trampoline
frame for ppc64. This makes the trampoline's frame consistent with the layout
of all other frames with respect to the tail_call_info offset.

Signed-off-by: Abhishek Dubey
---
 arch/powerpc/net/bpf_jit_comp.c | 83 ++++++++++++++++++++++-----------
 1 file changed, 56 insertions(+), 27 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 069a8822c30d..e3088cf089d1 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -606,33 +606,58 @@ static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context
 	return 0;
 }
 
-static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx,
-					       int func_frame_offset, int r4_off)
-{
-	if (IS_ENABLED(CONFIG_PPC64)) {
-		/* See bpf_jit_stack_tailcallinfo_offset() */
-		int tailcallcnt_offset = 7 * 8;
-
-		EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
-		EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset));
-	} else {
-		/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
-		EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
-	}
-}
+/*
+ * Refer to the label 'Generated stack layout' in this file for the actual
+ * stack layout during trampoline invocation.
+ *
+ * Refer to __arch_prepare_bpf_trampoline() for stack component details.
+ *
+ * The tailcall count/reference is present in the caller's stack frame. It is
+ * required to copy the content of tail_call_info before calling the actual
+ * function to which the trampoline is attached.
+ */
 
-static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx,
-						 int func_frame_offset, int r4_off)
+static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_context *ctx,
+						int func_frame_offset,
+						int bpf_dummy_frame_size, int r4_off)
 {
 	if (IS_ENABLED(CONFIG_PPC64)) {
 		/* See bpf_jit_stack_tailcallinfo_offset() */
-		int tailcallcnt_offset = 7 * 8;
+		int tailcallinfo_offset = BPF_PPC_TAILCALL;
+		/*
+		 * func_frame_offset =				...(1)
+		 *	bpf_dummy_frame_size + trampoline_frame_size
+		 */
+		EMIT(PPC_RAW_LD(_R4, _R1, func_frame_offset));
+		EMIT(PPC_RAW_LD(_R3, _R4, -tailcallinfo_offset));
+
+		/*
+		 * Set tail_call_info in the trampoline's frame depending on
+		 * whether the previous frame held a value or a reference.
+		 */
+		EMIT(PPC_RAW_CMPLWI(_R3, MAX_TAIL_CALL_CNT));
+		PPC_COND_BRANCH(COND_GT, CTX_NIA(ctx) + 8);
+		EMIT(PPC_RAW_ADDI(_R3, _R4, bpf_jit_stack_tailcallinfo_offset(ctx)));
+		/*
+		 * From ...(1) above:
+		 * trampoline_frame_bottom =			...(2)
+		 *	func_frame_offset - bpf_dummy_frame_size
+		 *
+		 * Using ...(2) derived above:
+		 * trampoline_tail_call_info_offset =		...(3)
+		 *	trampoline_frame_bottom - tailcallinfo_offset
+		 *
+		 * From ...(3):
+		 * Use trampoline_tail_call_info_offset to write the reference
+		 * to main's tail_call_info into the trampoline frame.
+		 */
+		EMIT(PPC_RAW_STL(_R3, _R1, (func_frame_offset - bpf_dummy_frame_size)
+				 - tailcallinfo_offset));
 
-		EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset));
-		EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
 	} else {
 		/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
-		EMIT(PPC_RAW_STL(_R4, _R1, r4_off));
+		EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
 	}
 }
@@ -720,6 +745,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 * LR save area		[ r0 save (64-bit)	]	| header
 	 *			[ r0 save (32-bit)	]	|
 	 * dummy frame for unwind [ back chain 1	] --
+	 *			[ tail_call_info	] non optional - 64-bit powerpc
 	 *			[ padding		] align stack frame
 	 * r4_off		[ r4 (tailcallcnt)	] optional - 32-bit powerpc
 	 * alt_lr_off		[ real lr (ool stub)	] optional - actual lr
@@ -801,8 +827,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 		}
 	}
 
-	/* Padding to align stack frame, if any */
-	bpf_frame_size = round_up(bpf_frame_size, SZL * 2);
+	if (!(bpf_frame_size % (2 * SZL))) {
+		/* Stack is 16-byte aligned */
+		/* Room for padding followed by 64-bit tail_call_info */
+		bpf_frame_size += SZL + BPF_PPC_TAILCALL;
+	} else {
+		/* Room for 64-bit tail_call_info */
+		bpf_frame_size += BPF_PPC_TAILCALL;
+	}
 
 	/* Dummy frame size for proper unwind - includes 64-bytes red zone for 64-bit powerpc */
 	bpf_dummy_frame_size = STACK_FRAME_MIN_SIZE + 64;
@@ -902,7 +934,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	/* Replicate tail_call_cnt before calling the original BPF prog */
 	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
-		bpf_trampoline_setup_tail_call_cnt(image, ctx, func_frame_offset, r4_off);
+		bpf_trampoline_setup_tail_call_info(image, ctx, func_frame_offset,
+						    bpf_dummy_frame_size, r4_off);
 
 	/* Restore args */
 	bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off);
@@ -917,10 +950,6 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	/* Store return value for bpf prog to access */
 	EMIT(PPC_RAW_STL(_R3, _R1, retval_off));
 
-	/* Restore updated tail_call_cnt */
-	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
-		bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off);
-
 	/* Reserve space to patch branch instruction to skip fexit progs */
 	if (ro_image) /* image is NULL for dummy pass */
 		im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
-- 
2.48.1

From: Abhishek Dubey

Implement arch_bpf_stack_walk() for powerpc. It is used by bpf_throw() to
unwind the stack up to the frame of the exception-boundary program during BPF
exception handling, and is necessary to support BPF exceptions on PowerPC.
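For illustration, a callback passed to this walker sees each frame as below
(a minimal sketch only; dump_bpf_frame() is a hypothetical consumer, not part
of this series, and on ppc64 the same frame address is passed for both the sp
and bp arguments):

	/* Hypothetical consume_fn for arch_bpf_stack_walk() */
	static bool dump_bpf_frame(void *cookie, u64 ip, u64 sp, u64 bp)
	{
		/* sp == bp here: both carry the back-chain frame address */
		pr_info("frame 0x%llx: return address %pS\n", sp, (void *)ip);

		/* return false to stop the unwind early */
		return true;
	}

	arch_bpf_stack_walk(dump_bpf_frame, NULL);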
Signed-off-by: Abhishek Dubey
---
 arch/powerpc/net/bpf_jit_comp64.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index cebf81fbd59f..ec58395f74f7 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -247,6 +247,34 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	bpf_jit_build_fentry_stubs(image, ctx);
 }
 
+void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), void *cookie)
+{
+	/* callback processing always happens in the current context */
+	unsigned long fp = current_stack_frame();
+
+	for (;;) {
+		unsigned long *frame = (unsigned long *)fp;
+		unsigned long ip;
+
+		if (!validate_sp(fp, current))
+			return;
+
+		ip = frame[STACK_FRAME_LR_SAVE];
+		if (!ip)
+			break;
+
+		/*
+		 * The consume_fn common code expects the stack pointer (sp)
+		 * in the third argument. There is no separate sp to pass on
+		 * ppc64; pass the frame pointer instead.
+		 */
+		if (ip && !consume_fn(cookie, ip, fp, fp))
+			break;
+
+		fp = frame[0];
+	}
+}
+
 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 {
 	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
-- 
2.48.1

From: Abhishek Dubey

Modify the prologue/epilogue generation code so that the exception callback
can use the stack frame of the program marked as the exception boundary,
where the callee-saved registers are stored.

As per the ppc64 ABIv2 documentation[1], r14-r31 are callee-saved registers.
BPF programs on ppc64 already save registers r26-r31. Saving the remaining
set of callee-saved registers (r14-r25) is handled in the next patch.

[1] https://ftp.rtems.org/pub/rtems/people/sebh/ABI64BitOpenPOWERv1.1_16July2015_pub.pdf

Signed-off-by: Abhishek Dubey
---
 arch/powerpc/net/bpf_jit.h        |  2 ++
 arch/powerpc/net/bpf_jit_comp.c   |  7 ++++
 arch/powerpc/net/bpf_jit_comp64.c | 53 +++++++++++++++++++++----------
 3 files changed, 45 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 5d735bc5e6bd..fb548ae5d143 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -179,6 +179,8 @@ struct codegen_context {
 	u64 arena_vm_start;
 	u64 user_vm_start;
 	bool is_subprog;
+	bool exception_boundary;
+	bool exception_cb;
 };

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e3088cf089d1..26991940d36e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -207,6 +207,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
 	cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
 	cgctx.is_subprog = bpf_is_subprog(fp);
+	cgctx.exception_boundary = fp->aux->exception_boundary;
+	cgctx.exception_cb = fp->aux->exception_cb;
 
 	/* Scouting faux-generate pass 0 */
 	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -436,6 +438,11 @@ void bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+bool bpf_jit_supports_exceptions(void)
+{
+	return IS_ENABLED(CONFIG_PPC64);
+}
+
 bool bpf_jit_supports_subprog_tailcalls(void)
 {
 	return IS_ENABLED(CONFIG_PPC64);

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index ec58395f74f7..a6083dd9786c 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -89,7 +89,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 	 *   - the bpf program uses its stack area
 	 * The latter condition is deduced from the usage of BPF_REG_FP
 	 */
-	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
+	return ctx->seen & SEEN_FUNC ||
+	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
+	       ctx->exception_cb;
 }
@@ -190,23 +192,32 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 	}
 
-	/*
-	 * Back up non-volatile regs -- BPF registers 6-10
-	 * If we haven't created our own stack frame, we save these
-	 * in the protected zone below the previous stack frame
-	 */
-	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
-			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
+	if (!ctx->exception_cb) {
+		/*
+		 * Back up non-volatile regs -- BPF registers 6-10
+		 * If we haven't created our own stack frame, we save these
+		 * in the protected zone below the previous stack frame
+		 */
+		for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+			if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+				EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
+						 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
-	if (ctx->arena_vm_start)
-		EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
+		if (ctx->exception_boundary || ctx->arena_vm_start)
+			EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
 				 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
-	/* Setup frame pointer to point to the bpf stack area */
-	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
-		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
+		/* Setup frame pointer to point to the bpf stack area */
+		if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
+			EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
 				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
+	} else {
+		/*
+		 * The exception callback receives the frame pointer of the
+		 * main program as the third argument.
+		 */
+		EMIT(PPC_RAW_MR(_R1, _R5));
+	}
 
 	if (ctx->arena_vm_start)
 		PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
@@ -218,17 +229,25 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
 
 	/* Restore NVRs */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+		if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
-	if (ctx->arena_vm_start)
+	if (ctx->exception_cb || ctx->arena_vm_start)
 		EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
 				bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
+	if (ctx->exception_cb) {
+		/*
+		 * The LR value from the boundary frame is received as the
+		 * second parameter in the exception callback.
+		 */
+		EMIT(PPC_RAW_MTLR(_R4));
+	}
+
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
 		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
-		if (ctx->seen & SEEN_FUNC) {
+		if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 			EMIT(PPC_RAW_MTLR(_R0));
 		}
-- 
2.48.1

From: Abhishek Dubey

The bpf_throw() function never returns; if it has clobbered any callee-saved
registers, they will remain clobbered. The prologue must take care of saving
all callee-saved registers in the frame of the exception-boundary program.
Later, these additional non-volatile registers (r14-r25), along with the
other NVRs, are restored in the epilogue of the exception callback.

To achieve the above objective, the frame size is determined dynamically to
accommodate the additional non-volatile registers in the exception boundary's
frame. For a program that is not an exception boundary, the frame size
remains unchanged. The additional instructions to save and restore r14-r25
are emitted only for the exception-boundary program and the exception
callback, respectively.
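As a sanity check on the frame arithmetic introduced below (a sketch of the
arithmetic only, assuming the ELFv2 STACK_FRAME_MIN_SIZE of 32 bytes; ELFv1
uses 112 and stays equally aligned):

	BPF_PPC_STACKFRAME     =  32 (STACK_FRAME_MIN_SIZE)
	                       +  24 (BPF_PPC_STACK_LOCALS)
	                       +  48 (BPF_PPC_STACK_SAVE, 6*8)
	                       +   8 (BPF_PPC_TAILCALL)
	                       = 112 -> quadword aligned

	BPF_PPC_EXC_STACKFRAME = 112 + 96 (BPF_PPC_EXC_STACK_SAVE, 12*8)
	                       = 208 -> still quadword aligned

Since 12*8 is a multiple of 16, adding the r14-r25 save area preserves the
quadword alignment without extra padding.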
Signed-off-by: Abhishek Dubey
---
 arch/powerpc/net/bpf_jit_comp64.c | 70 +++++++++++++++++++++++++++----
 1 file changed, 63 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index a6083dd9786c..941e0818c9ec 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -32,21 +32,37 @@
 *
 *		[	prev sp		] <-------------
 *		[    tail_call_info	] 8		|
- *		[   nv gpr save area	] 6*8		|
+ *		[   nv gpr save area	] 6*8 + (12*8)	|
 *		[    local_tmp_var	] 24		|
 * fp (r31) -->	[   ebpf stack space	] upto 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[	stack pointer	] --------------
+ *
+ * Additional (12*8) in 'nv gpr save area' only in case of
+ * exception boundary.
 */
 
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS	24
+
+/*
+ * for additional non volatile registers (r14-r25) to be saved
+ * at exception boundary
+ */
+#define BPF_PPC_EXC_STACK_SAVE	(12*8)
+
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + \
				 BPF_PPC_STACK_SAVE + \
				 BPF_PPC_TAILCALL)
 
+/*
+ * same as BPF_PPC_STACKFRAME with save area for additional
+ * non volatile registers saved at exception boundary.
+ * This is quad-word aligned.
+ */
+#define BPF_PPC_EXC_STACKFRAME	(BPF_PPC_STACKFRAME + BPF_PPC_EXC_STACK_SAVE)
+
 /* BPF register usage */
 #define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
@@ -103,9 +119,12 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 *		[	  ...		]		|
 * sp (r1) --->	[	stack pointer	] --------------
 *		[    tail_call_info	] 8
- *		[   nv gpr save area	] 6*8
+ *		[   nv gpr save area	] 6*8 + (12*8)
 *		[    local_tmp_var	] 24
 *		[   unused red zone	] 224
+ *
+ * Additional (12*8) in 'nv gpr save area' only in case of
+ * exception boundary.
 */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
 {
@@ -114,7 +133,11 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
 		/* Stack layout 1 */
 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 	} else {
 		/* Stack layout 2 */
-		return -(BPF_PPC_TAILCALL + BPF_PPC_STACK_SAVE + BPF_PPC_STACK_LOCALS);
+		return -(BPF_PPC_TAILCALL +
+			 BPF_PPC_STACK_SAVE +
+			 (ctx->exception_boundary || ctx->exception_cb ?
+			  BPF_PPC_EXC_STACK_SAVE : 0) +
+			 BPF_PPC_STACK_LOCALS);
 	}
 }
@@ -125,9 +148,19 @@ int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 {
-	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
+	int min_valid_nvreg = BPF_PPC_NVR_MIN;
+	/* Default frame size for all cases except exception boundary */
+	int frame_nvr_size = BPF_PPC_STACKFRAME;
+
+	/* Consider all nv regs for handling exceptions */
+	if (ctx->exception_boundary || ctx->exception_cb) {
+		min_valid_nvreg = _R14;
+		frame_nvr_size = BPF_PPC_EXC_STACKFRAME;
+	}
+
+	if (reg >= min_valid_nvreg && reg < 32)
 		return (bpf_has_stack_frame(ctx) ?
-			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
+			(frame_nvr_size + ctx->stack_size) : 0)
				- (8 * (32 - reg)) - BPF_PPC_TAILCALL;
 
 	pr_err("BPF JIT is asking about unknown registers");
 	BUG();
@@ -189,7 +222,20 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
 		}
 
-		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
+		int stack_expand = ctx->exception_boundary || ctx->exception_cb ?
					BPF_PPC_EXC_STACKFRAME : BPF_PPC_STACKFRAME;
+		EMIT(PPC_RAW_STDU(_R1, _R1, -(stack_expand + ctx->stack_size)));
+	}
+
+	/*
+	 * A program acting as an exception boundary pushes r14..r25 in
+	 * addition to the BPF callee-saved non-volatile registers. The
+	 * exception callback uses the boundary program's stack frame; the
+	 * additionally saved registers are recovered in the epilogue of
+	 * the exception callback.
+	 */
+	if (ctx->exception_boundary) {
+		for (i = _R14; i <= _R25; i++)
+			EMIT(PPC_RAW_STD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
 	}
 
 	if (!ctx->exception_cb) {
@@ -237,6 +283,13 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
				bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
 	if (ctx->exception_cb) {
+		/*
+		 * Recover the additionally saved non-volatile registers from
+		 * the stack frame of the exception-boundary program.
+		 */
+		for (i = _R14; i <= _R25; i++)
+			EMIT(PPC_RAW_LD(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+
 		/*
 		 * The LR value from the boundary frame is received as the
 		 * second parameter in the exception callback.
@@ -246,7 +299,10 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
 
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
-		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
+		int stack_shrink = ctx->exception_cb || ctx->exception_boundary ?
					BPF_PPC_EXC_STACKFRAME : BPF_PPC_STACKFRAME;
+		EMIT(PPC_RAW_ADDI(_R1, _R1, stack_shrink + ctx->stack_size));
+
 		if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 			EMIT(PPC_RAW_MTLR(_R0));
-- 
2.48.1
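Taken together, the tail_call_info handling emitted across this series can be
summarized in C-like pseudocode (a conceptual sketch only; read_slot() and
write_slot() are hypothetical stand-ins for the emitted loads and stores, and
the value-vs-reference test relies on kernel addresses always being greater
than MAX_TAIL_CALL_CNT):

	/* At a tailcall site, with r1 the current stack pointer */
	u64 info = read_slot(r1 + tail_call_info_offset);
	u64 tcc = (info > MAX_TAIL_CALL_CNT) ? read_slot(info)	/* reference */
					     : info;		/* value */

	if (tcc == MAX_TAIL_CALL_CNT)
		goto out;				/* limit reached */
	tcc++;

	if (info > MAX_TAIL_CALL_CNT)
		write_slot(info, tcc);			/* through the reference */
	else
		write_slot(r1 + tail_call_info_offset, tcc);	/* in place */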