The RV32 BPF JIT compiler currently only supports the BPF_ADD atomic
operation. Other 32 bit atomic operations (and, or, xor, xchg) and their
BPF_FETCH variants are not supported and gracefully fall back to the
interpreter.

Since the RISC-V A extension is required for Linux on RV32, we can
natively support these 32-bit BPF atomic operations by mapping them
directly to the corresponding RISC-V amo*.w instructions.

Implement BPF_ADD, BPF_AND, BPF_OR, BPF_XOR, and BPF_XCHG with and
without BPF_FETCH. BPF_CMPXCHG requires a more complex lr.w/sc.w loop
and is left to fall back to the interpreter.

Before this patch:
[  138.862161] test_bpf: Summary: 1054 PASSED, 0 FAILED, [843/1042 JIT'ed]

After this patch:
[  157.024124] test_bpf: Summary: 1054 PASSED, 0 FAILED, [902/1042 JIT'ed]

Signed-off-by: Kuan-Wei Chiu
---
 arch/riscv/net/bpf_jit_comp32.c | 50 +++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 11 deletions(-)

diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index f8509950fed4..7fd726a09b26 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -877,7 +877,7 @@ static int emit_load_r64(const s8 *dst, const s8 *src, s16 off,
 
 static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
 			  struct rv_jit_context *ctx, const u8 size,
-			  const u8 mode)
+			  const u8 mode, s32 imm)
 {
 	const s8 *tmp1 = bpf2rv32[TMP_REG_1];
 	const s8 *tmp2 = bpf2rv32[TMP_REG_2];
@@ -902,11 +902,43 @@ static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
 		case BPF_MEM:
 			emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
 			break;
-		case BPF_ATOMIC: /* Only BPF_ADD supported */
-			emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
-			     ctx);
+		case BPF_ATOMIC:
+		{
+			bool is_fetch = (imm & BPF_FETCH) || (imm == BPF_XCHG);
+			s8 fetch_reg = is_fetch ? lo(rs) : RV_REG_ZERO;
+			int aq = is_fetch ? 1 : 0;
+			int rl = is_fetch ? 1 : 0;
+
+			switch (imm) {
+			case BPF_ADD:
+			case BPF_ADD | BPF_FETCH:
+				emit(rv_amoadd_w(fetch_reg, lo(rs), RV_REG_T0, aq, rl), ctx);
+				break;
+			case BPF_AND:
+			case BPF_AND | BPF_FETCH:
+				emit(rv_amoand_w(fetch_reg, lo(rs), RV_REG_T0, aq, rl), ctx);
+				break;
+			case BPF_OR:
+			case BPF_OR | BPF_FETCH:
+				emit(rv_amoor_w(fetch_reg, lo(rs), RV_REG_T0, aq, rl), ctx);
+				break;
+			case BPF_XOR:
+			case BPF_XOR | BPF_FETCH:
+				emit(rv_amoxor_w(fetch_reg, lo(rs), RV_REG_T0, aq, rl), ctx);
+				break;
+			case BPF_XCHG:
+				emit(rv_amoswap_w(fetch_reg, lo(rs), RV_REG_T0, aq, rl), ctx);
+				break;
+			default:
+				return -1;
+			}
+			if (is_fetch) {
+				emit(rv_addi(hi(rs), RV_REG_ZERO, 0), ctx);
+				bpf_put_reg64(src, rs, ctx);
+			}
 			break;
+		}
 		}
 		break;
 	case BPF_DW:
 		emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
@@ -1308,20 +1340,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		}
 
 		if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
-				   BPF_MODE(code)))
+				   BPF_MODE(code), 0))
 			return -1;
 		break;
 
 	case BPF_STX | BPF_ATOMIC | BPF_W:
-		if (insn->imm != BPF_ADD) {
-			pr_info_once(
-				"bpf-jit: not supported: atomic operation %02x ***\n",
-				insn->imm);
+		if (insn->imm == BPF_CMPXCHG)
 			return -EFAULT;
-		}
 
 		if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
-				   BPF_MODE(code)))
+				   BPF_MODE(code), insn->imm))
 			return -1;
 		break;
 
-- 
2.54.0.563.g4f69b47b94-goog