Extend the linked register tracking to support:

1. Negative offsets via BPF_ADD (e.g., r1 += -4)
2. BPF_SUB operations (e.g., r1 -= 4, which is treated as r1 += -4)

Previously, the verifier only tracked positive constant deltas between
linked registers, and only via BPF_ADD. This limitation meant that for
patterns like:

  r1 = r0
  r1 += -4
  if r1 s>= 0 goto ...
  // r1 s>= 0 implies r0 >= 4

the verifier couldn't propagate the bounds back to r0.

With this change, the verifier can track negative deltas in reg->off
(which is already an s32), enabling bounds propagation for the above
pattern.

The changes include:

- Accept BPF_SUB in addition to BPF_ADD
- Change the overflow check from val > (u32)S32_MAX to checking whether
  val fits in the s32 range: (s64)val != (s64)(s32)val
- For BPF_SUB, negate the offset, with a guard against S32_MIN overflow
  (negating S32_MIN has no s32 representation)
- Keep the !alu32 restriction, as 32-bit ALU has known issues with the
  upper bits

Signed-off-by: Puranjay Mohan
---
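A note for reviewers, not for the commit log: below is a minimal
user-space sketch of the new delta computation. The helper name
linked_delta() and the standalone harness are illustrative only; in
the kernel this logic lives inline in adjust_reg_min_max_vals().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the verifier's inline logic: compute the s32 delta
 * recorded in reg->off for "rX += imm"/"rX -= imm", or report that
 * tracking must be abandoned (the register's ID is cleared).
 */
static bool linked_delta(uint64_t val, bool is_sub, int32_t *off)
{
	/* reject values outside s32, cf. (s64)val != (s64)(s32)val */
	if ((int64_t)val != (int64_t)(int32_t)val)
		return false;

	*off = (int32_t)val;
	if (is_sub) {
		/* negating S32_MIN would overflow s32 */
		if (*off == INT32_MIN)
			return false;
		*off = -*off;
	}
	return true;
}

int main(void)
{
	int32_t off;
	bool ok;

	ok = linked_delta((uint64_t)(int64_t)-4, false, &off);	/* r1 += -4 */
	printf("%d %d\n", ok, off);				/* 1 -4 */
	ok = linked_delta(4, true, &off);			/* r1 -= 4 */
	printf("%d %d\n", ok, off);				/* 1 -4 */
	ok = linked_delta((uint64_t)(int64_t)INT32_MIN, true, &off);
	printf("%d\n", ok);				/* 0: tracking cleared */
	return 0;
}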
 kernel/bpf/verifier.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 53635ea2e41b..5eca33e02d6e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15710,22 +15710,34 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	 * update r1 after 'if' condition.
 	 */
 	if (env->bpf_capable &&
-	    BPF_OP(insn->code) == BPF_ADD && !alu32 &&
-	    dst_reg->id && is_reg_const(src_reg, false)) {
+	    (BPF_OP(insn->code) == BPF_ADD || BPF_OP(insn->code) == BPF_SUB) &&
+	    !alu32 && dst_reg->id && is_reg_const(src_reg, false)) {
 		u64 val = reg_const_value(src_reg, false);
+		s32 off;
 
-		if ((dst_reg->id & BPF_ADD_CONST) ||
-		    /* prevent overflow in sync_linked_regs() later */
-		    val > (u32)S32_MAX) {
+		if ((s64)val != (s64)(s32)val)
+			goto clear_id;
+
+		off = (s32)val;
+
+		if (BPF_OP(insn->code) == BPF_SUB) {
+			/* Negating S32_MIN would overflow */
+			if (off == S32_MIN)
+				goto clear_id;
+			off = -off;
+		}
+
+		if (dst_reg->id & BPF_ADD_CONST) {
 			/*
 			 * If the register already went through rX += val
 			 * we cannot accumulate another val into rx->off.
 			 */
+clear_id:
 			dst_reg->off = 0;
 			dst_reg->id = 0;
 		} else {
 			dst_reg->id |= BPF_ADD_CONST;
-			dst_reg->off = val;
+			dst_reg->off = off;
 		}
 	} else {
 		/*
@@ -16821,7 +16833,7 @@ static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_s
 		s32 saved_off = reg->off;
 
 		fake_reg.type = SCALAR_VALUE;
-		__mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off);
+		__mark_reg_known(&fake_reg, (s64)reg->off - (s64)known_reg->off);
 		/* reg = known_reg; reg += delta */
 		copy_register_state(reg, known_reg);
--
2.47.3

Add tests for linked register tracking with negative offsets and
BPF_SUB.

Success cases (64-bit ALU, tracking works):
- scalars_neg: r1 += -4 with a signed comparison
- scalars_neg_sub: r1 -= 4 with a signed comparison
- scalars_pos: r1 += 4 with an unsigned comparison
- scalars_sub_neg_imm: r1 -= -4 (equivalent to r1 += 4)

Failure cases (tracking disabled; these document the limitations):
- scalars_neg_alu32_add: 32-bit ADD is not tracked
- scalars_neg_alu32_sub: 32-bit SUB is not tracked
- scalars_double_add: a double ADD clears the ID

Large-delta tests (verifying the 64-bit arithmetic in sync_linked_regs()):
- scalars_sync_delta_overflow: S32_MIN offset
- scalars_sync_delta_overflow_large_range: S32_MAX offset

Signed-off-by: Puranjay Mohan
---
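Reviewer note, not for the commit log: the two large-delta tests
exercise the (s64) widening in sync_linked_regs(). Below is a small
standalone illustration of the wrap the old (s32) arithmetic produced,
using the same offsets as scalars_sync_delta_overflow. The kernel is
built with -fno-strict-overflow, so the unsigned detour here only
stands in for that wrapping behaviour:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* offsets as recorded in reg->off for the two linked registers */
	int32_t reg_off = INT32_MIN;	/* r1 += S32_MIN */
	int32_t known_off = 1;		/* r2 += 1 */

	/* old: (s32)reg->off - (s32)known_reg->off wraps around */
	int32_t bad = (int32_t)((uint32_t)reg_off - (uint32_t)known_off);
	/* new: widening to s64 keeps the true delta of S32_MIN - 1 */
	int64_t good = (int64_t)reg_off - (int64_t)known_off;

	printf("s32 delta: %d\n", bad);			/* 2147483647 (wrapped) */
	printf("s64 delta: %lld\n", (long long)good);	/* -2147483649 */
	return 0;
}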
 .../bpf/progs/verifier_linked_scalars.c       | 213 ++++++++++++++++++
 1 file changed, 213 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
index 8f755d2464cf..2e1ef0f96717 100644
--- a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
+++ b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
@@ -31,4 +31,217 @@ l1:							\
 "	::: __clobber_all);
 }
 
+SEC("socket")
+__description("scalars: linked scalars with negative offset")
+__success
+__naked void scalars_neg(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	r0 &= 0xff;					\
+	r1 = r0;					\
+	r1 += -4;					\
+	if r1 s< 0 goto l2;				\
+	if r0 != 0 goto l2;				\
+	r0 /= 0;					\
+l2:							\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/* Same test, but using BPF_SUB instead of BPF_ADD with a negative immediate */
+SEC("socket")
+__description("scalars: linked scalars with SUB")
+__success
+__naked void scalars_neg_sub(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	r0 &= 0xff;					\
+	r1 = r0;					\
+	r1 -= 4;					\
+	if r1 s< 0 goto l2_sub;				\
+	if r0 != 0 goto l2_sub;				\
+	r0 /= 0;					\
+l2_sub:							\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/* 32-bit ALU: linked scalar tracking is not supported, so the ID is cleared */
+SEC("socket")
+__description("scalars: linked scalars 32-bit ADD not tracked")
+__failure
+__msg("div by zero")
+__naked void scalars_neg_alu32_add(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	w0 &= 0xff;					\
+	w1 = w0;					\
+	w1 += -4;					\
+	if w1 s< 0 goto l2_alu32_add;			\
+	if w0 != 0 goto l2_alu32_add;			\
+	r0 /= 0;					\
+l2_alu32_add:						\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/* 32-bit ALU: linked scalar tracking is not supported, so the ID is cleared */
+SEC("socket")
+__description("scalars: linked scalars 32-bit SUB not tracked")
+__failure
+__msg("div by zero")
+__naked void scalars_neg_alu32_sub(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	w0 &= 0xff;					\
+	w1 = w0;					\
+	w1 -= 4;					\
+	if w1 s< 0 goto l2_alu32_sub;			\
+	if w0 != 0 goto l2_alu32_sub;			\
+	r0 /= 0;					\
+l2_alu32_sub:						\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/* Positive offset: r1 = r0 + 4, so r1 >= 6 implies r0 >= 2, hence r0 != 0 */
+SEC("socket")
+__description("scalars: linked scalars positive offset")
+__success
+__naked void scalars_pos(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	r0 &= 0xff;					\
+	r1 = r0;					\
+	r1 += 4;					\
+	if r1 < 6 goto l2_pos;				\
+	if r0 != 0 goto l2_pos;				\
+	r0 /= 0;					\
+l2_pos:							\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/* SUB with a negative immediate: r1 -= -4 is equivalent to r1 += 4 */
+SEC("socket")
+__description("scalars: linked scalars SUB negative immediate")
+__success
+__naked void scalars_sub_neg_imm(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	r0 &= 0xff;					\
+	r1 = r0;					\
+	r1 -= -4;					\
+	if r1 < 6 goto l2_sub_neg;			\
+	if r0 != 0 goto l2_sub_neg;			\
+	r0 /= 0;					\
+l2_sub_neg:						\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/* A double ADD clears the ID (offsets cannot be accumulated) */
+SEC("socket")
+__description("scalars: linked scalars double ADD clears ID")
+__failure
+__msg("div by zero")
+__naked void scalars_double_add(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	r0 &= 0xff;					\
+	r1 = r0;					\
+	r1 += 2;					\
+	r1 += 2;					\
+	if r1 < 6 goto l2_double;			\
+	if r0 != 0 goto l2_double;			\
+	r0 /= 0;					\
+l2_double:						\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/*
+ * Test that sync_linked_regs() correctly handles large offset differences:
+ * r1.off = S32_MIN and r2.off = 1, so delta = S32_MIN - 1, which requires
+ * 64-bit math.
+ */
+SEC("socket")
+__description("scalars: linked regs sync with large delta (S32_MIN offset)")
+__success
+__naked void scalars_sync_delta_overflow(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	r0 &= 0xff;					\
+	r1 = r0;					\
+	r2 = r0;					\
+	r1 += %[s32_min];				\
+	r2 += 1;					\
+	if r2 s< 100 goto l2_overflow;			\
+	if r1 s< 0 goto l2_overflow;			\
+	r0 /= 0;					\
+l2_overflow:						\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32),
+	  [s32_min]"i"((int)(-2147483647 - 1))
+	: __clobber_all);
+}
+
+/*
+ * Another large-delta case: r1.off = S32_MAX and r2.off = -1, so
+ * delta = S32_MAX - (-1) = S32_MAX + 1, which requires 64-bit math.
+ */
+SEC("socket")
+__description("scalars: linked regs sync with large delta (S32_MAX offset)")
+__success
+__naked void scalars_sync_delta_overflow_large_range(void)
+{
+	asm volatile ("					\
+	call %[bpf_get_prandom_u32];			\
+	r0 &= 0xff;					\
+	r1 = r0;					\
+	r2 = r0;					\
+	r1 += %[s32_max];				\
+	r2 += -1;					\
+	if r2 s< 0 goto l2_large;			\
+	if r1 s>= 0 goto l2_large;			\
+	r0 /= 0;					\
+l2_large:						\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_get_prandom_u32),
+	  [s32_max]"i"((int)2147483647)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
--
2.47.3

Update the expected regex pattern for the sub64_partial_overflow test.

With BPF_SUB now participating in linked register tracking, the
verifier log shows R3=scalar(id=1-1) instead of R3=scalar(), because
r3 is now tracked as linked to r0 with an offset of -1.

Signed-off-by: Puranjay Mohan
---
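Reviewer note, not for the commit log: a toy model of what the
id/offset pair in R3=scalar(id=1-1) enables. The struct and helper
below are illustrative, not verifier code; sync_linked_regs() performs
the equivalent "copy known_reg, then add delta" step on real ranges.

#include <stdint.h>
#include <stdio.h>

/* Toy model: a register linked as "reg = known + delta" can have a
 * branch-refined range mapped back onto the known register.
 */
struct range { int64_t min, max; };

static struct range shift(struct range r, int64_t delta)
{
	return (struct range){ r.min + delta, r.max + delta };
}

int main(void)
{
	struct range r0 = { 0, 255 };	/* e.g. after r0 &= 0xff */
	int64_t delta = -1;		/* r3 = r0 - 1, i.e. id=1-1 */
	struct range r3 = shift(r0, delta);

	if (r3.min < 0)			/* a branch learned r3 s>= 0 */
		r3.min = 0;
	r0 = shift(r3, -delta);		/* so r0 >= 1 follows */
	printf("r0 in [%lld, %lld]\n", (long long)r0.min, (long long)r0.max);
	return 0;
}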
 tools/testing/selftests/bpf/progs/verifier_bounds.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index 411a18437d7e..560531404bce 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1477,7 +1477,7 @@ __naked void sub64_full_overflow(void)
 SEC("socket")
 __description("64-bit subtraction, partial overflow, result in unbounded reg")
 __success __log_level(2)
-__msg("3: (1f) r3 -= r2 {{.*}} R3=scalar()")
+__msg("3: (1f) r3 -= r2 {{.*}} R3=scalar(id=1-1)")
 __retval(0)
 __naked void sub64_partial_overflow(void)
 {
--
2.47.3