This patch implements bitwise tracking (tnum analysis) for the BPF_END (byte swap) operation. Currently, the BPF verifier does not track values through the BPF_END operation, treating the result as completely unknown. This limits the verifier's ability to prove safety of programs that perform endianness conversions, which are common in networking code. For example, the following code pattern for port number validation: int test(struct pt_regs *ctx) { __u64 x = bpf_get_prandom_u32(); x &= 0x3f00; // Range: [0, 0x3f00], var_off: (0x0; 0x3f00) x = bswap16(x); // Should swap to range [0, 0x3f], var_off: (0x0; 0x3f) if (x > 0x3f) goto trap; return 0; trap: return *(u64 *)NULL; // Should be unreachable } Currently generates verifier output: 1: (54) w0 &= 16128 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00)) 2: (d7) r0 = bswap16 r0 ; R0=scalar() 3: (25) if r0 > 0x3f goto pc+2 ; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=63,var_off=(0x0; 0x3f)) Without this patch, even though the verifier knows which bits of `x` can be set, after bswap16 it loses all tracking information and treats the port as having a completely unknown value [0, 65535]. According to the BPF instruction set[1], there are 3 kinds of BPF_END: 1. `bswap(16|32|64)`: opcode=0xd7 (BPF_END | BPF_ALU64 | BPF_TO_LE) - do unconditional swap 2. `le(16|32|64)`: opcode=0xd4 (BPF_END | BPF_ALU | BPF_TO_LE) - on big-endian: do swap - on little-endian: truncation (16/32-bit) or no-op (64-bit) 3. `be(16|32|64)`: opcode=0xdc (BPF_END | BPF_ALU | BPF_TO_BE) - on little-endian: do swap - on big-endian: truncation (16/32-bit) or no-op (64-bit) Since BPF_END operations are inherently bit-wise permutations, tnum (bitwise tracking) offers the most efficient and precise mechanism for value analysis. By implementing `tnum_bswap16`, `tnum_bswap32`, and `tnum_bswap64`, we can derive exact `var_off` values concisely, directly reflecting the bit-level changes. Here is the overview of changes: 1. 
In `tnum_bswap(16|32|64)` (kernel/bpf/tnum.c): Call `swab(16|32|64)` function on the value and mask of `var_off`, and do truncation for 16/32-bit cases. 2. In `adjust_scalar_min_max_vals` (kernel/bpf/verifier.c): Call helper function `scalar_byte_swap`. - Only do byte swap when * alu64 (unconditional swap) OR * switching between big-endian and little-endian machines. - If need do byte swap: * Firstly call `tnum_bswap(16|32|64)` to update `var_off`. * Then reset the bound since byte swap scrambles the range. - For 16/32-bit cases, truncate dst register to match the swapped size. This enables better verification of networking code that frequently uses byte swaps for protocol processing, reducing false positive rejections. [1] https://www.kernel.org/doc/Documentation/bpf/standardization/instruction-set.rst Co-developed-by: Shenghao Yuan Signed-off-by: Shenghao Yuan Co-developed-by: Yazhou Tang Signed-off-by: Yazhou Tang Signed-off-by: Tianci Cao --- include/linux/tnum.h | 5 ++++ kernel/bpf/tnum.c | 16 +++++++++++++ kernel/bpf/verifier.c | 56 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 74 insertions(+), 3 deletions(-) diff --git a/include/linux/tnum.h b/include/linux/tnum.h index c52b862dad45..fa4654ffb621 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -63,6 +63,11 @@ struct tnum tnum_union(struct tnum t1, struct tnum t2); /* Return @a with all but the lowest @size bytes cleared */ struct tnum tnum_cast(struct tnum a, u8 size); +/* Swap the bytes of a tnum */ +struct tnum tnum_bswap16(struct tnum a); +struct tnum tnum_bswap32(struct tnum a); +struct tnum tnum_bswap64(struct tnum a); + /* Returns true if @a is a known constant */ static inline bool tnum_is_const(struct tnum a) { diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c index f8e70e9c3998..26fbfbb01700 100644 --- a/kernel/bpf/tnum.c +++ b/kernel/bpf/tnum.c @@ -8,6 +8,7 @@ */ #include #include +#include #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m} /* A completely 
unknown value */ @@ -253,3 +254,18 @@ struct tnum tnum_const_subreg(struct tnum a, u32 value) { return tnum_with_subreg(a, tnum_const(value)); } + +struct tnum tnum_bswap16(struct tnum a) +{ + return TNUM(swab16(a.value & 0xFFFF), swab16(a.mask & 0xFFFF)); +} + +struct tnum tnum_bswap32(struct tnum a) +{ + return TNUM(swab32(a.value & 0xFFFFFFFF), swab32(a.mask & 0xFFFFFFFF)); +} + +struct tnum tnum_bswap64(struct tnum a) +{ + return TNUM(swab64(a.value), swab64(a.mask)); +} diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6b62b6d57175..815f63cc82b1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -15771,6 +15771,48 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, __update_reg_bounds(dst_reg); } +static void scalar_byte_swap(struct bpf_reg_state *dst_reg, struct bpf_insn *insn) +{ + /* + * Byte swap operation - update var_off using tnum_bswap. + * Three cases: + * 1. bswap(16|32|64): opcode=0xd7 (BPF_END | BPF_ALU64 | BPF_TO_LE) + * unconditional swap + * 2. to_le(16|32|64): opcode=0xd4 (BPF_END | BPF_ALU | BPF_TO_LE) + * swap on big-endian, truncation or no-op on little-endian + * 3. to_be(16|32|64): opcode=0xdc (BPF_END | BPF_ALU | BPF_TO_BE) + * swap on little-endian, truncation or no-op on big-endian + */ + + bool alu64 = BPF_CLASS(insn->code) == BPF_ALU64; + bool to_le = BPF_SRC(insn->code) == BPF_TO_LE; + bool is_big_endian; +#ifdef CONFIG_CPU_BIG_ENDIAN + is_big_endian = true; +#else + is_big_endian = false; +#endif + /* Apply bswap if alu64 or switch between big-endian and little-endian machines */ + bool need_bswap = alu64 || (to_le == is_big_endian); + + if (need_bswap) { + if (insn->imm == 16) + dst_reg->var_off = tnum_bswap16(dst_reg->var_off); + else if (insn->imm == 32) + dst_reg->var_off = tnum_bswap32(dst_reg->var_off); + else if (insn->imm == 64) + dst_reg->var_off = tnum_bswap64(dst_reg->var_off); + /* + * Byteswap scrambles the range, so we must reset bounds. 
+ * Bounds will be re-derived from the new tnum later. + */ + __mark_reg_unbounded(dst_reg); + } + /* For bswap16/32, truncate dst register to match the swapped size */ + if (insn->imm == 16 || insn->imm == 32) + coerce_reg_to_size(dst_reg, insn->imm / 8); +} + static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn, const struct bpf_reg_state *src_reg) { @@ -15797,6 +15839,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn, case BPF_XOR: case BPF_OR: case BPF_MUL: + case BPF_END: return true; /* @@ -15986,12 +16029,19 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, else scalar_min_max_arsh(dst_reg, &src_reg); break; + case BPF_END: + scalar_byte_swap(dst_reg, insn); + break; default: break; } - /* ALU32 ops are zero extended into 64bit register */ - if (alu32) + /* + * ALU32 ops are zero extended into 64bit register. + * BPF_END is already handled inside the helper (truncation), + * so skip zext here. + */ + if (alu32 && opcode != BPF_END) zext_32_to_64(dst_reg); reg_bounds_sync(dst_reg); return 0; @@ -16171,7 +16221,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check dest operand */ - if (opcode == BPF_NEG && + if ((opcode == BPF_NEG || opcode == BPF_END) && regs[insn->dst_reg].type == SCALAR_VALUE) { err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); err = err ?: adjust_scalar_min_max_vals(env, insn, -- 2.52.0 Now BPF_END has bitwise tracking support. This patch adds selftests to cover various cases of BPF_END (`bswap(16|32|64)`, `be(16|32|64)`, `le(16|32|64)`) with bitwise propagation. This patch is based on existing `verifier_bswap.c`, and add several types of new tests: 1. Unconditional byte swap operations: - bswap16: port number routing (network byte order conversion) - bswap32: IPv4 subnet routing (network byte order conversion) - bswap64: IPv6 subnet routing (network byte order conversion) 2. 
Endian conversion operations (architecture-aware): - be16/be32/be64: convert to big-endian * on little-endian: do swap * on big-endian: truncation (16/32-bit) or no-op (64-bit) - le16/le32/le64: convert to little-endian * on big-endian: do swap * on little-endian: truncation (16/32-bit) or no-op (64-bit) Each test simulates a realistic networking scenario where a value is masked to a specific range (e.g., 0x3f00), then byte-swapped, and the verifier must prove the result stays within expected bounds. Specifically, these selftests are based on dead code elimination: if the BPF verifier can precisely track bitwise information through byte swap operations, it can prune the trap path (an invalid memory access) that should be unreachable, allowing the program to pass verification. If bitwise tracking is incorrect, the verifier cannot prove the trap is unreachable, causing verification failure. The tests use preprocessor conditionals (#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) to verify correct behavior on both little-endian and big-endian architectures, and require Clang 18+ for bswap instruction support. 
Co-developed-by: Shenghao Yuan Signed-off-by: Shenghao Yuan Co-developed-by: Yazhou Tang Signed-off-by: Yazhou Tang Signed-off-by: Tianci Cao --- .../selftests/bpf/progs/verifier_bswap.c | 321 ++++++++++++++++++ 1 file changed, 321 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c index e61755656e8d..76d4fb3fc52c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bswap.c +++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c @@ -48,6 +48,327 @@ __naked void bswap_64(void) : __clobber_all); } +SEC("socket") +__description("BSWAP16 network - port number routing with tnum") +__success __log_level(2) +__msg("w0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") __msg("r0 = bswap16 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=63,var_off=(0x0; 0x3f))") +__naked void bswap16_port_routing(void) +{ + asm volatile (" \ + /* Simulate reading port number from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract byte range 0x0000-0x3f00 */ \ + w0 &= 0x3f00; \ + /* Convert network byte order to host order */ \ + r0 = bswap16 r0; \ + /* Verify range - if tnum works correctly, trap path is dead code */ \ + if r0 > 0x3f goto trap_%=; \ + r0 = 0; \ + exit; \ +trap_%=: /* Should never reach - r0 must be in [0, 0x3f] */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("BSWAP32 network - IPv4 subnet routing with tnum") +__success __log_level(2) +__msg("w0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") __msg("r0 = bswap32 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=0x3f0000,var_off=(0x0; 0x3f0000))") +__naked void bswap32_ipv4_subnet_routing(void) +{ + asm volatile (" \ + /* Simulate reading IPv4 from network 
packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract second byte (subnet ID) 0x0000-0x3f00 */ \ + w0 &= 0x3f00; \ + /* Convert network byte order to host order */ \ + r0 = bswap32 r0; \ + /* Verify bswap32 result - if tnum works correctly, trap path is dead code */ \ + if r0 > 0x3f0000 goto trap_%=; \ + r0 = 0; \ + exit; \ +trap_%=: /* Should never reach - r0 must be in [0, 0x3f0000] */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("BSWAP64 network - IPv6 subnet routing with tnum") +__success __log_level(2) +__msg("r0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") __msg("r0 = bswap64 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=0x3f000000000000,smax32=umax32=0,var_off=(0x0; 0x3f000000000000))") +__naked void bswap64_ipv6_subnet_routing(void) +{ + asm volatile (" \ + /* Simulate reading IPv6 from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract second byte (subnet ID) 0x0000-0x3f00 */ \ + r0 &= 0x3f00; \ + /* Convert network byte order to host order */ \ + r0 = bswap64 r0; \ + /* Verify bswap64 result - if tnum works correctly, trap path is dead code */ \ + r2 = 0x3f000000000000 ll; /* Load 64-bit constant */ \ + if r0 > r2 goto trap_%=; \ + r0 = 0; \ + exit; \ +trap_%=: /* Should never reach - r0 must be in [0, 0x3f000000000000] */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("BE16 (to big endian) network - endian conversion with tnum") +__success __log_level(2) +__msg("w0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +/* Little endian: be16 = bswap16 */ +__msg("r0 = be16 r0 {{.*}}; 
R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=63,var_off=(0x0; 0x3f))") +#else +/* Big endian: be16 = _to_u16 (truncation) */ +__msg("r0 = be16 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#endif +__naked void be16_network(void) +{ + asm volatile (" \ + /* Simulate reading from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract byte range 0x0000-0x3f00 */ \ + w0 &= 0x3f00; \ + /* Convert to big endian */ \ + r0 = be16 r0; \ + /* Verify range - if tnum works correctly, trap path is dead code */ \ +" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "if r0 > 0x3f goto trap_%=; \ +" +#else + "if r0 > 0x3f00 goto trap_%=; \ +" +#endif + "r0 = 0; \ + exit; \ +trap_%=: /* Should never reach */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("BE32 (to big endian) network - endian conversion with tnum") +__success __log_level(2) +__msg("w0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +/* Little endian: be32 = bswap32 */ +__msg("r0 = be32 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=0x3f0000,var_off=(0x0; 0x3f0000))") +#else +/* Big endian: be32 = _to_u32 (truncation) */ +__msg("r0 = be32 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#endif +__naked void be32_network(void) +{ + asm volatile (" \ + /* Simulate reading from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract byte range 0x0000-0x3f00 */ \ + w0 &= 0x3f00; \ + /* Convert to big endian */ \ + r0 = be32 r0; \ + /* Verify range - if tnum works correctly, trap path is dead code */ \ +" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "if r0 > 0x3f0000 goto trap_%=; \ +" +#else + "if r0 > 0x3f00 goto trap_%=; \ +" +#endif + "r0 = 0; \ 
+ exit; \ +trap_%=: /* Should never reach */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("BE64 (to big endian) network - endian conversion with tnum") +__success __log_level(2) +__msg("r0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +/* Little endian: be64 = bswap64 */ +__msg("r0 = be64 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=0x3f000000000000,smax32=umax32=0,var_off=(0x0; 0x3f000000000000))") +#else +/* Big endian: be64 = no-op */ +__msg("r0 = be64 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#endif +__naked void be64_network(void) +{ + asm volatile (" \ + /* Simulate reading from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract byte range 0x0000-0x3f00 */ \ + r0 &= 0x3f00; \ + /* Convert to big endian */ \ + r0 = be64 r0; \ + /* Verify range - if tnum works correctly, trap path is dead code */ \ +" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r2 = 0x3f000000000000 ll; /* Load 64-bit constant */ \ + if r0 > r2 goto trap_%=; \ +" +#else + "if r0 > 0x3f00 goto trap_%=; \ +" +#endif + "r0 = 0; \ + exit; \ +trap_%=: /* Should never reach */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("LE16 (to little endian) network - endian conversion with tnum") +__success __log_level(2) +__msg("w0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +/* Little endian: le16 = _to_u16 (truncation) */ +__msg("r0 = le16 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#else 
+/* Big endian: le16 = bswap16 */ +__msg("r0 = le16 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=63,var_off=(0x0; 0x3f))") +#endif +__naked void le16_network(void) +{ + asm volatile (" \ + /* Simulate reading from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract byte range 0x0000-0x3f00 */ \ + w0 &= 0x3f00; \ + /* Convert to little endian */ \ + r0 = le16 r0; \ + /* Verify range - if tnum works correctly, trap path is dead code */ \ +" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "if r0 > 0x3f00 goto trap_%=; \ +" +#else + "if r0 > 0x3f goto trap_%=; \ +" +#endif + "r0 = 0; \ + exit; \ +trap_%=: /* Should never reach */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("LE32 (to little endian) network - endian conversion with tnum") +__success __log_level(2) +__msg("w0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +/* Little endian: le32 = _to_u32 (truncation) */ +__msg("r0 = le32 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#else +/* Big endian: le32 = bswap32 */ +__msg("r0 = le32 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=0x3f0000,var_off=(0x0; 0x3f0000))") +#endif +__naked void le32_network(void) +{ + asm volatile (" \ + /* Simulate reading from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract byte range 0x0000-0x3f00 */ \ + w0 &= 0x3f00; \ + /* Convert to little endian */ \ + r0 = le32 r0; \ + /* Verify range - if tnum works correctly, trap path is dead code */ \ +" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "if r0 > 0x3f00 goto trap_%=; \ +" +#else + "if r0 > 0x3f0000 goto trap_%=; \ +" +#endif + "r0 = 0; \ + exit; \ +trap_%=: /* Should never reach */ \ + r1 = 42; /* r1 = scalar for trap */ \ + 
r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("LE64 (to little endian) network - endian conversion with tnum") +__success __log_level(2) +__msg("r0 &= 16128 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +/* Little endian: le64 = no-op */ +__msg("r0 = le64 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=16128,var_off=(0x0; 0x3f00))") +#else +/* Big endian: le64 = bswap64 */ +__msg("r0 = le64 r0 {{.*}}; R0=scalar(smin=smin32=0,smax=umax=0x3f000000000000,smax32=umax32=0,var_off=(0x0; 0x3f000000000000))") +#endif +__naked void le64_network(void) +{ + asm volatile (" \ + /* Simulate reading from network packet */ \ + call %[bpf_get_prandom_u32]; \ + /* Extract byte range 0x0000-0x3f00 */ \ + r0 &= 0x3f00; \ + /* Convert to little endian */ \ + r0 = le64 r0; \ + /* Verify range - if tnum works correctly, trap path is dead code */ \ +" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "if r0 > 0x3f00 goto trap_%=; \ +" +#else + "r2 = 0x3f000000000000 ll; /* Load 64-bit constant */ \ + if r0 > r2 goto trap_%=; \ +" +#endif + "r0 = 0; \ + exit; \ +trap_%=: /* Should never reach */ \ + r1 = 42; /* r1 = scalar for trap */ \ + r0 = *(u64 *)(r1 + 0); /* Invalid: scalar dereference */ \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + #else SEC("socket") -- 2.52.0