The following BPF program, simplified from a syzkaller repro, causes a
kernel warning:

  r0 = *(u8 *)(r1 + 169);
  exit;

The sk pointer field sits at offset 168 in __sk_buff. Because the
access doesn't match offsetof(struct __sk_buff, sk), it is treated as a
narrower read in bpf_skb_is_valid_access. It is therefore allowed and
later proceeds to bpf_convert_ctx_access. At that point, target_size is
null and the verifier errors with a kernel warning and:

  verifier bug: error during ctx access conversion(1)

This patch fixes that to return a proper "invalid bpf_context access"
error on the load instruction.

The same issue affects the sk field in multiple context structures, as
well as skb_data and skb_data_end in bpf_sock_ops and optval and
optval_end in bpf_sockopt.

Note this syzkaller crash was reported in [1], which used to be about a
different bug, fixed in commit fce7bd8e385a ("bpf/verifier: Handle
BPF_LOAD_ACQ instructions in insn_def_regno()"). Because syzbot somehow
confused the two bugs, the new crash and repro didn't get reported to
the mailing list.

Link: https://syzkaller.appspot.com/bug?extid=0ef84a7bdf5301d4cbec [1]
Fixes: f96da09473b52 ("bpf: simplify narrower ctx access")
Fixes: 0df1a55afa832 ("bpf: Warn on internal verifier errors")
Reported-by: syzbot+0ef84a7bdf5301d4cbec@syzkaller.appspotmail.com
Signed-off-by: Paul Chaignon
---
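A note on the mechanics, for reviewers (illustration only, not part of
the commit message): bpf_ctx_range_ptr() expands to a case range that
covers every byte of the pointer field, so any offset inside the field
now reaches the size check instead of the default narrow-read handling.
Roughly, based on the 64-bit definition in include/linux/filter.h:

  #define bpf_ctx_range_ptr(TYPE, MEMBER) \
          offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1

  /* offsetof(struct __sk_buff, sk) is 168 and the field is 8 bytes
   * wide, so the label below now covers offsets 168 through 175.
   */
  case bpf_ctx_range_ptr(struct __sk_buff, sk):
          if (type == BPF_WRITE || size != sizeof(__u64))
                  return false;   /* -> "invalid bpf_context access" */
          info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
          break;

The 1-byte load at offset 169 from the repro now hits this case and
fails the size check in bpf_skb_is_valid_access, instead of reaching
bpf_convert_ctx_access with a null target_size.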
 kernel/bpf/cgroup.c | 6 +++---
 net/core/filter.c   | 14 +++++++-------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 72c8b50dca0a..3a4ad9f124e1 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -2577,17 +2577,17 @@ static bool cg_sockopt_is_valid_access(int off, int size,
 	}
 
 	switch (off) {
-	case offsetof(struct bpf_sockopt, sk):
+	case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_SOCKET;
 		break;
-	case offsetof(struct bpf_sockopt, optval):
+	case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_PACKET;
 		break;
-	case offsetof(struct bpf_sockopt, optval_end):
+	case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_PACKET_END;
diff --git a/net/core/filter.c b/net/core/filter.c
index 7a72f766aacf..458908c5f1f4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8690,7 +8690,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
 		if (size != sizeof(__u64))
 			return false;
 		break;
-	case offsetof(struct __sk_buff, sk):
+	case bpf_ctx_range_ptr(struct __sk_buff, sk):
 		if (type == BPF_WRITE || size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
@@ -9268,7 +9268,7 @@ static bool sock_addr_is_valid_access(int off, int size,
 			return false;
 		}
 		break;
-	case offsetof(struct bpf_sock_addr, sk):
+	case bpf_ctx_range_ptr(struct bpf_sock_addr, sk):
 		if (type != BPF_READ)
 			return false;
 		if (size != sizeof(__u64))
@@ -9318,17 +9318,17 @@ static bool sock_ops_is_valid_access(int off, int size,
 			if (size != sizeof(__u64))
 				return false;
 			break;
-		case offsetof(struct bpf_sock_ops, sk):
+		case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
 			if (size != sizeof(__u64))
 				return false;
 			info->reg_type = PTR_TO_SOCKET_OR_NULL;
 			break;
-		case offsetof(struct bpf_sock_ops, skb_data):
+		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
 			if (size != sizeof(__u64))
 				return false;
 			info->reg_type = PTR_TO_PACKET;
 			break;
-		case offsetof(struct bpf_sock_ops, skb_data_end):
+		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
 			if (size != sizeof(__u64))
 				return false;
 			info->reg_type = PTR_TO_PACKET_END;
@@ -9417,7 +9417,7 @@ static bool sk_msg_is_valid_access(int off, int size,
 		if (size != sizeof(__u64))
 			return false;
 		break;
-	case offsetof(struct sk_msg_md, sk):
+	case bpf_ctx_range_ptr(struct sk_msg_md, sk):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_SOCKET;
@@ -11623,7 +11623,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
 		return false;
 
 	switch (off) {
-	case offsetof(struct bpf_sk_lookup, sk):
+	case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
 		info->reg_type = PTR_TO_SOCKET_OR_NULL;
 		return size == sizeof(__u64);
 
-- 
2.43.0

This patch adds two selftests to cover invalid narrower loads on the
context. These used to cause a kernel warning before the previous
patch. To trigger the warning, the load had to be aligned, had to read
an affected pointer field (e.g., skb->sk), and couldn't start at the
beginning of the pointer field. The new selftests exercise two such
loads, of 1 and 4 bytes.

Signed-off-by: Paul Chaignon
---
 .../selftests/bpf/progs/verifier_ctx.c | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index a83809a1dbbf..229f26d1d413 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -218,4 +218,31 @@ __naked void null_check_8_null_bind(void)
 		: __clobber_all);
 }
 
+SEC("tc")
+__description("invalid narrow skb->sk load")
+__failure __msg("invalid bpf_context access")
+__naked void invalid_narrow_skb_sk_load(void)
+{
+	asm volatile ("					\
+	r0 = *(u8 *)(r1 + %[__sk_buff_sk]);		\
+	exit;						\
+"	:
+	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk) + 1)
+	: __clobber_all);
+}
+
+SEC("sockops")
+__description("invalid narrow skops->sk_data load")
+__failure __msg("invalid bpf_context access")
+__naked void invalid_narrow_skops_sk_data_load(void)
+{
+	asm volatile ("					\
+	r1 = *(u32 *)(r1 + %[sk_data]);			\
+	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm_const(sk_data, offsetof(struct bpf_sock_ops, skb_data) + 4)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
-- 
2.43.0
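A complementary illustration (not part of the series; the test name
below is made up for the example): a full-width, aligned read of
skb->sk is still accepted and yields PTR_TO_SOCK_COMMON_OR_NULL, so
only the narrow and misaligned variants are rejected. In the style of
progs/verifier_ctx.c, such a positive counterpart could look like:

  SEC("tc")
  __description("valid full-width skb->sk load")
  __success
  __naked void valid_full_skb_sk_load(void)
  {
  	asm volatile ("					\
  	r0 = *(u64 *)(r1 + %[__sk_buff_sk]);		\
  	r0 = 0;						\
  	exit;						\
  "	:
  	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
  	: __clobber_all);
  }

The verifier_ctx programs are exercised through test_progs, so the new
tests should be runnable with something like ./test_progs -t
verifier_ctx.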