Introduce helper masks for bpf_skb_adjust_room() decapsulation flags to simplify validation and keep flag handling readable: - BPF_F_ADJ_ROOM_DECAP_L4_MASK - BPF_F_ADJ_ROOM_DECAP_IPXIP_MASK - BPF_F_ADJ_ROOM_ENCAP_MASK - BPF_F_ADJ_ROOM_DECAP_MASK Also fold these masks into BPF_F_ADJ_ROOM_MASK and use them in the per-path flag validation checks. This is a refactoring/plumbing change; functional decapsulation and GSO behavior updates are handled in later patches. Co-developed-by: Max Tottenham Signed-off-by: Max Tottenham Co-developed-by: Anna Glasgall Signed-off-by: Anna Glasgall Signed-off-by: Nick Hudson --- net/core/filter.c | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 0d5d5a17acb2..ac7e1068fe4c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3483,14 +3483,27 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb) #define BPF_F_ADJ_ROOM_DECAP_L3_MASK (BPF_F_ADJ_ROOM_DECAP_L3_IPV4 | \ BPF_F_ADJ_ROOM_DECAP_L3_IPV6) -#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ - BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ +#define BPF_F_ADJ_ROOM_DECAP_L4_MASK (BPF_F_ADJ_ROOM_DECAP_L4_UDP | \ + BPF_F_ADJ_ROOM_DECAP_L4_GRE) + +#define BPF_F_ADJ_ROOM_DECAP_IPXIP_MASK (BPF_F_ADJ_ROOM_DECAP_IPXIP4 | \ + BPF_F_ADJ_ROOM_DECAP_IPXIP6) + +#define BPF_F_ADJ_ROOM_ENCAP_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \ BPF_F_ADJ_ROOM_ENCAP_L2( \ - BPF_ADJ_ROOM_ENCAP_L2_MASK) | \ - BPF_F_ADJ_ROOM_DECAP_L3_MASK) + BPF_ADJ_ROOM_ENCAP_L2_MASK)) + +#define BPF_F_ADJ_ROOM_DECAP_MASK (BPF_F_ADJ_ROOM_DECAP_L3_MASK | \ + BPF_F_ADJ_ROOM_DECAP_L4_MASK | \ + BPF_F_ADJ_ROOM_DECAP_IPXIP_MASK) + +#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ + BPF_F_ADJ_ROOM_ENCAP_MASK | \ + BPF_F_ADJ_ROOM_DECAP_MASK | \ + BPF_F_ADJ_ROOM_NO_CSUM_RESET) static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, u64 
flags) @@ -3502,6 +3515,11 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, unsigned int gso_type = SKB_GSO_DODGY; int ret; + if (unlikely(flags & ~(BPF_F_ADJ_ROOM_ENCAP_MASK | + BPF_F_ADJ_ROOM_NO_CSUM_RESET | + BPF_F_ADJ_ROOM_FIXED_GSO))) + return -EINVAL; + if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { /* udp gso_size delineates datagrams, only allow if fixed */ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) || @@ -3611,8 +3629,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff, { int ret; - if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO | - BPF_F_ADJ_ROOM_DECAP_L3_MASK | + if (unlikely(flags & ~(BPF_F_ADJ_ROOM_DECAP_MASK | + BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_NO_CSUM_RESET))) return -EINVAL; @@ -3708,8 +3726,7 @@ BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, u32 off; int ret; - if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK | - BPF_F_ADJ_ROOM_NO_CSUM_RESET))) + if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK)) return -EINVAL; if (unlikely(len_diff_abs > 0xfffU)) return -EFAULT; -- 2.34.1