This is needed in the context of Tetragon to provide improved feedback
(in contrast to just dropping packets) to east-west traffic when blocked
by policies using cgroup_skb programs. We also extend this kfunc to tc
programs as a convenience.

This reuses concepts from the netfilter reject target codepath, with the
following differences:

* Packets are cloned, since the BPF user can still let the packet pass
  (e.g. SK_PASS from cgroup_skb progs) and the current skb needs to stay
  untouched (cgroup_skb hooks only allow read-only skb payload).

* The packet should already be routed; checksums are not computed or
  verified, and IPv4 fragmentation is not checked early, because
  cgroup_skb and tc programs are called later in the network stack
  compared to what netfilter could do.

* We protect against recursion, since the kfunc, by generating an ICMP
  error message, could retrigger the BPF prog that invoked it.

* Additionally, we ensure the IP headers are linearized before
  processing with pskb_network_may_pull(), and zero out the skb control
  block after cloning to prevent icmp_send()/icmpv6_send() from
  misinterpreting garbage data as IP options. This should matter mostly
  for tc ingress progs.

Only ICMP_DEST_UNREACH and ICMPV6_DEST_UNREACH are currently supported.
The interface accepts a type parameter to facilitate future extension to
other ICMP control message types.
Signed-off-by: Mahe Tardy
---
 net/core/filter.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 103 insertions(+)

diff --git a/net/core/filter.c b/net/core/filter.c
index 9590877b0714..a1fe20b45f29 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -84,6 +84,8 @@
 #include
 #include
 #include
+#include
+#include

 #include "dev.h"

@@ -12464,6 +12466,95 @@ __bpf_kfunc int bpf_xdp_pull_data(struct xdp_md *x, u32 len)
 	return 0;
 }

+static DEFINE_PER_CPU(bool, bpf_icmp_send_in_progress);
+
+/**
+ * bpf_icmp_send - Send an ICMP control message
+ * @skb: Packet that triggered the control message
+ * @type: ICMP type (only ICMP_DEST_UNREACH/ICMPV6_DEST_UNREACH supported)
+ * @code: ICMP code (0-15 for IPv4, 0-6 for IPv6)
+ *
+ * Sends an ICMP control message in response to the packet. The original packet
+ * is cloned before sending the ICMP message, so the BPF program can still let
+ * the packet pass if desired.
+ *
+ * Currently only ICMP_DEST_UNREACH (IPv4) and ICMPV6_DEST_UNREACH (IPv6) are
+ * supported.
+ *
+ * Recursion protection: if called from a context that would trigger recursion
+ * (e.g., root cgroup processing its own ICMP packets), returns -EBUSY on
+ * re-entry.
+ *
+ * Return: 0 on success, negative error code on failure:
+ * -EINVAL: Invalid code parameter
+ * -EBADMSG: Packet too short or malformed
+ * -ENOMEM: Memory allocation failed
+ * -EBUSY: Recursion detected
+ * -EPROTONOSUPPORT: Non-IP protocol
+ * -EOPNOTSUPP: Unsupported ICMP type
+ */
+__bpf_kfunc int bpf_icmp_send(struct __sk_buff *__skb, int type, int code)
+{
+	struct sk_buff *skb = (struct sk_buff *)__skb;
+	struct sk_buff *nskb;
+	bool *in_progress;
+
+	in_progress = this_cpu_ptr(&bpf_icmp_send_in_progress);
+	if (*in_progress)
+		return -EBUSY;
+
+	switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_INET)
+	case htons(ETH_P_IP):
+		if (type != ICMP_DEST_UNREACH)
+			return -EOPNOTSUPP;
+		if (code < 0 || code > NR_ICMP_UNREACH)
+			return -EINVAL;
+
+		if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+			return -EBADMSG;
+
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (!nskb)
+			return -ENOMEM;
+
+		memset(IPCB(nskb), 0, sizeof(struct inet_skb_parm));
+
+		*in_progress = true;
+		icmp_send(nskb, type, code, 0);
+		*in_progress = false;
+		kfree_skb(nskb);
+		break;
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
+	case htons(ETH_P_IPV6):
+		if (type != ICMPV6_DEST_UNREACH)
+			return -EOPNOTSUPP;
+		if (code < 0 || code > ICMPV6_REJECT_ROUTE)
+			return -EINVAL;
+
+		if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+			return -EBADMSG;
+
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (!nskb)
+			return -ENOMEM;
+
+		memset(IP6CB(nskb), 0, sizeof(struct inet6_skb_parm));
+
+		*in_progress = true;
+		icmpv6_send(nskb, type, code, 0);
+		*in_progress = false;
+		kfree_skb(nskb);
+		break;
+#endif
+	default:
+		return -EPROTONOSUPPORT;
+	}
+
+	return 0;
+}
+
 __bpf_kfunc_end_defs();

 int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
@@ -12506,6 +12597,10 @@ BTF_KFUNCS_START(bpf_kfunc_check_set_sock_ops)
 BTF_ID_FLAGS(func, bpf_sock_ops_enable_tx_tstamp)
 BTF_KFUNCS_END(bpf_kfunc_check_set_sock_ops)

+BTF_KFUNCS_START(bpf_kfunc_check_set_icmp_send)
+BTF_ID_FLAGS(func, bpf_icmp_send)
+BTF_KFUNCS_END(bpf_kfunc_check_set_icmp_send)
+
 static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
 	.owner = THIS_MODULE,
 	.set = &bpf_kfunc_check_set_skb,
@@ -12536,6 +12631,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_sock_ops = {
 	.set = &bpf_kfunc_check_set_sock_ops,
 };

+static const struct btf_kfunc_id_set bpf_kfunc_set_icmp_send = {
+	.owner = THIS_MODULE,
+	.set = &bpf_kfunc_check_set_icmp_send,
+};
+
 static int __init bpf_kfunc_init(void)
 {
 	int ret;
@@ -12557,6 +12657,9 @@ static int __init bpf_kfunc_init(void)
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
 					       &bpf_kfunc_set_sock_addr);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_tcp_reqsk);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &bpf_kfunc_set_icmp_send);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_icmp_send);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &bpf_kfunc_set_icmp_send);
 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SOCK_OPS, &bpf_kfunc_set_sock_ops);
 }
 late_initcall(bpf_kfunc_init);
--
2.34.1