From: Jason Xing <kerneljasonxing@gmail.com>

Since Eric proposed adding an indirect call wrapper for UDP and saw a
huge improvement[1], the same optimization can be applied to the xsk
path. This patch adds an indirect call for xsk, which gives the current
copy mode a stable improvement of around 1%, observed with IXGBE under
a 10Gb/s load. As the throughput grows, the positive effect is
magnified.

I also applied this patch on top of the batch xmit series[2] and saw a
<5% improvement with our internal application, though that number is a
little unstable.

Use the INDIRECT_* wrappers to keep xsk_destruct_skb static, as it used
to be, when the mitigation config is off.

[1]: https://lore.kernel.org/netdev/20251006193103.2684156-2-edumazet@google.com/
[2]: https://lore.kernel.org/all/20251021131209.41491-1-kerneljasonxing@gmail.com/

Suggested-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Jason Xing <kerneljasonxing@gmail.com>
---
v2
Link: https://lore.kernel.org/all/20251023085843.25619-1-kerneljasonxing@gmail.com/
1. use INDIRECT helpers (Alexander)
---
 include/net/xdp_sock.h | 7 +++++++
 net/core/skbuff.c      | 8 +++++---
 net/xdp/xsk.c          | 3 ++-
 3 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index ce587a225661..23e8861e8b25 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -125,6 +125,7 @@ struct xsk_tx_metadata_ops {
 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
 void __xsk_map_flush(struct list_head *flush_list);
+INDIRECT_CALLABLE_DECLARE(void xsk_destruct_skb(struct sk_buff *));
 
 /**
  * xsk_tx_metadata_to_compl - Save enough relevant metadata information
@@ -218,6 +219,12 @@ static inline void __xsk_map_flush(struct list_head *flush_list)
 {
 }
 
+#ifdef CONFIG_MITIGATION_RETPOLINE
+static inline void xsk_destruct_skb(struct sk_buff *skb)
+{
+}
+#endif
+
 static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
 					    struct xsk_tx_metadata_compl *compl)
 {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5b4bc8b1c7d5..00ea38248bd6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -81,6 +81,7 @@
 #include <net/mctp.h>
 #include <net/page_pool/helpers.h>
 #include <net/dropreason.h>
+#include <net/xdp_sock.h>
 
 #include <linux/uaccess.h>
 #include <trace/events/skb.h>
@@ -1140,12 +1141,13 @@ void skb_release_head_state(struct sk_buff *skb)
 	if (skb->destructor) {
 		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
 #ifdef CONFIG_INET
-		INDIRECT_CALL_3(skb->destructor,
+		INDIRECT_CALL_4(skb->destructor,
 				tcp_wfree, __sock_wfree, sock_wfree,
+				xsk_destruct_skb,
 				skb);
 #else
-		INDIRECT_CALL_1(skb->destructor,
-				sock_wfree,
+		INDIRECT_CALL_2(skb->destructor,
+				sock_wfree, xsk_destruct_skb,
 				skb);
 #endif
 	}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 7b0c68a70888..9451b090db16 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -605,7 +605,8 @@ static u32 xsk_get_num_desc(struct sk_buff *skb)
 	return XSKCB(skb)->num_descs;
 }
 
-static void xsk_destruct_skb(struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+void xsk_destruct_skb(struct sk_buff *skb)
 {
 	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
 
-- 
2.41.3
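
For reference, the INDIRECT_* machinery used above works roughly as
follows. This is an illustrative paraphrase of
include/linux/indirect_call_wrapper.h, not the verbatim kernel header:
under the retpoline mitigation, the wrapper compares the function
pointer against the known likely callees so the common case becomes a
cheap direct call, which is why the callee needs external linkage;
without the mitigation, the wrapper collapses to a plain indirect call
and the callee can stay static.

#ifdef CONFIG_MITIGATION_RETPOLINE
/* Compare @f against the known targets, most likely first; fall back
 * to the (retpolined) indirect call if none of them match.
 */
#define INDIRECT_CALL_1(f, f1, ...)					\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})
#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({								\
		likely(f == f2) ? f2(__VA_ARGS__) :			\
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
	})
/* The callee is compared by address, so it must be visible: */
#define INDIRECT_CALLABLE_DECLARE(f)	f
#define INDIRECT_CALLABLE_SCOPE
#else
/* No retpoline: a plain indirect call is cheap, so keep the callee
 * static and emit no declaration at all.
 */
#define INDIRECT_CALL_1(f, f1, ...)	f(__VA_ARGS__)
#define INDIRECT_CALL_2(f, f2, f1, ...)	f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE	static
#endif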
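
Concretely, with CONFIG_MITIGATION_RETPOLINE=y and CONFIG_INET=n, the
new INDIRECT_CALL_2() call site in skb_release_head_state() expands to
roughly the following (a sketch of the generated logic, not compiler
output):

	if (likely(skb->destructor == sock_wfree))
		sock_wfree(skb);		/* direct call */
	else if (likely(skb->destructor == xsk_destruct_skb))
		xsk_destruct_skb(skb);		/* direct call */
	else
		skb->destructor(skb);		/* retpoline thunk */

so xsk-generated skbs now skip the retpoline thunk on the destructor
call, which is where the improvement comes from.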