clang is inlining it already, gcc (14.2) does not.

Small space cost (215 bytes on x86_64) but faster sk_buff allocations.

$ scripts/bloat-o-meter -t net/core/skbuff.gcc.before.o net/core/skbuff.gcc.after.o
add/remove: 0/1 grow/shrink: 4/1 up/down: 359/-144 (215)
Function                                     old     new   delta
__alloc_skb                                  471     611    +140
napi_build_skb                               245     363    +118
napi_alloc_skb                               331     416     +85
skb_copy_ubufs                              1869    1885     +16
skb_shift                                   1445    1413     -32
napi_skb_cache_get                           112       -    -112
Total: Before=59941, After=60156, chg +0.36%

Signed-off-by: Eric Dumazet
---
 net/core/skbuff.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a56133902c0d9c47b45a4a19b228b151456e5051..9e94590914b7f9b1cc748262c73eb5aa4f9d2df8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -280,7 +280,7 @@ EXPORT_SYMBOL(__netdev_alloc_frag_align);
  */
 static u32 skbuff_cache_size __read_mostly;
 
-static struct sk_buff *napi_skb_cache_get(bool alloc)
+static inline struct sk_buff *napi_skb_cache_get(bool alloc)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	struct sk_buff *skb;
-- 
2.52.0.457.g6b5491de43-goog