This patch refactors __alloc_skb() to prepare for the following one, and
does not change functionality.

Signed-off-by: Eric Dumazet
---
 net/core/skbuff.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 88b5530f9c460d86e12c98e410774444367e0404..c6b065c0a2af265159ee6188469936767a295729 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -646,25 +646,31 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 			    int flags, int node)
 {
+	struct sk_buff *skb = NULL;
 	struct kmem_cache *cache;
-	struct sk_buff *skb;
 	bool pfmemalloc;
 	u8 *data;
 
-	cache = (flags & SKB_ALLOC_FCLONE)
-		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
-
 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
 		gfp_mask |= __GFP_MEMALLOC;
 
-	/* Get the HEAD */
-	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
-	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
+	if (flags & SKB_ALLOC_FCLONE) {
+		cache = net_hotdata.skbuff_fclone_cache;
+		goto fallback;
+	}
+	cache = net_hotdata.skbuff_cache;
+	if (unlikely(node != NUMA_NO_NODE && node != numa_mem_id()))
+		goto fallback;
+
+	if (flags & SKB_ALLOC_NAPI)
 		skb = napi_skb_cache_get(true);
-	else
+
+	if (!skb) {
+fallback:
 		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
-	if (unlikely(!skb))
-		return NULL;
+		if (unlikely(!skb))
+			return NULL;
+	}
 	prefetchw(skb);
 
 	/* We do our best to align skb_shared_info on a separate cache
-- 
2.52.0.rc1.455.g30608eb744-goog
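
For reference, below is a minimal, standalone sketch of the new head-allocation
flow. Everything in it (fake_skb, napi_cache_get(), slab_alloc(), local_node(),
the ALLOC_* and NODE_ANY constants) is a stand-in invented for this note, not a
kernel API; only the branch structure mirrors the patch above, and it builds as
ordinary userspace C.

/*
 * Illustrative sketch of the refactored head-allocation flow:
 * fclone and cross-node requests jump straight to the slab fallback,
 * NAPI requests try the per-CPU cache first and fall back on a miss.
 */
#include <stdio.h>
#include <stdlib.h>

#define ALLOC_FCLONE 0x1	/* stand-in for SKB_ALLOC_FCLONE */
#define ALLOC_NAPI   0x2	/* stand-in for SKB_ALLOC_NAPI */
#define NODE_ANY     (-1)	/* stand-in for NUMA_NO_NODE */

struct fake_skb { int from_napi_cache; };

/* Pretend numa_mem_id(): the sketch's "local" node is always 0. */
static int local_node(void) { return 0; }

/* Pretend per-CPU NAPI cache: hits on every other call to exercise the miss path. */
static struct fake_skb *napi_cache_get(void)
{
	static int calls;
	struct fake_skb *skb;

	if (calls++ & 1)
		return NULL;			/* simulated cache miss */
	skb = calloc(1, sizeof(*skb));
	if (skb)
		skb->from_napi_cache = 1;
	return skb;
}

/* Pretend slab allocation, standing in for kmem_cache_alloc_node(). */
static struct fake_skb *slab_alloc(const char *cache, int node)
{
	(void)cache;
	(void)node;
	return calloc(1, sizeof(struct fake_skb));
}

static struct fake_skb *alloc_skb_head(int flags, int node)
{
	struct fake_skb *skb = NULL;
	const char *cache;

	if (flags & ALLOC_FCLONE) {
		cache = "fclone_cache";	/* fclone requests never use the NAPI cache */
		goto fallback;
	}
	cache = "skbuff_cache";
	if (node != NODE_ANY && node != local_node())
		goto fallback;		/* cross-node requests go straight to the slab */

	if (flags & ALLOC_NAPI)
		skb = napi_cache_get();	/* try the per-CPU cache first */

	if (!skb) {
fallback:
		skb = slab_alloc(cache, node);
		if (!skb)
			return NULL;
	}
	return skb;
}

int main(void)
{
	struct fake_skb *a = alloc_skb_head(ALLOC_NAPI, NODE_ANY);
	struct fake_skb *b = alloc_skb_head(ALLOC_FCLONE, NODE_ANY);

	printf("NAPI request:   from_napi_cache=%d\n", a ? a->from_napi_cache : -1);
	printf("fclone request: from_napi_cache=%d\n", b ? b->from_napi_cache : -1);
	free(a);
	free(b);
	return 0;
}

Placing the fallback: label inside the if (!skb) block lets both the
SKB_ALLOC_FCLONE path and the cross-node path reuse the single slab allocation
call instead of duplicating it, while the common NAPI case keeps its fast path.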