Export skb_defer_free_flush() so that it can be invoked from other
modules, which is helpful in situations where processing the deferred
backlog at specific cache refill points may be preferred.

Signed-off-by: Jon Kohler
---
 include/linux/skbuff.h | 1 +
 net/core/dev.c         | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ff90281ddf90..daa2d7480fbd 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1365,6 +1365,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size);
 struct sk_buff *build_skb_around(struct sk_buff *skb,
 				 void *data, unsigned int frag_size);
 void skb_attempt_defer_free(struct sk_buff *skb);
+void skb_defer_free_flush(void);
 
 u32 napi_skb_cache_get_bulk(void **skbs, u32 n);
 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
diff --git a/net/core/dev.c b/net/core/dev.c
index 9094c0fb8c68..c4f535be7b6d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6774,7 +6774,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 }
 EXPORT_SYMBOL(napi_complete_done);
 
-static void skb_defer_free_flush(void)
+void skb_defer_free_flush(void)
 {
 	struct llist_node *free_list;
 	struct sk_buff *skb, *next;
@@ -6795,6 +6795,7 @@ static void skb_defer_free_flush(void)
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(skb_defer_free_flush);
 
 #if defined(CONFIG_NET_RX_BUSY_POLL)
 
-- 
2.43.0
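
As a usage illustration only (not part of this patch): a caller in another
module could look roughly like the sketch below. The helper name
example_cache_refill() is hypothetical, and the sketch assumes the same
calling context as the existing in-tree callers of skb_defer_free_flush(),
i.e. with bottom halves disabled on the local CPU:

	#include <linux/interrupt.h>
	#include <linux/skbuff.h>

	/* Hypothetical cache refill point in another module. */
	static void example_cache_refill(void)
	{
		/*
		 * Drain this CPU's deferred skb free backlog first, so
		 * recently freed skbs are returned to the per-CPU caches
		 * before the refill below runs.
		 */
		local_bh_disable();
		skb_defer_free_flush();
		local_bh_enable();

		/* ... refill the module's private skb cache here ... */
	}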