Split netpoll_send_udp() into two separate operations:
netpoll_prepare_skb() for message preparation and netpoll_send_skb()
for transmission. This improves separation of concerns: SKB building
logic is now isolated from the actual network transmission, improving
code modularity and testability.

Why? Separating SKB preparation from transmission enables more
granular locking strategies. The netconsole buffer requires lock
protection during packet construction, but the transmission phase can
proceed without holding the same lock. This also makes netpoll
responsible only for handling SKBs.

netpoll_prepare_skb() is exported for now, but an upcoming change
will move it into netconsole and make it static.

Signed-off-by: Breno Leitao
---
 drivers/net/netconsole.c | 21 ++++++++++++++-------
 include/linux/netpoll.h  |  2 ++
 net/core/netpoll.c       |  9 +++++----
 3 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 194570443493b..5b8af2de719a2 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -1492,18 +1492,25 @@ static struct notifier_block netconsole_netdev_notifier = {
  */
 static void send_udp(struct netconsole_target *nt, const char *msg, int len)
 {
-	int result = netpoll_send_udp(&nt->np, msg, len);
+	struct sk_buff *skb;
+	netdev_tx_t result;
 
-	if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) {
-		if (result == NET_XMIT_DROP) {
-			u64_stats_update_begin(&nt->stats.syncp);
-			u64_stats_inc(&nt->stats.xmit_drop_count);
-			u64_stats_update_end(&nt->stats.syncp);
-		} else if (result == -ENOMEM) {
+	skb = netpoll_prepare_skb(&nt->np, msg, len);
+	if (!skb) {
+		if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) {
 			u64_stats_update_begin(&nt->stats.syncp);
 			u64_stats_inc(&nt->stats.enomem_count);
 			u64_stats_update_end(&nt->stats.syncp);
 		}
+		return;
+	}
+
+	result = netpoll_send_skb(&nt->np, skb);
+
+	if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC) && result == NET_XMIT_DROP) {
+		u64_stats_update_begin(&nt->stats.syncp);
+		u64_stats_inc(&nt->stats.xmit_drop_count);
+		u64_stats_update_end(&nt->stats.syncp);
 	}
 }
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index b5ea9882eda8b..ed74889e126c7 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -69,6 +69,8 @@ static inline void netpoll_poll_enable(struct net_device *dev) { return; }
 #endif
 
 int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
+struct sk_buff *netpoll_prepare_skb(struct netpoll *np, const char *msg,
+				    int len);
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
 int netpoll_setup(struct netpoll *np);
 void __netpoll_free(struct netpoll *np);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5f65b62346d4e..e2098c19987f4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -496,7 +496,8 @@ static void push_eth(struct netpoll *np, struct sk_buff *skb)
 		eth->h_proto = htons(ETH_P_IP);
 }
 
-int netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+struct sk_buff *netpoll_prepare_skb(struct netpoll *np, const char *msg,
+				    int len)
 {
 	int total_len, ip_len, udp_len;
 	struct sk_buff *skb;
@@ -515,7 +516,7 @@ int netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 	skb = find_skb(np, total_len + np->dev->needed_tailroom,
 		       total_len - len);
 	if (!skb)
-		return -ENOMEM;
+		return NULL;
 
 	skb_copy_to_linear_data(skb, msg, len);
 	skb_put(skb, len);
@@ -528,9 +529,9 @@ int netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 	push_eth(np, skb);
 	skb->dev = np->dev;
 
-	return (int)netpoll_send_skb(np, skb);
+	return skb;
 }
-EXPORT_SYMBOL(netpoll_send_udp);
+EXPORT_SYMBOL(netpoll_prepare_skb);
 
 static void skb_pool_flush(struct netpoll *np)
--
2.47.3
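A minimal sketch of the calling pattern this split enables. This is
illustrative only, not part of the patch: "target_lock" is a
hypothetical stand-in for whatever lock guards the caller's message
buffer.

	/*
	 * The buffer only needs protection while the packet is being
	 * built; transmission can run without holding the lock.
	 */
	static void send_msg_locked(struct netpoll *np, spinlock_t *target_lock,
				    const char *msg, int len)
	{
		struct sk_buff *skb;

		spin_lock(target_lock);
		skb = netpoll_prepare_skb(np, msg, len);
		spin_unlock(target_lock);

		if (skb)
			netpoll_send_skb(np, skb);
	}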
Move the UDP packet preparation logic from the netpoll core to the
netconsole driver, consolidating network-console-specific
functionality. Changes include:

- Move netpoll_prepare_skb() from net/core/netpoll.c to netconsole.c
- Move all UDP/IP header construction helpers (push_udp, push_ipv4,
  push_ipv6, push_eth, netpoll_udp_checksum) to netconsole.c
- Add the necessary network header includes to netconsole.c
- Export find_skb() from the netpoll core to allow netconsole access
  * This is temporary, since skb pool management is a netconsole
    concern; the export will be removed later in this patchset.

With this change, netconsole becomes a regular netpoll user, calling
netpoll with SKBs instead of a msg/len pair.

Signed-off-by: Breno Leitao
---
 drivers/net/netconsole.c | 147 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/netpoll.h  |   3 +-
 net/core/netpoll.c       | 151 +----------------------------------------------
 3 files changed, 150 insertions(+), 151 deletions(-)

diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 5b8af2de719a2..30731711571be 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -40,6 +40,10 @@
 #include
 #include
 
+#include
+#include
+#include
+
 MODULE_AUTHOR("Matt Mackall ");
 MODULE_DESCRIPTION("Console driver for network interfaces");
 MODULE_LICENSE("GPL");
@@ -1480,6 +1484,149 @@ static struct notifier_block netconsole_netdev_notifier = {
 	.notifier_call = netconsole_netdev_event,
 };
 
+static void netpoll_udp_checksum(struct netpoll *np, struct sk_buff *skb,
+				 int len)
+{
+	struct udphdr *udph;
+	int udp_len;
+
+	udp_len = len + sizeof(struct udphdr);
+	udph = udp_hdr(skb);
+
+	/* check needs to be set, since it will be consumed in csum_partial */
+	udph->check = 0;
+	if (np->ipv6)
+		udph->check = csum_ipv6_magic(&np->local_ip.in6,
+					      &np->remote_ip.in6,
+					      udp_len, IPPROTO_UDP,
+					      csum_partial(udph, udp_len, 0));
+	else
+		udph->check = csum_tcpudp_magic(np->local_ip.ip,
+						np->remote_ip.ip,
+						udp_len, IPPROTO_UDP,
+						csum_partial(udph, udp_len, 0));
+	if (udph->check == 0)
+		udph->check = CSUM_MANGLED_0;
+}
+
+static void push_ipv6(struct netpoll *np, struct sk_buff *skb, int len)
+{
+	struct ipv6hdr *ip6h;
+
+	skb_push(skb, sizeof(struct ipv6hdr));
+	skb_reset_network_header(skb);
+	ip6h = ipv6_hdr(skb);
+
+	/* ip6h->version = 6; ip6h->priority = 0; */
+	*(unsigned char *)ip6h = 0x60;
+	ip6h->flow_lbl[0] = 0;
+	ip6h->flow_lbl[1] = 0;
+	ip6h->flow_lbl[2] = 0;
+
+	ip6h->payload_len = htons(sizeof(struct udphdr) + len);
+	ip6h->nexthdr = IPPROTO_UDP;
+	ip6h->hop_limit = 32;
+	ip6h->saddr = np->local_ip.in6;
+	ip6h->daddr = np->remote_ip.in6;
+
+	skb->protocol = htons(ETH_P_IPV6);
+}
+
+static void push_ipv4(struct netpoll *np, struct sk_buff *skb, int len)
+{
+	static atomic_t ip_ident;
+	struct iphdr *iph;
+	int ip_len;
+
+	ip_len = len + sizeof(struct udphdr) + sizeof(struct iphdr);
+
+	skb_push(skb, sizeof(struct iphdr));
+	skb_reset_network_header(skb);
+	iph = ip_hdr(skb);
+
+	/* iph->version = 4; iph->ihl = 5; */
+	*(unsigned char *)iph = 0x45;
+	iph->tos = 0;
+	put_unaligned(htons(ip_len), &iph->tot_len);
+	iph->id = htons(atomic_inc_return(&ip_ident));
+	iph->frag_off = 0;
+	iph->ttl = 64;
+	iph->protocol = IPPROTO_UDP;
+	iph->check = 0;
+	put_unaligned(np->local_ip.ip, &iph->saddr);
+	put_unaligned(np->remote_ip.ip, &iph->daddr);
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+	skb->protocol = htons(ETH_P_IP);
+}
+
+static void push_udp(struct netpoll *np, struct sk_buff *skb, int len)
+{
+	struct udphdr *udph;
+	int udp_len;
+
+	udp_len = len + sizeof(struct udphdr);
+
+	skb_push(skb, sizeof(struct udphdr));
+	skb_reset_transport_header(skb);
+
+	udph = udp_hdr(skb);
+	udph->source = htons(np->local_port);
+	udph->dest = htons(np->remote_port);
+	udph->len = htons(udp_len);
+
+	netpoll_udp_checksum(np, skb, len);
+}
+
+static void push_eth(struct netpoll *np, struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+
+	eth = skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+	ether_addr_copy(eth->h_source, np->dev->dev_addr);
+	ether_addr_copy(eth->h_dest, np->remote_mac);
+	if (np->ipv6)
+		eth->h_proto = htons(ETH_P_IPV6);
+	else
+		eth->h_proto = htons(ETH_P_IP);
+}
+
+static struct sk_buff *netpoll_prepare_skb(struct netpoll *np, const char *msg,
+					   int len)
+{
+	int total_len, ip_len, udp_len;
+	struct sk_buff *skb;
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		WARN_ON_ONCE(!irqs_disabled());
+
+	udp_len = len + sizeof(struct udphdr);
+	if (np->ipv6)
+		ip_len = udp_len + sizeof(struct ipv6hdr);
+	else
+		ip_len = udp_len + sizeof(struct iphdr);
+
+	total_len = ip_len + LL_RESERVED_SPACE(np->dev);
+
+	skb = find_skb(np, total_len + np->dev->needed_tailroom,
+		       total_len - len);
+	if (!skb)
+		return NULL;
+
+	skb_copy_to_linear_data(skb, msg, len);
+	skb_put(skb, len);
+
+	push_udp(np, skb, len);
+	if (np->ipv6)
+		push_ipv6(np, skb, len);
+	else
+		push_ipv4(np, skb, len);
+	push_eth(np, skb);
+	skb->dev = np->dev;
+
+	return skb;
+}
+
 /**
  * send_udp - Wrapper for netpoll_send_udp that counts errors
  * @nt: target to send message to
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index ed74889e126c7..481ec474fa6b9 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -69,14 +69,13 @@ static inline void netpoll_poll_enable(struct net_device *dev) { return; }
 #endif
 
 int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
-struct sk_buff *netpoll_prepare_skb(struct netpoll *np, const char *msg,
-				    int len);
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
 int netpoll_setup(struct netpoll *np);
 void __netpoll_free(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 void do_netpoll_cleanup(struct netpoll *np);
 netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
+struct sk_buff *find_skb(struct netpoll *np, int len, int reserve);
 
 #ifdef CONFIG_NETPOLL
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e2098c19987f4..b4634e91568e8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -29,11 +29,8 @@
 #include
 #include
 #include
-#include
-#include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -271,7 +268,7 @@ static void zap_completion_queue(void)
 	put_cpu_var(softnet_data);
 }
 
-static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
+struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
 	int count = 0;
 	struct sk_buff *skb;
@@ -297,6 +294,7 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 	skb_reserve(skb, reserve);
 	return skb;
 }
+EXPORT_SYMBOL_GPL(find_skb);
 
 static int netpoll_owner_active(struct net_device *dev)
 {
@@ -372,31 +370,6 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	return ret;
 }
 
-static void netpoll_udp_checksum(struct netpoll *np, struct sk_buff *skb,
-				 int len)
-{
-	struct udphdr *udph;
-	int udp_len;
-
-	udp_len = len + sizeof(struct udphdr);
-	udph = udp_hdr(skb);
-
-	/* check needs to be set, since it will be consumed in csum_partial */
-	udph->check = 0;
-	if (np->ipv6)
-		udph->check = csum_ipv6_magic(&np->local_ip.in6,
-					      &np->remote_ip.in6,
-					      udp_len, IPPROTO_UDP,
-					      csum_partial(udph, udp_len, 0));
-	else
-		udph->check = csum_tcpudp_magic(np->local_ip.ip,
-						np->remote_ip.ip,
-						udp_len, IPPROTO_UDP,
-						csum_partial(udph, udp_len, 0));
-	if (udph->check == 0)
-		udph->check = CSUM_MANGLED_0;
-}
-
 netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	unsigned long flags;
@@ -414,126 +387,6 @@ netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netpoll_send_skb);
 
-static void push_ipv6(struct netpoll *np, struct sk_buff *skb, int len)
-{
-	struct ipv6hdr *ip6h;
-
-	skb_push(skb, sizeof(struct ipv6hdr));
-	skb_reset_network_header(skb);
-	ip6h = ipv6_hdr(skb);
-
-	/* ip6h->version = 6; ip6h->priority = 0; */
-	*(unsigned char *)ip6h = 0x60;
-	ip6h->flow_lbl[0] = 0;
-	ip6h->flow_lbl[1] = 0;
-	ip6h->flow_lbl[2] = 0;
-
-	ip6h->payload_len = htons(sizeof(struct udphdr) + len);
-	ip6h->nexthdr = IPPROTO_UDP;
-	ip6h->hop_limit = 32;
-	ip6h->saddr = np->local_ip.in6;
-	ip6h->daddr = np->remote_ip.in6;
-
-	skb->protocol = htons(ETH_P_IPV6);
-}
-
-static void push_ipv4(struct netpoll *np, struct sk_buff *skb, int len)
-{
-	static atomic_t ip_ident;
-	struct iphdr *iph;
-	int ip_len;
-
-	ip_len = len + sizeof(struct udphdr) + sizeof(struct iphdr);
-
-	skb_push(skb, sizeof(struct iphdr));
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-
-	/* iph->version = 4; iph->ihl = 5; */
-	*(unsigned char *)iph = 0x45;
-	iph->tos = 0;
-	put_unaligned(htons(ip_len), &iph->tot_len);
-	iph->id = htons(atomic_inc_return(&ip_ident));
-	iph->frag_off = 0;
-	iph->ttl = 64;
-	iph->protocol = IPPROTO_UDP;
-	iph->check = 0;
-	put_unaligned(np->local_ip.ip, &iph->saddr);
-	put_unaligned(np->remote_ip.ip, &iph->daddr);
-	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-	skb->protocol = htons(ETH_P_IP);
-}
-
-static void push_udp(struct netpoll *np, struct sk_buff *skb, int len)
-{
-	struct udphdr *udph;
-	int udp_len;
-
-	udp_len = len + sizeof(struct udphdr);
-
-	skb_push(skb, sizeof(struct udphdr));
-	skb_reset_transport_header(skb);
-
-	udph = udp_hdr(skb);
-	udph->source = htons(np->local_port);
-	udph->dest = htons(np->remote_port);
-	udph->len = htons(udp_len);
-
-	netpoll_udp_checksum(np, skb, len);
-}
-
-static void push_eth(struct netpoll *np, struct sk_buff *skb)
-{
-	struct ethhdr *eth;
-
-	eth = skb_push(skb, ETH_HLEN);
-	skb_reset_mac_header(skb);
-	ether_addr_copy(eth->h_source, np->dev->dev_addr);
-	ether_addr_copy(eth->h_dest, np->remote_mac);
-	if (np->ipv6)
-		eth->h_proto = htons(ETH_P_IPV6);
-	else
-		eth->h_proto = htons(ETH_P_IP);
-}
-
-struct sk_buff *netpoll_prepare_skb(struct netpoll *np, const char *msg,
-				    int len)
-{
-	int total_len, ip_len, udp_len;
-	struct sk_buff *skb;
-
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		WARN_ON_ONCE(!irqs_disabled());
-
-	udp_len = len + sizeof(struct udphdr);
-	if (np->ipv6)
-		ip_len = udp_len + sizeof(struct ipv6hdr);
-	else
-		ip_len = udp_len + sizeof(struct iphdr);
-
-	total_len = ip_len + LL_RESERVED_SPACE(np->dev);
-
-	skb = find_skb(np, total_len + np->dev->needed_tailroom,
-		       total_len - len);
-	if (!skb)
-		return NULL;
-
-	skb_copy_to_linear_data(skb, msg, len);
-	skb_put(skb, len);
-
-	push_udp(np, skb, len);
-	if (np->ipv6)
-		push_ipv6(np, skb, len);
-	else
-		push_ipv4(np, skb, len);
-	push_eth(np, skb);
-	skb->dev = np->dev;
-
-	return skb;
-}
-EXPORT_SYMBOL(netpoll_prepare_skb);
-
-
 static void skb_pool_flush(struct netpoll *np)
 {
 	struct sk_buff_head *skb_pool;
--
2.47.3
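To make the moved helpers easier to follow, here is a sketch of how
netpoll_prepare_skb() layers the frame. This restates the code above
rather than adding behavior; np_ipv4_headroom() is a hypothetical
helper, named here only for illustration.

	/* Headers are pushed from the innermost layer outwards, so the
	 * payload is copied first and each push_*() helper prepends one
	 * header (IPv4 case shown):
	 *
	 *   after skb_put():             [payload]
	 *   after push_udp():            [udphdr][payload]
	 *   after push_ipv4():    [iphdr][udphdr][payload]
	 *   after push_eth():  [eth][iphdr][udphdr][payload]
	 *
	 * This is the headroom find_skb() must reserve ("total_len - len")
	 * so that all three skb_push() calls are guaranteed to fit.
	 */
	static int np_ipv4_headroom(struct net_device *dev)
	{
		return sizeof(struct udphdr) + sizeof(struct iphdr) +
		       LL_RESERVED_SPACE(dev);
	}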
Shift the definition of netpoll_cleanup() from the netpoll core to
the netconsole driver, updating all relevant file references. This
centralizes cleanup logic alongside netconsole target management:
since netpoll_cleanup() is only called by netconsole, keep it there.

Signed-off-by: Breno Leitao
---
 drivers/net/netconsole.c | 10 ++++++++++
 include/linux/netpoll.h  |  1 -
 net/core/netpoll.c       | 10 ----------
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 30731711571be..90e359b87469a 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -260,6 +260,16 @@ static struct netconsole_target *alloc_and_init(void)
 	return nt;
 }
 
+static void netpoll_cleanup(struct netpoll *np)
+{
+	rtnl_lock();
+	if (!np->dev)
+		goto out;
+	do_netpoll_cleanup(np);
+out:
+	rtnl_unlock();
+}
+
 /* Clean up every target in the cleanup_list and move the clean targets back to
  * the main target_list.
  */
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 481ec474fa6b9..65bfade025f09 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -72,7 +72,6 @@ int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
 int netpoll_setup(struct netpoll *np);
 void __netpoll_free(struct netpoll *np);
-void netpoll_cleanup(struct netpoll *np);
 void do_netpoll_cleanup(struct netpoll *np);
 netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 struct sk_buff *find_skb(struct netpoll *np, int len, int reserve);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4634e91568e8..9e12a667a5f0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -703,13 +703,3 @@ void do_netpoll_cleanup(struct netpoll *np)
 }
 EXPORT_SYMBOL(do_netpoll_cleanup);
 
-void netpoll_cleanup(struct netpoll *np)
-{
-	rtnl_lock();
-	if (!np->dev)
-		goto out;
-	do_netpoll_cleanup(np);
-out:
-	rtnl_unlock();
-}
-EXPORT_SYMBOL(netpoll_cleanup);
--
2.47.3
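The split between the wrapper and do_netpoll_cleanup() encodes an
RTNL locking contract. A sketch of that contract, as an assumption
drawn from the code above, with hypothetical caller names:

	/* Callers that already hold RTNL (e.g. a netdev notifier) use
	 * the core helper directly.
	 */
	static void example_cleanup_under_rtnl(struct netpoll *np)
	{
		ASSERT_RTNL();		/* caller already holds rtnl_lock() */
		do_netpoll_cleanup(np);
	}

	/* Everyone else goes through the driver-private wrapper, which
	 * acquires RTNL itself.
	 */
	static void example_cleanup_unlocked(struct netpoll *np)
	{
		netpoll_cleanup(np);
	}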
Make zap_completion_queue() globally visible by changing its linkage
to non-static and adding an EXPORT_SYMBOL_GPL. It is a genuine
netpoll function, but it is needed by code that is about to move out
of netpoll: in particular find_skb(), which invokes
zap_completion_queue() and will be moved to netconsole along with the
rest of the skb pool management.

Signed-off-by: Breno Leitao
---
 include/linux/netpoll.h | 1 +
 net/core/netpoll.c      | 5 ++---
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 65bfade025f09..7f8b4d758a1e7 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -75,6 +75,7 @@ void __netpoll_free(struct netpoll *np);
 void do_netpoll_cleanup(struct netpoll *np);
 netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 struct sk_buff *find_skb(struct netpoll *np, int len, int reserve);
+void zap_completion_queue(void);
 
 #ifdef CONFIG_NETPOLL
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9e12a667a5f0a..04a55ec392fd2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -50,8 +50,6 @@
 		 sizeof(struct udphdr) + \
 		 MAX_UDP_CHUNK)
 
-static void zap_completion_queue(void);
-
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
 
@@ -240,7 +238,7 @@ static void refill_skbs(struct netpoll *np)
 	spin_unlock_irqrestore(&skb_pool->lock, flags);
 }
 
-static void zap_completion_queue(void)
+void zap_completion_queue(void)
 {
 	unsigned long flags;
 	struct softnet_data *sd = &get_cpu_var(softnet_data);
@@ -267,6 +265,7 @@ static void zap_completion_queue(void)
 	put_cpu_var(softnet_data);
 }
+EXPORT_SYMBOL_GPL(zap_completion_queue);
 
 struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 {
--
2.47.3
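For context, the reason find_skb() wants this symbol:
zap_completion_queue() recycles skbs parked for TX completion on the
local CPU, freeing memory right before an atomic allocation. A hedged
sketch of that usage, mirroring what find_skb() does after the move
(the helper name is hypothetical):

	static struct sk_buff *alloc_skb_after_zap(int len)
	{
		/* Return completed skbs to the allocator first, improving
		 * the odds that the GFP_ATOMIC allocation succeeds.
		 */
		zap_completion_queue();
		return alloc_skb(len, GFP_ATOMIC);
	}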
Since netconsole is the sole user of the SKB pool within netpoll,
move the pool management into the netconsole driver. This prevents
other netpoll users from allocating and holding onto skb pool memory
unnecessarily, reducing memory usage in every case except netconsole.

The skb pool struct is still attached to struct netpoll, but it
should eventually move to the netconsole target, since it has nothing
to do with netpoll.

Signed-off-by: Breno Leitao
---
 drivers/net/netconsole.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++--
 net/core/netpoll.c       | 44 ------------------------------------
 2 files changed, 56 insertions(+), 46 deletions(-)

diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 90e359b87469a..3fe55db07cfe5 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -57,6 +57,19 @@ MODULE_LICENSE("GPL");
 #define MAX_EXTRADATA_ITEMS 16
 #define MAX_PRINT_CHUNK 1000
 
+/*
+ * We maintain a small pool of fully-sized skbs, to make sure the
+ * message gets out even in extreme OOM situations.
+ */
+
+#define MAX_SKBS 32
+#define MAX_UDP_CHUNK 1460
+#define MAX_SKB_SIZE \
+	(sizeof(struct ethhdr) + \
+	 sizeof(struct iphdr) + \
+	 sizeof(struct udphdr) + \
+	 MAX_UDP_CHUNK)
+
 static char config[MAX_PARAM_LENGTH];
 module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
 MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@/[tgt-macaddr]");
@@ -172,6 +185,33 @@ struct netconsole_target {
 	char buf[MAX_PRINT_CHUNK];
 };
 
+static void refill_skbs(struct netpoll *np)
+{
+	struct sk_buff_head *skb_pool;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	skb_pool = &np->skb_pool;
+
+	spin_lock_irqsave(&skb_pool->lock, flags);
+	while (skb_pool->qlen < MAX_SKBS) {
+		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
+		if (!skb)
+			break;
+
+		__skb_queue_tail(skb_pool, skb);
+	}
+	spin_unlock_irqrestore(&skb_pool->lock, flags);
+}
+
+static void refill_skbs_work_handler(struct work_struct *work)
+{
+	struct netpoll *np =
+		container_of(work, struct netpoll, refill_wq);
+
+	refill_skbs(np);
+}
+
 #ifdef CONFIG_NETCONSOLE_DYNAMIC
 
 static struct configfs_subsystem netconsole_subsys;
@@ -341,6 +381,20 @@ static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
 	return -1;
 }
 
+static int setup_netpoll(struct netpoll *np)
+{
+	int err;
+
+	err = netpoll_setup(np);
+	if (err)
+		return err;
+
+	refill_skbs(np);
+	INIT_WORK(&np->refill_wq, refill_skbs_work_handler);
+
+	return 0;
+}
+
 #ifdef CONFIG_NETCONSOLE_DYNAMIC
 
 /*
@@ -615,7 +669,7 @@ static ssize_t enabled_store(struct config_item *item,
 		 */
 		netconsole_print_banner(&nt->np);
 
-		ret = netpoll_setup(&nt->np);
+		ret = setup_netpoll(&nt->np);
 		if (ret)
 			goto out_unlock;
 
@@ -2036,7 +2090,7 @@ static struct netconsole_target *alloc_param_target(char *target_config,
 	if (err)
 		goto fail;
 
-	err = netpoll_setup(&nt->np);
+	err = setup_netpoll(&nt->np);
 	if (err) {
 		pr_err("Not enabling netconsole for %s%d. Netpoll setup failed\n",
 		       NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 04a55ec392fd2..94c75f39787bb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -35,21 +35,8 @@
 #include
 #include
 
-/*
- * We maintain a small pool of fully-sized skbs, to make sure the
- * message gets out even in extreme OOM situations.
- */
-
-#define MAX_UDP_CHUNK 1460
-#define MAX_SKBS 32
 #define USEC_PER_POLL 50
 
-#define MAX_SKB_SIZE \
-	(sizeof(struct ethhdr) + \
-	 sizeof(struct iphdr) + \
-	 sizeof(struct udphdr) + \
-	 MAX_UDP_CHUNK)
-
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
 
@@ -219,25 +206,6 @@ void netpoll_poll_enable(struct net_device *dev)
 	up(&ni->dev_lock);
 }
 
-static void refill_skbs(struct netpoll *np)
-{
-	struct sk_buff_head *skb_pool;
-	struct sk_buff *skb;
-	unsigned long flags;
-
-	skb_pool = &np->skb_pool;
-
-	spin_lock_irqsave(&skb_pool->lock, flags);
-	while (skb_pool->qlen < MAX_SKBS) {
-		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
-		if (!skb)
-			break;
-
-		__skb_queue_tail(skb_pool, skb);
-	}
-	spin_unlock_irqrestore(&skb_pool->lock, flags);
-}
-
 void zap_completion_queue(void)
 {
 	unsigned long flags;
@@ -395,14 +363,6 @@ static void skb_pool_flush(struct netpoll *np)
 	skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
 }
 
-static void refill_skbs_work_handler(struct work_struct *work)
-{
-	struct netpoll *np =
-		container_of(work, struct netpoll, refill_wq);
-
-	refill_skbs(np);
-}
-
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
 	struct netpoll_info *npinfo;
@@ -446,10 +406,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	strscpy(np->dev_name, ndev->name, IFNAMSIZ);
 	npinfo->netpoll = np;
 
-	/* fill up the skb queue */
-	refill_skbs(np);
-	INIT_WORK(&np->refill_wq, refill_skbs_work_handler);
-
 	/* last thing to do is link it to the net device structure */
 	rcu_assign_pointer(ndev->npinfo, npinfo);
--
2.47.3
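The moved pool follows a simple producer/consumer shape, sketched
below under one assumption: the consumer side (added to netconsole in
the next patch as find_skb()) kicks refill_wq whenever it drains an
entry, and refill_skbs_work_handler() tops the pool back up to
MAX_SKBS from process context, where allocation pressure is lower
than in the printk path. pool_take() is a hypothetical name for
illustration only.

	static struct sk_buff *pool_take(struct netpoll *np)
	{
		struct sk_buff *skb = skb_dequeue(&np->skb_pool);

		if (skb)
			schedule_work(&np->refill_wq); /* replace what we took */
		return skb;
	}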
Complete the SKB pool management refactoring by moving find_skb()
from the netpoll core to the netconsole driver, making it a static
function. This is the final step in removing SKB pool management from
the generic netpoll infrastructure. With this change:

1. The netpoll core is now purely transmission-focused: it contains
   only the essential netpoll_send_skb() function for low-level
   packet transmission, with no knowledge of SKB allocation or pool
   management.

2. Complete encapsulation in netconsole: all SKB lifecycle management
   (allocation, pool handling, packet construction) is now contained
   within the netconsole driver, where it belongs.

3. Cleaner API surface: removes the last SKB management export from
   netpoll, leaving only zap_completion_queue() as a utility function
   and netpoll_send_skb() for transmission.

4. Better maintainability: changes to SKB allocation strategies or
   pool management can now be made entirely within netconsole without
   affecting the core netpoll infrastructure.

find_skb() is made static since it is now only used within
netconsole.c for its internal SKB allocation needs. This completes
the architectural cleanup that separates generic netpoll transmission
capabilities from console-specific resource management.

Signed-off-by: Breno Leitao
---
 drivers/net/netconsole.c | 27 +++++++++++++++++++++++++++
 include/linux/netpoll.h  |  1 -
 net/core/netpoll.c       | 28 ----------------------------
 3 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 3fe55db07cfe5..bf7bab7a9c2f0 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -1655,6 +1655,33 @@ static void push_eth(struct netpoll *np, struct sk_buff *skb)
 		eth->h_proto = htons(ETH_P_IP);
 }
 
+static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
+{
+	int count = 0;
+	struct sk_buff *skb;
+
+	zap_completion_queue();
+repeat:
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb) {
+		skb = skb_dequeue(&np->skb_pool);
+		schedule_work(&np->refill_wq);
+	}
+
+	if (!skb) {
+		if (++count < 10) {
+			netpoll_poll_dev(np->dev);
+			goto repeat;
+		}
+		return NULL;
+	}
+
+	refcount_set(&skb->users, 1);
+	skb_reserve(skb, reserve);
+	return skb;
+}
+
 static struct sk_buff *netpoll_prepare_skb(struct netpoll *np, const char *msg,
 					   int len)
 {
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 7f8b4d758a1e7..f89bc9fb1f773 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -74,7 +74,6 @@ int netpoll_setup(struct netpoll *np);
 void __netpoll_free(struct netpoll *np);
 void do_netpoll_cleanup(struct netpoll *np);
 netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
-struct sk_buff *find_skb(struct netpoll *np, int len, int reserve);
 void zap_completion_queue(void);
 
 #ifdef CONFIG_NETPOLL
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94c75f39787bb..5aa83c9c09e05 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -235,34 +235,6 @@ void zap_completion_queue(void)
 }
 EXPORT_SYMBOL_GPL(zap_completion_queue);
 
-struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
-{
-	int count = 0;
-	struct sk_buff *skb;
-
-	zap_completion_queue();
-repeat:
-
-	skb = alloc_skb(len, GFP_ATOMIC);
-	if (!skb) {
-		skb = skb_dequeue(&np->skb_pool);
-		schedule_work(&np->refill_wq);
-	}
-
-	if (!skb) {
-		if (++count < 10) {
-			netpoll_poll_dev(np->dev);
-			goto repeat;
-		}
-		return NULL;
-	}
-
-	refcount_set(&skb->users, 1);
-	skb_reserve(skb, reserve);
-	return skb;
-}
-EXPORT_SYMBOL_GPL(find_skb);
-
 static int netpoll_owner_active(struct net_device *dev)
 {
 	struct napi_struct *napi;
--
2.47.3
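For reference, after this patch the netpoll surface that netconsole
consumes narrows to the prototypes remaining in
include/linux/netpoll.h, reconstructed here from the hunks above:

	int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
	int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
	int netpoll_setup(struct netpoll *np);
	void __netpoll_free(struct netpoll *np);
	void do_netpoll_cleanup(struct netpoll *np);
	netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
	void zap_completion_queue(void);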
Transfer the skb_pool_flush() function from netpoll to netconsole,
and call it within netpoll_cleanup() to ensure skb pool resources are
properly released once the device is down. The skb_pool_flush() call
was removed from netpoll_setup(), as the pool is now only populated
after a successful setup. This completes the move of skb pool
management from netpoll to netconsole.

Signed-off-by: Breno Leitao
---
 drivers/net/netconsole.c | 10 ++++++++++
 net/core/netpoll.c       | 15 +--------------
 2 files changed, 11 insertions(+), 14 deletions(-)

diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index bf7bab7a9c2f0..b9bfb78560b3c 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -300,12 +300,22 @@ static struct netconsole_target *alloc_and_init(void)
 	return nt;
 }
 
+static void skb_pool_flush(struct netpoll *np)
+{
+	struct sk_buff_head *skb_pool;
+
+	cancel_work_sync(&np->refill_wq);
+	skb_pool = &np->skb_pool;
+	skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
+}
+
 static void netpoll_cleanup(struct netpoll *np)
 {
 	rtnl_lock();
 	if (!np->dev)
 		goto out;
 	do_netpoll_cleanup(np);
+	skb_pool_flush(np);
 out:
 	rtnl_unlock();
 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5aa83c9c09e05..c0eeeb9ac3daf 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -326,15 +326,6 @@ netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netpoll_send_skb);
 
-static void skb_pool_flush(struct netpoll *np)
-{
-	struct sk_buff_head *skb_pool;
-
-	cancel_work_sync(&np->refill_wq);
-	skb_pool = &np->skb_pool;
-	skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
-}
-
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
 	struct netpoll_info *npinfo;
@@ -547,7 +538,7 @@ int netpoll_setup(struct netpoll *np)
 
 	err = __netpoll_setup(np, ndev);
 	if (err)
-		goto flush;
+		goto put;
 	rtnl_unlock();
 
 	/* Make sure all NAPI polls which started before dev->npinfo
@@ -558,8 +549,6 @@ int netpoll_setup(struct netpoll *np)
 
 	return 0;
 
-flush:
-	skb_pool_flush(np);
 put:
 	DEBUG_NET_WARN_ON_ONCE(np->dev);
 	if (ip_overwritten)
@@ -607,8 +596,6 @@ static void __netpoll_cleanup(struct netpoll *np)
 		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	} else
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
-
-	skb_pool_flush(np);
 }
 
 void __netpoll_free(struct netpoll *np)
--
2.47.3
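Taken together, the series leaves netconsole with a self-contained
send and teardown path. A compressed sketch of the resulting flow,
using only names from the patches above:

	/* netconsole send path after this series:
	 *
	 *   send_udp()
	 *     -> netpoll_prepare_skb()      (netconsole.c, static)
	 *          -> find_skb()            (netconsole.c, static; uses
	 *                                    zap_completion_queue())
	 *          -> push_udp()/push_ipv4() or push_ipv6()/push_eth()
	 *     -> netpoll_send_skb()         (net/core/netpoll.c)
	 *
	 * and teardown:
	 *
	 *   netpoll_cleanup()               (netconsole.c)
	 *     -> do_netpoll_cleanup()       (net/core/netpoll.c)
	 *     -> skb_pool_flush()           (netconsole.c; cancels the
	 *                                    refill work, purges the pool)
	 */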