Avoid piling too many producers on the busylock by updating
sk_rmem_alloc before busylock acquisition.

Signed-off-by: Eric Dumazet
---
 net/ipv4/udp.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index edd846fee90ff7850356a5cb3400ce96856e5429..658ae87827991a78c25c2172d52e772c94ea217f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1753,13 +1753,16 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	if (rmem > (rcvbuf >> 1)) {
 		skb_condense(skb);
 		size = skb->truesize;
+		rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
+		if (rmem > rcvbuf)
+			goto uncharge_drop;
 		busy = busylock_acquire(sk);
+	} else {
+		atomic_add(size, &sk->sk_rmem_alloc);
 	}
 
 	udp_set_dev_scratch(skb);
 
-	atomic_add(size, &sk->sk_rmem_alloc);
-
 	spin_lock(&list->lock);
 	err = udp_rmem_schedule(sk, size);
 	if (err) {
-- 
2.51.0.384.g4c02a37b29-goog
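
For readers outside the kernel tree, here is a minimal userspace sketch of
the same pattern, written with C11 atomics and a pthread mutex standing in
for sk_rmem_alloc and the busylock: charge the memory counter first, and
only producers still under the budget go on to contend on the lock,
mirroring the atomic_add_return() / goto uncharge_drop sequence in the hunk
above. The names fake_sock and charge_or_drop(), and the byte counts in
main(), are invented for illustration, and the sketch omits the
"queue more than half full" test that gates this path in the real
__udp_enqueue_schedule_skb().

/* Build with: cc -std=c11 -pthread sketch.c -- illustration only, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {			/* invented stand-in for struct sock */
	atomic_int	rmem_alloc;	/* plays the role of sk->sk_rmem_alloc */
	int		rcvbuf;		/* plays the role of sk->sk_rcvbuf */
	pthread_mutex_t	busylock;	/* plays the role of the per-socket busylock */
};

/*
 * Charge @size to rmem_alloc before touching the lock.  A producer that
 * would overflow the budget uncharges and bails out here, so it never
 * queues up on the busylock; only in-budget producers contend on it.
 */
static bool charge_or_drop(struct fake_sock *sk, int size)
{
	int rmem = atomic_fetch_add(&sk->rmem_alloc, size) + size;

	if (rmem > sk->rcvbuf) {
		atomic_fetch_sub(&sk->rmem_alloc, size); /* the "uncharge_drop" path */
		return false;
	}

	pthread_mutex_lock(&sk->busylock);
	/* ... work that needs serialization (the enqueue) would go here ... */
	pthread_mutex_unlock(&sk->busylock);
	return true;
}

int main(void)
{
	static struct fake_sock sk = {
		.rcvbuf   = 4096,
		.busylock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("2048-byte charge accepted: %d\n", charge_or_drop(&sk, 2048)); /* 1 */
	printf("4096-byte charge accepted: %d\n", charge_or_drop(&sk, 4096)); /* 0: dropped before the lock */
	return 0;
}

In the sketch, as in the patch, the over-limit producer is turned away by a
single atomic operation and never adds itself to the set of threads spinning
or sleeping on the lock, which is what keeps the busylock from collecting
producers whose packets would be dropped anyway.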