From: Jason Xing

- Move xs->mutex into xsk_generic_xmit to prevent a race condition when
  the application manipulates generic_xmit_batch simultaneously.
- Enable batch xmit, making the whole feature finally work.

Signed-off-by: Jason Xing
---
 net/xdp/xsk.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 90089a6e78b2..34fd54ad4768 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -803,8 +803,6 @@ static int __xsk_generic_xmit_batch(struct xdp_sock *xs)
 	u32 max_batch, expected;
 	int err = 0;
 
-	mutex_lock(&xs->mutex);
-
 	/* Since we dropped the RCU read lock, the socket state might have changed. */
 	if (unlikely(!xsk_is_bound(xs))) {
 		err = -ENXIO;
@@ -902,21 +900,17 @@ static int __xsk_generic_xmit_batch(struct xdp_sock *xs)
 	if (sent_frame)
 		__xsk_tx_release(xs);
 
-	mutex_unlock(&xs->mutex);
 	return err;
 }
 
-static int __xsk_generic_xmit(struct sock *sk)
+static int __xsk_generic_xmit(struct xdp_sock *xs)
 {
-	struct xdp_sock *xs = xdp_sk(sk);
 	bool sent_frame = false;
 	struct xdp_desc desc;
 	struct sk_buff *skb;
 	u32 max_batch;
 	int err = 0;
 
-	mutex_lock(&xs->mutex);
-
 	/* Since we dropped the RCU read lock, the socket state might have changed. */
 	if (unlikely(!xsk_is_bound(xs))) {
 		err = -ENXIO;
@@ -991,17 +985,22 @@ static int __xsk_generic_xmit(struct sock *sk)
 	if (sent_frame)
 		__xsk_tx_release(xs);
 
-	mutex_unlock(&xs->mutex);
 	return err;
 }
 
 static int xsk_generic_xmit(struct sock *sk)
 {
+	struct xdp_sock *xs = xdp_sk(sk);
 	int ret;
 
 	/* Drop the RCU lock since the SKB path might sleep. */
 	rcu_read_unlock();
-	ret = __xsk_generic_xmit(sk);
+	mutex_lock(&xs->mutex);
+	if (xs->generic_xmit_batch)
+		ret = __xsk_generic_xmit_batch(xs);
+	else
+		ret = __xsk_generic_xmit(xs);
+	mutex_unlock(&xs->mutex);
 
 	/* Reaquire RCU lock before going into common code. */
 	rcu_read_lock();
-- 
2.41.3
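
Note: for reference, a sketch of how xsk_generic_xmit() reads once this patch
is applied, reconstructed from the hunks above; the trailing "return ret;" is
not visible in the diff and is assumed here.

/* Sketch reconstructed from the diff above (not verbatim net/xdp/xsk.c);
 * the final return statement is assumed since the last hunk ends at
 * rcu_read_lock().
 */
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();

	/* Holding xs->mutex around both the read of xs->generic_xmit_batch
	 * and the chosen xmit path keeps the decision consistent even if the
	 * application changes the batch size concurrently.
	 */
	mutex_lock(&xs->mutex);
	if (xs->generic_xmit_batch)
		ret = __xsk_generic_xmit_batch(xs);
	else
		ret = __xsk_generic_xmit(xs);
	mutex_unlock(&xs->mutex);

	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret; /* assumed; not shown in the hunk */
}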