From: Jason Xing Previously it only reserved one slot. This patch extends it to n slots to cover the batch mode. Signed-off-by: Jason Xing --- net/xdp/xsk.c | 12 ++++++++---- net/xdp/xsk_queue.h | 12 +++++++----- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 6cd2e58e170c..c26e26cb4dda 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -546,12 +546,17 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags) return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags); } -static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool) +/* Try to reserve up to n descs. If not even a single slot can be + * reserved, return zero. Otherwise, return the number of slots + * actually reserved, which may be fewer than n if the ring runs + * out of space. + */ +static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool, u32 n) { int ret; spin_lock(&pool->cq->cq_cached_prod_lock); - ret = xskq_prod_reserve(pool->cq); + ret = xskq_prod_reserve(pool->cq, n); spin_unlock(&pool->cq->cq_cached_prod_lock); return ret; @@ -947,8 +952,7 @@ static int __xsk_generic_xmit(struct sock *sk) * if there is space in it. This avoids having to implement * any buffering in the Tx path. 
*/ - err = xsk_cq_reserve_locked(xs->pool); - if (err) { + if (!xsk_cq_reserve_locked(xs->pool, 1)) { err = -EAGAIN; goto out; } diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 354f6fe86893..34cc07d6115e 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -413,14 +413,16 @@ static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt) q->cached_prod -= cnt; } -static inline int xskq_prod_reserve(struct xsk_queue *q) +static inline int xskq_prod_reserve(struct xsk_queue *q, u32 n) { - if (xskq_prod_is_full(q)) - return -ENOSPC; + u32 nr_free = xskq_prod_nb_free(q, n); + + if (!nr_free) + return 0; /* A, matches D */ - q->cached_prod++; - return 0; + q->cached_prod += nr_free; + return nr_free; } static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr) -- 2.41.3