From: Jason Xing

Use dedicated functions for cached_prod in the generic path instead of
extending the unified functions, so that the zerocopy path is not
affected. Use atomic operations on cached_prod.

Signed-off-by: Jason Xing
---
 net/xdp/xsk.c       |  4 ++--
 net/xdp/xsk_queue.h | 21 ++++++++++++++++++---
 2 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index bcfd400e9cf8..b63409b1422e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -551,7 +551,7 @@ static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
 	int ret;
 
 	spin_lock(&pool->cq_cached_prod_lock);
-	ret = xskq_prod_reserve(pool->cq);
+	ret = xsk_cq_cached_prod_reserve(pool->cq);
 	spin_unlock(&pool->cq_cached_prod_lock);
 
 	return ret;
@@ -588,7 +588,7 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
 {
 	spin_lock(&pool->cq_cached_prod_lock);
-	xskq_prod_cancel_n(pool->cq, n);
+	atomic_sub(n, &pool->cq->cached_prod_atomic);
 	spin_unlock(&pool->cq_cached_prod_lock);
 }
 
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 44cc01555c0b..3a023791b273 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -402,13 +402,28 @@ static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
 	q->cached_prod -= cnt;
 }
 
-static inline int xskq_prod_reserve(struct xsk_queue *q)
+static inline bool xsk_cq_cached_prod_nb_free(struct xsk_queue *q)
 {
-	if (xskq_prod_is_full(q))
+	u32 cached_prod = atomic_read(&q->cached_prod_atomic);
+	u32 free_entries = q->nentries - (cached_prod - q->cached_cons);
+
+	if (free_entries)
+		return true;
+
+	/* Refresh the local tail pointer */
+	q->cached_cons = READ_ONCE(q->ring->consumer);
+	free_entries = q->nentries - (cached_prod - q->cached_cons);
+
+	return free_entries ? true : false;
+}
+
+static inline int xsk_cq_cached_prod_reserve(struct xsk_queue *q)
+{
+	if (!xsk_cq_cached_prod_nb_free(q))
 		return -ENOSPC;
 
 	/* A, matches D */
-	q->cached_prod++;
+	atomic_inc(&q->cached_prod_atomic);
 	return 0;
 }
-- 
2.41.3
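
For readers following the patch, below is a minimal standalone userspace
sketch of the reservation/cancel arithmetic used above. It is not kernel
code and is not part of the patch: the ring_model struct and the helper
names (nb_free, reserve_slot, cancel_slots) are invented for illustration,
C11 atomics stand in for the kernel's atomic_t operations, and the
spinlock and memory-ordering concerns of the real completion queue are
deliberately left out.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8u	/* ring size, a power of two like the xsk rings */

/* Hypothetical stand-in for the parts of struct xsk_queue used here. */
struct ring_model {
	atomic_uint cached_prod;	/* producer index shared by reservers */
	uint32_t cached_cons;		/* local snapshot of the consumer index */
	atomic_uint consumer;		/* stands in for q->ring->consumer */
};

/* Roughly mirrors xsk_cq_cached_prod_nb_free(): check free space and
 * refresh the cached consumer index only when the fast check fails. */
static bool nb_free(struct ring_model *r)
{
	uint32_t prod = atomic_load(&r->cached_prod);
	uint32_t free_entries = NENTRIES - (prod - r->cached_cons);

	if (free_entries)
		return true;

	r->cached_cons = atomic_load(&r->consumer);
	free_entries = NENTRIES - (prod - r->cached_cons);
	return free_entries != 0;
}

/* Roughly mirrors xsk_cq_cached_prod_reserve(): bump the atomic producer
 * index if there is room, otherwise fail like -ENOSPC. */
static int reserve_slot(struct ring_model *r)
{
	if (!nb_free(r))
		return -1;

	atomic_fetch_add(&r->cached_prod, 1);
	return 0;
}

/* Roughly mirrors the atomic_sub() in xsk_cq_cancel_locked(). */
static void cancel_slots(struct ring_model *r, uint32_t n)
{
	atomic_fetch_sub(&r->cached_prod, n);
}

int main(void)
{
	struct ring_model r = { 0 };
	int reserved = 0;

	/* Fill the ring: exactly NENTRIES reservations succeed. */
	while (reserve_slot(&r) == 0)
		reserved++;
	printf("reserved %d of %u slots\n", reserved, NENTRIES);

	/* The consumer drains two entries. The fast check still sees a full
	 * ring, so nb_free() refreshes cached_cons and the reserve succeeds. */
	atomic_store(&r.consumer, 2);
	printf("after consume: reserve %s\n",
	       reserve_slot(&r) == 0 ? "ok" : "failed");

	/* Cancelling a reservation gives the slot back. */
	cancel_slots(&r, 1);
	printf("after cancel: reserve %s\n",
	       reserve_slot(&r) == 0 ? "ok" : "failed");
	return 0;
}

The unsigned subtraction (prod - cached_cons) keeps working across index
wrap-around, which is why the indices are never masked until a slot is
actually written, matching the style of the existing xskq helpers.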