From: Jason Xing

In shared umem mode with different queues or devices, leaving either
the cq or the fq uninitialized is not allowed; this used to be checked
in xp_assign_dev_shared(). This patch performs the check earlier, in
xsk_bind(), so that 1) we can avoid a few memory allocations and
related setup when cq or fq is NULL, and 2) it serves as preparation
for the next patch in the series.

Signed-off-by: Jason Xing
---
 net/xdp/xsk.c           | 7 +++++++
 net/xdp/xsk_buff_pool.c | 4 ----
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f093c3453f64..3c52fafae47c 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1349,6 +1349,13 @@ static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr
         }
 
         if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
+                /* One fill and completion ring required for each queue id. */
+                if (!xsk_validate_queues(xs)) {
+                        err = -EINVAL;
+                        sockfd_put(sock);
+                        goto out_unlock;
+                }
+
                 /* Share the umem with another socket on another qid
                  * and/or device.
                  */
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 51526034c42a..6bf84316e2ad 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -247,10 +247,6 @@ int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
         u16 flags;
         struct xdp_umem *umem = umem_xs->umem;
 
-        /* One fill and completion ring required for each queue id. */
-        if (!pool->fq || !pool->cq)
-                return -EINVAL;
-
         flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
         if (umem_xs->pool->uses_need_wakeup)
                 flags |= XDP_USE_NEED_WAKEUP;
--
2.41.3

From: Jason Xing

We (Paolo and I) noticed that touching an extra cacheline in the
sending path because of cq_cached_prod_lock hurts performance. After
moving the lock from struct xsk_buff_pool to struct xsk_queue, the
performance increases by ~5%, as observed with xdpsock.

An alternative approach [1] would be to use atomic_try_cmpxchg() to
achieve the same effect. Unfortunately, I don't have solid performance
numbers proving that the atomic approach is better than the current
patch. Its advantage is avoiding contention among multiple xsks sharing
the same pool, while its disadvantage is that it is harder to maintain.
The full discussion can be found at the following link.

[1]: https://lore.kernel.org/all/20251128134601.54678-1-kerneljasonxing@gmail.com/

Suggested-by: Paolo Abeni
Signed-off-by: Jason Xing
---
 include/net/xsk_buff_pool.h | 5 -----
 net/xdp/xsk.c               | 8 ++++----
 net/xdp/xsk_buff_pool.c     | 2 +-
 net/xdp/xsk_queue.h         | 5 +++++
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 92a2358c6ce3..0b1abdb99c9e 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -90,11 +90,6 @@ struct xsk_buff_pool {
          * destructor callback.
          */
         spinlock_t cq_prod_lock;
-        /* Mutual exclusion of the completion ring in the SKB mode.
-         * Protect: when sockets share a single cq when the same netdev
-         * and queue id is shared.
-         */
-        spinlock_t cq_cached_prod_lock;
         struct xdp_buff_xsk *free_heads[];
 };
 
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3c52fafae47c..3b46bc635c43 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -543,9 +543,9 @@ static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
 {
         int ret;
 
-        spin_lock(&pool->cq_cached_prod_lock);
+        spin_lock(&pool->cq->cq_cached_prod_lock);
         ret = xskq_prod_reserve(pool->cq);
-        spin_unlock(&pool->cq_cached_prod_lock);
+        spin_unlock(&pool->cq->cq_cached_prod_lock);
         return ret;
 }
 
@@ -619,9 +619,9 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
 
 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
 {
-        spin_lock(&pool->cq_cached_prod_lock);
+        spin_lock(&pool->cq->cq_cached_prod_lock);
         xskq_prod_cancel_n(pool->cq, n);
-        spin_unlock(&pool->cq_cached_prod_lock);
+        spin_unlock(&pool->cq->cq_cached_prod_lock);
 }
 
 INDIRECT_CALLABLE_SCOPE
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 6bf84316e2ad..cd5125b6af53 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -91,7 +91,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
         INIT_LIST_HEAD(&pool->xsk_tx_list);
         spin_lock_init(&pool->xsk_tx_list_lock);
         spin_lock_init(&pool->cq_prod_lock);
-        spin_lock_init(&pool->cq_cached_prod_lock);
+        spin_lock_init(&xs->cq_tmp->cq_cached_prod_lock);
         refcount_set(&pool->users, 1);
 
         pool->fq = xs->fq_tmp;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 1eb8d9f8b104..ec08d9c102b1 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -46,6 +46,11 @@ struct xsk_queue {
         u64 invalid_descs;
         u64 queue_empty_descs;
         size_t ring_vmalloc_size;
+        /* Mutual exclusion of the completion ring in the SKB mode.
+         * Protect: when sockets share a single cq when the same netdev
+         * and queue id is shared.
+         */
+        spinlock_t cq_cached_prod_lock;
 };
 
 struct parsed_desc {
--
2.41.3
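
A note on the first patch: xsk_validate_queues() is called in xsk_bind()
but is not defined in this excerpt. Going by the check it replaces in
xp_assign_dev_shared() and by the fact that the new call site runs before
the pool is created, one would expect a helper along the lines of the
sketch below; the actual definition in the series may differ, and the
fq_tmp/cq_tmp fields referenced here are the existing temporary ring
pointers in struct xdp_sock.

/* Sketch only, not taken from the series: at this point the pool does
 * not exist yet, so the moved check presumably looks at the rings still
 * held in the socket's temporary pointers rather than at pool->fq/cq.
 */
static bool xsk_validate_queues(struct xdp_sock *xs)
{
        return xs->fq_tmp && xs->cq_tmp;
}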
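
A note on the second patch's cacheline argument: the helpers that run
under cq_cached_prod_lock only operate on struct xsk_queue state. For
reference, xskq_prod_reserve() in mainline net/xdp/xsk_queue.h looks
roughly like the snippet below (reproduced from memory, so treat it as
illustrative); keeping the lock next to cached_prod therefore keeps the
SKB-mode TX hot path within the queue's own cachelines instead of also
dirtying the cacheline in struct xsk_buff_pool that used to hold the lock.

/* Roughly the mainline helper called under the lock in
 * xsk_cq_reserve_locked(); it only reads and writes struct xsk_queue
 * fields, which is why co-locating cq_cached_prod_lock with them avoids
 * touching an extra xsk_buff_pool cacheline on every send.
 */
static inline int xskq_prod_reserve(struct xsk_queue *q)
{
        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        q->cached_prod++;
        return 0;
}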