From: Jason Xing

In the shared umem mode with different queues or devices, an
uninitialized cq or fq is not allowed; this was previously checked in
xp_assign_dev_shared(). This patch moves the check to the beginning of
the bind path so that 1) we can avoid a few memory allocations and
related setup if cq or fq is NULL, and 2) it serves as preparation for
the next patch in the series.

Signed-off-by: Jason Xing
---
 net/xdp/xsk.c           | 7 +++++++
 net/xdp/xsk_buff_pool.c | 4 ----
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f093c3453f64..3c52fafae47c 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1349,6 +1349,13 @@ static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr
 	}

 	if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
+		/* One fill and completion ring required for each queue id. */
+		if (!xsk_validate_queues(xs)) {
+			err = -EINVAL;
+			sockfd_put(sock);
+			goto out_unlock;
+		}
+
 		/* Share the umem with another socket on another qid
 		 * and/or device.
 		 */
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 51526034c42a..6bf84316e2ad 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -247,10 +247,6 @@ int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
 	u16 flags;
 	struct xdp_umem *umem = umem_xs->umem;

-	/* One fill and completion ring required for each queue id. */
-	if (!pool->fq || !pool->cq)
-		return -EINVAL;
-
 	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
 	if (umem_xs->pool->uses_need_wakeup)
 		flags |= XDP_USE_NEED_WAKEUP;
--
2.41.3