Similar to what we do in net_mp_{open,close}_rxq for mapped queues, also
proxy xsk_{reg,clear}_pool_at_qid via netif_get_rx_queue_peer_locked, so
that when a virtual netdev picked a mapped rxq, the request is passed
through to the real rxq in the physical netdev. The proxying is only
relevant for queue_id < dev->real_num_rx_queues since right now it is
only supported for rxqs.

Signed-off-by: Daniel Borkmann
Co-developed-by: David Wei
Signed-off-by: David Wei
---
 net/xdp/xsk.c | 48 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 13 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 6ae9ad5f27ad..872b243b7fcc 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -23,6 +23,8 @@
 #include
 #include
 #include
+
+#include
 #include
 #include
 #include
@@ -124,10 +126,18 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
-	if (queue_id < dev->num_rx_queues)
-		dev->_rx[queue_id].pool = NULL;
-	if (queue_id < dev->num_tx_queues)
-		dev->_tx[queue_id].pool = NULL;
+	struct net_device *orig_dev = dev;
+	unsigned int id = queue_id;
+
+	if (id < dev->real_num_rx_queues)
+		WARN_ON_ONCE(!netif_get_rx_queue_peer_locked(&dev, &id));
+
+	if (id < dev->real_num_rx_queues)
+		dev->_rx[id].pool = NULL;
+	if (id < dev->real_num_tx_queues)
+		dev->_tx[id].pool = NULL;
+
+	netif_put_rx_queue_peer_locked(orig_dev, dev);
 }
 
 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
@@ -137,17 +147,29 @@ void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
 			u16 queue_id)
 {
-	if (queue_id >= max_t(unsigned int,
-			      dev->real_num_rx_queues,
-			      dev->real_num_tx_queues))
-		return -EINVAL;
+	struct net_device *orig_dev = dev;
+	unsigned int id = queue_id;
+	int ret = 0;
 
-	if (queue_id < dev->real_num_rx_queues)
-		dev->_rx[queue_id].pool = pool;
-	if (queue_id < dev->real_num_tx_queues)
-		dev->_tx[queue_id].pool = pool;
+	if (id >= max(dev->real_num_rx_queues,
+		      dev->real_num_tx_queues))
+		return -EINVAL;
+	if (id < dev->real_num_rx_queues) {
+		if (!netif_get_rx_queue_peer_locked(&dev, &id))
+			return -EBUSY;
+		if (xsk_get_pool_from_qid(dev, id)) {
+			ret = -EBUSY;
+			goto out;
+		}
+	}
 
-	return 0;
+	if (id < dev->real_num_rx_queues)
+		dev->_rx[id].pool = pool;
+	if (id < dev->real_num_tx_queues)
+		dev->_tx[id].pool = pool;
+out:
+	netif_put_rx_queue_peer_locked(orig_dev, dev);
+	return ret;
 }
 
 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
-- 
2.43.0
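
For reference, a minimal sketch of the resolve/operate/release pattern the
proxying follows; it is illustrative only and not part of the patch. The
netif_{get,put}_rx_queue_peer_locked signatures are taken from the hunks
above, and handle_real_rxq() is a hypothetical stand-in for whatever work
is done on the resolved queue:

	/* Illustrative sketch, not part of this patch: resolve a possibly
	 * mapped rxq to its peer (the real rxq on the physical netdev),
	 * operate on it, then drop the peer reference/lock again. For a
	 * plain physical netdev, dev and id are left untouched.
	 */
	static int example_rxq_peer_op(struct net_device *dev, u16 queue_id)
	{
		struct net_device *orig_dev = dev;	/* netdev we were called with */
		unsigned int id = queue_id;
		int ret;

		if (!netif_get_rx_queue_peer_locked(&dev, &id))
			return -EBUSY;

		ret = handle_real_rxq(dev, id);		/* hypothetical per-queue work */

		netif_put_rx_queue_peer_locked(orig_dev, dev);
		return ret;
	}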