Now ice is ready to get queue_mgmt_ops support. It already has an API to
disable/reconfigure/enable one particular queue (for XSk). Reuse as much
of its code as possible to implement the Rx queue management callbacks,
and vice versa: the new ice_queue_mem_{alloc,free}() are reused during
ifup/ifdown to avoid code duplication.

With this, ice passes the io_uring zcrx selftests, meaning the Rx part
of netmem/MP support is done.
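For context, the flow these callbacks plug into looks roughly as in the
sketch below, simplified from netdev_rx_queue_restart() in
net/core/netdev_rx_queue.c (this is how e.g. io_uring zcrx restarts a
queue after installing a memory provider). The sketch assumes the netdev
is UP and elides the capability checks, locking and the unwind paths
(the real function also frees the new queue memory and restarts the old
queue on failure); rx_queue_restart_sketch() is an illustrative name,
not a real kernel symbol:

static int rx_queue_restart_sketch(struct net_device *dev, int rxq)
{
	const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err = -ENOMEM;

	/* Scratch containers; ndo_queue_mem_size is sizeof(struct ice_rx_ring) */
	new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem || !old_mem)
		goto free;

	/* ice_queue_mem_alloc(): clone the ring params, create the page_pool(s) */
	err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq);
	if (err)
		goto free;

	/* ice_queue_stop(): __ice_qp_dis(), then stash the live ring state */
	err = qops->ndo_queue_stop(dev, old_mem, rxq);
	if (err)
		goto free;

	/* ice_queue_start(): adopt the new pools from new_mem, then __ice_qp_ena() */
	err = qops->ndo_queue_start(dev, new_mem, rxq);
	if (err)
		goto free;

	/* ice_queue_mem_free(): release the stopped queue's pools and buffers */
	qops->ndo_queue_mem_free(dev, old_mem);

free:
	kvfree(old_mem);
	kvfree(new_mem);
	return err;
}

Note that the core always frees both scratch containers itself:
ice_queue_start() below copies the page_pool pointers and the xdp_rxq
out of per_queue_mem into the real ring, so the spare ice_rx_ring only
ever acts as a carrier for queue state.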
Reviewed-by: Jacob Keller
Reviewed-by: Aleksandr Loktionov
Signed-off-by: Alexander Lobakin
---
 drivers/net/ethernet/intel/ice/ice_lib.h    |   5 +
 drivers/net/ethernet/intel/ice/ice_txrx.h   |   2 +
 drivers/net/ethernet/intel/ice/ice_base.c   | 192 ++++++++++++++------
 drivers/net/ethernet/intel/ice/ice_main.c   |   2 +-
 drivers/net/ethernet/intel/ice/ice_sf_eth.c |   2 +-
 drivers/net/ethernet/intel/ice/ice_txrx.c   |  26 ++-
 6 files changed, 163 insertions(+), 66 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 347b63e497e7..2d3168458891 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -4,6 +4,8 @@
 #ifndef _ICE_LIB_H_
 #define _ICE_LIB_H_
 
+#include <net/netdev_queues.h>
+
 #include "ice.h"
 #include "ice_vlan.h"
 
@@ -132,4 +134,7 @@ void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
 void ice_init_feature_support(struct ice_pf *pf);
 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
 void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel);
+
+extern const struct netdev_queue_mgmt_ops ice_queue_mgmt_ops;
+
 #endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c51b1e60f717..e3c682723107 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -440,6 +440,8 @@ u16 ice_select_queue(struct net_device *dev, struct sk_buff *skb,
 		     struct net_device *sb_dev);
 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_queue_mem_free(struct net_device *dev, void *per_queue_mem);
+void ice_zero_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 7097324c38f3..4479bb5e66ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -651,6 +651,42 @@ static int ice_rxq_pp_create(struct ice_rx_ring *rq)
 	return err;
 }
 
+static int ice_queue_mem_alloc(struct net_device *dev, void *per_queue_mem,
+			       int idx)
+{
+	const struct ice_netdev_priv *priv = netdev_priv(dev);
+	const struct ice_rx_ring *real = priv->vsi->rx_rings[idx];
+	struct ice_rx_ring *new = per_queue_mem;
+	int ret;
+
+	new->count = real->count;
+	new->netdev = real->netdev;
+	new->q_index = real->q_index;
+	new->q_vector = real->q_vector;
+	new->vsi = real->vsi;
+
+	ret = ice_rxq_pp_create(new);
+	if (ret)
+		return ret;
+
+	if (!netif_running(dev))
+		return 0;
+
+	ret = __xdp_rxq_info_reg(&new->xdp_rxq, new->netdev, new->q_index,
+				 new->q_vector->napi.napi_id, new->rx_buf_len);
+	if (ret)
+		goto err_destroy_fq;
+
+	xdp_rxq_info_attach_page_pool(&new->xdp_rxq, new->pp);
+
+	return 0;
+
+err_destroy_fq:
+	ice_rxq_pp_destroy(new);
+
+	return ret;
+}
+
 /**
  * ice_vsi_cfg_rxq - Configure an Rx queue
  * @ring: the ring being configured
@@ -665,23 +701,12 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 	int err;
 
 	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
-		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
-			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-						 ring->q_index,
-						 ring->q_vector->napi.napi_id,
-						 ring->rx_buf_len);
-			if (err)
-				return err;
-		}
-
 		ice_rx_xsk_pool(ring);
 		err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool);
 		if (err)
 			return err;
 
 		if (ring->xsk_pool) {
-			xdp_rxq_info_unreg(&ring->xdp_rxq);
-
 			rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
@@ -700,20 +725,10 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 				 ring->q_index);
 		} else {
-			err = ice_rxq_pp_create(ring);
+			err = ice_queue_mem_alloc(ring->netdev, ring,
+						  ring->q_index);
 			if (err)
 				return err;
-
-			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
-				err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-							 ring->q_index,
-							 ring->q_vector->napi.napi_id,
-							 ring->rx_buf_len);
-				if (err)
-					goto err_destroy_fq;
-			}
-
-			xdp_rxq_info_attach_page_pool(&ring->xdp_rxq,
-						      ring->pp);
 		}
 	}
 
@@ -722,7 +737,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 	if (err) {
 		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
 			ring->q_index, err);
-		goto err_destroy_fq;
+		goto err_clean_rq;
 	}
 
 	if (ring->xsk_pool) {
@@ -753,12 +768,12 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 
 	err = ice_alloc_rx_bufs(ring, num_bufs);
 	if (err)
-		goto err_destroy_fq;
+		goto err_clean_rq;
 
 	return 0;
 
-err_destroy_fq:
-	ice_rxq_pp_destroy(ring);
+err_clean_rq:
+	ice_clean_rx_ring(ring);
 
 	return err;
 }
@@ -1425,27 +1440,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
 	       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
 }
 
-/**
- * ice_qp_clean_rings - Cleans all the rings of a given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
-{
-	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
-	if (vsi->xdp_rings)
-		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
-	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
-}
-
-/**
- * ice_qp_dis - Disables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+static int __ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 {
 	struct ice_txq_meta txq_meta = { };
 	struct ice_q_vector *q_vector;
@@ -1484,23 +1479,35 @@ int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 	}
 
 	ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
-	ice_qp_clean_rings(vsi, q_idx);
 	ice_qp_reset_stats(vsi, q_idx);
 
+	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+	if (vsi->xdp_rings)
+		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+
 	return fail;
 }
 
 /**
- * ice_qp_ena - Enables a queue pair
+ * ice_qp_dis - Disables a queue pair
  * @vsi: VSI of interest
  * @q_idx: ring index in array
  *
  * Returns 0 on success, negative on failure.
 */
-int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+	int ret;
+
+	ret = __ice_qp_dis(vsi, q_idx);
+	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+
+	return ret;
+}
+
+static int __ice_qp_ena(struct ice_vsi *vsi, u16 q_idx, int fail)
 {
 	struct ice_q_vector *q_vector;
-	int fail = 0;
 	bool link_up;
 	int err;
 
@@ -1518,10 +1525,6 @@ int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 		ice_tx_xsk_pool(vsi, q_idx);
 	}
 
-	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
-	if (!fail)
-		fail = err;
-
 	q_vector = vsi->rx_rings[q_idx]->q_vector;
 	ice_qvec_cfg_msix(vsi, q_vector, q_idx);
 
@@ -1542,3 +1545,80 @@ int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 
 	return fail;
 }
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+	return __ice_qp_ena(vsi, q_idx, ice_vsi_cfg_single_rxq(vsi, q_idx));
+}
+
+static int ice_queue_start(struct net_device *dev, void *per_queue_mem,
+			   int idx)
+{
+	const struct ice_netdev_priv *priv = netdev_priv(dev);
+	struct ice_rx_ring *real = priv->vsi->rx_rings[idx];
+	struct ice_rx_ring *new = per_queue_mem;
+	struct napi_struct *napi;
+	int ret;
+
+	real->pp = new->pp;
+	real->rx_fqes = new->rx_fqes;
+	real->hdr_fqes = new->hdr_fqes;
+	real->hdr_pp = new->hdr_pp;
+
+	real->hdr_truesize = new->hdr_truesize;
+	real->truesize = new->truesize;
+	real->rx_hdr_len = new->rx_hdr_len;
+	real->rx_buf_len = new->rx_buf_len;
+
+	memcpy(&real->xdp_rxq, &new->xdp_rxq, sizeof(new->xdp_rxq));
+
+	ret = ice_setup_rx_ctx(real);
+	if (ret)
+		return ret;
+
+	napi = &real->q_vector->napi;
+
+	page_pool_enable_direct_recycling(real->pp, napi);
+	if (real->hdr_pp)
+		page_pool_enable_direct_recycling(real->hdr_pp, napi);
+
+	ret = ice_alloc_rx_bufs(real, ICE_DESC_UNUSED(real));
+
+	return __ice_qp_ena(priv->vsi, idx, ret);
+}
+
+static int ice_queue_stop(struct net_device *dev, void *per_queue_mem,
+			  int idx)
+{
+	const struct ice_netdev_priv *priv = netdev_priv(dev);
+	struct ice_rx_ring *real = priv->vsi->rx_rings[idx];
+	int ret;
+
+	ret = __ice_qp_dis(priv->vsi, idx);
+	if (ret)
+		return ret;
+
+	page_pool_disable_direct_recycling(real->pp);
+	if (real->hdr_pp)
+		page_pool_disable_direct_recycling(real->hdr_pp);
+
+	ice_zero_rx_ring(real);
+	memcpy(per_queue_mem, real, sizeof(*real));
+
+	return 0;
+}
+
+const struct netdev_queue_mgmt_ops ice_queue_mgmt_ops = {
+	.ndo_queue_mem_alloc	= ice_queue_mem_alloc,
+	.ndo_queue_mem_free	= ice_queue_mem_free,
+	.ndo_queue_mem_size	= sizeof(struct ice_rx_ring),
+	.ndo_queue_start	= ice_queue_start,
+	.ndo_queue_stop		= ice_queue_stop,
+};
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 881b41d37821..25e9091ca309 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3523,7 +3523,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
 	}
 
 	netdev->netdev_ops = &ice_netdev_ops;
-	netdev->request_ops_lock = true;
+	netdev->queue_mgmt_ops = &ice_queue_mgmt_ops;
 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
 	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
 	ice_set_ethtool_ops(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 2c3db1b03055..41e1606a8222 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -58,7 +58,7 @@ static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port,
 	eth_hw_addr_set(netdev, dyn_port->hw_addr);
 	ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
 	netdev->netdev_ops = &ice_sf_netdev_ops;
-	netdev->request_ops_lock = true;
+	netdev->queue_mgmt_ops = &ice_queue_mgmt_ops;
 	SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
 
 	err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 47d44796635a..b6f56cb81f93 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -530,17 +530,13 @@ void ice_rxq_pp_destroy(struct ice_rx_ring *rq)
 	rq->hdr_pp = NULL;
 }
 
-/**
- * ice_clean_rx_ring - Free Rx buffers
- * @rx_ring: ring to be cleaned
- */
-void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
+void ice_queue_mem_free(struct net_device *dev, void *per_queue_mem)
 {
-	u32 size;
+	struct ice_rx_ring *rx_ring = per_queue_mem;
 
 	if (rx_ring->xsk_pool) {
 		ice_xsk_clean_rx_ring(rx_ring);
-		goto rx_skip_free;
+		return;
 	}
 
 	/* ring already cleared, nothing to do */
@@ -567,8 +563,12 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
 	}
 
 	ice_rxq_pp_destroy(rx_ring);
+}
+
+void ice_zero_rx_ring(struct ice_rx_ring *rx_ring)
+{
+	size_t size;
 
-rx_skip_free:
 	/* Zero out the descriptor ring */
 	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
 		     PAGE_SIZE);
@@ -579,6 +579,16 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
 	rx_ring->next_to_use = 0;
 }
 
+/**
+ * ice_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ */
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
+{
+	ice_queue_mem_free(rx_ring->netdev, rx_ring);
+	ice_zero_rx_ring(rx_ring);
+}
+
 /**
  * ice_free_rx_ring - Free Rx resources
  * @rx_ring: ring to clean the resources from
-- 
2.52.0