Advertise netmem Tx support in ice.

Set ICE_TX_BUF_FRAG conditionally, only when skb_frag_is_net_iov() is
false; otherwise, the Tx buffer type stays ICE_TX_BUF_EMPTY. The unmap
paths are keyed off the buffer type instead of dma_unmap_len(), so the
driver skips the DMA unmapping operation for frags backed by net_iov.

Reviewed-by: Jacob Keller
Reviewed-by: Aleksandr Loktionov
Signed-off-by: Alexander Lobakin
---
 drivers/net/ethernet/intel/ice/ice_main.c   |  1 +
 drivers/net/ethernet/intel/ice/ice_sf_eth.c |  1 +
 drivers/net/ethernet/intel/ice/ice_txrx.c   | 17 +++++++++++++----
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 25e9091ca309..66601b1b7fec 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3524,6 +3524,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
 
 	netdev->netdev_ops = &ice_netdev_ops;
 	netdev->queue_mgmt_ops = &ice_queue_mgmt_ops;
+	netdev->netmem_tx = true;
 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
 	netdev->xdp_metadata_ops = &ice_xdp_md_ops;
 	ice_set_ethtool_ops(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 41e1606a8222..51ad13c9d7f9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -59,6 +59,7 @@ static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port,
 	ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
 	netdev->netdev_ops = &ice_sf_netdev_ops;
 	netdev->queue_mgmt_ops = &ice_queue_mgmt_ops;
+	netdev->netmem_tx = true;
 	SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
 
 	err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index b6f56cb81f93..e8e1acbd5a7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -113,11 +113,17 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
 static void
 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
 {
-	if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len))
+	switch (tx_buf->type) {
+	case ICE_TX_BUF_DUMMY:
+	case ICE_TX_BUF_FRAG:
+	case ICE_TX_BUF_SKB:
+	case ICE_TX_BUF_XDP_XMIT:
 		dma_unmap_page(ring->dev,
 			       dma_unmap_addr(tx_buf, dma),
 			       dma_unmap_len(tx_buf, len),
 			       DMA_TO_DEVICE);
+		break;
+	}
 
 	switch (tx_buf->type) {
 	case ICE_TX_BUF_DUMMY:
@@ -337,12 +343,14 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 		}
 
 		/* unmap any remaining paged data */
-		if (dma_unmap_len(tx_buf, len)) {
+		if (tx_buf->type != ICE_TX_BUF_EMPTY) {
 			dma_unmap_page(tx_ring->dev,
 				       dma_unmap_addr(tx_buf, dma),
 				       dma_unmap_len(tx_buf, len),
 				       DMA_TO_DEVICE);
+			dma_unmap_len_set(tx_buf, len, 0);
+			tx_buf->type = ICE_TX_BUF_EMPTY;
 		}
 	}
 
 	ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
@@ -1493,7 +1501,8 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
 			       DMA_TO_DEVICE);
 
 		tx_buf = &tx_ring->tx_buf[i];
-		tx_buf->type = ICE_TX_BUF_FRAG;
+		if (!skb_frag_is_net_iov(frag))
+			tx_buf->type = ICE_TX_BUF_FRAG;
 	}
 
 	/* record SW timestamp if HW timestamp is not available */
@@ -2368,7 +2377,7 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
 		}
 
 		/* unmap the data header */
-		if (dma_unmap_len(tx_buf, len))
+		if (tx_buf->type != ICE_TX_BUF_EMPTY)
 			dma_unmap_single(tx_ring->dev,
 					 dma_unmap_addr(tx_buf, dma),
 					 dma_unmap_len(tx_buf, len),
-- 
2.52.0
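
For context, below is a minimal standalone sketch of the scheme the diff
relies on. It is plain userspace C with made-up names (tx_buf, map_frag,
clean_frag), not ice driver code: frags backed by net_iov never get the
FRAG type, so a cleanup path that keys off the buffer type instead of
dma_unmap_len() skips the DMA unmap for them.

/*
 * Standalone model of the type-keyed unmap decision; illustrative only,
 * not driver code. All identifiers here are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

enum tx_buf_type { TX_BUF_EMPTY, TX_BUF_FRAG };

struct tx_buf {
	enum tx_buf_type type;
	unsigned int len;
};

/* Mapping side: only host-memory frags are tagged as FRAG. */
static void map_frag(struct tx_buf *buf, bool frag_is_net_iov, unsigned int len)
{
	buf->len = len;
	buf->type = TX_BUF_EMPTY;
	if (!frag_is_net_iov)
		buf->type = TX_BUF_FRAG;
}

/* Cleanup side: unmapping is keyed off the type, not off len != 0. */
static void clean_frag(struct tx_buf *buf)
{
	if (buf->type != TX_BUF_EMPTY)
		printf("unmap frag, len=%u\n", buf->len);
	else
		printf("skip unmap (net_iov frag), len=%u\n", buf->len);

	buf->len = 0;
	buf->type = TX_BUF_EMPTY;
}

int main(void)
{
	struct tx_buf host = { 0 }, niov = { 0 };

	map_frag(&host, false, 4096);	/* regular page-backed frag */
	map_frag(&niov, true, 4096);	/* frag backed by net_iov (netmem) */

	clean_frag(&host);	/* unmapped */
	clean_frag(&niov);	/* skipped: type stayed TX_BUF_EMPTY */

	return 0;
}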