From: Jason Xing Since 'budget' parameter in ixgbe_clean_xdp_tx_irq() takes no effect, the patch removes it. No functional change here. Reviewed-by: Larysa Zaremba Signed-off-by: Jason Xing --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9a6a67a6d644..7a9508e1c05a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3585,7 +3585,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_for_each_ring(ring, q_vector->tx) { bool wd = ring->xsk_pool ? - ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) : + ixgbe_clean_xdp_tx_irq(q_vector, ring) : ixgbe_clean_tx_irq(q_vector, ring, budget); if (!wd) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 78deea5ec536..788722fe527a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -42,7 +42,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, const int budget); void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring, int napi_budget); + struct ixgbe_ring *tx_ring); int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 7b941505a9d0..a463c5ac9c7c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -456,7 +456,7 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, } bool 
ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring, int napi_budget) + struct ixgbe_ring *tx_ring) { u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; unsigned int total_packets = 0, total_bytes = 0; -- 2.41.3 From: Jason Xing - Adjust ixgbe_desc_unused as the budget value. - Avoid checking desc_unused over and over again in the loop. The patch makes ixgbe follow the i40e driver, as was done in commit 1fd972ebe523 ("i40e: move check of full Tx ring to outside of send loop"). [ Note that the above i40e patch has a problem when ixgbe_desc_unused(tx_ring) returns zero. A zero budget value means we don't have any possible descs to be sent, so it should return true instead to tell the napi poll not to launch another poll to handle tx packets. Even though that patch behaves correctly by returning true in this case, it happens because of the unexpected underflow of the budget. Taking the current version of i40e_xmit_zc() as an example, it returns true as expected. ] Hence, this patch adds a standalone if statement checking for a zero budget in front of the send loop in ixgbe_xmit_zc(), as explained before. Use ixgbe_desc_unused to replace the original fixed budget with the number of available slots in the Tx ring. It can gain some performance.
Signed-off-by: Jason Xing --- In this version, I keep it as is (please see the following link) https://lore.kernel.org/intel-wired-lan/CAL+tcoAUW_J62aw3aGBru+0GmaTjoom1qu8Y=aiSc9EGU09Nww@mail.gmail.com/ --- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index a463c5ac9c7c..f3d3f5c1cdc7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -393,17 +393,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) struct xsk_buff_pool *pool = xdp_ring->xsk_pool; union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbe_tx_buffer *tx_bi; - bool work_done = true; struct xdp_desc desc; dma_addr_t dma; u32 cmd_type; - while (likely(budget)) { - if (unlikely(!ixgbe_desc_unused(xdp_ring))) { - work_done = false; - break; - } + if (!budget) + return true; + while (likely(budget)) { if (!netif_carrier_ok(xdp_ring->netdev)) break; @@ -442,7 +439,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) xsk_tx_release(pool); } - return !!budget && work_done; + return !!budget; } static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, @@ -505,7 +502,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, if (xsk_uses_need_wakeup(pool)) xsk_set_tx_need_wakeup(pool); - return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); + return ixgbe_xmit_zc(tx_ring, ixgbe_desc_unused(tx_ring)); } int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) -- 2.41.3 From: Jason Xing Like what the i40e driver initially did in commit 3106c580fb7cf ("i40e: Use batched xsk Tx interfaces to increase performance"), use the batched xsk feature to transmit packets. Signed-off-by: Jason Xing --- In this version, I still choose to use the current implementation.
Last time at the first glance, I agreed 'i' is useless but it is not. https://lore.kernel.org/intel-wired-lan/CAL+tcoADu-ZZewsZzGDaL7NugxFTWO_Q+7WsLHs3Mx-XHjJnyg@mail.gmail.com/ --- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 106 +++++++++++++------ 1 file changed, 72 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index f3d3f5c1cdc7..9fe2c4bf8bc5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -2,12 +2,15 @@ /* Copyright(c) 2018 Intel Corporation. */ #include +#include #include #include #include "ixgbe.h" #include "ixgbe_txrx_common.h" +#define PKTS_PER_BATCH 4 + struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { @@ -388,58 +391,93 @@ void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring) } } -static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) +static void ixgbe_set_rs_bit(struct ixgbe_ring *xdp_ring) +{ + u16 ntu = xdp_ring->next_to_use ? 
xdp_ring->next_to_use - 1 : xdp_ring->count - 1; + union ixgbe_adv_tx_desc *tx_desc; + + tx_desc = IXGBE_TX_DESC(xdp_ring, ntu); + tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_RS); +} + +static void ixgbe_xmit_pkt(struct ixgbe_ring *xdp_ring, struct xdp_desc *desc, + int i) + { struct xsk_buff_pool *pool = xdp_ring->xsk_pool; union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbe_tx_buffer *tx_bi; - struct xdp_desc desc; dma_addr_t dma; u32 cmd_type; - if (!budget) - return true; + dma = xsk_buff_raw_get_dma(pool, desc[i].addr); + xsk_buff_raw_dma_sync_for_device(pool, dma, desc[i].len); - while (likely(budget)) { - if (!netif_carrier_ok(xdp_ring->netdev)) - break; + tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; + tx_bi->bytecount = desc[i].len; + tx_bi->xdpf = NULL; + tx_bi->gso_segs = 1; - if (!xsk_tx_peek_desc(pool, &desc)) - break; + tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->read.buffer_addr = cpu_to_le64(dma); - dma = xsk_buff_raw_get_dma(pool, desc.addr); - xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len); + cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + cmd_type |= desc[i].len | IXGBE_TXD_CMD_EOP; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(desc[i].len << IXGBE_ADVTXD_PAYLEN_SHIFT); - tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; - tx_bi->bytecount = desc.len; - tx_bi->xdpf = NULL; - tx_bi->gso_segs = 1; + xdp_ring->next_to_use++; +} - tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); - tx_desc->read.buffer_addr = cpu_to_le64(dma); +static void ixgbe_xmit_pkt_batch(struct ixgbe_ring *xdp_ring, struct xdp_desc *desc) +{ + u32 i; - /* put descriptor type bits */ - cmd_type = IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_DEXT | - IXGBE_ADVTXD_DCMD_IFCS; - cmd_type |= desc.len | IXGBE_TXD_CMD; - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); - tx_desc->read.olinfo_status = - 
cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT); + unrolled_count(PKTS_PER_BATCH) + for (i = 0; i < PKTS_PER_BATCH; i++) + ixgbe_xmit_pkt(xdp_ring, desc, i); +} - xdp_ring->next_to_use++; - if (xdp_ring->next_to_use == xdp_ring->count) - xdp_ring->next_to_use = 0; +static void ixgbe_fill_tx_hw_ring(struct ixgbe_ring *xdp_ring, + struct xdp_desc *descs, u32 nb_pkts) +{ + u32 batched, leftover, i; + + batched = nb_pkts & ~(PKTS_PER_BATCH - 1); + leftover = nb_pkts & (PKTS_PER_BATCH - 1); + for (i = 0; i < batched; i += PKTS_PER_BATCH) + ixgbe_xmit_pkt_batch(xdp_ring, &descs[i]); + for (i = batched; i < batched + leftover; i++) + ixgbe_xmit_pkt(xdp_ring, &descs[i], 0); +} - budget--; - } +static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) +{ + struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; + u32 nb_pkts, nb_processed = 0; - if (tx_desc) { - ixgbe_xdp_ring_update_tail(xdp_ring); - xsk_tx_release(pool); + if (!netif_carrier_ok(xdp_ring->netdev)) + return true; + + nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget); + if (!nb_pkts) + return true; + + if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { + nb_processed = xdp_ring->count - xdp_ring->next_to_use; + ixgbe_fill_tx_hw_ring(xdp_ring, descs, nb_processed); + xdp_ring->next_to_use = 0; } - return !!budget; + ixgbe_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed); + + ixgbe_set_rs_bit(xdp_ring); + ixgbe_xdp_ring_update_tail(xdp_ring); + + return nb_pkts < budget; } static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring, -- 2.41.3