Update itr according to rx packets/bytes

Track the average rx frame size per q_vector and use it to pick a new
interrupt throttle value once a NAPI poll completes
(rnpgbe_update_ring_itr_rx()). Program the selected value into the rx
DMA delay registers from the MSI-X handler (rnpgbe_write_eitr_rx())
before the next poll is scheduled. 10/100M link speeds fall back to a
fixed M_4K_ITR value.

Signed-off-by: Dong Yibo
---
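Review note (not for commit): below is a minimal userspace sketch of the
rx itr sizing heuristic applied by rnpgbe_update_ring_itr_rx(), so the
resulting values can be checked for a few average frame sizes. The
helper name itr_from_avg_frame() and the sample frame sizes are
illustrative only; the 10/100M link-speed override to M_4K_ITR and the
hw->usecstocount scaling done in rnpgbe_write_eitr_rx() are left out.

	/*
	 * Standalone sketch of the rx itr sizing heuristic.
	 * Mirrors the avg_wire_size -> new_val mapping in
	 * rnpgbe_update_ring_itr_rx(); not driver code.
	 */
	#include <stdio.h>

	#define M_LOWEREST_ITR 5

	static int itr_from_avg_frame(int avg_frame_bytes)
	{
		/* add 24 bytes for CRC, preamble and inter-frame gap */
		int avg_wire_size = avg_frame_bytes + 24;
		int new_val;

		/* don't starve jumbo frames */
		if (avg_wire_size > 3000)
			avg_wire_size = 3000;

		/* give a little boost to mid-size frames */
		if (avg_wire_size > 300 && avg_wire_size < 1200)
			new_val = avg_wire_size / 3;
		else
			new_val = avg_wire_size / 2;

		return new_val < M_LOWEREST_ITR ? M_LOWEREST_ITR : new_val;
	}

	int main(void)
	{
		int sizes[] = { 64, 512, 1500, 9000 };
		size_t i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("avg frame %4d -> itr %4d\n",
			       sizes[i], itr_from_avg_frame(sizes[i]));
		return 0;
	}

With these example inputs, 64-byte frames map to 44, 512-byte frames to
178, 1500-byte frames to 762, and jumbo frames clamp at 3000/2 = 1500,
all bounded below by M_LOWEREST_ITR (5) and scaled by hw->usecstocount
when written to the hardware.
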
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h    |  5 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c    | 91 ++++++++++++++++++-
 2 files changed, 95 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 0b6ba4c3a6cb..8e692da05eb7 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -551,6 +551,8 @@ struct mucse_ring_container {
 	unsigned int total_packets;
 	u16 work_limit;
 	u16 count;
+	u16 itr;
+	int update_count;
 };
 
 struct mucse_q_vector {
@@ -705,6 +707,9 @@ static inline __le16 rnpgbe_test_staterr(union rnpgbe_rx_desc *rx_desc,
 
 #define M_TRY_LINK_TIMEOUT (4 * HZ)
 
+#define M_LOWEREST_ITR (5)
+#define M_4K_ITR (980)
+
 #define M_RX_BUFFER_WRITE (16)
 #define m_rd_reg(reg) readl((void *)(reg))
 #define m_wr_reg(reg, val) writel((val), (void *)(reg))
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index 05073663ad0e..5d82f063eade 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -681,6 +681,62 @@ static int rnpgbe_clean_rx_irq(struct mucse_q_vector *q_vector,
 	return fail_alloc ? budget : total_rx_packets;
 }
 
+static void rnpgbe_update_ring_itr_rx(struct mucse_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_rx;
+	int avg_wire_size = 0;
+	struct mucse *mucse = q_vector->mucse;
+	unsigned int packets;
+
+	switch (mucse->link_speed) {
+	case M_LINK_SPEED_10_FULL:
+	case M_LINK_SPEED_100_FULL:
+		new_val = M_4K_ITR;
+		goto set_itr_val;
+	default:
+		break;
+	}
+
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->rx.total_bytes / packets);
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
+
+	/* Add 24 bytes to size to account for CRC, preamble, and gap */
+	avg_wire_size += 24;
+
+	/* Don't starve jumbo frames */
+	avg_wire_size = min(avg_wire_size, 3000);
+
+	/* Give a little boost to mid-size frames */
+	if (avg_wire_size > 300 && avg_wire_size < 1200)
+		new_val = avg_wire_size / 3;
+	else
+		new_val = avg_wire_size / 2;
+
+	if (new_val < M_LOWEREST_ITR)
+		new_val = M_LOWEREST_ITR;
+
+set_itr_val:
+	if (q_vector->rx.itr != new_val) {
+		q_vector->rx.update_count++;
+		if (q_vector->rx.update_count >= 2) {
+			q_vector->rx.itr = new_val;
+			q_vector->rx.update_count = 0;
+		}
+	} else {
+		q_vector->rx.update_count = 0;
+	}
+
+clear_counts:
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+}
+
 /**
  * rnpgbe_poll - NAPI Rx polling callback
  * @napi: structure for representing this polling device
@@ -725,6 +781,7 @@ static int rnpgbe_poll(struct napi_struct *napi, int budget)
 		return budget;
 	/* all work done, exit the polling mode */
 	if (likely(napi_complete_done(napi, work_done))) {
+		rnpgbe_update_ring_itr_rx(q_vector);
 		if (!test_bit(__MUCSE_DOWN, &mucse->state))
 			rnpgbe_irq_enable_queues(mucse, q_vector);
 	}
@@ -1677,12 +1734,44 @@ void rnpgbe_clean_all_tx_rings(struct mucse *mucse)
 		rnpgbe_clean_tx_ring(mucse->tx_ring[i]);
 }
 
+static void rnpgbe_write_eitr_rx(struct mucse_q_vector *q_vector)
+{
+	struct mucse *mucse = q_vector->mucse;
+	struct mucse_hw *hw = &mucse->hw;
+	u32 new_itr_rx = q_vector->rx.itr;
+	u32 old_itr_rx = q_vector->rx.itr;
+	struct mucse_ring *ring;
+
+	new_itr_rx = new_itr_rx * hw->usecstocount;
+	/* if we are in auto mode write to hw */
+	mucse_for_each_ring(ring, q_vector->rx) {
+		ring_wr32(ring, DMA_REG_RX_INT_DELAY_TIMER, new_itr_rx);
+		if (ring->ring_flags & M_RING_LOWER_ITR) {
+			/* if we are already in this mode skip */
+			if (q_vector->itr_rx == M_LOWEREST_ITR)
+				continue;
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_PKTCNT, 1);
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_TIMER,
+				  M_LOWEREST_ITR);
+			q_vector->itr_rx = M_LOWEREST_ITR;
+		} else {
+			if (new_itr_rx == q_vector->itr_rx)
+				continue;
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_TIMER,
+				  new_itr_rx);
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_PKTCNT,
+				  mucse->rx_frames);
+			q_vector->itr_rx = old_itr_rx;
+		}
+	}
+}
+
 static irqreturn_t rnpgbe_msix_clean_rings(int irq, void *data)
 {
 	struct mucse_q_vector *q_vector = (struct mucse_q_vector *)data;
 
 	rnpgbe_irq_disable_queues(q_vector);
-
+	rnpgbe_write_eitr_rx(q_vector);
 	if (q_vector->rx.ring || q_vector->tx.ring)
 		napi_schedule_irqoff(&q_vector->napi);
-- 
2.25.1