The stmmac descriptor queues are circular buffers, operated, as far as the hardware is concerned, as either a ring or a chain that loops back on itself. From the software perspective, it forms a circular buffer. We have a few places which calculate the number of in-use and free entries in these circular buffers, for which we have macros. Use CIRC_CNT() and CIRC_SPACE() as appropriate to calculate these values. Validating, for stmmac_tx_avail(), which uses CIRC_SPACE(): dirty_tx = 1, cur_tx = 0 -> 0 dirty_tx = 0, cur_tx = 0 -> dma_tx_size - 1 dirty_tx = 0, cur_tx = 1 -> dma_tx_size - 2 dirty_tx passed as end, reduced by one. cur_tx passed as start. Output on sane computers is identical. For stmmac_rx_dirty(), which uses CIRC_CNT(): dirty_rx = 1, cur_rx = 0 -> dma_rx_size - 1 dirty_rx = 0, cur_rx = 0 -> 0 dirty_rx = 0, cur_rx = 1 -> 1 dirty_rx passed as start, cur_rx passed as end. Output is identical. Same validation performed on the is_last_segment calculation, which also gets converted to CIRC_CNT(). 
Signed-off-by: Russell King (Oracle) --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 23 ++++++------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a2a0985e8c37..2d74fe98ad61 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -14,6 +14,7 @@ https://bugzilla.stlinux.com/ *******************************************************************************/ +#include <linux/circ_buf.h> #include <linux/clk.h> #include <linux/kernel.h> #include <linux/interrupt.h> @@ -355,14 +356,9 @@ static void print_pkt(unsigned char *buf, int len) static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; - u32 avail; - if (tx_q->dirty_tx > tx_q->cur_tx) - avail = tx_q->dirty_tx - tx_q->cur_tx - 1; - else - avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; - - return avail; + return CIRC_SPACE(tx_q->cur_tx, tx_q->dirty_tx, + priv->dma_conf.dma_tx_size); } /** @@ -373,14 +369,9 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; - u32 dirty; - - if (rx_q->dirty_rx <= rx_q->cur_rx) - dirty = rx_q->cur_rx - rx_q->dirty_rx; - else - dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; - return dirty; + return CIRC_CNT(rx_q->cur_rx, rx_q->dirty_rx, + priv->dma_conf.dma_rx_size); } static bool stmmac_eee_tx_busy(struct stmmac_priv *priv) @@ -4571,8 +4562,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* If we only have one entry used, then the first entry is the last * segment. 
*/ - is_last_segment = ((tx_q->cur_tx - first_entry) & - (priv->dma_conf.dma_tx_size - 1)) == 1; + is_last_segment = CIRC_CNT(tx_q->cur_tx, first_entry, + priv->dma_conf.dma_tx_size) == 1; /* Complete the first descriptor before granting the DMA */ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1, -- 2.47.3