From: Théo Lebrun

mog_alloc_rx_buffers(), called at open, does not allocate Rx buffers on
GEM. The bulk of the work is done by gem_rx_refill(), which fills all
slots with valid buffers. gem_rx_refill() is called at link up by
gem_init_rings() == bp->macbgem_ops.mog_init_rings().

Move that operation to macb_open(), mostly to allow it to fail early
and loudly rather than init the device with Rx mostly broken.

About `bool fail_early`:
- When called from macb_open(), ring init fails as soon as a queue
  cannot be refilled.
- When called from macb_hresp_error_task(), we do our best to reinit
  the device: we still iterate over all queues and try refilling all
  of them even if a previous queue failed.

Signed-off-by: Théo Lebrun
Signed-off-by: Paolo Valerio
---
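Reviewer note, not for git history: a condensed view of the two call
sites after this change, lifted from the diff below (nothing here is
new code, see macb_alloc_consistent() and macb_hresp_error_task()):

	/* Open path, via macb_alloc_consistent(): the first refill
	 * failure propagates, so macb_open() fails early and loudly.
	 */
	if (bp->macbgem_ops.mog_init_rings(bp, true))
		goto out_err;

	/* HRESP error recovery: best effort, all queues are still
	 * refilled and the return value is deliberately ignored.
	 */
	bp->macbgem_ops.mog_init_rings(bp, false);
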
 drivers/net/ethernet/cadence/macb.h      |  2 +-
 drivers/net/ethernet/cadence/macb_main.c | 40 +++++++++++++++++-------
 2 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..2cb65ec37d44 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1180,7 +1180,7 @@ struct macb_queue;
 struct macb_or_gem_ops {
 	int	(*mog_alloc_rx_buffers)(struct macb *bp);
 	void	(*mog_free_rx_buffers)(struct macb *bp);
-	void	(*mog_init_rings)(struct macb *bp);
+	int	(*mog_init_rings)(struct macb *bp, bool fail_early);
 	int	(*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
 			  int budget);
 };

diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2d5f3eb09530..5947c2b44bb3 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -705,8 +705,8 @@ static void macb_mac_link_up(struct phylink_config *config,
 		if (rx_pause)
 			ctrl |= MACB_BIT(PAE);
 
-		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
-		 * cleared the pipeline and control registers.
+		/* Initialize buffer registers as clearing MACB_BIT(TE) in link
+		 * down cleared the pipeline and control registers.
 		 */
 		macb_init_buffers(bp);
 
@@ -1249,13 +1249,14 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
 	return packets;
 }
 
-static void gem_rx_refill(struct macb_queue *queue)
+static int gem_rx_refill(struct macb_queue *queue)
 {
 	unsigned int		entry;
 	struct sk_buff		*skb;
 	dma_addr_t		paddr;
 	struct macb *bp = queue->bp;
 	struct macb_dma_desc *desc;
+	int err = 0;
 
 	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
 			  bp->rx_ring_size) > 0) {
@@ -1272,6 +1273,7 @@ static void gem_rx_refill(struct macb_queue *queue)
 			if (unlikely(!skb)) {
 				netdev_err(bp->dev,
 					   "Unable to allocate sk_buff\n");
+				err = -ENOMEM;
 				break;
 			}
 
@@ -1321,6 +1323,7 @@ static void gem_rx_refill(struct macb_queue *queue)
 
 	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
 		    queue, queue->rx_prepared_head, queue->rx_tail);
+	return err;
 }
 
 /* Mark DMA descriptors from begin up to and not including end as unused */
@@ -1773,7 +1776,7 @@ static void macb_hresp_error_task(struct work_struct *work)
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
 
-	bp->macbgem_ops.mog_init_rings(bp);
+	bp->macbgem_ops.mog_init_rings(bp, false);
 
 	/* Initialize TX and RX buffers */
 	macb_init_buffers(bp);
@@ -2546,8 +2549,6 @@ static int macb_alloc_consistent(struct macb *bp)
 		if (!queue->tx_skb)
 			goto out_err;
 	}
-	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
-		goto out_err;
 
 	/* Required for tie off descriptor for PM cases */
 	if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
@@ -2559,6 +2560,11 @@ static int macb_alloc_consistent(struct macb *bp)
 			goto out_err;
 	}
 
+	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
+		goto out_err;
+	if (bp->macbgem_ops.mog_init_rings(bp, true))
+		goto out_err;
+
 	return 0;
 
 out_err:
@@ -2579,11 +2585,13 @@ static void macb_init_tieoff(struct macb *bp)
 	desc->ctrl = 0;
 }
 
-static void gem_init_rings(struct macb *bp)
+static int gem_init_rings(struct macb *bp, bool fail_early)
 {
 	struct macb_queue *queue;
 	struct macb_dma_desc *desc = NULL;
+	int last_err = 0;
 	unsigned int q;
+	int err;
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2599,13 +2607,24 @@ static void gem_init_rings(struct macb *bp)
 		queue->rx_tail = 0;
 		queue->rx_prepared_head = 0;
 
-		gem_rx_refill(queue);
+		/* We get called in two cases:
+		 * - open: we can propagate alloc errors (so fail early),
+		 * - HRESP error: cannot propagate, we attempt to reinit
+		 *   all queues in case of failure.
+		 */
+		err = gem_rx_refill(queue);
+		if (err) {
+			last_err = err;
+			if (fail_early)
+				break;
+		}
 	}
 
 	macb_init_tieoff(bp);
+	return last_err;
 }
 
-static void macb_init_rings(struct macb *bp)
+static int macb_init_rings(struct macb *bp, bool fail_early)
 {
 	int i;
 	struct macb_dma_desc *desc = NULL;
@@ -2622,6 +2641,7 @@ static void macb_init_rings(struct macb *bp)
 	desc->ctrl |= MACB_BIT(TX_WRAP);
 
 	macb_init_tieoff(bp);
+	return 0;
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -2953,8 +2973,6 @@ static int macb_open(struct net_device *dev)
 		goto pm_exit;
 	}
 
-	bp->macbgem_ops.mog_init_rings(bp);
-
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		napi_enable(&queue->napi_rx);
 		napi_enable(&queue->napi_tx);
-- 
2.52.0