Packets with unknown MAC address cannot be handled by the HW forwarding. These need to be forwarded, via an exception path, to the network driver. Creates a queue for the exception path. Packets received with unknown src/dst address need to be passed to the CPU. The received packet does not have the correct source port information; this is derived from the descriptor and added to the new queue. The received packet is added to the new queue and sent to the CPU for MAC learning. The CPU will broadcast the received packet to all ports. This is how the HW learns the new MAC address. Signed-off-by: Michael Dege --- drivers/net/ethernet/renesas/rswitch.h | 1 + drivers/net/ethernet/renesas/rswitch_main.c | 80 ++++++++++++++++++++++++++--- 2 files changed, 74 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 15d3fa48b0e3..bf123b564ed1 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -1148,6 +1148,7 @@ struct rswitch_gwca { struct rswitch_gwca_queue *queues; int num_queues; struct rswitch_gwca_queue ts_queue; + struct rswitch_gwca_queue *l2_shared_rx_queue; DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES); u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS]; u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS]; diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c index d404bc41bd1e..f8ceb7f66903 100644 --- a/drivers/net/ethernet/renesas/rswitch_main.c +++ b/drivers/net/ethernet/renesas/rswitch_main.c @@ -680,6 +680,34 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index) return rswitch_gwca_queue_ext_ts_format(ndev->dev.parent, priv, rdev->rx_queue); } +static int rswitch_shared_rx_queue_alloc(struct rswitch_private *priv) +{ + struct rswitch_gwca *gwca = &priv->gwca; + struct device *dev = &priv->pdev->dev; + + int err; + + gwca->l2_shared_rx_queue = rswitch_gwca_get(priv); + if 
(!gwca->l2_shared_rx_queue) + return -EBUSY; + + err = rswitch_gwca_queue_alloc(NULL, priv, gwca->l2_shared_rx_queue, false, RX_RING_SIZE); + if (err < 0) { + rswitch_gwca_put(priv, gwca->l2_shared_rx_queue); + return err; + } + + return rswitch_gwca_queue_ext_ts_format(dev, priv, gwca->l2_shared_rx_queue); +} + +static void rswitch_shared_rx_queue_free(struct rswitch_private *priv) +{ + struct rswitch_gwca *gwca = &priv->gwca; + + rswitch_gwca_queue_free(&priv->pdev->dev, gwca->l2_shared_rx_queue); + rswitch_gwca_put(priv, gwca->l2_shared_rx_queue); +} + static int rswitch_gwca_hw_init(struct rswitch_private *priv) { unsigned int i; @@ -717,6 +745,12 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv) return err; } + err = rswitch_shared_rx_queue_alloc(priv); + if (err < 0) { + rswitch_shared_rx_queue_free(priv); + return err; + } + return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION); } @@ -940,6 +974,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget) rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); } + rswitch_enadis_data_irq(priv, priv->gwca.l2_shared_rx_queue->index, true); spin_unlock_irqrestore(&priv->lock, flags); } @@ -952,15 +987,31 @@ static int rswitch_poll(struct napi_struct *napi, int budget) return 0; } -static void rswitch_queue_interrupt(struct net_device *ndev) +static void rswitch_queue_interrupt(struct rswitch_private *priv, struct rswitch_gwca_queue *gq) { - struct rswitch_device *rdev = netdev_priv(ndev); + struct rswitch_ext_ts_desc *desc; + struct rswitch_device *rdev; + struct net_device *ndev; + u32 spn; + + if (gq->index == priv->gwca.l2_shared_rx_queue->index) { + desc = &gq->rx_ring[gq->cur]; + spn = FIELD_GET(SPN, desc->info1); + ndev = priv->rdev[spn]->ndev; + rdev = netdev_priv(ndev); + gq->ndev = ndev; + rdev->rx_queue = gq; + } else { + rdev = netdev_priv(gq->ndev); + } + if (napi_schedule_prep(&rdev->napi)) { - 
spin_lock(&rdev->priv->lock); - rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); - rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); - spin_unlock(&rdev->priv->lock); + spin_lock(&priv->lock); + rswitch_enadis_data_irq(priv, rdev->tx_queue->index, false); + rswitch_enadis_data_irq(priv, rdev->rx_queue->index, false); + rswitch_enadis_data_irq(priv, priv->gwca.l2_shared_rx_queue->index, false); + spin_unlock(&priv->lock); __napi_schedule(&rdev->napi); } } @@ -978,7 +1029,7 @@ static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis) continue; rswitch_ack_data_irq(priv, gq->index); - rswitch_queue_interrupt(gq->ndev); + rswitch_queue_interrupt(priv, gq); } return IRQ_HANDLED; @@ -1513,6 +1564,14 @@ static int rswitch_serdes_set_params(struct rswitch_device *rdev) return phy_set_speed(rdev->serdes, rdev->etha->speed); } +static void rswitch_etha_set_exception_path(struct rswitch_private *priv) +{ + iowrite32(FDMACUFEF, priv->addr + FWCEPRC2); + iowrite32(FIELD_PREP(EPCSD, priv->gwca.l2_shared_rx_queue->index) | + FIELD_PREP(EPCSD, priv->gwca.l2_shared_rx_queue->index), + priv->addr + FWCEPTC); +} + static int rswitch_ether_port_init_one(struct rswitch_device *rdev) { int err; @@ -1566,6 +1625,8 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv) unsigned int i; int err; + rswitch_etha_set_exception_path(priv); + rswitch_for_each_enabled_port(priv, i) { err = rswitch_ether_port_init_one(priv->rdev[i]); if (err) @@ -1616,6 +1677,7 @@ static int rswitch_open(struct net_device *ndev) bitmap_set(rdev->priv->opened_ports, rdev->port, 1); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); + rswitch_enadis_data_irq(rdev->priv, rdev->priv->gwca.l2_shared_rx_queue->index, true); spin_unlock_irqrestore(&rdev->priv->lock, flags); phy_start(ndev->phydev); @@ -1642,6 +1704,7 @@ static int rswitch_stop(struct net_device *ndev) 
spin_lock_irqsave(&rdev->priv->lock, flags); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false); + rswitch_enadis_data_irq(rdev->priv, rdev->priv->gwca.l2_shared_rx_queue->index, false); bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); spin_unlock_irqrestore(&rdev->priv->lock, flags); @@ -2166,6 +2229,9 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) priv->gwca.index = AGENT_INDEX_GWCA; priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV, RSWITCH_MAX_NUM_QUEUES); + /* One extra queue for L2 switch reception */ + priv->gwca.num_queues = min(priv->gwca.num_queues + 1, + RSWITCH_MAX_NUM_QUEUES); priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues, sizeof(*priv->gwca.queues), GFP_KERNEL); if (!priv->gwca.queues) -- 2.43.0