Prepare ntb_netdev for multi-queue operation by moving queue-pair state out of struct ntb_netdev. Introduce struct ntb_netdev_queue to carry the ntb_transport_qp pointer, the per-QP TX timer and queue id. Pass this object as the callback context and convert the RX/TX handlers and link event path accordingly. The probe path allocates a fixed upper bound for netdev queues while instantiating only a single ntb_transport queue pair, preserving the previous behavior. Also store client_dev for future queue pair creation/removal via the ntb_transport API. Signed-off-by: Koichiro Den --- Changes since v1: - Allocate a fixed upper bound for netdev queues (alloc_etherdev_mq), while creating only a single ntb_transport queue pair by default. Users can later add/remove queues dynamically, starting from a single queue. - Drop unrelated changes from this patch, for example: * remove "any_up" from ntb_netdev_event_handler() * restore the original __ntb_netdev_maybe_stop_tx(). - Store client_dev at probe time so that queue pair creation and removal can later be done via the standard ntb_transport API. 
drivers/net/ntb_netdev.c | 279 ++++++++++++++++++++++++++------------- 1 file changed, 190 insertions(+), 89 deletions(-) diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index fbeae05817e9..7437b4580dff 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -53,6 +53,7 @@ #include #include #include +#include #define NTB_NETDEV_VER "0.7" @@ -70,11 +71,24 @@ static unsigned int tx_start = 10; /* Number of descriptors still available before stop upper layer tx */ static unsigned int tx_stop = 5; -struct ntb_netdev { - struct pci_dev *pdev; - struct net_device *ndev; +#define NTB_NETDEV_MAX_QUEUES 64 +#define NTB_NETDEV_DEFAULT_QUEUES 1 + +struct ntb_netdev; + +struct ntb_netdev_queue { + struct ntb_netdev *ntdev; struct ntb_transport_qp *qp; struct timer_list tx_timer; + u16 qid; +}; + +struct ntb_netdev { + struct pci_dev *pdev; + struct device *client_dev; + struct net_device *ndev; + unsigned int num_queues; + struct ntb_netdev_queue *queues; }; #define NTB_TX_TIMEOUT_MS 1000 @@ -82,14 +96,15 @@ struct ntb_netdev { static void ntb_netdev_event_handler(void *data, int link_is_up) { - struct net_device *ndev = data; - struct ntb_netdev *dev = netdev_priv(ndev); + struct ntb_netdev_queue *q = data; + struct ntb_netdev *dev = q->ntdev; + struct net_device *ndev = dev->ndev; - netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up, - ntb_transport_link_query(dev->qp)); + netdev_dbg(ndev, "Event %x, Link %x, qp %u\n", link_is_up, + ntb_transport_link_query(q->qp), q->qid); if (link_is_up) { - if (ntb_transport_link_query(dev->qp)) + if (ntb_transport_link_query(q->qp)) netif_carrier_on(ndev); } else { netif_carrier_off(ndev); @@ -99,7 +114,9 @@ static void ntb_netdev_event_handler(void *data, int link_is_up) static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { - struct net_device *ndev = qp_data; + struct ntb_netdev_queue *q = qp_data; + struct ntb_netdev *dev = q->ntdev; + struct net_device 
*ndev = dev->ndev; struct sk_buff *skb; int rc; @@ -118,6 +135,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; + skb_record_rx_queue(skb, q->qid); if (netif_rx(skb) == NET_RX_DROP) { ndev->stats.rx_errors++; @@ -135,7 +153,8 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, } enqueue_again: - rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); + rc = ntb_transport_rx_enqueue(q->qp, skb, skb->data, + ndev->mtu + ETH_HLEN); if (rc) { dev_kfree_skb_any(skb); ndev->stats.rx_errors++; @@ -144,41 +163,41 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, } static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev, - struct ntb_transport_qp *qp, int size) + struct ntb_netdev_queue *q, int size) { - struct ntb_netdev *dev = netdev_priv(netdev); + netif_stop_subqueue(netdev, q->qid); - netif_stop_queue(netdev); /* Make sure to see the latest value of ntb_transport_tx_free_entry() * since the queue was last started. 
*/ smp_mb(); - if (likely(ntb_transport_tx_free_entry(qp) < size)) { - mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); + if (likely(ntb_transport_tx_free_entry(q->qp) < size)) { + mod_timer(&q->tx_timer, jiffies + usecs_to_jiffies(tx_time)); return -EBUSY; } - netif_start_queue(netdev); + netif_start_subqueue(netdev, q->qid); return 0; } static int ntb_netdev_maybe_stop_tx(struct net_device *ndev, - struct ntb_transport_qp *qp, int size) + struct ntb_netdev_queue *q, int size) { - if (netif_queue_stopped(ndev) || - (ntb_transport_tx_free_entry(qp) >= size)) + if (__netif_subqueue_stopped(ndev, q->qid) || + (ntb_transport_tx_free_entry(q->qp) >= size)) return 0; - return __ntb_netdev_maybe_stop_tx(ndev, qp, size); + return __ntb_netdev_maybe_stop_tx(ndev, q, size); } static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { - struct net_device *ndev = qp_data; + struct ntb_netdev_queue *q = qp_data; + struct ntb_netdev *dev = q->ntdev; + struct net_device *ndev = dev->ndev; struct sk_buff *skb; - struct ntb_netdev *dev = netdev_priv(ndev); skb = data; if (!skb || !ndev) @@ -194,13 +213,13 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, dev_kfree_skb_any(skb); - if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { + if (ntb_transport_tx_free_entry(qp) >= tx_start) { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() */ smp_mb(); - if (netif_queue_stopped(ndev)) - netif_wake_queue(ndev); + if (__netif_subqueue_stopped(ndev, q->qid)) + netif_wake_subqueue(ndev, q->qid); } } @@ -208,16 +227,26 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); + u16 qid = skb_get_queue_mapping(skb); + struct ntb_netdev_queue *q; int rc; - ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); + if (unlikely(!dev->num_queues)) + goto err; - rc = 
ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len); + if (unlikely(qid >= dev->num_queues)) + qid = 0; + + q = &dev->queues[qid]; + + ntb_netdev_maybe_stop_tx(ndev, q, tx_stop); + + rc = ntb_transport_tx_enqueue(q->qp, skb, skb->data, skb->len); if (rc) goto err; /* check for next submit */ - ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop); + ntb_netdev_maybe_stop_tx(ndev, q, tx_stop); return NETDEV_TX_OK; @@ -229,80 +258,102 @@ static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb, static void ntb_netdev_tx_timer(struct timer_list *t) { - struct ntb_netdev *dev = timer_container_of(dev, t, tx_timer); + struct ntb_netdev_queue *q = timer_container_of(q, t, tx_timer); + struct ntb_netdev *dev = q->ntdev; struct net_device *ndev = dev->ndev; - if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { - mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); + if (ntb_transport_tx_free_entry(q->qp) < tx_stop) { + mod_timer(&q->tx_timer, jiffies + usecs_to_jiffies(tx_time)); } else { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() */ smp_mb(); - if (netif_queue_stopped(ndev)) - netif_wake_queue(ndev); + if (__netif_subqueue_stopped(ndev, q->qid)) + netif_wake_subqueue(ndev, q->qid); } } static int ntb_netdev_open(struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); + struct ntb_netdev_queue *queue; struct sk_buff *skb; - int rc, i, len; + unsigned int q; + int rc = 0, i, len; - /* Add some empty rx bufs */ - for (i = 0; i < NTB_RXQ_SIZE; i++) { - skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); - if (!skb) { - rc = -ENOMEM; - goto err; - } + /* Add some empty rx bufs for each queue */ + for (q = 0; q < dev->num_queues; q++) { + queue = &dev->queues[q]; + + for (i = 0; i < NTB_RXQ_SIZE; i++) { + skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); + if (!skb) { + rc = -ENOMEM; + goto err; + } - rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, - ndev->mtu + ETH_HLEN); 
- if (rc) { - dev_kfree_skb(skb); - goto err; + rc = ntb_transport_rx_enqueue(queue->qp, skb, skb->data, + ndev->mtu + ETH_HLEN); + if (rc) { + dev_kfree_skb(skb); + goto err; + } } + + timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0); } - timer_setup(&dev->tx_timer, ntb_netdev_tx_timer, 0); - netif_carrier_off(ndev); - ntb_transport_link_up(dev->qp); + + for (q = 0; q < dev->num_queues; q++) + ntb_transport_link_up(dev->queues[q].qp); + netif_start_queue(ndev); return 0; err: - while ((skb = ntb_transport_rx_remove(dev->qp, &len))) - dev_kfree_skb(skb); + for (q = 0; q < dev->num_queues; q++) { + queue = &dev->queues[q]; + + while ((skb = ntb_transport_rx_remove(queue->qp, &len))) + dev_kfree_skb(skb); + } return rc; } static int ntb_netdev_close(struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); + struct ntb_netdev_queue *queue; struct sk_buff *skb; + unsigned int q; int len; - ntb_transport_link_down(dev->qp); - while ((skb = ntb_transport_rx_remove(dev->qp, &len))) - dev_kfree_skb(skb); + for (q = 0; q < dev->num_queues; q++) { + queue = &dev->queues[q]; - timer_delete_sync(&dev->tx_timer); + ntb_transport_link_down(queue->qp); + while ((skb = ntb_transport_rx_remove(queue->qp, &len))) + dev_kfree_skb(skb); + + timer_delete_sync(&queue->tx_timer); + } return 0; } static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu) { struct ntb_netdev *dev = netdev_priv(ndev); + struct ntb_netdev_queue *queue; struct sk_buff *skb; - int len, rc; + unsigned int q, i; + int len, rc = 0; - if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN) + if (new_mtu > ntb_transport_max_size(dev->queues[0].qp) - ETH_HLEN) return -EINVAL; if (!netif_running(ndev)) { @@ -311,41 +362,54 @@ static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu) } /* Bring down the link and dispose of posted rx entries */ - ntb_transport_link_down(dev->qp); + for (q = 0; q < dev->num_queues; q++) + ntb_transport_link_down(dev->queues[q].qp); if 
(ndev->mtu < new_mtu) { - int i; + for (q = 0; q < dev->num_queues; q++) { + queue = &dev->queues[q]; - for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++) - dev_kfree_skb(skb); - - for (; i; i--) { - skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN); - if (!skb) { - rc = -ENOMEM; - goto err; - } - - rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, - new_mtu + ETH_HLEN); - if (rc) { + for (i = 0; + (skb = ntb_transport_rx_remove(queue->qp, &len)); + i++) dev_kfree_skb(skb); - goto err; + + for (; i; i--) { + skb = netdev_alloc_skb(ndev, + new_mtu + ETH_HLEN); + if (!skb) { + rc = -ENOMEM; + goto err; + } + + rc = ntb_transport_rx_enqueue(queue->qp, skb, + skb->data, + new_mtu + + ETH_HLEN); + if (rc) { + dev_kfree_skb(skb); + goto err; + } } } } WRITE_ONCE(ndev->mtu, new_mtu); - ntb_transport_link_up(dev->qp); + for (q = 0; q < dev->num_queues; q++) + ntb_transport_link_up(dev->queues[q].qp); return 0; err: - ntb_transport_link_down(dev->qp); + for (q = 0; q < dev->num_queues; q++) { + struct ntb_netdev_queue *queue = &dev->queues[q]; - while ((skb = ntb_transport_rx_remove(dev->qp, &len))) - dev_kfree_skb(skb); + ntb_transport_link_down(queue->qp); + + while ((skb = ntb_transport_rx_remove(queue->qp, &len))) + dev_kfree_skb(skb); + } netdev_err(ndev, "Error changing MTU, device inoperable\n"); return rc; @@ -404,6 +468,7 @@ static int ntb_netdev_probe(struct device *client_dev) struct net_device *ndev; struct pci_dev *pdev; struct ntb_netdev *dev; + unsigned int q; int rc; ntb = dev_ntb(client_dev->parent); @@ -411,7 +476,7 @@ static int ntb_netdev_probe(struct device *client_dev) if (!pdev) return -ENODEV; - ndev = alloc_etherdev(sizeof(*dev)); + ndev = alloc_etherdev_mq(sizeof(*dev), NTB_NETDEV_MAX_QUEUES); if (!ndev) return -ENOMEM; @@ -420,6 +485,16 @@ static int ntb_netdev_probe(struct device *client_dev) dev = netdev_priv(ndev); dev->ndev = ndev; dev->pdev = pdev; + dev->client_dev = client_dev; + dev->num_queues = 0; + + dev->queues = 
kcalloc(NTB_NETDEV_MAX_QUEUES, sizeof(*dev->queues), + GFP_KERNEL); + if (!dev->queues) { + rc = -ENOMEM; + goto err_free_netdev; + } + ndev->features = NETIF_F_HIGHDMA; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; @@ -436,26 +511,47 @@ static int ntb_netdev_probe(struct device *client_dev) ndev->min_mtu = 0; ndev->max_mtu = ETH_MAX_MTU; - dev->qp = ntb_transport_create_queue(ndev, client_dev, - &ntb_netdev_handlers); - if (!dev->qp) { + for (q = 0; q < NTB_NETDEV_DEFAULT_QUEUES; q++) { + struct ntb_netdev_queue *queue = &dev->queues[q]; + + queue->ntdev = dev; + queue->qid = q; + queue->qp = ntb_transport_create_queue(queue, client_dev, + &ntb_netdev_handlers); + if (!queue->qp) + break; + + dev->num_queues++; + } + + if (!dev->num_queues) { rc = -EIO; - goto err; + goto err_free_queues; } - ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN; + rc = netif_set_real_num_queues(ndev, dev->num_queues, dev->num_queues); + if (rc) + goto err_free_qps; + + ndev->mtu = ntb_transport_max_size(dev->queues[0].qp) - ETH_HLEN; rc = register_netdev(ndev); if (rc) - goto err1; + goto err_free_qps; dev_set_drvdata(client_dev, ndev); - dev_info(&pdev->dev, "%s created\n", ndev->name); + dev_info(&pdev->dev, "%s created with %u queue pairs\n", + ndev->name, dev->num_queues); return 0; -err1: - ntb_transport_free_queue(dev->qp); -err: +err_free_qps: + for (q = 0; q < dev->num_queues; q++) + ntb_transport_free_queue(dev->queues[q].qp); + +err_free_queues: + kfree(dev->queues); + +err_free_netdev: free_netdev(ndev); return rc; } @@ -464,9 +560,14 @@ static void ntb_netdev_remove(struct device *client_dev) { struct net_device *ndev = dev_get_drvdata(client_dev); struct ntb_netdev *dev = netdev_priv(ndev); + unsigned int q; + unregister_netdev(ndev); - ntb_transport_free_queue(dev->qp); + for (q = 0; q < dev->num_queues; q++) + ntb_transport_free_queue(dev->queues[q].qp); + + kfree(dev->queues); free_netdev(ndev); } -- 2.51.0 When ntb_netdev is extended to multiple ntb_transport queue 
pairs, the netdev carrier can be up as long as at least one QP link is up. In that setup, a given QP may be link-down while the carrier remains on. Make the link event handler start/stop the corresponding netdev TX subqueue and drive carrier state based on whether any QP link is up. Also guard subqueue wake/start points in the TX completion and timer paths so a subqueue is not restarted while its QP link is down. Stop all queues in ndo_open() and let the link event handler wake each subqueue once ntb_transport link negotiation succeeds. Signed-off-by: Koichiro Den --- drivers/net/ntb_netdev.c | 42 ++++++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 7437b4580dff..19a3383d86f8 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -99,16 +99,30 @@ static void ntb_netdev_event_handler(void *data, int link_is_up) struct ntb_netdev_queue *q = data; struct ntb_netdev *dev = q->ntdev; struct net_device *ndev = dev->ndev; + bool any_up = false; + unsigned int i; netdev_dbg(ndev, "Event %x, Link %x, qp %u\n", link_is_up, ntb_transport_link_query(q->qp), q->qid); - if (link_is_up) { - if (ntb_transport_link_query(q->qp)) - netif_carrier_on(ndev); - } else { + if (netif_running(ndev)) { + if (link_is_up) + netif_wake_subqueue(ndev, q->qid); + else + netif_stop_subqueue(ndev, q->qid); + } + + for (i = 0; i < dev->num_queues; i++) { + if (ntb_transport_link_query(dev->queues[i].qp)) { + any_up = true; + break; + } + } + + if (any_up) + netif_carrier_on(ndev); + else netif_carrier_off(ndev); - } } static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, @@ -177,7 +191,10 @@ static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev, return -EBUSY; } - netif_start_subqueue(netdev, q->qid); + /* The subqueue must be kept stopped if the link is down */ + if (ntb_transport_link_query(q->qp)) + netif_start_subqueue(netdev, q->qid); + 
return 0; } @@ -218,7 +235,8 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, * value of ntb_transport_tx_free_entry() */ smp_mb(); - if (__netif_subqueue_stopped(ndev, q->qid)) + if (__netif_subqueue_stopped(ndev, q->qid) && + ntb_transport_link_query(q->qp)) netif_wake_subqueue(ndev, q->qid); } } @@ -269,7 +287,10 @@ static void ntb_netdev_tx_timer(struct timer_list *t) * value of ntb_transport_tx_free_entry() */ smp_mb(); - if (__netif_subqueue_stopped(ndev, q->qid)) + + /* The subqueue must be kept stopped if the link is down */ + if (__netif_subqueue_stopped(ndev, q->qid) && + ntb_transport_link_query(q->qp)) netif_wake_subqueue(ndev, q->qid); } } @@ -305,12 +326,11 @@ static int ntb_netdev_open(struct net_device *ndev) } netif_carrier_off(ndev); + netif_tx_stop_all_queues(ndev); for (q = 0; q < dev->num_queues; q++) ntb_transport_link_up(dev->queues[q].qp); - netif_start_queue(ndev); - return 0; err: @@ -331,6 +351,8 @@ static int ntb_netdev_close(struct net_device *ndev) unsigned int q; int len; + netif_tx_stop_all_queues(ndev); + netif_carrier_off(ndev); for (q = 0; q < dev->num_queues; q++) { queue = &dev->queues[q]; -- 2.51.0 Implementing .set_channels will otherwise duplicate the same multi-queue operations at multiple call sites. Factor out the following helpers: - ntb_netdev_update_carrier(): carrier is switched on when at least one QP link is up - ntb_netdev_queue_rx_drain(): drain and free all queued RX packets for one QP - ntb_netdev_queue_rx_fill(): prefill RX ring for one QP No functional change. 
Signed-off-by: Koichiro Den --- drivers/net/ntb_netdev.c | 99 ++++++++++++++++++++++++---------------- 1 file changed, 59 insertions(+), 40 deletions(-) diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 19a3383d86f8..6aa59316569c 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -94,24 +94,12 @@ struct ntb_netdev { #define NTB_TX_TIMEOUT_MS 1000 #define NTB_RXQ_SIZE 100 -static void ntb_netdev_event_handler(void *data, int link_is_up) +static void ntb_netdev_update_carrier(struct ntb_netdev *dev) { - struct ntb_netdev_queue *q = data; - struct ntb_netdev *dev = q->ntdev; struct net_device *ndev = dev->ndev; bool any_up = false; unsigned int i; - netdev_dbg(ndev, "Event %x, Link %x, qp %u\n", link_is_up, - ntb_transport_link_query(q->qp), q->qid); - - if (netif_running(ndev)) { - if (link_is_up) - netif_wake_subqueue(ndev, q->qid); - else - netif_stop_subqueue(ndev, q->qid); - } - for (i = 0; i < dev->num_queues; i++) { if (ntb_transport_link_query(dev->queues[i].qp)) { any_up = true; @@ -125,6 +113,56 @@ static void ntb_netdev_event_handler(void *data, int link_is_up) netif_carrier_off(ndev); } +static void ntb_netdev_queue_rx_drain(struct ntb_netdev_queue *queue) +{ + struct sk_buff *skb; + int len; + + while ((skb = ntb_transport_rx_remove(queue->qp, &len))) + dev_kfree_skb(skb); +} + +static int ntb_netdev_queue_rx_fill(struct net_device *ndev, + struct ntb_netdev_queue *queue) +{ + struct sk_buff *skb; + int rc, i; + + for (i = 0; i < NTB_RXQ_SIZE; i++) { + skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); + if (!skb) + return -ENOMEM; + + rc = ntb_transport_rx_enqueue(queue->qp, skb, skb->data, + ndev->mtu + ETH_HLEN); + if (rc) { + dev_kfree_skb(skb); + return rc; + } + } + + return 0; +} + +static void ntb_netdev_event_handler(void *data, int link_is_up) +{ + struct ntb_netdev_queue *q = data; + struct ntb_netdev *dev = q->ntdev; + struct net_device *ndev = dev->ndev; + + netdev_dbg(ndev, "Event %x, Link %x, qp 
%u\n", link_is_up, + ntb_transport_link_query(q->qp), q->qid); + + if (netif_running(ndev)) { + if (link_is_up) + netif_wake_subqueue(ndev, q->qid); + else + netif_stop_subqueue(ndev, q->qid); + } + + ntb_netdev_update_carrier(dev); +} + static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { @@ -299,28 +337,16 @@ static int ntb_netdev_open(struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); struct ntb_netdev_queue *queue; - struct sk_buff *skb; unsigned int q; - int rc = 0, i, len; + int rc = 0; /* Add some empty rx bufs for each queue */ for (q = 0; q < dev->num_queues; q++) { queue = &dev->queues[q]; - for (i = 0; i < NTB_RXQ_SIZE; i++) { - skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); - if (!skb) { - rc = -ENOMEM; - goto err; - } - - rc = ntb_transport_rx_enqueue(queue->qp, skb, skb->data, - ndev->mtu + ETH_HLEN); - if (rc) { - dev_kfree_skb(skb); - goto err; - } - } + rc = ntb_netdev_queue_rx_fill(ndev, queue); + if (rc) + goto err; timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0); } @@ -336,9 +362,7 @@ static int ntb_netdev_open(struct net_device *ndev) err: for (q = 0; q < dev->num_queues; q++) { queue = &dev->queues[q]; - - while ((skb = ntb_transport_rx_remove(queue->qp, &len))) - dev_kfree_skb(skb); + ntb_netdev_queue_rx_drain(queue); } return rc; } @@ -347,9 +371,7 @@ static int ntb_netdev_close(struct net_device *ndev) { struct ntb_netdev *dev = netdev_priv(ndev); struct ntb_netdev_queue *queue; - struct sk_buff *skb; unsigned int q; - int len; netif_tx_stop_all_queues(ndev); netif_carrier_off(ndev); @@ -358,12 +380,10 @@ static int ntb_netdev_close(struct net_device *ndev) queue = &dev->queues[q]; ntb_transport_link_down(queue->qp); - - while ((skb = ntb_transport_rx_remove(queue->qp, &len))) - dev_kfree_skb(skb); - + ntb_netdev_queue_rx_drain(queue); timer_delete_sync(&queue->tx_timer); } + return 0; } @@ -429,8 +449,7 @@ static int ntb_netdev_change_mtu(struct net_device 
*ndev, int new_mtu) ntb_transport_link_down(queue->qp); - while ((skb = ntb_transport_rx_remove(queue->qp, &len))) - dev_kfree_skb(skb); + ntb_netdev_queue_rx_drain(queue); } netdev_err(ndev, "Error changing MTU, device inoperable\n"); -- 2.51.0 Support dynamic queue pair addition/removal via ethtool channels. Use the combined channel count to control the number of netdev TX/RX queues, each corresponding to a ntb_transport queue pair. When the number of queues is reduced, tear down and free the removed ntb_transport queue pairs (not just deactivate them) so other ntb_transport clients can reuse the freed resources. When the number of queues is increased, create additional queue pairs up to NTB_NETDEV_MAX_QUEUES (=64). The effective limit is determined by the underlying ntb_transport implementation and NTB hardware resources (the number of MWs), so set_channels may return -ENOSPC if no more QPs can be allocated. Keep the default at one queue pair to preserve the previous behavior. Signed-off-by: Koichiro Den --- drivers/net/ntb_netdev.c | 147 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 6aa59316569c..c2b1886775bf 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -113,6 +113,24 @@ static void ntb_netdev_update_carrier(struct ntb_netdev *dev) netif_carrier_off(ndev); } +static void ntb_netdev_sync_subqueues(struct ntb_netdev *dev) +{ + struct net_device *ndev = dev->ndev; + unsigned int q; + + if (!netif_running(ndev)) + return; + + for (q = 0; q < dev->num_queues; q++) { + struct ntb_netdev_queue *queue = &dev->queues[q]; + + if (ntb_transport_link_query(queue->qp)) + netif_wake_subqueue(ndev, queue->qid); + else + netif_stop_subqueue(ndev, queue->qid); + } +} + static void ntb_netdev_queue_rx_drain(struct ntb_netdev_queue *queue) { struct sk_buff *skb; @@ -464,6 +482,8 @@ static const struct net_device_ops ntb_netdev_ops = { .ndo_set_mac_address 
= eth_mac_addr, }; +static const struct ntb_queue_handlers ntb_netdev_handlers; + static void ntb_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -491,10 +511,137 @@ static int ntb_get_link_ksettings(struct net_device *dev, return 0; } +static void ntb_get_channels(struct net_device *ndev, + struct ethtool_channels *channels) +{ + struct ntb_netdev *dev = netdev_priv(ndev); + + channels->combined_count = dev->num_queues; + channels->max_combined = ndev->num_tx_queues; +} + +static int ntb_set_channels(struct net_device *ndev, + struct ethtool_channels *channels) +{ + struct ntb_netdev *dev = netdev_priv(ndev); + unsigned int new = channels->combined_count; + unsigned int old = dev->num_queues; + bool running = netif_running(ndev); + struct ntb_netdev_queue *queue; + unsigned int q, created; + int rc = 0; + + if (channels->rx_count || channels->tx_count || channels->other_count) + return -EINVAL; + + if (!new || new > ndev->num_tx_queues) + return -ERANGE; + + if (new == old) + return 0; + + if (new < old) { + if (running) + for (q = new; q < old; q++) + netif_stop_subqueue(ndev, q); + + rc = netif_set_real_num_queues(ndev, new, new); + if (rc) + goto out_restore; + + /* Publish new queue count before invalidating QP pointers */ + dev->num_queues = new; + + for (q = new; q < old; q++) { + queue = &dev->queues[q]; + + if (running) { + ntb_transport_link_down(queue->qp); + ntb_netdev_queue_rx_drain(queue); + timer_delete_sync(&queue->tx_timer); + } + + ntb_transport_free_queue(queue->qp); + queue->qp = NULL; + } + + goto out_restore; + } + + created = old; + for (q = old; q < new; q++) { + queue = &dev->queues[q]; + + queue->ntdev = dev; + queue->qid = q; + queue->qp = ntb_transport_create_queue(queue, dev->client_dev, + &ntb_netdev_handlers); + if (!queue->qp) { + rc = -ENOSPC; + goto err_new; + } + created++; + + if (!running) + continue; + + timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0); + + rc = ntb_netdev_queue_rx_fill(ndev, 
queue); + if (rc) + goto err_new; + + /* + * Carrier may already be on due to other QPs. Keep the new + * subqueue stopped until we get a Link Up event for this QP. + */ + netif_stop_subqueue(ndev, q); + } + + rc = netif_set_real_num_queues(ndev, new, new); + if (rc) + goto err_new; + + dev->num_queues = new; + + if (running) + for (q = old; q < new; q++) + ntb_transport_link_up(dev->queues[q].qp); + + return 0; + +err_new: + if (running) { + unsigned int rollback = created; + + while (rollback-- > old) { + queue = &dev->queues[rollback]; + ntb_transport_link_down(queue->qp); + ntb_netdev_queue_rx_drain(queue); + timer_delete_sync(&queue->tx_timer); + } + } + + while (created-- > old) { + queue = &dev->queues[created]; + ntb_transport_free_queue(queue->qp); + queue->qp = NULL; + } + +out_restore: + if (running) { + ntb_netdev_sync_subqueues(dev); + ntb_netdev_update_carrier(dev); + } + return rc; +} + static const struct ethtool_ops ntb_ethtool_ops = { .get_drvinfo = ntb_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = ntb_get_link_ksettings, + .get_channels = ntb_get_channels, + .set_channels = ntb_set_channels, }; static const struct ntb_queue_handlers ntb_netdev_handlers = { -- 2.51.0