From: Matt Olson

For DQO, change the QPL page registration logic to be more flexible so
that it honors the "max_registered_pages" parameter from the gVNIC
device. Previously, the number of RX pages per QPL was hardcoded to
twice the ring size, and the number of TX pages per QPL was dictated by
the device in the DQO-QPL device option. Now, in DQO-QPL mode, the
driver ignores the "tx_pages_per_qpl" parameter indicated in the
DQO-QPL device option and instead allocates up to (tx_queue_length / 2)
pages per TX QPL and up to (rx_queue_length * 2) pages per RX QPL,
while keeping the total number of pages under "max_registered_pages".

Merge the DQO and GQI QPL page calculation logic into a unified
gve_update_num_qpl_pages() function. Add rx_pages_per_qpl to the priv
struct for consumption by both DQO and GQI.

Signed-off-by: Matt Olson
Signed-off-by: Max Yuan
Reviewed-by: Jordan Rhee
Reviewed-by: Harshitha Ramamurthy
Reviewed-by: Willem de Bruijn
Reviewed-by: Praveen Kaligineedi
Signed-off-by: Joshua Washington
---
 drivers/net/ethernet/google/gve/gve.h                 | 18 ++++++--------
 drivers/net/ethernet/google/gve/gve_adminq.c          |  8 -------
 drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c |  2 +-
 drivers/net/ethernet/google/gve/gve_main.c            | 40 ++++++++++++++++++++++++++++++++
 drivers/net/ethernet/google/gve/gve_rx.c              |  5 +---
 drivers/net/ethernet/google/gve/gve_rx_dqo.c          |  6 ++---
 drivers/net/ethernet/google/gve/gve_tx.c              |  5 +---
 drivers/net/ethernet/google/gve/gve_tx_dqo.c          |  4 +---
 8 files changed, 53 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 970d5ca8..adc91d6a 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -79,8 +79,6 @@
 
 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
 
-#define DQO_QPL_DEFAULT_TX_PAGES 512
-
 /* Maximum TSO size supported on DQO */
 #define GVE_DQO_TX_MAX 0x3FFFF
 
@@ -711,6 +709,7 @@ struct gve_ptype_lut {
 /* Parameters for allocating resources for tx queues */
 struct gve_tx_alloc_rings_cfg {
         struct gve_tx_queue_config *qcfg;
+        u16 pages_per_qpl;
 
         u16 num_xdp_rings;
 
@@ -726,6 +725,7 @@ struct gve_rx_alloc_rings_cfg {
         /* tx config is also needed to determine QPL ids */
         struct gve_rx_queue_config *qcfg_rx;
         struct gve_tx_queue_config *qcfg_tx;
+        u16 pages_per_qpl;
 
         u16 ring_size;
         u16 packet_buffer_size;
@@ -816,7 +816,8 @@ struct gve_priv {
         u16 min_rx_desc_cnt;
         bool modify_ring_size_enabled;
         bool default_min_ring_size;
-        u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
+        u16 tx_pages_per_qpl;
+        u16 rx_pages_per_qpl;
         u64 max_registered_pages;
         u64 num_registered_pages; /* num pages registered with NIC */
         struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -1150,14 +1151,6 @@ static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
         return gve_get_rx_qpl_id(tx_cfg, 0);
 }
 
-static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
-{
-        /* For DQO, page count should be more than ring size for
-         * out-of-order completions. Set it to two times of ring size.
-         */
-        return 2 * rx_desc_cnt;
-}
-
 /* Returns the correct dma direction for tx and rx qpls */
 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
                                                        int id)
@@ -1303,6 +1296,9 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown);
 void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
                              struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
                              struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+void gve_update_num_qpl_pages(struct gve_priv *priv,
+                              struct gve_rx_alloc_rings_cfg *rx_alloc_cfg,
+                              struct gve_tx_alloc_rings_cfg *tx_alloc_cfg);
 int gve_adjust_config(struct gve_priv *priv,
                       struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
                       struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index f27b9501..b1983f97 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -970,14 +970,6 @@ static void gve_enable_supported_features(struct gve_priv *priv,
                 priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
         }
 
-        /* Override pages for qpl for DQO-QPL */
-        if (dev_op_dqo_qpl) {
-                priv->tx_pages_per_qpl =
-                        be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
-                if (priv->tx_pages_per_qpl == 0)
-                        priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
-        }
-
         if (dev_op_buffer_sizes &&
             (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
                 priv->max_rx_buffer_size =
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 0e2b703c..6880d153 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -133,7 +133,7 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
         u32 idx;
 
         idx = rx->dqo.next_qpl_page_idx;
-        if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
+        if (idx >= priv->rx_pages_per_qpl) {
                 net_err_ratelimited("%s: Out of QPL pages\n",
                                     priv->dev->name);
                 return -ENOMEM;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 4feaa481..7a26faeb 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -965,6 +965,7 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->qcfg = &priv->tx_cfg;
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->ring_size = priv->tx_desc_cnt;
+        cfg->pages_per_qpl = priv->tx_pages_per_qpl;
         cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
         cfg->tx = priv->tx;
 }
@@ -996,12 +997,48 @@ static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
         }
 }
 
+void gve_update_num_qpl_pages(struct gve_priv *priv,
+                              struct gve_rx_alloc_rings_cfg *rx_alloc_cfg,
+                              struct gve_tx_alloc_rings_cfg *tx_alloc_cfg)
+{
+        u64 ideal_tx_pages, ideal_rx_pages;
+        u16 tx_num_queues, rx_num_queues;
+        u64 max_pages, tx_pages;
+
+        if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+                rx_alloc_cfg->pages_per_qpl = rx_alloc_cfg->ring_size;
+        } else if (priv->queue_format == GVE_DQO_QPL_FORMAT) {
+                /*
+                 * We want 2 pages per RX descriptor and half a page per TX
+                 * descriptor, which means the fraction ideal_tx_pages /
+                 * (ideal_tx_pages + ideal_rx_pages) of the pages we allocate
+                 * should be for TX. Shrink proportionally as necessary to
+                 * avoid allocating more than max_registered_pages total pages.
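+                 *
+                 * Worked example (hypothetical numbers, for illustration
+                 * only): with 1024-entry rings, 4 TX queues and 4 RX queues,
+                 * ideal_tx_pages = 2048 and ideal_rx_pages = 8192; if the
+                 * device caps max_registered_pages at 5120, TX gets
+                 * 5120 * 2048 / 10240 = 1024 pages (256 per TX QPL) and RX
+                 * gets the remaining 4096 (1024 per RX QPL).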
+                 */
+                tx_num_queues = tx_alloc_cfg->qcfg->num_queues;
+                rx_num_queues = rx_alloc_cfg->qcfg_rx->num_queues;
+
+                ideal_tx_pages = tx_alloc_cfg->ring_size * tx_num_queues / 2;
+                ideal_rx_pages = rx_alloc_cfg->ring_size * rx_num_queues * 2;
+                max_pages = min(priv->max_registered_pages,
+                                ideal_tx_pages + ideal_rx_pages);
+
+                tx_pages = (max_pages * ideal_tx_pages) /
+                           (ideal_tx_pages + ideal_rx_pages);
+                tx_alloc_cfg->pages_per_qpl = tx_pages / tx_num_queues;
+                rx_alloc_cfg->pages_per_qpl = (max_pages - tx_pages) /
+                                              rx_num_queues;
+        }
+}
+
 static int gve_queues_mem_alloc(struct gve_priv *priv,
                                 struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
                                 struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
         int err;
 
+        gve_update_num_qpl_pages(priv, rx_alloc_cfg, tx_alloc_cfg);
+
         if (gve_is_gqi(priv))
                 err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
         else
@@ -1292,6 +1329,7 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->enable_header_split = priv->header_split_enabled;
         cfg->ring_size = priv->rx_desc_cnt;
+        cfg->pages_per_qpl = priv->rx_pages_per_qpl;
         cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
         cfg->rx = priv->rx;
         cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
@@ -1371,6 +1409,8 @@ static int gve_queues_start(struct gve_priv *priv,
         priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
         priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
         priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+        priv->tx_pages_per_qpl = tx_alloc_cfg->pages_per_qpl;
+        priv->rx_pages_per_qpl = rx_alloc_cfg->pages_per_qpl;
 
         gve_tx_start_rings(priv, gve_num_tx_queues(priv));
         gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 9a37bd99..f466fe82 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -277,7 +277,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
         struct device *hdev = &priv->pdev->dev;
         u32 slots = cfg->ring_size;
         int filled_pages;
-        int qpl_page_cnt;
         u32 qpl_id = 0;
         size_t bytes;
         int err;
@@ -313,10 +312,8 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 
         if (!rx->data.raw_addressing) {
                 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
-                qpl_page_cnt = cfg->ring_size;
-
                 rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-                                                         qpl_page_cnt);
+                                                         cfg->pages_per_qpl);
                 if (!rx->data.qpl) {
                         err = -ENOMEM;
                         goto abort_with_copy_pool;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index d2f5c2d7..57c45c54 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -218,7 +218,6 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 {
         struct device *hdev = &priv->pdev->dev;
         struct page_pool *pool;
-        int qpl_page_cnt;
         size_t size;
         u32 qpl_id;
 
@@ -246,7 +245,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
         XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
 
         rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
-                gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+                cfg->pages_per_qpl;
         rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
                                            sizeof(rx->dqo.buf_states[0]),
                                            GFP_KERNEL, priv->numa_node);
@@ -281,10 +280,9 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
                 rx->dqo.page_pool = pool;
         } else {
                 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
-                qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
 
                 rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-                                                        qpl_page_cnt);
+                                                        cfg->pages_per_qpl);
                 if (!rx->dqo.qpl)
                         goto err;
                 rx->dqo.next_qpl_page_idx = 0;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 97efc8d2..65401a05 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -264,7 +264,6 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
                                  int idx)
 {
         struct device *hdev = &priv->pdev->dev;
-        int qpl_page_cnt;
         u32 qpl_id = 0;
         size_t bytes;
 
@@ -291,10 +290,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
         tx->dev = hdev;
         if (!tx->raw_addressing) {
                 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
-                qpl_page_cnt = priv->tx_pages_per_qpl;
-
                 tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-                                                            qpl_page_cnt);
+                                                            cfg->pages_per_qpl);
                 if (!tx->tx_fifo.qpl)
                         goto abort_with_desc;
 
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index a2b22004..57361406 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -302,7 +302,6 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 {
         struct device *hdev = &priv->pdev->dev;
         int num_pending_packets;
-        int qpl_page_cnt;
         size_t bytes;
         u32 qpl_id;
         int i;
@@ -384,10 +383,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 
         if (!cfg->raw_addressing) {
                 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
-                qpl_page_cnt = priv->tx_pages_per_qpl;
 
                 tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
-                                                        qpl_page_cnt);
+                                                        cfg->pages_per_qpl);
                 if (!tx->dqo.qpl)
                         goto err;
--
2.53.0.239.g8d8fc8a987-goog

From: Matt Olson

The gVNIC device exposes a device option (MODIFY_RING) to the driver
that presents a range of ring sizes from which the user may select. In
the DQO-QPL queue format, however, the driver ignores the "max" of this
range and instead only lets the user configure the ring size within
[min, default]. This was done because increasing the ring size could
cause the number of registered pages to exceed the maximum allowed by
the device.

In order to support large ring sizes, stop ignoring the "max" of the
range presented in the MODIFY_RING option.
Signed-off-by: Matt Olson
Signed-off-by: Max Yuan
Reviewed-by: Jordan Rhee
Reviewed-by: Harshitha Ramamurthy
Reviewed-by: Praveen Kaligineedi
Signed-off-by: Joshua Washington
---
 drivers/net/ethernet/google/gve/gve_adminq.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index b1983f97..08587bf4 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -989,12 +989,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
         if (dev_op_modify_ring &&
             (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
                 priv->modify_ring_size_enabled = true;
-
-                /* max ring size for DQO QPL should not be overwritten because of device limit */
-                if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
-                        priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
-                        priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
-                }
+                priv->max_rx_desc_cnt =
+                        be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+                priv->max_tx_desc_cnt =
+                        be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
 
                 if (priv->default_min_ring_size) {
                         /* If device hasn't provided minimums, use default minimums */
                         priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
--
2.53.0.239.g8d8fc8a987-goog
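With the MODIFY_RING maximum honored, larger ring sizes can be requested
through the standard ethtool ring controls, and gve_update_num_qpl_pages()
from the first patch scales the per-QPL page counts so that QPL registration
stays within max_registered_pages. For example (interface name and sizes are
illustrative only): "ethtool -g eth0" shows the advertised ring-size
maximums, and "ethtool -G eth0 rx 4096 tx 4096" requests larger rings.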