From: Pavan Kumar Linga

Move both TX and RX queue resources to the newly introduced
idpf_q_vec_rsrc structure.

Reviewed-by: Anton Nadezhdin
Signed-off-by: Pavan Kumar Linga
Signed-off-by: Joshua Hay
Signed-off-by: Tony Nguyen
---
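Not part of the patch, just a note for reviewers: a minimal sketch of the
access pattern this change results in. idpf_example_rxq_count() is a
made-up helper; the field names are taken from the diff below.

/* Queue bookkeeping that used to live directly on the vport
 * (vport->num_rxq, vport->rxq_grps, ...) is now reached through the
 * default idpf_q_vec_rsrc instance embedded in struct idpf_vport.
 */
static u16 idpf_example_rxq_count(const struct idpf_vport *vport)
{
	const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;

	return rsrc->num_rxq;	/* was: vport->num_rxq */
}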
 drivers/net/ethernet/intel/idpf/idpf.h        |  71 ++--
 .../net/ethernet/intel/idpf/idpf_ethtool.c    |  77 ++--
 drivers/net/ethernet/intel/idpf/idpf_lib.c    |  70 ++--
 drivers/net/ethernet/intel/idpf/idpf_ptp.c    |  17 +-
 drivers/net/ethernet/intel/idpf/idpf_txrx.c   | 380 ++++++++++--------
 drivers/net/ethernet/intel/idpf/idpf_txrx.h   |  21 +-
 .../net/ethernet/intel/idpf/idpf_virtchnl.c   | 258 ++++++------
 .../net/ethernet/intel/idpf/idpf_virtchnl.h   |  12 +-
 drivers/net/ethernet/intel/idpf/xdp.c         |  37 +-
 drivers/net/ethernet/intel/idpf/xdp.h         |   6 +-
 drivers/net/ethernet/intel/idpf/xsk.c         |   7 +-
 11 files changed, 528 insertions(+), 428 deletions(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 4ec9859756ca..77268739731e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -291,14 +291,35 @@ struct idpf_fsteer_fltr {

 /**
  * struct idpf_q_vec_rsrc - handle for queue and vector resources
+ * @dev: device pointer for DMA mapping
  * @q_vectors: array of queue vectors
  * @q_vector_idxs: starting index of queue vectors
  * @num_q_vectors: number of IRQ vectors allocated
  * @noirq_v_idx: ID of the NOIRQ vector
  * @noirq_dyn_ctl_ena: value to write to the above to enable it
  * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @txq_grps: array of TX queue groups
+ * @txq_desc_count: TX queue descriptor count
+ * @complq_desc_count: completion queue descriptor count
+ * @txq_model: split queue or single queue queuing model
+ * @num_txq: number of allocated TX queues
+ * @num_complq: number of allocated completion queues
+ * @num_txq_grp: number of TX queue groups
+ * @num_rxq_grp: number of RX queue groups. Number of groups * number of
+ *		 RX queues per group will yield total number of RX queues.
+ * @rxq_model: split queue or single queue queuing model
+ * @rxq_grps: array of RX queue groups
+ * @num_rxq: number of allocated RX queues
+ * @num_bufq: number of allocated buffer queues
+ * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
+ *		    to complete all buffer descriptors for all buffer queues in
+ *		    the worst case.
+ * @bufq_desc_count: buffer queue descriptor count
+ * @num_bufqs_per_qgrp: buffer queues per RX queue in a given grouping
+ * @base_rxd: true if the driver should use base descriptors instead of flex
  */
 struct idpf_q_vec_rsrc {
+	struct device *dev;
 	struct idpf_q_vector *q_vectors;
 	u16 *q_vector_idxs;
 	u16 num_q_vectors;
@@ -306,36 +327,36 @@ struct idpf_q_vec_rsrc {
 	u32 noirq_dyn_ctl_ena;
 	void __iomem *noirq_dyn_ctl;

+	struct idpf_txq_group *txq_grps;
+	u32 txq_desc_count;
+	u32 complq_desc_count;
+	u32 txq_model;
+	u16 num_txq;
+	u16 num_complq;
+	u16 num_txq_grp;
+
+	u16 num_rxq_grp;
+	u32 rxq_model;
+	struct idpf_rxq_group *rxq_grps;
+	u16 num_rxq;
+	u16 num_bufq;
+	u32 rxq_desc_count;
+	u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+	u8 num_bufqs_per_qgrp;
+	bool base_rxd;
 };

 /**
  * struct idpf_vport - Handle for netdevices and queue resources
  * @dflt_qv_rsrc: contains default queue and vector resources
  * @num_txq: Number of allocated TX queues
- * @num_complq: Number of allocated completion queues
- * @txq_desc_count: TX queue descriptor count
- * @complq_desc_count: Completion queue descriptor count
  * @compln_clean_budget: Work budget for completion clean
- * @num_txq_grp: Number of TX queue groups
- * @txq_grps: Array of TX queue groups
- * @txq_model: Split queue or single queue queuing model
  * @txqs: Used only in hotpath to get to the right queue very fast
  * @crc_enable: Enable CRC insertion offload
  * @xdpsq_share: whether XDPSQ sharing is enabled
  * @num_xdp_txq: number of XDPSQs
  * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
  * @xdp_prog: installed XDP program
- * @num_rxq: Number of allocated RX queues
- * @num_bufq: Number of allocated buffer queues
- * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
- *		    to complete all buffer descriptors for all buffer queues in
- *		    the worst case.
- * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
- * @bufq_desc_count: Buffer queue descriptor count
- * @num_rxq_grp: Number of RX queues in a group
- * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
- *	      group will yield total number of RX queues.
- * @rxq_model: Splitq queue or single queue queuing model * @rx_ptype_lkup: Lookup table for ptypes on RX * @vdev_info: IDC vport device info pointer * @adapter: back pointer to associated adapter @@ -346,7 +367,6 @@ struct idpf_q_vec_rsrc { * @vport_id: Device given vport identifier * @idx: Software index in adapter vports struct * @default_vport: Use this vport if one isn't specified - * @base_rxd: True if the driver should use base descriptors instead of flex * @max_mtu: device given max possible MTU * @default_mac_addr: device will give a default MAC to use * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation @@ -361,13 +381,7 @@ struct idpf_q_vec_rsrc { struct idpf_vport { struct idpf_q_vec_rsrc dflt_qv_rsrc; u16 num_txq; - u16 num_complq; - u32 txq_desc_count; - u32 complq_desc_count; u32 compln_clean_budget; - u16 num_txq_grp; - struct idpf_txq_group *txq_grps; - u32 txq_model; struct idpf_tx_queue **txqs; bool crc_enable; @@ -376,14 +390,6 @@ struct idpf_vport { u16 xdp_txq_offset; struct bpf_prog *xdp_prog; - u16 num_rxq; - u16 num_bufq; - u32 rxq_desc_count; - u8 num_bufqs_per_qgrp; - u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP]; - u16 num_rxq_grp; - struct idpf_rxq_group *rxq_grps; - u32 rxq_model; struct libeth_rx_pt *rx_ptype_lkup; struct iidc_rdma_vport_dev_info *vdev_info; @@ -395,7 +401,6 @@ struct idpf_vport { u32 vport_id; u16 idx; bool default_vport; - bool base_rxd; u16 max_mtu; u8 default_mac_addr[ETH_ALEN]; diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 2efa3c08aba5..072c065a6006 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -18,7 +18,7 @@ static u32 idpf_get_rx_ring_count(struct net_device *netdev) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); - num_rxq = vport->num_rxq; + num_rxq = vport->dflt_qv_rsrc.num_rxq; idpf_vport_ctrl_unlock(netdev); return num_rxq; @@ -644,8 +644,8 @@ static void idpf_get_ringparam(struct net_device *netdev, ring->rx_max_pending = IDPF_MAX_RXQ_DESC; ring->tx_max_pending = IDPF_MAX_TXQ_DESC; - ring->rx_pending = vport->rxq_desc_count; - ring->tx_pending = vport->txq_desc_count; + ring->rx_pending = vport->dflt_qv_rsrc.rxq_desc_count; + ring->tx_pending = vport->dflt_qv_rsrc.txq_desc_count; kring->tcp_data_split = idpf_vport_get_hsplit(vport); @@ -669,6 +669,7 @@ static int idpf_set_ringparam(struct net_device *netdev, { struct idpf_vport_user_config_data *config_data; u32 new_rx_count, new_tx_count; + struct idpf_q_vec_rsrc *rsrc; struct idpf_vport *vport; int i, err = 0; u16 idx; @@ -704,8 +705,9 @@ static int idpf_set_ringparam(struct net_device *netdev, netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n", new_tx_count); - if (new_tx_count == vport->txq_desc_count && - new_rx_count == vport->rxq_desc_count && + rsrc = &vport->dflt_qv_rsrc; + if (new_tx_count == rsrc->txq_desc_count && + new_rx_count == rsrc->rxq_desc_count && kring->tcp_data_split == idpf_vport_get_hsplit(vport)) goto unlock_mutex; @@ -724,10 +726,10 @@ static int idpf_set_ringparam(struct net_device *netdev, /* Since we adjusted the RX completion queue count, the RX buffer queue * descriptor count needs to be adjusted as well */ - for (i = 0; i < vport->num_bufqs_per_qgrp; i++) - vport->bufq_desc_count[i] = + for (i = 0; i < rsrc->num_bufqs_per_qgrp; i++) + rsrc->bufq_desc_count[i] = IDPF_RX_BUFQ_DESC_COUNT(new_rx_count, - vport->num_bufqs_per_qgrp); + rsrc->num_bufqs_per_qgrp); 
err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE); @@ -1104,6 +1106,7 @@ static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data) static void idpf_collect_queue_stats(struct idpf_vport *vport) { struct idpf_port_stats *pstats = &vport->port_stats; + struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; int i, j; /* zero out port stats since they're actually tracked in per @@ -1120,11 +1123,11 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) u64_stats_set(&pstats->tx_dma_map_errs, 0); u64_stats_update_end(&pstats->stats_sync); - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i]; u16 num_rxq; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) num_rxq = rxq_grp->splitq.num_rxq_sets; else num_rxq = rxq_grp->singleq.num_rxq; @@ -1135,7 +1138,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) struct idpf_rx_queue *rxq; unsigned int start; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; else rxq = rxq_grp->singleq.rxqs[j]; @@ -1162,8 +1165,8 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) } } - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) { u64 linearize, qbusy, skb_drops, dma_map_errs; @@ -1208,6 +1211,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config *vport_config; + struct idpf_q_vec_rsrc *rsrc; struct idpf_vport *vport; unsigned int total = 0; unsigned int i, j; @@ -1228,8 +1232,9 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, idpf_collect_queue_stats(vport); idpf_add_port_stats(vport, &data); - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + rsrc = &vport->dflt_qv_rsrc; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; qtype = VIRTCHNL2_QUEUE_TYPE_TX; @@ -1253,10 +1258,10 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX); total = 0; - is_splitq = idpf_is_queue_model_split(vport->rxq_model); + is_splitq = idpf_is_queue_model_split(rsrc->rxq_model); - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i]; u16 num_rxq; qtype = VIRTCHNL2_QUEUE_TYPE_RX; @@ -1298,15 +1303,16 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport, u32 q_num) { + const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; int q_grp, q_idx; - if (!idpf_is_queue_model_split(vport->rxq_model)) - return vport->rxq_grps->singleq.rxqs[q_num]->q_vector; + if (!idpf_is_queue_model_split(rsrc->rxq_model)) + return rsrc->rxq_grps->singleq.rxqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; - return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector; + return 
rsrc->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector; } /** @@ -1319,14 +1325,15 @@ struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport, struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport, u32 q_num) { + const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; int q_grp; - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!idpf_is_queue_model_split(rsrc->txq_model)) return vport->txqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; - return vport->txq_grps[q_grp].complq->q_vector; + return rsrc->txq_grps[q_grp].complq->q_vector; } /** @@ -1363,7 +1370,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev, u32 q_num) { const struct idpf_netdev_priv *np = netdev_priv(netdev); - const struct idpf_vport *vport; + struct idpf_q_vec_rsrc *rsrc; + struct idpf_vport *vport; int err = 0; idpf_vport_ctrl_lock(netdev); @@ -1372,16 +1380,17 @@ static int idpf_get_q_coalesce(struct net_device *netdev, if (!test_bit(IDPF_VPORT_UP, np->state)) goto unlock_mutex; - if (q_num >= vport->num_rxq && q_num >= vport->num_txq) { + rsrc = &vport->dflt_qv_rsrc; + if (q_num >= rsrc->num_rxq && q_num >= rsrc->num_txq) { err = -EINVAL; goto unlock_mutex; } - if (q_num < vport->num_rxq) + if (q_num < rsrc->num_rxq) __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num), VIRTCHNL2_QUEUE_TYPE_RX); - if (q_num < vport->num_txq) + if (q_num < rsrc->num_txq) __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num), VIRTCHNL2_QUEUE_TYPE_TX); @@ -1549,6 +1558,7 @@ static int idpf_set_coalesce(struct net_device *netdev, struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_user_config_data *user_config; struct idpf_q_coalesce *q_coal; + struct idpf_q_vec_rsrc *rsrc; struct idpf_vport *vport; int i, err = 0; @@ -1560,14 +1570,15 @@ static int idpf_set_coalesce(struct net_device *netdev, if (!test_bit(IDPF_VPORT_UP, np->state)) goto unlock_mutex; - for (i = 0; i < vport->num_txq; i++) { + rsrc = &vport->dflt_qv_rsrc; + for (i = 0; i < rsrc->num_txq; i++) { q_coal = &user_config->q_coalesce[i]; err = idpf_set_q_coalesce(vport, q_coal, ec, i, false); if (err) goto unlock_mutex; } - for (i = 0; i < vport->num_rxq; i++) { + for (i = 0; i < rsrc->num_rxq; i++) { q_coal = &user_config->q_coalesce[i]; err = idpf_set_q_coalesce(vport, q_coal, ec, i, true); if (err) @@ -1748,6 +1759,7 @@ static void idpf_get_ts_stats(struct net_device *netdev, struct ethtool_ts_stats *ts_stats) { struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_q_vec_rsrc *rsrc; struct idpf_vport *vport; unsigned int start; @@ -1763,8 +1775,9 @@ static void idpf_get_ts_stats(struct net_device *netdev, if (!test_bit(IDPF_VPORT_UP, np->state)) goto exit; - for (u16 i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + rsrc = &vport->dflt_qv_rsrc; + for (u16 i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; for (u16 j = 0; j < txq_grp->num_txq; j++) { struct idpf_tx_queue *txq = txq_grp->txqs[j]; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 70d0383102a6..4ec662f1918e 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -991,7 +991,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl) idpf_send_disable_vport_msg(vport); idpf_send_disable_queues_msg(vport); - idpf_send_map_unmap_queue_vector_msg(vport, false); + 
idpf_send_map_unmap_queue_vector_msg(vport, rsrc, false); /* Normally we ask for queues in create_vport, but if the number of * initially requested queues have changed, for example via ethtool * set channels, we do delete queues and then add the queues back @@ -1004,8 +1004,8 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl) vport->link_up = false; idpf_vport_intr_deinit(vport, rsrc); - idpf_xdp_rxq_info_deinit_all(vport); - idpf_vport_queues_rel(vport); + idpf_xdp_rxq_info_deinit_all(rsrc); + idpf_vport_queues_rel(vport, rsrc); idpf_vport_intr_rel(rsrc); clear_bit(IDPF_VPORT_UP, np->state); @@ -1159,7 +1159,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) */ static bool idpf_is_hsplit_supported(const struct idpf_vport *vport) { - return idpf_is_queue_model_split(vport->rxq_model) && + return idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model) && idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS, IDPF_CAP_HSPLIT); } @@ -1277,6 +1277,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, idpf_get_default_vports(adapter); rsrc = &vport->dflt_qv_rsrc; + rsrc->dev = &adapter->pdev->dev; rsrc->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); if (!rsrc->q_vector_idxs) goto free_vport; @@ -1427,9 +1428,10 @@ static void idpf_restore_features(struct idpf_vport *vport) */ static int idpf_set_real_num_queues(struct idpf_vport *vport) { - int err, txq = vport->num_txq - vport->num_xdp_txq; + int err, txq = vport->dflt_qv_rsrc.num_txq - vport->num_xdp_txq; - err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq); + err = netif_set_real_num_rx_queues(vport->netdev, + vport->dflt_qv_rsrc.num_rxq); if (err) return err; @@ -1454,17 +1456,17 @@ static void idpf_up_complete(struct idpf_vport *vport) /** * idpf_rx_init_buf_tail - Write initial buffer ring tail value - * @vport: virtual port struct + * @rsrc: pointer to queue and vector resources */ -static void idpf_rx_init_buf_tail(struct idpf_vport *vport) +static void idpf_rx_init_buf_tail(struct idpf_q_vec_rsrc *rsrc) { int i, j; - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &rsrc->rxq_grps[i]; - if (idpf_is_queue_model_split(vport->rxq_model)) { - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + if (idpf_is_queue_model_split(rsrc->rxq_model)) { + for (j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { const struct idpf_buf_queue *q = &grp->splitq.bufq_sets[j].bufq; @@ -1511,14 +1513,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl) goto err_rtnl_unlock; } - err = idpf_vport_queues_alloc(vport); + err = idpf_vport_queues_alloc(vport, rsrc); if (err) goto intr_rel; vport_config = adapter->vport_config[vport->idx]; chunks = &vport_config->qid_reg_info; - err = idpf_vport_queue_ids_init(vport, chunks); + err = idpf_vport_queue_ids_init(vport, rsrc, chunks); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n", vport->vport_id, err); @@ -1532,23 +1534,23 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl) goto queues_rel; } - err = idpf_queue_reg_init(vport, chunks); + err = idpf_queue_reg_init(vport, rsrc, chunks); if (err) { dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n", vport->vport_id, err); goto intr_deinit; } - err = idpf_rx_bufs_init_all(vport); + err = idpf_rx_bufs_init_all(vport, rsrc); if (err) { dev_err(&adapter->pdev->dev, "Failed 
to initialize RX buffers for vport %u: %d\n", vport->vport_id, err); goto intr_deinit; } - idpf_rx_init_buf_tail(vport); + idpf_rx_init_buf_tail(rsrc); - err = idpf_xdp_rxq_info_init_all(vport); + err = idpf_xdp_rxq_info_init_all(rsrc); if (err) { netdev_err(vport->netdev, "Failed to initialize XDP RxQ info for vport %u: %pe\n", @@ -1558,14 +1560,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl) idpf_vport_intr_ena(vport, rsrc); - err = idpf_send_config_queues_msg(vport); + err = idpf_send_config_queues_msg(vport, rsrc); if (err) { dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n", vport->vport_id, err); goto rxq_deinit; } - err = idpf_send_map_unmap_queue_vector_msg(vport, true); + err = idpf_send_map_unmap_queue_vector_msg(vport, rsrc, true); if (err) { dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n", vport->vport_id, err); @@ -1608,13 +1610,13 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl) disable_queues: idpf_send_disable_queues_msg(vport); unmap_queue_vectors: - idpf_send_map_unmap_queue_vector_msg(vport, false); + idpf_send_map_unmap_queue_vector_msg(vport, rsrc, false); rxq_deinit: - idpf_xdp_rxq_info_deinit_all(vport); + idpf_xdp_rxq_info_deinit_all(rsrc); intr_deinit: idpf_vport_intr_deinit(vport, rsrc); queues_rel: - idpf_vport_queues_rel(vport); + idpf_vport_queues_rel(vport, rsrc); intr_rel: idpf_vport_intr_rel(rsrc); @@ -2003,8 +2005,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, { struct idpf_netdev_priv *np = netdev_priv(vport->netdev); bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state); + struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; struct idpf_adapter *adapter = vport->adapter; struct idpf_vport_config *vport_config; + struct idpf_q_vec_rsrc *new_rsrc; struct idpf_vport *new_vport; int err, tmp_err = 0; @@ -2031,16 +2035,18 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, */ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up)); + new_rsrc = &new_vport->dflt_qv_rsrc; + /* Adjust resource parameters prior to reallocating resources */ switch (reset_cause) { case IDPF_SR_Q_CHANGE: - err = idpf_vport_adjust_qs(new_vport); + err = idpf_vport_adjust_qs(new_vport, new_rsrc); if (err) goto free_vport; break; case IDPF_SR_Q_DESC_CHANGE: /* Update queue parameters before allocating resources */ - idpf_vport_calc_num_q_desc(new_vport); + idpf_vport_calc_num_q_desc(new_vport, new_rsrc); break; case IDPF_SR_MTU_CHANGE: idpf_idc_vdev_mtu_event(vport->vdev_info, @@ -2069,10 +2075,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, * to add code to add_queues to change the vport config within * vport itself as it will be wiped with a memcpy later. 
*/ - err = idpf_send_add_queues_msg(vport, new_vport->num_txq, - new_vport->num_complq, - new_vport->num_rxq, - new_vport->num_bufq); + err = idpf_send_add_queues_msg(vport, new_rsrc->num_txq, + new_rsrc->num_complq, + new_rsrc->num_rxq, + new_rsrc->num_bufq); if (err) goto err_reset; @@ -2098,9 +2104,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, goto free_vport; err_reset: - tmp_err = idpf_send_add_queues_msg(vport, vport->num_txq, - vport->num_complq, vport->num_rxq, - vport->num_bufq); + tmp_err = idpf_send_add_queues_msg(vport, rsrc->num_txq, + rsrc->num_complq, rsrc->num_rxq, + rsrc->num_bufq); err_open: if (!tmp_err && vport_is_up) diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c index 3e1052d070cf..990e78686786 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c @@ -384,15 +384,17 @@ static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter) WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies); idpf_for_each_vport(adapter, vport) { + struct idpf_q_vec_rsrc *rsrc; bool split; - if (!vport || !vport->rxq_grps) + if (!vport || !vport->dflt_qv_rsrc.rxq_grps) continue; - split = idpf_is_queue_model_split(vport->rxq_model); + rsrc = &vport->dflt_qv_rsrc; + split = idpf_is_queue_model_split(rsrc->rxq_model); - for (u16 i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + for (u16 i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &rsrc->rxq_grps[i]; idpf_ptp_update_phctime_rxq_grp(grp, split, systime); } @@ -681,9 +683,10 @@ int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb, */ static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter) { + struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; bool enable = true, splitq; - splitq = idpf_is_queue_model_split(vport->rxq_model); + splitq = idpf_is_queue_model_split(rsrc->rxq_model); if (rx_filter == HWTSTAMP_FILTER_NONE) { enable = false; @@ -692,8 +695,8 @@ static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter) vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; } - for (u16 i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + for (u16 i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &rsrc->rxq_grps[i]; struct idpf_rx_queue *rx_queue; u16 j, num_rxq; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 48557c67dda8..3a369f592115 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -148,24 +148,24 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq) /** * idpf_tx_desc_rel_all - Free Tx Resources for All Queues - * @vport: virtual port structure + * @rsrc: pointer to queue and vector resources * * Free all transmit software resources */ -static void idpf_tx_desc_rel_all(struct idpf_vport *vport) +static void idpf_tx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc) { int i, j; - if (!vport->txq_grps) + if (!rsrc->txq_grps) return; - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) idpf_tx_desc_rel(txq_grp->txqs[j]); - if (idpf_is_queue_model_split(vport->txq_model)) + if (idpf_is_queue_model_split(rsrc->txq_model)) 
idpf_compl_desc_rel(txq_grp->complq); } } @@ -265,7 +265,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport, /** * idpf_compl_desc_alloc - allocate completion descriptors - * @vport: vport to allocate resources for + * @vport: virtual port private structure * @complq: completion queue to set up * * Return: 0 on success, -errno on failure. @@ -298,10 +298,12 @@ static int idpf_compl_desc_alloc(const struct idpf_vport *vport, /** * idpf_tx_desc_alloc_all - allocate all queues Tx resources * @vport: virtual port private structure + * @rsrc: pointer to queue and vector resources * * Return: 0 on success, negative on failure */ -static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) +static int idpf_tx_desc_alloc_all(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { int err = 0; int i, j; @@ -309,9 +311,9 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) /* Setup buffer queues. In single queue model buffer queues and * completion queues will be same */ - for (i = 0; i < vport->num_txq_grp; i++) { - for (j = 0; j < vport->txq_grps[i].num_txq; j++) { - struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; + for (i = 0; i < rsrc->num_txq_grp; i++) { + for (j = 0; j < rsrc->txq_grps[i].num_txq; j++) { + struct idpf_tx_queue *txq = rsrc->txq_grps[i].txqs[j]; err = idpf_tx_desc_alloc(vport, txq); if (err) { @@ -322,11 +324,11 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) } } - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!idpf_is_queue_model_split(rsrc->txq_model)) continue; /* Setup completion queues */ - err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq); + err = idpf_compl_desc_alloc(vport, rsrc->txq_grps[i].complq); if (err) { pci_err(vport->adapter->pdev, "Allocation for Tx Completion Queue %u failed\n", @@ -337,7 +339,7 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) err_out: if (err) - idpf_tx_desc_rel_all(vport); + idpf_tx_desc_rel_all(rsrc); return err; } @@ -490,23 +492,24 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq, /** * idpf_rx_desc_rel_all - Free Rx Resources for All Queues * @vport: virtual port structure + * @rsrc: pointer to queue and vector resources * * Free all rx queues resources */ -static void idpf_rx_desc_rel_all(struct idpf_vport *vport) +static void idpf_rx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc) { - struct device *dev = &vport->adapter->pdev->dev; + struct device *dev = rsrc->dev; struct idpf_rxq_group *rx_qgrp; u16 num_rxq; int i, j; - if (!vport->rxq_grps) + if (!rsrc->rxq_grps) return; - for (i = 0; i < vport->num_rxq_grp; i++) { - rx_qgrp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + rx_qgrp = &rsrc->rxq_grps[i]; - if (!idpf_is_queue_model_split(vport->rxq_model)) { + if (!idpf_is_queue_model_split(rsrc->rxq_model)) { for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev, VIRTCHNL2_QUEUE_MODEL_SINGLE); @@ -521,7 +524,7 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) if (!rx_qgrp->splitq.bufq_sets) continue; - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[j]; @@ -781,19 +784,21 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq, /** * idpf_rx_bufs_init_all - Initialize all RX bufs - * @vport: virtual port struct + * @vport: pointer to vport struct + * @rsrc: pointer to queue and vector resources * * Return: 0 on success, negative on failure */ -int 
idpf_rx_bufs_init_all(struct idpf_vport *vport) +int idpf_rx_bufs_init_all(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { - bool split = idpf_is_queue_model_split(vport->rxq_model); + bool split = idpf_is_queue_model_split(rsrc->rxq_model); int i, j, err; - idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog); + idpf_xdp_copy_prog_to_rqs(rsrc, vport->xdp_prog); - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u32 truesize = 0; /* Allocate bufs for the rxq itself in singleq */ @@ -813,7 +818,7 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) } /* Otherwise, allocate bufs for the buffer queues */ - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { enum libeth_fqe_type type; struct idpf_buf_queue *q; @@ -899,18 +904,20 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport, /** * idpf_rx_desc_alloc_all - allocate all RX queues resources * @vport: virtual port structure + * @rsrc: pointer to queue and vector resources * * Return: 0 on success, negative on failure */ -static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) +static int idpf_rx_desc_alloc_all(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { struct idpf_rxq_group *rx_qgrp; int i, j, err; u16 num_rxq; - for (i = 0; i < vport->num_rxq_grp; i++) { - rx_qgrp = &vport->rxq_grps[i]; - if (idpf_is_queue_model_split(vport->rxq_model)) + for (i = 0; i < rsrc->num_rxq_grp; i++) { + rx_qgrp = &rsrc->rxq_grps[i]; + if (idpf_is_queue_model_split(rsrc->rxq_model)) num_rxq = rx_qgrp->splitq.num_rxq_sets; else num_rxq = rx_qgrp->singleq.num_rxq; @@ -918,7 +925,7 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) for (j = 0; j < num_rxq; j++) { struct idpf_rx_queue *q; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; @@ -932,10 +939,10 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) } } - if (!idpf_is_queue_model_split(vport->rxq_model)) + if (!idpf_is_queue_model_split(rsrc->rxq_model)) continue; - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; @@ -953,7 +960,7 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) return 0; err_out: - idpf_rx_desc_rel_all(vport); + idpf_rx_desc_rel_all(rsrc); return err; } @@ -964,7 +971,7 @@ static int idpf_init_queue_set(const struct idpf_queue_set *qs) bool splitq; int err; - splitq = idpf_is_queue_model_split(vport->rxq_model); + splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model); for (u32 i = 0; i < qs->num; i++) { const struct idpf_queue_ptr *q = &qs->qs[i]; @@ -1034,19 +1041,18 @@ static int idpf_init_queue_set(const struct idpf_queue_set *qs) static void idpf_clean_queue_set(const struct idpf_queue_set *qs) { - const struct idpf_vport *vport = qs->vport; - struct device *dev = vport->netdev->dev.parent; + const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc; for (u32 i = 0; i < qs->num; i++) { const struct idpf_queue_ptr *q = &qs->qs[i]; switch (q->type) { case VIRTCHNL2_QUEUE_TYPE_RX: - idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model); - idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model); + idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model); + idpf_rx_desc_rel(q->rxq, rsrc->dev, 
rsrc->rxq_model); break; case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: - idpf_rx_desc_rel_bufq(q->bufq, dev); + idpf_rx_desc_rel_bufq(q->bufq, rsrc->dev); break; case VIRTCHNL2_QUEUE_TYPE_TX: idpf_tx_desc_rel(q->txq); @@ -1183,6 +1189,7 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv) static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid) { + struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc; struct idpf_vport *vport = qs->vport; struct idpf_q_vector *q_vector; int err; @@ -1199,8 +1206,8 @@ static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid) if (!vport->xdp_txq_offset) goto config; - q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp, - qs->qv_rsrc->num_q_vectors), + q_vector->xsksq = kcalloc(DIV_ROUND_UP(rsrc->num_rxq_grp, + rsrc->num_q_vectors), sizeof(*q_vector->xsksq), GFP_KERNEL); if (!q_vector->xsksq) return -ENOMEM; @@ -1295,25 +1302,23 @@ int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en) /** * idpf_txq_group_rel - Release all resources for txq groups - * @vport: vport to release txq groups on + * @rsrc: pointer to queue and vector resources */ -static void idpf_txq_group_rel(struct idpf_vport *vport) +static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc) { - bool split, flow_sch_en; + bool split; int i, j; - if (!vport->txq_grps) + if (!rsrc->txq_grps) return; - split = idpf_is_queue_model_split(vport->txq_model); - flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, - VIRTCHNL2_CAP_SPLITQ_QSCHED); + split = idpf_is_queue_model_split(rsrc->txq_model); - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) { - if (flow_sch_en) { + if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) { kfree(txq_grp->txqs[j]->refillq); txq_grp->txqs[j]->refillq = NULL; } @@ -1328,8 +1333,8 @@ static void idpf_txq_group_rel(struct idpf_vport *vport) kfree(txq_grp->complq); txq_grp->complq = NULL; } - kfree(vport->txq_grps); - vport->txq_grps = NULL; + kfree(rsrc->txq_grps); + rsrc->txq_grps = NULL; } /** @@ -1340,7 +1345,7 @@ static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp) { int i, j; - for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { + for (i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) { struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; for (j = 0; j < bufq_set->num_refillqs; j++) { @@ -1354,21 +1359,21 @@ static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp) /** * idpf_rxq_group_rel - Release all resources for rxq groups - * @vport: vport to release rxq groups on + * @rsrc: pointer to queue and vector resources */ -static void idpf_rxq_group_rel(struct idpf_vport *vport) +static void idpf_rxq_group_rel(struct idpf_q_vec_rsrc *rsrc) { int i; - if (!vport->rxq_grps) + if (!rsrc->rxq_grps) return; - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u16 num_rxq; int j; - if (idpf_is_queue_model_split(vport->rxq_model)) { + if (idpf_is_queue_model_split(rsrc->rxq_model)) { num_rxq = rx_qgrp->splitq.num_rxq_sets; for (j = 0; j < num_rxq; j++) { kfree(rx_qgrp->splitq.rxq_sets[j]); @@ -1386,35 +1391,38 @@ static void idpf_rxq_group_rel(struct idpf_vport *vport) } } } - kfree(vport->rxq_grps); - vport->rxq_grps = NULL; + kfree(rsrc->rxq_grps); + rsrc->rxq_grps = NULL; } /** 
* idpf_vport_queue_grp_rel_all - Release all queue groups * @vport: vport to release queue groups for + * @rsrc: pointer to queue and vector resources */ -static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport) +static void idpf_vport_queue_grp_rel_all(struct idpf_q_vec_rsrc *rsrc) { - idpf_txq_group_rel(vport); - idpf_rxq_group_rel(vport); + idpf_txq_group_rel(rsrc); + idpf_rxq_group_rel(rsrc); } /** * idpf_vport_queues_rel - Free memory for all queues * @vport: virtual port + * @rsrc: pointer to queue and vector resources * * Free the memory allocated for queues associated to a vport */ -void idpf_vport_queues_rel(struct idpf_vport *vport) +void idpf_vport_queues_rel(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { - idpf_xdp_copy_prog_to_rqs(vport, NULL); + idpf_xdp_copy_prog_to_rqs(rsrc, NULL); - idpf_tx_desc_rel_all(vport); - idpf_rx_desc_rel_all(vport); + idpf_tx_desc_rel_all(rsrc); + idpf_rx_desc_rel_all(rsrc); idpf_xdpsqs_put(vport); - idpf_vport_queue_grp_rel_all(vport); + idpf_vport_queue_grp_rel_all(rsrc); kfree(vport->txqs); vport->txqs = NULL; @@ -1423,6 +1431,7 @@ void idpf_vport_queues_rel(struct idpf_vport *vport) /** * idpf_vport_init_fast_path_txqs - Initialize fast path txq array * @vport: vport to init txqs on + * @rsrc: pointer to queue and vector resources * * We get a queue index from skb->queue_mapping and we need a fast way to * dereference the queue from queue groups. This allows us to quickly pull a @@ -1430,20 +1439,21 @@ void idpf_vport_queues_rel(struct idpf_vport *vport) * * Return: 0 on success, negative on failure */ -static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) +static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps; struct work_struct *tstamp_task = &vport->tstamp_task; int i, j, k = 0; - vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), + vport->txqs = kcalloc(rsrc->num_txq, sizeof(*vport->txqs), GFP_KERNEL); - if (!vport->txqs) return -ENOMEM; - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *tx_grp = &vport->txq_grps[i]; + vport->num_txq = rsrc->num_txq; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *tx_grp = &rsrc->txq_grps[i]; for (j = 0; j < tx_grp->num_txq; j++, k++) { vport->txqs[k] = tx_grp->txqs[j]; @@ -1464,16 +1474,18 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) * idpf_vport_init_num_qs - Initialize number of queues * @vport: vport to initialize queues * @vport_msg: data to be filled into vport + * @rsrc: pointer to queue and vector resources */ void idpf_vport_init_num_qs(struct idpf_vport *vport, - struct virtchnl2_create_vport *vport_msg) + struct virtchnl2_create_vport *vport_msg, + struct idpf_q_vec_rsrc *rsrc) { struct idpf_vport_user_config_data *config_data; u16 idx = vport->idx; config_data = &vport->adapter->vport_config[idx]->user_config; - vport->num_txq = le16_to_cpu(vport_msg->num_tx_q); - vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q); + rsrc->num_txq = le16_to_cpu(vport_msg->num_tx_q); + rsrc->num_rxq = le16_to_cpu(vport_msg->num_rx_q); /* number of txqs and rxqs in config data will be zeros only in the * driver load path and we dont update them there after */ @@ -1482,10 +1494,10 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q); } - if (idpf_is_queue_model_split(vport->txq_model)) - vport->num_complq = 
le16_to_cpu(vport_msg->num_tx_complq); - if (idpf_is_queue_model_split(vport->rxq_model)) - vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); + if (idpf_is_queue_model_split(rsrc->txq_model)) + rsrc->num_complq = le16_to_cpu(vport_msg->num_tx_complq); + if (idpf_is_queue_model_split(rsrc->rxq_model)) + rsrc->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); vport->xdp_prog = config_data->xdp_prog; if (idpf_xdp_enabled(vport)) { @@ -1500,23 +1512,25 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, } /* Adjust number of buffer queues per Rx queue group. */ - if (!idpf_is_queue_model_split(vport->rxq_model)) { - vport->num_bufqs_per_qgrp = 0; + if (!idpf_is_queue_model_split(rsrc->rxq_model)) { + rsrc->num_bufqs_per_qgrp = 0; return; } - vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; + rsrc->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; } /** * idpf_vport_calc_num_q_desc - Calculate number of queue groups * @vport: vport to calculate q groups for + * @rsrc: pointer to queue and vector resources */ -void idpf_vport_calc_num_q_desc(struct idpf_vport *vport) +void idpf_vport_calc_num_q_desc(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { struct idpf_vport_user_config_data *config_data; - int num_bufqs = vport->num_bufqs_per_qgrp; + u8 num_bufqs = rsrc->num_bufqs_per_qgrp; u32 num_req_txq_desc, num_req_rxq_desc; u16 idx = vport->idx; int i; @@ -1525,31 +1539,31 @@ void idpf_vport_calc_num_q_desc(struct idpf_vport *vport) num_req_txq_desc = config_data->num_req_txq_desc; num_req_rxq_desc = config_data->num_req_rxq_desc; - vport->complq_desc_count = 0; + rsrc->complq_desc_count = 0; if (num_req_txq_desc) { - vport->txq_desc_count = num_req_txq_desc; - if (idpf_is_queue_model_split(vport->txq_model)) { - vport->complq_desc_count = num_req_txq_desc; - if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) - vport->complq_desc_count = + rsrc->txq_desc_count = num_req_txq_desc; + if (idpf_is_queue_model_split(rsrc->txq_model)) { + rsrc->complq_desc_count = num_req_txq_desc; + if (rsrc->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) + rsrc->complq_desc_count = IDPF_MIN_TXQ_COMPLQ_DESC; } } else { - vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; - if (idpf_is_queue_model_split(vport->txq_model)) - vport->complq_desc_count = + rsrc->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; + if (idpf_is_queue_model_split(rsrc->txq_model)) + rsrc->complq_desc_count = IDPF_DFLT_TX_COMPLQ_DESC_COUNT; } if (num_req_rxq_desc) - vport->rxq_desc_count = num_req_rxq_desc; + rsrc->rxq_desc_count = num_req_rxq_desc; else - vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; + rsrc->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; for (i = 0; i < num_bufqs; i++) { - if (!vport->bufq_desc_count[i]) - vport->bufq_desc_count[i] = - IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count, + if (!rsrc->bufq_desc_count[i]) + rsrc->bufq_desc_count[i] = + IDPF_RX_BUFQ_DESC_COUNT(rsrc->rxq_desc_count, num_bufqs); } } @@ -1638,54 +1652,54 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx, /** * idpf_vport_calc_num_q_groups - Calculate number of queue groups - * @vport: vport to calculate q groups for + * @rsrc: pointer to queue and vector resources */ -void idpf_vport_calc_num_q_groups(struct idpf_vport *vport) +void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc) { - if (idpf_is_queue_model_split(vport->txq_model)) - vport->num_txq_grp = vport->num_txq; + if (idpf_is_queue_model_split(rsrc->txq_model)) + rsrc->num_txq_grp = rsrc->num_txq; else - vport->num_txq_grp = 
IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; + rsrc->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; - if (idpf_is_queue_model_split(vport->rxq_model)) - vport->num_rxq_grp = vport->num_rxq; + if (idpf_is_queue_model_split(rsrc->rxq_model)) + rsrc->num_rxq_grp = rsrc->num_rxq; else - vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; + rsrc->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; } /** * idpf_vport_calc_numq_per_grp - Calculate number of queues per group - * @vport: vport to calculate queues for + * @rsrc: pointer to queue and vector resources * @num_txq: return parameter for number of TX queues * @num_rxq: return parameter for number of RX queues */ -static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, +static void idpf_vport_calc_numq_per_grp(struct idpf_q_vec_rsrc *rsrc, u16 *num_txq, u16 *num_rxq) { - if (idpf_is_queue_model_split(vport->txq_model)) + if (idpf_is_queue_model_split(rsrc->txq_model)) *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; else - *num_txq = vport->num_txq; + *num_txq = rsrc->num_txq; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; else - *num_rxq = vport->num_rxq; + *num_rxq = rsrc->num_rxq; } /** * idpf_rxq_set_descids - set the descids supported by this queue - * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * @q: rx queue for which descids are set * */ -static void idpf_rxq_set_descids(const struct idpf_vport *vport, +static void idpf_rxq_set_descids(struct idpf_q_vec_rsrc *rsrc, struct idpf_rx_queue *q) { - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) return; - if (vport->base_rxd) + if (rsrc->base_rxd) q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M; else q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; @@ -1694,26 +1708,29 @@ static void idpf_rxq_set_descids(const struct idpf_vport *vport, /** * idpf_txq_group_alloc - Allocate all txq group resources * @vport: vport to allocate txq groups for + * @rsrc: pointer to queue and vector resources * @num_txq: number of txqs to allocate for each group * * Return: 0 on success, negative on failure */ -static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) +static int idpf_txq_group_alloc(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, + u16 num_txq) { bool split, flow_sch_en; int i; - vport->txq_grps = kcalloc(vport->num_txq_grp, - sizeof(*vport->txq_grps), GFP_KERNEL); - if (!vport->txq_grps) + rsrc->txq_grps = kcalloc(rsrc->num_txq_grp, + sizeof(*rsrc->txq_grps), GFP_KERNEL); + if (!rsrc->txq_grps) return -ENOMEM; - split = idpf_is_queue_model_split(vport->txq_model); + split = idpf_is_queue_model_split(rsrc->txq_model); flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SPLITQ_QSCHED); - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; struct idpf_adapter *adapter = vport->adapter; int j; @@ -1731,7 +1748,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) struct idpf_tx_queue *q = tx_qgrp->txqs[j]; q->dev = &adapter->pdev->dev; - q->desc_count = vport->txq_desc_count; + q->desc_count = rsrc->txq_desc_count; q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); q->netdev = vport->netdev; @@ -1766,7 +1783,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 
num_txq) if (!tx_qgrp->complq) goto err_alloc; - tx_qgrp->complq->desc_count = vport->complq_desc_count; + tx_qgrp->complq->desc_count = rsrc->complq_desc_count; tx_qgrp->complq->txq_grp = tx_qgrp; tx_qgrp->complq->netdev = vport->netdev; tx_qgrp->complq->clean_budget = vport->compln_clean_budget; @@ -1778,7 +1795,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) return 0; err_alloc: - idpf_txq_group_rel(vport); + idpf_txq_group_rel(rsrc); return -ENOMEM; } @@ -1786,28 +1803,32 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) /** * idpf_rxq_group_alloc - Allocate all rxq group resources * @vport: vport to allocate rxq groups for + * @rsrc: pointer to queue and vector resources * @num_rxq: number of rxqs to allocate for each group * * Return: 0 on success, negative on failure */ -static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) +static int idpf_rxq_group_alloc(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, + u16 num_rxq) { int i, k, err = 0; - bool hs; + bool hs, rsc; - vport->rxq_grps = kcalloc(vport->num_rxq_grp, - sizeof(struct idpf_rxq_group), GFP_KERNEL); - if (!vport->rxq_grps) + rsrc->rxq_grps = kcalloc(rsrc->num_rxq_grp, + sizeof(struct idpf_rxq_group), GFP_KERNEL); + if (!rsrc->rxq_grps) return -ENOMEM; hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED; + rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW); - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; int j; rx_qgrp->vport = vport; - if (!idpf_is_queue_model_split(vport->rxq_model)) { + if (!idpf_is_queue_model_split(rsrc->rxq_model)) { rx_qgrp->singleq.num_rxq = num_rxq; for (j = 0; j < num_rxq; j++) { rx_qgrp->singleq.rxqs[j] = @@ -1832,25 +1853,27 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) } } - rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, + rx_qgrp->splitq.bufq_sets = kcalloc(rsrc->num_bufqs_per_qgrp, sizeof(struct idpf_bufq_set), GFP_KERNEL); if (!rx_qgrp->splitq.bufq_sets) { err = -ENOMEM; goto err_alloc; } + rx_qgrp->splitq.num_bufq_sets = rsrc->num_bufqs_per_qgrp; - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[j]; int swq_size = sizeof(struct idpf_sw_queue); struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; - q->desc_count = vport->bufq_desc_count[j]; + q->desc_count = rsrc->bufq_desc_count[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; idpf_queue_assign(HSPLIT_EN, q, hs); + idpf_queue_assign(RSC_EN, q, rsc); bufq_set->num_refillqs = num_rxq; bufq_set->refillqs = kcalloc(num_rxq, swq_size, @@ -1864,7 +1887,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) &bufq_set->refillqs[k]; refillq->desc_count = - vport->bufq_desc_count[j]; + rsrc->bufq_desc_count[j]; idpf_queue_set(GEN_CHK, refillq); idpf_queue_set(RFL_GEN_CHK, refillq); refillq->ring = kcalloc(refillq->desc_count, @@ -1881,34 +1904,35 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) for (j = 0; j < num_rxq; j++) { struct idpf_rx_queue *q; - if (!idpf_is_queue_model_split(vport->rxq_model)) { + if (!idpf_is_queue_model_split(rsrc->rxq_model)) { q = rx_qgrp->singleq.rxqs[j]; goto setup_rxq; } q = &rx_qgrp->splitq.rxq_sets[j]->rxq; rx_qgrp->splitq.rxq_sets[j]->refillq[0] = 
&rx_qgrp->splitq.bufq_sets[0].refillqs[j]; - if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) + if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) rx_qgrp->splitq.rxq_sets[j]->refillq[1] = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; idpf_queue_assign(HSPLIT_EN, q, hs); + idpf_queue_assign(RSC_EN, q, rsc); setup_rxq: - q->desc_count = vport->rxq_desc_count; + q->desc_count = rsrc->rxq_desc_count; q->rx_ptype_lkup = vport->rx_ptype_lkup; q->bufq_sets = rx_qgrp->splitq.bufq_sets; q->idx = (i * num_rxq) + j; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_max_pkt_size = vport->netdev->mtu + LIBETH_RX_LL_LEN; - idpf_rxq_set_descids(vport, q); + idpf_rxq_set_descids(rsrc, q); } } err_alloc: if (err) - idpf_rxq_group_rel(vport); + idpf_rxq_group_rel(rsrc); return err; } @@ -1916,28 +1940,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) /** * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources * @vport: vport with qgrps to allocate + * @rsrc: pointer to queue and vector resources * * Return: 0 on success, negative on failure */ -static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport) +static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { u16 num_txq, num_rxq; int err; - idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq); + idpf_vport_calc_numq_per_grp(rsrc, &num_txq, &num_rxq); - err = idpf_txq_group_alloc(vport, num_txq); + err = idpf_txq_group_alloc(vport, rsrc, num_txq); if (err) goto err_out; - err = idpf_rxq_group_alloc(vport, num_rxq); + err = idpf_rxq_group_alloc(vport, rsrc, num_rxq); if (err) goto err_out; return 0; err_out: - idpf_vport_queue_grp_rel_all(vport); + idpf_vport_queue_grp_rel_all(rsrc); return err; } @@ -1945,20 +1971,22 @@ static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport) /** * idpf_vport_queues_alloc - Allocate memory for all queues * @vport: virtual port + * @rsrc: pointer to queue and vector resources * * Allocate memory for queues associated with a vport. * * Return: 0 on success, negative on failure. 
*/ -int idpf_vport_queues_alloc(struct idpf_vport *vport) +int idpf_vport_queues_alloc(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { int err; - err = idpf_vport_queue_grp_alloc_all(vport); + err = idpf_vport_queue_grp_alloc_all(vport, rsrc); if (err) goto err_out; - err = idpf_vport_init_fast_path_txqs(vport); + err = idpf_vport_init_fast_path_txqs(vport, rsrc); if (err) goto err_out; @@ -1966,18 +1994,18 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport) if (err) goto err_out; - err = idpf_tx_desc_alloc_all(vport); + err = idpf_tx_desc_alloc_all(vport, rsrc); if (err) goto err_out; - err = idpf_rx_desc_alloc_all(vport); + err = idpf_rx_desc_alloc_all(vport, rsrc); if (err) goto err_out; return 0; err_out: - idpf_vport_queues_rel(vport); + idpf_vport_queues_rel(vport, rsrc); return err; } @@ -3154,7 +3182,7 @@ netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } - if (idpf_is_queue_model_split(vport->txq_model)) + if (idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model)) return idpf_tx_splitq_frame(skb, tx_q); else return idpf_tx_singleq_frame(skb, tx_q); @@ -4354,19 +4382,19 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc) { - u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq; - bool split = idpf_is_queue_model_split(vport->rxq_model); + u16 num_txq_grp = rsrc->num_txq_grp - vport->num_xdp_txq; + bool split = idpf_is_queue_model_split(rsrc->rxq_model); struct idpf_rxq_group *rx_qgrp; struct idpf_txq_group *tx_qgrp; u32 i, qv_idx, q_index; - for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { + for (i = 0, qv_idx = 0; i < rsrc->num_rxq_grp; i++) { u16 num_rxq; if (qv_idx >= rsrc->num_q_vectors) qv_idx = 0; - rx_qgrp = &vport->rxq_grps[i]; + rx_qgrp = &rsrc->rxq_grps[i]; if (split) num_rxq = rx_qgrp->splitq.num_rxq_sets; else @@ -4389,7 +4417,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport, } if (split) { - for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { struct idpf_buf_queue *bufq; bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; @@ -4403,7 +4431,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport, qv_idx++; } - split = idpf_is_queue_model_split(vport->txq_model); + split = idpf_is_queue_model_split(rsrc->txq_model); for (i = 0, qv_idx = 0; i < num_txq_grp; i++) { u16 num_txq; @@ -4411,7 +4439,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport, if (qv_idx >= rsrc->num_q_vectors) qv_idx = 0; - tx_qgrp = &vport->txq_grps[i]; + tx_qgrp = &rsrc->txq_grps[i]; num_txq = tx_qgrp->num_txq; for (u32 j = 0; j < num_txq; j++) { @@ -4504,7 +4532,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport, int irq_num; u16 qv_idx; - if (idpf_is_queue_model_split(vport->txq_model)) + if (idpf_is_queue_model_split(rsrc->txq_model)) napi_poll = idpf_vport_splitq_napi_poll; else napi_poll = idpf_vport_singleq_napi_poll; @@ -4547,14 +4575,14 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport, if (!rsrc->q_vectors) return -ENOMEM; - txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, + txqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp, rsrc->num_q_vectors); - rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp, + rxqs_per_vector = DIV_ROUND_UP(rsrc->num_rxq_grp, rsrc->num_q_vectors); - bufqs_per_vector = vport->num_bufqs_per_qgrp * - 
DIV_ROUND_UP(vport->num_rxq_grp,
+	bufqs_per_vector = rsrc->num_bufqs_per_qgrp *
+			   DIV_ROUND_UP(rsrc->num_rxq_grp,
 					rsrc->num_q_vectors);
-	complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+	complqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
 					  rsrc->num_q_vectors);

 	for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
@@ -4580,7 +4608,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport,
 		if (!q_vector->rx)
 			goto error;

-		if (!idpf_is_queue_model_split(vport->rxq_model))
+		if (!idpf_is_queue_model_split(rsrc->rxq_model))
 			continue;

 		q_vector->bufq = kcalloc(bufqs_per_vector,
@@ -4676,8 +4704,8 @@ int idpf_config_rss(struct idpf_vport *vport)
  */
 void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
 {
+	u16 num_active_rxq = vport->dflt_qv_rsrc.num_rxq;
 	struct idpf_adapter *adapter = vport->adapter;
-	u16 num_active_rxq = vport->num_rxq;
 	struct idpf_rss_data *rss_data;
 	int i;

diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index fefa5647f6e2..99daa081268a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -283,6 +283,7 @@ struct idpf_ptype_state {
  * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
  * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
  * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_RSC_EN: enable Receive Side Coalescing on Rx (splitq)
  * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
  * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
  *		  queue
@@ -297,6 +298,7 @@ enum idpf_queue_flags_t {
 	__IDPF_Q_FLOW_SCH_EN,
 	__IDPF_Q_SW_MARKER,
 	__IDPF_Q_CRC_EN,
+	__IDPF_Q_RSC_EN,
 	__IDPF_Q_HSPLIT_EN,
 	__IDPF_Q_PTP,
 	__IDPF_Q_NOIRQ,
@@ -925,6 +927,7 @@ struct idpf_bufq_set {
  * @singleq.rxqs: Array of RX queue pointers
  * @splitq: Struct with split queue related members
  * @splitq.num_rxq_sets: Number of RX queue sets
+ * @splitq.num_bufq_sets: Number of buffer queue sets
  * @splitq.rxq_sets: Array of RX queue sets
  * @splitq.bufq_sets: Buffer queue set pointer
  *
@@ -942,6 +945,7 @@ struct idpf_rxq_group {
 	} singleq;
 	struct {
 		u16 num_rxq_sets;
+		u16 num_bufq_sets;
 		struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
 		struct idpf_bufq_set *bufq_sets;
 	} splitq;
@@ -1072,14 +1076,18 @@ static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
 int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
 void idpf_vport_init_num_qs(struct idpf_vport *vport,
-			    struct virtchnl2_create_vport *vport_msg);
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+			    struct virtchnl2_create_vport *vport_msg,
+			    struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+				struct idpf_q_vec_rsrc *rsrc);
 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
 			     struct virtchnl2_create_vport *vport_msg,
 			     struct idpf_vport_max_q *max_q);
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
-int idpf_vport_queues_alloc(struct idpf_vport *vport);
-void idpf_vport_queues_rel(struct idpf_vport *vport);
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+			    struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+			   struct idpf_q_vec_rsrc *rsrc);
 void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
 int idpf_vport_intr_alloc(struct idpf_vport *vport,
 			  struct idpf_q_vec_rsrc *rsrc);
@@ -1094,7 +1102,8 @@ void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
 int
idpf_config_rss(struct idpf_vport *vport); int idpf_init_rss_lut(struct idpf_vport *vport); void idpf_deinit_rss_lut(struct idpf_vport *vport); -int idpf_rx_bufs_init_all(struct idpf_vport *vport); +int idpf_rx_bufs_init_all(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc); struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport, u32 q_num); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 07c163d66537..211f00e7b30d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -1392,13 +1392,15 @@ static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type, /** * __idpf_queue_reg_init - initialize queue registers * @vport: virtual port structure + * @rsrc: pointer to queue and vector resources * @reg_vals: registers we are initializing * @num_regs: how many registers there are in total * @q_type: queue model * * Return number of queues that are initialized */ -static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, +static int __idpf_queue_reg_init(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals, int num_regs, u32 q_type) { struct idpf_adapter *adapter = vport->adapter; @@ -1406,8 +1408,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, switch (q_type) { case VIRTCHNL2_QUEUE_TYPE_TX: - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++) tx_qgrp->txqs[j]->tail = @@ -1415,8 +1417,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, } break; case VIRTCHNL2_QUEUE_TYPE_RX: - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u16 num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq && k < num_regs; j++, k++) { @@ -1429,9 +1431,9 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, } break; case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; - u8 num_bufqs = vport->num_bufqs_per_qgrp; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; + u8 num_bufqs = rsrc->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { struct idpf_buf_queue *q; @@ -1452,11 +1454,13 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, /** * idpf_queue_reg_init - initialize queue registers * @vport: virtual port structure + * @rsrc: pointer to queue and vector resources * @chunks: queue registers received over mailbox * * Return: 0 on success, negative on failure */ int idpf_queue_reg_init(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, struct idpf_queue_id_reg_info *chunks) { int num_regs, ret = 0; @@ -1471,14 +1475,14 @@ int idpf_queue_reg_init(struct idpf_vport *vport, num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, VIRTCHNL2_QUEUE_TYPE_TX, chunks); - if (num_regs < vport->num_txq) { + if (num_regs < rsrc->num_txq) { ret = -EINVAL; goto free_reg_vals; } - num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs, 
VIRTCHNL2_QUEUE_TYPE_TX); - if (num_regs < vport->num_txq) { + if (num_regs < rsrc->num_txq) { ret = -EINVAL; goto free_reg_vals; } @@ -1486,18 +1490,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport, /* Initialize Rx/buffer queue tail register address based on Rx queue * model */ - if (idpf_is_queue_model_split(vport->rxq_model)) { + if (idpf_is_queue_model_split(rsrc->rxq_model)) { num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER, chunks); - if (num_regs < vport->num_bufq) { + if (num_regs < rsrc->num_bufq) { ret = -EINVAL; goto free_reg_vals; } - num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); - if (num_regs < vport->num_bufq) { + if (num_regs < rsrc->num_bufq) { ret = -EINVAL; goto free_reg_vals; } @@ -1505,14 +1509,14 @@ int idpf_queue_reg_init(struct idpf_vport *vport, num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q, VIRTCHNL2_QUEUE_TYPE_RX, chunks); - if (num_regs < vport->num_rxq) { + if (num_regs < rsrc->num_rxq) { ret = -EINVAL; goto free_reg_vals; } - num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, + num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs, VIRTCHNL2_QUEUE_TYPE_RX); - if (num_regs < vport->num_rxq) { + if (num_regs < rsrc->num_rxq) { ret = -EINVAL; goto free_reg_vals; } @@ -1611,6 +1615,7 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter, */ int idpf_check_supported_desc_ids(struct idpf_vport *vport) { + struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; struct idpf_adapter *adapter = vport->adapter; struct virtchnl2_create_vport *vport_msg; u64 rx_desc_ids, tx_desc_ids; @@ -1627,17 +1632,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); - if (idpf_is_queue_model_split(vport->rxq_model)) { + if (idpf_is_queue_model_split(rsrc->rxq_model)) { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); } } else { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M)) - vport->base_rxd = true; + rsrc->base_rxd = true; } - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!idpf_is_queue_model_split(rsrc->txq_model)) return 0; if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { @@ -1722,24 +1727,24 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport) /** * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue - * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * @q: Tx queue to be inserted into VC chunk * @qi: pointer to the buffer containing the VC chunk */ -static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport, +static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc, const struct idpf_tx_queue *q, struct virtchnl2_txq_info *qi) { u32 val; qi->queue_id = cpu_to_le32(q->q_id); - qi->model = cpu_to_le16(vport->txq_model); + qi->model = cpu_to_le16(rsrc->txq_model); qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); qi->ring_len = cpu_to_le16(q->desc_count); qi->dma_ring_addr = cpu_to_le64(q->dma); qi->relative_queue_id = cpu_to_le16(q->rel_q_id); - if (!idpf_is_queue_model_split(vport->txq_model)) { + if (!idpf_is_queue_model_split(rsrc->txq_model)) { qi->sched_mode = 
cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE); return; } @@ -1761,18 +1766,18 @@ static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport, /** * idpf_fill_complq_config_chunk - fill chunk describing the completion queue - * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * @q: completion queue to be inserted into VC chunk * @qi: pointer to the buffer containing the VC chunk */ -static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport, +static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc, const struct idpf_compl_queue *q, struct virtchnl2_txq_info *qi) { u32 val; qi->queue_id = cpu_to_le32(q->q_id); - qi->model = cpu_to_le16(vport->txq_model); + qi->model = cpu_to_le16(rsrc->txq_model); qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); qi->ring_len = cpu_to_le16(q->desc_count); qi->dma_ring_addr = cpu_to_le64(q->dma); @@ -1838,10 +1843,10 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs) for (u32 i = 0; i < qs->num; i++) { if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX) - idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq, + idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq, &qi[params.num_chunks++]); else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION) - idpf_fill_complq_config_chunk(qs->vport, + idpf_fill_complq_config_chunk(qs->qv_rsrc, qs->qs[i].complq, &qi[params.num_chunks++]); } @@ -1852,29 +1857,31 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs) /** * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * * Return: 0 on success, -errno on failure. */ -static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) +static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { struct idpf_queue_set *qs __free(kfree) = NULL; - u32 totqs = vport->num_txq + vport->num_complq; + u32 totqs = rsrc->num_txq + rsrc->num_complq; u32 k = 0; - qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, totqs); + qs = idpf_alloc_queue_set(vport, rsrc, totqs); if (!qs) return -ENOMEM; /* Populate the queue info buffer with all queue context info */ - for (u32 i = 0; i < vport->num_txq_grp; i++) { - const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + for (u32 i = 0; i < rsrc->num_txq_grp; i++) { + const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; for (u32 j = 0; j < tx_qgrp->num_txq; j++) { qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; qs->qs[k++].txq = tx_qgrp->txqs[j]; } - if (idpf_is_queue_model_split(vport->txq_model)) { + if (idpf_is_queue_model_split(rsrc->txq_model)) { qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; qs->qs[k++].complq = tx_qgrp->complq; } @@ -1889,28 +1896,28 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) /** * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue - * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * @q: Rx queue to be inserted into VC chunk * @qi: pointer to the buffer containing the VC chunk */ -static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport, +static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc, struct idpf_rx_queue *q, struct virtchnl2_rxq_info *qi) { const struct idpf_bufq_set *sets; qi->queue_id = cpu_to_le32(q->q_id); - qi->model = cpu_to_le16(vport->rxq_model); + qi->model = 
cpu_to_le16(rsrc->rxq_model); qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); qi->ring_len = cpu_to_le16(q->desc_count); qi->dma_ring_addr = cpu_to_le64(q->dma); qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size); qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark); qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE); - if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) + if (idpf_queue_has(RSC_EN, q)) qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); - if (!idpf_is_queue_model_split(vport->rxq_model)) { + if (!idpf_is_queue_model_split(rsrc->rxq_model)) { qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); qi->desc_ids = cpu_to_le64(q->rxdids); @@ -1927,7 +1934,7 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport, qi->data_buffer_size = cpu_to_le32(q->rx_buf_size); qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); - if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { + if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { qi->bufq2_ena = IDPF_BUFQ2_ENA; qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id); } @@ -1944,16 +1951,16 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport, /** * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue - * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * @q: buffer queue to be inserted into VC chunk * @qi: pointer to the buffer containing the VC chunk */ -static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport, +static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc, const struct idpf_buf_queue *q, struct virtchnl2_rxq_info *qi) { qi->queue_id = cpu_to_le32(q->q_id); - qi->model = cpu_to_le16(vport->rxq_model); + qi->model = cpu_to_le16(rsrc->rxq_model); qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); qi->ring_len = cpu_to_le16(q->desc_count); qi->dma_ring_addr = cpu_to_le64(q->dma); @@ -1961,7 +1968,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport, qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark); qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE; - if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) + if (idpf_queue_has(RSC_EN, q)) qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC); if (idpf_queue_has(HSPLIT_EN, q)) { @@ -2023,10 +2030,10 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs) for (u32 i = 0; i < qs->num; i++) { if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX) - idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq, + idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq, &qi[params.num_chunks++]); else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) - idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq, + idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq, &qi[params.num_chunks++]); } @@ -2036,23 +2043,25 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs) /** * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * * Return: 0 on success, -errno on failure. 
*/ -static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) +static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { - bool splitq = idpf_is_queue_model_split(vport->rxq_model); + bool splitq = idpf_is_queue_model_split(rsrc->rxq_model); struct idpf_queue_set *qs __free(kfree) = NULL; - u32 totqs = vport->num_rxq + vport->num_bufq; + u32 totqs = rsrc->num_rxq + rsrc->num_bufq; u32 k = 0; - qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, totqs); + qs = idpf_alloc_queue_set(vport, rsrc, totqs); if (!qs) return -ENOMEM; /* Populate the queue info buffer with all queue context info */ - for (u32 i = 0; i < vport->num_rxq_grp; i++) { - const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { + const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u32 num_rxq; if (!splitq) { @@ -2060,7 +2069,7 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) goto rxq; } - for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; } @@ -2184,21 +2193,22 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs, */ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en) { + struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; struct idpf_queue_set *qs __free(kfree) = NULL; u32 num_txq, num_q, k = 0; bool split; - num_txq = vport->num_txq + vport->num_complq; - num_q = num_txq + vport->num_rxq + vport->num_bufq; + num_txq = rsrc->num_txq + rsrc->num_complq; + num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq; - qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num_q); + qs = idpf_alloc_queue_set(vport, rsrc, num_q); if (!qs) return -ENOMEM; - split = idpf_is_queue_model_split(vport->txq_model); + split = idpf_is_queue_model_split(rsrc->txq_model); - for (u32 i = 0; i < vport->num_txq_grp; i++) { - const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + for (u32 i = 0; i < rsrc->num_txq_grp; i++) { + const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; for (u32 j = 0; j < tx_qgrp->num_txq; j++) { qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; @@ -2215,10 +2225,10 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en) if (k != num_txq) return -EINVAL; - split = idpf_is_queue_model_split(vport->rxq_model); + split = idpf_is_queue_model_split(rsrc->rxq_model); - for (u32 i = 0; i < vport->num_rxq_grp; i++) { - const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { + const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u32 num_rxq; if (split) @@ -2239,7 +2249,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en) if (!split) continue; - for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; } @@ -2307,7 +2317,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, params.chunks = vqv; - split = idpf_is_queue_model_split(qs->vport->txq_model); + split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model); for (u32 i = 0; i < qs->num; i++) { const struct idpf_queue_ptr *q = &qs->qs[i]; @@ -2369,22 +2379,25 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, * 
idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue * vector message * @vport: virtual port data structure + * @rsrc: pointer to queue and vector resources * @map: true for map and false for unmap * * Return: 0 on success, -errno on failure. */ -int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) +int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, + bool map) { struct idpf_queue_set *qs __free(kfree) = NULL; - u32 num_q = vport->num_txq + vport->num_rxq; + u32 num_q = rsrc->num_txq + rsrc->num_rxq; u32 k = 0; - qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num_q); + qs = idpf_alloc_queue_set(vport, rsrc, num_q); if (!qs) return -ENOMEM; - for (u32 i = 0; i < vport->num_txq_grp; i++) { - const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + for (u32 i = 0; i < rsrc->num_txq_grp; i++) { + const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; for (u32 j = 0; j < tx_qgrp->num_txq; j++) { qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX; @@ -2392,14 +2405,14 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) } } - if (k != vport->num_txq) + if (k != rsrc->num_txq) return -EINVAL; - for (u32 i = 0; i < vport->num_rxq_grp; i++) { - const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { + const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u32 num_rxq; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) num_rxq = rx_qgrp->splitq.num_rxq_sets; else num_rxq = rx_qgrp->singleq.num_rxq; @@ -2407,7 +2420,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) for (u32 j = 0; j < num_rxq; j++) { qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) qs->qs[k++].rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; else @@ -2565,19 +2578,21 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport, /** * idpf_send_config_queues_msg - Send config queues virtchnl message * @vport: Virtual port private data structure + * @rsrc: pointer to queue and vector resources * * Will send config queues virtchnl message. Returns 0 on success, negative on * failure. 
*/ -int idpf_send_config_queues_msg(struct idpf_vport *vport) +int idpf_send_config_queues_msg(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc) { int err; - err = idpf_send_config_tx_queues_msg(vport); + err = idpf_send_config_tx_queues_msg(vport, rsrc); if (err) return err; - return idpf_send_config_rx_queues_msg(vport); + return idpf_send_config_rx_queues_msg(vport, rsrc); } /** @@ -3038,12 +3053,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) struct idpf_vc_xn_params xn_params = {}; u16 next_ptype_id = 0; ssize_t reply_sz; + bool is_splitq; int i, j, k; if (vport->rx_ptype_lkup) return 0; - if (idpf_is_queue_model_split(vport->rxq_model)) + is_splitq = idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model); + if (is_splitq) max_ptype = IDPF_RX_MAX_PTYPE; else max_ptype = IDPF_RX_MAX_BASE_PTYPE; @@ -3107,7 +3124,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) IDPF_INVALID_PTYPE_ID) goto out; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (is_splitq) k = le16_to_cpu(ptype->ptype_id_10); else k = ptype->ptype_id_8; @@ -3645,7 +3662,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport, vec_info.num_curr_vecs += IDPF_RESERVED_VECS; /* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */ - req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) + + req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) + IDPF_RESERVED_VECS; vec_info.num_req_vecs = req; @@ -3701,8 +3718,8 @@ int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) vport_config->max_q.max_complq = max_q->max_complq; vport_config->max_q.max_bufq = max_q->max_bufq; - vport->txq_model = le16_to_cpu(vport_msg->txq_model); - vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); + rsrc->txq_model = le16_to_cpu(vport_msg->txq_model); + rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model); vport->vport_type = le16_to_cpu(vport_msg->vport_type); vport->vport_id = le32_to_cpu(vport_msg->vport_id); @@ -3719,9 +3736,9 @@ int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); - idpf_vport_init_num_qs(vport, vport_msg); - idpf_vport_calc_num_q_desc(vport); - idpf_vport_calc_num_q_groups(vport); + idpf_vport_init_num_qs(vport, vport_msg, rsrc); + idpf_vport_calc_num_q_desc(vport, rsrc); + idpf_vport_calc_num_q_groups(rsrc); idpf_vport_alloc_vec_indexes(vport, rsrc); vport->crc_enable = adapter->crc_enable; @@ -3831,6 +3848,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, /** * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters * @vport: virtual port for which the queues ids are initialized + * @rsrc: pointer to queue and vector resources * @qids: queue ids * @num_qids: number of queue ids * @q_type: type of queue @@ -3839,6 +3857,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type, * parameters. Returns number of queue ids initialized. 
*/ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, const u32 *qids, int num_qids, u32 q_type) @@ -3847,19 +3866,19 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, switch (q_type) { case VIRTCHNL2_QUEUE_TYPE_TX: - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + for (i = 0; i < rsrc->num_txq_grp; i++) { + struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) tx_qgrp->txqs[j]->q_id = qids[k]; } break; case VIRTCHNL2_QUEUE_TYPE_RX: - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u16 num_rxq; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) num_rxq = rx_qgrp->splitq.num_rxq_sets; else num_rxq = rx_qgrp->singleq.num_rxq; @@ -3867,7 +3886,7 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, for (j = 0; j < num_rxq && k < num_qids; j++, k++) { struct idpf_rx_queue *q; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (idpf_is_queue_model_split(rsrc->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; @@ -3876,16 +3895,16 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, } break; case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION: - for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { - struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; + for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) { + struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i]; tx_qgrp->complq->q_id = qids[k]; } break; case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; - u8 num_bufqs = vport->num_bufqs_per_qgrp; + for (i = 0; i < rsrc->num_rxq_grp; i++) { + struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; + u8 num_bufqs = rsrc->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { struct idpf_buf_queue *q; @@ -3905,6 +3924,7 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, /** * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters * @vport: virtual port for which the queues ids are initialized + * @rsrc: pointer to queue and vector resources * @chunks: queue ids received over mailbox * * Will initialize all queue ids with ids received as mailbox parameters. @@ -3912,6 +3932,7 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, * Return: 0 on success, negative if all the queues are not initialized. 
*/ int idpf_vport_queue_ids_init(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, struct idpf_queue_id_reg_info *chunks) { int num_ids, err = 0; @@ -3925,13 +3946,13 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport, num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, VIRTCHNL2_QUEUE_TYPE_TX, chunks); - if (num_ids < vport->num_txq) { + if (num_ids < rsrc->num_txq) { err = -EINVAL; goto mem_rel; } - num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, + num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids, VIRTCHNL2_QUEUE_TYPE_TX); - if (num_ids < vport->num_txq) { + if (num_ids < rsrc->num_txq) { err = -EINVAL; goto mem_rel; } @@ -3939,44 +3960,46 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport, num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, VIRTCHNL2_QUEUE_TYPE_RX, chunks); - if (num_ids < vport->num_rxq) { + if (num_ids < rsrc->num_rxq) { err = -EINVAL; goto mem_rel; } - num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, + num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids, VIRTCHNL2_QUEUE_TYPE_RX); - if (num_ids < vport->num_rxq) { + if (num_ids < rsrc->num_rxq) { err = -EINVAL; goto mem_rel; } - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!idpf_is_queue_model_split(rsrc->txq_model)) goto check_rxq; q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); - if (num_ids < vport->num_complq) { + if (num_ids < rsrc->num_complq) { err = -EINVAL; goto mem_rel; } - num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); - if (num_ids < vport->num_complq) { + num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, + num_ids, q_type); + if (num_ids < rsrc->num_complq) { err = -EINVAL; goto mem_rel; } check_rxq: - if (!idpf_is_queue_model_split(vport->rxq_model)) + if (!idpf_is_queue_model_split(rsrc->rxq_model)) goto mem_rel; q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks); - if (num_ids < vport->num_bufq) { + if (num_ids < rsrc->num_bufq) { err = -EINVAL; goto mem_rel; } - num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); - if (num_ids < vport->num_bufq) + num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, + num_ids, q_type); + if (num_ids < rsrc->num_bufq) err = -EINVAL; mem_rel: @@ -3988,23 +4011,24 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport, /** * idpf_vport_adjust_qs - Adjust to new requested queues * @vport: virtual port data struct + * @rsrc: pointer to queue and vector resources * * Renegotiate queues. Returns 0 on success, negative on failure. 
*/ -int idpf_vport_adjust_qs(struct idpf_vport *vport) +int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc) { struct virtchnl2_create_vport vport_msg; int err; - vport_msg.txq_model = cpu_to_le16(vport->txq_model); - vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); + vport_msg.txq_model = cpu_to_le16(rsrc->txq_model); + vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model); err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, NULL); if (err) return err; - idpf_vport_init_num_qs(vport, &vport_msg); - idpf_vport_calc_num_q_groups(vport); + idpf_vport_init_num_qs(vport, &vport_msg, rsrc); + idpf_vport_calc_num_q_groups(rsrc); return 0; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h index 3124680a42ae..6fde600dfe53 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -105,8 +105,10 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter); int idpf_get_reg_intr_vecs(struct idpf_vport *vport, struct idpf_vec_regs *reg_vals); int idpf_queue_reg_init(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, struct idpf_queue_id_reg_info *chunks); int idpf_vport_queue_ids_init(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, struct idpf_queue_id_reg_info *chunks); static inline void idpf_vport_deinit_queue_reg_chunks(struct idpf_vport_config *vport_cfg) @@ -152,8 +154,9 @@ int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs); int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs); int idpf_send_disable_queues_msg(struct idpf_vport *vport); -int idpf_send_config_queues_msg(struct idpf_vport *vport); int idpf_send_enable_queues_msg(struct idpf_vport *vport); +int idpf_send_config_queues_msg(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc); int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q); u32 idpf_get_vport_id(struct idpf_vport *vport); @@ -163,7 +166,8 @@ int idpf_send_destroy_vport_msg(struct idpf_vport *vport); int idpf_send_enable_vport_msg(struct idpf_vport *vport); int idpf_send_disable_vport_msg(struct idpf_vport *vport); -int idpf_vport_adjust_qs(struct idpf_vport *vport); +int idpf_vport_adjust_qs(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc); int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, struct idpf_vport_max_q *max_q); void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, @@ -180,7 +184,9 @@ int idpf_get_vec_ids(struct idpf_adapter *adapter, struct virtchnl2_vector_chunks *chunks); int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors); int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter); -int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); +int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, + struct idpf_q_vec_rsrc *rsrc, + bool map); int idpf_add_del_mac_filters(struct idpf_vport *vport, struct idpf_netdev_priv *np, diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c index 958d16f87424..2b411bf5184f 100644 --- a/drivers/net/ethernet/intel/idpf/xdp.c +++ b/drivers/net/ethernet/intel/idpf/xdp.c @@ -6,17 +6,17 @@ #include "xdp.h" #include "xsk.h" -static int idpf_rxq_for_each(const struct idpf_vport *vport, +static int idpf_rxq_for_each(const struct idpf_q_vec_rsrc *rsrc, int (*fn)(struct idpf_rx_queue *rxq, void *arg), void *arg) { - bool splitq = idpf_is_queue_model_split(vport->rxq_model); + 
bool splitq = idpf_is_queue_model_split(rsrc->rxq_model); - if (!vport->rxq_grps) + if (!rsrc->rxq_grps) return -ENETDOWN; - for (u32 i = 0; i < vport->num_rxq_grp; i++) { - const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { + const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i]; u32 num_rxq; if (splitq) @@ -45,7 +45,8 @@ static int idpf_rxq_for_each(const struct idpf_vport *vport, static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg) { const struct idpf_vport *vport = rxq->q_vector->vport; - bool split = idpf_is_queue_model_split(vport->rxq_model); + const struct idpf_q_vec_rsrc *rsrc; + bool split; int err; err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx, @@ -54,6 +55,9 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg) if (err) return err; + rsrc = &vport->dflt_qv_rsrc; + split = idpf_is_queue_model_split(rsrc->rxq_model); + if (idpf_queue_has(XSK, rxq)) { err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, @@ -86,9 +90,9 @@ int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq) return __idpf_xdp_rxq_info_init(rxq, NULL); } -int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport) +int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc) { - return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL); + return idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_init, NULL); } static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg) @@ -111,10 +115,10 @@ void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model) __idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model); } -void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport) +void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc) { - idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit, - (void *)(size_t)vport->rxq_model); + idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_deinit, + (void *)(size_t)rsrc->rxq_model); } static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg) @@ -132,10 +136,10 @@ static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg) return 0; } -void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport, +void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc, struct bpf_prog *xdp_prog) { - idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog); + idpf_rxq_for_each(rsrc, idpf_xdp_rxq_assign_prog, xdp_prog); } static void idpf_xdp_tx_timer(struct work_struct *work); @@ -397,7 +401,7 @@ static const struct xdp_metadata_ops idpf_xdpmo = { void idpf_xdp_set_features(const struct idpf_vport *vport) { - if (!idpf_is_queue_model_split(vport->rxq_model)) + if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model)) return; libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo, @@ -409,6 +413,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport, const struct netdev_bpf *xdp) { const struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; struct bpf_prog *old, *prog = xdp->prog; struct idpf_vport_config *cfg; int ret; @@ -419,7 +424,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport, !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) || !!vport->xdp_prog == !!prog) { if (test_bit(IDPF_VPORT_UP, np->state)) - idpf_xdp_copy_prog_to_rqs(vport, prog); + idpf_xdp_copy_prog_to_rqs(rsrc, prog); old = xchg(&vport->xdp_prog, prog); if (old) @@ -464,7 +469,7 @@ int idpf_xdp(struct net_device *dev, struct 
netdev_bpf *xdp) idpf_vport_ctrl_lock(dev); vport = idpf_netdev_to_vport(dev); - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model)) goto notsupp; switch (xdp->command) { diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h index 479f5ef3c604..7ffc6955dfae 100644 --- a/drivers/net/ethernet/intel/idpf/xdp.h +++ b/drivers/net/ethernet/intel/idpf/xdp.h @@ -9,10 +9,10 @@ #include "idpf_txrx.h" int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq); -int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport); +int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc); void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model); -void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport); -void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport, +void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc); +void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc, struct bpf_prog *xdp_prog); int idpf_xdpsqs_get(const struct idpf_vport *vport); diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c index fd2cc43ab43c..e4768ec07336 100644 --- a/drivers/net/ethernet/intel/idpf/xsk.c +++ b/drivers/net/ethernet/intel/idpf/xsk.c @@ -26,13 +26,14 @@ static void idpf_xsk_setup_rxq(const struct idpf_vport *vport, static void idpf_xsk_setup_bufq(const struct idpf_vport *vport, struct idpf_buf_queue *bufq) { + const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc; struct xsk_buff_pool *pool; u32 qid = U32_MAX; - for (u32 i = 0; i < vport->num_rxq_grp; i++) { - const struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + for (u32 i = 0; i < rsrc->num_rxq_grp; i++) { + const struct idpf_rxq_group *grp = &rsrc->rxq_grps[i]; - for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { + for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) { if (&grp->splitq.bufq_sets[j].bufq == bufq) { qid = grp->splitq.rxq_sets[0]->rxq.idx; goto setup; -- 2.47.1
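
For reviewers following the conversion: every path that previously read queue counts, queue groups, or the queuing model straight off struct idpf_vport now takes a struct idpf_q_vec_rsrc explicitly, and existing callers pass the default set embedded in the vport. Below is a minimal sketch of the resulting caller pattern; example_vport_open() is a hypothetical illustration and not part of this patch, while the idpf_* calls are the reworked APIs from the diff above.

/* Hypothetical caller sketch, not part of this patch: shows how the
 * reworked APIs are driven from the default per-vport resource set.
 */
static int example_vport_open(struct idpf_vport *vport)
{
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	int err;

	/* Queue counts and models now live in *rsrc, so it is passed
	 * down instead of being re-derived from the vport everywhere.
	 */
	err = idpf_vport_queues_alloc(vport, rsrc);
	if (err)
		return err;

	err = idpf_send_config_queues_msg(vport, rsrc);
	if (err)
		goto rel_queues;

	err = idpf_send_map_unmap_queue_vector_msg(vport, rsrc, true);
	if (err)
		goto rel_queues;

	return 0;

rel_queues:
	idpf_vport_queues_rel(vport, rsrc);
	return err;
}

Nothing below these entry points depends on dflt_qv_rsrc specifically, so presumably this indirection is what allows a resource set other than the default one to be plugged in later.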