Use unified Halo context present in CN20K hardware for octeontx2
netdevs instead of aura and pool contexts.

Signed-off-by: Subbaraya Sundeep
---
 .../ethernet/marvell/octeontx2/nic/cn20k.c    | 213 +++++++++---------
 .../ethernet/marvell/octeontx2/nic/cn20k.h    |   3 +
 .../marvell/octeontx2/nic/otx2_common.h       |   3 +
 .../ethernet/marvell/octeontx2/nic/otx2_pf.c  |   8 +
 4 files changed, 124 insertions(+), 103 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
index a5a8f4558717..08033858c59d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
@@ -242,15 +242,6 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
 
 #define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
 
-static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id)
-{
-#ifdef CONFIG_DCB
-	return pfvf->queue_to_pfc_map[aura_id];
-#else
-	return 0;
-#endif
-}
-
 static int cn20k_tc_get_entry_index(struct otx2_flow_config *flow_cfg,
 				    struct otx2_tc_flow *node)
 {
@@ -517,84 +508,7 @@ int cn20k_tc_alloc_entry(struct otx2_nic *nic,
 	return 0;
 }
 
-static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
-			      int pool_id, int numptrs)
-{
-	struct npa_cn20k_aq_enq_req *aq;
-	struct otx2_pool *pool;
-	u8 bpid_idx;
-	int err;
-
-	pool = &pfvf->qset.pool[pool_id];
-
-	/* Allocate memory for HW to update Aura count.
-	 * Alloc one cache line, so that it fits all FC_STYPE modes.
-	 */
-	if (!pool->fc_addr) {
-		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
-		if (err)
-			return err;
-	}
-
-	/* Initialize this aura's context via AF */
-	aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
-	if (!aq) {
-		/* Shared mbox memory buffer is full, flush it and retry */
-		err = otx2_sync_mbox_msg(&pfvf->mbox);
-		if (err)
-			return err;
-		aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
-		if (!aq)
-			return -ENOMEM;
-	}
-
-	aq->aura_id = aura_id;
-
-	/* Will be filled by AF with correct pool context address */
-	aq->aura.pool_addr = pool_id;
-	aq->aura.pool_caching = 1;
-	aq->aura.shift = ilog2(numptrs) - 8;
-	aq->aura.count = numptrs;
-	aq->aura.limit = numptrs;
-	aq->aura.avg_level = 255;
-	aq->aura.ena = 1;
-	aq->aura.fc_ena = 1;
-	aq->aura.fc_addr = pool->fc_addr->iova;
-	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
-
-	/* Enable backpressure for RQ aura */
-	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
-		aq->aura.bp_ena = 0;
-		/* If NIX1 LF is attached then specify NIX1_RX.
-		 *
-		 * Below NPA_AURA_S[BP_ENA] is set according to the
-		 * NPA_BPINTF_E enumeration given as:
-		 * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
-		 * NIX0_RX is 0x0 + 0*0x1 = 0
-		 * NIX1_RX is 0x0 + 1*0x1 = 1
-		 * But in HRM it is given that
-		 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
-		 * NIX-RX based on [BP] level. One bit per NIX-RX; index
-		 * enumerated by NPA_BPINTF_E."
-		 */
-		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
-			aq->aura.bp_ena = 1;
-
-		bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id);
-		aq->aura.bpid = pfvf->bpid[bpid_idx];
-
-		/* Set backpressure level for RQ's Aura */
-		aq->aura.bp = RQ_BP_LVL_AURA;
-	}
-
-	/* Fill AQ info */
-	aq->ctype = NPA_AQ_CTYPE_AURA;
-	aq->op = NPA_AQ_INSTOP_INIT;
-
-	return 0;
-}
-
-static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+static int cn20k_halo_aq_init(struct otx2_nic *pfvf, u16 pool_id,
 			      int stack_pages, int numptrs, int buf_size,
 			      int type)
 {
@@ -610,36 +524,57 @@ static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
 	if (err)
 		return err;
 
+	/* Allocate memory for HW to update Aura count.
+	 * Alloc one cache line, so that it fits all FC_STYPE modes.
+	 */
+	if (!pool->fc_addr) {
+		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+		if (err) {
+			qmem_free(pfvf->dev, pool->stack);
+			return err;
+		}
+	}
+
 	pool->rbsize = buf_size;
 
-	/* Initialize this pool's context via AF */
+	/* Initialize this aura's context via AF */
 	aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
 	if (!aq) {
 		/* Shared mbox memory buffer is full, flush it and retry */
 		err = otx2_sync_mbox_msg(&pfvf->mbox);
-		if (err) {
-			qmem_free(pfvf->dev, pool->stack);
-			return err;
-		}
+		if (err)
+			goto free_mem;
 		aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
 		if (!aq) {
-			qmem_free(pfvf->dev, pool->stack);
-			return -ENOMEM;
+			err = -ENOMEM;
+			goto free_mem;
 		}
 	}
 
 	aq->aura_id = pool_id;
-	aq->pool.stack_base = pool->stack->iova;
-	aq->pool.stack_caching = 1;
-	aq->pool.ena = 1;
-	aq->pool.buf_size = buf_size / 128;
-	aq->pool.stack_max_pages = stack_pages;
-	aq->pool.shift = ilog2(numptrs) - 8;
-	aq->pool.ptr_start = 0;
-	aq->pool.ptr_end = ~0ULL;
+
+	aq->halo.stack_base = pool->stack->iova;
+	aq->halo.stack_caching = 1;
+	aq->halo.ena = 1;
+	aq->halo.buf_size = buf_size / 128;
+	aq->halo.stack_max_pages = stack_pages;
+	aq->halo.shift = ilog2(numptrs) - 8;
+	aq->halo.ptr_start = 0;
+	aq->halo.ptr_end = ~0ULL;
+
+	aq->halo.avg_level = 255;
+	aq->halo.fc_ena = 1;
+	aq->halo.fc_addr = pool->fc_addr->iova;
+	aq->halo.fc_hyst_bits = 0; /* Store count on all updates */
+
+	if (pfvf->npa_dpc_valid) {
+		aq->halo.op_dpc_ena = 1;
+		aq->halo.op_dpc_set = pfvf->npa_dpc;
+	}
+	aq->halo.unified_ctx = 1;
 
 	/* Fill AQ info */
-	aq->ctype = NPA_AQ_CTYPE_POOL;
+	aq->ctype = NPA_AQ_CTYPE_HALO;
 	aq->op = NPA_AQ_INSTOP_INIT;
 
 	if (type != AURA_NIX_RQ) {
@@ -661,6 +596,78 @@ static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
 	}
 
 	return 0;
+
+free_mem:
+	qmem_free(pfvf->dev, pool->stack);
+	qmem_free(pfvf->dev, pool->fc_addr);
+	return err;
+}
+
+static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+			      int pool_id, int numptrs)
+{
+	return 0;
+}
+
+static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+			      int stack_pages, int numptrs, int buf_size,
+			      int type)
+{
+	return cn20k_halo_aq_init(pfvf, pool_id, stack_pages,
+				  numptrs, buf_size, type);
+}
+
+int cn20k_npa_alloc_dpc(struct otx2_nic *nic)
+{
+	struct npa_cn20k_dpc_alloc_req *req;
+	struct npa_cn20k_dpc_alloc_rsp *rsp;
+	int err;
+
+	req = otx2_mbox_alloc_msg_npa_cn20k_dpc_alloc(&nic->mbox);
+	if (!req)
+		return -ENOMEM;
+
+	/* Count successful ALLOC requests only */
+	req->dpc_conf = 1ULL << 4;
+
+	err = otx2_sync_mbox_msg(&nic->mbox);
+	if (err)
+		return err;
+
+	rsp = (struct npa_cn20k_dpc_alloc_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+								  0, &req->hdr);
+	if (IS_ERR(rsp))
+		return PTR_ERR(rsp);
+
+	nic->npa_dpc = rsp->cntr_id;
+	nic->npa_dpc_valid = true;
+
+	return 0;
+}
+
+int cn20k_npa_free_dpc(struct otx2_nic *nic)
+{
+	struct npa_cn20k_dpc_free_req *req;
+	int err;
+
+	if (!nic->npa_dpc_valid)
+		return 0;
+
+	mutex_lock(&nic->mbox.lock);
+
+	req = otx2_mbox_alloc_msg_npa_cn20k_dpc_free(&nic->mbox);
+	if (!req) {
+		mutex_unlock(&nic->mbox.lock);
+		return -ENOMEM;
+	}
+
+	req->cntr_id = nic->npa_dpc;
+
+	err = otx2_sync_mbox_msg(&nic->mbox);
+
+	mutex_unlock(&nic->mbox.lock);
+
+	return err;
 }
 
 static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
index b5e527f6d7eb..16a69d84ea79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
@@ -28,4 +28,7 @@ int cn20k_tc_alloc_entry(struct otx2_nic *nic, struct otx2_tc_flow *new_node,
 			 struct npc_install_flow_req *dummy);
 int cn20k_tc_free_mcam_entry(struct otx2_nic *nic, u16 entry);
 
+int cn20k_npa_alloc_dpc(struct otx2_nic *nic);
+int cn20k_npa_free_dpc(struct otx2_nic *nic);
+
 #endif /* CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index eecee612b7b2..f997dfc0fedd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -592,6 +592,9 @@ struct otx2_nic {
 	struct cn10k_ipsec ipsec;
 	/* af_xdp zero-copy */
 	unsigned long *af_xdp_zc_qidx;
+
+	bool npa_dpc_valid;
+	u8 npa_dpc; /* NPA DPC counter id */
 };
 
 static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index ee623476e5ff..2b5fe67d297c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1651,6 +1651,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
 	if (!is_otx2_lbkvf(pf->pdev))
 		otx2_nix_config_bp(pf, true);
 
+	if (is_cn20k(pf->pdev))
+		cn20k_npa_alloc_dpc(pf);
+
 	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
 	err = otx2_rq_aura_pool_init(pf);
 	if (err) {
@@ -1726,6 +1729,8 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
 	otx2_aura_pool_free(pf);
 err_free_nix_lf:
+	if (pf->npa_dpc_valid)
+		cn20k_npa_free_dpc(pf);
 	mutex_lock(&mbox->lock);
 	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
 	if (free_req) {
@@ -1790,6 +1795,9 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
 
 	otx2_free_sq_res(pf);
 
+	if (is_cn20k(pf->pdev))
+		cn20k_npa_free_dpc(pf);
+
 	/* Free RQ buffer pointers*/
 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
 
-- 
2.48.1