From: Geetha sowjanya

Add mbox handlers to allocate/free BPIDs from the free BPID pool.
These can be used by a PF/VF to request up to 8 BPIDs. Also add an
mbox handler to configure the NIX_AF_RX_CHANX_CFG register, which
allows a channel to be configured with multiple BPIDs.

Signed-off-by: Amit Singh Tomar
Signed-off-by: Geetha sowjanya
Signed-off-by: Tanmay Jagdale
---
Changes in V3:
- None

Changes in V2:
- None

V1 Link: https://lore.kernel.org/netdev/20250502132005.611698-9-tanmay@marvell.com/
V2 Link: https://lore.kernel.org/netdev/20250618113020.130888-8-tanmay@marvell.com/
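A quick usage sketch for reviewers (not part of the patch): how a PF/VF
driver could request BPIDs with the new NIX_ALLOC_BPIDS message, using
the standard otx2 mbox request/response pattern. otx2_request_bpids() is
a hypothetical helper; it assumes the otx2_mbox_alloc_msg_nix_alloc_bpids()
wrapper that the M() macro machinery generates for the entries added to
mbox.h below.

/* Hypothetical example (not part of this patch): request 'cnt' BPIDs
 * of the CPT interface type from the AF and collect them into 'bpids'.
 */
static int otx2_request_bpids(struct otx2_nic *pfvf, u16 *bpids, u8 cnt)
{
	struct nix_alloc_bpid_req *req;
	struct nix_bpids *rsp;
	int rc, i;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_alloc_bpids(&pfvf->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}
	req->bpid_cnt = cnt;
	req->type = NIX_INTF_TYPE_CPT;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		goto out;

	rsp = (struct nix_bpids *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
						    0, &req->hdr);
	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	/* The AF may grant fewer BPIDs than requested */
	for (i = 0; i < rsp->bpid_cnt; i++)
		bpids[i] = rsp->bpids[i];
	rc = rsp->bpid_cnt;
out:
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}

Because the AF stops allocating once the pool is exhausted and still
returns success, callers should rely on rsp->bpid_cnt rather than assume
all req->bpid_cnt BPIDs were granted.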
 .../ethernet/marvell/octeontx2/af/common.h    |   1 +
 .../net/ethernet/marvell/octeontx2/af/mbox.h  |  26 +++++
 .../ethernet/marvell/octeontx2/af/rvu_nix.c   | 110 ++++++++++++++++++++
 3 files changed, 137 insertions(+)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 8a08bebf08c2..656f6e5c8524 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -191,6 +191,7 @@ enum nix_scheduler {
 #define NIX_INTF_TYPE_CGX		0
 #define NIX_INTF_TYPE_LBK		1
 #define NIX_INTF_TYPE_SDP		2
+#define NIX_INTF_TYPE_CPT		3
 
 #define MAX_LMAC_PKIND			12
 #define NIX_LINK_CGX_LMAC(a, b)		(0 + 4 * (a) + (b))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index f179070ab702..717a37c7d48c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -347,6 +347,9 @@ M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update,			\
 				nix_mcast_grp_update_req,		\
 				nix_mcast_grp_update_rsp)		\
 M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp)	\
+M(NIX_ALLOC_BPIDS, 0x8028, nix_alloc_bpids, nix_alloc_bpid_req, nix_bpids) \
+M(NIX_FREE_BPIDS, 0x8029, nix_free_bpids, nix_bpids, msg_rsp)		\
+M(NIX_RX_CHAN_CFG, 0x802a, nix_rx_chan_cfg, nix_rx_chan_cfg, nix_rx_chan_cfg) \
 /* MCS mbox IDs (range 0xA000 - 0xBFFF) */				\
 M(MCS_ALLOC_RESOURCES,	0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
 				mcs_alloc_rsrc_rsp)			\
@@ -1360,6 +1363,29 @@ struct nix_mcast_grp_update_rsp {
 	u32 mce_start_index;
 };
 
+struct nix_alloc_bpid_req {
+	struct mbox_msghdr hdr;
+	u8 bpid_cnt;
+	u8 type;
+	u64 rsvd;
+};
+
+struct nix_bpids {
+	struct mbox_msghdr hdr;
+	u8 bpid_cnt;
+	u16 bpids[8];
+	u64 rsvd;
+};
+
+struct nix_rx_chan_cfg {
+	struct mbox_msghdr hdr;
+	u8 type; /* Interface type (CGX/CPT/LBK) */
+	u8 read;
+	u16 chan; /* RX channel to be configured */
+	u64 val; /* NIX_AF_RX_CHANX_CFG value */
+	u64 rsvd;
+};
+
 /* Global NIX inline IPSec configuration */
 struct nix_inline_ipsec_cfg {
 	struct mbox_msghdr hdr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index c68cb0107277..f411e64ca729 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -570,6 +570,116 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
 	mutex_unlock(&rvu->rsrc_lock);
 }
 
+int rvu_mbox_handler_nix_rx_chan_cfg(struct rvu *rvu,
+				     struct nix_rx_chan_cfg *req,
+				     struct nix_rx_chan_cfg *rsp)
+{
+	struct rvu_pfvf *pfvf;
+	int blkaddr;
+	u16 chan;
+
+	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	chan = pfvf->rx_chan_base + req->chan;
+
+	/* CPT RX channels have BIT(11) set in the channel number */
+	if (req->type == NIX_INTF_TYPE_CPT)
+		chan |= BIT(11);
+
+	if (req->read) {
+		rsp->val = rvu_read64(rvu, blkaddr,
+				      NIX_AF_RX_CHANX_CFG(chan));
+		rsp->chan = req->chan;
+	} else {
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), req->val);
+	}
+	return 0;
+}
+
+int rvu_mbox_handler_nix_alloc_bpids(struct rvu *rvu,
+				     struct nix_alloc_bpid_req *req,
+				     struct nix_bpids *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct nix_hw *nix_hw;
+	int blkaddr, cnt = 0;
+	struct nix_bp *bp;
+	int bpid, err;
+
+	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+	if (err)
+		return err;
+
+	/* Reject requests that would overflow the response array */
+	if (req->bpid_cnt > ARRAY_SIZE(rsp->bpids))
+		return NIX_AF_ERR_PARAM;
+
+	bp = &nix_hw->bp;
+
+	/* Interfaces like SSO share one BPID across multiple
+	 * applications, so reuse a BPID already allocated for
+	 * this interface type before allocating a new one.
+	 */
+	if (req->type > NIX_INTF_TYPE_CPT) {
+		for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+			if (cnt == ARRAY_SIZE(rsp->bpids))
+				break;
+			if (bp->intf_map[bpid] == req->type) {
+				rsp->bpids[cnt] = bpid + bp->free_pool_base;
+				rsp->bpid_cnt++;
+				bp->ref_cnt[bpid]++;
+				cnt++;
+			}
+		}
+		if (rsp->bpid_cnt)
+			return 0;
+	}
+
+	for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+		bpid = rvu_alloc_rsrc(&bp->bpids);
+		/* Pool exhausted; report the BPIDs allocated so far */
+		if (bpid < 0)
+			return 0;
+		rsp->bpids[cnt] = bpid + bp->free_pool_base;
+		bp->intf_map[bpid] = req->type;
+		bp->fn_map[bpid] = pcifunc;
+		bp->ref_cnt[bpid]++;
+		rsp->bpid_cnt++;
+	}
+	return 0;
+}
+
+int rvu_mbox_handler_nix_free_bpids(struct rvu *rvu,
+				    struct nix_bpids *req,
+				    struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, cnt, err;
+	struct nix_hw *nix_hw;
+	struct nix_bp *bp;
+	u16 bpid;
+
+	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+	if (err)
+		return err;
+
+	bp = &nix_hw->bp;
+	for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+		bpid = req->bpids[cnt] - bp->free_pool_base;
+		bp->ref_cnt[bpid]--;
+		if (bp->ref_cnt[bpid])
+			continue;
+		rvu_free_rsrc(&bp->bpids, bpid);
+		/* Clear the mappings only for the BPID being freed */
+		bp->fn_map[bpid] = 0;
+		bp->intf_map[bpid] = 0;
+	}
+	return 0;
+}
+
 static u16 nix_get_channel(u16 chan, bool cpt_link)
 {
 	/* CPT channel for a given link channel is always
-- 
2.43.0