Initialize ingress inline IPsec offload when the ESP offload feature is
enabled via ethtool. As part of initialization, the following mailboxes
must be invoked to configure inline IPsec:

NIX_INLINE_IPSEC_LF_CFG - Every NIX LF has the provision to maintain a
contiguous SA table. This mailbox configures the SA table base address,
the size of each SA, and the maximum number of entries in the table.
Currently, we support a 128-entry table with each SA of size 1024 bytes.

NIX_LF_INLINE_RQ_CFG - Post decryption, CPT sends a metapacket of 256
bytes which has enough packet headers to help NIX RX classify it.
However, since the packet is not complete, we cannot perform checksum
and packet length verification. Hence, configure the RQ context to
disable L3 and L4 checksum and length verification for packets coming
from CPT.

NIX_INLINE_IPSEC_CFG - RVU hardware supports one common CPT LF for
inbound ingress IPsec flows. This CPT LF is configured via this mailbox
and is a one-time system-wide configuration.

Signed-off-by: Tanmay Jagdale
---
Changes in V5:
- Moved backpressure related configuration out of this patch into a
  separate patch dedicated to backpressure changes.

Changes in V4:
- Moved BPID configuration before initializing CPT for inbound
  configuration

Changes in V3:
- None

Changes in V2:
- Fixed commit message to be within 75 characters

V1 Link: https://lore.kernel.org/netdev/20250502132005.611698-13-tanmay@marvell.com/
V2 Link: https://lore.kernel.org/netdev/20250618113020.130888-12-tanmay@marvell.com/
V3 Link: https://lore.kernel.org/netdev/20250711121317.340326-12-tanmay@marvell.com/
V4 Link: https://lore.kernel.org/netdev/20250819021507.323752-12-tanmay@marvell.com/

 .../marvell/octeontx2/nic/cn10k_ipsec.c       | 130 ++++++++++++++++++
 1 file changed, 130 insertions(+)

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 0899c6832c0d..664ccfc7e80d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -342,6 +342,114 @@ static int cn10k_outb_cpt_init(struct net_device *netdev)
 	return ret;
 }
 
+static int cn10k_inb_nix_inline_lf_cfg(struct otx2_nic *pfvf)
+{
+	struct nix_inline_ipsec_lf_cfg *req;
+	int ret = 0;
+
+	mutex_lock(&pfvf->mbox.lock);
+	req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(&pfvf->mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	req->sa_base_addr = pfvf->ipsec.inb_sa->iova;
+	req->ipsec_cfg0.tag_const = 0;
+	req->ipsec_cfg0.tt = 0;
+	req->ipsec_cfg0.lenm1_max = 11872; /* (Max packet size - 128 (first skip)) */
+	req->ipsec_cfg0.sa_pow2_size = 0xb; /* 2048 */
+	req->ipsec_cfg1.sa_idx_max = CN10K_IPSEC_INB_MAX_SA - 1;
+	req->ipsec_cfg1.sa_idx_w = 0x7;
+	req->enable = 1;
+
+	ret = otx2_sync_mbox_msg(&pfvf->mbox);
+error:
+	mutex_unlock(&pfvf->mbox.lock);
+	return ret;
+}
+
+static int cn10k_inb_nix_inline_lf_rq_cfg(struct otx2_nic *pfvf)
+{
+	struct nix_rq_cpt_field_mask_cfg_req *req;
+	int ret = 0, i;
+
+	mutex_lock(&pfvf->mbox.lock);
+	req = otx2_mbox_alloc_msg_nix_lf_inline_rq_cfg(&pfvf->mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	for (i = 0; i < RQ_CTX_MASK_MAX; i++)
+		req->rq_ctx_word_mask[i] = 0xffffffffffffffff;
+
+	req->rq_set.len_ol3_dis = 1;
+	req->rq_set.len_ol4_dis = 1;
+	req->rq_set.len_il3_dis = 1;
+	req->rq_set.len_il4_dis = 1;
+	req->rq_set.csum_ol4_dis = 1;
+	req->rq_set.csum_il4_dis = 1;
+	req->rq_set.lenerr_dis = 1;
+	req->rq_set.port_ol4_dis = 1;
+	req->rq_set.port_il4_dis = 1;
+
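+	/* Keep LPB/SPB/XQE drops disabled for this RQ and enable the
+	 * small packet buffer (SPB).
+	 */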
+	req->rq_set.lpb_drop_ena = 0;
+	req->rq_set.spb_drop_ena = 0;
+	req->rq_set.xqe_drop_ena = 0;
+	req->rq_set.spb_ena = 1;
+	req->rq_set.ena = 1;
+
+	req->rq_mask.len_ol3_dis = 0;
+	req->rq_mask.len_ol4_dis = 0;
+	req->rq_mask.len_il3_dis = 0;
+	req->rq_mask.len_il4_dis = 0;
+	req->rq_mask.csum_ol4_dis = 0;
+	req->rq_mask.csum_il4_dis = 0;
+	req->rq_mask.lenerr_dis = 0;
+	req->rq_mask.port_ol4_dis = 0;
+	req->rq_mask.port_il4_dis = 0;
+	req->rq_mask.lpb_drop_ena = 0;
+	req->rq_mask.spb_drop_ena = 0;
+	req->rq_mask.xqe_drop_ena = 0;
+	req->rq_mask.spb_ena = 0;
+	req->rq_mask.ena = 0;
+
+	/* Setup SPB fields for second pass */
+	req->ipsec_cfg1.rq_mask_enable = 1;
+
+	ret = otx2_sync_mbox_msg(&pfvf->mbox);
+error:
+	mutex_unlock(&pfvf->mbox.lock);
+	return ret;
+}
+
+static int cn10k_inb_nix_inline_ipsec_cfg(struct otx2_nic *pfvf)
+{
+	struct cpt_rx_inline_lf_cfg_msg *req;
+	int ret = 0;
+
+	mutex_lock(&pfvf->mbox.lock);
+	req = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(&pfvf->mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	req->sso_pf_func = 0;
+	req->opcode = CN10K_IPSEC_MAJOR_OP_INB_IPSEC | (1 << 6);
+	req->param1 = 7; /* bit 0:ip_csum_dis 1:tcp_csum_dis 2:esp_trailer_dis */
+	req->param2 = 0;
+	req->credit = (pfvf->qset.rqe_cnt * 3) / 4;
+	req->credit_th = pfvf->qset.rqe_cnt / 10;
+	req->ctx_ilen_valid = 1;
+	req->ctx_ilen = 5;
+
+	ret = otx2_sync_mbox_msg(&pfvf->mbox);
+error:
+	mutex_unlock(&pfvf->mbox.lock);
+	return ret;
+}
+
 static int cn10k_ipsec_ingress_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
 {
 	struct nix_cn10k_aq_enq_req *aq;
@@ -601,6 +709,28 @@ static int cn10k_inb_cpt_init(struct net_device *netdev)
 	/* Enable interrupt */
 	otx2_write64(pfvf, NPA_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
 
+	/* Enable inbound inline IPSec in NIX LF */
+	ret = cn10k_inb_nix_inline_lf_cfg(pfvf);
+	if (ret) {
+		netdev_err(netdev, "Error configuring NIX for Inline IPSec\n");
+		goto out;
+	}
+
+	/* IPsec specific RQ settings in NIX LF */
+	ret = cn10k_inb_nix_inline_lf_rq_cfg(pfvf);
+	if (ret) {
+		netdev_err(netdev, "Error configuring NIX for Inline IPSec\n");
+		goto out;
+	}
+
+	/* One-time configuration to enable CPT LF for inline inbound IPSec */
+	ret = cn10k_inb_nix_inline_ipsec_cfg(pfvf);
+	if (ret && ret != -EEXIST)
+		netdev_err(netdev, "CPT LF configuration error\n");
+	else
+		ret = 0;
+
+out:
 	return ret;
 }
-- 
2.43.0
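
For reference, this initialization is gated on the ESP offload feature
being enabled via ethtool, per the commit message. A minimal usage
sketch (the interface name is a placeholder, not part of the patch):

	# Enable ESP hardware offload on the port; the driver then runs
	# the inbound inline IPsec initialization added above.
	ethtool -K eth0 esp-hw-offload on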