The MANA hardware supports a maximum of 30 scatter-gather entries
(SGEs) per TX WQE. In rare configurations where MAX_SKB_FRAGS + 2
exceeds this limit, the driver drops the skb. Add a check in
mana_start_xmit() to detect such cases and linearize the skb before
transmission.

Return NETDEV_TX_BUSY only when mana_gd_post_work_request() fails with
-ENOSPC; send all other errors to free_sgl_ptr to free resources and
record the tx drop.

Signed-off-by: Aditya Garg
Reviewed-by: Dipayaan Roy
---
 drivers/net/ethernet/microsoft/mana/mana_en.c | 26 +++++++++++++++----
 include/net/mana/gdma.h                       |  8 +++++-
 include/net/mana/mana.h                       |  1 +
 3 files changed, 29 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index f4fc86f20213..22605753ca84 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -20,6 +20,7 @@
 #include
 #include
+#include

 static DEFINE_IDA(mana_adev_ida);

@@ -289,6 +290,19 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cq = &apc->tx_qp[txq_idx].tx_cq;
 	tx_stats = &txq->stats;

+	BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+	if (skb_shinfo(skb)->nr_frags + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES) {
+		netdev_info_once(ndev,
+				 "nr_frags %d exceeds max supported sge limit. Attempting skb_linearize\n",
+				 skb_shinfo(skb)->nr_frags);
+		if (skb_linearize(skb)) {
+			netdev_warn_once(ndev, "Failed to linearize skb\n");
+			goto tx_drop_count;
+		}
+	}
+#endif
+
 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

@@ -402,8 +416,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}

-	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 		pkg.wqe_req.sgl = pkg.sgl_array;
 	} else {
@@ -438,9 +450,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)

 	if (err) {
 		(void)skb_dequeue_tail(&txq->pending_skbs);
+		mana_unmap_skb(skb, apc);
 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
-		err = NETDEV_TX_BUSY;
-		goto tx_busy;
+		if (err == -ENOSPC) {
+			err = NETDEV_TX_BUSY;
+			goto tx_busy;
+		}
+		goto free_sgl_ptr;
 	}

 	err = NETDEV_TX_OK;
@@ -1606,7 +1622,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 	return 0;
 }

-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
 {
 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 57df78cfbf82..67fab1a5f382 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -489,6 +489,8 @@ struct gdma_wqe {
 #define MAX_TX_WQE_SIZE 512
 #define MAX_RX_WQE_SIZE 256

+#define MANA_MAX_TX_WQE_SGL_ENTRIES 30
+
 #define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
 		sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
 		sizeof(struct gdma_sge))
@@ -591,6 +593,9 @@ enum {
 /* Driver can self reset on FPGA Reconfig EQE notification */
 #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)

+/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
+#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)
+
 #define GDMA_DRV_CAP_FLAGS1 \
 	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
 	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
@@ -599,7 +604,8 @@ enum {
 	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
 	 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
 	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
-	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)
+	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
+	 GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE)

 #define GDMA_DRV_CAP_FLAGS2 0

diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 0921485565c0..330e1bb088bb 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -580,6 +580,7 @@ int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
 void mana_query_phy_stats(struct mana_port_context *apc);
 int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
 void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);

 extern const struct ethtool_ops mana_ethtool_ops;
 extern struct dentry *mana_debugfs_root;
-- 
2.34.1
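
A note on when the new branch is actually compiled in: CONFIG_MAX_SKB_FRAGS
defaults to 17, so on common builds MAX_SKB_FRAGS + 2 = 19 stays well within
the 30-SGE limit and the preprocessor guard removes the check entirely; the
option can be raised as high as 45 (e.g. for BIG TCP), and only then does the
per-packet branch exist. A minimal sketch of the guard with the constants
written out (illustrative only, not driver code):

	/* Illustrative sketch of the compile-time guard, not driver code. */
	#define MANA_MAX_TX_WQE_SGL_ENTRIES 30	/* hardware SGE limit per TX WQE */

	#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
	/* Present only when CONFIG_MAX_SKB_FRAGS > 28; with the default
	 * of 17 this block, and its runtime branch, compiles away.
	 */
	#endif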
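
On the error-path change: NETDEV_TX_BUSY asks the core to requeue the skb and
retry it later, so it is only correct for transient conditions such as a full
send queue (-ENOSPC); returned for a persistent error, it would spin on the
same skb forever. Any other error must consume the skb and account a drop,
which is what the free_sgl_ptr path does here. A hedged sketch of the general
ndo_start_xmit pattern (post_to_hw_queue is a hypothetical helper, not a MANA
function):

	/* Generic pattern, for illustration only. */
	err = post_to_hw_queue(ndev, skb);	/* hypothetical helper */
	if (err == -ENOSPC)
		return NETDEV_TX_BUSY;		/* transient: stack retries the skb */
	if (err) {
		dev_kfree_skb_any(skb);		/* persistent: consume the skb */
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;		/* skb consumed, no retry */
	}
	return NETDEV_TX_OK;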