The ns83820 driver currently ignores the return values of dma_map_single()
and skb_frag_dma_map() in the transmit path. If DMA mapping fails due to
IOMMU exhaustion or SWIOTLB pressure, the driver may proceed with invalid
DMA addresses, potentially causing hardware errors, data corruption, or
system instability. Additionally, if mapping fails midway through a
fragmented packet, previously mapped DMA resources are not released,
leading to DMA resource leaks.

Fix this by:

1. Checking dma_mapping_error() after each DMA mapping call.
2. Implementing an error handling path that unmaps the already-mapped
   buffers (both the linear part and the fragments) with dma_unmap_single().
3. Freeing the skb with dev_kfree_skb_any(), which is safe in both process
   and softirq context, as hard_start_xmit may be called with IRQs enabled.
4. Returning NETDEV_TX_OK to drop the packet gracefully and avoid stalling
   the TX queue.

This ensures compliance with the DMA API guidelines and improves driver
stability under memory pressure.

Fixes: fd9e4d6fec15 ("natsemi: switch from 'pci_' to 'dma_' API")
Cc: stable@vger.kernel.org
Signed-off-by: Wang Jun <1742789905@qq.com>
---
 drivers/net/ethernet/natsemi/ns83820.c | 32 ++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index cdbf82affa7b..90465e4977c3 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1051,6 +1051,12 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
 	int stopped = 0;
 	int do_intr = 0;
 	volatile __le32 *first_desc;
+	dma_addr_t frag_dma_addr[MAX_SKB_FRAGS];
+	unsigned int frag_dma_len[MAX_SKB_FRAGS];
+	int frag_mapped_count = 0;
+	dma_addr_t main_buf = 0;
+	unsigned int main_len = 0;
+	int i;
 
 	dprintk("ns83820_hard_start_xmit\n");
 
@@ -1121,6 +1127,13 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
 	buf = dma_map_single(&dev->pci_dev->dev, skb->data, len,
 			     DMA_TO_DEVICE);
+	if (dma_mapping_error(&dev->pci_dev->dev, buf)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	main_buf = buf;
+	main_len = len;
+
 	first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
 
 	for (;;) {
@@ -1144,6 +1157,15 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
 		buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
 				       skb_frag_size(frag),
 				       DMA_TO_DEVICE);
+		if (dma_mapping_error(&dev->pci_dev->dev, buf))
+			goto dma_map_error;
+
+		if (frag_mapped_count < MAX_SKB_FRAGS) {
+			frag_dma_addr[frag_mapped_count] = buf;
+			frag_dma_len[frag_mapped_count] = skb_frag_size(frag);
+			frag_mapped_count++;
+		}
+
 		dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
 			(long long)buf, (long) page_to_pfn(frag->page),
 			frag->page_offset);
@@ -1166,6 +1188,16 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
 	if (stopped && (dev->tx_done_idx != tx_done_idx) &&
 	    start_tx_okay(dev))
 		netif_start_queue(ndev);
 
+	return NETDEV_TX_OK;
+dma_map_error:
+	dma_unmap_single(&dev->pci_dev->dev, main_buf, main_len, DMA_TO_DEVICE);
+	for (i = 0; i < frag_mapped_count; i++) {
+		dma_unmap_single(&dev->pci_dev->dev, frag_dma_addr[i], frag_dma_len[i],
+				 DMA_TO_DEVICE);
+	}
+
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
-- 
2.43.0
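
For reference, below is a minimal stand-alone sketch of the map/check/unwind
pattern the commit message describes, written against the generic DMA and skb
APIs rather than the ns83820 descriptor code. The helper name map_tx_skb()
and its surrounding structure are illustrative assumptions, not part of the
driver or of this patch.

/*
 * Illustrative sketch only -- not taken from the ns83820 patch above.
 * Shows the generic map/check/unwind pattern for a TX path.
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t map_tx_skb(struct device *dmadev, struct sk_buff *skb)
{
	dma_addr_t frag_addr[MAX_SKB_FRAGS];
	unsigned int frag_len[MAX_SKB_FRAGS];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int head_len = skb_headlen(skb);
	dma_addr_t head;
	int i, mapped = 0;

	/* Map the linear part and validate the handle before using it. */
	head = dma_map_single(dmadev, skb->data, head_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dmadev, head))
		goto drop;

	/* Map each fragment; on failure, unwind everything mapped so far. */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_addr[i] = skb_frag_dma_map(dmadev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
		if (dma_mapping_error(dmadev, frag_addr[i]))
			goto unwind;
		frag_len[i] = skb_frag_size(frag);
		mapped++;
	}

	/* ... hand the mapped addresses to the hardware descriptors ... */
	return NETDEV_TX_OK;

unwind:
	/* skb_frag_dma_map() wraps dma_map_page(), so pair with dma_unmap_page(). */
	for (i = 0; i < mapped; i++)
		dma_unmap_page(dmadev, frag_addr[i], frag_len[i], DMA_TO_DEVICE);
	dma_unmap_single(dmadev, head, head_len, DMA_TO_DEVICE);
drop:
	/* Safe in both process and softirq context. */
	dev_kfree_skb_any(skb);
	/* Drop the packet but keep the TX queue running. */
	return NETDEV_TX_OK;
}

Returning NETDEV_TX_OK after freeing the skb is the conventional way to drop
a packet the driver cannot map; returning NETDEV_TX_BUSY instead would cause
the stack to requeue and retry a packet that is likely to fail again.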