From: Christoph Paasch

Doing the call to dma_sync_single_for_cpu() earlier will allow us to
adjust headlen based on the actual size of the protocol headers.

Doing this earlier also means that we don't need to call
mlx5e_copy_skb_header() anymore and can instead call
skb_copy_to_linear_data() directly.

Signed-off-by: Christoph Paasch
---
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b8c609d91d11bd315e8fb67f794a91bd37cd28c0..8bedbda522808cbabc8e62ae91a8c25d66725ebb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2005,17 +2005,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
         struct skb_shared_info *sinfo;
         unsigned int truesize = 0;
         struct bpf_prog *prog;
+        void *va, *head_addr;
         struct sk_buff *skb;
         u32 linear_frame_sz;
         u16 linear_data_len;
         u16 linear_hr;
-        void *va;
 
         prog = rcu_dereference(rq->xdp_prog);
 
+        head_addr = netmem_address(head_page->netmem) + head_offset;
+
         if (prog) {
                 /* area for bpf_xdp_[store|load]_bytes */
-                net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+                net_prefetchw(head_addr);
                 if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
                                                          &wi->linear_page))) {
                         rq->stats->buff_alloc_err++;
@@ -2028,6 +2030,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                 linear_data_len = 0;
                 linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
         } else {
+                dma_addr_t addr;
+
                 skb = napi_alloc_skb(rq->cq.napi,
                                      ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
                 if (unlikely(!skb)) {
@@ -2039,6 +2043,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                 net_prefetchw(va); /* xdp_frame data area */
                 net_prefetchw(skb->data);
 
+                addr = page_pool_get_dma_addr_netmem(head_page->netmem);
+                dma_sync_single_for_cpu(rq->pdev, addr + head_offset, headlen,
+                                        rq->buff.map_dir);
+
                 frag_offset += headlen;
                 byte_cnt -= headlen;
                 linear_hr = skb_headroom(skb);
@@ -2117,8 +2125,6 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                 }
                 __pskb_pull_tail(skb, headlen);
         } else {
-                dma_addr_t addr;
-
                 if (xdp_buff_has_frags(&mxbuf->xdp)) {
                         struct mlx5e_frag_page *pagep;
 
@@ -2133,9 +2139,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                         while (++pagep < frag_page);
                 }
                 /* copy header */
-                addr = page_pool_get_dma_addr_netmem(head_page->netmem);
-                mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
-                                      head_offset, head_offset, headlen);
+                skb_copy_to_linear_data(skb, head_addr, headlen);
+
                 /* skb linear part was allocated with headlen and aligned to long */
                 skb->tail += headlen;
                 skb->len += headlen;
-- 
2.50.1

From: Christoph Paasch

mlx5e_skb_from_cqe_mpwrq_nonlinear() copies MLX5E_RX_MAX_HEAD (256)
bytes from the page-pool to the skb's linear part. Those 256 bytes
include part of the payload.

When attempting to do GRO in skb_gro_receive, if headlen > data_offset
(and skb->head_frag is not set), we end up aggregating packets in the
frag_list. This is of course not good when we are CPU-limited. It also
causes a worse skb->len/truesize ratio.

So, let's avoid copying parts of the payload to the linear part. We use
eth_get_headlen() to parse the headers and compute the length of the
protocol headers, which will be used to copy the relevant bits to the
skb's linear part.

We still allocate MLX5E_RX_MAX_HEAD for the skb so that if the
networking stack needs to call pskb_may_pull() later on, we don't need
to reallocate memory.

This gives a nice throughput increase (ARM Neoverse-V2 with CX-7 NIC
and LRO enabled):

BEFORE:
=======
(netserver pinned to core receiving interrupts)
$ netperf -H 10.221.81.118 -T 80,9 -P 0 -l 60 -- -m 256K -M 256K
 87380  16384  262144    60.01    32547.82

(netserver pinned to adjacent core receiving interrupts)
$ netperf -H 10.221.81.118 -T 80,10 -P 0 -l 60 -- -m 256K -M 256K
 87380  16384  262144    60.00    52531.67

AFTER:
======
(netserver pinned to core receiving interrupts)
$ netperf -H 10.221.81.118 -T 80,9 -P 0 -l 60 -- -m 256K -M 256K
 87380  16384  262144    60.00    52896.06

(netserver pinned to adjacent core receiving interrupts)
$ netperf -H 10.221.81.118 -T 80,10 -P 0 -l 60 -- -m 256K -M 256K
 87380  16384  262144    60.00    85094.90

Additional tests across a larger range of parameters (w/ and w/o LRO,
w/ and w/o IPv6 encapsulation, different MTUs (1500, 4096, 9000),
different TCP read/write sizes, as well as UDP benchmarks) have all
shown equal or better performance with this patch.

Signed-off-by: Christoph Paasch
---
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8bedbda522808cbabc8e62ae91a8c25d66725ebb..792bb647ba28668ad7789c328456e3609440455d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2047,6 +2047,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                 dma_sync_single_for_cpu(rq->pdev, addr + head_offset, headlen,
                                         rq->buff.map_dir);
 
+                headlen = eth_get_headlen(skb->dev, head_addr, headlen);
+
                 frag_offset += headlen;
                 byte_cnt -= headlen;
                 linear_hr = skb_headroom(skb);
@@ -2123,6 +2125,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
                                 pagep->frags++;
                         while (++pagep < frag_page);
                 }
+
+                headlen = eth_get_headlen(skb->dev, mxbuf->xdp.data, headlen);
+
                 __pskb_pull_tail(skb, headlen);
         } else {
                 if (xdp_buff_has_frags(&mxbuf->xdp)) {
-- 
2.50.1
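
A note on the ordering the first patch relies on, for readers less
familiar with the DMA API: the receive fragment has to be synced for
CPU access before the CPU reads packet bytes out of it; once that is
done, the headers can be copied straight into the skb linear area. The
sketch below is only an illustration of that pattern, not the mlx5
code; copy_rx_headers() and its parameter names are made up here.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative only: sync the fragment for the CPU, then copy the headers. */
static void copy_rx_headers(struct device *dma_dev, struct sk_buff *skb,
                            dma_addr_t frag_dma, void *frag_va,
                            unsigned int frag_off, unsigned int headlen)
{
        /* Hand the relevant part of the buffer back to the CPU first. */
        dma_sync_single_for_cpu(dma_dev, frag_dma + frag_off, headlen,
                                DMA_FROM_DEVICE);

        /* Now the bytes at frag_va + frag_off are safe to read directly. */
        skb_copy_to_linear_data(skb, frag_va + frag_off, headlen);
        skb_put(skb, headlen);
}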
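
Similarly, as an illustration of the idea in the second patch: the skb
linear area is still sized for the worst case (MLX5E_RX_MAX_HEAD) so
that a later pskb_may_pull() does not have to reallocate, but
eth_get_headlen() decides how many bytes are actually protocol headers
and only those get copied. This is a minimal sketch, not the driver
code; build_header_only_skb() and its parameters are invented for the
example.

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: linearize just the protocol headers of a received frame. */
static struct sk_buff *build_header_only_skb(struct napi_struct *napi,
                                             struct net_device *dev,
                                             const void *head_addr,
                                             unsigned int max_head)
{
        struct sk_buff *skb;
        u32 headlen;

        /* Worst-case linear size, so pskb_may_pull() won't need to realloc. */
        skb = napi_alloc_skb(napi, ALIGN(max_head, sizeof(long)));
        if (unlikely(!skb))
                return NULL;

        /* Flow-dissect L2/L3/L4; anything past the headers stays out of here. */
        headlen = eth_get_headlen(dev, head_addr, max_head);

        skb_copy_to_linear_data(skb, head_addr, headlen);
        skb_put(skb, headlen);

        return skb;
}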