From: Maxim Mikityanskiy

Now that the kernel doesn't insert the HBH (Hop-by-Hop) header for
BIG TCP IPv6 packets, remove the now-unnecessary steps from the mlx4
TX path that used to detect and strip it.

Signed-off-by: Maxim Mikityanskiy
---
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 42 +++++-----------------
 1 file changed, 8 insertions(+), 34 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 87f35bcbeff8..c5d564e5a581 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -636,28 +636,20 @@ static int get_real_size(const struct sk_buff *skb,
 			 struct net_device *dev,
 			 int *lso_header_size,
 			 bool *inline_ok,
-			 void **pfrag,
-			 int *hopbyhop)
+			 void **pfrag)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int real_size;
 
 	if (shinfo->gso_size) {
 		*inline_ok = false;
-		*hopbyhop = 0;
 		if (skb->encapsulation) {
 			*lso_header_size = skb_inner_tcp_all_headers(skb);
 		} else {
-			/* Detects large IPV6 TCP packets and prepares for removal of
-			 * HBH header that has been pushed by ip6_xmit(),
-			 * mainly so that tcpdump can dissect them.
-			 */
-			if (ipv6_has_hopopt_jumbo(skb))
-				*hopbyhop = sizeof(struct hop_jumbo_hdr);
 			*lso_header_size = skb_tcp_all_headers(skb);
 		}
 		real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
-			ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
+			ALIGN(*lso_header_size + 4, DS_SIZE);
 		if (unlikely(*lso_header_size != skb_headlen(skb))) {
 			/* We add a segment for the skb linear buffer only if
 			 * it contains data */
@@ -884,7 +876,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	int desc_size;
 	int real_size;
 	u32 index, bf_index;
-	struct ipv6hdr *h6;
 	__be32 op_own;
 	int lso_header_size;
 	void *fragptr = NULL;
@@ -893,7 +884,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool stop_queue;
 	bool inline_ok;
 	u8 data_offset;
-	int hopbyhop;
 	bool bf_ok;
 
 	tx_ind = skb_get_queue_mapping(skb);
@@ -903,7 +893,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto tx_drop;
 
 	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
-				  &inline_ok, &fragptr, &hopbyhop);
+				  &inline_ok, &fragptr);
 	if (unlikely(!real_size))
 		goto tx_drop_count;
 
@@ -956,7 +946,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		data = &tx_desc->data;
 		data_offset = offsetof(struct mlx4_en_tx_desc, data);
 	} else {
-		int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
+		int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
 
 		data = (void *)&tx_desc->lso + lso_align;
 		data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -1021,31 +1011,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 			((ring->prod & ring->size) ?
 				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
 
-		lso_header_size -= hopbyhop;
 		/* Fill in the LSO prefix */
 		tx_desc->lso.mss_hdr_size = cpu_to_be32(
 			shinfo->gso_size << 16 | lso_header_size);
 
+		/* Copy headers;
+		 * note that we already verified that it is linear
+		 */
+		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
-		if (unlikely(hopbyhop)) {
-			/* remove the HBH header.
-			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
-			 */
-			memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
-			h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
-			h6->nexthdr = IPPROTO_TCP;
-			/* Copy the TCP header after the IPv6 one */
-			memcpy(h6 + 1,
-			       skb->data + ETH_HLEN + sizeof(*h6) +
-					sizeof(struct hop_jumbo_hdr),
-			       tcp_hdrlen(skb));
-			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
-		} else {
-			/* Copy headers;
-			 * note that we already verified that it is linear
-			 */
-			memcpy(tx_desc->lso.header, skb->data, lso_header_size);
-		}
 
 		ring->tso_packets++;
 
 		i = shinfo->gso_segs;
-- 
2.50.1
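A note for reviewers, not part of the commit: before this change the removed
branch existed because ip6_xmit() pushed a Hop-by-Hop jumbogram option between
the IPv6 and TCP headers of BIG TCP packets, so the driver had to rebuild the
LSO header without it. The sketch below only condenses what that deleted
branch used to do; the helper name is invented for illustration and the
includes are indicative.

/* Illustration only. The wire layout the driver used to see was
 *
 *   [Ethernet][IPv6, nexthdr = HBH][hop_jumbo_hdr][TCP][payload]
 *
 * and the LSO header had to be rebuilt as
 *
 *   [Ethernet][IPv6, nexthdr = TCP][TCP]
 */
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <net/ipv6.h>

/* Hypothetical helper mirroring the deleted hopbyhop branch */
static void lso_header_strip_hbh(void *lso_header, const struct sk_buff *skb)
{
	struct ipv6hdr *h6;

	/* Copy the Ethernet and IPv6 headers unchanged */
	memcpy(lso_header, skb->data, ETH_HLEN + sizeof(*h6));

	/* Make IPv6 point directly at TCP instead of the HBH option */
	h6 = (struct ipv6hdr *)((char *)lso_header + ETH_HLEN);
	h6->nexthdr = IPPROTO_TCP;

	/* Copy the TCP header, skipping the jumbogram option in between */
	memcpy(h6 + 1,
	       skb->data + ETH_HLEN + sizeof(*h6) + sizeof(struct hop_jumbo_hdr),
	       tcp_hdrlen(skb));
}

Since ip6_xmit() no longer inserts that option for BIG TCP, none of the above
is needed and the driver can copy the linear headers verbatim, as the patch
does.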