Convert all the legacy code that directly accesses the pp fields in
net_iov so that it accesses them through @desc instead.

Signed-off-by: Byungchul Park
---
 io_uring/zcrx.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index b1b723222cdb..f3ba04ce97ab 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -693,12 +693,12 @@ static void io_zcrx_return_niov(struct net_iov *niov)
 {
 	netmem_ref netmem = net_iov_to_netmem(niov);
 
-	if (!niov->pp) {
+	if (!niov->desc.pp) {
 		/* copy fallback allocated niovs */
 		io_zcrx_return_niov_freelist(niov);
 		return;
 	}
-	page_pool_put_unrefed_netmem(niov->pp, netmem, -1, false);
+	page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
 }
 
 static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
@@ -800,7 +800,7 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 		if (!page_pool_unref_and_test(netmem))
 			continue;
 
-		if (unlikely(niov->pp != pp)) {
+		if (unlikely(niov->desc.pp != pp)) {
 			io_zcrx_return_niov(niov);
 			continue;
 		}
@@ -1074,8 +1074,8 @@ static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
 		return io_zcrx_copy_frag(req, ifq, frag, off, len);
 
 	niov = netmem_to_net_iov(frag->netmem);
-	if (!niov->pp || niov->pp->mp_ops != &io_uring_pp_zc_ops ||
-	    io_pp_to_ifq(niov->pp) != ifq)
+	if (!niov->desc.pp || niov->desc.pp->mp_ops != &io_uring_pp_zc_ops ||
+	    io_pp_to_ifq(niov->desc.pp) != ifq)
 		return -EFAULT;
 
 	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
-- 
2.17.1

Convert all the legacy code that directly accesses the pp fields in
net_iov so that it accesses them through @desc instead.

Signed-off-by: Byungchul Park
---
 include/linux/skbuff.h | 4 ++--
 net/core/devmem.c      | 6 +++---
 net/ipv4/tcp.c         | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ff90281ddf90..86737076101d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3778,8 +3778,8 @@ static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
 					    enum dma_data_direction dir)
 {
 	if (skb_frag_is_net_iov(frag)) {
-		return netmem_to_net_iov(frag->netmem)->dma_addr + offset +
-		       frag->offset;
+		return netmem_to_net_iov(frag->netmem)->desc.dma_addr +
+		       offset + frag->offset;
 	}
 	return dma_map_page(dev, skb_frag_page(frag),
 			    skb_frag_off(frag) + offset, size, dir);
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 1d04754bc756..ec4217d6c0b4 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -97,9 +97,9 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
 	index = offset / PAGE_SIZE;
 	niov = &owner->area.niovs[index];
 
-	niov->pp_magic = 0;
-	niov->pp = NULL;
-	atomic_long_set(&niov->pp_ref_count, 0);
+	niov->desc.pp_magic = 0;
+	niov->desc.pp = NULL;
+	atomic_long_set(&niov->desc.pp_ref_count, 0);
 
 	return niov;
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dee578aad690..f035440c475a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2587,7 +2587,7 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
 			if (err)
 				goto out;
 
-			atomic_long_inc(&niov->pp_ref_count);
+			atomic_long_inc(&niov->desc.pp_ref_count);
 			tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
 
 			sent += copy;
-- 
2.17.1
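The conversion in the two patches above is mechanical: every direct
access to one of the mirrored page_pool fields gains a "desc." step,
because @desc (a struct netmem_desc) and the legacy anonymous struct
are union'ed over the same storage until the next patch removes the
union. A minimal sketch of the pattern, assuming only the struct
definitions visible in the next patch; the helper name net_iov_pp is
hypothetical and used for illustration only:

/* Both spellings alias the same storage while the union exists:
 *
 *	niov->pp           == niov->desc.pp
 *	niov->pp_ref_count == niov->desc.pp_ref_count
 *
 * so each call site converts textually, with no change in layout
 * or behaviour.
 */
static inline struct page_pool *net_iov_pp(const struct net_iov *niov)
{
	return niov->desc.pp;	/* was: niov->pp */
}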
Now that the pp fields in net_iov have no users, remove them from
net_iov and clean up.

Signed-off-by: Byungchul Park
---
 include/net/netmem.h | 38 +-------------------------------------
 1 file changed, 1 insertion(+), 37 deletions(-)

diff --git a/include/net/netmem.h b/include/net/netmem.h
index 9e10f4ac50c3..46def457dc65 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -93,23 +93,7 @@ enum net_iov_type {
 	 * supported.
 	 */
 struct net_iov {
-	union {
-		struct netmem_desc desc;
-
-		/* XXX: The following part should be removed once all
-		 * the references to them are converted so as to be
-		 * accessed via netmem_desc e.g. niov->desc.pp instead
-		 * of niov->pp.
-		 */
-		struct {
-			unsigned long _flags;
-			unsigned long pp_magic;
-			struct page_pool *pp;
-			unsigned long _pp_mapping_pad;
-			unsigned long dma_addr;
-			atomic_long_t pp_ref_count;
-		};
-	};
+	struct netmem_desc desc;
 	struct net_iov_area *owner;
 	enum net_iov_type type;
 };
@@ -123,26 +107,6 @@ struct net_iov_area {
 	unsigned long base_virtual;
 };
 
-/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
- * the page_pool can access these fields without worrying whether the
- * underlying fields are accessed via netmem_desc or directly via
- * net_iov, until all the references to them are converted so as to be
- * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
- *
- * The non-net stack fields of struct page are private to the mm stack
- * and must never be mirrored to net_iov.
- */
-#define NET_IOV_ASSERT_OFFSET(desc, iov)			\
-	static_assert(offsetof(struct netmem_desc, desc) ==	\
-		      offsetof(struct net_iov, iov))
-NET_IOV_ASSERT_OFFSET(_flags, _flags);
-NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
-NET_IOV_ASSERT_OFFSET(pp, pp);
-NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
-NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
-NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
-#undef NET_IOV_ASSERT_OFFSET
-
 static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
 {
 	return niov->owner;
-- 
2.17.1
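With the union gone, @desc is plainly the first member of struct
net_iov, so the per-field NET_IOV_ASSERT_OFFSET checks removed above
have nothing left to verify against. Should any remaining code still
cast between the two types, a single offset check would be enough to
pin the layout down; a hedged sketch, assuming such a caller exists,
and not part of this series:

/* Illustrative only: with @desc embedded as the first member, the
 * whole descriptor sits at offset zero, so one assert can replace
 * the removed per-field list.
 */
static_assert(offsetof(struct net_iov, desc) == 0,
	      "struct netmem_desc must be the first member of struct net_iov");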