To simplify struct page, the page pool members of struct page should be
moved out into their own descriptor, allowing these members to be
removed from struct page.

Introduce a network memory descriptor, struct netmem_desc, to store
those members, and union it with the existing fields in struct net_iov,
which also allows the fields of struct net_iov to be reorganized.

Signed-off-by: Byungchul Park
Reviewed-by: Toke Høiland-Jørgensen
Reviewed-by: Pavel Begunkov
Reviewed-by: Mina Almasry
Reviewed-by: Vlastimil Babka
Acked-by: Harry Yoo
---
 include/net/netmem.h | 116 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 95 insertions(+), 21 deletions(-)

diff --git a/include/net/netmem.h b/include/net/netmem.h
index de1d95f04076..535cf17b9134 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -12,6 +12,50 @@
 #include
 #include
 
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ *	unsigned long pp_magic;
+ *	struct page_pool *pp;
+ *	unsigned long _pp_mapping_pad;
+ *	unsigned long dma_addr;
+ *	atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these
+ * fields without worrying whether the underlying fields belong to a
+ * page or netmem_desc.
+ *
+ * CAUTION: Do not update the fields in netmem_desc without also
+ * updating the anonymous aliasing union in struct net_iov.
+ */
+struct netmem_desc {
+	unsigned long _flags;
+	unsigned long pp_magic;
+	struct page_pool *pp;
+	unsigned long _pp_mapping_pad;
+	unsigned long dma_addr;
+	atomic_long_t pp_ref_count;
+};
+
+#define NETMEM_DESC_ASSERT_OFFSET(pg, desc)        \
+	static_assert(offsetof(struct page, pg) == \
+		      offsetof(struct netmem_desc, desc))
+NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
+NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
+NETMEM_DESC_ASSERT_OFFSET(pp, pp);
+NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
+NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
+NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NETMEM_DESC_ASSERT_OFFSET
+
+/*
+ * Since struct netmem_desc uses the space in struct page, the size
+ * should be checked, until struct netmem_desc has its own instance from
+ * slab, to avoid conflicting with other members within struct page.
+ */
+static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
+
 /* net_iov */
 
 DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
@@ -30,13 +74,48 @@ enum net_iov_type {
 	NET_IOV_MAX = ULONG_MAX
 };
 
+/* A memory descriptor representing abstract networking I/O vectors,
+ * generally for non-pages memory that doesn't have its corresponding
+ * struct page and needs to be explicitly allocated through slab.
+ *
+ * net_iovs are allocated and used by networking code, and the size of
+ * the chunk is PAGE_SIZE.
+ *
+ * This memory can be any form of non-struct paged memory. Examples
+ * include imported dmabuf memory and imported io_uring memory. See
+ * net_iov_type for all the supported types.
+ *
+ * @pp_magic:	pp field, similar to the one in struct page/struct
+ *		netmem_desc.
+ * @pp:		the pp this net_iov belongs to, if any.
+ * @dma_addr:	the dma addrs of the net_iov. Needed for the network
+ *		card to send/receive this net_iov.
+ * @pp_ref_count: the pp ref count of this net_iov, exactly the same
+ *		usage as struct page/struct netmem_desc.
+ * @owner:	the net_iov_area this net_iov belongs to, if any.
+ * @type:	the type of the memory. Different types of net_iovs are
+ *		supported.
+ */
 struct net_iov {
-	enum net_iov_type type;
-	unsigned long pp_magic;
-	struct page_pool *pp;
+	union {
+		struct netmem_desc desc;
+
+		/* XXX: The following part should be removed once all
+		 * the references to them are converted so as to be
+		 * accessed via netmem_desc e.g. niov->desc.pp instead
+		 * of niov->pp.
+		 */
+		struct {
+			unsigned long _flags;
+			unsigned long pp_magic;
+			struct page_pool *pp;
+			unsigned long _pp_mapping_pad;
+			unsigned long dma_addr;
+			atomic_long_t pp_ref_count;
+		};
+	};
 	struct net_iov_area *owner;
-	unsigned long dma_addr;
-	atomic_long_t pp_ref_count;
+	enum net_iov_type type;
 };
 
 struct net_iov_area {
@@ -48,27 +127,22 @@ struct net_iov_area {
 	unsigned long base_virtual;
 };
 
-/* These fields in struct page are used by the page_pool and net stack:
+/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
+ * the page_pool can access these fields without worrying whether the
+ * underlying fields are accessed via netmem_desc or directly via
+ * net_iov, until all the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
  *
- * struct {
- *	unsigned long pp_magic;
- *	struct page_pool *pp;
- *	unsigned long _pp_mapping_pad;
- *	unsigned long dma_addr;
- *	atomic_long_t pp_ref_count;
- * };
- *
- * We mirror the page_pool fields here so the page_pool can access these fields
- * without worrying whether the underlying fields belong to a page or net_iov.
- *
- * The non-net stack fields of struct page are private to the mm stack and must
- * never be mirrored to net_iov.
+ * The non-net stack fields of struct page are private to the mm stack
+ * and must never be mirrored to net_iov.
  */
-#define NET_IOV_ASSERT_OFFSET(pg, iov)             \
-	static_assert(offsetof(struct page, pg) == \
+#define NET_IOV_ASSERT_OFFSET(desc, iov)                   \
+	static_assert(offsetof(struct netmem_desc, desc) == \
 		      offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(_flags, _flags);
 NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
 NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
 NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
 NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
 #undef NET_IOV_ASSERT_OFFSET
-- 
2.17.1
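A quick illustration of what the aliasing union above provides during
the transition period. This sketch is not part of the series and
niov_pp_sketch() is a made-up name; it relies only on the fields and the
static_assert guarantees introduced by the patch.

  /* Illustration only: the NET_IOV_ASSERT_OFFSET checks guarantee that
   * the netmem_desc view and the legacy anonymous-struct view alias the
   * same storage, so both reads below yield the same pointer while the
   * conversion to niov->desc.* is still in progress.
   */
  static inline struct page_pool *niov_pp_sketch(const struct net_iov *niov)
  {
  	struct page_pool *via_desc  = niov->desc.pp;	/* preferred form */
  	struct page_pool *via_alias = niov->pp;		/* legacy alias   */

  	return via_desc ?: via_alias;	/* always the same value */
  }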
To eliminate the use of the page pool fields in struct page, the page
pool code should use the netmem descriptor and its APIs instead.

However, __netmem_get_pp() still accesses ->pp via struct page. Change
it to use struct netmem_desc instead, since ->pp will no longer be
available in struct page.

While at it, add a helper, __netmem_to_nmdesc(), that can be used to
unsafely get a pointer to the netmem_desc backing the netmem_ref, but
only when the netmem_ref is always backed by system memory.

Signed-off-by: Byungchul Park
---
 include/net/netmem.h | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/include/net/netmem.h b/include/net/netmem.h
index 535cf17b9134..097bc74d9555 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -247,6 +247,24 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
 	return page_to_pfn(netmem_to_page(netmem));
 }
 
+/**
+ * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
+ * @netmem
+ * @netmem: netmem reference to convert
+ *
+ * Unsafe version that can be used only when @netmem is always backed by
+ * system memory, performs faster and generates smaller object code (no
+ * check for the LSB, no WARN). When @netmem points to IOV, provokes
+ * undefined behaviour.
+ *
+ * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
+ * by system memory).
+ */
+static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
+{
+	return (__force struct netmem_desc *)netmem;
+}
+
 /* __netmem_clear_lsb - convert netmem_ref to struct net_iov * for access to
  * common fields.
  * @netmem: netmem reference to extract as net_iov.
@@ -280,7 +298,7 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
  */
 static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
 {
-	return __netmem_to_page(netmem)->pp;
+	return __netmem_to_nmdesc(netmem)->pp;
 }
 
 static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make mlx4 access ->pp_ref_count through netmem_desc instead of struct
page.

While at it, add helpers, pp_page_to_nmdesc() and __pp_page_to_nmdesc(),
that can be used to get the netmem_desc from a page, but only if it is a
pp page. For now, while netmem_desc overlays struct page, this can be
achieved by a simple cast; a macro using _Generic covers const casting
as well.

Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/mellanox/mlx4/en_rx.c |  4 +++-
 include/net/netmem.h                       | 17 +++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b33285d755b9..92a16ddb7d86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -460,9 +460,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 
 		truesize += frag_info->frag_stride;
 		if (frag_info->frag_stride == PAGE_SIZE / 2) {
+			struct netmem_desc *desc = pp_page_to_nmdesc(page);
+
 			frags->page_offset ^= PAGE_SIZE / 2;
 			release = page_count(page) != 1 ||
-				  atomic_long_read(&page->pp_ref_count) != 1 ||
+				  atomic_long_read(&desc->pp_ref_count) != 1 ||
 				  page_is_pfmemalloc(page) ||
 				  page_to_nid(page) != numa_mem_id();
 		} else if (!priv->rx_headroom) {
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 097bc74d9555..f7dacc9e75fd 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -285,6 +285,23 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
 	return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
 }
 
+/* XXX: How to extract netmem_desc from page must be changed, once
+ * netmem_desc no longer overlays on page and will be allocated through
+ * slab.
+ */
+#define __pp_page_to_nmdesc(p) (_Generic((p),				\
+	const struct page *	: (const struct netmem_desc *)(p),	\
+	struct page *		: (struct netmem_desc *)(p)))
+
+/* CAUTION: Check if the page is a pp page before calling this helper or
+ * know it's a pp page.
+ */
+#define pp_page_to_nmdesc(p)						\
+({									\
+	DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p));		\
+	__pp_page_to_nmdesc(p);						\
+})
+
 /**
  * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
  * @netmem: netmem reference to get the pointer from
-- 
2.17.1
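Since every remaining patch in the series applies the same conversion,
here is a rough usage sketch of the helpers added above. It is
illustration only, not part of the series: sketch_recycle() and
sketch_pp_refs() are made-up names, while pp_page_to_nmdesc(),
__pp_page_to_nmdesc(), page_pool_put_full_page() and atomic_long_read()
are the interfaces actually used by the patches.

  /* Valid only when the page is known to be a page_pool page;
   * pp_page_to_nmdesc() warns under DEBUG_NET otherwise.
   */
  static void sketch_recycle(struct page *page)
  {
  	struct netmem_desc *desc = pp_page_to_nmdesc(page);

  	page_pool_put_full_page(desc->pp, page, false);
  }

  /* _Generic keeps const-ness: a const page yields a const netmem_desc. */
  static long sketch_pp_refs(const struct page *page)
  {
  	const struct netmem_desc *desc = __pp_page_to_nmdesc(page);

  	return atomic_long_read(&desc->pp_ref_count);
  }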
To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make netdevsim access ->pp through netmem_desc instead of struct page.

Signed-off-by: Byungchul Park
---
 drivers/net/netdevsim/netdev.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e36d3e846c2d..d6b3a7755f14 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -842,7 +842,8 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
 		if (!ns->page)
 			ret = -ENOMEM;
 	} else {
-		page_pool_put_full_page(ns->page->pp, ns->page, false);
+		page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
+					ns->page, false);
 		ns->page = NULL;
 	}
 
@@ -1069,7 +1070,8 @@ void nsim_destroy(struct netdevsim *ns)
 
 	/* Put this intentionally late to exercise the orphaning path */
 	if (ns->page) {
-		page_pool_put_full_page(ns->page->pp, ns->page, false);
+		page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
+					ns->page, false);
 		ns->page = NULL;
 	}
 
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make mt76 access ->pp through netmem_desc instead of struct page.

Signed-off-by: Byungchul Park
---
 drivers/net/wireless/mediatek/mt76/mt76.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 14927a92f9d1..7e9ddf91b822 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1798,7 +1798,8 @@ static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
 {
 	struct page *page = virt_to_head_page(buf);
 
-	page_pool_put_full_page(page->pp, page, allow_direct);
+	page_pool_put_full_page(pp_page_to_nmdesc(page)->pp, page,
+				allow_direct);
 }
 
 static inline void *
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make fec access ->pp through netmem_desc instead of struct page.

Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/freescale/fec_main.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4eed252ad40..1d0bd6f75368 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1043,7 +1043,9 @@ static void fec_enet_bd_init(struct net_device *dev)
 				struct page *page = txq->tx_buf[i].buf_p;
 
 				if (page)
-					page_pool_put_page(page->pp, page, 0, false);
+					page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+							   page, 0,
+							   false);
 			}
 
 			txq->tx_buf[i].buf_p = NULL;
@@ -1581,7 +1583,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			xdp_return_frame_rx_napi(xdpf);
 		} else { /* recycle pages of XDP_TX frames */
 			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
-			page_pool_put_page(page->pp, page, 0, true);
+			page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+					   0, true);
 		}
 
 		txq->tx_buf[index].buf_p = NULL;
@@ -3343,7 +3346,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 			} else {
 				struct page *page = txq->tx_buf[i].buf_p;
 
-				page_pool_put_page(page->pp, page, 0, false);
+				page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+						   page, 0, false);
 			}
 
 			txq->tx_buf[i].buf_p = NULL;
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make octeontx2-pf access ->pp through netmem_desc instead of struct
page.
Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 99ace381cc78..625bb5a05344 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1571,7 +1571,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
 	cq->pool_ptrs++;
 	if (xsk_buff) {
 		xsk_buff_free(xsk_buff);
-	} else if (page->pp) {
+	} else if (pp_page_to_nmdesc(page)->pp) {
 		page_pool_recycle_direct(pool->page_pool, page);
 	} else {
 		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make iavf access ->pp through netmem_desc instead of struct page.

Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index aaf70c625655..363c42bf3dcf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1216,7 +1216,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
 				      unsigned int size)
 {
 	struct page *buf_page = __netmem_to_page(rx_buffer->netmem);
-	u32 hr = buf_page->pp->p.offset;
+	u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
 	struct sk_buff *skb;
 	void *va;
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make idpf access ->pp through netmem_desc instead of struct page.

Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index cef9dfb877e8..6b5f440aede3 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3276,8 +3276,10 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
 	hdr_page = __netmem_to_page(hdr->netmem);
 	buf_page = __netmem_to_page(buf->netmem);
 
-	dst = page_address(hdr_page) + hdr->offset + hdr_page->pp->p.offset;
-	src = page_address(buf_page) + buf->offset + buf_page->pp->p.offset;
+	dst = page_address(hdr_page) + hdr->offset +
+	      pp_page_to_nmdesc(hdr_page)->pp->p.offset;
+	src = page_address(buf_page) + buf->offset +
+	      pp_page_to_nmdesc(buf_page)->pp->p.offset;
 
 	memcpy(dst, src, LARGEST_ALIGN(copy));
 	buf->offset += copy;
@@ -3296,7 +3298,7 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
 struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
 {
 	struct page *buf_page = __netmem_to_page(buf->netmem);
-	u32 hr = buf_page->pp->p.offset;
+	u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
 	struct sk_buff *skb;
 	void *va;
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make mlx5 access ->pp through netmem_desc instead of struct page.
Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 5ce1b463b7a8..5d51600935a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -710,7 +710,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 			/* No need to check page_pool_page_is_pp() as we
 			 * know this is a page_pool page.
 			 */
-			page_pool_recycle_direct(page->pp, page);
+			page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp,
+						 page);
 		} while (++n < num);
 
 		break;
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make icssg-prueth access ->pp through netmem_desc instead of struct
page.

Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index ff5f41bf499e..5e225310c9de 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -367,7 +367,7 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
 		return IRQ_NONE;
 
 	prueth_tx_ts_sr1(emac, (void *)page_address(page));
-	page_pool_recycle_direct(page->pp, page);
+	page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
 
 	return IRQ_HANDLED;
 }
@@ -392,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
 		complete(&emac->cmd_complete);
 	}
 
-	page_pool_recycle_direct(page->pp, page);
+	page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
 
 	return IRQ_HANDLED;
 }
-- 
2.17.1

To eliminate the use of struct page in the page pool, page pool users
should use the netmem descriptor and its APIs instead.

Make xdp access ->pp through netmem_desc instead of struct page.

Signed-off-by: Byungchul Park
---
 include/net/libeth/xdp.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h
index 6ce6aec6884c..f4880b50e804 100644
--- a/include/net/libeth/xdp.h
+++ b/include/net/libeth/xdp.h
@@ -1292,7 +1292,7 @@ static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
 	xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
 #endif
 	xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
-			 page->pp->p.offset, len, true);
+			 pp_page_to_nmdesc(page)->pp->p.offset, len, true);
 }
 
 /**
-- 
2.17.1
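With the whole series applied, none of these users dereference the page
pool fields through struct page any more. A before/after sketch of the
access pattern, for illustration only: the helper names below are made
up, while pp_page_to_nmdesc(), __netmem_to_nmdesc() and pp->p.offset are
the interfaces and field used by the patches above.

  /* Before the series: u32 hr = page->pp->p.offset; */
  static u32 sketch_headroom_from_page(struct page *page)
  {
  	/* The page must be known to come from a page pool. */
  	return pp_page_to_nmdesc(page)->pp->p.offset;
  }

  /* When the caller already holds a netmem_ref known to be backed by
   * system memory (LSB clear, not a net_iov), the struct page detour
   * can be skipped entirely.
   */
  static u32 sketch_headroom_from_netmem(netmem_ref netmem)
  {
  	return __netmem_to_nmdesc(netmem)->pp->p.offset;
  }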