To simplify struct page, the page pool members of struct page should be
moved elsewhere, allowing these members to be removed from struct page.

Introduce a network memory descriptor, struct netmem_desc, to store the
members, and union it with the existing fields in struct net_iov,
allowing the fields of struct net_iov to be organized.

Signed-off-by: Byungchul Park
Reviewed-by: Toke Høiland-Jørgensen
Reviewed-by: Pavel Begunkov
Reviewed-by: Mina Almasry
Reviewed-by: Vlastimil Babka
Acked-by: Harry Yoo
---
 include/net/netmem.h | 116 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 95 insertions(+), 21 deletions(-)

diff --git a/include/net/netmem.h b/include/net/netmem.h
index de1d95f04076..535cf17b9134 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -12,6 +12,50 @@
 #include <linux/mm.h>
 #include <net/net_debug.h>
 
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ *	struct {
+ *		unsigned long pp_magic;
+ *		struct page_pool *pp;
+ *		unsigned long _pp_mapping_pad;
+ *		unsigned long dma_addr;
+ *		atomic_long_t pp_ref_count;
+ *	};
+ *
+ * We mirror the page_pool fields here so the page_pool can access these
+ * fields without worrying whether the underlying fields belong to a
+ * page or netmem_desc.
+ *
+ * CAUTION: Do not update the fields in netmem_desc without also
+ * updating the anonymous aliasing union in struct net_iov.
+ */
+struct netmem_desc {
+	unsigned long _flags;
+	unsigned long pp_magic;
+	struct page_pool *pp;
+	unsigned long _pp_mapping_pad;
+	unsigned long dma_addr;
+	atomic_long_t pp_ref_count;
+};
+
+#define NETMEM_DESC_ASSERT_OFFSET(pg, desc)		\
+	static_assert(offsetof(struct page, pg) ==	\
+		      offsetof(struct netmem_desc, desc))
+NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
+NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
+NETMEM_DESC_ASSERT_OFFSET(pp, pp);
+NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
+NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
+NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NETMEM_DESC_ASSERT_OFFSET
+
+/*
+ * Since struct netmem_desc uses the space in struct page, the size
+ * should be checked, until struct netmem_desc has its own instance from
+ * slab, to avoid conflicting with other members within struct page.
+ */
+static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
+
 /* net_iov */
 
 DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
@@ -30,13 +74,48 @@ enum net_iov_type {
 	NET_IOV_MAX = ULONG_MAX
 };
 
+/* A memory descriptor representing abstract networking I/O vectors,
+ * generally for non-pages memory that doesn't have its corresponding
+ * struct page and needs to be explicitly allocated through slab.
+ *
+ * net_iovs are allocated and used by networking code, and the size of
+ * the chunk is PAGE_SIZE.
+ *
+ * This memory can be any form of non-struct paged memory. Examples
+ * include imported dmabuf memory and imported io_uring memory. See
+ * net_iov_type for all the supported types.
+ *
+ * @pp_magic:	pp field, similar to the one in struct page/struct
+ *		netmem_desc.
+ * @pp:		the pp this net_iov belongs to, if any.
+ * @dma_addr:	the dma addrs of the net_iov. Needed for the network
+ *		card to send/receive this net_iov.
+ * @pp_ref_count: the pp ref count of this net_iov, exactly the same
+ *		usage as struct page/struct netmem_desc.
+ * @owner:	the net_iov_area this net_iov belongs to, if any.
+ * @type:	the type of the memory. Different types of net_iovs are
+ *		supported.
+ */
 struct net_iov {
-	enum net_iov_type type;
-	unsigned long pp_magic;
-	struct page_pool *pp;
+	union {
+		struct netmem_desc desc;
+
+		/* XXX: The following part should be removed once all
+		 * the references to them are converted so as to be
+		 * accessed via netmem_desc e.g. niov->desc.pp instead
+		 * of niov->pp.
+		 */
+		struct {
+			unsigned long _flags;
+			unsigned long pp_magic;
+			struct page_pool *pp;
+			unsigned long _pp_mapping_pad;
+			unsigned long dma_addr;
+			atomic_long_t pp_ref_count;
+		};
+	};
 	struct net_iov_area *owner;
-	unsigned long dma_addr;
-	atomic_long_t pp_ref_count;
+	enum net_iov_type type;
 };
 
 struct net_iov_area {
@@ -48,27 +127,22 @@ struct net_iov_area {
 	unsigned long base_virtual;
 };
 
-/* These fields in struct page are used by the page_pool and net stack:
+/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
+ * the page_pool can access these fields without worrying whether the
+ * underlying fields are accessed via netmem_desc or directly via
+ * net_iov, until all the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
  *
- * struct {
- *	unsigned long pp_magic;
- *	struct page_pool *pp;
- *	unsigned long _pp_mapping_pad;
- *	unsigned long dma_addr;
- *	atomic_long_t pp_ref_count;
- * };
- *
- * We mirror the page_pool fields here so the page_pool can access these fields
- * without worrying whether the underlying fields belong to a page or net_iov.
- *
- * The non-net stack fields of struct page are private to the mm stack and must
- * never be mirrored to net_iov.
+ * The non-net stack fields of struct page are private to the mm stack
+ * and must never be mirrored to net_iov.
  */
-#define NET_IOV_ASSERT_OFFSET(pg, iov)		\
-	static_assert(offsetof(struct page, pg) == \
+#define NET_IOV_ASSERT_OFFSET(desc, iov)	\
+	static_assert(offsetof(struct netmem_desc, desc) == \
 		      offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(_flags, _flags);
 NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
 NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
 NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
 NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
 #undef NET_IOV_ASSERT_OFFSET
-- 
2.17.1

To eliminate the use of the page pool fields in struct page, the page
pool code should use the netmem descriptor and APIs instead.

However, some code, e.g. __netmem_to_page(), still accesses the page
pool fields, e.g. ->pp, via struct page. This should be changed so that
the fields are accessed via the netmem descriptor, struct netmem_desc,
since they will no longer be available in struct page.

Introduce utility APIs to make it easy to use struct netmem_desc as the
descriptor. The APIs are:

   1. __netmem_to_nmdesc(), to convert netmem_ref to struct netmem_desc,
      unsafely, without checking whether it's net_iov or system memory.
   2. netmem_to_nmdesc(), to convert netmem_ref to struct netmem_desc,
      safely, checking whether it's net_iov or system memory.
   3. nmdesc_to_page(), to convert struct netmem_desc to struct page,
      assuming struct netmem_desc overlays struct page.
   4. page_to_nmdesc(), to convert struct page to struct netmem_desc,
      assuming struct netmem_desc overlays struct page, allowing only a
      head page to be converted.
   5. nmdesc_address(), to get the virtual address corresponding to a
      struct netmem_desc.
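For illustration only, the conversion helpers compose as follows in a
hypothetical caller (the function name below is made up for this sketch
and is not part of the patch):

	/* Hypothetical example: fetch the page pool backing a netmem_ref.
	 * netmem_to_nmdesc() WARNs and returns NULL for net_iov-backed
	 * netmem, so this only succeeds for system memory.
	 */
	static struct page_pool *example_netmem_pp(netmem_ref netmem)
	{
		struct netmem_desc *desc = netmem_to_nmdesc(netmem);

		return desc ? desc->pp : NULL;
	}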
Signed-off-by: Byungchul Park
---
 include/net/netmem.h | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/include/net/netmem.h b/include/net/netmem.h
index 535cf17b9134..ad9444be229a 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -198,6 +198,32 @@ static inline struct page *netmem_to_page(netmem_ref netmem)
 	return __netmem_to_page(netmem);
 }
 
+/**
+ * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
+ * @netmem
+ * @netmem: netmem reference to convert
+ *
+ * Unsafe version of netmem_to_nmdesc(). When @netmem is always backed
+ * by system memory, performs faster and generates smaller object code
+ * (no check for the LSB, no WARN). When @netmem points to IOV, provokes
+ * undefined behaviour.
+ *
+ * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
+ * by system memory).
+ */
+static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
+{
+	return (__force struct netmem_desc *)netmem;
+}
+
+static inline struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem)
+{
+	if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
+		return NULL;
+
+	return __netmem_to_nmdesc(netmem);
+}
+
 static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
 {
 	if (netmem_is_net_iov(netmem))
@@ -314,6 +340,21 @@ static inline netmem_ref netmem_compound_head(netmem_ref netmem)
 	return page_to_netmem(compound_head(netmem_to_page(netmem)));
 }
 
+#define nmdesc_to_page(nmdesc)	(_Generic((nmdesc),			\
+	const struct netmem_desc * :	(const struct page *)(nmdesc),	\
+	struct netmem_desc * :		(struct page *)(nmdesc)))
+
+static inline struct netmem_desc *page_to_nmdesc(struct page *page)
+{
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	return (struct netmem_desc *)page;
+}
+
+static inline void *nmdesc_address(struct netmem_desc *nmdesc)
+{
+	return page_address(nmdesc_to_page(nmdesc));
+}
+
 /**
  * __netmem_address - unsafely get pointer to the memory backing @netmem
  * @netmem: netmem reference to get the pointer for
-- 
2.17.1

To simplify struct page, an effort to separate its own descriptor from
struct page is required, and the work for the page pool is ongoing. To
achieve that, all code should avoid directly accessing the page pool
members of struct page.

Access ->pp_magic through struct netmem_desc instead of directly
accessing it through struct page in page_pool_page_is_pp(). Plus, move
page_pool_page_is_pp() from mm.h to netmem.h to use struct netmem_desc
without header dependency issues.
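For context, a minimal sketch of the kind of check mm code can keep
performing after the move; the caller below is hypothetical, while
page_pool_page_is_pp() is the helper moved by this patch:

	#include <net/netmem.h>

	/* Hypothetical sanity check: a page being freed back to the buddy
	 * allocator must not still carry a page pool signature.
	 */
	static void example_assert_not_pp(struct page *page)
	{
		/* Reads ->pp_magic via struct netmem_desc, not struct page. */
		WARN_ON_ONCE(page_pool_page_is_pp(page));
	}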
Signed-off-by: Byungchul Park
Reviewed-by: Toke Høiland-Jørgensen
Reviewed-by: Mina Almasry
Reviewed-by: Pavel Begunkov
Reviewed-by: Vlastimil Babka
Acked-by: Harry Yoo
---
 include/linux/mm.h   | 12 ------------
 include/net/netmem.h | 17 +++++++++++++++++
 mm/page_alloc.c      |  1 +
 3 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0ef2ba0c667a..0b7f7f998085 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4172,16 +4172,4 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
  */
 #define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
 
-#ifdef CONFIG_PAGE_POOL
-static inline bool page_pool_page_is_pp(struct page *page)
-{
-	return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
-}
-#else
-static inline bool page_pool_page_is_pp(struct page *page)
-{
-	return false;
-}
-#endif
-
 #endif /* _LINUX_MM_H */
diff --git a/include/net/netmem.h b/include/net/netmem.h
index ad9444be229a..11e9de45efcb 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -355,6 +355,23 @@ static inline void *nmdesc_address(struct netmem_desc *nmdesc)
 	return page_address(nmdesc_to_page(nmdesc));
 }
 
+#ifdef CONFIG_PAGE_POOL
+/* XXX: This would better be moved to mm, once mm gets its way to
+ * identify the type of page for page pool.
+ */
+static inline bool page_pool_page_is_pp(struct page *page)
+{
+	struct netmem_desc *desc = page_to_nmdesc(page);
+
+	return (desc->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
+}
+#else
+static inline bool page_pool_page_is_pp(struct page *page)
+{
+	return false;
+}
+#endif
+
 /**
  * __netmem_address - unsafely get pointer to the memory backing @netmem
  * @netmem: netmem reference to get the pointer for
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ef3c07266b3..cc1d169853e8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include <net/netmem.h>
 #include
 #include "internal.h"
 #include "shuffle.h"
-- 
2.17.1

To eliminate the use of the page pool fields in struct page, the page
pool code should use the netmem descriptor and APIs instead.

However, __netmem_get_pp() still accesses ->pp via struct page. So
change it to use struct netmem_desc instead, since ->pp will no longer
be available in struct page.

Signed-off-by: Byungchul Park
---
 include/net/netmem.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/net/netmem.h b/include/net/netmem.h
index 11e9de45efcb..283b4a997fbc 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -306,7 +306,7 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
  */
 static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
 {
-	return __netmem_to_page(netmem)->pp;
+	return __netmem_to_nmdesc(netmem)->pp;
 }
 
 static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
-- 
2.17.1

To eliminate the use of struct page in the page pool, the page pool
code should use the netmem descriptor and APIs instead.

As part of that work, introduce a netmem API to convert a virtual
address to a head netmem, allowing the code to use it rather than the
existing struct page API, virt_to_head_page().
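A minimal usage sketch, assuming a page-pool-backed buffer and
mirroring the virt_to_head_page() pattern (the function below is
hypothetical):

	/* Hypothetical example: release a page pool buffer given only its
	 * virtual address, as the drivers converted later in this series do.
	 */
	static void example_put_buf(void *buf, bool allow_direct)
	{
		netmem_ref netmem = virt_to_head_netmem(buf);

		page_pool_put_full_netmem(netmem_get_pp(netmem), netmem,
					  allow_direct);
	}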
Signed-off-by: Byungchul Park
Reviewed-by: Toke Høiland-Jørgensen
Reviewed-by: Pavel Begunkov
Reviewed-by: Mina Almasry
---
 include/net/netmem.h | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/include/net/netmem.h b/include/net/netmem.h
index 283b4a997fbc..b92c7f15166a 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -372,6 +372,13 @@ static inline bool page_pool_page_is_pp(struct page *page)
 }
 #endif
 
+static inline netmem_ref virt_to_head_netmem(const void *x)
+{
+	netmem_ref netmem = virt_to_netmem(x);
+
+	return netmem_compound_head(netmem);
+}
+
 /**
  * __netmem_address - unsafely get pointer to the memory backing @netmem
  * @netmem: netmem reference to get the pointer for
-- 
2.17.1

To simplify struct page, an effort to separate its own descriptor from
struct page is required, and the work for the page pool is ongoing.

Use the netmem descriptor and APIs for the page pool in the mlx4 code.

Signed-off-by: Byungchul Park
---
 drivers/net/ethernet/mellanox/mlx4/en_rx.c   | 48 +++++++++++---------
 drivers/net/ethernet/mellanox/mlx4/en_tx.c   |  8 ++--
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |  4 +-
 3 files changed, 32 insertions(+), 28 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b33285d755b9..7cf0d2dc5011 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -62,18 +62,18 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 	int i;
 
 	for (i = 0; i < priv->num_frags; i++, frags++) {
-		if (!frags->page) {
-			frags->page = page_pool_alloc_pages(ring->pp, gfp);
-			if (!frags->page) {
+		if (!frags->netmem) {
+			frags->netmem = page_pool_alloc_netmems(ring->pp, gfp);
+			if (!frags->netmem) {
 				ring->alloc_fail++;
 				return -ENOMEM;
 			}
-			page_pool_fragment_page(frags->page, 1);
+			page_pool_fragment_netmem(frags->netmem, 1);
 			frags->page_offset = priv->rx_headroom;
 			ring->rx_alloc_pages++;
 		}
-		dma = page_pool_get_dma_addr(frags->page);
+		dma = page_pool_get_dma_addr_netmem(frags->netmem);
 		rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);
 	}
 	return 0;
@@ -83,10 +83,10 @@ static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
 			      struct mlx4_en_rx_ring *ring,
 			      struct mlx4_en_rx_alloc *frag)
 {
-	if (frag->page)
-		page_pool_put_full_page(ring->pp, frag->page, false);
+	if (frag->netmem)
+		page_pool_put_full_netmem(ring->pp, frag->netmem, false);
 	/* We need to clear all fields, otherwise a change of priv->log_rx_info
-	 * could lead to see garbage later in frag->page.
+	 * could lead to see garbage later in frag->netmem.
	 */
 	memset(frag, 0, sizeof(*frag));
 }
 
@@ -440,29 +440,33 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 	unsigned int truesize = 0;
 	bool release = true;
 	int nr, frag_size;
-	struct page *page;
+	netmem_ref netmem;
 	dma_addr_t dma;
 
 	/* Collect used fragments while replacing them in the HW descriptors */
 	for (nr = 0;; frags++) {
 		frag_size = min_t(int, length, frag_info->frag_size);
 
-		page = frags->page;
-		if (unlikely(!page))
+		netmem = frags->netmem;
+		if (unlikely(!netmem))
 			goto fail;
 
-		dma = page_pool_get_dma_addr(page);
+		dma = page_pool_get_dma_addr_netmem(netmem);
 		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
 					      frag_size, priv->dma_dir);
 
-		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
-				     frag_size);
+		__skb_fill_netmem_desc(skb, nr, netmem, frags->page_offset,
+				       frag_size);
 
 		truesize += frag_info->frag_stride;
 		if (frag_info->frag_stride == PAGE_SIZE / 2) {
+			struct page *page = netmem_to_page(netmem);
+			atomic_long_t *pp_ref_count =
+				netmem_get_pp_ref_count_ref(netmem);
+
 			frags->page_offset ^= PAGE_SIZE / 2;
 			release = page_count(page) != 1 ||
-				  atomic_long_read(&page->pp_ref_count) != 1 ||
+				  atomic_long_read(pp_ref_count) != 1 ||
 				  page_is_pfmemalloc(page) ||
 				  page_to_nid(page) != numa_mem_id();
 		} else if (!priv->rx_headroom) {
@@ -476,9 +480,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
 		}
 		if (release) {
-			frags->page = NULL;
+			frags->netmem = 0;
 		} else {
-			page_pool_ref_page(page);
+			page_pool_ref_netmem(netmem);
 		}
 
 		nr++;
@@ -719,7 +723,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		int nr;
 
 		frags = ring->rx_info + (index << priv->log_rx_info);
-		va = page_address(frags[0].page) + frags[0].page_offset;
+		va = netmem_address(frags[0].netmem) + frags[0].page_offset;
 		net_prefetchw(va);
 		/*
 		 * make sure we read the CQE after we read the ownership bit
@@ -748,7 +752,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			/* Get pointer to first fragment since we haven't
 			 * skb yet and cast it to ethhdr struct
 			 */
-			dma = page_pool_get_dma_addr(frags[0].page);
+			dma = page_pool_get_dma_addr_netmem(frags[0].netmem);
 			dma += frags[0].page_offset;
 			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
 						DMA_FROM_DEVICE);
@@ -788,7 +792,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			void *orig_data;
 			u32 act;
 
-			dma = page_pool_get_dma_addr(frags[0].page);
+			dma = page_pool_get_dma_addr_netmem(frags[0].netmem);
 			dma += frags[0].page_offset;
 			dma_sync_single_for_cpu(priv->ddev, dma,
 						priv->frag_info[0].frag_size,
@@ -818,7 +822,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
 				ring->xdp_redirect++;
 				xdp_redir_flush = true;
-				frags[0].page = NULL;
+				frags[0].netmem = 0;
 				goto next;
 			}
 			ring->xdp_redirect_fail++;
@@ -828,7 +832,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			if (likely(!mlx4_en_xmit_frame(ring, frags, priv, length,
 						       cq_ring, &doorbell_pending))) {
-				frags[0].page = NULL;
+				frags[0].netmem = 0;
 				goto next;
 			}
 			trace_xdp_exception(dev, xdp_prog, act);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 87f35bcbeff8..b564a953da09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -354,7 +354,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
 	struct page_pool *pool = ring->recycle_ring->pp;
 
 	/* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
-	page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
+	page_pool_put_full_netmem(pool, tx_info->netmem, !!napi_mode);
 
 	return tx_info->nr_txbb;
 }
@@ -1191,10 +1191,10 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
 	tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
 	data = &tx_desc->data;
 
-	dma = page_pool_get_dma_addr(frame->page);
+	dma = page_pool_get_dma_addr_netmem(frame->netmem);
 
-	tx_info->page = frame->page;
-	frame->page = NULL;
+	tx_info->netmem = frame->netmem;
+	frame->netmem = 0;
 	tx_info->map0_dma = dma;
 	tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ad0d91a75184..3ef9a0a1f783 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -213,7 +213,7 @@ enum cq_type {
 struct mlx4_en_tx_info {
 	union {
 		struct sk_buff *skb;
-		struct page *page;
+		netmem_ref netmem;
 	};
 	dma_addr_t map0_dma;
 	u32 map0_byte_count;
@@ -246,7 +246,7 @@ struct mlx4_en_tx_desc {
 #define MLX4_EN_CX3_HIGH_ID 0x1005
 
 struct mlx4_en_rx_alloc {
-	struct page *page;
+	netmem_ref netmem;
 	u32 page_offset;
 };
-- 
2.17.1

To simplify struct page, an effort to separate its own descriptor from
struct page is required, and the work for the page pool is ongoing.

Use the netmem descriptor and APIs for the page pool in the netdevsim
code.

Signed-off-by: Byungchul Park
---
 drivers/net/netdevsim/netdev.c    | 19 ++++++++++---------
 drivers/net/netdevsim/netdevsim.h |  2 +-
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e36d3e846c2d..ba19870524c5 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -812,7 +812,7 @@ nsim_pp_hold_read(struct file *file, char __user *data,
 	struct netdevsim *ns = file->private_data;
 	char buf[3] = "n\n";
 
-	if (ns->page)
+	if (ns->netmem)
 		buf[0] = 'y';
 
 	return simple_read_from_buffer(data, count, ppos, buf, 2);
@@ -832,18 +832,19 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
 
 	rtnl_lock();
 	ret = count;
-	if (val == !!ns->page)
+	if (val == !!ns->netmem)
 		goto exit;
 
 	if (!netif_running(ns->netdev) && val) {
 		ret = -ENETDOWN;
 	} else if (val) {
-		ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool);
-		if (!ns->page)
+		ns->netmem = page_pool_alloc_netmems(ns->rq[0]->page_pool,
+						     GFP_ATOMIC | __GFP_NOWARN);
+		if (!ns->netmem)
 			ret = -ENOMEM;
 	} else {
-		page_pool_put_full_page(ns->page->pp, ns->page, false);
-		ns->page = NULL;
+		page_pool_put_full_netmem(netmem_get_pp(ns->netmem), ns->netmem, false);
+		ns->netmem = 0;
 	}
 
 exit:
@@ -1068,9 +1069,9 @@ void nsim_destroy(struct netdevsim *ns)
 	nsim_exit_netdevsim(ns);
 
 	/* Put this intentionally late to exercise the orphaning path */
-	if (ns->page) {
-		page_pool_put_full_page(ns->page->pp, ns->page, false);
-		ns->page = NULL;
+	if (ns->netmem) {
+		page_pool_put_full_netmem(netmem_get_pp(ns->netmem), ns->netmem, false);
+		ns->netmem = 0;
 	}
 
 	free_netdev(dev);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 809dd29fc5fe..129e005ef577 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -132,7 +132,7 @@ struct netdevsim {
 		struct debugfs_u32_array dfs_ports[2];
 	} udp_ports;
 
-	struct page *page;
+	netmem_ref netmem;
 	struct dentry *pp_dfs;
 	struct dentry *qr_dfs;
-- 
2.17.1

To simplify struct page, an effort to separate its own descriptor from
struct page is required, and the work for the page pool is ongoing.

Use the netmem descriptor and APIs for the page pool in the mt76 code.

Signed-off-by: Byungchul Park
Reviewed-by: Mina Almasry
---
 drivers/net/wireless/mediatek/mt76/dma.c        |  6 ++---
 drivers/net/wireless/mediatek/mt76/mt76.h       | 12 +++++-----
 .../net/wireless/mediatek/mt76/sdio_txrx.c      | 24 +++++++++----------
 drivers/net/wireless/mediatek/mt76/usb.c        | 10 ++++----
 4 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 35b4ec91979e..41b529b95877 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -820,10 +820,10 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 	int nr_frags = shinfo->nr_frags;
 
 	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
-		struct page *page = virt_to_head_page(data);
-		int offset = data - page_address(page) + q->buf_offset;
+		netmem_ref netmem = virt_to_head_netmem(data);
+		int offset = data - netmem_address(netmem) + q->buf_offset;
 
-		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+		skb_add_rx_frag_netmem(skb, nr_frags, netmem, offset, len, q->buf_size);
 	} else {
 		mt76_put_page_pool_buf(data, allow_direct);
 	}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 14927a92f9d1..5fbc15a8cb06 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1796,21 +1796,21 @@ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
 
 static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
 {
-	struct page *page = virt_to_head_page(buf);
+	netmem_ref netmem = virt_to_head_netmem(buf);
 
-	page_pool_put_full_page(page->pp, page, allow_direct);
+	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
 }
 
 static inline void *
 mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
 {
-	struct page *page;
+	netmem_ref netmem;
 
-	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
-	if (!page)
+	netmem = page_pool_dev_alloc_netmem(q->page_pool, offset, &size);
+	if (!netmem)
 		return NULL;
 
-	return page_address(page) + *offset;
+	return netmem_address(netmem) + *offset;
 }
 
 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index 0a927a7313a6..b1d89b6f663d 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -68,14 +68,14 @@ mt76s_build_rx_skb(void *data, int data_len, int buf_len)
 
 	skb_put_data(skb, data, len);
 	if (data_len > len) {
-		struct page *page;
+		netmem_ref netmem;
 
 		data += len;
-		page = virt_to_head_page(data);
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				page, data - page_address(page),
-				data_len - len, buf_len);
-		get_page(page);
+		netmem = virt_to_head_netmem(data);
+		skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+				       netmem, data - netmem_address(netmem),
+				       data_len - len, buf_len);
+		get_netmem(netmem);
 	}
 
 	return skb;
@@ -88,7 +88,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 	struct mt76_queue *q = &dev->q_rx[qid];
 	struct mt76_sdio *sdio = &dev->sdio;
 	int len = 0, err, i;
-	struct page *page;
+	netmem_ref netmem;
 	u8 *buf, *end;
 
 	for (i = 0; i < intr->rx.num[qid]; i++)
@@ -100,11 +100,11 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 	if (len > sdio->func->cur_blksize)
 		len = roundup(len, sdio->func->cur_blksize);
 
-	page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
-	if (!page)
+	netmem = page_to_netmem(__dev_alloc_pages(GFP_KERNEL, get_order(len)));
+	if (!netmem)
 		return -ENOMEM;
 
-	buf = page_address(page);
+	buf = netmem_address(netmem);
 
 	sdio_claim_host(sdio->func);
 	err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
@@ -112,7 +112,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 
 	if (err < 0) {
 		dev_err(dev->dev, "sdio read data failed:%d\n", err);
-		put_page(page);
+		put_netmem(netmem);
 		return err;
 	}
@@ -140,7 +140,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 		}
 		buf += round_up(len + 4, 4);
 	}
-	put_page(page);
+	put_netmem(netmem);
 
 	spin_lock_bh(&q->lock);
 	q->head = (q->head + i) % q->ndesc;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index f9e67b8c3b3c..1ea80c87a839 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -478,7 +478,7 @@ mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
 	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
 	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
-		struct page *page;
+		netmem_ref netmem;
 
 		/* slow path, not enough space for data and
 		 * skb_shared_info
@@ -489,10 +489,10 @@ mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
 		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
 		data += head_room + MT_SKB_HEAD_LEN;
-		page = virt_to_head_page(data);
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				page, data - page_address(page),
-				len - MT_SKB_HEAD_LEN, buf_size);
+		netmem = virt_to_head_netmem(data);
+		skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+				       netmem, data - netmem_address(netmem),
+				       len - MT_SKB_HEAD_LEN, buf_size);
 
 		return skb;
 	}
-- 
2.17.1
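Taken together, the driver conversions above follow one mechanical
pattern; a condensed, hypothetical sketch of that pattern (struct and
function names are illustrative only, not taken from any driver):

	struct example_rx_alloc {
		netmem_ref netmem;	/* was: struct page *page */
		u32 page_offset;
	};

	static int example_refill(struct page_pool *pp, struct example_rx_alloc *e)
	{
		dma_addr_t dma;

		if (!e->netmem) {
			/* page_pool_alloc_pages() -> page_pool_alloc_netmems() */
			e->netmem = page_pool_alloc_netmems(pp, GFP_ATOMIC);
			if (!e->netmem)
				return -ENOMEM;
		}
		/* page_pool_get_dma_addr() -> page_pool_get_dma_addr_netmem() */
		dma = page_pool_get_dma_addr_netmem(e->netmem);
		(void)dma;	/* a real driver would program dma into its RX descriptor */
		return 0;
	}

	static void example_free(struct page_pool *pp, struct example_rx_alloc *e)
	{
		/* page_pool_put_full_page() -> page_pool_put_full_netmem();
		 * note a netmem_ref is cleared with 0, not NULL.
		 */
		if (e->netmem)
			page_pool_put_full_netmem(pp, e->netmem, false);
		e->netmem = 0;
	}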