mlx5 pokes into the rxq state to check if the queue has a memory
provider, and therefore whether it may produce unreadable memory.
Add a helper for doing this in the page pool API. fbnic will want
a similar thing (though for a slightly different reason).

Signed-off-by: Jakub Kicinski
---
 include/net/page_pool/helpers.h                   |  9 +++++++++
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 10 ++--------
 net/core/page_pool.c                              |  8 ++++++++
 3 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index aa3719f28216..307c2436fa12 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -505,6 +505,15 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
 		page_pool_update_nid(pool, new_nid);
 }
 
+bool __page_pool_rxq_wants_unreadable(struct net_device *dev, unsigned int qid);
+
+static inline bool
+page_pool_rxq_wants_unreadable(const struct page_pool_params *pp_params)
+{
+	return __page_pool_rxq_wants_unreadable(pp_params->netdev,
+						pp_params->queue_idx);
+}
+
 static inline bool page_pool_is_unreadable(struct page_pool *pool)
 {
 	return !!pool->mp_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 21bb88c5d3dc..cee96ded300e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include <net/page_pool/helpers.h>
 #include
 #include
 #include
@@ -777,13 +778,6 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
 	bitmap_free(rq->mpwqe.shampo->bitmap);
 }
 
-static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
-{
-	struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix);
-
-	return !!rxq->mp_params.mp_ops;
-}
-
 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
 				struct mlx5e_params *params,
 				struct mlx5e_rq_param *rqp,
@@ -822,7 +816,7 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
 	hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
 		       MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
 
-	if (mlx5_rq_needs_separate_hd_pool(rq)) {
+	if (__page_pool_rxq_wants_unreadable(rq->netdev, rq->ix)) {
 		/* Separate page pool for shampo headers */
 		struct page_pool_params pp_params = { };
 
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 343a6cac21e3..9f087a6742c3 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -190,6 +190,14 @@ static void page_pool_struct_check(void)
 			     PAGE_POOL_FRAG_GROUP_ALIGN);
 }
 
+bool __page_pool_rxq_wants_unreadable(struct net_device *dev, unsigned int qid)
+{
+	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, qid);
+
+	return !!rxq->mp_params.mp_ops;
+}
+EXPORT_SYMBOL(__page_pool_rxq_wants_unreadable);
+
 static int page_pool_init(struct page_pool *pool,
 			  const struct page_pool_params *params,
 			  int cpuid)
-- 
2.50.1
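
For context, a sketch (not part of the patch) of how a driver could
consume the new inline wrapper when creating its page pool. The
"myvnic" function name, pool sizing, and error handling below are
hypothetical; only page_pool_rxq_wants_unreadable(), the
page_pool_params fields, and PP_FLAG_ALLOW_UNREADABLE_NETMEM come
from the page pool API:

/* Hypothetical "myvnic" driver code, for illustration only. */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

static int myvnic_create_rx_page_pool(struct net_device *dev,
				      unsigned int qid,
				      struct page_pool **ppool)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= 1024,		/* made-up sizing */
		.nid		= NUMA_NO_NODE,
		.dev		= dev->dev.parent,
		.netdev		= dev,
		.queue_idx	= qid,
	};
	struct page_pool *pp;

	/* If a memory provider is bound to this rx queue, the pool
	 * may hand out unreadable (e.g. device memory) buffers.
	 * Opt in to that, and skip any configuration which assumes
	 * the CPU can read the payload pages.
	 */
	if (page_pool_rxq_wants_unreadable(&pp_params))
		pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	*ppool = pp;
	return 0;
}

The two-level API in the patch presumably exists so that the inline
wrapper in helpers.h does not need struct netdev_rx_queue internals;
the out-of-line __page_pool_rxq_wants_unreadable() is also usable
directly, as mlx5 does above, since it performs the check before its
pp_params have been filled in.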