We need to be more careful about when direct page pool recycling is enabled in preparation for queue ops support. Don't set the NAPI pointer at pool allocation time; instead, call page_pool_enable_direct_recycling() from the function that activates the queue (once the config can no longer fail). Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index 44d9f1598820..958793be21a1 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -1528,7 +1528,6 @@ fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_napi_vector *nv, .dma_dir = DMA_BIDIRECTIONAL, .offset = 0, .max_len = PAGE_SIZE, - .napi = &nv->napi, .netdev = fbn->netdev, .queue_idx = rxq_idx, }; @@ -2615,6 +2614,11 @@ static void __fbnic_nv_enable(struct fbnic_napi_vector *nv) for (j = 0; j < nv->rxt_count; j++, t++) { struct fbnic_q_triad *qt = &nv->qt[t]; + page_pool_enable_direct_recycling(qt->sub0.page_pool, + &nv->napi); + page_pool_enable_direct_recycling(qt->sub1.page_pool, + &nv->napi); + fbnic_enable_bdq(&qt->sub0, &qt->sub1); fbnic_config_drop_mode_rcq(nv, &qt->cmpl); fbnic_enable_rcq(nv, &qt->cmpl); -- 2.50.1