Add a new pagepool_order member to the fec_enet_private struct so that the
page size used by an instance's page pool can be configured dynamically.
This removes the hardcoded PAGE_SIZE assumptions from the RX path.

Signed-off-by: Shenwei Wang
---
 drivers/net/ethernet/freescale/fec.h      | 1 +
 drivers/net/ethernet/freescale/fec_main.c | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2969088dda09..47317346b2f3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -620,6 +620,7 @@ struct fec_enet_private {
 	unsigned int total_tx_ring_size;
 	unsigned int total_rx_ring_size;
 	unsigned int max_buf_size;
+	unsigned int pagepool_order;
 
 	struct platform_device *pdev;
 
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 24ce808b0c05..b6ce56051c79 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1783,7 +1783,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
 	 * These get messed up if we get called due to a busy condition.
 	 */
 	bdp = rxq->bd.cur;
-	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
+	xdp_init_buff(&xdp, (PAGE_SIZE << fep->pagepool_order), &rxq->xdp_rxq);
 
 	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
@@ -1853,7 +1853,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
-		skb = build_skb(page_address(page), PAGE_SIZE);
+		skb = build_skb(page_address(page), (PAGE_SIZE << fep->pagepool_order));
 		if (unlikely(!skb)) {
 			page_pool_recycle_direct(rxq->page_pool, page);
 			ndev->stats.rx_dropped++;
@@ -4562,6 +4562,7 @@ fec_probe(struct platform_device *pdev)
 	fec_enet_clk_enable(ndev, false);
 	pinctrl_pm_select_sleep_state(&pdev->dev);
 
+	fep->pagepool_order = 0;
 	fep->max_buf_size = PKT_MAXBUF_SIZE;
 	ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
 
-- 
2.43.0
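
Note (illustrative only, not part of this patch): this change still leaves
pagepool_order fixed at 0 in fec_probe(); a follow-up could derive it from
the maximum frame size so that frames larger than PAGE_SIZE fit in a single
higher-order page-pool buffer. The sketch below uses a hypothetical helper
name, fec_enet_calc_pagepool_order(), which does not exist in the driver; it
only shows one way such an order could be chosen.

/* Hypothetical helper, for illustration only: pick the smallest page
 * order whose buffer holds one frame plus the XDP headroom and the
 * skb_shared_info tailroom reserved by the RX path. Would live in
 * fec_main.c, which already pulls in the needed headers.
 */
static unsigned int fec_enet_calc_pagepool_order(unsigned int max_frame_len)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) <
	       max_frame_len + XDP_PACKET_HEADROOM +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
		order++;

	return order;
}

fec_probe() (or the MTU change path) could then assign
fep->pagepool_order from this helper instead of hard-coding it to 0.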