libeth has been using netmems for quite some time already, so in order to
support unreadable frags / memory providers, it only needs to set
PP_FLAG_ALLOW_UNREADABLE_NETMEM when needed.

Also add a couple of sanity checks to make sure the driver didn't mess up
the configuration options and, when an MP is installed, always return a
truesize equal to PAGE_SIZE, so that libeth_rx_alloc() never tries to
allocate frags. Memory providers manage buffers on their own and expect a
1:1 buffer / HW Rx descriptor association.

Bonus: mention in the libeth_sqe_type description that LIBETH_SQE_EMPTY
should also be used for netmem Tx SQEs -- they don't need DMA unmapping.

Reviewed-by: Jacob Keller
Reviewed-by: Aleksandr Loktionov
Signed-off-by: Alexander Lobakin
---
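Illustration only, not part of the commit: a minimal sketch of how a
consumer driver could set up its fill queues so that the new check passes
when an unreadable memory provider is bound to an Rx queue. The helper
name example_create_fqs(), its parameters and the napi argument passed to
libeth_rx_fq_create() are assumptions for the sketch, not code from this
series.

#include <net/libeth/rx.h>

static int example_create_fqs(struct napi_struct *napi, u32 count,
			      struct libeth_fq *hdr_fq,
			      struct libeth_fq *pl_fq)
{
	int err;

	*hdr_fq = (struct libeth_fq){
		.count	= count,
		.type	= LIBETH_FQE_HDR,	/* headers stay readable */
	};
	*pl_fq = (struct libeth_fq){
		.count	= count,
		.type	= LIBETH_FQE_MTU,
		.hsplit	= true,			/* payload may be unreadable */
	};

	err = libeth_rx_fq_create(hdr_fq, napi);
	if (err)
		return err;

	/* Returns -EINVAL when an MP is installed but the configuration
	 * is invalid, e.g. XDP is enabled or header split is disabled.
	 */
	err = libeth_rx_fq_create(pl_fq, napi);
	if (err) {
		libeth_rx_fq_destroy(hdr_fq);
		return err;
	}

	/* With an MP bound, pl_fq->truesize is PAGE_SIZE here, so
	 * libeth_rx_alloc() never splits a buffer into frags and keeps
	 * the 1:1 buffer / HW Rx descriptor association.
	 */
	return 0;
}

The header pool intentionally never gets PP_FLAG_ALLOW_UNREADABLE_NETMEM:
headers must stay in kernel memory so the stack can parse them, which is
exactly what the LIBETH_FQE_HDR branch of the new check enforces.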
 include/net/libeth/tx.h                |  2 +-
 drivers/net/ethernet/intel/libeth/rx.c | 45 ++++++++++++++++++++++++++
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
index c3db5c6f1641..a66fc2b3a114 100644
--- a/include/net/libeth/tx.h
+++ b/include/net/libeth/tx.h
@@ -12,7 +12,7 @@
 
 /**
  * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
- * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action required
+ * @LIBETH_SQE_EMPTY: empty OR netmem/XDP_TX/XSk frame, no action required
  * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
  * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
  * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 9ac3a1448b2f..9b45c9cdd599 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -6,6 +6,7 @@
 
 #include
 #include
+#include
 
 /* Rx buffer management */
 
@@ -139,9 +140,50 @@ static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
 	fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
 	fq->truesize = fq->buf_len;
 
+	/*
+	 * Allow frags only for kernel pages. `fq->truesize == pp->max_len`
+	 * will always fall back to regular page_pool_alloc_netmems()
+	 * regardless of the MTU / FQ buffer size.
+	 */
+	if (pp->flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM)
+		fq->truesize = pp->max_len;
+
 	return true;
 }
 
+/**
+ * libeth_rx_page_pool_check_unread - check input params for unreadable MPs
+ * @fq: buffer queue to check
+ * @pp: &page_pool_params for the queue
+ *
+ * Make sure we don't create an invalid pool with full-frame unreadable
+ * buffers, bidirectional unreadable buffers and so on, and configure the
+ * ZC payload pool accordingly.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_check_unread(const struct libeth_fq *fq,
+					     struct page_pool_params *pp)
+{
+	if (!pp->netdev)
+		return true;
+
+	if (!netif_rxq_has_unreadable_mp(pp->netdev, pp->queue_idx))
+		return true;
+
+	/* For now, the core stack doesn't allow XDP with unreadable frags */
+	if (fq->xdp)
+		return false;
+
+	/* It should be either a header pool or a ZC payload pool */
+	if (fq->type == LIBETH_FQE_HDR)
+		return !fq->hsplit;
+
+	pp->flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+
+	return fq->hsplit;
+}
+
 /**
  * libeth_rx_fq_create - create a PP with the default libeth settings
  * @fq: buffer queue struct to fill
@@ -166,6 +208,9 @@ int libeth_rx_fq_create(struct libeth_fq *fq, void *napi_dev)
 	struct page_pool *pool;
 	int ret;
 
+	if (!libeth_rx_page_pool_check_unread(fq, &pp))
+		return -EINVAL;
+
 	pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 
 	if (!fq->hsplit)
-- 
2.52.0
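Illustration only, appended after the patch: a sketch of the driver-side
Tx logic implied by the LIBETH_SQE_EMPTY wording change. The helper name
example_fill_frag_sqe() is hypothetical, and skb_frag_is_net_iov() is an
assumption for how a driver would detect a netmem-backed frag.

#include <linux/skbuff.h>
#include <net/libeth/tx.h>

static void example_fill_frag_sqe(struct libeth_sqe *sqe,
				  const skb_frag_t *frag)
{
	if (skb_frag_is_net_iov(frag)) {
		/* Netmem frag: the driver didn't map it, so there's
		 * nothing to unmap or free on completion --
		 * libeth_tx_complete() takes no action for
		 * LIBETH_SQE_EMPTY entries.
		 */
		sqe->type = LIBETH_SQE_EMPTY;
		return;
	}

	/* Regular kernel-memory frag: unmap its DMA mapping on completion */
	sqe->type = LIBETH_SQE_FRAG;
	/* ... record the DMA address/len for the unmap as usual ... */
}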