There are network cards that support receive buffers larger than 4K, and that can be vastly beneficial for performance, and benchmarks for this patch showed up to 30% CPU util improvement for 32K vs 4K buffers. Allow zcrx users to specify the size in struct io_uring_zcrx_ifq_reg::rx_buf_len. If set to zero, zcrx will use a default value. zcrx will check and fail if the memory backing the area can't be split into physically contiguous chunks of the required size. This is stricter than strictly necessary — only the DMA addresses need to be contiguous — but relaxing the check is beyond the scope of this series. Signed-off-by: Pavel Begunkov --- Depends on networking patches include/uapi/linux/io_uring.h | 2 +- io_uring/zcrx.c | 39 ++++++++++++++++++++++++++++++----- 2 files changed, 35 insertions(+), 6 deletions(-) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index b5b23c0d5283..3184f7e7f1f2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -1082,7 +1082,7 @@ struct io_uring_zcrx_ifq_reg { struct io_uring_zcrx_offsets offsets; __u32 zcrx_id; - __u32 __resv2; + __u32 rx_buf_len; __u64 __resv[3]; }; diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c index b99cf2c6670a..30dbdf1cff13 100644 --- a/io_uring/zcrx.c +++ b/io_uring/zcrx.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -55,6 +56,18 @@ static inline struct page *io_zcrx_iov_page(const struct net_iov *niov) return area->mem.pages[net_iov_idx(niov) << niov_pages_shift]; } +static int io_area_max_shift(struct io_zcrx_mem *mem) +{ + struct sg_table *sgt = mem->sgt; + struct scatterlist *sg; + unsigned shift = -1U; + unsigned i; + + for_each_sgtable_dma_sg(sgt, sg, i) + shift = min(shift, __ffs(sg->length)); + return shift; +} + static int io_populate_area_dma(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) { @@ -416,12 +429,21 @@ static int io_zcrx_append_area(struct io_zcrx_ifq *ifq, } static int io_zcrx_create_area(struct io_zcrx_ifq *ifq, - struct io_uring_zcrx_area_reg 
*area_reg) + struct io_uring_zcrx_area_reg *area_reg, + struct io_uring_zcrx_ifq_reg *reg) { + int buf_size_shift = PAGE_SHIFT; struct io_zcrx_area *area; unsigned nr_iovs; int i, ret; + if (reg->rx_buf_len) { + if (!is_power_of_2(reg->rx_buf_len) || + reg->rx_buf_len < PAGE_SIZE) + return -EINVAL; + buf_size_shift = ilog2(reg->rx_buf_len); + } + ret = -ENOMEM; area = kzalloc(sizeof(*area), GFP_KERNEL); if (!area) @@ -432,7 +454,12 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq, if (ret) goto err; - ifq->niov_shift = PAGE_SHIFT; + if (buf_size_shift > io_area_max_shift(&area->mem)) { + ret = -ERANGE; + goto err; + } + + ifq->niov_shift = buf_size_shift; nr_iovs = area->mem.size >> ifq->niov_shift; area->nia.num_niovs = nr_iovs; @@ -742,8 +769,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, return -EINVAL; if (copy_from_user(®, arg, sizeof(reg))) return -EFAULT; - if (!mem_is_zero(®.__resv, sizeof(reg.__resv)) || - reg.__resv2 || reg.zcrx_id) + if (!mem_is_zero(®.__resv, sizeof(reg.__resv)) || reg.zcrx_id) return -EINVAL; if (reg.flags & ZCRX_REG_IMPORT) return import_zcrx(ctx, arg, ®); @@ -800,10 +826,11 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, } get_device(ifq->dev); - ret = io_zcrx_create_area(ifq, &area); + ret = io_zcrx_create_area(ifq, &area, ®); if (ret) goto netdev_put_unlock; + mp_param.rx_buf_len = 1U << ifq->niov_shift; mp_param.mp_ops = &io_uring_pp_zc_ops; mp_param.mp_priv = ifq; ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL); @@ -821,6 +848,8 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx, goto err; } + reg.rx_buf_len = 1U << ifq->niov_shift; + if (copy_to_user(arg, ®, sizeof(reg)) || copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) || copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) { -- 2.52.0