Rename lock_two_rings() to io_lock_two_rings() and export it. This will
be used when sharing a src ifq owned by one ring with another ring.
During this process both rings need to be locked in a deterministic
order, as is done by the current user, io_clone_buffers().

Signed-off-by: David Wei
---
 io_uring/rsrc.c | 4 ++--
 io_uring/rsrc.h | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index d787c16dc1c3..d245b7592eee 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1148,7 +1148,7 @@ int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
 }
 
 /* Lock two rings at once. The rings must be different! */
-static void lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2)
+void io_lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2)
 {
 	if (ctx1 > ctx2)
 		swap(ctx1, ctx2);
@@ -1299,7 +1299,7 @@ int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
 	src_ctx = file->private_data;
 	if (src_ctx != ctx) {
 		mutex_unlock(&ctx->uring_lock);
-		lock_two_rings(ctx, src_ctx);
+		io_lock_two_rings(ctx, src_ctx);
 
 		if (src_ctx->submitter_task &&
 		    src_ctx->submitter_task != current) {
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index a3ca6ba66596..b002c4a5a8cd 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -70,6 +70,7 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
 		      const struct iovec __user *uvec, size_t uvec_segs);
 
+void io_lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2);
 int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
-- 
2.47.3
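For context, the deterministic order here is the usual lock-by-address idiom:
whichever ring sits at the lower address is locked first, so two tasks locking
the same pair of rings cannot deadlock against each other. Below is a minimal
userspace sketch of that idiom, not part of the patch; the struct and helper
names are made up and pthread mutexes stand in for the rings' uring_lock.

#include <pthread.h>
#include <stdio.h>

struct fake_ring {
	pthread_mutex_t lock;
	int id;
};

static struct fake_ring r1 = { .lock = PTHREAD_MUTEX_INITIALIZER, .id = 1 };
static struct fake_ring r2 = { .lock = PTHREAD_MUTEX_INITIALIZER, .id = 2 };

static void lock_both(struct fake_ring *a, struct fake_ring *b)
{
	/* Always take the lower-addressed lock first, like swap() in the patch. */
	if (a > b) {
		struct fake_ring *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

static void unlock_both(struct fake_ring *a, struct fake_ring *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	/* Argument order does not matter; both calls acquire in the same order. */
	lock_both(&r1, &r2);
	printf("locked %d and %d\n", r1.id, r2.id);
	unlock_both(&r1, &r2);

	lock_both(&r2, &r1);
	printf("locked %d and %d, no deadlock\n", r2.id, r1.id);
	unlock_both(&r2, &r1);
	return 0;
}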
Add a refcount to struct io_zcrx_ifq to track the number of rings that
share it. For now, this is only ever 1, i.e. not shared, but it will be
larger once shared ifqs are added.

This ref is decremented and tested in io_shutdown_zcrx_ifqs() to ensure
that an ifq is not cleaned up while there are still rings using it. Note
that io_shutdown_zcrx_ifqs() may be called in a loop in
io_ring_exit_work() while waiting for ctx->refs to drop to 0. Use XArray
marks to ensure that the refcount decrement only happens once.

The cleanup functions io_zcrx_scrub() and io_close_queue() only take ifq
locks and do not need anything from the ring ctx, so they are safe to
call from any ring.

Opted for a bog-standard refcount_t. The inc and dec ops are expected to
happen only during the slow setup/teardown paths, and a src ifq is only
expected to be shared a handful of times at most.

Signed-off-by: David Wei
---
 io_uring/zcrx.c | 18 ++++++++++++++++--
 io_uring/zcrx.h |  2 ++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index a816f5902091..569cc0338acb 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -587,6 +587,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 	if (!ifq)
 		return -ENOMEM;
 	ifq->rq_entries = reg.rq_entries;
+	refcount_set(&ifq->refs, 1);
 
 	scoped_guard(mutex, &ctx->mmap_lock) {
 		/* preallocate id */
@@ -730,8 +731,21 @@ void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
 	lockdep_assert_held(&ctx->uring_lock);
 
 	xa_for_each(&ctx->zcrx_ctxs, index, ifq) {
-		io_zcrx_scrub(ifq);
-		io_close_queue(ifq);
+		if (xa_get_mark(&ctx->zcrx_ctxs, index, XA_MARK_0))
+			continue;
+
+		/* Safe to clean up from any ring. */
+		if (refcount_dec_and_test(&ifq->refs)) {
+			io_zcrx_scrub(ifq);
+			io_close_queue(ifq);
+		}
+
+		/*
+		 * This is called in a loop in io_ring_exit_work() until
+		 * ctx->refs drops to 0. Use marks to ensure refcounts are only
+		 * decremented once per ifq per ring.
+		 */
+		xa_set_mark(&ctx->zcrx_ctxs, index, XA_MARK_0);
 	}
 }
 
diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
index 33ef61503092..566d519cbaf6 100644
--- a/io_uring/zcrx.h
+++ b/io_uring/zcrx.h
@@ -60,6 +60,8 @@ struct io_zcrx_ifq {
 	 */
 	struct mutex			pp_lock;
 	struct io_mapped_region		region;
+
+	refcount_t			refs;
 };
 
 #if defined(CONFIG_IO_URING_ZCRX)
-- 
2.47.3
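To illustrate the lifetime scheme outside the kernel: every ring sharing the
ifq holds one reference, shutdown may run any number of times, and a per-ring
mark makes the drop idempotent so only the last ring actually cleans up. The
sketch below is illustrative only; all names are made up, a plain bool stands
in for XA_MARK_0 and a C11 atomic stands in for refcount_t.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_ifq {
	atomic_int refs;		/* one ref per ring sharing the ifq */
};

struct fake_ring {
	struct fake_ifq *ifq;
	bool ref_dropped;		/* plays the role of XA_MARK_0 */
};

/* May be called repeatedly, like io_shutdown_zcrx_ifqs() in exit work. */
static void shutdown_ifqs(struct fake_ring *ring)
{
	if (ring->ref_dropped)
		return;			/* already dropped on a previous pass */
	ring->ref_dropped = true;

	/* Last reference gone: now it is safe to scrub and close the queue. */
	if (atomic_fetch_sub(&ring->ifq->refs, 1) == 1)
		printf("last ring gone, cleaning up ifq\n");
}

int main(void)
{
	struct fake_ifq ifq;
	struct fake_ring a, b;

	atomic_init(&ifq.refs, 2);	/* shared by two rings */
	a = (struct fake_ring){ &ifq, false };
	b = (struct fake_ring){ &ifq, false };

	shutdown_ifqs(&a);
	shutdown_ifqs(&a);		/* retried in a loop: no double drop */
	shutdown_ifqs(&b);		/* last ref: cleanup happens exactly once */
	return 0;
}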
Add a way to share an ifq owned by a src ring that is real, i.e. bound
to a HW RX queue, with other rings. This is done by passing a new flag
IORING_ZCRX_IFQ_REG_SHARE in the registration struct
io_uring_zcrx_ifq_reg, alongside the fd of the src ring and the id of
the ifq to be shared.

To prevent the src ring or ifq from being cleaned up or freed while
there are still shared ifqs, take the appropriate refs on the src ring
(ctx->refs) and the src ifq (ifq->refs).

Signed-off-by: David Wei
---
 include/uapi/linux/io_uring.h |  4 ++
 io_uring/zcrx.c               | 74 ++++++++++++++++++++++++++++++++++-
 2 files changed, 76 insertions(+), 2 deletions(-)

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 04797a9b76bc..4da4552a4215 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -1063,6 +1063,10 @@ struct io_uring_zcrx_area_reg {
 	__u64	__resv2[2];
 };
 
+enum io_uring_zcrx_ifq_reg_flags {
+	IORING_ZCRX_IFQ_REG_SHARE	= 1,
+};
+
 /*
  * Argument for IORING_REGISTER_ZCRX_IFQ
  */
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 569cc0338acb..7418c959390a 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -22,10 +22,10 @@
 #include
 
 #include "io_uring.h"
-#include "kbuf.h"
 #include "memmap.h"
 #include "zcrx.h"
 #include "rsrc.h"
+#include "register.h"
 
 #define IO_ZCRX_AREA_SUPPORTED_FLAGS	(IORING_ZCRX_AREA_DMABUF)
@@ -541,6 +541,67 @@ struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
 	return ifq ? &ifq->region : NULL;
 }
 
+static int io_share_zcrx_ifq(struct io_ring_ctx *ctx,
+			     struct io_uring_zcrx_ifq_reg __user *arg,
+			     struct io_uring_zcrx_ifq_reg *reg)
+{
+	struct io_ring_ctx *src_ctx;
+	struct io_zcrx_ifq *src_ifq;
+	struct file *file;
+	int src_fd, ret;
+	u32 src_id, id;
+
+	src_fd = reg->if_idx;
+	src_id = reg->if_rxq;
+
+	file = io_uring_register_get_file(src_fd, false);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	src_ctx = file->private_data;
+	if (src_ctx == ctx)
+		return -EBADFD;
+
+	mutex_unlock(&ctx->uring_lock);
+	io_lock_two_rings(ctx, src_ctx);
+
+	ret = -EINVAL;
+	src_ifq = xa_load(&src_ctx->zcrx_ctxs, src_id);
+	if (!src_ifq)
+		goto err_unlock;
+
+	percpu_ref_get(&src_ctx->refs);
+	refcount_inc(&src_ifq->refs);
+
+	scoped_guard(mutex, &ctx->mmap_lock) {
+		ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
+		if (ret)
+			goto err_unlock;
+
+		ret = -ENOMEM;
+		if (xa_store(&ctx->zcrx_ctxs, id, src_ifq, GFP_KERNEL)) {
+			xa_erase(&ctx->zcrx_ctxs, id);
+			goto err_unlock;
+		}
+	}
+
+	reg->zcrx_id = id;
+	if (copy_to_user(arg, reg, sizeof(*reg))) {
+		ret = -EFAULT;
+		goto err;
+	}
+	mutex_unlock(&src_ctx->uring_lock);
+	fput(file);
+	return 0;
+err:
+	scoped_guard(mutex, &ctx->mmap_lock)
+		xa_erase(&ctx->zcrx_ctxs, id);
+err_unlock:
+	mutex_unlock(&src_ctx->uring_lock);
+	fput(file);
+	return ret;
+}
+
 int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 			 struct io_uring_zcrx_ifq_reg __user *arg)
 {
@@ -566,6 +627,8 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 		return -EINVAL;
 	if (copy_from_user(&reg, arg, sizeof(reg)))
 		return -EFAULT;
+	if (reg.flags & IORING_ZCRX_IFQ_REG_SHARE)
+		return io_share_zcrx_ifq(ctx, arg, &reg);
 	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
 		return -EFAULT;
 	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
@@ -663,7 +726,7 @@ void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
 			if (ifq)
 				xa_erase(&ctx->zcrx_ctxs, id);
 		}
-		if (!ifq)
+		if (!ifq || ctx != ifq->ctx)
 			break;
 		io_zcrx_ifq_free(ifq);
 	}
@@ -734,6 +797,13 @@ void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
 		if (xa_get_mark(&ctx->zcrx_ctxs, index, XA_MARK_0))
 			continue;
 
+		/*
+		 * Only shared ifqs want to put ctx->refs on the owning ifq
+		 * ring. This matches the get in io_share_zcrx_ifq().
+		 */
+		if (ctx != ifq->ctx)
+			percpu_ref_put(&ifq->ctx->refs);
+
 		/* Safe to clean up from any ring. */
 		if (refcount_dec_and_test(&ifq->refs)) {
 			io_zcrx_scrub(ifq);
 			io_close_queue(ifq);
-- 
2.47.3
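For a sense of how userspace would drive this, here is a rough sketch of
attaching a second ring to an ifq owned by a src ring. Only the flag and the
field reuse (if_idx carrying the src ring fd, if_rxq carrying the src zcrx id)
come from the patch; the helper name, the raw io_uring_register syscall usage
and the omitted ring setup and error handling are illustrative assumptions,
and it needs headers that already carry the new definitions.

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int zcrx_share_ifq(int ring_fd, int src_ring_fd, __u32 src_zcrx_id,
			  struct io_uring_zcrx_ifq_reg *reg)
{
	memset(reg, 0, sizeof(*reg));
	reg->flags = IORING_ZCRX_IFQ_REG_SHARE;
	reg->if_idx = src_ring_fd;	/* reused: fd of the ring owning the ifq */
	reg->if_rxq = src_zcrx_id;	/* reused: id of the ifq to share */

	/* On success the kernel fills in reg->zcrx_id for this ring. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ZCRX_IFQ, reg, 1);
}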