Currently, creating an io_uring with IORING_SETUP_IOPOLL requires all
requests issued to it to support iopoll. This prevents, for example,
using ublk zero-copy together with IORING_SETUP_IOPOLL, as ublk
zero-copy buffer registrations are performed using a uring_cmd. There's
no technical reason why these non-iopoll uring_cmds can't be supported.
They will complete either synchronously or via an external mechanism
that calls io_uring_cmd_done(), so they don't need to be polled.

Allow uring_cmd requests to be issued to IORING_SETUP_IOPOLL io_urings
even if their files don't implement ->uring_cmd_iopoll(). For these
uring_cmd requests, skip initializing struct io_kiocb's iopoll fields,
don't insert the request into iopoll_list, and take the
io_req_complete_defer() or io_req_task_work_add() path in
__io_uring_cmd_done() instead of setting the iopoll_completed flag.

Also allow io_uring_cmd_mark_cancelable() to be called on these
uring_cmds. Assert that io_uring_cmd_mark_cancelable() is only called
on non-IORING_SETUP_IOPOLL io_urings or on uring_cmds whose files don't
implement ->uring_cmd_iopoll().

Signed-off-by: Caleb Sander Mateos
---
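For illustration, here is a minimal userspace sketch of what this
change permits. It assumes liburing plus the ublk zero-copy UAPI
(UBLK_U_IO_REGISTER_IO_BUF and struct ublksrv_io_cmd from
<linux/ublk_cmd.h>); the device path and the queue/tag/buffer-index
values are placeholders, and a real ublk server does considerably more
setup (registering a sparse buffer table, fetching I/O, etc.) around
this call:

/* Sketch only; build with: gcc iopoll_cmd.c -luring */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct ublksrv_io_cmd *cmd;
	int fd;

	/* IOPOLL ring; ublk uring_cmds also need the 128-byte SQE layout */
	if (io_uring_queue_init(8, &ring,
				IORING_SETUP_IOPOLL | IORING_SETUP_SQE128))
		return 1;

	/* ublk char dev: has ->uring_cmd() but no ->uring_cmd_iopoll() */
	fd = open("/dev/ublkc0", O_RDWR);
	if (fd < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
	sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;
	cmd = (struct ublksrv_io_cmd *)sqe->cmd;
	cmd->q_id = 0;	/* placeholder: queue/tag of the ublk I/O to map */
	cmd->tag = 0;
	cmd->addr = 0;	/* placeholder: registered-buffer table index */

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe))
		return 1;
	/* without this patch, cqe->res here is always -EOPNOTSUPP */
	printf("register buf: %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

The buffer registration completes inline in ublk's ->uring_cmd()
handler, so there is never anything to poll for; the changes below keep
such requests out of the iopoll_list and off the iopoll completion
path.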
 io_uring/io_uring.c  |  4 +++-
 io_uring/uring_cmd.c | 11 +++++------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c45af82dda3d..4e68a5168894 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1417,11 +1417,13 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
 		ret = 0;
 
 		/* If the op doesn't have a file, we're not polling for it */
-		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
+		if ((req->ctx->flags & IORING_SETUP_IOPOLL) &&
+		    def->iopoll_queue && (!io_is_uring_cmd(req) ||
+					  req->file->f_op->uring_cmd_iopoll))
 			io_iopoll_req_issued(req, issue_flags);
 	}
 
 	return ret;
 }
 
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index ee7b49f47cb5..8df52e8f1c1b 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -108,12 +108,12 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 	/*
 	 * Doing cancelations on IOPOLL requests are not supported. Both
 	 * because they can't get canceled in the block stack, but also
 	 * because iopoll completion data overlaps with the hash_node used
 	 * for tracking.
 	 */
-	if (ctx->flags & IORING_SETUP_IOPOLL)
-		return;
+	WARN_ON_ONCE(ctx->flags & IORING_SETUP_IOPOLL &&
+		     req->file->f_op->uring_cmd_iopoll);
 	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
 		cmd->flags |= IORING_URING_CMD_CANCELABLE;
 		io_ring_submit_lock(ctx, issue_flags);
 		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
@@ -165,11 +165,12 @@ void __io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
 		if (req->ctx->flags & IORING_SETUP_CQE_MIXED)
 			req->cqe.flags |= IORING_CQE_F_32;
 		io_req_set_cqe32_extra(req, res2, 0);
 	}
 	io_req_uring_cleanup(req, issue_flags);
-	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
+	if (req->ctx->flags & IORING_SETUP_IOPOLL &&
+	    req->file->f_op->uring_cmd_iopoll) {
 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
 		smp_store_release(&req->iopoll_completed, 1);
 	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
 		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
 			return;
@@ -255,13 +256,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 		issue_flags |= IO_URING_F_SQE128;
 	if (ctx->flags & (IORING_SETUP_CQE32 | IORING_SETUP_CQE_MIXED))
 		issue_flags |= IO_URING_F_CQE32;
 	if (io_is_compat(ctx))
 		issue_flags |= IO_URING_F_COMPAT;
-	if (ctx->flags & IORING_SETUP_IOPOLL) {
-		if (!file->f_op->uring_cmd_iopoll)
-			return -EOPNOTSUPP;
+	if (ctx->flags & IORING_SETUP_IOPOLL && file->f_op->uring_cmd_iopoll) {
 		issue_flags |= IO_URING_F_IOPOLL;
 		req->iopoll_completed = 0;
 		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
 			/* make sure every req only blocks once */
 			req->flags &= ~REQ_F_IOPOLL_STATE;
-- 
2.45.2