Add UAPI flag IORING_URING_CMD_MULTISHOT to support a simple multishot
uring_cmd:

- notify userspace to handle an event; the typical use case is notifying
  userspace of a device interrupt event, which is a really generic use case

- the device doesn't need to support poll(), because the event may
  originate from multiple sources device-wide, such as multiple queues

- add two APIs: io_uring_cmd_select_buffer() selects a buffer from the
  provided buffer group, and io_uring_mshot_cmd_post_cqe() posts a CQE
  after the event data has been pushed to the selected buffer

One ublk use case is here:

https://github.com/ming1/linux/commits/ublk-devel/

Signed-off-by: Ming Lei
---
 include/linux/io_uring/cmd.h  | 27 +++++++++++++++
 include/uapi/linux/io_uring.h |  9 ++++-
 io_uring/opdef.c              |  1 +
 io_uring/uring_cmd.c          | 64 ++++++++++++++++++++++++++++++++++-
 4 files changed, 99 insertions(+), 2 deletions(-)

diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index cfa6d0c0c322..5a72399bfa77 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -70,6 +70,22 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 /* Execute the request from a blocking context */
 void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);
 
+/*
+ * Select a buffer from the provided buffer group for multishot uring_cmd.
+ * On success, returns 0 and fills *buf and *len with the selected buffer.
+ */
+int io_uring_cmd_select_buffer(struct io_uring_cmd *ioucmd,
+		unsigned buf_group,
+		void __user **buf, size_t *len,
+		unsigned int issue_flags);
+
+/*
+ * Complete a multishot uring_cmd event. This will post a CQE to the completion
+ * queue and commit the selected provided buffer.
+ */
+bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+		ssize_t ret, unsigned int issue_flags);
+
 #else
 static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len,
 			      int rw,
@@ -102,6 +118,17 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
 static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
 {
 }
+static inline int io_uring_cmd_select_buffer(struct io_uring_cmd *ioucmd,
+		unsigned buf_group, void __user **buf, size_t *len,
+		unsigned int issue_flags)
+{
+	return -EOPNOTSUPP;
+}
+static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+		ssize_t ret, unsigned int issue_flags)
+{
+	return true;
+}
 #endif
 
 /*
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 6957dc539d83..e8afb4f5b56a 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -297,10 +297,17 @@ enum io_uring_op {
 
 /*
  * sqe->uring_cmd_flags		top 8bits aren't available for userspace
  * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
+ *				along with setting sqe->buf_index,
+ *				IORING_URING_CMD_MULTISHOT can't be set
+ *				at the same time
+ * IORING_URING_CMD_MULTISHOT	use buffer select; pass this flag
  *				along with setting sqe->buf_index.
+ *				IORING_URING_CMD_FIXED can't be set
+ *				at the same time
  */
 #define IORING_URING_CMD_FIXED	(1U << 0)
-#define IORING_URING_CMD_MASK	IORING_URING_CMD_FIXED
+#define IORING_URING_CMD_MULTISHOT	(1U << 1)
+#define IORING_URING_CMD_MASK	(IORING_URING_CMD_FIXED | IORING_URING_CMD_MULTISHOT)
 
 /*
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 9568785810d9..932319633eac 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -413,6 +413,7 @@ const struct io_issue_def io_issue_defs[] = {
 #endif
 	},
 	[IORING_OP_URING_CMD] = {
+		.buffer_select		= 1,
 		.needs_file		= 1,
 		.plug			= 1,
 		.iopoll			= 1,
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 053bac89b6c0..f0539e73f9d7 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -11,6 +11,7 @@
 #include "io_uring.h"
 #include "alloc_cache.h"
 #include "rsrc.h"
+#include "kbuf.h"
 #include "uring_cmd.h"
 #include "poll.h"
 
@@ -194,8 +195,20 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
 		return -EINVAL;
 
-	if (ioucmd->flags & IORING_URING_CMD_FIXED)
+	if ((ioucmd->flags & IORING_URING_CMD_FIXED) &&
+	    (ioucmd->flags & IORING_URING_CMD_MULTISHOT))
+		return -EINVAL;
+
+	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
+		if (req->flags & REQ_F_BUFFER_SELECT)
+			return -EINVAL;
 		req->buf_index = READ_ONCE(sqe->buf_index);
+	}
+
+	if (ioucmd->flags & IORING_URING_CMD_MULTISHOT) {
+		if (!(req->flags & REQ_F_BUFFER_SELECT))
+			return -EINVAL;
+	}
 
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
 
@@ -251,6 +264,11 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
+	if (ioucmd->flags & IORING_URING_CMD_MULTISHOT) {
+		if (ret >= 0)
+			return IOU_ISSUE_SKIP_COMPLETE;
+		io_kbuf_recycle(req, issue_flags);
+	}
 	if (ret == -EAGAIN) {
 		ioucmd->flags |= IORING_URING_CMD_REISSUE;
 		return ret;
@@ -333,3 +351,47 @@ bool io_uring_cmd_post_mshot_cqe32(struct io_uring_cmd *cmd,
 		return false;
 	return io_req_post_cqe32(req, cqe);
 }
+
+int io_uring_cmd_select_buffer(struct io_uring_cmd *ioucmd,
+		unsigned buf_group,
+		void __user **buf, size_t *len,
+		unsigned int issue_flags)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+	void __user *ubuf;
+
+	if (!(req->flags & REQ_F_BUFFER_SELECT))
+		return -EINVAL;
+
+	ubuf = io_buffer_select(req, len, buf_group, issue_flags);
+	if (!ubuf)
+		return -ENOBUFS;
+
+	*buf = ubuf;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_select_buffer);
+
+/*
+ * Return true if this multishot uring_cmd still has to be completed by the
+ * caller; otherwise the event CQE has been posted successfully.
+ *
+ * Should only be called from task_work context.
+ *
+ */
+bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+		ssize_t ret, unsigned int issue_flags)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+	unsigned int cflags = 0;
+
+	if (ret > 0) {
+		cflags = io_put_kbuf(req, ret, issue_flags);
+		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE))
+			return false;
+	}
+
+	io_req_set_res(req, ret, cflags);
+	return true;
+}
+EXPORT_SYMBOL_GPL(io_uring_mshot_cmd_post_cqe);
-- 
2.47.1
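[Not part of the patch] To make the intended flow of the two new helpers
concrete, below is a rough driver-side sketch of the event path. Only
io_uring_cmd_select_buffer() and io_uring_mshot_cmd_post_cqe() come from this
series; the demo_* names, the pdu layout, the use of buffer group 0, and the
final io_uring_cmd_done() call (assumed here with its long-standing
(cmd, ret, res2, issue_flags) signature) are illustrative assumptions only.

/*
 * Hypothetical driver sketch: a task_work callback that copies one event
 * into a provided buffer and posts a multishot CQE for it.
 */
#include <linux/io_uring/cmd.h>
#include <linux/minmax.h>
#include <linux/uaccess.h>

struct demo_event {
	const void *data;	/* event payload prepared by the driver */
	size_t len;
};

/* Runs in task_work context, as io_uring_mshot_cmd_post_cqe() requires. */
static void demo_event_task_work(struct io_uring_cmd *ioucmd,
				 unsigned int issue_flags)
{
	struct demo_event *ev = io_uring_cmd_to_pdu(ioucmd, struct demo_event);
	void __user *buf;
	size_t len;
	ssize_t ret;

	/* Grab one buffer from provided buffer group 0 (driver's choice). */
	ret = io_uring_cmd_select_buffer(ioucmd, 0, &buf, &len, issue_flags);
	if (!ret) {
		len = min_t(size_t, len, ev->len);
		ret = copy_to_user(buf, ev->data, len) ? -EFAULT : len;
	}

	/*
	 * Post a CQE carrying the buffer; IORING_CQE_F_MORE keeps the command
	 * armed for the next event. A true return means the multishot command
	 * is finished and must be completed for good.
	 */
	if (io_uring_mshot_cmd_post_cqe(ioucmd, ret, issue_flags))
		io_uring_cmd_done(ioucmd, ret, 0, issue_flags);
}

In practice the driver would schedule such a callback from its interrupt or
event path, e.g. via io_uring_cmd_complete_in_task(), and userspace arms the
command with IOSQE_BUFFER_SELECT plus IORING_URING_CMD_MULTISHOT in
sqe->uring_cmd_flags, matching the checks added to io_uring_cmd_prep(); the
buffer group id passed to io_uring_cmd_select_buffer() is up to the driver,
for example taken from the command payload.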