Handle UBLK_U_IO_COMMIT_IO_CMDS by walking the uring_cmd fixed buffer:

- read each element into one temp buffer in batch style

- parse and apply each element for committing its io result

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 drivers/block/ublk_drv.c      | 120 ++++++++++++++++++++++++++++++++--
 include/uapi/linux/ublk_cmd.h |   8 +++
 2 files changed, 124 insertions(+), 4 deletions(-)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index a4bae3d1562a..fae016b67254 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2083,9 +2083,9 @@ static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd
 	return 0;
 }
 
-static int ublk_handle_auto_buf_reg(struct ublk_io *io,
-				    struct io_uring_cmd *cmd,
-				    u16 *buf_idx)
+static void __ublk_handle_auto_buf_reg(struct ublk_io *io,
+				       struct io_uring_cmd *cmd,
+				       u16 *buf_idx)
 {
 	if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
 		io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
@@ -2103,7 +2103,13 @@ static int ublk_handle_auto_buf_reg(struct ublk_io *io,
 		if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
 			*buf_idx = io->buf.auto_reg.index;
 	}
+}
 
+static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+				    struct io_uring_cmd *cmd,
+				    u16 *buf_idx)
+{
+	__ublk_handle_auto_buf_reg(io, cmd, buf_idx);
 	return ublk_set_auto_buf_reg(io, cmd);
 }
 
@@ -2563,6 +2569,17 @@ static inline __u64 ublk_batch_buf_addr(const struct ublk_batch_io *uc,
 	return -1;
 }
 
+static inline __u64 ublk_batch_zone_lba(const struct ublk_batch_io *uc,
+					const struct ublk_elem_header *elem)
+{
+	const void *buf = (const void *)elem;
+
+	if (uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA)
+		return *(__u64 *)(buf + sizeof(*elem) +
+				8 * !!(uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR));
+	return -1;
+}
+
 static struct ublk_auto_buf_reg
 ublk_batch_auto_buf_reg(const struct ublk_batch_io *uc,
 			const struct ublk_elem_header *elem)
@@ -2718,6 +2735,101 @@ static int ublk_handle_batch_prep_cmd(struct ublk_batch_io_data *data)
 	return ret;
 }
 
+static int ublk_batch_commit_io_check(const struct ublk_queue *ubq,
+				      struct ublk_io *io,
+				      union ublk_io_buf *buf)
+{
+	struct request *req = io->req;
+
+	if (!req)
+		return -EINVAL;
+
+	if (io->flags & UBLK_IO_FLAG_ACTIVE)
+		return -EBUSY;
+
+	if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+		return -EINVAL;
+
+	if (ublk_need_map_io(ubq)) {
+		/*
+		 * COMMIT_AND_FETCH_REQ has to provide IO buffer if
+		 * NEED GET DATA is not enabled or it is Read IO.
+		 */
+		if (!buf->addr && (!ublk_need_get_data(ubq) ||
+					req_op(req) == REQ_OP_READ))
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static int ublk_batch_commit_io(struct ublk_io *io,
+				const struct ublk_batch_io_data *data)
+{
+	const struct ublk_batch_io *uc = io_uring_sqe_cmd(data->cmd->sqe);
+	struct ublk_queue *ubq = data->ubq;
+	u16 buf_idx = UBLK_INVALID_BUF_IDX;
+	union ublk_io_buf buf = { 0 };
+	struct request *req = NULL;
+	bool auto_reg = false;
+	bool compl = false;
+	int ret;
+
+	if (ublk_support_auto_buf_reg(data->ubq)) {
+		buf.auto_reg = ublk_batch_auto_buf_reg(uc, data->elem);
+		auto_reg = true;
+	} else if (ublk_need_map_io(data->ubq))
+		buf.addr = ublk_batch_buf_addr(uc, data->elem);
+
+	ublk_io_lock(io);
+	ret = ublk_batch_commit_io_check(ubq, io, &buf);
+	if (!ret) {
+		io->res = data->elem->result;
+		io->buf = buf;
+		req = ublk_fill_io_cmd(io, data->cmd);
+
+		if (auto_reg)
+			__ublk_handle_auto_buf_reg(io, data->cmd, &buf_idx);
+		compl = ublk_need_complete_req(ubq, io);
+	}
+	ublk_io_unlock(io);
+
+	if (unlikely(ret)) {
+		pr_warn("%s: dev %u queue %u io %ld: commit failure %d\n",
+			__func__, ubq->dev->dev_info.dev_id, ubq->q_id,
+			io - ubq->ios, ret);
+		return ret;
+	}
+
+	/* can't touch 'ublk_io' any more */
+	if (buf_idx != UBLK_INVALID_BUF_IDX)
+		io_buffer_unregister_bvec(data->cmd, buf_idx, data->issue_flags);
+	if (req_op(req) == REQ_OP_ZONE_APPEND)
+		req->__sector = ublk_batch_zone_lba(uc, data->elem);
+	if (compl)
+		__ublk_complete_rq(req);
+	return 0;
+}
+
+static int ublk_handle_batch_commit_cmd(struct ublk_batch_io_data *data)
+{
+	struct io_uring_cmd *cmd = data->cmd;
+	const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe);
+	struct ublk_batch_io_iter iter = {
+		.total = uc->nr_elem * uc->elem_bytes,
+		.elem_bytes = uc->elem_bytes,
+	};
+	int ret;
+
+	ret = io_uring_cmd_import_fixed(cmd->sqe->addr, cmd->sqe->len,
+			WRITE, &iter.iter, cmd, data->issue_flags);
+	if (ret)
+		return ret;
+
+	ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_commit_io);
+
+	return iter.done == 0 ? ret : iter.done;
+}
+
 static int ublk_check_batch_cmd_flags(const struct ublk_batch_io *uc)
 {
 	const unsigned short mask = UBLK_BATCH_F_HAS_BUF_ADDR |
@@ -2809,7 +2921,7 @@ static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
 		ret = ublk_check_batch_cmd(&data, uc);
 		if (ret)
 			goto out;
-		ret = -EOPNOTSUPP;
+		ret = ublk_handle_batch_commit_cmd(&data);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 38c8cc10d694..695b38522995 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -109,6 +109,14 @@
  */
 #define UBLK_U_IO_PREP_IO_CMDS \
 	_IOWR('u', 0x25, struct ublk_batch_io)
+/*
+ * If a failure code is returned, nothing in the command buffer has been
+ * handled. Otherwise, the return value is the number of bytes in the
+ * command buffer that were actually handled, and the number of handled
+ * IOs is that byte count divided by `elem_bytes`. IOs in the remaining
+ * bytes are not committed, so userspace has to check the return value
+ * and re-issue them to deal with partial commit correctly.
+ */
 #define UBLK_U_IO_COMMIT_IO_CMDS \
 	_IOWR('u', 0x26, struct ublk_batch_io)
 
-- 
2.47.0
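
For illustration, a minimal userspace sketch of issuing UBLK_U_IO_COMMIT_IO_CMDS
and handling the partial-commit return value described above. It assumes
liburing, a ring created with IORING_SETUP_SQE128 (as ublk uring_cmds require),
and a command buffer already registered as fixed buffer `buf_index`; the helper
name `commit_batch` and its parameters are hypothetical, not part of this
series:

#include <liburing.h>
#include <string.h>
#include <linux/ublk_cmd.h>

/*
 * Sketch only: commit a batch of io results through the ublk char dev.
 * 'cmd_buf' holds nr_elem * elem_bytes element bytes and is registered
 * as fixed buffer 'buf_index'; 'uc' is the filled batch header, which
 * is assumed to fit in the SQE128 command area.
 */
static int commit_batch(struct io_uring *ring, int ublk_ch_fd,
			void *cmd_buf, unsigned int buf_index,
			const struct ublk_batch_io *uc)
{
	unsigned int total = uc->nr_elem * uc->elem_bytes;
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	/* sqe->addr/len describe the fixed buffer the kernel walks */
	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, ublk_ch_fd,
			 cmd_buf, total, 0);
	sqe->cmd_op = UBLK_U_IO_COMMIT_IO_CMDS;
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
	sqe->buf_index = buf_index;
	/* batch header goes in the SQE command area */
	memcpy(sqe->cmd, uc, sizeof(*uc));

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);

	if (ret < 0)	/* failure: nothing in the buffer was handled */
		return ret;
	/*
	 * cqe->res is the number of handled bytes; on a partial commit
	 * the caller must re-issue elements [ret / elem_bytes, nr_elem).
	 */
	return ret / uc->elem_bytes;
}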