Reduce overhead when completing multiple requests in batch I/O mode by
accumulating them in an io_comp_batch structure and completing them
together via blk_mq_end_request_batch(). This minimizes per-request
completion overhead and improves performance for high-IOPS workloads.

Add an io_comp_batch pointer to struct ublk_batch_io_data and pass it
through __ublk_complete_rq(), which calls blk_mq_add_to_batch() when a
batch is supplied. ublk_handle_batch_commit_cmd() sets up an on-stack
batch with DEFINE_IO_COMP_BATCH() before walking the commit buffer,
ublk_batch_commit_io() feeds it to __ublk_complete_rq(), and once all
batch I/Os have been processed the batch's completion callback is
invoked to complete the accumulated requests in one call.

So far this only covers direct completion. For deferred completion
(zero copy, auto buffer registration), ublk_io_release() is often
delayed until the io_uring request that consumes the buffer is freed,
so batching seldom takes effect there; it is also hard to pass the
per-task 'struct io_comp_batch' to the deferred completion path.

Reviewed-by: Caleb Sander Mateos
Signed-off-by: Ming Lei
---
 drivers/block/ublk_drv.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)
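[Editor's note, not part of the patch: a minimal sketch of the
io_comp_batch pattern the diff relies on, for readers unfamiliar with
block-layer completion batching. The helpers my_complete_one() and
my_complete_many() are hypothetical; blk_mq_add_to_batch(),
blk_mq_end_request_batch(), __blk_mq_end_request() and
DEFINE_IO_COMP_BATCH() are existing block-layer APIs.]

#include <linux/blk-mq.h>

/* Hypothetical helper: complete one request, batching when possible. */
static void my_complete_one(struct request *req, struct io_comp_batch *iob)
{
	/*
	 * blk_mq_add_to_batch() links @req onto @iob and records
	 * blk_mq_end_request_batch() as the flush callback. It returns
	 * false when batching is not possible (e.g. @iob is NULL), in
	 * which case the request is completed individually.
	 */
	if (!blk_mq_add_to_batch(req, iob, false, blk_mq_end_request_batch))
		__blk_mq_end_request(req, BLK_STS_OK);
}

/* Hypothetical caller: complete @nr requests with a single flush. */
static void my_complete_many(struct request **reqs, unsigned int nr)
{
	DEFINE_IO_COMP_BATCH(iob);	/* empty on-stack batch */
	unsigned int i;

	for (i = 0; i < nr; i++)
		my_complete_one(reqs[i], &iob);

	/* ->complete is only set once a request was actually batched. */
	if (iob.complete)
		iob.complete(&iob);
}

[In the patch below, __ublk_complete_rq() plays the role of
my_complete_one() and ublk_handle_batch_commit_cmd() that of
my_complete_many(); the non-batch call sites pass a NULL iob and fall
back to per-request completion.]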
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 034420e8df55..0168ee885b2d 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -130,6 +130,7 @@ struct ublk_batch_io_data {
 	struct io_uring_cmd *cmd;
 	struct ublk_batch_io header;
 	unsigned int issue_flags;
+	struct io_comp_batch *iob;
 };
 
 /*
@@ -647,7 +648,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
 #endif
 
 static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
-				      bool need_map);
+				      bool need_map, struct io_comp_batch *iob);
 
 static dev_t ublk_chr_devt;
 static const struct class ublk_chr_class = {
@@ -917,7 +918,7 @@ static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
 		return;
 
 	/* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
-	__ublk_complete_rq(req, io, false);
+	__ublk_complete_rq(req, io, false, NULL);
 }
 
 static inline bool ublk_sub_req_ref(struct ublk_io *io)
@@ -1256,7 +1257,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 
 /* todo: handle partial completion */
 static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
-				      bool need_map)
+				      bool need_map, struct io_comp_batch *iob)
 {
 	unsigned int unmapped_bytes;
 	blk_status_t res = BLK_STS_OK;
@@ -1293,8 +1294,11 @@ static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
 
 	if (blk_update_request(req, BLK_STS_OK, io->res))
 		blk_mq_requeue_request(req, true);
-	else if (likely(!blk_should_fake_timeout(req->q)))
+	else if (likely(!blk_should_fake_timeout(req->q))) {
+		if (blk_mq_add_to_batch(req, iob, false, blk_mq_end_request_batch))
+			return;
 		__blk_mq_end_request(req, BLK_STS_OK);
+	}
 	return;
 exit:
@@ -2273,7 +2277,7 @@ static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
 		blk_mq_requeue_request(req, false);
 	else {
 		io->res = -EIO;
-		__ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
+		__ublk_complete_rq(req, io, ublk_dev_need_map_io(ub), NULL);
 	}
 }
 
@@ -3008,7 +3012,7 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
 		if (req_op(req) == REQ_OP_ZONE_APPEND)
 			req->__sector = addr;
 		if (compl)
-			__ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
+			__ublk_complete_rq(req, io, ublk_dev_need_map_io(ub), NULL);
 
 		if (ret)
 			goto out;
@@ -3329,11 +3333,11 @@ static int ublk_batch_commit_io(struct ublk_queue *ubq,
 	if (req_op(req) == REQ_OP_ZONE_APPEND)
 		req->__sector = ublk_batch_zone_lba(uc, elem);
 	if (compl)
-		__ublk_complete_rq(req, io, ublk_dev_need_map_io(data->ub));
+		__ublk_complete_rq(req, io, ublk_dev_need_map_io(data->ub), data->iob);
 	return 0;
 }
 
-static int ublk_handle_batch_commit_cmd(const struct ublk_batch_io_data *data)
+static int ublk_handle_batch_commit_cmd(struct ublk_batch_io_data *data)
 {
 	const struct ublk_batch_io *uc = &data->header;
 	struct io_uring_cmd *cmd = data->cmd;
@@ -3342,10 +3346,15 @@ static int ublk_handle_batch_commit_cmd(const struct ublk_batch_io_data *data)
 		.total = uc->nr_elem * uc->elem_bytes,
 		.elem_bytes = uc->elem_bytes,
 	};
+	DEFINE_IO_COMP_BATCH(iob);
 	int ret;
 
+	data->iob = &iob;
 	ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_commit_io);
 
+	if (iob.complete)
+		iob.complete(&iob);
+
 	return iter.done == 0 ? ret : iter.done;
 }
-- 
2.47.0