io_uring task work dispatch makes an indirect call to struct io_kiocb's io_task_work.func field to allow running arbitrary task work functions. In the uring_cmd case, this calls io_uring_cmd_work(), which immediately makes another indirect call to struct io_uring_cmd's task_work_cb field. Define the uring_cmd task work callbacks as functions whose signatures match io_req_tw_func_t. Define an IO_URING_CMD_TASK_WORK_ISSUE_FLAGS constant in io_uring/cmd.h to avoid manufacturing issue_flags in the uring_cmd task work callbacks. Now uring_cmd task work dispatch makes a single indirect call to the uring_cmd implementation's callback. This also allows removing the task_work_cb field from struct io_uring_cmd, freeing up some additional storage space. Signed-off-by: Caleb Sander Mateos --- block/ioctl.c | 4 +++- drivers/block/ublk_drv.c | 15 +++++++++------ drivers/nvme/host/ioctl.c | 5 +++-- fs/btrfs/ioctl.c | 4 +++- fs/fuse/dev_uring.c | 5 +++-- include/linux/io_uring/cmd.h | 16 +++++++--------- io_uring/uring_cmd.c | 13 ++----------- 7 files changed, 30 insertions(+), 32 deletions(-) diff --git a/block/ioctl.c b/block/ioctl.c index d7489a56b33c..5c10d48fab27 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -767,13 +767,15 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) struct blk_iou_cmd { int res; bool nowait; }; -static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags) +static void blk_cmd_complete(struct io_kiocb *req, io_tw_token_t tw) { + struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, struct io_uring_cmd); struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd); + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; if (bic->res == -EAGAIN && bic->nowait) io_uring_cmd_issue_blocking(cmd); else io_uring_cmd_done(cmd, bic->res, issue_flags); diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 0c74a41a6753..00439d1879b0 100644 --- a/drivers/block/ublk_drv.c +++ 
b/drivers/block/ublk_drv.c @@ -1346,13 +1346,14 @@ static void ublk_dispatch_req(struct ublk_queue *ubq, if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags)) ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags); } -static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd, - unsigned int issue_flags) +static void ublk_cmd_tw_cb(struct io_kiocb *req, io_tw_token_t tw) { + struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, struct io_uring_cmd); + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); struct ublk_queue *ubq = pdu->ubq; ublk_dispatch_req(ubq, pdu->req, issue_flags); } @@ -1364,13 +1365,14 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) pdu->req = rq; io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb); } -static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd, - unsigned int issue_flags) +static void ublk_cmd_list_tw_cb(struct io_kiocb *req, io_tw_token_t tw) { + struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, struct io_uring_cmd); + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); struct request *rq = pdu->req_list; struct request *next; do { @@ -2521,13 +2523,14 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, fail_put: ublk_put_req_ref(io, req); return NULL; } -static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd, - unsigned int issue_flags) +static void ublk_ch_uring_cmd_cb(struct io_kiocb *req, io_tw_token_t tw) { + struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, struct io_uring_cmd); + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; int ret = ublk_ch_uring_cmd_local(cmd, issue_flags); if (ret != -EIOCBQUEUED) io_uring_cmd_done(cmd, ret, issue_flags); } diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index c212fa952c0f..df39cee94de1 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ 
-396,13 +396,14 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( struct io_uring_cmd *ioucmd) { return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu); } -static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, - unsigned issue_flags) +static void nvme_uring_task_cb(struct io_kiocb *req, io_tw_token_t tw) { + struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); if (pdu->bio) blk_rq_unmap_user(pdu->bio); io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 185bef0df1c2..3b62eb8a50dc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4647,13 +4647,15 @@ struct btrfs_uring_priv { struct io_btrfs_cmd { struct btrfs_uring_encoded_data *data; struct btrfs_uring_priv *priv; }; -static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int issue_flags) +static void btrfs_uring_read_finished(struct io_kiocb *req, io_tw_token_t tw) { + struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, struct io_uring_cmd); struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd); + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; struct btrfs_uring_priv *priv = bc->priv; struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp)); struct extent_io_tree *io_tree = &inode->io_tree; pgoff_t index; u64 cur; diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c index 71b0c9662716..051136e94a33 100644 --- a/fs/fuse/dev_uring.c +++ b/fs/fuse/dev_uring.c @@ -1207,13 +1207,14 @@ static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd, /* * This prepares and sends the ring request in fuse-uring task context. * User buffers are not mapped yet - the application does not have permission * to write to it - this has to be executed in ring task context. 
*/ -static void fuse_uring_send_in_task(struct io_uring_cmd *cmd, - unsigned int issue_flags) +static void fuse_uring_send_in_task(struct io_kiocb *req, io_tw_token_t tw) { + struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, struct io_uring_cmd); + unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS; struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd); struct fuse_ring_queue *queue = ent->queue; int err; if (!io_uring_cmd_should_terminate_tw(cmd)) { diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index b84b97c21b43..3efad93404f9 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -9,18 +9,13 @@ /* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */ #define IORING_URING_CMD_CANCELABLE (1U << 30) /* io_uring_cmd is being issued again */ #define IORING_URING_CMD_REISSUE (1U << 31) -typedef void (*io_uring_cmd_tw_t)(struct io_uring_cmd *cmd, - unsigned issue_flags); - struct io_uring_cmd { struct file *file; const struct io_uring_sqe *sqe; - /* callback to defer completions to task context */ - io_uring_cmd_tw_t task_work_cb; u32 cmd_op; u32 flags; u8 pdu[32]; /* available inline for free use */ }; @@ -58,11 +53,11 @@ int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd, */ void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2, unsigned issue_flags, bool is_cqe32); void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, - io_uring_cmd_tw_t task_work_cb, + io_req_tw_func_t task_work_cb, unsigned flags); /* * Note: the caller should never hard code @issue_flags and only use the * mask provided by the core io_uring code. 
@@ -107,11 +102,11 @@ static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd, static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 ret2, unsigned issue_flags, bool is_cqe32) { } static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, - io_uring_cmd_tw_t task_work_cb, unsigned flags) + io_req_tw_func_t task_work_cb, unsigned flags) { } static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, unsigned int issue_flags) { @@ -130,19 +125,22 @@ static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd, { return true; } #endif +/* task_work executor checks the deferred list completion */ +#define IO_URING_CMD_TASK_WORK_ISSUE_FLAGS IO_URING_F_COMPLETE_DEFER + /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */ static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, - io_uring_cmd_tw_t task_work_cb) + io_req_tw_func_t task_work_cb) { __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE); } static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, - io_uring_cmd_tw_t task_work_cb) + io_req_tw_func_t task_work_cb) { __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0); } static inline bool io_uring_cmd_should_terminate_tw(struct io_uring_cmd *cmd) diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 35bdac35cf4d..5a80d35658dc 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -111,29 +111,20 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, io_ring_submit_unlock(ctx, issue_flags); } } EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable); -static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw) -{ - struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); - - /* task_work executor checks the deffered list completion */ - ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER); -} - void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, - io_uring_cmd_tw_t 
task_work_cb, + io_req_tw_func_t task_work_cb, unsigned flags) { struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT)) return; - ioucmd->task_work_cb = task_work_cb; - req->io_task_work.func = io_uring_cmd_work; + req->io_task_work.func = task_work_cb; __io_req_task_work_add(req, flags); } EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task); static inline void io_req_set_cqe32_extra(struct io_kiocb *req, -- 2.45.2