Add an interface for buffers to be recycled back into a kernel-managed
buffer ring. This is a preparatory patch for fuse over io-uring.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
---
 include/linux/io_uring/cmd.h | 11 +++++++++
 io_uring/kbuf.c              | 54 ++++++++++++++++++++++++++++++++++++
 2 files changed, 65 insertions(+)

diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 702b1903e6ee..a488e945f883 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -88,6 +88,10 @@ int io_uring_buf_ring_pin(struct io_uring_cmd *cmd, unsigned buf_group,
 			  unsigned issue_flags, struct io_buffer_list **bl);
 int io_uring_buf_ring_unpin(struct io_uring_cmd *cmd, unsigned buf_group,
 			    unsigned issue_flags);
+
+int io_uring_kmbuf_recycle(struct io_uring_cmd *cmd, unsigned int buf_group,
+			   u64 addr, unsigned int len, unsigned int bid,
+			   unsigned int issue_flags);
 #else
 static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len,
 					    int rw,
@@ -143,6 +147,13 @@ static inline int io_uring_buf_ring_unpin(struct io_uring_cmd *cmd,
 {
 	return -EOPNOTSUPP;
 }
+static inline int io_uring_kmbuf_recycle(struct io_uring_cmd *cmd,
+					 unsigned int buf_group, u64 addr,
+					 unsigned int len, unsigned int bid,
+					 unsigned int issue_flags)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 static inline struct io_uring_cmd *io_uring_cmd_from_tw(struct io_tw_req tw_req)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 94ab23400721..a7d7d2c6b42c 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -102,6 +102,60 @@ void io_kbuf_drop_legacy(struct io_kiocb *req)
 	req->kbuf = NULL;
 }
 
+/*
+ * io_uring_kmbuf_recycle - return a buffer to a kernel-managed buffer ring
+ *
+ * Re-post the buffer described by @addr/@len/@bid to the buffer ring of
+ * @buf_group so that it can be selected again. Returns 0 on success and
+ * -EINVAL if the buffer group does not exist, is not a kernel-managed
+ * buffer ring, or the ring is already full.
+ */
+int io_uring_kmbuf_recycle(struct io_uring_cmd *cmd, unsigned int buf_group,
+			   u64 addr, unsigned int len, unsigned int bid,
+			   unsigned int issue_flags)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_uring_buf_ring *br;
+	struct io_uring_buf *buf;
+	struct io_buffer_list *bl;
+	int ret = -EINVAL;
+
+	if (WARN_ON_ONCE(req->flags & REQ_F_BUFFERS_COMMIT))
+		return ret;
+
+	io_ring_submit_lock(ctx, issue_flags);
+
+	bl = io_buffer_get_list(ctx, buf_group);
+	if (!bl)
+		goto done;
+
+	if (WARN_ON_ONCE(!(bl->flags & IOBL_BUF_RING)) ||
+	    WARN_ON_ONCE(!(bl->flags & IOBL_KERNEL_MANAGED)))
+		goto done;
+
+	br = bl->buf_ring;
+
+	if (WARN_ON_ONCE((br->tail - bl->head) >= bl->nr_entries))
+		goto done;
+
+	buf = &br->bufs[(br->tail) & bl->mask];
+
+	buf->addr = addr;
+	buf->len = len;
+	buf->bid = bid;
+
+	req->flags &= ~REQ_F_BUFFER_RING;
+
+	br->tail++;
+	ret = 0;
+
+done:
+	io_ring_submit_unlock(ctx, issue_flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(io_uring_kmbuf_recycle);
+
 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-- 
2.47.3