Add kernel APIs to pin and unpin buffer rings, preventing userspace
from unregistering a buffer ring while it is pinned by the kernel. This
gives kernel subsystems a way to safely access buffer ring contents
while guaranteeing that the buffer ring remains valid. A pinned buffer
ring cannot be unregistered until it is explicitly unpinned; a
userspace attempt to unregister a pinned buffer ring fails with -EBUSY.

This is a preparatory change for upcoming fuse usage of kernel-managed
buffer rings. fuse needs to pin the buffer ring because it may have to
select a buffer from atomic context, which it can only do by using the
underlying buffer list pointer.

Signed-off-by: Joanne Koong
---
 include/linux/io_uring/cmd.h | 17 +++++++++++++
 io_uring/kbuf.c              | 48 ++++++++++++++++++++++++++++++++++++
 io_uring/kbuf.h              | 10 ++++++++
 io_uring/uring_cmd.c         | 18 ++++++++++++++
 4 files changed, 93 insertions(+)

diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 375fd048c4cb..424f071f42e5 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -84,6 +84,10 @@ struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
 bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
 				 struct io_br_sel *sel, unsigned int issue_flags);
+int io_uring_cmd_buf_ring_pin(struct io_uring_cmd *ioucmd, unsigned buf_group,
+			      unsigned issue_flags, struct io_buffer_list **bl);
+int io_uring_cmd_buf_ring_unpin(struct io_uring_cmd *ioucmd, unsigned buf_group,
+				unsigned issue_flags);
 
 #else
 static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
@@ -126,6 +130,19 @@ static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
 {
 	return true;
 }
+static inline int io_uring_cmd_buf_ring_pin(struct io_uring_cmd *ioucmd,
+					    unsigned buf_group,
+					    unsigned issue_flags,
+					    struct io_buffer_list **bl)
+{
+	return -EOPNOTSUPP;
+}
+static inline int io_uring_cmd_buf_ring_unpin(struct io_uring_cmd *ioucmd,
+					      unsigned buf_group,
+					      unsigned issue_flags)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 static inline struct io_uring_cmd *io_uring_cmd_from_tw(struct io_tw_req tw_req)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index c98cecb56b8c..49dc75f24432 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -238,6 +238,52 @@ struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
 	return sel;
 }
 
+int io_kbuf_ring_pin(struct io_kiocb *req, unsigned buf_group,
+		     unsigned issue_flags, struct io_buffer_list **bl)
+{
+	struct io_buffer_list *buffer_list;
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret = -EINVAL;
+
+	io_ring_submit_lock(ctx, issue_flags);
+
+	buffer_list = io_buffer_get_list(ctx, buf_group);
+	if (likely(buffer_list) && likely(buffer_list->flags & IOBL_BUF_RING)) {
+		if (unlikely(buffer_list->flags & IOBL_PINNED)) {
+			ret = -EALREADY;
+		} else {
+			buffer_list->flags |= IOBL_PINNED;
+			ret = 0;
+			*bl = buffer_list;
+		}
+	}
+
+	io_ring_submit_unlock(ctx, issue_flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(io_kbuf_ring_pin);
+
+int io_kbuf_ring_unpin(struct io_kiocb *req, unsigned buf_group,
+		       unsigned issue_flags)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_buffer_list *bl;
+	int ret = -EINVAL;
+
+	io_ring_submit_lock(ctx, issue_flags);
+
+	bl = io_buffer_get_list(ctx, buf_group);
+	if (likely(bl) && likely(bl->flags & IOBL_BUF_RING) &&
+	    likely(bl->flags & IOBL_PINNED)) {
+		bl->flags &= ~IOBL_PINNED;
+		ret = 0;
+	}
+
+	io_ring_submit_unlock(ctx, issue_flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(io_kbuf_ring_unpin);
+
 /* cap it at a reasonable 256, will be one page even for 4K */
 #define PEEK_MAX_IMPORT		256
@@ -744,6 +790,8 @@ int io_unregister_buf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		return -ENOENT;
 	if (!(bl->flags & IOBL_BUF_RING))
 		return -EINVAL;
+	if (bl->flags & IOBL_PINNED)
+		return -EBUSY;
 
 	scoped_guard(mutex, &ctx->mmap_lock)
 		xa_erase(&ctx->io_bl_xa, bl->bgid);
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 11d165888b8e..c4368f35cf11 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -12,6 +12,11 @@ enum {
 	IOBL_INC		= 2,
 	/* buffers are kernel managed */
 	IOBL_KERNEL_MANAGED	= 4,
+	/*
+	 * buffer ring is pinned and cannot be unregistered by userspace until
+	 * it has been unpinned
+	 */
+	IOBL_PINNED		= 8,
 };
 
 struct io_buffer_list {
@@ -136,4 +141,9 @@ static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
 		return 0;
 	return __io_put_kbufs(req, bl, len, nbufs);
 }
+
+int io_kbuf_ring_pin(struct io_kiocb *req, unsigned buf_group,
+		     unsigned issue_flags, struct io_buffer_list **bl);
+int io_kbuf_ring_unpin(struct io_kiocb *req, unsigned buf_group,
+		       unsigned issue_flags);
 #endif
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 197474911f04..8ac79ead4158 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -398,3 +398,21 @@ bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
 	return true;
 }
 EXPORT_SYMBOL_GPL(io_uring_mshot_cmd_post_cqe);
+
+int io_uring_cmd_buf_ring_pin(struct io_uring_cmd *ioucmd, unsigned buf_group,
+			      unsigned issue_flags, struct io_buffer_list **bl)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+	return io_kbuf_ring_pin(req, buf_group, issue_flags, bl);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_buf_ring_pin);
+
+int io_uring_cmd_buf_ring_unpin(struct io_uring_cmd *ioucmd, unsigned buf_group,
+				unsigned issue_flags)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+	return io_kbuf_ring_unpin(req, buf_group, issue_flags);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_buf_ring_unpin);
-- 
2.47.3
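
Usage sketch for reviewers, illustrative only and not part of this
patch: a uring_cmd consumer (such as the upcoming fuse user) would pin
the buffer ring once, cache the returned io_buffer_list pointer so it
can select buffers from atomic context, and unpin on teardown. The
example_* names and the buffer group id below are hypothetical; only
the two io_uring_cmd_buf_ring_*() calls come from this patch.

	/* Hypothetical consumer of the new API -- not part of this patch. */
	#include <linux/io_uring/cmd.h>

	#define EXAMPLE_BGID	0	/* hypothetical buffer group id */

	struct example_conn {
		struct io_buffer_list *bl;	/* valid while the ring is pinned */
	};

	static int example_pin_bufring(struct example_conn *conn,
				       struct io_uring_cmd *ioucmd,
				       unsigned int issue_flags)
	{
		/*
		 * On success, userspace unregistration of this group fails
		 * with -EBUSY until we unpin. Returns -EALREADY if the group
		 * is already pinned, -EINVAL if it does not exist or is not
		 * a buffer ring.
		 */
		return io_uring_cmd_buf_ring_pin(ioucmd, EXAMPLE_BGID,
						 issue_flags, &conn->bl);
	}

	static int example_unpin_bufring(struct example_conn *conn,
					 struct io_uring_cmd *ioucmd,
					 unsigned int issue_flags)
	{
		int err = io_uring_cmd_buf_ring_unpin(ioucmd, EXAMPLE_BGID,
						      issue_flags);

		if (!err)
			conn->bl = NULL;	/* ring may be unregistered again */
		return err;
	}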