Split io_prep_reg_iovec() into:

- __io_prep_reg_iovec(): core logic without request association
- io_prep_reg_iovec(): inline wrapper handling request flags

The core function takes explicit 'compat' and 'need_clean' parameters
instead of accessing req directly. This allows BPF kfuncs to prepare
vectored buffers without request association, enabling support for
multiple buffers per request.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 io_uring/rsrc.c | 11 +++++------
 io_uring/rsrc.h | 21 +++++++++++++++++++--
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 8aa2f7473c89..ec716e14d467 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1540,8 +1540,8 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 	return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
 }
 
-int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
-		      const struct iovec __user *uvec, size_t uvec_segs)
+int __io_prep_reg_iovec(struct iou_vec *iv, const struct iovec __user *uvec,
+			size_t uvec_segs, bool compat, bool *need_clean)
 {
 	struct iovec *iov;
 	int iovec_off, ret;
@@ -1551,17 +1551,16 @@ int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
 		ret = io_vec_realloc(iv, uvec_segs);
 		if (ret)
 			return ret;
-		req->flags |= REQ_F_NEED_CLEANUP;
+		if (need_clean)
+			*need_clean = true;
 	}
 
 	/* pad iovec to the right */
 	iovec_off = iv->nr - uvec_segs;
 	iov = iv->iovec + iovec_off;
-	res = iovec_from_user(uvec, uvec_segs, uvec_segs, iov,
-			      io_is_compat(req->ctx));
+	res = iovec_from_user(uvec, uvec_segs, uvec_segs, iov, compat);
 	if (IS_ERR(res))
 		return PTR_ERR(res);
 
-	req->flags |= REQ_F_IMPORT_BUFFER;
 	return 0;
 }
 
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index bf77bc618fb5..2a29da350727 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -4,6 +4,7 @@
 
 #include <linux/io_uring_types.h>
 #include <linux/lockdep.h>
+#include "io_uring.h"
 
 #define IO_VEC_CACHE_SOFT_CAP	256
 
@@ -79,8 +80,24 @@ static inline int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
 int io_import_reg_vec(int ddir, struct iov_iter *iter,
 			struct io_kiocb *req, struct iou_vec *vec,
 			unsigned nr_iovs, unsigned issue_flags);
-int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
-		      const struct iovec __user *uvec, size_t uvec_segs);
+int __io_prep_reg_iovec(struct iou_vec *iv, const struct iovec __user *uvec,
+			size_t uvec_segs, bool compat, bool *need_clean);
+
+static inline int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
+				    const struct iovec __user *uvec,
+				    size_t uvec_segs)
+{
+	bool need_clean = false;
+	int ret;
+
+	ret = __io_prep_reg_iovec(iv, uvec, uvec_segs,
+				  io_is_compat(req->ctx), &need_clean);
+	if (need_clean)
+		req->flags |= REQ_F_NEED_CLEANUP;
+	if (ret >= 0)
+		req->flags |= REQ_F_IMPORT_BUFFER;
+	return ret;
+}
 
 int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
-- 
2.47.0