Add two kfuncs that should cover most of the needs:

1. bpf_io_uring_submit_sqes(), which allows submitting io_uring requests.
It mirrors the normal user space submission path and follows all related
io_uring_enter(2) rules, i.e. SQEs are taken from the SQ according to
head/tail values. In case of IORING_SETUP_SQ_REWIND, it'll submit the
first N entries.

2. bpf_io_uring_get_region() returns a pointer to the specified region,
where io_uring regions are kernel-userspace shared chunks of memory. It
takes the size as an argument, which should be a load time constant.

There are 3 types of regions:

- IOU_REGION_SQ returns the submission queue.

- IOU_REGION_CQ stores the CQ, SQ/CQ headers and the sqarray. In other
words, it gives the same memory that would normally be mmap'ed at the
IORING_OFF_SQ_RING offset with IORING_FEAT_SINGLE_MMAP enabled.

- IOU_REGION_MEM represents the memory / parameter region. It can be
used to store request indirect parameters and for kernel-user
communication.

It intentionally provides a thin but flexible API and expects BPF
programs to implement CQ/SQ header parsing, CQ walking, etc. That
mirrors how normal user space works with rings and should help to
minimise kernel / kfunc helper changes while introducing new generic
io_uring features.
Signed-off-by: Pavel Begunkov
---
 io_uring/bpf-ops.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++
 io_uring/bpf-ops.h |  6 ++++++
 2 files changed, 59 insertions(+)

diff --git a/io_uring/bpf-ops.c b/io_uring/bpf-ops.c
index 7db07eda5a48..ad4e3dc889ba 100644
--- a/io_uring/bpf-ops.c
+++ b/io_uring/bpf-ops.c
@@ -4,11 +4,56 @@
 #include "io_uring.h"
 #include "register.h"
+#include "memmap.h"
 #include "bpf-ops.h"
 #include "loop.h"
 
 static const struct btf_type *loop_params_type;
 
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_io_uring_submit_sqes(struct io_ring_ctx *ctx, u32 nr)
+{
+	return io_submit_sqes(ctx, nr);
+}
+
+__bpf_kfunc
+__u8 *bpf_io_uring_get_region(struct io_ring_ctx *ctx, __u32 region_id,
+			      const size_t rdwr_buf_size)
+{
+	struct io_mapped_region *r;
+
+	switch (region_id) {
+	case IOU_REGION_MEM:
+		r = &ctx->param_region;
+		break;
+	case IOU_REGION_CQ:
+		r = &ctx->ring_region;
+		break;
+	case IOU_REGION_SQ:
+		r = &ctx->sq_region;
+		break;
+	default:
+		return NULL;
+	}
+
+	if (unlikely(rdwr_buf_size > io_region_size(r)))
+		return NULL;
+	return io_region_get_ptr(r);
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(io_uring_kfunc_set)
+BTF_ID_FLAGS(func, bpf_io_uring_submit_sqes, KF_SLEEPABLE | KF_TRUSTED_ARGS);
+BTF_ID_FLAGS(func, bpf_io_uring_get_region, KF_RET_NULL | KF_TRUSTED_ARGS);
+BTF_KFUNCS_END(io_uring_kfunc_set)
+
+static const struct btf_kfunc_id_set bpf_io_uring_kfunc_set = {
+	.owner = THIS_MODULE,
+	.set = &io_uring_kfunc_set,
+};
+
 static int io_bpf_ops__loop_step(struct io_ring_ctx *ctx,
 				 struct iou_loop_params *lp)
 {
@@ -68,12 +113,20 @@ io_lookup_struct_type(struct btf *btf, const char *name)
 
 static int bpf_io_init(struct btf *btf)
 {
+	int ret;
+
 	loop_params_type = io_lookup_struct_type(btf, "iou_loop_params");
 	if (!loop_params_type) {
 		pr_err("io_uring: Failed to locate iou_loop_params\n");
 		return -EINVAL;
 	}
 
+	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+					&bpf_io_uring_kfunc_set);
+	if (ret) {
+		pr_err("io_uring: Failed to register kfuncs (%d)\n", ret);
+		return ret;
+	}
 	return 0;
 }
 
diff --git a/io_uring/bpf-ops.h b/io_uring/bpf-ops.h
index e8a08ae2df0a..b9e589ad519a 100644
--- a/io_uring/bpf-ops.h
+++ b/io_uring/bpf-ops.h
@@ -4,6 +4,12 @@
 
 #include
 
+enum {
+	IOU_REGION_MEM,
+	IOU_REGION_CQ,
+	IOU_REGION_SQ,
+};
+
 struct io_uring_bpf_ops {
 	int (*loop_step)(struct io_ring_ctx *ctx,
 			 struct iou_loop_params *lp);
-- 
2.52.0