Implement BPF struct_ops registration. The ops are registered from the
BPF side and can be removed by BPF as well as by io_uring itself, which
is why the state is protected by a global lock, io_bpf_ctrl_mutex.

Signed-off-by: Pavel Begunkov
---
 include/linux/io_uring_types.h |  5 ++
 io_uring/bpf-ops.c             | 87 +++++++++++++++++++++++++++++++++-
 io_uring/bpf-ops.h             |  8 ++++
 io_uring/io_uring.c            |  1 +
 4 files changed, 100 insertions(+), 1 deletion(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 9990df98790d..5dfe3608dbb9 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -8,6 +8,9 @@
 #include
 #include
 
+struct iou_loop_params;
+struct io_uring_bpf_ops;
+
 enum {
 	/*
 	 * A hint to not wake right away but delay until there are enough of
@@ -462,6 +465,8 @@ struct io_ring_ctx {
 	DECLARE_HASHTABLE(napi_ht, 4);
 #endif
 
+	struct io_uring_bpf_ops		*bpf_ops;
+
 	/*
 	 * Protection for resize vs mmap races - both the mmap and resize
 	 * side will need to grab this lock, to prevent either side from
diff --git a/io_uring/bpf-ops.c b/io_uring/bpf-ops.c
index ad4e3dc889ba..26955ff06ecf 100644
--- a/io_uring/bpf-ops.c
+++ b/io_uring/bpf-ops.c
@@ -4,10 +4,12 @@
 
 #include "io_uring.h"
 #include "register.h"
+#include "loop.h"
 #include "memmap.h"
 #include "bpf-ops.h"
 #include "loop.h"
 
+static DEFINE_MUTEX(io_bpf_ctrl_mutex);
 static const struct btf_type *loop_params_type;
 
 __bpf_kfunc_start_defs();
@@ -141,16 +143,99 @@ static int bpf_io_init_member(const struct btf_type *t,
 			      const struct btf_member *member,
 			      void *kdata, const void *udata)
 {
+	u32 moff = __btf_member_bit_offset(t, member) / 8;
+	const struct io_uring_bpf_ops *uops = udata;
+	struct io_uring_bpf_ops *ops = kdata;
+
+	switch (moff) {
+	case offsetof(struct io_uring_bpf_ops, ring_fd):
+		ops->ring_fd = uops->ring_fd;
+		return 1;
+	}
+	return 0;
+}
+
+static int io_install_bpf(struct io_ring_ctx *ctx, struct io_uring_bpf_ops *ops)
+{
+	if (ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_IOPOLL))
+		return -EOPNOTSUPP;
+	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+		return -EOPNOTSUPP;
+
+	if (ctx->bpf_ops)
+		return -EBUSY;
+	if (WARN_ON_ONCE(!ops->loop_step))
+		return -EINVAL;
+
+	ops->priv = ctx;
+	ctx->bpf_ops = ops;
+	ctx->loop_step = ops->loop_step;
 	return 0;
 }
 
 static int bpf_io_reg(void *kdata, struct bpf_link *link)
 {
-	return -EOPNOTSUPP;
+	struct io_uring_bpf_ops *ops = kdata;
+	struct io_ring_ctx *ctx;
+	struct file *file;
+	int ret = -EBUSY;
+
+	file = io_uring_register_get_file(ops->ring_fd, false);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+	ctx = file->private_data;
+
+	scoped_guard(mutex, &io_bpf_ctrl_mutex) {
+		guard(mutex)(&ctx->uring_lock);
+		ret = io_install_bpf(ctx, ops);
+	}
+
+	fput(file);
+	return ret;
+}
+
+static void io_eject_bpf(struct io_ring_ctx *ctx)
+{
+	struct io_uring_bpf_ops *ops = ctx->bpf_ops;
+
+	if (WARN_ON_ONCE(!ops))
+		return;
+	if (WARN_ON_ONCE(ops->priv != ctx))
+		return;
+
+	ops->priv = NULL;
+	ctx->bpf_ops = NULL;
+	ctx->loop_step = NULL;
 }
 
 static void bpf_io_unreg(void *kdata, struct bpf_link *link)
 {
+	struct io_uring_bpf_ops *ops = kdata;
+	struct io_ring_ctx *ctx;
+
+	guard(mutex)(&io_bpf_ctrl_mutex);
+	ctx = ops->priv;
+	if (ctx) {
+		guard(mutex)(&ctx->uring_lock);
+		if (WARN_ON_ONCE(ctx->bpf_ops != ops))
+			return;
+
+		io_eject_bpf(ctx);
+	}
+}
+
+void io_unregister_bpf_ops(struct io_ring_ctx *ctx)
+{
+	/* check it first to avoid taking io_bpf_ctrl_mutex */
+	scoped_guard(mutex, &ctx->uring_lock) {
+		if (!ctx->bpf_ops)
+			return;
+	}
+
+	guard(mutex)(&io_bpf_ctrl_mutex);
+	guard(mutex)(&ctx->uring_lock);
+	if (ctx->bpf_ops)
+		io_eject_bpf(ctx);
 }
 
 static struct bpf_struct_ops bpf_ring_ops = {
diff --git a/io_uring/bpf-ops.h b/io_uring/bpf-ops.h
index b9e589ad519a..bf4d5b9bb8c9 100644
--- a/io_uring/bpf-ops.h
+++ b/io_uring/bpf-ops.h
@@ -17,4 +17,12 @@ struct io_uring_bpf_ops {
 	void *priv;
 };
 
+#ifdef CONFIG_IO_URING_BPF
+void io_unregister_bpf_ops(struct io_ring_ctx *ctx);
+#else
+static inline void io_unregister_bpf_ops(struct io_ring_ctx *ctx)
+{
+}
+#endif
+
 #endif /* IOU_BPF_OPS_H */
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 09920e56c9c9..9d6eef7ccf22 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2141,6 +2141,7 @@ static __cold void io_req_caches_free(struct io_ring_ctx *ctx)
 
 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
+	io_unregister_bpf_ops(ctx);
 	io_sq_thread_finish(ctx);
 
 	mutex_lock(&ctx->uring_lock);
-- 
2.52.0
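
For reference, below is a minimal sketch of what the BPF side of this
registration could look like. It assumes the io_uring_bpf_ops layout from
this series (ring_fd is the only data member copied in by
bpf_io_init_member(), loop_step() the only mandatory callback) and the
usual libbpf struct_ops conventions; the loop_step() prototype, file and
symbol names below are illustrative, not taken from this patch.

/* iou_loop_ops.bpf.c -- illustrative sketch only */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* The real loop_step() prototype is defined by earlier patches in the
 * series (loop.h); the arguments here are assumed for illustration. */
SEC("struct_ops/loop_step")
int BPF_PROG(loop_step, struct io_ring_ctx *ctx, struct iou_loop_params *lp)
{
	/* no-op step: defer to the default kernel behaviour */
	return 0;
}

SEC(".struct_ops.link")
struct io_uring_bpf_ops iou_ops = {
	.loop_step	= (void *)loop_step,
	/* Userspace fills this in before load, e.g.
	 *   skel->struct_ops.iou_ops->ring_fd = ring_fd;
	 * and then attaches with bpf_map__attach_struct_ops();
	 * bpf_io_init_member() copies it into the kernel-side copy. */
	.ring_fd	= -1,
};

Attaching the link then goes through bpf_io_reg() -> io_install_bpf()
under io_bpf_ctrl_mutex and the ring's uring_lock; detaching the link, or
freeing the ring, takes the same locks before io_eject_bpf() clears
ctx->bpf_ops.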