A recent lockdep splat [1] highlights a potential deadlock risk
involving ->elevator_lock and ->freeze_lock dependencies on
pcpu_alloc_mutex. The trace shows that the issue occurs when the Kyber
scheduler allocates dynamic memory for its elevator data during
initialization.

To address this, introduce two new elevator operation callbacks:
->alloc_sched_data and ->free_sched_data. When an elevator implements
these methods, they are invoked during a scheduler switch before
->freeze_lock and ->elevator_lock are acquired. This allows per-elevator
data to be allocated and freed safely, without holding locks that could
depend on pcpu_alloc_mutex, effectively breaking the lock dependency
chain and avoiding the reported deadlock scenario.

[1] https://lore.kernel.org/all/CAGVVp+VNW4M-5DZMNoADp6o2VKFhi7KxWpTDkcnVyjO0=-D5+A@mail.gmail.com/

Signed-off-by: Nilay Shroff
---
 block/blk-mq-sched.c | 68 ++++++++++++++++++++++++++++++++++++++++++--
 block/blk-mq-sched.h | 23 ++++++++++++++-
 block/blk-mq.c       |  7 ++++-
 block/elevator.c     | 46 +++++++++++++++++++++++-------
 block/elevator.h     |  8 +++++-
 5 files changed, 137 insertions(+), 15 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1c9571136a30..f1cc2f2428b2 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -453,6 +453,70 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
 	}
 }
 
+void blk_mq_free_sched_data_batch(struct xarray *elv_tbl,
+		struct blk_mq_tag_set *set)
+{
+	struct request_queue *q;
+	struct elv_change_ctx *ctx;
+
+	lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		if (q->elevator) {
+			ctx = xa_load(elv_tbl, q->id);
+			if (WARN_ON_ONCE(!ctx))
+				continue;
+			if (ctx->data)
+				blk_mq_free_sched_data(q->elevator->type,
+						ctx->data);
+		}
+	}
+}
+
+int blk_mq_alloc_sched_data_batch(struct xarray *elv_tbl,
+		struct blk_mq_tag_set *set)
+{
+	struct request_queue *q;
+	struct elv_change_ctx *ctx;
+	int ret = 0;
+
+	lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+		/*
+		 * Accessing q->elevator without holding q->elevator_lock is
+		 * safe because we're holding set->update_nr_hwq_lock here in
+		 * the writer context. So, scheduler update/switch code (which
+		 * acquires the same lock but in the reader context) can't run
+		 * concurrently.
+		 */
+		if (q->elevator) {
+			ctx = xa_load(elv_tbl, q->id);
+			if (WARN_ON_ONCE(!ctx))
+				return -ENOENT;
+
+			ret = blk_mq_alloc_sched_data(q, q->elevator->type,
+					&ctx->data);
+			if (ret)
+				goto out_unwind;
+		}
+	}
+	return ret;
+
+out_unwind:
+	list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
+		if (q->elevator) {
+			ctx = xa_load(elv_tbl, q->id);
+			if (WARN_ON_ONCE(!ctx))
+				continue;
+			if (ctx->data)
+				blk_mq_free_sched_data(q->elevator->type,
+						ctx->data);
+		}
+	}
+	return ret;
+}
+
 int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
 		struct blk_mq_tag_set *set)
 {
@@ -573,7 +637,7 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *elv_tbl,
 
 /* caller must have a reference to @e, will grab another one if successful */
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
-		struct elevator_tags *et)
+		struct elevator_tags *et, void *data)
 {
 	unsigned int flags = q->tag_set->flags;
 	struct blk_mq_hw_ctx *hctx;
@@ -581,7 +645,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
 	unsigned long i;
 	int ret;
 
-	eq = elevator_alloc(q, e, et);
+	eq = elevator_alloc(q, e, et, data);
 	if (!eq)
 		return -ENOMEM;
 
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index ba67e4e2447b..23cda157d8dd 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -19,7 +19,7 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
 int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
-		struct elevator_tags *et);
+		struct elevator_tags *et, void *data);
 void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_rqs(struct request_queue *q);
 
@@ -29,10 +29,31 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
 		struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
 int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
 		struct blk_mq_tag_set *set);
+int blk_mq_alloc_sched_data_batch(struct xarray *elv_tbl,
+		struct blk_mq_tag_set *set);
 void blk_mq_free_sched_tags(struct elevator_tags *et,
 		struct blk_mq_tag_set *set);
 void blk_mq_free_sched_tags_batch(struct xarray *et_table,
 		struct blk_mq_tag_set *set);
+void blk_mq_free_sched_data_batch(struct xarray *elv_tbl,
+		struct blk_mq_tag_set *set);
+
+static inline int blk_mq_alloc_sched_data(struct request_queue *q,
+		struct elevator_type *e, void **data)
+{
+	if (e && e->ops.alloc_sched_data) {
+		*data = e->ops.alloc_sched_data(q);
+		if (!*data)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static inline void blk_mq_free_sched_data(struct elevator_type *e, void *data)
+{
+	if (e && e->ops.free_sched_data)
+		e->ops.free_sched_data(data);
+}
 
 static inline void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl)
 {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2e3ebaf877e1..0ffec6875db9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -5066,9 +5066,14 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 	if (blk_mq_alloc_sched_ctx_batch(&elv_tbl, set) < 0)
 		goto out_xa_destroy;
 
-	if (blk_mq_alloc_sched_tags_batch(&elv_tbl, set, nr_hw_queues) < 0)
+	if (blk_mq_alloc_sched_data_batch(&elv_tbl, set) < 0)
 		goto out_free_ctx;
 
+	if (blk_mq_alloc_sched_tags_batch(&elv_tbl, set, nr_hw_queues) < 0) {
+		blk_mq_free_sched_data_batch(&elv_tbl, set);
+		goto out_free_ctx;
+	}
+
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_debugfs_unregister_hctxs(q);
 		blk_mq_sysfs_unregister_hctxs(q);
diff --git a/block/elevator.c b/block/elevator.c
index cd7bdff205c8..89f04b359911 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -121,7 +121,9 @@ static struct elevator_type *elevator_find_get(const char *name)
 static const struct kobj_type elv_ktype;
 
 struct elevator_queue *elevator_alloc(struct request_queue *q,
-		struct elevator_type *e, struct elevator_tags *et)
+		struct elevator_type *e,
+		struct elevator_tags *et,
+		void *data)
 {
 	struct elevator_queue *eq;
 
@@ -135,6 +137,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 	mutex_init(&eq->sysfs_lock);
 	hash_init(eq->hash);
 	eq->et = et;
+	eq->elevator_data = data;
 
 	return eq;
 }
@@ -580,7 +583,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
 	}
 
 	if (new_e) {
-		ret = blk_mq_init_sched(q, new_e, ctx->et);
+		ret = blk_mq_init_sched(q, new_e, ctx->et, ctx->data);
 		if (ret)
 			goto out_unfreeze;
 		ctx->new = q->elevator;
@@ -617,6 +620,7 @@ static void elv_exit_and_release(struct request_queue *q)
 	blk_mq_unfreeze_queue(q, memflags);
 	if (e) {
 		blk_mq_free_sched_tags(e->et, q->tag_set);
+		blk_mq_free_sched_data(e->type, e->elevator_data);
 		kobject_put(&e->kobj);
 	}
 }
@@ -632,6 +636,7 @@ static int elevator_change_done(struct request_queue *q,
 		elv_unregister_queue(q, ctx->old);
 		blk_mq_free_sched_tags(ctx->old->et, q->tag_set);
+		blk_mq_free_sched_data(ctx->old->type, ctx->old->elevator_data);
 		kobject_put(&ctx->old->kobj);
 		if (enable_wbt)
 			wbt_enable_default(q->disk);
@@ -660,6 +665,10 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
 				blk_mq_default_nr_requests(set));
 		if (!ctx->et)
 			return -ENOMEM;
+
+		ret = blk_mq_alloc_sched_data(q, ctx->type, &ctx->data);
+		if (ret)
+			goto free_sched_tags;
 	}
 
 	memflags = blk_mq_freeze_queue(q);
@@ -680,10 +689,18 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
 	blk_mq_unfreeze_queue(q, memflags);
 	if (!ret)
 		ret = elevator_change_done(q, ctx);
+
+	if (ctx->new) /* switching to new elevator is successful */
+		return ret;
+
 	/*
-	 * Free sched tags if it's allocated but we couldn't switch elevator.
+	 * Free sched tags and data if those were allocated but we couldn't
+	 * switch elevator.
 	 */
-	if (ctx->et && !ctx->new)
+	if (ctx->data)
+		blk_mq_free_sched_data(ctx->type, ctx->data);
+free_sched_tags:
+	if (ctx->et)
 		blk_mq_free_sched_tags(ctx->et, set);
 
 	return ret;
@@ -710,11 +727,17 @@ void elv_update_nr_hw_queues(struct request_queue *q,
 	blk_mq_unfreeze_queue_nomemrestore(q);
 	if (!ret)
 		WARN_ON_ONCE(elevator_change_done(q, ctx));
+
+	if (ctx->new) /* switching to new elevator is successful */
+		return;
 	/*
-	 * Free sched tags if it's allocated but we couldn't switch elevator.
+	 * Free sched tags and data if they were allocated but we couldn't
+	 * switch elevator.
 	 */
-	if (ctx->et && !ctx->new)
+	if (ctx->et)
 		blk_mq_free_sched_tags(ctx->et, set);
+	if (ctx->data)
+		blk_mq_free_sched_data(ctx->type, ctx->data);
 }
 
 /*
@@ -728,7 +751,6 @@ void elevator_set_default(struct request_queue *q)
 		.no_uevent = true,
 	};
 	int err;
-	struct elevator_type *e;
 
 	/* now we allow to switch elevator */
 	blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
@@ -741,8 +763,8 @@ void elevator_set_default(struct request_queue *q)
 	 * have multiple queues or mq-deadline is not available, default
 	 * to "none".
 	 */
-	e = elevator_find_get(ctx.name);
-	if (!e)
+	ctx.type = elevator_find_get(ctx.name);
+	if (!ctx.type)
 		return;
 
 	if ((q->nr_hw_queues == 1 ||
@@ -752,7 +774,7 @@ void elevator_set_default(struct request_queue *q)
 		pr_warn("\"%s\" elevator initialization, failed %d, falling back to \"none\"\n",
 			ctx.name, err);
 	}
-	elevator_put(e);
+	elevator_put(ctx.type);
 }
 
 void elevator_set_none(struct request_queue *q)
@@ -801,6 +823,7 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
 	ctx.name = strstrip(elevator_name);
 
 	elv_iosched_load_module(ctx.name);
+	ctx.type = elevator_find_get(ctx.name);
 
 	down_read(&set->update_nr_hwq_lock);
 	if (!blk_queue_no_elv_switch(q)) {
@@ -811,6 +834,9 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
 		ret = -ENOENT;
 	}
 	up_read(&set->update_nr_hwq_lock);
+
+	if (ctx.type)
+		elevator_put(ctx.type);
 	return ret;
 }
 
diff --git a/block/elevator.h b/block/elevator.h
index bad43182361e..648022e4ec92 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -43,6 +43,8 @@ struct elv_change_ctx {
 	struct elevator_queue *new;
 	/* store elevator type */
 	struct elevator_type *type;
+	/* store elevator data */
+	void *data;
 	/* holds sched tags data */
 	struct elevator_tags *et;
 };
@@ -53,6 +55,8 @@ struct elevator_mq_ops {
 	int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
 	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
 	void (*depth_updated)(struct request_queue *);
+	void *(*alloc_sched_data)(struct request_queue *);
+	void (*free_sched_data)(void *);
 
 	bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
 	bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
@@ -178,7 +182,9 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *page,
 			  size_t count);
 
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 struct elevator_queue *elevator_alloc(struct request_queue *,
-		struct elevator_type *, struct elevator_tags *);
+		struct elevator_type *,
+		struct elevator_tags *,
+		void *);
 
 /*
  * Helper functions.
-- 
2.51.0
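
P.S. For illustration only, not part of the patch itself: a minimal
sketch of how a scheduler might wire up the two new hooks, assuming the
ops signatures added to struct elevator_mq_ops above. All example_*
identifiers below are hypothetical; Kyber's actual conversion is not
shown here.

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>

/* Per-elevator data whose allocation takes pcpu_alloc_mutex. */
struct example_sched_data {
	unsigned int __percpu *stats;
};

/*
 * Invoked during a scheduler switch, before ->freeze_lock and
 * ->elevator_lock are acquired, so taking pcpu_alloc_mutex inside
 * alloc_percpu() cannot create the reported lock dependency.
 */
static void *example_alloc_sched_data(struct request_queue *q)
{
	struct example_sched_data *esd;

	esd = kzalloc_node(sizeof(*esd), GFP_KERNEL, q->node);
	if (!esd)
		return NULL;

	esd->stats = alloc_percpu(unsigned int);
	if (!esd->stats) {
		kfree(esd);
		return NULL;
	}
	return esd;
}

/* Invoked with no queue locks held, on teardown or a failed switch. */
static void example_free_sched_data(void *data)
{
	struct example_sched_data *esd = data;

	free_percpu(esd->stats);
	kfree(esd);
}

static struct elevator_type example_elevator = {
	.ops = {
		.alloc_sched_data	= example_alloc_sched_data,
		.free_sched_data	= example_free_sched_data,
		/* ... remaining elevator_mq_ops callbacks ... */
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

The scheduler's ->init_sched would then pick up the pre-allocated data
from eq->elevator_data (stashed there by elevator_alloc() above) instead
of allocating it while the queue is frozen.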