bfq_end_wr_async() iterates q->blkg_list while only holding bfqd->lock, but not blkcg_mutex. This can race with blkg_free_workfn() that removes blkgs from the list while holding blkcg_mutex. Add blkcg_mutex protection in bfq_end_wr() before taking bfqd->lock to ensure proper synchronization when iterating q->blkg_list. Also check that blkg_to_bfqg() returns a non-NULL bfq_group before using it in bfq_end_wr_async(), since a blkg on the list may not have BFQ policy data attached. Signed-off-by: Yu Kuai --- block/bfq-cgroup.c | 3 ++- block/bfq-iosched.c | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 6a75fe1c7a5c..839d266a6aa6 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -940,7 +940,8 @@ void bfq_end_wr_async(struct bfq_data *bfqd) list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) { struct bfq_group *bfqg = blkg_to_bfqg(blkg); - bfq_end_wr_async_queues(bfqd, bfqg); + if (bfqg) + bfq_end_wr_async_queues(bfqd, bfqg); } bfq_end_wr_async_queues(bfqd, bfqd->root_group); } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index b180ce583951..14ca67a616b7 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2645,6 +2645,9 @@ static void bfq_end_wr(struct bfq_data *bfqd) struct bfq_queue *bfqq; int i; +#ifdef CONFIG_BFQ_GROUP_IOSCHED + mutex_lock(&bfqd->queue->blkcg_mutex); +#endif spin_lock_irq(&bfqd->lock); for (i = 0; i < bfqd->num_actuators; i++) { @@ -2656,6 +2659,9 @@ static void bfq_end_wr(struct bfq_data *bfqd) bfq_end_wr_async(bfqd); spin_unlock_irq(&bfqd->lock); +#ifdef CONFIG_BFQ_GROUP_IOSCHED + mutex_unlock(&bfqd->queue->blkcg_mutex); +#endif } static sector_t bfq_io_struct_pos(void *io_struct, bool request) -- 2.51.0