bfq_end_wr_async() iterates q->blkg_list while holding only bfqd->lock,
not blkcg_mutex. This can race with blkg_free_workfn(), which removes
blkgs from the list while holding blkcg_mutex.

Take blkcg_mutex in bfq_end_wr() before grabbing bfqd->lock to ensure
proper synchronization when iterating q->blkg_list. While at it, skip
blkgs for which blkg_to_bfqg() returns NULL.

Signed-off-by: Yu Kuai
---
 block/bfq-cgroup.c  | 3 ++-
 block/bfq-iosched.c | 2 ++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 6a75fe1c7a5c..839d266a6aa6 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -940,7 +940,8 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
 	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
 		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 
-		bfq_end_wr_async_queues(bfqd, bfqg);
+		if (bfqg)
+			bfq_end_wr_async_queues(bfqd, bfqg);
 	}
 	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
 }
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 3ebdec40e758..617633be8abc 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2645,6 +2645,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
 	struct bfq_queue *bfqq;
 	int i;
 
+	mutex_lock(&bfqd->queue->blkcg_mutex);
 	spin_lock_irq(&bfqd->lock);
 
 	for (i = 0; i < bfqd->num_actuators; i++) {
@@ -2656,6 +2657,7 @@ static void bfq_end_wr(struct bfq_data *bfqd)
 	bfq_end_wr_async(bfqd);
 
 	spin_unlock_irq(&bfqd->lock);
+	mutex_unlock(&bfqd->queue->blkcg_mutex);
 }
 
 static sector_t bfq_io_struct_pos(void *io_struct, bool request)
-- 
2.51.0
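
For reference, a condensed sketch of the interleaving this patch closes.
The blkg_free_workfn() side is simplified to the list removal the commit
message refers to; its internal locking details are elided:

  CPU0: bfq_end_wr()                    CPU1: blkg_free_workfn()
  spin_lock_irq(&bfqd->lock)
  bfq_end_wr_async()
    list_for_each_entry(blkg,
                        &q->blkg_list,
                        q_node)
                                        mutex_lock(&q->blkcg_mutex)
                                        list_del_init(&blkg->q_node)
                                        mutex_unlock(&q->blkcg_mutex)
    iteration can dereference a blkg
    that is being unlinked and freed
  spin_unlock_irq(&bfqd->lock)

With the patch applied, bfq_end_wr() holds blkcg_mutex across the whole
iteration, so the removal cannot run concurrently. Taking the mutex
before bfqd->lock is also required for correctness on its own: a mutex
may sleep and therefore cannot be acquired inside the spinlock-protected
region.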