From: Yu Kuai

It's safe to iterate blkgs with the cgroup lock or the RCU read lock
held. Stop nesting queue_lock inside the RCU lock, and prepare to
protect blkcg with blkcg_mutex instead of queue_lock.

Signed-off-by: Yu Kuai
---
 block/blk-cgroup-rwstat.c |  2 --
 block/blk-cgroup.c        | 16 +++++-----------
 2 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
index a55fb0c53558..d41ade993312 100644
--- a/block/blk-cgroup-rwstat.c
+++ b/block/blk-cgroup-rwstat.c
@@ -101,8 +101,6 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 	struct cgroup_subsys_state *pos_css;
 	unsigned int i;
 
-	lockdep_assert_held(&blkg->q->queue_lock);
-
 	memset(sum, 0, sizeof(*sum));
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f93de34fe87d..2d767ae61d2f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -712,12 +712,9 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 	u64 total = 0;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(&blkg->q->queue_lock);
-		if (blkcg_policy_enabled(blkg->q, pol))
+	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node)
+		if (blkcg_policy_enabled(blkg->q, pol) && blkg->online)
 			total += prfill(sf, blkg->pd[pol->plid], data);
-		spin_unlock_irq(&blkg->q->queue_lock);
-	}
 	rcu_read_unlock();
 
 	if (show_total)
@@ -1242,13 +1239,10 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 	else
 		css_rstat_flush(&blkcg->css);
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(&blkg->q->queue_lock);
+	spin_lock_irq(&blkcg->lock);
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node)
 		blkcg_print_one_stat(blkg, sf);
-		spin_unlock_irq(&blkg->q->queue_lock);
-	}
-	rcu_read_unlock();
+	spin_unlock_irq(&blkcg->lock);
 
 	return 0;
 }
-- 
2.39.2