Prepare mq_dump_common() for RTNL avoidance. Use RCU instead of RTNL, and no longer acquire each child's spinlock. Signed-off-by: Eric Dumazet --- include/net/sch_generic.h | 9 +++++++++ net/sched/sch_mq.c | 32 ++++++++++++++++++++++---------- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 2ce84b2c9423ee3b945c0a537620b2568279e1ca..9e6ed92729d282642e17a72cc578f25e1d22e4d9 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -952,6 +952,15 @@ static inline void _bstats_update(struct gnet_stats_basic_sync *bstats, u64_stats_update_end(&bstats->syncp); } +static inline void _bstats_set(struct gnet_stats_basic_sync *bstats, + u64 bytes, u64 packets) +{ + u64_stats_update_begin(&bstats->syncp); + u64_stats_set(&bstats->bytes, bytes); + u64_stats_set(&bstats->packets, packets); + u64_stats_update_end(&bstats->syncp); +} + static inline void bstats_update(struct gnet_stats_basic_sync *bstats, const struct sk_buff *skb) { diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 4172ec24a43d1c2fe56789986a46da93eb522721..5a6b14557d85fdc6c2462ee9e926155438f597b8 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -143,30 +143,42 @@ EXPORT_SYMBOL_NS_GPL(mq_attach, "NET_SCHED_INTERNAL"); void mq_dump_common(struct Qdisc *sch, struct sk_buff *skb) { struct net_device *dev = qdisc_dev(sch); + struct gnet_stats_queue qstats = { 0 }; + struct gnet_stats_basic_sync bstats; + const struct Qdisc *qdisc; unsigned int qlen = 0; - struct Qdisc *qdisc; unsigned int ntx; - gnet_stats_basic_sync_init(&sch->bstats); - memset(&sch->qstats, 0, sizeof(sch->qstats)); + gnet_stats_basic_sync_init(&bstats); /* MQ supports lockless qdiscs. However, statistics accounting needs * to account for all, none, or a mix of locked and unlocked child * qdiscs. Percpu stats are added to counters in-band and locking * qdisc totals are added at end. 
*/ + rcu_read_lock(); for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { - qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); - spin_lock_bh(qdisc_lock(qdisc)); + qdisc = rcu_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); - gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, - &qdisc->bstats, false); - gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats, + gnet_stats_add_basic(&bstats, qdisc->cpu_bstats, + &qdisc->bstats, true); + gnet_stats_add_queue(&qstats, qdisc->cpu_qstats, &qdisc->qstats); qlen += qdisc_qlen_lockless(qdisc); - - spin_unlock_bh(qdisc_lock(qdisc)); } + rcu_read_unlock(); + + spin_lock_bh(qdisc_lock(sch)); + _bstats_set(&sch->bstats, u64_stats_read(&bstats.bytes), + u64_stats_read(&bstats.packets)); + spin_unlock_bh(qdisc_lock(sch)); + + WRITE_ONCE(sch->qstats.qlen, qstats.qlen); + WRITE_ONCE(sch->qstats.backlog, qstats.backlog); + WRITE_ONCE(sch->qstats.drops, qstats.drops); + WRITE_ONCE(sch->qstats.requeues, qstats.requeues); + WRITE_ONCE(sch->qstats.overlimits, qstats.overlimits); + WRITE_ONCE(sch->q.qlen, qlen); } EXPORT_SYMBOL_NS_GPL(mq_dump_common, "NET_SCHED_INTERNAL"); -- 2.54.0.563.g4f69b47b94-goog