Stats are read locklessly, add READ_ONCE() to prevent load-tearing. Write side will be handled in separate patches. Signed-off-by: Eric Dumazet --- net/core/gen_stats.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index b71ccaec0991461333dbe465ee619bca4a06e75b..1a2380e74272de8eaf3d4ef453e56105a31e9edf 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -345,11 +345,11 @@ static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats, for_each_possible_cpu(i) { const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); - qstats->qlen += qcpu->qlen; - qstats->backlog += qcpu->backlog; - qstats->drops += qcpu->drops; - qstats->requeues += qcpu->requeues; - qstats->overlimits += qcpu->overlimits; + qstats->qlen += READ_ONCE(qcpu->qlen); + qstats->backlog += READ_ONCE(qcpu->backlog); + qstats->drops += READ_ONCE(qcpu->drops); + qstats->requeues += READ_ONCE(qcpu->requeues); + qstats->overlimits += READ_ONCE(qcpu->overlimits); } } @@ -360,11 +360,11 @@ void gnet_stats_add_queue(struct gnet_stats_queue *qstats, if (cpu) { gnet_stats_add_queue_cpu(qstats, cpu); } else { - qstats->qlen += q->qlen; - qstats->backlog += q->backlog; - qstats->drops += q->drops; - qstats->requeues += q->requeues; - qstats->overlimits += q->overlimits; + qstats->qlen += READ_ONCE(q->qlen); + qstats->backlog += READ_ONCE(q->backlog); + qstats->drops += READ_ONCE(q->drops); + qstats->requeues += READ_ONCE(q->requeues); + qstats->overlimits += READ_ONCE(q->overlimits); } } EXPORT_SYMBOL(gnet_stats_add_queue); -- 2.53.0.1213.gd9a14994de-goog