We soon will no longer hold RTNL in qdisc dumps. Add READ_ONCE()/WRITE_ONCE() annotations. Note taprio already uses RCU to protect most of its fields. Signed-off-by: Eric Dumazet --- net/sched/sch_taprio.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 885a9bc859166dfb6d20aa0dfbb8f11194e02ba9..75030a834840cde3480722cc4214431625a7c7ee 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -308,10 +308,10 @@ static void taprio_update_queue_max_sdu(struct taprio_sched *q, if (max_sdu != U32_MAX) { sched->max_frm_len[tc] = max_sdu + dev->hard_header_len; - sched->max_sdu[tc] = max_sdu; + WRITE_ONCE(sched->max_sdu[tc], max_sdu); } else { sched->max_frm_len[tc] = U32_MAX; /* never oversized */ - sched->max_sdu[tc] = 0; + WRITE_ONCE(sched->max_sdu[tc], 0); } } } @@ -1770,8 +1770,8 @@ static int taprio_parse_tc_entries(struct Qdisc *sch, } for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) { - q->max_sdu[tc] = max_sdu[tc]; - q->fp[tc] = fp[tc]; + WRITE_ONCE(q->max_sdu[tc], max_sdu[tc]); + WRITE_ONCE(q->fp[tc], fp[tc]); if (fp[tc] != TC_FP_EXPRESS) have_preemption = true; } @@ -1851,12 +1851,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, return -EINVAL; } - if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) { - NL_SET_ERR_MSG_MOD(extack, - "Changing 'flags' of a running schedule is not supported"); - return -EOPNOTSUPP; + if (q->flags != taprio_flags) { + if (q->flags != TAPRIO_FLAGS_INVALID) { + NL_SET_ERR_MSG_MOD(extack, + "Changing 'flags' of a running schedule is not supported"); + return -EOPNOTSUPP; + } + q->flags = taprio_flags; } - q->flags = taprio_flags; /* Needed for length_to_duration() during netlink attribute parsing */ taprio_set_picos_per_byte(dev, q, extack); @@ -1939,7 +1941,8 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, goto unlock; } - q->txtime_delay = 
nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); + WRITE_ONCE(q->txtime_delay, + nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY])); } if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && @@ -2279,8 +2282,8 @@ static int dump_schedule(struct sk_buff *msg, } static int taprio_dump_tc_entries(struct sk_buff *skb, - struct taprio_sched *q, - struct sched_gate_list *sched) + const struct taprio_sched *q, + const struct sched_gate_list *sched) { struct nlattr *n; int tc; @@ -2294,10 +2297,11 @@ static int taprio_dump_tc_entries(struct sk_buff *skb, goto nla_put_failure; if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU, - sched->max_sdu[tc])) + READ_ONCE(sched->max_sdu[tc]))) goto nla_put_failure; - if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc])) + if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, + READ_ONCE(q->fp[tc]))) goto nla_put_failure; nla_nest_end(skb, n); @@ -2383,6 +2387,7 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) struct sched_gate_list *oper, *admin; struct tc_mqprio_qopt opt = { 0 }; struct nlattr *nest, *sched_nest; + u32 txtime_delay; mqprio_qopt_reconstruct(dev, &opt); @@ -2400,14 +2405,15 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags)) goto options_error; - if (q->txtime_delay && - nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay)) + txtime_delay = READ_ONCE(q->txtime_delay); + if (txtime_delay && + nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, txtime_delay)) goto options_error; rcu_read_lock(); - oper = rtnl_dereference(q->oper_sched); - admin = rtnl_dereference(q->admin_sched); + oper = rcu_dereference(q->oper_sched); + admin = rcu_dereference(q->admin_sched); if (oper && taprio_dump_tc_entries(skb, q, oper)) goto options_error_rcu; -- 2.53.0.1213.gd9a14994de-goog