netem applies several impairments (delay, loss, corruption, duplication, reordering) but exposes no counters distinguishing which impairment affected a given packet. Add a struct tc_netem_xstats reported via TCA_STATS_APP so that userspace (tc -s qdisc show) can display per-impairment counters. Signed-off-by: Stephen Hemminger --- include/uapi/linux/pkt_sched.h | 9 +++++++++ net/sched/sch_netem.c | 27 ++++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 66e8072f44df..fada10cb9b7b 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -569,6 +569,15 @@ struct tc_netem_gemodel { #define NETEM_DIST_SCALE 8192 #define NETEM_DIST_MAX 16384 +struct tc_netem_xstats { + __u32 delayed; /* packets delayed */ + __u32 dropped; /* packets dropped by loss model */ + __u32 corrupted; /* packets with bit errors injected */ + __u32 duplicated; /* duplicate packets generated */ + __u32 reordered; /* packets sent out of order */ + __u32 ecn_marked; /* packets ECN CE-marked (not dropped) */ +}; + /* DRR */ enum { diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 26fc68e34b91..755e8d009f85 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -152,6 +152,9 @@ struct netem_sched_data { } slot; struct disttable *slot_dist; + + /* Per-impairment counters */ + struct tc_netem_xstats xstats; }; /* Time stamp put into socket buffer control block @@ -459,17 +462,22 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, skb->prev = NULL; /* Random duplication */ - if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng)) + if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng)) { ++count; + q->xstats.duplicated++; + } /* Drop packet? */
if (loss_event(q)) { - if (q->ecn && INET_ECN_set_ce(skb)) + if (q->ecn && INET_ECN_set_ce(skb)) { qdisc_qstats_drop(sch); /* mark packet */ - else + q->xstats.ecn_marked++; + } else { --count; + } } if (count == 0) { + q->xstats.dropped++; qdisc_qstats_drop(sch); __qdisc_drop(skb, to_free); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; @@ -495,6 +503,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, * do it now in software before we mangle it. */ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) { + q->xstats.corrupted++; if (skb_is_gso(skb)) { skb = netem_segment(skb, sch, to_free); if (!skb) @@ -597,6 +606,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, delay += packet_time_ns(qdisc_pkt_len(skb), q); } + if (delay > 0) + q->xstats.delayed++; + cb->time_to_send = now + delay; ++q->counter; tfifo_enqueue(skb, sch); @@ -605,6 +617,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, * Do re-ordering by putting one out of N packets at the front * of the queue. */ + q->xstats.reordered++; cb->time_to_send = ktime_get_ns(); q->counter = 0; @@ -1311,6 +1324,13 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) return -1; } +static int netem_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + struct netem_sched_data *q = qdisc_priv(sch); + + return gnet_stats_copy_app(d, &q->xstats, sizeof(q->xstats)); +} + static int netem_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { @@ -1373,6 +1393,7 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = { .destroy = netem_destroy, .change = netem_change, .dump = netem_dump, + .dump_stats = netem_dump_stats, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("netem"); -- 2.53.0