Add a global static spinlock to serialize counter fetch+reset operations,
preventing concurrent dump-and-reset from underrunning values. The lock is
taken before fetching the total so that two parallel resets cannot both
read the same counter values and then both subtract them.

A global lock is used for simplicity since resets are infrequent. If this
becomes a bottleneck, it can be replaced with a per-net lock later.

Suggested-by: Florian Westphal
Signed-off-by: Brian Witte
---
 net/netfilter/nft_counter.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 0d70325280cc..169ae93688bc 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -32,6 +32,9 @@ struct nft_counter_percpu_priv {
 
 static DEFINE_PER_CPU(struct u64_stats_sync, nft_counter_sync);
 
+/* control plane only: sync fetch+reset */
+static DEFINE_SPINLOCK(nft_counter_lock);
+
 static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv,
 				       struct nft_regs *regs,
 				       const struct nft_pktinfo *pkt)
@@ -148,13 +151,25 @@ static void nft_counter_fetch(struct nft_counter_percpu_priv *priv,
 	}
 }
 
+static void nft_counter_fetch_and_reset(struct nft_counter_percpu_priv *priv,
+					struct nft_counter_tot *total)
+{
+	spin_lock(&nft_counter_lock);
+	nft_counter_fetch(priv, total);
+	nft_counter_reset(priv, total);
+	spin_unlock(&nft_counter_lock);
+}
+
 static int nft_counter_do_dump(struct sk_buff *skb,
 			       struct nft_counter_percpu_priv *priv,
 			       bool reset)
 {
 	struct nft_counter_tot total;
 
-	nft_counter_fetch(priv, &total);
+	if (unlikely(reset))
+		nft_counter_fetch_and_reset(priv, &total);
+	else
+		nft_counter_fetch(priv, &total);
 
 	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes),
 			 NFTA_COUNTER_PAD) ||
@@ -162,9 +177,6 @@ static int nft_counter_do_dump(struct sk_buff *skb,
 			 NFTA_COUNTER_PAD))
 		goto nla_put_failure;
 
-	if (reset)
-		nft_counter_reset(priv, &total);
-
 	return 0;
 
 nla_put_failure:
-- 
2.47.3
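
Postscript for reviewers: the double subtraction described above can be
reproduced entirely in userspace. The sketch below is illustrative only
(plain pthreads, made-up names, not kernel code); a mutex stands in for
nft_counter_lock and a plain integer for the summed per-cpu counters, and
the unserialized pass shows the counter going negative when two dump+reset
paths race.

/*
 * Userspace illustration only -- not kernel code.  Two "dump and reset"
 * threads read the same total and then both subtract it, driving the
 * counter negative; taking a lock around the read+subtract pair (as the
 * patch does with nft_counter_lock) avoids the double subtraction.
 * The unlocked pass intentionally races; all names are made up.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static long long counter = 1000;	/* stand-in for the summed per-cpu counters */
static pthread_mutex_t reset_lock = PTHREAD_MUTEX_INITIALIZER;
static int use_lock;			/* 0: racy variant, 1: serialized like the patch */

static void *dump_and_reset(void *arg)
{
	long long snapshot;

	(void)arg;
	if (use_lock)
		pthread_mutex_lock(&reset_lock);

	snapshot = counter;		/* "fetch": read what will be reported */
	usleep(1000);			/* widen the race window */
	counter -= snapshot;		/* "reset": subtract what was reported */

	if (use_lock)
		pthread_mutex_unlock(&reset_lock);

	return NULL;
}

int main(void)
{
	pthread_t a, b;

	for (use_lock = 0; use_lock <= 1; use_lock++) {
		counter = 1000;
		pthread_create(&a, NULL, dump_and_reset, NULL);
		pthread_create(&b, NULL, dump_and_reset, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("%s: counter after two dump+reset = %lld\n",
		       use_lock ? "locked" : "racy", counter);
	}
	return 0;
}

Built with gcc -pthread, the racy pass typically prints -1000 while the
locked pass prints 0, which is the underrun the patch prevents.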