Use atomic64_xchg() to atomically read and zero the consumed value on
reset. This is simpler than the previous read-then-sub sequence and
does not require spinlock protection to make the read and the zeroing
a single atomic step.

Suggested-by: Pablo Neira Ayuso
Signed-off-by: Brian Witte
---
 net/netfilter/nft_quota.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index df0798da2329..34c77c872f79 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -140,11 +140,14 @@ static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
 	u64 consumed, consumed_cap, quota;
 	u32 flags = priv->flags;
 
-	/* Since we inconditionally increment consumed quota for each packet
+	/* Since we unconditionally increment consumed quota for each packet
 	 * that we see, don't go over the quota boundary in what we send to
 	 * userspace.
 	 */
-	consumed = atomic64_read(priv->consumed);
+	if (reset)
+		consumed = atomic64_xchg(priv->consumed, 0);
+	else
+		consumed = atomic64_read(priv->consumed);
 	quota = atomic64_read(&priv->quota);
 	if (consumed >= quota) {
 		consumed_cap = quota;
@@ -160,10 +163,9 @@ static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
 	    nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
 		goto nla_put_failure;
 
-	if (reset) {
-		atomic64_sub(consumed, priv->consumed);
+	if (reset)
 		clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
-	}
+
 	return 0;
 
 nla_put_failure:
-- 
2.47.3
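
For illustration, here is a minimal userspace sketch of the same
pattern using C11 stdatomic rather than the kernel's atomic64 API
(the helper names account(), reset_read_sub() and reset_xchg() are
hypothetical, not taken from the patch): read-then-sub lets two
concurrent resets subtract the same value and underflow the counter,
whereas a single exchange hands each accounted byte to exactly one
resetter.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint64_t consumed;

	/* Packet path: unconditionally account each packet. */
	static void account(uint64_t bytes)
	{
		atomic_fetch_add_explicit(&consumed, bytes,
					  memory_order_relaxed);
	}

	/* Old pattern: read, then subtract what was read.  Two resets
	 * racing here can both read the same value and both subtract
	 * it, driving the counter below zero; increments landing in
	 * between survive, but the double subtraction does not
	 * self-correct. */
	static uint64_t reset_read_sub(void)
	{
		uint64_t seen = atomic_load_explicit(&consumed,
						     memory_order_relaxed);

		atomic_fetch_sub_explicit(&consumed, seen,
					  memory_order_relaxed);
		return seen;
	}

	/* New pattern: read and zero in one atomic step, so each
	 * accounted byte is reported to exactly one resetter. */
	static uint64_t reset_xchg(void)
	{
		return atomic_exchange_explicit(&consumed, 0,
						memory_order_relaxed);
	}

	int main(void)
	{
		account(1500);
		printf("xchg reset saw %llu bytes\n",
		       (unsigned long long)reset_xchg());

		account(500);
		printf("read+sub reset saw %llu bytes\n",
		       (unsigned long long)reset_read_sub());
		return 0;
	}

Relaxed ordering is enough for a standalone counter in this sketch;
the kernel's value-returning atomics such as atomic64_xchg() are more
strongly ordered, which only adds safety here.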