In preparation for the following patch, abstract access to the @mask field in 'struct rps_sock_flow_table'. Also clean up rps_sock_flow_sysctl() a bit: - Rename orig_sock_table to o_sock_table. Signed-off-by: Eric Dumazet --- include/net/rps.h | 11 ++++++++--- net/core/dev.c | 4 +++- net/core/sysctl_net_core.c | 19 ++++++++++--------- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/include/net/rps.h b/include/net/rps.h index 32cfa250d9f931b8ab1c94e0410d0820bb9c999f..82cdffdf3e6b0035e7ceeb130b5b4ac19772e46c 100644 --- a/include/net/rps.h +++ b/include/net/rps.h @@ -60,18 +60,23 @@ struct rps_dev_flow_table { * meaning we use 32-6=26 bits for the hash. */ struct rps_sock_flow_table { - u32 mask; + u32 _mask; u32 ents[] ____cacheline_aligned_in_smp; }; #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) +static inline u32 rps_sock_flow_table_mask(const struct rps_sock_flow_table *table) +{ + return table->_mask; +} + #define RPS_NO_CPU 0xffff static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, u32 hash) { - unsigned int index = hash & table->mask; + unsigned int index = hash & rps_sock_flow_table_mask(table); u32 val = hash & ~net_hotdata.rps_cpu_mask; /* We only give a hint, preemption can change CPU under us */ @@ -129,7 +134,7 @@ static inline void _sock_rps_delete_flow(const struct sock *sk) rcu_read_lock(); table = rcu_dereference(net_hotdata.rps_sock_flow_table); if (table) { - index = hash & table->mask; + index = hash & rps_sock_flow_table_mask(table); if (READ_ONCE(table->ents[index]) != RPS_NO_CPU) WRITE_ONCE(table->ents[index], RPS_NO_CPU); } diff --git a/net/core/dev.c b/net/core/dev.c index 1cf3ad840697ed93a6c4cc5163aae514fda90eff..de70ef784d6363b3af4f9279e107647c90f5af19 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5112,12 +5112,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, if (flow_table && sock_flow_table) { struct rps_dev_flow *rflow; u32 
next_cpu; + u32 flow_id; u32 ident; /* First check into global flow table if there is a match. * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). */ - ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); + flow_id = hash & rps_sock_flow_table_mask(sock_flow_table); + ident = READ_ONCE(sock_flow_table->ents[flow_id]); if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask) goto try_rps; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 0b659c932cffef45e05207890b8187d64ae3c85a..cfbe798493b5789dc8baedf9dcbe9c20918e2ba6 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -145,16 +145,17 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write, .maxlen = sizeof(size), .mode = table->mode }; - struct rps_sock_flow_table *orig_sock_table, *sock_table; + struct rps_sock_flow_table *o_sock_table, *sock_table; static DEFINE_MUTEX(sock_flow_mutex); void *tofree = NULL; mutex_lock(&sock_flow_mutex); - orig_sock_table = rcu_dereference_protected( + o_sock_table = rcu_dereference_protected( net_hotdata.rps_sock_flow_table, lockdep_is_held(&sock_flow_mutex)); - size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; + size = o_sock_table ? 
rps_sock_flow_table_mask(o_sock_table) + 1 : 0; + orig_size = size; ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); @@ -165,6 +166,7 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write, mutex_unlock(&sock_flow_mutex); return -EINVAL; } + sock_table = o_sock_table; size = roundup_pow_of_two(size); if (size != orig_size) { sock_table = @@ -175,26 +177,25 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write, } net_hotdata.rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1; - sock_table->mask = size - 1; - } else - sock_table = orig_sock_table; + sock_table->_mask = size - 1; + } for (i = 0; i < size; i++) sock_table->ents[i] = RPS_NO_CPU; } else sock_table = NULL; - if (sock_table != orig_sock_table) { + if (sock_table != o_sock_table) { rcu_assign_pointer(net_hotdata.rps_sock_flow_table, sock_table); if (sock_table) { static_branch_inc(&rps_needed); static_branch_inc(&rfs_needed); } - if (orig_sock_table) { + if (o_sock_table) { static_branch_dec(&rps_needed); static_branch_dec(&rfs_needed); - tofree = orig_sock_table; + tofree = o_sock_table; } } } -- 2.53.0.473.g4a7958ca14-goog