net-sysfs: add rps_sock_flow_table_mask() helper

In preparation for the following patch, abstract access
to the @mask field in 'struct rps_sock_flow_table'.

Also clean up rps_sock_flow_sysctl() a bit:

- Rename orig_sock_table to o_sock_table.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20260302181432.1836150-4-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Eric Dumazet
2026-03-02 18:14:28 +00:00
committed by Jakub Kicinski
parent 61753849b8
commit 9cde131cdd
3 changed files with 21 additions and 13 deletions

View File

@@ -60,18 +60,23 @@ struct rps_dev_flow_table {
* meaning we use 32-6=26 bits for the hash.
*/
/* NOTE(review): this view shows a stripped diff — the old field (mask) and its
 * replacement (_mask, renamed by this commit) both appear below; only _mask
 * exists after the patch is applied. */
struct rps_sock_flow_table {
u32 mask;
u32 _mask;
u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
/* Accessor for the flow table's index mask (table size - 1, per the
 * "_mask = size - 1" assignment in rps_sock_flow_sysctl()); callers use it
 * to map a flow hash to an ents[] slot. */
static inline u32 rps_sock_flow_table_mask(const struct rps_sock_flow_table *table)
{
return table->_mask;
}
#define RPS_NO_CPU 0xffff
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
u32 hash)
{
unsigned int index = hash & table->mask;
unsigned int index = hash & rps_sock_flow_table_mask(table);
u32 val = hash & ~net_hotdata.rps_cpu_mask;
/* We only give a hint, preemption can change CPU under us */
@@ -129,7 +134,7 @@ static inline void _sock_rps_delete_flow(const struct sock *sk)
rcu_read_lock();
table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (table) {
index = hash & table->mask;
index = hash & rps_sock_flow_table_mask(table);
if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
WRITE_ONCE(table->ents[index], RPS_NO_CPU);
}

View File

@@ -5112,12 +5112,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
u32 flow_id;
u32 ident;
/* First check into global flow table if there is a match.
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
flow_id = hash & rps_sock_flow_table_mask(sock_flow_table);
ident = READ_ONCE(sock_flow_table->ents[flow_id]);
if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;

View File

@@ -145,16 +145,17 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
.maxlen = sizeof(size),
.mode = table->mode
};
struct rps_sock_flow_table *orig_sock_table, *sock_table;
struct rps_sock_flow_table *o_sock_table, *sock_table;
static DEFINE_MUTEX(sock_flow_mutex);
void *tofree = NULL;
mutex_lock(&sock_flow_mutex);
orig_sock_table = rcu_dereference_protected(
o_sock_table = rcu_dereference_protected(
net_hotdata.rps_sock_flow_table,
lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
size = o_sock_table ? rps_sock_flow_table_mask(o_sock_table) + 1 : 0;
orig_size = size;
ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
@@ -165,6 +166,7 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -EINVAL;
}
sock_table = o_sock_table;
size = roundup_pow_of_two(size);
if (size != orig_size) {
sock_table =
@@ -175,26 +177,25 @@ static int rps_sock_flow_sysctl(const struct ctl_table *table, int write,
}
net_hotdata.rps_cpu_mask =
roundup_pow_of_two(nr_cpu_ids) - 1;
sock_table->mask = size - 1;
} else
sock_table = orig_sock_table;
sock_table->_mask = size - 1;
}
for (i = 0; i < size; i++)
sock_table->ents[i] = RPS_NO_CPU;
} else
sock_table = NULL;
if (sock_table != orig_sock_table) {
if (sock_table != o_sock_table) {
rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
sock_table);
if (sock_table) {
static_branch_inc(&rps_needed);
static_branch_inc(&rfs_needed);
}
if (orig_sock_table) {
if (o_sock_table) {
static_branch_dec(&rps_needed);
static_branch_dec(&rfs_needed);
tofree = orig_sock_table;
tofree = o_sock_table;
}
}
}