mirror of
https://github.com/torvalds/linux.git
synced 2026-05-05 23:05:25 -04:00
udp: add udp_drops_inc() helper
Generic sk_drops_inc() reads sk->sk_drop_counters. We know the precise location for UDP sockets. Move sk_drop_counters out of sock_read_rxtx so that sock_write_rxtx starts at a cache line boundary. Signed-off-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Willem de Bruijn <willemb@google.com> Reviewed-by: David Ahern <dsahern@kernel.org> Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com> Link: https://patch.msgid.link/20250916160951.541279-9-edumazet@google.com Reviewed-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
This commit is contained in:
committed by:
Paolo Abeni
parent
4effb335b5
commit
9db27c8062
@@ -451,7 +451,6 @@ struct sock {
|
||||
#ifdef CONFIG_XFRM
|
||||
struct xfrm_policy __rcu *sk_policy[2];
|
||||
#endif
|
||||
struct numa_drop_counters *sk_drop_counters;
|
||||
__cacheline_group_end(sock_read_rxtx);
|
||||
|
||||
__cacheline_group_begin(sock_write_rxtx);
|
||||
@@ -568,6 +567,7 @@ struct sock {
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
struct bpf_local_storage __rcu *sk_bpf_storage;
|
||||
#endif
|
||||
struct numa_drop_counters *sk_drop_counters;
|
||||
struct rcu_head sk_rcu;
|
||||
netns_tracker ns_tracker;
|
||||
struct xarray sk_user_frags;
|
||||
|
||||
@@ -295,6 +295,11 @@ static inline void udp_lib_init_sock(struct sock *sk)
|
||||
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
|
||||
}
|
||||
|
||||
/*
 * udp_drops_inc - record one dropped packet against a UDP socket.
 * @sk: socket the drop is attributed to; assumed to be a UDP socket,
 *      since udp_sk() is an unchecked container_of-style cast — callers
 *      must guarantee this (diff context only; verify at call sites).
 *
 * Replaces the generic sk_drops_inc() on UDP paths: instead of reading
 * sk->sk_drop_counters, it uses the drop_counters embedded directly in
 * struct udp_sock, whose location is known at compile time (see the
 * commit message above).
 */
static inline void udp_drops_inc(struct sock *sk)
|
||||
{
|
||||
/* NOTE(review): numa_drop_add() is defined elsewhere; presumably a
 * per-NUMA-node counter increment by 1 — confirm against its definition. */
numa_drop_add(&udp_sk(sk)->drop_counters, 1);
|
||||
}
|
||||
|
||||
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
|
||||
static inline int udp_lib_hash(struct sock *sk)
|
||||
{
|
||||
|
||||
@@ -4444,7 +4444,6 @@ static int __init sock_struct_check(void)
|
||||
#ifdef CONFIG_MEMCG
|
||||
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
|
||||
#endif
|
||||
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_drop_counters);
|
||||
|
||||
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
|
||||
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
|
||||
|
||||
@@ -1790,7 +1790,7 @@ uncharge_drop:
|
||||
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
|
||||
|
||||
drop:
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
busylock_release(busy);
|
||||
return err;
|
||||
}
|
||||
@@ -1855,7 +1855,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
|
||||
IS_UDPLITE(sk));
|
||||
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
|
||||
IS_UDPLITE(sk));
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
__skb_unlink(skb, rcvq);
|
||||
*total += skb->truesize;
|
||||
kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
|
||||
@@ -2011,7 +2011,7 @@ try_again:
|
||||
|
||||
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
|
||||
__UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
|
||||
goto try_again;
|
||||
}
|
||||
@@ -2081,7 +2081,7 @@ try_again:
|
||||
|
||||
if (unlikely(err)) {
|
||||
if (!peeking) {
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
UDP_INC_STATS(sock_net(sk),
|
||||
UDP_MIB_INERRORS, is_udplite);
|
||||
}
|
||||
@@ -2452,7 +2452,7 @@ csum_error:
|
||||
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
|
||||
drop:
|
||||
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
sk_skb_reason_drop(sk, skb, drop_reason);
|
||||
return -1;
|
||||
}
|
||||
@@ -2537,7 +2537,7 @@ start_lookup:
|
||||
nskb = skb_clone(skb, GFP_ATOMIC);
|
||||
|
||||
if (unlikely(!nskb)) {
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
|
||||
IS_UDPLITE(sk));
|
||||
__UDP_INC_STATS(net, UDP_MIB_INERRORS,
|
||||
|
||||
@@ -524,7 +524,7 @@ try_again:
|
||||
}
|
||||
if (unlikely(err)) {
|
||||
if (!peeking) {
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
|
||||
}
|
||||
kfree_skb(skb);
|
||||
@@ -908,7 +908,7 @@ csum_error:
|
||||
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
|
||||
drop:
|
||||
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
sk_skb_reason_drop(sk, skb, drop_reason);
|
||||
return -1;
|
||||
}
|
||||
@@ -1013,7 +1013,7 @@ start_lookup:
|
||||
}
|
||||
nskb = skb_clone(skb, GFP_ATOMIC);
|
||||
if (unlikely(!nskb)) {
|
||||
sk_drops_inc(sk);
|
||||
udp_drops_inc(sk);
|
||||
__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
|
||||
IS_UDPLITE(sk));
|
||||
__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
|
||||
|
||||
Reference in New Issue
Block a user