Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-7.0-rc8).

Conflicts:

net/ipv6/seg6_iptunnel.c
  c3812651b5 ("seg6: separate dst_cache for input and output paths in seg6 lwtunnel")
  78723a62b9 ("seg6: add per-route tunnel source address")
https://lore.kernel.org/adZhwtOYfo-0ImSa@sirena.org.uk

net/ipv4/icmp.c
  fde29fd934 ("ipv4: icmp: fix null-ptr-deref in icmp_build_probe()")
  d98adfbdd5 ("ipv4: drop ipv6_stub usage and use direct function calls")
https://lore.kernel.org/adO3dccqnr6j-BL9@sirena.org.uk

Adjacent changes:

drivers/net/ethernet/stmicro/stmmac/chain_mode.c
  51f4e090b9 ("net: stmmac: fix integer underflow in chain mode")
  6b4286e055 ("net: stmmac: rename STMMAC_GET_ENTRY() -> STMMAC_NEXT_ENTRY()")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2026-04-02 10:57:09 -07:00
410 changed files with 5337 additions and 1964 deletions

View File

@@ -32,7 +32,7 @@
* recursion involves route lookups and full IP output, consuming much
* more stack per level, so a lower limit is needed.
*/
#define IP_TUNNEL_RECURSION_LIMIT 4
#define IP_TUNNEL_RECURSION_LIMIT 5
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)

View File

@@ -14,6 +14,7 @@
/* Per-conntrack timeout policy object.
 * Holds protocol identification plus a trailing blob of timeout data.
 */
struct nf_ct_timeout {
	__u16 l3num;                               /* layer-3 family (e.g. AF_INET) */
	const struct nf_conntrack_l4proto *l4proto; /* L4 protocol ops this policy applies to */
	struct rcu_head rcu;                       /* for RCU-deferred freeing — NOTE(review): confirm call_rcu/kfree_rcu user */
	char data[];                               /* flexible array: per-l4proto timeout values — presumably sized by l4proto; verify at allocation site */
};

View File

@@ -23,7 +23,6 @@ struct nf_queue_entry {
struct nf_hook_state state;
bool nf_ct_is_unconfirmed;
u16 size; /* sizeof(entry) + saved route keys */
u16 queue_num;
/* extra space to store route keys */
};

View File

@@ -14,7 +14,7 @@
#include <linux/mm.h>
#include <net/sock.h>
#define XDP_UMEM_SG_FLAG (1 << 1)
#define XDP_UMEM_SG_FLAG BIT(3)
struct net_device;
struct xsk_queue;

View File

@@ -41,16 +41,37 @@ static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
return XDP_PACKET_HEADROOM + pool->headroom;
}
/* Tailroom to reserve at the end of an Rx frame.
 *
 * When multi-buffer (@mbuf) is in use, room for a struct skb_shared_info
 * is kept (aligned via SKB_DATA_ALIGN); otherwise no tailroom is needed.
 */
static inline u32 xsk_pool_get_tailroom(bool mbuf)
{
	if (!mbuf)
		return 0;

	return SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
/* Return the pool's configured chunk (frame slot) size in bytes. */
static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}
static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
/* Raw Rx frame size: the chunk size minus the headroom reserved at the
 * front of each buffer. Callers wanting the tailroom-adjusted value use
 * xsk_pool_get_rx_frame_size() instead.
 */
static inline u32 __xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	u32 chunk = xsk_pool_get_chunk_size(pool);
	u32 headroom = xsk_pool_get_headroom(pool);

	return chunk - headroom;
}
/* Usable Rx frame size for this pool, 128-byte aligned.
 *
 * Starts from the raw (chunk - headroom) size, then carves out tailroom
 * for skb_shared_info — but only for zero-copy pools (pool->dev set)
 * whose umem opted into multi-buffer via XDP_UMEM_SG_FLAG, mirroring the
 * XDP core's xdp_data_hard_end() layout.
 */
static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	bool sg_zc = pool->dev && (pool->umem->flags & XDP_UMEM_SG_FLAG);
	u32 fsz = __xsk_pool_get_rx_frame_size(pool);

	fsz -= xsk_pool_get_tailroom(sg_zc);

	return ALIGN_DOWN(fsz, 128);
}
static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
{
return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);