Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in late fixes in preparation for the net-next PR.

Conflicts:

include/net/sch_generic.h
  a6bd339dbb ("net_sched: fix skb memory leak in deferred qdisc drops")
  ff2998f29f ("net: sched: introduce qdisc-specific drop reason tracing")
https://lore.kernel.org/adz0iX85FHMz0HdO@sirena.org.uk

drivers/net/ethernet/airoha/airoha_eth.c
  1acdfbdb51 ("net: airoha: Fix VIP configuration for AN7583 SoC")
  bf3471e6e6 ("net: airoha: Make flow control source port mapping dependent on nbq parameter")

Adjacent changes:

drivers/net/ethernet/airoha/airoha_ppe.c
  f44218cd5e ("net: airoha: Reset PPE cpu port configuration in airoha_ppe_hw_init()")
  7da62262ec ("inet: add ip_local_port_step_width sysctl to improve port usage distribution")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2026-04-14 11:54:21 -07:00
60 changed files with 1204 additions and 250 deletions

10
CREDITS
View File

@@ -3592,6 +3592,16 @@ E: wsalamon@tislabs.com
E: wsalamon@nai.com
D: portions of the Linux Security Module (LSM) framework and security modules
N: Salil Mehta
E: salil.mehta@opnsrc.net
D: Co-authored Huawei/HiSilicon Kunpeng 920 SoC HNS3 PF and VF 100G
D: Ethernet driver
D: Co-authored Huawei/HiSilicon Kunpeng 916 SoC HNS 10G Ethernet
D: driver enhancements
D: Maintained Huawei/HiSilicon HNS and HNS3 10G/100G Ethernet drivers
D: for Kunpeng 916 family, 920 family of SoCs
S: Cambridge, Cambridgeshire, United Kingdom
N: Robert Sanders
E: gt8134b@prism.gatech.edu
D: Dosemu

View File

@@ -11534,7 +11534,6 @@ F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
M: Jian Shen <shenjian15@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
M: Jijie Shao <shaojijie@huawei.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -11549,7 +11548,6 @@ F: drivers/net/ethernet/hisilicon/hibmcge/
HISILICON NETWORK SUBSYSTEM DRIVER
M: Jian Shen <shenjian15@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.hisilicon.com

View File

@@ -1379,7 +1379,7 @@ static int ucan_probe(struct usb_interface *intf,
*/
/* Prepare Memory for control transfers */
ctl_msg_buffer = devm_kzalloc(&udev->dev,
ctl_msg_buffer = devm_kzalloc(&intf->dev,
sizeof(union ucan_ctl_payload),
GFP_KERNEL);
if (!ctl_msg_buffer) {

View File

@@ -107,19 +107,7 @@ static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
struct airoha_eth *eth = port->qdma->eth;
u32 vip_port;
switch (port->id) {
case AIROHA_GDM3_IDX:
/* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
break;
case AIROHA_GDM4_IDX:
/* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
break;
default:
return 0;
}
vip_port = eth->soc->ops.get_vip_port(port, port->nbq);
if (enable) {
airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
@@ -293,16 +281,18 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
[FE_PSE_PORT_GDM4] = 2,
[FE_PSE_PORT_CDM5] = 2,
};
u32 all_rsv;
int q;
all_rsv = airoha_fe_get_pse_all_rsv(eth);
if (airoha_ppe_is_enabled(eth, 1)) {
u32 all_rsv;
/* hw misses PPE2 oq rsv */
all_rsv = airoha_fe_get_pse_all_rsv(eth);
all_rsv += PSE_RSV_PAGES *
pse_port_num_queues[FE_PSE_PORT_PPE2];
airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
}
airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
/* CMD1 */
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
@@ -584,7 +574,7 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
struct airoha_qdma_desc *desc)
{
u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
u32 port, sport, msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
switch (sport) {
@@ -612,21 +602,24 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
while (done < budget) {
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
u32 hash, reason, msg1, desc_ctrl;
struct airoha_gdm_port *port;
int data_len, len, p;
struct page *page;
desc_ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
dma_rmb();
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
dma_sync_single_for_cpu(eth->dev, e->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
page = virt_to_head_page(e->buf);
len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
data_len = q->skb ? q->buf_size
: SKB_WITH_OVERHEAD(q->buf_size);
@@ -670,8 +663,8 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
* DMA descriptor. Report DSA tag to the DSA stack
* via skb dst info.
*/
u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
le32_to_cpu(desc->msg0));
u32 msg0 = le32_to_cpu(READ_ONCE(desc->msg0));
u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG, msg0);
if (sptag < ARRAY_SIZE(port->dsa_meta) &&
port->dsa_meta[sptag])
@@ -679,6 +672,7 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
&port->dsa_meta[sptag]->dst);
}
msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
if (hash != AIROHA_RXD4_FOE_ENTRY)
skb_set_hash(q->skb, jhash_1word(hash, 0),
@@ -819,6 +813,11 @@ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
}
q->head = q->tail;
/* Set RX_DMA_IDX to RX_CPU_IDX to notify the hw the QDMA RX ring is
* empty.
*/
airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->tail));
}
@@ -1727,7 +1726,7 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
{
struct airoha_eth *eth = port->qdma->eth;
u32 val, pse_port, chan, nbq;
u32 val, pse_port, chan;
int src_port;
/* Forward the traffic to the proper GDM port */
@@ -1757,9 +1756,7 @@ static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));
/* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
src_port = eth->soc->ops.get_src_port_id(port, nbq);
src_port = eth->soc->ops.get_src_port_id(port, port->nbq);
if (src_port < 0)
return src_port;
@@ -1773,7 +1770,7 @@ static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
__field_prep(SP_CPORT_MASK(val), FE_PSE_PORT_CDM2));
if (port->id == AIROHA_GDM4_IDX && airoha_is_7581(eth)) {
u32 mask = FC_ID_OF_SRC_PORT_MASK(nbq);
u32 mask = FC_ID_OF_SRC_PORT_MASK(port->nbq);
airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, mask,
__field_prep(mask, AIROHA_GDM2_IDX));
@@ -2952,6 +2949,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
port->eth = eth;
port->dev = dev;
port->id = id;
/* XXX: Read nbq from DTS */
port->nbq = id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
eth->ports[p] = port;
return airoha_metadata_dst_alloc(port);
@@ -3148,6 +3147,28 @@ static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
return -EINVAL;
}
/* Map a GDM port plus its serdes queue index (nbq) to the EN7581 VIP
 * port bitmask programmed into REG_FE_VIP_PORT_EN/REG_FE_IFC_PORT_EN.
 * Returns 0 when the (port, nbq) pair has no XSI VIP mapping.
 */
static u32 airoha_en7581_get_vip_port(struct airoha_gdm_port *port, int nbq)
{
	if (port->id == AIROHA_GDM3_IDX) {
		/* GDM3 fronts the PCIe serdes lanes on EN7581 */
		if (nbq == 4)
			return XSI_PCIE0_VIP_PORT_MASK;
		if (nbq == 5)
			return XSI_PCIE1_VIP_PORT_MASK;
	} else if (port->id == AIROHA_GDM4_IDX) {
		/* GDM4 fronts the ethernet and USB serdes lanes */
		if (nbq == 0)
			return XSI_ETH_VIP_PORT_MASK;
		if (nbq == 1)
			return XSI_USB_VIP_PORT_MASK;
	}

	return 0;
}
static const char * const an7583_xsi_rsts_names[] = {
"xsi-mac",
"hsi0-mac",
@@ -3177,6 +3198,26 @@ static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
return -EINVAL;
}
/* AN7583 counterpart of the VIP port lookup: translate (port, nbq) into
 * the VIP port bitmask, or 0 when the pair has no XSI VIP mapping.
 * Note the GDM3/GDM4 roles differ from EN7581 on this SoC.
 */
static u32 airoha_an7583_get_vip_port(struct airoha_gdm_port *port, int nbq)
{
	if (port->id == AIROHA_GDM3_IDX && !nbq)
		return XSI_ETH_VIP_PORT_MASK;

	if (port->id == AIROHA_GDM4_IDX) {
		if (!nbq)
			return XSI_PCIE0_VIP_PORT_MASK;
		if (nbq == 1)
			return XSI_USB_VIP_PORT_MASK;
	}

	return 0;
}
static const struct airoha_eth_soc_data en7581_soc_data = {
.version = 0x7581,
.xsi_rsts_names = en7581_xsi_rsts_names,
@@ -3184,6 +3225,7 @@ static const struct airoha_eth_soc_data en7581_soc_data = {
.num_ppe = 2,
.ops = {
.get_src_port_id = airoha_en7581_get_src_port_id,
.get_vip_port = airoha_en7581_get_vip_port,
},
};
@@ -3194,6 +3236,7 @@ static const struct airoha_eth_soc_data an7583_soc_data = {
.num_ppe = 1,
.ops = {
.get_src_port_id = airoha_an7583_get_src_port_id,
.get_vip_port = airoha_an7583_get_vip_port,
},
};

View File

@@ -537,6 +537,7 @@ struct airoha_gdm_port {
struct airoha_eth *eth;
struct net_device *dev;
int id;
int nbq;
struct airoha_hw_stats stats;
@@ -577,6 +578,7 @@ struct airoha_eth_soc_data {
int num_ppe;
struct {
int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq);
u32 (*get_vip_port)(struct airoha_gdm_port *port, int nbq);
} ops;
};

View File

@@ -125,13 +125,13 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
PPE_BIND_AGE0_DELTA_NON_L4 |
PPE_BIND_AGE0_DELTA_UDP,
FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 60) |
FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 60));
airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
PPE_BIND_AGE1_DELTA_TCP_FIN |
PPE_BIND_AGE1_DELTA_TCP,
FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 60));
airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
PPE_SRAM_TABLE_EN_MASK |
@@ -159,7 +159,15 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
dram_num_entries));
airoha_fe_rmw(eth, REG_PPE_BIND_RATE(i),
PPE_BIND_RATE_L2B_BIND_MASK |
PPE_BIND_RATE_BIND_MASK,
FIELD_PREP(PPE_BIND_RATE_L2B_BIND_MASK, 0x1e) |
FIELD_PREP(PPE_BIND_RATE_BIND_MASK, 0x1e));
airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
airoha_fe_clear(eth, REG_PPE_PPE_FLOW_CFG(i),
PPE_FLOW_CFG_IP6_6RD_MASK);
for (p = 0; p < ARRAY_SIZE(eth->ports); p++) {
struct airoha_gdm_port *port = eth->ports[p];

View File

@@ -194,6 +194,7 @@ void bnge_rdma_aux_device_add(struct bnge_dev *bd)
dev_warn(bd->dev, "Failed to add auxiliary device for ROCE\n");
auxiliary_device_uninit(aux_dev);
bd->flags &= ~BNGE_EN_ROCE;
return;
}
bd->auxr_dev->net = bd->netdev;

View File

@@ -1819,15 +1819,15 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
{
struct enet_cb *tx_cb_ptr;
tx_cb_ptr = ring->cbs;
tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
/* Rewinding local write pointer */
if (ring->write_ptr == ring->cb_ptr)
ring->write_ptr = ring->end_ptr;
else
ring->write_ptr--;
tx_cb_ptr = ring->cbs;
tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
return tx_cb_ptr;
}
@@ -1985,6 +1985,7 @@ static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
released += drop;
ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
ring->free_bds += drop;
while (drop--) {
cb_ptr = bcmgenet_put_txcb(priv, ring);
skb = cb_ptr->skb;
@@ -1996,6 +1997,7 @@ static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
}
if (skb)
dev_consume_skb_any(skb);
netdev_tx_reset_queue(netdev_get_tx_queue(dev, ring->index));
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
@@ -3475,27 +3477,23 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 int1_enable = 0;
unsigned int q;
struct bcmgenet_tx_ring *ring = &priv->tx_rings[txqueue];
struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
bcmgenet_dump_tx_queue(ring);
bcmgenet_tx_reclaim_all(dev);
bcmgenet_tx_reclaim(dev, ring, true);
for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
/* Re-enable the TX interrupt for this ring */
bcmgenet_intrl2_1_writel(priv, 1 << txqueue, INTRL2_CPU_MASK_CLEAR);
/* Re-enable TX interrupts if disabled */
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
txq_trans_cond_update(txq);
netif_trans_update(dev);
BCMGENET_STATS64_INC((&ring->stats64), errors);
BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
netif_tx_wake_all_queues(dev);
netif_tx_wake_queue(txq);
}
#define MAX_MDF_FILTER 17

View File

@@ -352,6 +352,11 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
return new_pkts;
}
/* Advance an Rx descriptor ring index by one, wrapping to 0 at
 * oq->max_count (assumes idx is already a valid in-range index).
 */
static inline u32 octep_vf_oq_next_idx(struct octep_vf_oq *oq, u32 idx)
{
	if (++idx == oq->max_count)
		idx = 0;

	return idx;
}
/**
* __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
*
@@ -409,30 +414,52 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
rx_ol_flags = 0;
}
rx_bytes += buff_info->len;
if (buff_info->len <= oq->max_single_buffer_size) {
skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
if (!skb) {
oq->stats->alloc_failures++;
desc_used++;
read_idx = octep_vf_oq_next_idx(oq, read_idx);
continue;
}
rx_bytes += buff_info->len;
skb_reserve(skb, data_offset);
skb_put(skb, buff_info->len);
read_idx++;
desc_used++;
if (read_idx == oq->max_count)
read_idx = 0;
read_idx = octep_vf_oq_next_idx(oq, read_idx);
} else {
struct skb_shared_info *shinfo;
u16 data_len;
skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
if (!skb) {
oq->stats->alloc_failures++;
desc_used++;
read_idx = octep_vf_oq_next_idx(oq, read_idx);
data_len = buff_info->len - oq->max_single_buffer_size;
while (data_len) {
dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
PAGE_SIZE, DMA_FROM_DEVICE);
buff_info = (struct octep_vf_rx_buffer *)
&oq->buff_info[read_idx];
buff_info->page = NULL;
if (data_len < oq->buffer_size)
data_len = 0;
else
data_len -= oq->buffer_size;
desc_used++;
read_idx = octep_vf_oq_next_idx(oq, read_idx);
}
continue;
}
rx_bytes += buff_info->len;
skb_reserve(skb, data_offset);
/* Head fragment includes response header(s);
* subsequent fragments contains only data.
*/
skb_put(skb, oq->max_single_buffer_size);
read_idx++;
desc_used++;
if (read_idx == oq->max_count)
read_idx = 0;
read_idx = octep_vf_oq_next_idx(oq, read_idx);
shinfo = skb_shinfo(skb);
data_len = buff_info->len - oq->max_single_buffer_size;
@@ -454,10 +481,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
buff_info->len,
buff_info->len);
buff_info->page = NULL;
read_idx++;
desc_used++;
if (read_idx == oq->max_count)
read_idx = 0;
read_idx = octep_vf_oq_next_idx(oq, read_idx);
}
}

View File

@@ -3588,12 +3588,23 @@ found:
return NOTIFY_DONE;
}
static int mtk_max_gmac_mtu(struct mtk_eth *eth)
{
int i, max_mtu = ETH_DATA_LEN;
for (i = 0; i < ARRAY_SIZE(eth->netdev); i++)
if (eth->netdev[i] && eth->netdev[i]->mtu > max_mtu)
max_mtu = eth->netdev[i]->mtu;
return max_mtu;
}
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_mac *target_mac;
int i, err, ppe_num;
int i, err, ppe_num, mtu;
ppe_num = eth->soc->ppe_num;
@@ -3640,6 +3651,10 @@ static int mtk_open(struct net_device *dev)
mtk_gdm_config(eth, target_mac->id, gdm_config);
}
mtu = mtk_max_gmac_mtu(eth);
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_update_mtu(eth->ppe[i], mtu);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
@@ -4333,6 +4348,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
int length = new_mtu + MTK_RX_ETH_HLEN;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int max_mtu, i;
if (rcu_access_pointer(eth->prog) &&
length > MTK_PP_MAX_BUF_SIZE) {
@@ -4343,6 +4359,10 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
mtk_set_mcr_max_rx(mac, length);
WRITE_ONCE(dev->mtu, new_mtu);
max_mtu = mtk_max_gmac_mtu(eth);
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_update_mtu(eth->ppe[i], max_mtu);
return 0;
}

View File

@@ -973,6 +973,36 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
}
}
/* Program the PPE per-tag-layer output frame limits for the given MTU.
 * A NULL ppe (PPE instance not present) is silently ignored.
 */
void mtk_ppe_update_mtu(struct mtk_ppe *ppe, int mtu)
{
	int frame_len;

	if (!ppe)
		return;

	/* The PPE checks output frame size against per-tag-layer MTU limits,
	 * treating PPPoE and DSA tags just like 802.1Q VLAN tags. The Linux
	 * device MTU already accounts for PPPoE (PPPOE_SES_HLEN) and DSA tag
	 * overhead, but 802.1Q VLAN tags are handled transparently without
	 * being reflected by the lower device MTU being increased by 4.
	 * Callers pass the maximum MTU across all GMAC interfaces so that
	 * PPE output frame limits are sufficiently high regardless of which
	 * port a flow egresses through.
	 */
	frame_len = ETH_HLEN + mtu;

	ppe_w32(ppe, MTK_PPE_VLAN_MTU0,
		FIELD_PREP(MTK_PPE_VLAN_MTU0_NONE, frame_len) |
		FIELD_PREP(MTK_PPE_VLAN_MTU0_1TAG, frame_len + VLAN_HLEN));
	ppe_w32(ppe, MTK_PPE_VLAN_MTU1,
		FIELD_PREP(MTK_PPE_VLAN_MTU1_2TAG, frame_len + 2 * VLAN_HLEN) |
		FIELD_PREP(MTK_PPE_VLAN_MTU1_3TAG, frame_len + 3 * VLAN_HLEN));
}
void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;

View File

@@ -346,6 +346,7 @@ struct mtk_ppe {
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index);
void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_update_mtu(struct mtk_ppe *ppe, int mtu);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);

View File

@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include <linux/iopoll.h>
#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
@@ -592,7 +594,6 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *ctrl;
struct mlx5e_hw_objs *res;
struct mlx5_aso_wqe *wqe;
unsigned long expires;
u8 ds_cnt;
int ret;
@@ -614,13 +615,8 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
mlx5e_ipsec_aso_copy(ctrl, data);
mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
expires = jiffies + msecs_to_jiffies(10);
do {
ret = mlx5_aso_poll_cq(aso->aso, false);
if (ret)
/* We are in atomic context */
udelay(10);
} while (ret && time_is_after_jiffies(expires));
read_poll_timeout_atomic(mlx5_aso_poll_cq, ret, !ret, 10,
10 * USEC_PER_MSEC, false, aso->aso, false);
if (!ret)
memcpy(sa_entry->ctx, aso->ctx, MLX5_ST_SZ_BYTES(ipsec_aso));
spin_unlock_bh(&aso->lock);

View File

@@ -6868,6 +6868,14 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
goto err_resume;
}
/* mlx5e_fix_features() returns early when the device is not present
* to avoid dereferencing cleared priv during profile changes.
* This also causes it to be a no-op during register_netdev(), where
* the device is not yet present.
* Trigger an additional features update that will actually work.
*/
mlx5e_update_features(netdev);
mlx5e_dcbnl_init_app(priv);
mlx5_core_uplink_netdev_set(mdev, netdev);
mlx5e_params_print_info(mdev, &priv->channels.params);

View File

@@ -139,7 +139,7 @@ void fbnic_up(struct fbnic_net *fbn)
/* Enable Tx/Rx processing */
fbnic_napi_enable(fbn);
netif_tx_start_all_queues(fbn->netdev);
netif_tx_wake_all_queues(fbn->netdev);
fbnic_service_task_start(fbn);

View File

@@ -2065,11 +2065,8 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->dev = &pdev->dev;
xa_init(&gc->irq_contexts);
if (gc->is_pf)
gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
else
gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
mana_debugfs_root);
gc->mana_pci_debugfs = debugfs_create_dir(pci_name(pdev),
mana_debugfs_root);
err = mana_gd_setup(pdev);
if (err)

View File

@@ -3154,6 +3154,8 @@ static int mana_init_port(struct net_device *ndev)
eth_hw_addr_set(ndev, apc->mac_addr);
sprintf(vport, "vport%d", port_idx);
apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs,
&apc->speed);
return 0;
reset_apc:
@@ -3432,8 +3434,6 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
netif_carrier_on(ndev);
debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
return 0;
free_indir:

View File

@@ -657,7 +657,9 @@ void txgbe_remove_phy(struct txgbe *txgbe)
return;
case wx_mac_sp:
if (txgbe->wx->media_type == wx_media_copper) {
rtnl_lock();
phylink_disconnect_phy(txgbe->wx->phylink);
rtnl_unlock();
phylink_destroy(txgbe->wx->phylink);
return;
}

View File

@@ -391,7 +391,6 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct sixpack *sp;
size_t count1;
if (!count)
return;
@@ -401,16 +400,16 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
return;
/* Read the characters out of the buffer */
count1 = count;
while (count) {
count--;
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SIXPF_ERROR, &sp->flags))
sp->dev->stats.rx_errors++;
cp++;
continue;
}
sixpack_decode(sp, cp, 1);
cp++;
}
sixpack_decode(sp, cp, count1);
tty_unthrottle(tty);
}

View File

@@ -187,6 +187,9 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
len = skb->data[0] + skb->data[1] * 256 - 5;
if (len < 0 || len > skb->len - 2)
goto drop_unlock;
skb_pull(skb, 2); /* Remove the length bytes */
skb_trim(skb, len); /* Set the length of the data */

View File

@@ -1909,6 +1909,8 @@ static int scc_net_siocdevprivate(struct net_device *dev,
if (!capable(CAP_SYS_RAWIO)) return -EPERM;
if (!arg || copy_from_user(&memcfg, arg, sizeof(memcfg)))
return -EINVAL;
if (memcfg.bufsize < 16)
return -EINVAL;
scc->stat.bufsize = memcfg.bufsize;
return 0;

View File

@@ -2044,6 +2044,7 @@ static int gsi_ring_setup(struct gsi *gsi)
count = reg_decode(reg, NUM_EV_PER_EE, val);
} else {
reg = gsi_reg(gsi, HW_PARAM_4);
val = ioread32(gsi->virt + reg_offset(reg));
count = reg_decode(reg, EV_PER_EE, val);
}
if (!count) {

View File

@@ -361,7 +361,7 @@ static void ipa_qtime_config(struct ipa *ipa)
{
const struct reg *reg;
u32 offset;
u32 val;
u32 val = 0;
/* Timer clock divider must be disabled when we change the rate */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
@@ -374,8 +374,8 @@ static void ipa_qtime_config(struct ipa *ipa)
val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
}
/* Configure tag and NAT Qtime timestamp resolution as well */
val = reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
val = reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
val |= reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
val |= reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
iowrite32(val, ipa->reg_virt + reg_offset(reg));

View File

@@ -2584,7 +2584,9 @@ static void macsec_inherit_tso_max(struct net_device *dev)
netif_inherit_tso_max(dev, macsec->real_dev);
}
static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
static int macsec_update_offload(struct net_device *dev,
enum macsec_offload offload,
struct netlink_ext_ack *extack)
{
enum macsec_offload prev_offload;
const struct macsec_ops *ops;
@@ -2616,14 +2618,35 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
if (!ops)
return -EOPNOTSUPP;
macsec->offload = offload;
ctx.secy = &macsec->secy;
ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
: macsec_offload(ops->mdo_add_secy, &ctx);
if (ret) {
macsec->offload = prev_offload;
if (ret)
return ret;
/* Remove VLAN filters when disabling offload. */
if (offload == MACSEC_OFFLOAD_OFF) {
vlan_drop_rx_ctag_filter_info(dev);
vlan_drop_rx_stag_filter_info(dev);
}
macsec->offload = offload;
/* Add VLAN filters when enabling offload. */
if (prev_offload == MACSEC_OFFLOAD_OFF) {
ret = vlan_get_rx_ctag_filter_info(dev);
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
"adding ctag VLAN filters failed, err %d",
ret);
goto rollback_offload;
}
ret = vlan_get_rx_stag_filter_info(dev);
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
"adding stag VLAN filters failed, err %d",
ret);
vlan_drop_rx_ctag_filter_info(dev);
goto rollback_offload;
}
}
macsec_set_head_tail_room(dev);
@@ -2633,6 +2656,12 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
netdev_update_features(dev);
return 0;
rollback_offload:
macsec->offload = prev_offload;
macsec_offload(ops->mdo_del_secy, &ctx);
return ret;
}
@@ -2673,7 +2702,7 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
if (macsec->offload != offload)
ret = macsec_update_offload(dev, offload);
ret = macsec_update_offload(dev, offload, info->extack);
out:
rtnl_unlock();
return ret;
@@ -3486,7 +3515,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
}
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_HW_VLAN_STAG_FILTER | NETIF_F_HW_VLAN_CTAG_FILTER)
#define MACSEC_OFFLOAD_FEATURES \
(MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \
@@ -3707,6 +3737,29 @@ restore_old_addr:
return err;
}
static int macsec_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct macsec_dev *macsec = netdev_priv(dev);
if (!macsec_is_offloaded(macsec))
return 0;
return vlan_vid_add(macsec->real_dev, proto, vid);
}
static int macsec_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct macsec_dev *macsec = netdev_priv(dev);
if (!macsec_is_offloaded(macsec))
return 0;
vlan_vid_del(macsec->real_dev, proto, vid);
return 0;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3748,6 +3801,8 @@ static const struct net_device_ops macsec_netdev_ops = {
.ndo_set_rx_mode = macsec_dev_set_rx_mode,
.ndo_change_rx_flags = macsec_dev_change_rx_flags,
.ndo_set_mac_address = macsec_set_mac_address,
.ndo_vlan_rx_add_vid = macsec_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = macsec_vlan_rx_kill_vid,
.ndo_start_xmit = macsec_start_xmit,
.ndo_get_stats64 = macsec_get_stats64,
.ndo_get_iflink = macsec_get_iflink,
@@ -3912,7 +3967,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
if (macsec->offload != offload) {
macsec_offload_state_change = true;
ret = macsec_update_offload(dev, offload);
ret = macsec_update_offload(dev, offload, extack);
if (ret)
goto cleanup;
}

View File

@@ -554,6 +554,36 @@ static int nsim_stop(struct net_device *dev)
return 0;
}
/* ndo_vlan_rx_add_vid: record the VLAN id in the per-protocol bitmap.
 * Warns (once) if the id was already present, which would indicate an
 * unbalanced add from the VLAN core. Unknown protocols are ignored.
 */
static int nsim_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct netdevsim *ns = netdev_priv(dev);
	unsigned long *map = NULL;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	if (proto == htons(ETH_P_8021Q))
		map = ns->vlan.ctag;
	else if (proto == htons(ETH_P_8021AD))
		map = ns->vlan.stag;

	if (map)
		WARN_ON_ONCE(test_and_set_bit(vid, map));

	return 0;
}
/* ndo_vlan_rx_kill_vid: clear the VLAN id from the per-protocol bitmap.
 * Warns (once) if the id was not set, which would indicate an unbalanced
 * kill from the VLAN core. Unknown protocols are ignored.
 */
static int nsim_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct netdevsim *ns = netdev_priv(dev);
	unsigned long *map = NULL;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	if (proto == htons(ETH_P_8021Q))
		map = ns->vlan.ctag;
	else if (proto == htons(ETH_P_8021AD))
		map = ns->vlan.stag;

	if (map)
		WARN_ON_ONCE(!test_and_clear_bit(vid, map));

	return 0;
}
static int nsim_shaper_set(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
struct netlink_ext_ack *extack)
@@ -611,6 +641,8 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_bpf = nsim_bpf,
.ndo_open = nsim_open,
.ndo_stop = nsim_stop,
.ndo_vlan_rx_add_vid = nsim_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nsim_vlan_rx_kill_vid,
.net_shaper_ops = &nsim_shaper_ops,
};
@@ -622,6 +654,8 @@ static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_change_mtu = nsim_change_mtu,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_vlan_rx_add_vid = nsim_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nsim_vlan_rx_kill_vid,
};
/* We don't have true per-queue stats, yet, so do some random fakery here.
@@ -919,6 +953,20 @@ static const struct file_operations nsim_pp_hold_fops = {
.owner = THIS_MODULE,
};
/* debugfs "vlan" file: dump every VLAN id currently recorded in the
 * device's filter bitmaps, one "ctag <vid>" / "stag <vid>" line each,
 * ctags first.
 */
static int nsim_vlan_show(struct seq_file *s, void *data)
{
	struct netdevsim *ns = s->private;
	int vid;

	for_each_set_bit(vid, ns->vlan.ctag, VLAN_N_VID)
		seq_printf(s, "ctag %d\n", vid);
	for_each_set_bit(vid, ns->vlan.stag, VLAN_N_VID)
		seq_printf(s, "stag %d\n", vid);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(nsim_vlan);
static void nsim_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -931,14 +979,18 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
NETIF_F_LRO |
NETIF_F_TSO;
NETIF_F_TSO |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_FILTER;
dev->hw_features |= NETIF_F_HW_TC |
NETIF_F_SG |
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
NETIF_F_LRO |
NETIF_F_TSO |
NETIF_F_LOOPBACK;
NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_FILTER;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
dev->max_mtu = ETH_MAX_MTU;
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_HW_OFFLOAD;
@@ -1105,6 +1157,8 @@ struct netdevsim *nsim_create(struct nsim_dev *nsim_dev,
ns->qr_dfs = debugfs_create_file("queue_reset", 0200,
nsim_dev_port->ddir, ns,
&nsim_qreset_fops);
ns->vlan_dfs = debugfs_create_file("vlan", 0400, nsim_dev_port->ddir,
ns, &nsim_vlan_fops);
return ns;
err_free_netdev:
@@ -1116,7 +1170,9 @@ void nsim_destroy(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
struct netdevsim *peer;
u16 vid;
debugfs_remove(ns->vlan_dfs);
debugfs_remove(ns->qr_dfs);
debugfs_remove(ns->pp_dfs);
@@ -1142,6 +1198,11 @@ void nsim_destroy(struct netdevsim *ns)
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
nsim_exit_netdevsim(ns);
for_each_set_bit(vid, ns->vlan.ctag, VLAN_N_VID)
WARN_ON_ONCE(1);
for_each_set_bit(vid, ns->vlan.stag, VLAN_N_VID)
WARN_ON_ONCE(1);
/* Put this intentionally late to exercise the orphaning path */
if (ns->page) {
page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,

View File

@@ -18,6 +18,7 @@
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/ptp_mock.h>
@@ -75,6 +76,11 @@ struct nsim_macsec {
u8 nsim_secy_count;
};
/* Tracks which VLAN ids the simulated hardware filter currently holds,
 * one bitmap per tag protocol: 802.1Q (ctag) and 802.1ad (stag).
 */
struct nsim_vlan {
	DECLARE_BITMAP(ctag, VLAN_N_VID);
	DECLARE_BITMAP(stag, VLAN_N_VID);
};
struct nsim_ethtool_pauseparam {
bool rx;
bool tx;
@@ -135,6 +141,7 @@ struct netdevsim {
bool bpf_map_accept;
struct nsim_ipsec ipsec;
struct nsim_macsec macsec;
struct nsim_vlan vlan;
struct {
u32 inject_error;
u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
@@ -146,6 +153,7 @@ struct netdevsim {
struct page *page;
struct dentry *pp_dfs;
struct dentry *qr_dfs;
struct dentry *vlan_dfs;
struct nsim_ethtool ethtool;
struct netdevsim __rcu *peer;

View File

@@ -927,8 +927,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr,
/* returning -ENODEV doesn't stop bus
* scanning
*/
return (phy_reg == -EIO ||
phy_reg == -ENODEV) ? -ENODEV : -EIO;
return (ret == -EIO ||
ret == -ENODEV) ? -ENODEV : -EIO;
if (!ret)
continue;

View File

@@ -524,7 +524,7 @@ static int at803x_config_init(struct phy_device *phydev)
* behaviour but we still need to accommodate it. XNP is only needed
* for 10Gbps support, so disable XNP.
*/
return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
return phy_modify(phydev, MII_ADVERTISE, ADVERTISE_XNP, 0);
}
static void at803x_link_change_notify(struct phy_device *phydev)

View File

@@ -1048,6 +1048,9 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct ppp_net *pn;
int __user *p = (int __user *)arg;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case PPPIOCNEWUNIT:
/* Create a new ppp unit */

View File

@@ -157,11 +157,16 @@ static void rx_complete(struct urb *req)
PAGE_SIZE);
page = NULL;
}
} else {
} else if (skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS) {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, req->actual_length,
PAGE_SIZE);
page = NULL;
} else {
dev_kfree_skb_any(skb);
pnd->rx_skb = NULL;
skb = NULL;
dev->stats.rx_length_errors++;
}
if (req->actual_length < PAGE_SIZE)
pnd->rx_skb = NULL; /* Last fragment */

View File

@@ -1187,12 +1187,22 @@ static inline void tcf_kfree_skb_list(struct sk_buff *skb, struct Qdisc *q,
static inline void qdisc_dequeue_drop(struct Qdisc *q, struct sk_buff *skb,
enum qdisc_drop_reason reason)
{
struct Qdisc *root;
DEBUG_NET_WARN_ON_ONCE(!(q->flags & TCQ_F_DEQUEUE_DROPS));
DEBUG_NET_WARN_ON_ONCE(q->flags & TCQ_F_NOLOCK);
tcf_set_qdisc_drop_reason(skb, reason);
skb->next = q->to_free;
q->to_free = skb;
rcu_read_lock();
root = qdisc_root_sleeping(q);
if (root->flags & TCQ_F_DEQUEUE_DROPS) {
tcf_set_qdisc_drop_reason(skb, reason);
skb->next = root->to_free;
root->to_free = skb;
} else {
kfree_skb_reason(skb, (enum skb_drop_reason)reason);
}
rcu_read_unlock();
}
/* Instead of calling kfree_skb() while root qdisc lock is held,

View File

@@ -82,7 +82,8 @@
#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */
#define ADVERTISE_RESV 0x1000 /* Unused... */
#define ADVERTISE_XNP 0x1000 /* Extended Next Page */
#define ADVERTISE_RESV ADVERTISE_XNP /* Used to be reserved */
#define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */
#define ADVERTISE_LPACK 0x4000 /* Ack link partners response */
#define ADVERTISE_NPAGE 0x8000 /* Next page bit */

View File

@@ -191,10 +191,20 @@ bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
void caif_free_client(struct cflayer *adap_layer)
{
struct cflayer *serv_layer;
struct cfsrvl *servl;
if (adap_layer == NULL || adap_layer->dn == NULL)
if (!adap_layer)
return;
servl = container_obj(adap_layer->dn);
serv_layer = adap_layer->dn;
if (!serv_layer)
return;
layer_set_dn(adap_layer, NULL);
layer_set_up(serv_layer, NULL);
servl = container_obj(serv_layer);
servl->release(&servl->layer);
}
EXPORT_SYMBOL(caif_free_client);

View File

@@ -362,6 +362,14 @@ static int raw_notifier(struct notifier_block *nb, unsigned long msg,
return NOTIFY_DONE;
}
static void raw_sock_destruct(struct sock *sk)
{
struct raw_sock *ro = raw_sk(sk);
free_percpu(ro->uniq);
can_sock_destruct(sk);
}
static int raw_init(struct sock *sk)
{
struct raw_sock *ro = raw_sk(sk);
@@ -388,6 +396,8 @@ static int raw_init(struct sock *sk)
if (unlikely(!ro->uniq))
return -ENOMEM;
sk->sk_destruct = raw_sock_destruct;
/* set notifier */
spin_lock(&raw_notifier_lock);
list_add_tail(&ro->notifier, &raw_notifier_list);
@@ -437,7 +447,6 @@ static int raw_release(struct socket *sock)
ro->bound = 0;
ro->dev = NULL;
ro->count = 0;
free_percpu(ro->uniq);
sock_orphan(sk);
sock->sk = NULL;

View File

@@ -4394,6 +4394,8 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
struct net_device *master, *slave;
master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
if (unlikely(!(master->flags & IFF_UP)))
return XDP_ABORTED;
slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
if (slave && slave != xdp->rxq->dev) {
/* The target device is different from the receiving device, so
@@ -10570,10 +10572,11 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
si->dst_reg, si->dst_reg, \
offsetof(OBJ, OBJ_FIELD)); \
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_JMP_A(1); \
*insn++ = BPF_JMP_A(2); \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0); \
} \
} while (0)
@@ -10607,10 +10610,11 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
si->dst_reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, sk));\
if (si->dst_reg == si->src_reg) { \
*insn++ = BPF_JMP_A(1); \
*insn++ = BPF_JMP_A(2); \
*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
offsetof(struct bpf_sock_ops_kern, \
temp)); \
*insn++ = BPF_MOV64_IMM(si->dst_reg, 0); \
} \
} while (0)

View File

@@ -284,7 +284,6 @@ static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb,
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = ntohl(th->ack_seq) - 1;
treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
treq->req_usec_ts = false;
#if IS_ENABLED(CONFIG_MPTCP)
treq->is_mptcp = sk_is_mptcp(sk);
@@ -346,6 +345,7 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
ireq->wscale_ok = tcp_opt->wscale_ok;
ireq->ecn_ok = !!(tcp_opt->rcv_tsecr & TS_OPT_ECN);
treq->req_usec_ts = false;
treq->ts_off = tsoff;
return req;

View File

@@ -424,6 +424,12 @@ static void digital_in_recv_sdd_res(struct nfc_digital_dev *ddev, void *arg,
size = 4;
}
if (target->nfcid1_len + size > NFC_NFCID1_MAXSIZE) {
PROTOCOL_ERR("4.7.2.1");
rc = -EPROTO;
goto exit;
}
memcpy(target->nfcid1 + target->nfcid1_len, sdd_res->nfcid1 + offset,
size);
target->nfcid1_len += size;

View File

@@ -1091,6 +1091,7 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
if (sk->sk_state == LLCP_CLOSED) {
release_sock(sk);
nfc_llcp_sock_put(llcp_sock);
return;
}
/* Pass the payload upstream */
@@ -1182,6 +1183,7 @@ static void nfc_llcp_recv_disc(struct nfc_llcp_local *local,
if (sk->sk_state == LLCP_CLOSED) {
release_sock(sk);
nfc_llcp_sock_put(llcp_sock);
return;
}
if (sk->sk_state == LLCP_CONNECTED) {

View File

@@ -22,8 +22,10 @@ static struct {
struct socket *sock;
struct sockaddr_qrtr bcast_sq;
struct list_head lookups;
u32 lookup_count;
struct workqueue_struct *workqueue;
struct work_struct work;
void (*saved_data_ready)(struct sock *sk);
int local_node;
} qrtr_ns;
@@ -67,8 +69,19 @@ struct qrtr_server {
struct qrtr_node {
unsigned int id;
struct xarray servers;
u32 server_count;
};
/* Max nodes, server, lookup limits are chosen based on the current platform
* requirements. If the requirement changes in the future, these values can be
* increased.
*/
#define QRTR_NS_MAX_NODES 64
#define QRTR_NS_MAX_SERVERS 256
#define QRTR_NS_MAX_LOOKUPS 64
static u8 node_count;
static struct qrtr_node *node_get(unsigned int node_id)
{
struct qrtr_node *node;
@@ -77,6 +90,11 @@ static struct qrtr_node *node_get(unsigned int node_id)
if (node)
return node;
if (node_count >= QRTR_NS_MAX_NODES) {
pr_err_ratelimited("QRTR clients exceed max node limit!\n");
return NULL;
}
/* If node didn't exist, allocate and insert it to the tree */
node = kzalloc_obj(*node);
if (!node)
@@ -90,6 +108,8 @@ static struct qrtr_node *node_get(unsigned int node_id)
return NULL;
}
node_count++;
return node;
}
@@ -229,6 +249,17 @@ static struct qrtr_server *server_add(unsigned int service,
if (!service || !port)
return NULL;
node = node_get(node_id);
if (!node)
return NULL;
/* Make sure the new servers per port are capped at the maximum value */
old = xa_load(&node->servers, port);
if (!old && node->server_count >= QRTR_NS_MAX_SERVERS) {
pr_err_ratelimited("QRTR client node %u exceeds max server limit!\n", node_id);
return NULL;
}
srv = kzalloc_obj(*srv);
if (!srv)
return NULL;
@@ -238,10 +269,6 @@ static struct qrtr_server *server_add(unsigned int service,
srv->node = node_id;
srv->port = port;
node = node_get(node_id);
if (!node)
goto err;
/* Delete the old server on the same port */
old = xa_store(&node->servers, port, srv, GFP_KERNEL);
if (old) {
@@ -252,6 +279,8 @@ static struct qrtr_server *server_add(unsigned int service,
} else {
kfree(old);
}
} else {
node->server_count++;
}
trace_qrtr_ns_server_add(srv->service, srv->instance,
@@ -292,6 +321,7 @@ static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
}
kfree(srv);
node->server_count--;
return 0;
}
@@ -341,7 +371,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
struct qrtr_node *node;
unsigned long index;
struct kvec iv;
int ret;
int ret = 0;
iv.iov_base = &pkt;
iv.iov_len = sizeof(pkt);
@@ -356,8 +386,10 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
/* Advertise the removal of this client to all local servers */
local_node = node_get(qrtr_ns.local_node);
if (!local_node)
return 0;
if (!local_node) {
ret = 0;
goto delete_node;
}
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
@@ -374,10 +406,19 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
if (ret < 0 && ret != -ENODEV) {
pr_err("failed to send bye cmd\n");
return ret;
goto delete_node;
}
}
return 0;
/* Ignore -ENODEV */
ret = 0;
delete_node:
xa_erase(&nodes, from->sq_node);
kfree(node);
node_count--;
return ret;
}
static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
@@ -417,6 +458,7 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
list_del(&lookup->li);
kfree(lookup);
qrtr_ns.lookup_count--;
}
/* Remove the server belonging to this port but don't broadcast
@@ -534,6 +576,11 @@ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
if (from->sq_node != qrtr_ns.local_node)
return -EINVAL;
if (qrtr_ns.lookup_count >= QRTR_NS_MAX_LOOKUPS) {
pr_err_ratelimited("QRTR client node exceeds max lookup limit!\n");
return -ENOSPC;
}
lookup = kzalloc_obj(*lookup);
if (!lookup)
return -ENOMEM;
@@ -542,6 +589,7 @@ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
lookup->service = service;
lookup->instance = instance;
list_add_tail(&lookup->li, &qrtr_ns.lookups);
qrtr_ns.lookup_count++;
memset(&filter, 0, sizeof(filter));
filter.service = service;
@@ -582,6 +630,7 @@ static void ctrl_cmd_del_lookup(struct sockaddr_qrtr *from,
list_del(&lookup->li);
kfree(lookup);
qrtr_ns.lookup_count--;
}
}
@@ -670,7 +719,7 @@ static void qrtr_ns_worker(struct work_struct *work)
}
if (ret < 0)
pr_err("failed while handling packet from %d:%d",
pr_err_ratelimited("failed while handling packet from %d:%d",
sq.sq_node, sq.sq_port);
}
@@ -709,6 +758,7 @@ int qrtr_ns_init(void)
goto err_sock;
}
qrtr_ns.saved_data_ready = qrtr_ns.sock->sk->sk_data_ready;
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
sq.sq_port = QRTR_PORT_CTRL;
@@ -749,6 +799,10 @@ int qrtr_ns_init(void)
return 0;
err_wq:
write_lock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns.saved_data_ready;
write_unlock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
destroy_workqueue(qrtr_ns.workqueue);
err_sock:
sock_release(qrtr_ns.sock);
@@ -758,7 +812,12 @@ EXPORT_SYMBOL_GPL(qrtr_ns_init);
void qrtr_ns_remove(void)
{
write_lock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns.saved_data_ready;
write_unlock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
cancel_work_sync(&qrtr_ns.work);
synchronize_net();
destroy_workqueue(qrtr_ns.workqueue);
/* sock_release() expects the two references that were put during

View File

@@ -357,7 +357,8 @@ static int rds_cong_monitor(struct rds_sock *rs, sockptr_t optval, int optlen)
return ret;
}
static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen)
static int rds_set_transport(struct net *net, struct rds_sock *rs,
sockptr_t optval, int optlen)
{
int t_type;
@@ -373,6 +374,10 @@ static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen)
if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
return -EINVAL;
/* RDS/IB is restricted to the initial network namespace */
if (t_type != RDS_TRANS_TCP && !net_eq(net, &init_net))
return -EPROTOTYPE;
rs->rs_transport = rds_trans_get(t_type);
return rs->rs_transport ? 0 : -ENOPROTOOPT;
@@ -433,6 +438,7 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
struct net *net = sock_net(sock->sk);
int ret;
if (level != SOL_RDS) {
@@ -461,7 +467,7 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_RDS_TRANSPORT:
lock_sock(sock->sk);
ret = rds_set_transport(rs, optval, optlen);
ret = rds_set_transport(net, rs, optval, optlen);
release_sock(sock->sk);
break;
case SO_TIMESTAMP_OLD:

View File

@@ -401,8 +401,8 @@ static void rds6_ib_ic_info(struct socket *sock, unsigned int len,
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr,
__u32 scope_id)
static int rds_ib_laddr_check_cm(struct net *net, const struct in6_addr *addr,
__u32 scope_id)
{
int ret;
struct rdma_cm_id *cm_id;
@@ -487,6 +487,26 @@ out:
return ret;
}
static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr,
__u32 scope_id)
{
struct rds_ib_device *rds_ibdev = NULL;
/* RDS/IB is restricted to the initial network namespace */
if (!net_eq(net, &init_net))
return -EPROTOTYPE;
if (ipv6_addr_v4mapped(addr)) {
rds_ibdev = rds_ib_get_device(addr->s6_addr32[3]);
if (rds_ibdev) {
rds_ib_dev_put(rds_ibdev);
return 0;
}
}
return rds_ib_laddr_check_cm(net, addr, scope_id);
}
static void rds_ib_unregister_client(void)
{
ib_unregister_client(&rds_ib_client);

View File

@@ -381,6 +381,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn,
__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
/* ib_rdma.c */
struct rds_ib_device *rds_ib_get_device(__be32 ipaddr);
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);

View File

@@ -43,7 +43,7 @@ struct workqueue_struct *rds_ib_mr_wq;
static void rds_ib_odp_mr_worker(struct work_struct *work);
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
struct rds_ib_device *rds_ibdev;
struct rds_ib_ipaddr *i_ipaddr;

View File

@@ -270,6 +270,13 @@ int rose_process_rx_frame(struct sock *sk, struct sk_buff *skb)
frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);
/*
* ROSE_CLEAR_REQUEST carries cause and diagnostic in bytes 3..4.
* Reject a malformed frame that is too short to contain them.
*/
if (frametype == ROSE_CLEAR_REQUEST && skb->len < 5)
return 0;
switch (rose->state) {
case ROSE_STATE_1:
queued = rose_state1_machine(sk, skb, frametype);

View File

@@ -328,9 +328,13 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
int err = -ENOMEM;
mutex_lock(&zones_mutex);
ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
rcu_read_lock();
ct_ft = rhashtable_lookup(&zones_ht, &key, zones_params);
if (ct_ft && refcount_inc_not_zero(&ct_ft->ref)) {
rcu_read_unlock();
goto out_unlock;
}
rcu_read_unlock();
ct_ft = kzalloc_obj(*ct_ft);
if (!ct_ft)

View File

@@ -74,9 +74,13 @@ TC_INDIRECT_SCOPE int fw_classify(struct sk_buff *skb,
}
}
} else {
struct Qdisc *q = tcf_block_q(tp->chain->block);
struct Qdisc *q;
/* Old method: classify the packet using its skb mark. */
if (tcf_block_shared(tp->chain->block))
return -1;
q = tcf_block_q(tp->chain->block);
if (id && (TC_H_MAJ(id) == 0 ||
!(TC_H_MAJ(id ^ q->handle)))) {
res->classid = id;

View File

@@ -201,6 +201,7 @@ new_skb:
cb->chunk = head_cb->chunk;
cb->af = head_cb->af;
cb->encap_port = head_cb->encap_port;
}
}

View File

@@ -261,9 +261,11 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6);
local_bh_disable();
udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr, &fl6->daddr,
tclass, ip6_dst_hoplimit(dst), label,
sctp_sk(sk)->udp_port, t->encap_port, false, 0);
local_bh_enable();
return 0;
}

View File

@@ -1070,10 +1070,12 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_reset_inner_mac_header(skb);
skb_reset_inner_transport_header(skb);
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
local_bh_disable();
udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
sctp_sk(sk)->udp_port, t->encap_port, false, false,
0);
local_bh_enable();
return 0;
}

View File

@@ -45,6 +45,14 @@ static void strp_abort_strp(struct strparser *strp, int err)
strp->stopped = 1;
if (strp->skb_head) {
kfree_skb(strp->skb_head);
strp->skb_head = NULL;
}
strp->skb_nextp = NULL;
strp->need_bytes = 0;
if (strp->sk) {
struct sock *sk = strp->sk;

View File

@@ -1962,12 +1962,12 @@ static void vsock_update_buffer_size(struct vsock_sock *vsk,
const struct vsock_transport *transport,
u64 val)
{
if (val > vsk->buffer_max_size)
val = vsk->buffer_max_size;
if (val < vsk->buffer_min_size)
val = vsk->buffer_min_size;
if (val > vsk->buffer_max_size)
val = vsk->buffer_max_size;
if (val != vsk->buffer_size &&
transport && transport->notify_buffer_size)
transport->notify_buffer_size(vsk, &val);

View File

@@ -0,0 +1,76 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "sock_ops_get_sk.skel.h"
/* See progs/sock_ops_get_sk.c for the bug description. */
static void run_sock_ops_test(int cgroup_fd, int prog_fd)
{
int server_fd, client_fd, err;
err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
if (!ASSERT_OK(err, "prog_attach"))
return;
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_OK_FD(server_fd, "start_server"))
goto detach;
/* Trigger TCP handshake which causes TCP_NEW_SYN_RECV state where
* is_fullsock == 0 and is_locked_tcp_sock == 0.
*/
client_fd = connect_to_fd(server_fd, 0);
if (!ASSERT_OK_FD(client_fd, "connect_to_fd"))
goto close_server;
close(client_fd);
close_server:
close(server_fd);
detach:
bpf_prog_detach(cgroup_fd, BPF_CGROUP_SOCK_OPS);
}
void test_ns_sock_ops_get_sk(void)
{
struct sock_ops_get_sk *skel;
int cgroup_fd;
cgroup_fd = test__join_cgroup("/sock_ops_get_sk");
if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup"))
return;
skel = sock_ops_get_sk__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_open_load"))
goto close_cgroup;
/* Test SOCK_OPS_GET_SK with same src/dst register */
if (test__start_subtest("get_sk")) {
run_sock_ops_test(cgroup_fd,
bpf_program__fd(skel->progs.sock_ops_get_sk_same_reg));
ASSERT_EQ(skel->bss->null_seen, 1, "null_seen");
ASSERT_EQ(skel->bss->bug_detected, 0, "bug_not_detected");
}
/* Test SOCK_OPS_GET_FIELD with same src/dst register */
if (test__start_subtest("get_field")) {
run_sock_ops_test(cgroup_fd,
bpf_program__fd(skel->progs.sock_ops_get_field_same_reg));
ASSERT_EQ(skel->bss->field_null_seen, 1, "field_null_seen");
ASSERT_EQ(skel->bss->field_bug_detected, 0, "field_bug_not_detected");
}
/* Test SOCK_OPS_GET_SK with different src/dst register */
if (test__start_subtest("get_sk_diff_reg")) {
run_sock_ops_test(cgroup_fd,
bpf_program__fd(skel->progs.sock_ops_get_sk_diff_reg));
ASSERT_EQ(skel->bss->diff_reg_null_seen, 1, "diff_reg_null_seen");
ASSERT_EQ(skel->bss->diff_reg_bug_detected, 0, "diff_reg_bug_not_detected");
}
sock_ops_get_sk__destroy(skel);
close_cgroup:
close(cgroup_fd);
}

View File

@@ -191,13 +191,18 @@ fail:
return -1;
}
static void bonding_cleanup(struct skeletons *skeletons)
static void link_cleanup(struct skeletons *skeletons)
{
restore_root_netns();
while (skeletons->nlinks) {
skeletons->nlinks--;
bpf_link__destroy(skeletons->links[skeletons->nlinks]);
}
}
static void bonding_cleanup(struct skeletons *skeletons)
{
restore_root_netns();
link_cleanup(skeletons);
ASSERT_OK(system("ip link delete bond1"), "delete bond1");
ASSERT_OK(system("ip link delete veth1_1"), "delete veth1_1");
ASSERT_OK(system("ip link delete veth1_2"), "delete veth1_2");
@@ -493,6 +498,90 @@ out:
system("ip link del bond_nest2");
}
/*
* Test that XDP redirect via xdp_master_redirect() does not crash when
* the bond master device is not up. When bond is in round-robin mode but
* never opened, rr_tx_counter is NULL.
*/
static void test_xdp_bonding_redirect_no_up(struct skeletons *skeletons)
{
struct nstoken *nstoken = NULL;
int xdp_pass_fd;
int veth1_ifindex;
int err;
char pkt[ETH_HLEN + 1];
struct xdp_md ctx_in = {};
DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt,
.data_size_in = sizeof(pkt),
.ctx_in = &ctx_in,
.ctx_size_in = sizeof(ctx_in),
.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
.repeat = 1,
.batch_size = 1,
);
/* We can't use bonding_setup() because bond will be active */
SYS(out, "ip netns add ns_rr_no_up");
nstoken = open_netns("ns_rr_no_up");
if (!ASSERT_OK_PTR(nstoken, "open ns_rr_no_up"))
goto out;
/* bond0: active-backup, UP with slave veth0.
* Attaching native XDP to bond0 enables bpf_master_redirect_enabled_key
* globally.
*/
SYS(out, "ip link add bond0 type bond mode active-backup");
SYS(out, "ip link add veth0 type veth peer name veth0p");
SYS(out, "ip link set veth0 master bond0");
SYS(out, "ip link set bond0 up");
SYS(out, "ip link set veth0p up");
/* bond1: round-robin, never UP -> rr_tx_counter stays NULL */
SYS(out, "ip link add bond1 type bond mode balance-rr");
SYS(out, "ip link add veth1 type veth peer name veth1p");
SYS(out, "ip link set veth1 master bond1");
veth1_ifindex = if_nametoindex("veth1");
if (!ASSERT_GT(veth1_ifindex, 0, "veth1_ifindex"))
goto out;
/* Attach native XDP to bond0 -> enables global redirect key */
if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond0"))
goto out;
/* Attach generic XDP (XDP_TX) to veth1.
* When packets arrive at veth1 via netif_receive_skb, do_xdp_generic()
* runs this program. XDP_TX + bond slave triggers xdp_master_redirect().
*/
err = bpf_xdp_attach(veth1_ifindex,
bpf_program__fd(skeletons->xdp_tx->progs.xdp_tx),
XDP_FLAGS_SKB_MODE, NULL);
if (!ASSERT_OK(err, "attach generic XDP to veth1"))
goto out;
/* Run BPF_PROG_TEST_RUN with XDP_PASS live frames on veth1.
* XDP_PASS frames become SKBs with skb->dev = veth1, entering
* netif_receive_skb -> do_xdp_generic -> xdp_master_redirect.
* Without the fix, bond_rr_gen_slave_id() dereferences NULL
* rr_tx_counter and crashes.
*/
xdp_pass_fd = bpf_program__fd(skeletons->xdp_dummy->progs.xdp_dummy_prog);
memset(pkt, 0, sizeof(pkt));
ctx_in.data_end = sizeof(pkt);
ctx_in.ingress_ifindex = veth1_ifindex;
err = bpf_prog_test_run_opts(xdp_pass_fd, &opts);
ASSERT_OK(err, "xdp_pass test_run should not crash");
out:
link_cleanup(skeletons);
close_netns(nstoken);
SYS_NOFAIL("ip netns del ns_rr_no_up");
}
static void test_xdp_bonding_features(struct skeletons *skeletons)
{
LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
@@ -738,6 +827,9 @@ void serial_test_xdp_bonding(void)
if (test__start_subtest("xdp_bonding_redirect_multi"))
test_xdp_bonding_redirect_multi(&skeletons);
if (test__start_subtest("xdp_bonding_redirect_no_up"))
test_xdp_bonding_redirect_no_up(&skeletons);
out:
xdp_dummy__destroy(skeletons.xdp_dummy);
xdp_tx__destroy(skeletons.xdp_tx);

View File

@@ -0,0 +1,117 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/*
* Test the SOCK_OPS_GET_SK() and SOCK_OPS_GET_FIELD() macros in
* sock_ops_convert_ctx_access() when dst_reg == src_reg.
*
* When dst_reg == src_reg, the macros borrow a temporary register to load
* is_fullsock / is_locked_tcp_sock, because dst_reg holds the ctx pointer
* and cannot be clobbered before ctx->sk / ctx->field is read. If
* is_fullsock == 0 (e.g., TCP_NEW_SYN_RECV with a request_sock), the macro
* must still zero dst_reg so the verifier's PTR_TO_SOCKET_OR_NULL /
* SCALAR_VALUE type is correct at runtime. A missing clear leaves a stale
* ctx pointer in dst_reg that passes NULL checks (GET_SK) or leaks a kernel
* address as a scalar (GET_FIELD).
*
* When dst_reg != src_reg, dst_reg itself is used to load is_fullsock, so
* the JEQ (dst_reg == 0) naturally leaves it zeroed on the !fullsock path.
*/
int bug_detected;
int null_seen;
SEC("sockops")
__naked void sock_ops_get_sk_same_reg(void)
{
asm volatile (
"r7 = *(u32 *)(r1 + %[is_fullsock_off]);"
"r1 = *(u64 *)(r1 + %[sk_off]);"
"if r7 != 0 goto 2f;"
"if r1 == 0 goto 1f;"
"r1 = %[bug_detected] ll;"
"r2 = 1;"
"*(u32 *)(r1 + 0) = r2;"
"goto 2f;"
"1:"
"r1 = %[null_seen] ll;"
"r2 = 1;"
"*(u32 *)(r1 + 0) = r2;"
"2:"
"r0 = 1;"
"exit;"
:
: __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)),
__imm_const(sk_off, offsetof(struct bpf_sock_ops, sk)),
__imm_addr(bug_detected),
__imm_addr(null_seen)
: __clobber_all);
}
/* SOCK_OPS_GET_FIELD: same-register, is_locked_tcp_sock == 0 path. */
int field_bug_detected;
int field_null_seen;
SEC("sockops")
__naked void sock_ops_get_field_same_reg(void)
{
asm volatile (
"r7 = *(u32 *)(r1 + %[is_fullsock_off]);"
"r1 = *(u32 *)(r1 + %[snd_cwnd_off]);"
"if r7 != 0 goto 2f;"
"if r1 == 0 goto 1f;"
"r1 = %[field_bug_detected] ll;"
"r2 = 1;"
"*(u32 *)(r1 + 0) = r2;"
"goto 2f;"
"1:"
"r1 = %[field_null_seen] ll;"
"r2 = 1;"
"*(u32 *)(r1 + 0) = r2;"
"2:"
"r0 = 1;"
"exit;"
:
: __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)),
__imm_const(snd_cwnd_off, offsetof(struct bpf_sock_ops, snd_cwnd)),
__imm_addr(field_bug_detected),
__imm_addr(field_null_seen)
: __clobber_all);
}
/* SOCK_OPS_GET_SK: different-register, is_fullsock == 0 path. */
int diff_reg_bug_detected;
int diff_reg_null_seen;
SEC("sockops")
__naked void sock_ops_get_sk_diff_reg(void)
{
asm volatile (
"r7 = r1;"
"r6 = *(u32 *)(r7 + %[is_fullsock_off]);"
"r2 = *(u64 *)(r7 + %[sk_off]);"
"if r6 != 0 goto 2f;"
"if r2 == 0 goto 1f;"
"r1 = %[diff_reg_bug_detected] ll;"
"r3 = 1;"
"*(u32 *)(r1 + 0) = r3;"
"goto 2f;"
"1:"
"r1 = %[diff_reg_null_seen] ll;"
"r3 = 1;"
"*(u32 *)(r1 + 0) = r3;"
"2:"
"r0 = 1;"
"exit;"
:
: __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)),
__imm_const(sk_off, offsetof(struct bpf_sock_ops, sk)),
__imm_addr(diff_reg_bug_detected),
__imm_addr(diff_reg_null_seen)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";

View File

@@ -12,6 +12,7 @@ TEST_GEN_FILES := \
TEST_PROGS := \
gro.py \
hds.py \
macsec.py \
napi_id.py \
napi_threaded.py \
netpoll_basic.py \

View File

@@ -3,8 +3,10 @@ CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_BTF_MODULES=n
CONFIG_INET_PSP=y
CONFIG_IPV6=y
CONFIG_MACSEC=m
CONFIG_NETCONSOLE=m
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_NETCONSOLE_EXTENDED_LOG=y
CONFIG_NETDEVSIM=m
CONFIG_VLAN_8021Q=m
CONFIG_XDP_SOCKETS=y

View File

@@ -258,6 +258,15 @@ class NetDrvEpEnv(NetDrvEnvBase):
if nsim_test is False and self._ns is not None:
raise KsftXfailEx("Test does not work on netdevsim")
def get_local_nsim_dev(self):
"""Returns the local netdevsim device or None.
Using this method is discouraged, as it makes tests nsim-specific.
Standard interfaces available on all HW should ideally be used.
This method is intended for the few cases where nsim-specific
assertions need to be verified which cannot be verified otherwise.
"""
return self._ns
def _require_cmd(self, comm, key, host=None):
cached = self._required_cmd.get(comm, {})
if cached.get(key) is None:

View File

@@ -0,0 +1,343 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
"""MACsec tests."""
import os
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises
from lib.py import ksft_variants, KsftNamedVariant
from lib.py import CmdExitFailure, KsftSkipEx
from lib.py import NetDrvEpEnv
from lib.py import cmd, ip, defer, ethtool
MACSEC_KEY = "12345678901234567890123456789012"
MACSEC_VLAN_VID = 10
# Unique prefix per run to avoid collisions in the shared netns.
# Keep it short: IFNAMSIZ is 16 (incl. NUL), and VLAN names append ".<vid>".
MACSEC_PFX = f"ms{os.getpid()}_"
def _macsec_name(idx=0):
return f"{MACSEC_PFX}{idx}"
def _get_macsec_offload(dev):
"""Returns macsec offload mode string from ip -d link show."""
info = ip(f"-d link show dev {dev}", json=True)[0]
return info.get("linkinfo", {}).get("info_data", {}).get("offload")
def _get_features(dev):
"""Returns ethtool features dict for a device."""
return ethtool(f"-k {dev}", json=True)[0]
def _require_ip_macsec(cfg):
"""SKIP if iproute2 on local or remote lacks 'ip macsec' support."""
for host in [None, cfg.remote]:
out = cmd("ip macsec help", fail=False, host=host)
if "Usage" not in out.stdout + out.stderr:
where = "remote" if host else "local"
raise KsftSkipEx(f"iproute2 too old on {where},"
" missing macsec support")
def _require_ip_macsec_offload():
"""SKIP if local iproute2 doesn't understand 'ip macsec offload'."""
out = cmd("ip macsec help", fail=False)
if "offload" not in out.stdout + out.stderr:
raise KsftSkipEx("iproute2 too old, missing macsec offload")
def _require_macsec_offload(cfg):
"""SKIP if local device doesn't support macsec-hw-offload."""
_require_ip_macsec_offload()
try:
feat = ethtool(f"-k {cfg.ifname}", json=True)[0]
except (CmdExitFailure, IndexError) as e:
raise KsftSkipEx(
f"can't query features: {e}") from e
if not feat.get("macsec-hw-offload", {}).get("active"):
raise KsftSkipEx("macsec-hw-offload not supported")
def _get_mac(ifname, host=None):
"""Gets MAC address of an interface."""
dev = ip(f"link show dev {ifname}", json=True, host=host)
return dev[0]["address"]
def _setup_macsec_sa(cfg, name):
"""Adds matching TX/RX SAs on both ends."""
local_mac = _get_mac(name)
remote_mac = _get_mac(name, host=cfg.remote)
ip(f"macsec add {name} tx sa 0 pn 1 on key 01 {MACSEC_KEY}")
ip(f"macsec add {name} rx port 1 address {remote_mac}")
ip(f"macsec add {name} rx port 1 address {remote_mac} "
f"sa 0 pn 1 on key 02 {MACSEC_KEY}")
ip(f"macsec add {name} tx sa 0 pn 1 on key 02 {MACSEC_KEY}",
host=cfg.remote)
ip(f"macsec add {name} rx port 1 address {local_mac}", host=cfg.remote)
ip(f"macsec add {name} rx port 1 address {local_mac} "
f"sa 0 pn 1 on key 01 {MACSEC_KEY}", host=cfg.remote)
def _setup_macsec_devs(cfg, name, offload):
"""Creates macsec devices on both ends.
Only the local device gets HW offload; the remote always uses software
MACsec since it may not support offload at all.
"""
offload_arg = "mac" if offload else "off"
ip(f"link add link {cfg.ifname} {name} "
f"type macsec encrypt on offload {offload_arg}")
defer(ip, f"link del {name}")
ip(f"link add link {cfg.remote_ifname} {name} "
f"type macsec encrypt on", host=cfg.remote)
defer(ip, f"link del {name}", host=cfg.remote)
def _set_offload(name, offload):
"""Sets offload on the local macsec device only."""
offload_arg = "mac" if offload else "off"
ip(f"link set {name} type macsec encrypt on offload {offload_arg}")
def _setup_vlans(cfg, name, vid):
"""Adds VLANs on top of existing macsec devs."""
vlan_name = f"{name}.{vid}"
ip(f"link add link {name} {vlan_name} type vlan id {vid}")
defer(ip, f"link del {vlan_name}")
ip(f"link add link {name} {vlan_name} type vlan id {vid}", host=cfg.remote)
defer(ip, f"link del {vlan_name}", host=cfg.remote)
def _setup_vlan_ips(cfg, name, vid):
"""Adds VLANs and IPs and brings up the macsec + VLAN devices."""
local_ip = "198.51.100.1"
remote_ip = "198.51.100.2"
vlan_name = f"{name}.{vid}"
ip(f"addr add {local_ip}/24 dev {vlan_name}")
ip(f"addr add {remote_ip}/24 dev {vlan_name}", host=cfg.remote)
ip(f"link set {name} up")
ip(f"link set {name} up", host=cfg.remote)
ip(f"link set {vlan_name} up")
ip(f"link set {vlan_name} up", host=cfg.remote)
return vlan_name, remote_ip
def test_offload_api(cfg) -> None:
"""MACsec offload API: create SecY, add SA/rx, toggle offload."""
_require_macsec_offload(cfg)
ms0 = _macsec_name(0)
ms1 = _macsec_name(1)
ms2 = _macsec_name(2)
# Create 3 SecY with offload
ip(f"link add link {cfg.ifname} {ms0} type macsec "
f"port 4 encrypt on offload mac")
defer(ip, f"link del {ms0}")
ip(f"link add link {cfg.ifname} {ms1} type macsec "
f"address aa:bb:cc:dd:ee:ff port 5 encrypt on offload mac")
defer(ip, f"link del {ms1}")
ip(f"link add link {cfg.ifname} {ms2} type macsec "
f"sci abbacdde01020304 encrypt on offload mac")
defer(ip, f"link del {ms2}")
# Add TX SA
ip(f"macsec add {ms0} tx sa 0 pn 1024 on "
"key 01 12345678901234567890123456789012")
# Add RX SC + SA
ip(f"macsec add {ms0} rx port 1234 address 1c:ed:de:ad:be:ef")
ip(f"macsec add {ms0} rx port 1234 address 1c:ed:de:ad:be:ef "
"sa 0 pn 1 on key 00 0123456789abcdef0123456789abcdef")
# Can't disable offload when SAs are configured
with ksft_raises(CmdExitFailure):
ip(f"link set {ms0} type macsec offload off")
with ksft_raises(CmdExitFailure):
ip(f"macsec offload {ms0} off")
# Toggle offload via rtnetlink on SA-free device
ip(f"link set {ms2} type macsec offload off")
ip(f"link set {ms2} type macsec encrypt on offload mac")
# Toggle offload via genetlink
ip(f"macsec offload {ms2} off")
ip(f"macsec offload {ms2} mac")
def test_max_secy(cfg) -> None:
    """nsim-only test for max number of SecYs."""
    cfg.require_nsim()
    _require_ip_macsec_offload()
    devs = [_macsec_name(i) for i in range(4)]
    specs = [
        "port 4",
        "address aa:bb:cc:dd:ee:ff port 5",
        "sci abbacdde01020304",
    ]
    # The first three offloaded SecYs fit within the nsim limit.
    for dev, spec in zip(devs, specs):
        ip(f"link add link {cfg.ifname} {dev} type macsec "
           f"{spec} encrypt on offload mac")
        defer(ip, f"link del {dev}")
    # A fourth one must be rejected by the driver.
    with ksft_raises(CmdExitFailure):
        ip(f"link add link {cfg.ifname} {devs[3]} "
           f"type macsec port 8 encrypt on offload mac")
def test_max_sc(cfg) -> None:
    """nsim-only test for max number of SCs."""
    cfg.require_nsim()
    _require_ip_macsec_offload()
    secy = _macsec_name(0)
    ip(f"link add link {cfg.ifname} {secy} type macsec "
       f"port 4 encrypt on offload mac")
    defer(ip, f"link del {secy}")
    # One RX SC is within the nsim limit; adding a second must fail.
    ip(f"macsec add {secy} rx port 1234 address 1c:ed:de:ad:be:ef")
    with ksft_raises(CmdExitFailure):
        ip(f"macsec add {secy} rx port 1235 address 1c:ed:de:ad:be:ef")
def test_offload_state(cfg) -> None:
    """Offload state reflects configuration changes."""
    _require_macsec_offload(cfg)
    dev = _macsec_name(0)
    # A device created with "offload mac" must report it right away.
    ip(f"link add link {cfg.ifname} {dev} type macsec "
       f"encrypt on offload mac")
    del_dev = defer(ip, f"link del {dev}")
    ksft_eq(_get_macsec_offload(dev), "mac",
            "created with offload: should be mac")
    snapshot_on = _get_features(dev)
    # Disable offload, snapshot the software feature set, then
    # re-enable and verify the offloaded feature set is restored.
    ip(f"link set {dev} type macsec offload off")
    ksft_eq(_get_macsec_offload(dev), "off",
            "offload disabled: should be off")
    snapshot_off = _get_features(dev)
    ip(f"link set {dev} type macsec encrypt on offload mac")
    ksft_eq(_get_macsec_offload(dev), "mac",
            "offload re-enabled: should be mac")
    ksft_eq(_get_features(dev), snapshot_on,
            "features should match first offload-on snapshot")
    # Recreate without offload: features must match the earlier
    # offload-off snapshot, and enabling offload afterwards must
    # bring back the offload-on feature set.
    del_dev.exec()
    ip(f"link add link {cfg.ifname} {dev} type macsec")
    defer(ip, f"link del {dev}")
    ksft_eq(_get_macsec_offload(dev), "off",
            "created without offload: should be off")
    ksft_eq(_get_features(dev), snapshot_off,
            "features should match first offload-off snapshot")
    ip(f"link set {dev} type macsec encrypt on offload mac")
    ksft_eq(_get_macsec_offload(dev), "mac",
            "offload enabled after create: should be mac")
    ksft_eq(_get_features(dev), snapshot_on,
            "features should match first offload-on snapshot")
def _check_nsim_vid(cfg, vid, expected) -> None:
    """Checks if a VLAN is present. Only works on netdevsim."""
    nsim = cfg.get_local_nsim_dev()
    if not nsim:
        # Not netdevsim: nothing to verify.
        return
    debugfs_vlan = os.path.join(nsim.nsims[0].dfs_dir, "vlan")
    with open(debugfs_vlan, encoding="utf-8") as vlan_file:
        contents = vlan_file.read()
    # The driver dumps one "ctag <vid>" entry per filtered VLAN.
    present = f"ctag {vid}\n" in contents
    ksft_eq(present, expected,
            f"VLAN {vid} {'expected' if expected else 'not expected'}"
            f" in debugfs")
@ksft_variants([
    KsftNamedVariant("offloaded", True),
    KsftNamedVariant("software", False),
])
def test_vlan(cfg, offload) -> None:
    """Ping through VLAN-over-macsec."""
    _require_ip_macsec(cfg)
    if offload:
        _require_macsec_offload(cfg)
    else:
        _require_ip_macsec_offload()
    macsec_dev = _macsec_name()
    # Build the macsec pair, key it, and stack a VLAN on top.
    _setup_macsec_devs(cfg, macsec_dev, offload=offload)
    _setup_macsec_sa(cfg, macsec_dev)
    _setup_vlans(cfg, macsec_dev, MACSEC_VLAN_VID)
    vlan_dev, peer = _setup_vlan_ips(cfg, macsec_dev, MACSEC_VLAN_VID)
    _check_nsim_vid(cfg, MACSEC_VLAN_VID, offload)
    # nsim doesn't handle the data path for offloaded macsec, so skip
    # the ping when offloaded on nsim.
    if not offload or not cfg.get_local_nsim_dev():
        cmd(f"ping -I {vlan_dev} -c 1 -W 5 {peer}")
@ksft_variants([
    KsftNamedVariant("on_to_off", True),
    KsftNamedVariant("off_to_on", False),
])
def test_vlan_toggle(cfg, offload) -> None:
    """Toggle offload: VLAN filters propagate/remove correctly."""
    _require_ip_macsec(cfg)
    _require_macsec_offload(cfg)
    macsec_dev = _macsec_name()
    _setup_macsec_devs(cfg, macsec_dev, offload=offload)
    _setup_vlans(cfg, macsec_dev, MACSEC_VLAN_VID)
    # The lower device's VLAN filter must track the offload state
    # across a toggle.
    _check_nsim_vid(cfg, MACSEC_VLAN_VID, offload)
    _set_offload(macsec_dev, offload=not offload)
    _check_nsim_vid(cfg, MACSEC_VLAN_VID, not offload)
    vlan_dev, peer = _setup_vlan_ips(cfg, macsec_dev, MACSEC_VLAN_VID)
    _setup_macsec_sa(cfg, macsec_dev)
    # nsim doesn't handle the data path for offloaded macsec, so skip
    # the ping when the final state is offloaded on nsim.
    if offload or not cfg.get_local_nsim_dev():
        cmd(f"ping -I {vlan_dev} -c 1 -W 5 {peer}")
def main() -> None:
    """Main program."""
    tests = [
        test_offload_api,
        test_max_secy,
        test_max_sc,
        test_offload_state,
        test_vlan,
        test_vlan_toggle,
    ]
    with NetDrvEpEnv(__file__) as cfg:
        ksft_run(tests, args=(cfg,))
    ksft_exit()


if __name__ == "__main__":
    main()

View File

@@ -11,7 +11,6 @@ TEST_PROGS := \
fib.sh \
fib_notifications.sh \
hw_stats_l3.sh \
macsec-offload.sh \
nexthop.sh \
peer.sh \
psample.sh \

View File

@@ -1,117 +0,0 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0-only
# MACsec hardware-offload API test against the netdevsim driver.
# Exit code 4 is the kselftest SKIP convention.
source ethtool-common.sh
NSIM_NETDEV=$(make_netdev)
MACSEC_NETDEV=macsec_nsim
set -o pipefail
# Skip unless the netdevsim device advertises MACsec offload support.
if ! ethtool -k $NSIM_NETDEV | grep -q 'macsec-hw-offload: on'; then
echo "SKIP: netdevsim doesn't support MACsec offload"
exit 4
fi
# Probe that an offloaded macsec device can actually be created,
# then remove the probe device before the real tests start.
if ! ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec offload mac 2>/dev/null; then
echo "SKIP: couldn't create macsec device"
exit 4
fi
ip link del $MACSEC_NETDEV
#
# test macsec offload API
#
# NOTE(review): check() appears to come from ethtool-common.sh; it
# presumably compares $? against an expected status (4th arg) and
# bumps num_passes/num_errors -- confirm against that file.
# Create SecYs identified three different ways: implicit SCI via port,
# MAC address + port, and an explicit SCI.
ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}" type macsec port 4 offload mac
check $?
ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}2" type macsec address "aa:bb:cc:dd:ee:ff" port 5 offload mac
check $?
ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}3" type macsec sci abbacdde01020304 offload mac
check $?
# A fourth offloaded SecY must be rejected (expected failure).
ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}4" type macsec port 8 offload mac 2> /dev/null
check $? '' '' 1
# Configure a TX SA, an RX SC, and an RX SA under that SC.
ip macsec add "${MACSEC_NETDEV}" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
check $?
ip macsec add "${MACSEC_NETDEV}" rx port 1234 address "1c:ed:de:ad:be:ef"
check $?
ip macsec add "${MACSEC_NETDEV}" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on \
key 00 0123456789abcdef0123456789abcdef
check $?
# A second RX SC must be rejected (expected failure).
ip macsec add "${MACSEC_NETDEV}" rx port 1235 address "1c:ed:de:ad:be:ef" 2> /dev/null
check $? '' '' 1
# can't disable macsec offload when SAs are configured
ip link set "${MACSEC_NETDEV}" type macsec offload off 2> /dev/null
check $? '' '' 1
ip macsec offload "${MACSEC_NETDEV}" off 2> /dev/null
check $? '' '' 1
# toggle macsec offload via rtnetlink (SA-free device)
ip link set "${MACSEC_NETDEV}2" type macsec offload off
check $?
ip link set "${MACSEC_NETDEV}2" type macsec offload mac
check $?
# toggle macsec offload via genetlink
ip macsec offload "${MACSEC_NETDEV}2" off
check $?
ip macsec offload "${MACSEC_NETDEV}2" mac
check $?
# Tear down all three SecYs.
for dev in ${MACSEC_NETDEV}{,2,3} ; do
ip link del $dev
check $?
done
#
# test ethtool features when toggling offload
#
# Toggling offload off and back on must restore the exact feature set.
ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec offload mac
TMP_FEATS_ON_1="$(ethtool -k $MACSEC_NETDEV)"
ip link set $MACSEC_NETDEV type macsec offload off
TMP_FEATS_OFF_1="$(ethtool -k $MACSEC_NETDEV)"
ip link set $MACSEC_NETDEV type macsec offload mac
TMP_FEATS_ON_2="$(ethtool -k $MACSEC_NETDEV)"
[ "$TMP_FEATS_ON_1" = "$TMP_FEATS_ON_2" ]
check $?
ip link del $MACSEC_NETDEV
# Recreate without offload: features must match the earlier
# offload-off snapshot, and enabling offload must restore the
# offload-on feature set.
ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec
check $?
TMP_FEATS_OFF_2="$(ethtool -k $MACSEC_NETDEV)"
[ "$TMP_FEATS_OFF_1" = "$TMP_FEATS_OFF_2" ]
check $?
ip link set $MACSEC_NETDEV type macsec offload mac
check $?
TMP_FEATS_ON_3="$(ethtool -k $MACSEC_NETDEV)"
[ "$TMP_FEATS_ON_1" = "$TMP_FEATS_ON_3" ]
check $?
# Final verdict; num_errors/num_passes are presumably maintained by
# check() in ethtool-common.sh -- confirm against that file.
if [ $num_errors -eq 0 ]; then
echo "PASSED all $((num_passes)) checks"
exit 0
else
echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
exit 1
fi