mirror of
https://github.com/torvalds/linux.git
synced 2026-04-25 18:12:26 -04:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says: ==================== pull-request: bpf-next 2020-09-01 The following pull-request contains BPF updates for your *net-next* tree. There are two small conflicts when pulling, resolve as follows: 1) Merge conflict in tools/lib/bpf/libbpf.c between 88a8212028 ("libbpf: Factor out common ELF operations and improve logging") in bpf-next and 1e891e513e ("libbpf: Fix map index used in error message") in net-next. Resolve by taking the hunk in bpf-next: [...] scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); data = elf_sec_data(obj, scn); if (!scn || !data) { pr_warn("elf: failed to get %s map definitions for %s\n", MAPS_ELF_SEC, obj->path); return -EINVAL; } [...] 2) Merge conflict in drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c between 9647c57b11 ("xsk: i40e: ice: ixgbe: mlx5: Test for dma_need_sync earlier for better performance") in bpf-next and e20f0dbf20 ("net/mlx5e: RX, Add a prefetch command for small L1_CACHE_BYTES") in net-next. Resolve the two locations by retaining net_prefetch() and taking xsk_buff_dma_sync_for_cpu() from bpf-next. Should look like: [...] xdp_set_data_meta_invalid(xdp); xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); net_prefetch(xdp->data); [...] We've added 133 non-merge commits during the last 14 day(s) which contain a total of 246 files changed, 13832 insertions(+), 3105 deletions(-). The main changes are: 1) Initial support for sleepable BPF programs along with bpf_copy_from_user() helper for tracing to reliably access user memory, from Alexei Starovoitov. 2) Add BPF infra for writing and parsing TCP header options, from Martin KaFai Lau. 3) bpf_d_path() helper for returning full path for given 'struct path', from Jiri Olsa. 4) AF_XDP support for shared umems between devices and queues, from Magnus Karlsson. 5) Initial prep work for full BPF-to-BPF call support in libbpf, from Andrii Nakryiko. 6) Generalize bpf_sk_storage map & add local storage for inodes, from KP Singh. 
7) Implement sockmap/hash updates from BPF context, from Lorenz Bauer. 8) BPF xor verification for scalar types & add BPF link iterator, from Yonghong Song. 9) Use target's prog type for BPF_PROG_TYPE_EXT prog verification, from Udip Pant. 10) Rework BPF tracing samples to use libbpf loader, from Daniel T. Lee. 11) Fix xdpsock sample to really cycle through all buffers, from Weqaar Janjua. 12) Improve type safety for tun/veth XDP frame handling, from Maciej Żenczykowski. 13) Various smaller cleanups and improvements all over the place. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
@@ -3151,7 +3151,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
|
||||
#endif
|
||||
|
||||
ixgbe_for_each_ring(ring, q_vector->tx) {
|
||||
bool wd = ring->xsk_umem ?
|
||||
bool wd = ring->xsk_pool ?
|
||||
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
|
||||
ixgbe_clean_tx_irq(q_vector, ring, budget);
|
||||
|
||||
@@ -3171,7 +3171,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
|
||||
per_ring_budget = budget;
|
||||
|
||||
ixgbe_for_each_ring(ring, q_vector->rx) {
|
||||
int cleaned = ring->xsk_umem ?
|
||||
int cleaned = ring->xsk_pool ?
|
||||
ixgbe_clean_rx_irq_zc(q_vector, ring,
|
||||
per_ring_budget) :
|
||||
ixgbe_clean_rx_irq(q_vector, ring,
|
||||
@@ -3466,9 +3466,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
|
||||
u32 txdctl = IXGBE_TXDCTL_ENABLE;
|
||||
u8 reg_idx = ring->reg_idx;
|
||||
|
||||
ring->xsk_umem = NULL;
|
||||
ring->xsk_pool = NULL;
|
||||
if (ring_is_xdp(ring))
|
||||
ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
|
||||
ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
|
||||
|
||||
/* disable queue to avoid issues while updating state */
|
||||
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
|
||||
@@ -3708,8 +3708,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
|
||||
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
|
||||
|
||||
/* configure the packet buffer length */
|
||||
if (rx_ring->xsk_umem) {
|
||||
u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
|
||||
if (rx_ring->xsk_pool) {
|
||||
u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
|
||||
|
||||
/* If the MAC support setting RXDCTL.RLPML, the
|
||||
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
|
||||
@@ -4054,12 +4054,12 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
|
||||
u8 reg_idx = ring->reg_idx;
|
||||
|
||||
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
|
||||
ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
|
||||
if (ring->xsk_umem) {
|
||||
ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
|
||||
if (ring->xsk_pool) {
|
||||
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
|
||||
MEM_TYPE_XSK_BUFF_POOL,
|
||||
NULL));
|
||||
xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
|
||||
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
|
||||
} else {
|
||||
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
|
||||
MEM_TYPE_PAGE_SHARED, NULL));
|
||||
@@ -4114,8 +4114,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
|
||||
#endif
|
||||
}
|
||||
|
||||
if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
|
||||
u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
|
||||
if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
|
||||
u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
|
||||
|
||||
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
|
||||
IXGBE_RXDCTL_RLPML_EN);
|
||||
@@ -4137,7 +4137,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
|
||||
|
||||
ixgbe_rx_desc_queue_enable(adapter, ring);
|
||||
if (ring->xsk_umem)
|
||||
if (ring->xsk_pool)
|
||||
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
|
||||
else
|
||||
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
|
||||
@@ -5287,7 +5287,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
|
||||
u16 i = rx_ring->next_to_clean;
|
||||
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
|
||||
|
||||
if (rx_ring->xsk_umem) {
|
||||
if (rx_ring->xsk_pool) {
|
||||
ixgbe_xsk_clean_rx_ring(rx_ring);
|
||||
goto skip_free;
|
||||
}
|
||||
@@ -5979,7 +5979,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
|
||||
u16 i = tx_ring->next_to_clean;
|
||||
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
|
||||
|
||||
if (tx_ring->xsk_umem) {
|
||||
if (tx_ring->xsk_pool) {
|
||||
ixgbe_xsk_clean_tx_ring(tx_ring);
|
||||
goto out;
|
||||
}
|
||||
@@ -10141,7 +10141,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
|
||||
*/
|
||||
if (need_reset && prog)
|
||||
for (i = 0; i < adapter->num_rx_queues; i++)
|
||||
if (adapter->xdp_ring[i]->xsk_umem)
|
||||
if (adapter->xdp_ring[i]->xsk_pool)
|
||||
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
|
||||
XDP_WAKEUP_RX);
|
||||
|
||||
@@ -10155,8 +10155,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
|
||||
switch (xdp->command) {
|
||||
case XDP_SETUP_PROG:
|
||||
return ixgbe_xdp_setup(dev, xdp->prog);
|
||||
case XDP_SETUP_XSK_UMEM:
|
||||
return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
|
||||
case XDP_SETUP_XSK_POOL:
|
||||
return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
|
||||
xdp->xsk.queue_id);
|
||||
|
||||
default:
|
||||
|
||||
Reference in New Issue
Block a user