Files
linux/tools/testing/selftests/bpf/progs/lpm_trie_bench.c
Matt Fleming 737433c6a5 selftests/bpf: Add LPM trie microbenchmarks
Add benchmarks for the standard set of operations: LOOKUP, INSERT,
UPDATE, DELETE. Also include benchmarks to measure the overhead of the
bench framework itself (NOOP) as well as the overhead of generating keys
(BASELINE). Lastly, this includes a benchmark for FREE (trie_free())
which is known to have terrible performance for maps with many entries.

Benchmarks operate on tries without gaps in the key range, i.e. each
test begins or ends with a trie with valid keys in the range [0,
nr_entries). This is intended to cause maximum branching when traversing
the trie.

LOOKUP, UPDATE, DELETE, and FREE fill a BPF LPM trie from userspace
using bpf_map_update_batch() and run the corresponding benchmark
operation via bpf_loop(). INSERT starts with an empty map and fills it
kernel-side from bpf_loop(). FREE records the time to free a filled LPM
trie by attaching and destroying a BPF prog. NOOP measures the overhead
of the test harness by running an empty function with bpf_loop().
BASELINE is similar to NOOP except that the function generates a key.

Each operation runs 10,000 times using bpf_loop(). Note that this value
is intentionally independent of the number of entries in the LPM trie so
that the stability of the results isn't affected by the number of
entries.

For those benchmarks that need to reset the LPM trie once it's full
(INSERT) or empty (DELETE), throughput and latency results are scaled by
the fraction of a second the operation actually ran to ignore any time
spent reinitialising the trie.

By default, benchmarks run using sequential keys in the range [0,
nr_entries). BASELINE, LOOKUP, and UPDATE can use random keys via the
--random parameter but beware there is a runtime cost involved in
generating random keys. Other benchmarks are prohibited from using
random keys because it can skew the results, e.g. when inserting an
existing key or deleting a missing one.

All measurements are recorded from within the kernel to eliminate
syscall overhead. Most benchmarks run an XDP program to generate stats
but FREE needs to collect latencies using fentry/fexit on
map_free_deferred() because it's not possible to use fentry directly on
lpm_trie.c since commit c83508da56 ("bpf: Avoid deadlock caused by
nested kprobe and fentry bpf programs") and there's no way to
create/destroy a map from within an XDP program.

Here is example output from an AMD EPYC 9684X 96-Core machine for each
of the benchmarks using a trie with 10K entries and a 32-bit prefix
length, e.g.

  $ ./bench lpm-trie-$op \
  	--prefix_len=32 \
  	--producers=1 \
  	--nr_entries=10000

     noop: throughput   74.417 ± 0.032 M ops/s ( 74.417M ops/prod), latency   13.438 ns/op
 baseline: throughput   70.107 ± 0.171 M ops/s ( 70.107M ops/prod), latency   14.264 ns/op
   lookup: throughput    8.467 ± 0.047 M ops/s (  8.467M ops/prod), latency  118.109 ns/op
   insert: throughput    2.440 ± 0.015 M ops/s (  2.440M ops/prod), latency  409.290 ns/op
   update: throughput    2.806 ± 0.042 M ops/s (  2.806M ops/prod), latency  356.322 ns/op
   delete: throughput    4.625 ± 0.011 M ops/s (  4.625M ops/prod), latency  215.613 ns/op
     free: throughput    0.578 ± 0.006 K ops/s (  0.578K ops/prod), latency    1.730 ms/op

And the same benchmarks using random keys:

  $ ./bench lpm-trie-$op \
  	--prefix_len=32 \
  	--producers=1 \
  	--nr_entries=10000 \
  	--random

     noop: throughput   74.259 ± 0.335 M ops/s ( 74.259M ops/prod), latency   13.466 ns/op
 baseline: throughput   35.150 ± 0.144 M ops/s ( 35.150M ops/prod), latency   28.450 ns/op
   lookup: throughput    7.119 ± 0.048 M ops/s (  7.119M ops/prod), latency  140.469 ns/op
   insert: N/A
   update: throughput    2.736 ± 0.012 M ops/s (  2.736M ops/prod), latency  365.523 ns/op
   delete: N/A
     free: N/A

Signed-off-by: Matt Fleming <mfleming@cloudflare.com>
Signed-off-by: Jesper Dangaard Brouer <hawk@kernel.org>
Link: https://lore.kernel.org/r/20250827140149.1001557-1-matt@readmodwrite.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2025-08-27 17:28:14 -07:00

231 lines
4.5 KiB
C

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Cloudflare */
#include <vmlinux.h>
#include <errno.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_atomic.h"
#include "progs/lpm_trie.h"
#define BPF_OBJ_NAME_LEN 16U
#define MAX_ENTRIES 100000000
#define NR_LOOPS 10000
char _license[] SEC("license") = "GPL";

/* Filled by userspace. See fill_map() in bench_lpm_trie_map.c */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__type(key, struct trie_key);
	__type(value, __u32);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
} trie_map SEC(".maps");

/* Aggregate stats read by userspace: total completed ops and total ns. */
long hits;
long duration_ns;

/* Configured from userspace */
__u32 nr_entries;
__u32 prefixlen;
bool random;
__u8 op;

/*
 * Timestamp captured at fentry of bpf_map_free_deferred(); consumed and
 * cleared at the matching fexit to compute the FREE latency.
 */
static __u64 latency_free_start;
SEC("fentry/bpf_map_free_deferred")
int BPF_PROG(trie_free_entry, struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	char name[BPF_OBJ_NAME_LEN];
	u32 map_type;

	/*
	 * bpf_map_free_deferred() fires for every map type being freed;
	 * only measure LPM tries.
	 */
	map_type = BPF_CORE_READ(map, map_type);
	if (map_type != BPF_MAP_TYPE_LPM_TRIE)
		return 0;

	/*
	 * Ideally we'd have access to the map ID but that's already
	 * freed before we enter trie_free().
	 */
	BPF_CORE_READ_STR_INTO(&name, map, name);
	if (bpf_strncmp(name, BPF_OBJ_NAME_LEN, "trie_free_map"))
		return 0;

	/* Start the latency measurement; finished in trie_free_exit(). */
	latency_free_start = bpf_ktime_get_ns();

	return 0;
}
SEC("fexit/bpf_map_free_deferred")
int BPF_PROG(trie_free_exit, struct work_struct *work)
{
	__u64 elapsed;

	/* Only record if the fentry hook matched our benchmark map. */
	if (!latency_free_start)
		return 0;

	elapsed = bpf_ktime_get_ns() - latency_free_start;
	latency_free_start = 0;

	/* Fold the FREE latency into the shared stats read by userspace. */
	__sync_add_and_fetch(&duration_ns, elapsed);
	__sync_add_and_fetch(&hits, 1);

	return 0;
}
/* Monotonic counter backing sequential key generation. */
static __u32 cur_key;

/*
 * Produce the next benchmark key. Keys are either random or sequential
 * (configured by userspace), and always wrapped into [0, nr_entries).
 */
static __always_inline void generate_key(struct trie_key *key)
{
	__u32 raw = random ? bpf_get_prandom_u32() : cur_key++;

	key->prefixlen = prefixlen;
	key->data = raw % nr_entries;
}
/* Empty bpf_loop() body: measures the overhead of the harness itself. */
static int noop(__u32 index, __u32 *unused)
{
	(void)index;
	(void)unused;
	return 0;
}
/*
 * Generate a key but perform no map operation: isolates the cost of
 * generate_key() so it can be subtracted from the other benchmarks.
 */
static int baseline(__u32 index, __u32 *unused)
{
	struct trie_key key;
	__u32 blackbox = 0;

	generate_key(&key);
	/* Avoid compiler optimizing out the modulo */
	barrier_var(blackbox);
	blackbox = READ_ONCE(key.data);

	return 0;
}
/* One LPM lookup per iteration; stops the loop if a key is missing. */
static int lookup(__u32 index, int *retval)
{
	struct trie_key key;
	void *value;

	generate_key(&key);

	value = bpf_map_lookup_elem(&trie_map, &key);
	if (value)
		return 0;

	/* Userspace pre-filled every key in [0, nr_entries). */
	*retval = -ENOENT;
	return 1;
}
/*
 * Insert a fresh entry. The map starts empty and is filled kernel-side;
 * BPF_NOEXIST guarantees we never measure an overwrite by mistake.
 */
static int insert(__u32 index, int *retval)
{
	struct trie_key key;
	u32 val = 1;
	int err;

	generate_key(&key);

	err = bpf_map_update_elem(&trie_map, &key, &val, BPF_NOEXIST);
	if (err) {
		*retval = err;
		return 1;
	}

	/* Is this the last entry? */
	if (key.data == nr_entries - 1) {
		/* For atomicity concerns, see the comment in delete() */
		*retval = LPM_BENCH_REINIT_MAP;
		return 1;
	}

	return 0;
}
/* Overwrite an existing entry; the trie is pre-filled by userspace. */
static int update(__u32 index, int *retval)
{
	struct trie_key key;
	u32 new_val = 1;
	int ret;

	generate_key(&key);

	ret = bpf_map_update_elem(&trie_map, &key, &new_val, BPF_EXIST);
	if (!ret)
		return 0;

	/* BPF_EXIST failed: the key was unexpectedly absent. */
	*retval = ret;
	return 1;
}
/*
 * Delete one entry per iteration. When the final key is removed the
 * prog asks userspace to refill the trie so the next run starts full.
 */
static int delete(__u32 index, int *retval)
{
	struct trie_key key;
	int err;

	generate_key(&key);

	err = bpf_map_delete_elem(&trie_map, &key);
	if (err) {
		*retval = err;
		return 1;
	}

	/* Do we need to refill the map? */
	if (key.data == nr_entries - 1) {
		/*
		 * Atomicity isn't required because DELETE only supports
		 * one producer running concurrently. What we need is a
		 * way to track how many entries have been deleted from
		 * the trie between consecutive invocations of the BPF
		 * prog because a single bpf_loop() call might not
		 * delete all entries, e.g. when NR_LOOPS < nr_entries.
		 */
		*retval = LPM_BENCH_REINIT_MAP;
		return 1;
	}

	return 0;
}
SEC("xdp")
int BPF_PROG(run_bench)
{
	int err = LPM_BENCH_SUCCESS;
	u64 start, delta;
	int loops;

	start = bpf_ktime_get_ns();

	/*
	 * Dispatch to the operation selected by userspace via the 'op'
	 * global; each callback runs NR_LOOPS times under bpf_loop().
	 */
	switch (op) {
	case LPM_OP_NOOP:
		loops = bpf_loop(NR_LOOPS, noop, NULL, 0);
		break;
	case LPM_OP_BASELINE:
		loops = bpf_loop(NR_LOOPS, baseline, NULL, 0);
		break;
	case LPM_OP_LOOKUP:
		loops = bpf_loop(NR_LOOPS, lookup, &err, 0);
		break;
	case LPM_OP_INSERT:
		loops = bpf_loop(NR_LOOPS, insert, &err, 0);
		break;
	case LPM_OP_UPDATE:
		loops = bpf_loop(NR_LOOPS, update, &err, 0);
		break;
	case LPM_OP_DELETE:
		loops = bpf_loop(NR_LOOPS, delete, &err, 0);
		break;
	default:
		bpf_printk("invalid benchmark operation\n");
		return -1;
	}

	/* Timings are taken in-kernel to exclude syscall overhead. */
	delta = bpf_ktime_get_ns() - start;

	__sync_add_and_fetch(&duration_ns, delta);
	__sync_add_and_fetch(&hits, loops);

	return err;
}