mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 14:53:58 -04:00
Commit c27cea4416 ("rcu: Re-implement RCU Tasks Trace in terms of SRCU-fast") broke map_kptr selftest since it removed the function we were kprobing. Use a new kfunc that invokes call_rcu_tasks_trace and sets a program provided pointer to an integer to 1. Technically this can be unsafe if the memory being written to from the callback disappears, but this is just for usage in a test where we ensure we spin until we see the value to be set to 1, so it's ok. Reported-by: Shung-Hsi Yu <shung-hsi.yu@suse.com> Fixes: c27cea4416 ("rcu: Re-implement RCU Tasks Trace in terms of SRCU-fast") Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Link: https://lore.kernel.org/r/20260211185747.3630539-1-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
184 lines
6.1 KiB
C
184 lines
6.1 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <test_progs.h>
|
|
#include <network_helpers.h>
|
|
|
|
#include "map_kptr.skel.h"
|
|
#include "map_kptr_fail.skel.h"
|
|
#include "rcu_tasks_trace_gp.skel.h"
|
|
|
|
static void test_map_kptr_success(bool test_run)
|
|
{
|
|
LIBBPF_OPTS(bpf_test_run_opts, lopts);
|
|
LIBBPF_OPTS(bpf_test_run_opts, opts,
|
|
.data_in = &pkt_v4,
|
|
.data_size_in = sizeof(pkt_v4),
|
|
.repeat = 1,
|
|
);
|
|
int key = 0, ret, cpu;
|
|
struct map_kptr *skel;
|
|
char buf[16], *pbuf;
|
|
|
|
skel = map_kptr__open_and_load();
|
|
if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
|
|
return;
|
|
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref1), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref1 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref1 retval");
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
|
|
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref1), &lopts);
|
|
ASSERT_OK(ret, "test_ls_map_kptr_ref1 refcount");
|
|
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref1 retval");
|
|
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref2), &lopts);
|
|
ASSERT_OK(ret, "test_ls_map_kptr_ref2 refcount");
|
|
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref2 retval");
|
|
|
|
if (test_run)
|
|
goto exit;
|
|
|
|
cpu = libbpf_num_possible_cpus();
|
|
if (!ASSERT_GT(cpu, 0, "libbpf_num_possible_cpus"))
|
|
goto exit;
|
|
|
|
pbuf = calloc(cpu, sizeof(buf));
|
|
if (!ASSERT_OK_PTR(pbuf, "calloc(pbuf)"))
|
|
goto exit;
|
|
|
|
ret = bpf_map__update_elem(skel->maps.array_map,
|
|
&key, sizeof(key), buf, sizeof(buf), 0);
|
|
ASSERT_OK(ret, "array_map update");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_map__update_elem(skel->maps.pcpu_array_map,
|
|
&key, sizeof(key), pbuf, cpu * sizeof(buf), 0);
|
|
ASSERT_OK(ret, "pcpu_array_map update");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
|
|
ASSERT_OK(ret, "hash_map delete");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_map__delete_elem(skel->maps.pcpu_hash_map, &key, sizeof(key), 0);
|
|
ASSERT_OK(ret, "pcpu_hash_map delete");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
|
|
ASSERT_OK(ret, "hash_malloc_map delete");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_map__delete_elem(skel->maps.pcpu_hash_malloc_map, &key, sizeof(key), 0);
|
|
ASSERT_OK(ret, "pcpu_hash_malloc_map delete");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
|
|
ASSERT_OK(ret, "lru_hash_map delete");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_map__delete_elem(skel->maps.lru_pcpu_hash_map, &key, sizeof(key), 0);
|
|
ASSERT_OK(ret, "lru_pcpu_hash_map delete");
|
|
skel->data->ref--;
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts);
|
|
ASSERT_OK(ret, "test_map_kptr_ref3 refcount");
|
|
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");
|
|
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref_del), &lopts);
|
|
ASSERT_OK(ret, "test_ls_map_kptr_ref_del delete");
|
|
skel->data->ref--;
|
|
ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref_del retval");
|
|
|
|
free(pbuf);
|
|
exit:
|
|
map_kptr__destroy(skel);
|
|
}
|
|
|
|
static int kern_sync_rcu_tasks_trace(struct rcu_tasks_trace_gp *rcu)
|
|
{
|
|
LIBBPF_OPTS(bpf_test_run_opts, opts);
|
|
int ret;
|
|
|
|
WRITE_ONCE(rcu->bss->done, 0);
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.call_rcu_tasks_trace), &opts);
|
|
if (!ASSERT_OK(ret, "call_rcu_tasks_trace"))
|
|
return -EFAULT;
|
|
if (!ASSERT_OK(opts.retval, "call_rcu_tasks_trace retval"))
|
|
return -EFAULT;
|
|
while (!READ_ONCE(rcu->bss->done))
|
|
sched_yield();
|
|
return 0;
|
|
}
|
|
|
|
static void wait_for_map_release(void)
|
|
{
|
|
LIBBPF_OPTS(bpf_test_run_opts, lopts);
|
|
struct map_kptr *skel;
|
|
int ret;
|
|
|
|
skel = map_kptr__open_and_load();
|
|
if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
|
|
return;
|
|
|
|
do {
|
|
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.count_ref), &lopts);
|
|
ASSERT_OK(ret, "count_ref ret");
|
|
ASSERT_OK(lopts.retval, "count_ref retval");
|
|
} while (skel->bss->num_of_refs != 2);
|
|
|
|
map_kptr__destroy(skel);
|
|
}
|
|
|
|
void serial_test_map_kptr(void)
|
|
{
|
|
struct rcu_tasks_trace_gp *skel;
|
|
|
|
RUN_TESTS(map_kptr_fail);
|
|
|
|
skel = rcu_tasks_trace_gp__open_and_load();
|
|
if (!ASSERT_OK_PTR(skel, "rcu_tasks_trace_gp__open_and_load"))
|
|
return;
|
|
|
|
if (test__start_subtest("success-map")) {
|
|
test_map_kptr_success(true);
|
|
|
|
ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
|
|
ASSERT_OK(kern_sync_rcu(), "sync rcu");
|
|
wait_for_map_release();
|
|
|
|
/* Observe refcount dropping to 1 on bpf_map_free_deferred */
|
|
test_map_kptr_success(false);
|
|
|
|
ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace");
|
|
ASSERT_OK(kern_sync_rcu(), "sync rcu");
|
|
wait_for_map_release();
|
|
|
|
/* Observe refcount dropping to 1 on synchronous delete elem */
|
|
test_map_kptr_success(true);
|
|
}
|
|
|
|
rcu_tasks_trace_gp__destroy(skel);
|
|
}
|