Files
linux/tools/testing/selftests/bpf/progs/iters_task_failure.c
Kumar Kartikeya Dwivedi 1512231b6c bpf: Enforce RCU protection for KF_RCU_PROTECTED
Currently, KF_RCU_PROTECTED only applies to iterator APIs and that too
in a convoluted fashion: the presence of this flag on the kfunc is used
to set MEM_RCU in iterator type, and the lack of RCU protection results
in an error only later, once next() or destroy() methods are invoked on
the iterator. While there is no bug, this is certainly a bit
unintuitive, and makes the enforcement of the flag iterator specific.

In the interest of making this flag useful for other upcoming kfuncs,
e.g. scx_bpf_cpu_curr() [0][1], add enforcement for invoking the kfunc
in an RCU critical section in general.

This would also mean that iterator APIs using KF_RCU_PROTECTED will
error out earlier, instead of throwing an error for lack of RCU CS
protection when next() or destroy() methods are invoked.

In addition to this, if the kfuncs tagged KF_RCU_PROTECTED return a
pointer value, ensure that this pointer value is only usable in an RCU
critical section. There might be edge cases where the return value is
special and doesn't need to imply MEM_RCU semantics, but in general, the
assumption should hold for the majority of kfuncs, and we can revisit
things if necessary later.

  [0]: https://lore.kernel.org/all/20250903212311.369697-3-christian.loehle@arm.com
  [1]: https://lore.kernel.org/all/20250909195709.92669-1-arighi@nvidia.com

Tested-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250917032755.4068726-2-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2025-09-18 15:36:17 -07:00

106 lines
2.5 KiB
C

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
char _license[] SEC("license") = "GPL";
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *p) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
/* Negative test: the task iterator constructor is RCU-protected
 * (KF_RCU_PROTECTED), so creating it outside an RCU read-side critical
 * section must be rejected at load time with the message below.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("kernel func bpf_iter_task_new requires RCU critical section protection")
int BPF_PROG(iter_tasks_without_lock)
{
struct task_struct *pos;
/* No bpf_rcu_read_lock() taken before iterating — expected to fail. */
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
}
return 0;
}
/* Negative test: same as iter_tasks_without_lock, but for the css
 * (cgroup_subsys_state) iterator — constructing it without holding the
 * RCU read lock must be rejected with the message below.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("kernel func bpf_iter_css_new requires RCU critical section protection")
int BPF_PROG(iter_css_without_lock)
{
u64 cg_id = bpf_get_current_cgroup_id();
/* bpf_cgroup_from_id() takes a reference; released below. */
struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
struct cgroup_subsys_state *root_css, *pos;
if (!cgrp)
return 0;
root_css = &cgrp->self;
/* No RCU critical section around the iterator — expected to fail. */
bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
}
bpf_cgroup_release(cgrp);
return 0;
}
/* Negative test: the iterator is created inside an RCU critical section,
 * but unlocking within the loop body ends that section; re-locking starts
 * a new one, which does not revalidate the iterator, so the implicit
 * bpf_iter_task_next() on the following iteration must be rejected.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("expected an RCU CS when using bpf_iter_task_next")
int BPF_PROG(iter_tasks_lock_and_unlock)
{
struct task_struct *pos;
bpf_rcu_read_lock();
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) {
/* Dropping the lock here invalidates the RCU-protected iterator. */
bpf_rcu_read_unlock();
bpf_rcu_read_lock();
}
bpf_rcu_read_unlock();
return 0;
}
/* Negative test: css-iterator variant of iter_tasks_lock_and_unlock —
 * unlock/relock inside the loop ends the RCU critical section the
 * iterator was created in, so the next() call must be rejected.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
__failure __msg("expected an RCU CS when using bpf_iter_css_next")
int BPF_PROG(iter_css_lock_and_unlock)
{
u64 cg_id = bpf_get_current_cgroup_id();
/* bpf_cgroup_from_id() takes a reference; released below. */
struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
struct cgroup_subsys_state *root_css, *pos;
if (!cgrp)
return 0;
root_css = &cgrp->self;
bpf_rcu_read_lock();
bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) {
/* Dropping the lock here invalidates the RCU-protected iterator. */
bpf_rcu_read_unlock();
bpf_rcu_read_lock();
}
bpf_rcu_read_unlock();
bpf_cgroup_release(cgrp);
return 0;
}
/* Negative test: attached as non-sleepable fentry (note "?fentry/", not
 * "?fentry.s/"); the css_task iterator is restricted to bpf_lsm, bpf_iter
 * and sleepable programs, so loading must fail with the message below.
 */
SEC("?fentry/" SYS_PREFIX "sys_getpgid")
__failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs")
int BPF_PROG(iter_css_task_for_each)
{
u64 cg_id = bpf_get_current_cgroup_id();
/* bpf_cgroup_from_id() takes a reference; released below. */
struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
struct cgroup_subsys_state *css;
struct task_struct *task;
if (cgrp == NULL)
return 0;
css = &cgrp->self;
bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
}
bpf_cgroup_release(cgrp);
return 0;
}