Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix register equivalence for pointers to packet (Alexei Starovoitov)

 - Fix incorrect pruning due to atomic fetch precision tracking (Daniel
   Borkmann)

 - Fix grace period wait for bpf_link-ed tracepoints (Kumar Kartikeya
   Dwivedi)

 - Fix use-after-free of sockmap's sk->sk_socket (Kuniyuki Iwashima)

 - Reject direct access to nullable PTR_TO_BUF pointers (Qi Tang)

 - Reject sleepable kprobe_multi programs at attach time (Varun R
   Mallya)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Add more precision tracking tests for atomics
  bpf: Fix incorrect pruning due to atomic fetch precision tracking
  bpf: Reject sleepable kprobe_multi programs at attach time
  bpf: reject direct access to nullable PTR_TO_BUF pointers
  bpf: sockmap: Fix use-after-free of sk->sk_socket in sk_psock_verdict_data_ready().
  bpf: Fix grace period wait for tracepoint bpf_link
  bpf: Fix regsafe() for pointers to packet
This commit is contained in:
Linus Torvalds
2026-04-02 18:59:56 -07:00
7 changed files with 432 additions and 12 deletions

View File

@@ -1854,6 +1854,10 @@ struct bpf_link_ops {
* target hook is sleepable, we'll go through tasks trace RCU GP and
* then "classic" RCU GP; this need for chaining tasks trace and
* classic RCU GPs is designated by setting bpf_link->sleepable flag
*
* For non-sleepable tracepoint links we go through SRCU gp instead,
* since RCU is not used in that case. Sleepable tracepoints still
* follow the scheme above.
*/
void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);

View File

@@ -122,6 +122,22 @@ static inline bool tracepoint_is_faultable(struct tracepoint *tp)
{
return tp->ext && tp->ext->faultable;
}
/*
 * Run RCU callback with the appropriate grace period wait for non-faultable
 * tracepoints, e.g., those used in atomic context.
 *
 * Non-faultable tracepoints are protected by tracepoint_srcu, so the
 * callback is deferred past an SRCU grace period rather than a classic
 * RCU one.
 */
static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func)
{
call_srcu(&tracepoint_srcu, rcu, func);
}
/*
 * Run RCU callback with the appropriate grace period wait for faultable
 * tracepoints, e.g., those used in syscall context.
 *
 * Faultable (sleepable) tracepoints are protected by RCU Tasks Trace,
 * so the callback is deferred past a tasks-trace grace period.
 */
static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func)
{
call_rcu_tasks_trace(rcu, func);
}
#else
static inline void tracepoint_synchronize_unregister(void)
{ }
@@ -129,6 +145,10 @@ static inline bool tracepoint_is_faultable(struct tracepoint *tp)
{
return false;
}
/*
 * No-op stubs for the tracepoints-disabled configuration: with no
 * tracepoints registered there is nothing to defer past a grace period.
 */
static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func)
{ }
static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func)
{ }
#endif
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

View File

@@ -3261,6 +3261,18 @@ static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
bpf_link_dealloc(link);
}
static bool bpf_link_is_tracepoint(struct bpf_link *link)
{
/*
* Only these combinations support a tracepoint bpf_link.
* BPF_LINK_TYPE_TRACING raw_tp progs are hardcoded to use
* bpf_raw_tp_link_lops and thus dealloc_deferred(), see
* bpf_raw_tp_link_attach().
*/
return link->type == BPF_LINK_TYPE_RAW_TRACEPOINT ||
(link->type == BPF_LINK_TYPE_TRACING && link->attach_type == BPF_TRACE_RAW_TP);
}
static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
{
if (rcu_trace_implies_rcu_gp())
@@ -3279,16 +3291,25 @@ static void bpf_link_free(struct bpf_link *link)
if (link->prog)
ops->release(link);
if (ops->dealloc_deferred) {
/* Schedule BPF link deallocation, which will only then
/*
* Schedule BPF link deallocation, which will only then
* trigger putting BPF program refcount.
* If underlying BPF program is sleepable or BPF link's target
* attach hookpoint is sleepable or otherwise requires RCU GPs
* to ensure link and its underlying BPF program is not
* reachable anymore, we need to first wait for RCU tasks
* trace sync, and then go through "classic" RCU grace period
* trace sync, and then go through "classic" RCU grace period.
*
* For tracepoint BPF links, we need to go through SRCU grace
* period wait instead when non-faultable tracepoint is used. We
* don't need to chain SRCU grace period waits, however, for the
* faultable case, since it exclusively uses RCU Tasks Trace.
*/
if (link->sleepable || (link->prog && link->prog->sleepable))
call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
/* We need to do a SRCU grace period wait for non-faultable tracepoint BPF links. */
else if (bpf_link_is_tracepoint(link))
call_tracepoint_unregister_atomic(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
else
call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
} else if (ops->dealloc) {

View File

@@ -617,6 +617,13 @@ static bool is_atomic_load_insn(const struct bpf_insn *insn)
insn->imm == BPF_LOAD_ACQ;
}
static bool is_atomic_fetch_insn(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_STX &&
BPF_MODE(insn->code) == BPF_ATOMIC &&
(insn->imm & BPF_FETCH);
}
/* Convert a negative stack offset into its stack-slot index,
 * one slot per BPF_REG_SIZE bytes.
 */
static int __get_spi(s32 off)
{
	s32 slot = -off - 1;

	return slot / BPF_REG_SIZE;
}
@@ -4447,10 +4454,24 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
* dreg still needs precision before this insn
*/
}
} else if (class == BPF_LDX || is_atomic_load_insn(insn)) {
if (!bt_is_reg_set(bt, dreg))
} else if (class == BPF_LDX ||
is_atomic_load_insn(insn) ||
is_atomic_fetch_insn(insn)) {
u32 load_reg = dreg;
/*
* Atomic fetch operation writes the old value into
* a register (sreg or r0) and if it was tracked for
* precision, propagate to the stack slot like we do
* in regular ldx.
*/
if (is_atomic_fetch_insn(insn))
load_reg = insn->imm == BPF_CMPXCHG ?
BPF_REG_0 : sreg;
if (!bt_is_reg_set(bt, load_reg))
return 0;
bt_clear_reg(bt, dreg);
bt_clear_reg(bt, load_reg);
/* scalars can only be spilled into stack w/o losing precision.
* Load from any other memory can be zero extended.
@@ -7905,7 +7926,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
} else if (reg->type == CONST_PTR_TO_MAP) {
err = check_ptr_to_map_access(env, regs, regno, off, size, t,
value_regno);
} else if (base_type(reg->type) == PTR_TO_BUF) {
} else if (base_type(reg->type) == PTR_TO_BUF &&
!type_may_be_null(reg->type)) {
bool rdonly_mem = type_is_rdonly_mem(reg->type);
u32 *max_access;
@@ -19915,8 +19937,13 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
* since someone could have accessed through (ptr - k), or
* even done ptr -= k in a register, to get a safe access.
*/
if (rold->range > rcur->range)
if (rold->range < 0 || rcur->range < 0) {
/* special case for [BEYOND|AT]_PKT_END */
if (rold->range != rcur->range)
return false;
} else if (rold->range > rcur->range) {
return false;
}
/* If the offsets don't match, we can't trust our alignment;
* nor can we be sure that we won't fall out of range.
*/

View File

@@ -2752,6 +2752,10 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (!is_kprobe_multi(prog))
return -EINVAL;
/* kprobe_multi is not allowed to be sleepable. */
if (prog->sleepable)
return -EINVAL;
/* Writing to context is not allowed for kprobes. */
if (prog->aux->kprobe_write_ctx)
return -EINVAL;

View File

@@ -1267,17 +1267,20 @@ out:
static void sk_psock_verdict_data_ready(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
const struct proto_ops *ops;
const struct proto_ops *ops = NULL;
struct socket *sock;
int copied;
trace_sk_data_ready(sk);
if (unlikely(!sock))
return;
ops = READ_ONCE(sock->ops);
rcu_read_lock();
sock = READ_ONCE(sk->sk_socket);
if (likely(sock))
ops = READ_ONCE(sock->ops);
rcu_read_unlock();
if (!ops || !ops->read_skb)
return;
copied = ops->read_skb(sk, sk_psock_verdict_recv);
if (copied >= 0) {
struct sk_psock *psock;

View File

@@ -5,6 +5,13 @@
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"
/* Single-entry u64 array map; used by the *_map_precision tests below as an
 * atomic-op target that lives outside the stack.
 */
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} precision_map SEC(".maps");
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
@@ -301,4 +308,338 @@ __naked int bpf_neg_5(void)
::: __clobber_all);
}
/* Precision on the fetched value (r2) of atomic64_fetch_add must propagate
 * back through the atomic insn into the stack slot and the register that
 * originally filled it (r1), as the __msg expectations verify.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_add_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = 0;"
".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
"r3 = r10;"
"r3 += r2;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(fetch_add_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* Same as the fetch_add case, but for atomic64_xchg: the exchanged-out value
 * in r2 carries precision back into the stack slot.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_xchg((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_xchg_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = 0;"
".8byte %[xchg_insn];" /* r2 = atomic_xchg(*(u64 *)(r10 - 8), r2) */
"r3 = r10;"
"r3 += r2;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(xchg_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* Precision propagation through atomic64_fetch_or's returned old value. */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_or((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_or_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = 0;"
".8byte %[fetch_or_insn];" /* r2 = atomic_fetch_or(*(u64 *)(r10 - 8), r2) */
"r3 = r10;"
"r3 += r2;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(fetch_or_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* Precision propagation through atomic64_fetch_and's returned old value. */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_and((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_and_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = 0;"
".8byte %[fetch_and_insn];" /* r2 = atomic_fetch_and(*(u64 *)(r10 - 8), r2) */
"r3 = r10;"
"r3 += r2;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(fetch_and_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* Precision propagation through atomic64_fetch_xor's returned old value. */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_xor((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_xor_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = 0;"
".8byte %[fetch_xor_insn];" /* r2 = atomic_fetch_xor(*(u64 *)(r10 - 8), r2) */
"r3 = r10;"
"r3 += r2;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(fetch_xor_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* cmpxchg writes the old value into r0 (not sreg), so precision on r0 must
 * propagate back through the atomic insn into the stack slot.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r3 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_cmpxchg_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r0 = 0;"
"r2 = 0;"
".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
"r3 = r10;"
"r3 += r0;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(cmpxchg_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* Regression test for dual precision: Both the fetched value (r2) and
 * a reread of the same stack slot (r3) are tracked for precision. After
 * the atomic operation, the stack slot is STACK_MISC. Thus, the ldx at
 * insn 4 does NOT set INSN_F_STACK_ACCESS. Precision for the stack slot
 * propagates solely through the atomic fetch's load side (insn 3).
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r2,r3 stack= before 4: (79) r3 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_fetch_add_dual_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = 0;"
".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
"r3 = *(u64 *)(r10 - 8);"
"r4 = r2;"
"r4 += r3;"
"r4 &= 7;" /* bound r4 so the variable-offset stack access below verifies */
"r5 = r10;"
"r5 += r4;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(fetch_add_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* Dual-precision variant for cmpxchg: both the old value returned in r0 and
 * a reread of the same stack slot (r3) are tracked for precision.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0,r3 stack= before 5: (79) r3 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 8")
__msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
__msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
__naked int bpf_atomic_cmpxchg_dual_precision(void)
{
asm volatile (
"r1 = 8;"
"*(u64 *)(r10 - 8) = r1;"
"r0 = 8;"
"r2 = 0;"
".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
"r3 = *(u64 *)(r10 - 8);"
"r4 = r0;"
"r4 += r3;"
"r4 &= 7;" /* bound r4 so the variable-offset stack access below verifies */
"r5 = r10;"
"r5 += r4;" /* mark_precise */
"r0 = 0;"
"exit;"
:
: __imm_insn(cmpxchg_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
: __clobber_all);
}
/* fetch_add on a map value (not the stack): backtracking must stop at the
 * atomic insn without falling back to marking all scalars precise.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
__msg("mark_precise: frame0: regs=r1 stack= before 9: (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_fetch_add_map_precision(void)
{
asm volatile (
"r1 = 0;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = r10;"
"r2 += -8;"
"r1 = %[precision_map] ll;"
"call %[bpf_map_lookup_elem];"
"if r0 == 0 goto 1f;"
"r1 = 0;"
".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u64 *)(r0 + 0), r1) */
"r1 &= 7;"
"r2 = r10;"
"r2 += r1;" /* mark_precise */
"1: r0 = 0;"
"exit;"
:
: __imm_addr(precision_map),
__imm(bpf_map_lookup_elem),
__imm_insn(fetch_add_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
: __clobber_all);
}
/* cmpxchg on a map value: precision on r0 (old value) must backtrack cleanly
 * without the all-scalars-precise fallback.
 */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (db) r0 = atomic64_cmpxchg((u64 *)(r6 +0), r0, r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_cmpxchg_map_precision(void)
{
asm volatile (
"r1 = 0;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = r10;"
"r2 += -8;"
"r1 = %[precision_map] ll;"
"call %[bpf_map_lookup_elem];"
"if r0 == 0 goto 1f;"
"r6 = r0;" /* keep the map-value pointer; r0 is clobbered by cmpxchg */
"r0 = 0;"
"r1 = 0;"
".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r6 + 0), r0, r1) */
"r0 &= 7;"
"r2 = r10;"
"r2 += r0;" /* mark_precise */
"1: r0 = 0;"
"exit;"
:
: __imm_addr(precision_map),
__imm(bpf_map_lookup_elem),
__imm_insn(cmpxchg_insn,
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
: __clobber_all);
}
/* 32-bit (BPF_W) variant of the map fetch_add precision test. */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
__msg("mark_precise: frame0: regs=r1 stack= before 9: (c3) r1 = atomic_fetch_add((u32 *)(r0 +0), r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_fetch_add_32bit_precision(void)
{
asm volatile (
"r1 = 0;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = r10;"
"r2 += -8;"
"r1 = %[precision_map] ll;"
"call %[bpf_map_lookup_elem];"
"if r0 == 0 goto 1f;"
"r1 = 0;"
".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u32 *)(r0 + 0), r1) */
"r1 &= 7;"
"r2 = r10;"
"r2 += r1;" /* mark_precise */
"1: r0 = 0;"
"exit;"
:
: __imm_addr(precision_map),
__imm(bpf_map_lookup_elem),
__imm_insn(fetch_add_insn,
BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
: __clobber_all);
}
/* 32-bit (BPF_W) variant of the map cmpxchg precision test. */
SEC("?raw_tp")
__success __log_level(2)
__msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (c3) r0 = atomic_cmpxchg((u32 *)(r6 +0), r0, r1)")
__not_msg("falling back to forcing all scalars precise")
__naked int bpf_atomic_cmpxchg_32bit_precision(void)
{
asm volatile (
"r1 = 0;"
"*(u64 *)(r10 - 8) = r1;"
"r2 = r10;"
"r2 += -8;"
"r1 = %[precision_map] ll;"
"call %[bpf_map_lookup_elem];"
"if r0 == 0 goto 1f;"
"r6 = r0;" /* keep the map-value pointer; r0 is clobbered by cmpxchg */
"r0 = 0;"
"r1 = 0;"
".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u32 *)(r6 + 0), r0, r1) */
"r0 &= 7;"
"r2 = r10;"
"r2 += r0;" /* mark_precise */
"1: r0 = 0;"
"exit;"
:
: __imm_addr(precision_map),
__imm(bpf_map_lookup_elem),
__imm_insn(cmpxchg_insn,
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
: __clobber_all);
}
char _license[] SEC("license") = "GPL";