mirror of
https://github.com/torvalds/linux.git
synced 2026-04-18 06:44:00 -04:00
bpf: simplify liveness to use (callsite, depth) keyed func_instances
Rework func_instance identification and remove the dynamic liveness
API, completing the transition to fully static stack liveness analysis.
Replace callchain-based func_instance keys with (callsite, depth)
pairs. The full callchain (all ancestor callsites) is no longer part
of the hash key; only the immediate callsite and the call depth
matter. This does not lose precision in practice and simplifies the
data structure significantly: struct callchain is removed entirely,
func_instance stores just callsite, depth.
Drop must_write_acc propagation. Previously, must_write marks were
accumulated across successors and propagated to the caller via
propagate_to_outer_instance(). Instead, callee entry liveness
(live_before at subprog start) is pulled directly back to the
caller's callsite in analyze_subprog() after each callee returns.
Since (callsite, depth) instances are shared across different call
chains that invoke the same subprog at the same depth, must_write
marks from one call may be stale for another. To handle this,
analyze_subprog() records the new results into a fresh instance
(allocated via fresh_instance()) whenever the instance was already
visited (its must_write_initialized flag is set); merge_instances()
then combines the two results: may_read is unioned, must_write is
intersected. This ensures that only slots written on ALL paths
through all call sites are marked as guaranteed writes.
This replaces commit_stack_write_marks() logic.
Skip recursive descent into callees that receive no FP-derived
arguments (has_fp_args() check). This is needed because global
subprogram calls can push depth beyond MAX_CALL_FRAMES (max depth
is 64 for global calls but only 8 frames are accommodated for FP
passing). It also handles the case where a callback subprog cannot be
determined by argument tracking: such callbacks will be processed by
analyze_subprog() at depth 0 independently.
Update lookup_instance() (used by is_live_before queries) to search
for the func_instance with maximal depth at the corresponding
callsite, walking depth downward from frameno to 0. This accounts for
the fact that instance depth no longer corresponds 1:1 to
bpf_verifier_state->curframe, since skipped non-FP calls create gaps.
Remove the dynamic public liveness API from verifier.c:
- bpf_mark_stack_{read,write}(), bpf_reset/commit_stack_write_marks()
- bpf_update_live_stack(), bpf_reset_live_stack_callchain()
- All call sites in check_stack_{read,write}_fixed_off(),
check_stack_range_initialized(), mark_stack_slot_obj_read(),
mark/unmark_stack_slots_{dynptr,iter,irq_flag}()
- The per-instruction write mark accumulation in do_check()
- The bpf_update_live_stack() call in prepare_func_exit()
mark_stack_read() and mark_stack_write() become static functions in
liveness.c, called only from the static analysis pass. The
func_instance->updated and must_write_dropped flags are removed.
Remove spis_single_slot(), spis_one_bit() helpers from bpf_verifier.h
as they are no longer used.
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Tested-by: Paul Chaignon <paul.chaignon@gmail.com>
Link: https://lore.kernel.org/r/20260410-patch-set-v4-9-5d4eecb343db@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Committed by: Alexei Starovoitov
Parent: fed53dbcdb
Commit: 6762e3a0bc
File diff suppressed because it is too large
Load Diff
@@ -830,9 +830,6 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
|
||||
state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
|
||||
}
|
||||
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - 1));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -847,9 +844,6 @@ static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_stat
|
||||
|
||||
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
|
||||
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
|
||||
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - 1));
|
||||
}
|
||||
|
||||
static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
|
||||
@@ -986,9 +980,6 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
|
||||
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
|
||||
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
|
||||
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - 1));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1114,7 +1105,6 @@ static int mark_stack_slots_iter(struct bpf_verifier_env *env,
|
||||
for (j = 0; j < BPF_REG_SIZE; j++)
|
||||
slot->slot_type[j] = STACK_ITER;
|
||||
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - i));
|
||||
mark_stack_slot_scratched(env, spi - i);
|
||||
}
|
||||
|
||||
@@ -1143,7 +1133,6 @@ static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
|
||||
for (j = 0; j < BPF_REG_SIZE; j++)
|
||||
slot->slot_type[j] = STACK_INVALID;
|
||||
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi - i));
|
||||
mark_stack_slot_scratched(env, spi - i);
|
||||
}
|
||||
|
||||
@@ -1233,7 +1222,6 @@ static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env,
|
||||
slot = &state->stack[spi];
|
||||
st = &slot->spilled_ptr;
|
||||
|
||||
bpf_mark_stack_write(env, reg->frameno, spis_single_slot(spi));
|
||||
__mark_reg_known_zero(st);
|
||||
st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
|
||||
st->ref_obj_id = id;
|
||||
@@ -1289,8 +1277,6 @@ static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_r
|
||||
|
||||
__mark_reg_not_init(env, st);
|
||||
|
||||
bpf_mark_stack_write(env, reg->frameno, spis_single_slot(spi));
|
||||
|
||||
for (i = 0; i < BPF_REG_SIZE; i++)
|
||||
slot->slot_type[i] = STACK_INVALID;
|
||||
|
||||
@@ -3866,15 +3852,10 @@ out:
|
||||
static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
|
||||
int spi, int nr_slots)
|
||||
{
|
||||
int err, i;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_slots; i++) {
|
||||
err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx,
|
||||
spis_single_slot(spi - i));
|
||||
if (err)
|
||||
return err;
|
||||
for (i = 0; i < nr_slots; i++)
|
||||
mark_stack_slot_scratched(env, spi - i);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -5425,16 +5406,6 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (!(off % BPF_REG_SIZE) && size == BPF_REG_SIZE)
|
||||
/* 8-byte aligned, 8-byte write */
|
||||
bpf_mark_stack_write(env, state->frameno, spis_single_slot(spi));
|
||||
else if (!(off % BPF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
|
||||
/* 8-byte aligned, 4-byte write */
|
||||
bpf_mark_stack_write(env, state->frameno, spis_one_bit(spi * 2 + 1));
|
||||
else if (!(off % BPF_HALF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
|
||||
/* 4-byte aligned, 4-byte write */
|
||||
bpf_mark_stack_write(env, state->frameno, spis_one_bit(spi * 2));
|
||||
|
||||
check_fastcall_stack_contract(env, state, insn_idx, off);
|
||||
mark_stack_slot_scratched(env, spi);
|
||||
if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
|
||||
@@ -5691,26 +5662,12 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
|
||||
struct bpf_reg_state *reg;
|
||||
u8 *stype, type;
|
||||
int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
|
||||
spis_t mask;
|
||||
int err;
|
||||
|
||||
stype = reg_state->stack[spi].slot_type;
|
||||
reg = ®_state->stack[spi].spilled_ptr;
|
||||
|
||||
mark_stack_slot_scratched(env, spi);
|
||||
check_fastcall_stack_contract(env, state, env->insn_idx, off);
|
||||
if (!(off % BPF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
|
||||
/* 8-byte aligned, 4-byte read */
|
||||
mask = spis_one_bit(spi * 2 + 1);
|
||||
else if (!(off % BPF_HALF_REG_SIZE) && size == BPF_HALF_REG_SIZE)
|
||||
/* 4-byte aligned, 4-byte read */
|
||||
mask = spis_one_bit(spi * 2);
|
||||
else
|
||||
mask = spis_single_slot(spi);
|
||||
|
||||
err = bpf_mark_stack_read(env, reg_state->frameno, env->insn_idx, mask);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (is_spilled_reg(®_state->stack[spi])) {
|
||||
u8 spill_size = 1;
|
||||
@@ -8540,18 +8497,7 @@ static int check_stack_range_initialized(
|
||||
}
|
||||
return -EACCES;
|
||||
mark:
|
||||
/* reading any byte out of 8-byte 'spill_slot' will cause
|
||||
* the whole slot to be marked as 'read'
|
||||
*/
|
||||
err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx,
|
||||
spis_single_slot(spi));
|
||||
if (err)
|
||||
return err;
|
||||
/* We do not call bpf_mark_stack_write(), as we can not
|
||||
* be sure that whether stack slot is written to or not. Hence,
|
||||
* we must still conservatively propagate reads upwards even if
|
||||
* helper may write to the entire memory range.
|
||||
*/
|
||||
;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -11171,8 +11117,6 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
|
||||
/* and go analyze first insn of the callee */
|
||||
*insn_idx = env->subprog_info[subprog].start - 1;
|
||||
|
||||
bpf_reset_live_stack_callchain(env);
|
||||
|
||||
if (env->log.level & BPF_LOG_LEVEL) {
|
||||
verbose(env, "caller:\n");
|
||||
print_verifier_state(env, state, caller->frameno, true);
|
||||
@@ -11457,10 +11401,6 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
|
||||
bool in_callback_fn;
|
||||
int err;
|
||||
|
||||
err = bpf_update_live_stack(env);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
callee = state->frame[state->curframe];
|
||||
r0 = &callee->regs[BPF_REG_0];
|
||||
if (r0->type == PTR_TO_STACK) {
|
||||
@@ -21799,7 +21739,7 @@ static int do_check(struct bpf_verifier_env *env)
|
||||
for (;;) {
|
||||
struct bpf_insn *insn;
|
||||
struct bpf_insn_aux_data *insn_aux;
|
||||
int err, marks_err;
|
||||
int err;
|
||||
|
||||
/* reset current history entry on each new instruction */
|
||||
env->cur_hist_ent = NULL;
|
||||
@@ -21913,15 +21853,7 @@ static int do_check(struct bpf_verifier_env *env)
|
||||
if (state->speculative && insn_aux->nospec)
|
||||
goto process_bpf_exit;
|
||||
|
||||
err = bpf_reset_stack_write_marks(env, env->insn_idx);
|
||||
if (err)
|
||||
return err;
|
||||
err = do_check_insn(env, &do_print_state);
|
||||
if (err >= 0 || error_recoverable_with_nospec(err)) {
|
||||
marks_err = bpf_commit_stack_write_marks(env);
|
||||
if (marks_err)
|
||||
return marks_err;
|
||||
}
|
||||
if (error_recoverable_with_nospec(err) && state->speculative) {
|
||||
/* Prevent this speculative path from ever reaching the
|
||||
* insn that would have been unsafe to execute.
|
||||
@@ -21962,9 +21894,6 @@ static int do_check(struct bpf_verifier_env *env)
|
||||
process_bpf_exit:
|
||||
mark_verifier_state_scratched(env);
|
||||
err = update_branch_counts(env, env->cur_state);
|
||||
if (err)
|
||||
return err;
|
||||
err = bpf_update_live_stack(env);
|
||||
if (err)
|
||||
return err;
|
||||
err = pop_stack(env, &prev_insn_idx, &env->insn_idx,
|
||||
|
||||
Reference in New Issue
Block a user