The verifier assigns ids to scalar registers/stack slots when they are
linked through a mov or stack spill/fill instruction. These ids are
later used to propagate newly found bounds from one register to all
registers that share the same id. The verifier also compares the ids of
these registers in the current and cached states when making pruning
decisions.

When an id becomes singular (i.e., only a single register or stack slot
has that id), it can no longer participate in bounds propagation.
During comparisons between current and cached states for pruning
decisions, however, such stale ids can prevent pruning of otherwise
equivalent states.

Find and clear all singular ids before caching a state in
is_state_visited(). struct bpf_idset, which is currently unused, has
been repurposed for this use case.

Signed-off-by: Puranjay Mohan
---
A standalone sketch of the two-pass idea is appended below the patch
for reference.

 include/linux/bpf_verifier.h |  7 +++-
 kernel/bpf/verifier.c        | 75 ++++++++++++++++++++++++++++++++++++
 2 files changed, 80 insertions(+), 2 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 8355b585cd18..746025df82c8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -697,8 +697,11 @@ struct bpf_idmap {
 };
 
 struct bpf_idset {
-	u32 count;
-	u32 ids[BPF_ID_MAP_SIZE];
+	u32 num_ids;
+	struct {
+		u32 id;
+		u32 cnt;
+	} entries[BPF_ID_MAP_SIZE];
 };
 
 /* see verifier.c:compute_scc_callchain() */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 17b499956156..9324d96aae92 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -19461,6 +19461,78 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
  * doesn't meant that the states are DONE. The verifier has to compare
  * the callsites
  */
+
+/* Find id in idset and increment its count, or add new entry */
+static void idset_cnt_inc(struct bpf_idset *idset, u32 id)
+{
+	u32 i;
+
+	for (i = 0; i < idset->num_ids; i++) {
+		if (idset->entries[i].id == id) {
+			idset->entries[i].cnt++;
+			return;
+		}
+	}
+	/* New id */
+	if (idset->num_ids < BPF_ID_MAP_SIZE) {
+		idset->entries[idset->num_ids].id = id;
+		idset->entries[idset->num_ids].cnt = 1;
+		idset->num_ids++;
+	}
+}
+
+/* Find id in idset and return its count, or 0 if not found */
+static u32 idset_cnt_get(struct bpf_idset *idset, u32 id)
+{
+	u32 i;
+
+	for (i = 0; i < idset->num_ids; i++) {
+		if (idset->entries[i].id == id)
+			return idset->entries[i].cnt;
+	}
+	return 0;
+}
+
+/*
+ * Clear singular scalar ids in a state.
+ * A register with a non-zero id is called singular if no other register shares
+ * the same base id. Such registers can be treated as independent (id=0).
+ *
+ * Uses two passes to achieve O(N) complexity:
+ * 1. First pass: count occurrences of each unique id
+ * 2. Second pass: clear ids with count == 1 (singular)
+ */
+static void clear_singular_ids(struct bpf_verifier_env *env,
+			       struct bpf_verifier_state *st)
+{
+	struct bpf_idset *idset = &env->idset_scratch;
+	struct bpf_func_state *func;
+	struct bpf_reg_state *reg;
+
+	idset->num_ids = 0;
+
+	/* First pass: count occurrences of each unique id */
+	bpf_for_each_reg_in_vstate(st, func, reg, ({
+		if (reg->type != SCALAR_VALUE)
+			continue;
+		if (!reg->id)
+			continue;
+		idset_cnt_inc(idset, reg->id & ~BPF_ADD_CONST);
+	}));
+
+	/* Second pass: clear singular ids (count == 1) */
+	bpf_for_each_reg_in_vstate(st, func, reg, ({
+		if (reg->type != SCALAR_VALUE)
+			continue;
+		if (!reg->id)
+			continue;
+		if (idset_cnt_get(idset, reg->id & ~BPF_ADD_CONST) == 1) {
+			reg->id = 0;
+			reg->off = 0;
+		}
+	}));
+}
+
 static void clean_live_states(struct bpf_verifier_env *env, int insn,
 			      struct bpf_verifier_state *cur)
 {
@@ -20459,6 +20531,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	if (env->bpf_capable)
 		mark_all_scalars_imprecise(env, cur);
 
+	/* find singular ids and clear them */
+	clear_singular_ids(env, cur);
+
 	/* add new state to the head of linked list */
 	new = &new_sl->state;
 	err = copy_verifier_state(new, cur);
--
2.47.3
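
For reference, here is a small standalone sketch of the two-pass
counting/clearing idea used by clear_singular_ids() above. This is
illustration only, not kernel code: the types and names (struct toy_reg,
struct toy_idset, MAX_IDS, NUM_REGS) are made up, and the model ignores
stack slots and the BPF_ADD_CONST flag that the real patch handles.

#include <stdio.h>

#define MAX_IDS		64
#define NUM_REGS	8

struct toy_reg {
	unsigned int id;	/* 0 means "not linked to any other register" */
};

struct toy_idset {
	unsigned int num_ids;
	struct {
		unsigned int id;
		unsigned int cnt;
	} entries[MAX_IDS];
};

/* Pass 1 helper: bump the count for @id, adding a new entry if needed. */
static void toy_idset_inc(struct toy_idset *s, unsigned int id)
{
	unsigned int i;

	for (i = 0; i < s->num_ids; i++) {
		if (s->entries[i].id == id) {
			s->entries[i].cnt++;
			return;
		}
	}
	if (s->num_ids < MAX_IDS) {
		s->entries[s->num_ids].id = id;
		s->entries[s->num_ids].cnt = 1;
		s->num_ids++;
	}
}

/* Pass 2 helper: how many registers carry @id? Returns 0 if never seen. */
static unsigned int toy_idset_get(const struct toy_idset *s, unsigned int id)
{
	unsigned int i;

	for (i = 0; i < s->num_ids; i++) {
		if (s->entries[i].id == id)
			return s->entries[i].cnt;
	}
	return 0;
}

int main(void)
{
	/*
	 * id 7 is shared by two registers (e.g. after a reg-to-reg mov),
	 * while id 9 is left singular (its partner was later overwritten).
	 */
	struct toy_reg regs[NUM_REGS] = {
		{ .id = 7 }, { .id = 0 }, { .id = 7 }, { .id = 9 },
	};
	struct toy_idset set = { .num_ids = 0 };
	unsigned int i;

	/* Pass 1: count how many registers share each non-zero id */
	for (i = 0; i < NUM_REGS; i++) {
		if (regs[i].id)
			toy_idset_inc(&set, regs[i].id);
	}

	/*
	 * Pass 2: an id held by exactly one register carries no linking
	 * information, so drop it; this is what lets otherwise-equivalent
	 * cached states match during pruning.
	 */
	for (i = 0; i < NUM_REGS; i++) {
		if (regs[i].id && toy_idset_get(&set, regs[i].id) == 1)
			regs[i].id = 0;
	}

	for (i = 0; i < NUM_REGS; i++)
		printf("reg%u id=%u\n", i, regs[i].id);

	return 0;
}

Compiled and run, the sketch keeps id 7 on the two linked registers and
clears the singular id 9, mirroring what clear_singular_ids() does to a
verifier state before it is cached.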