#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
#define BPF_COMPLEXITY_LIMIT_STATES 64
/* True when the instruction at @insn_idx is a may_goto instruction. */
static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];

	return bpf_is_may_goto_insn(insn);
}
/* Was the instruction at @insn_idx marked as an iterator "next" call? */
static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
{
	bool is_next = env->insn_aux_data[insn_idx].is_iter_next;

	return is_next;
}
/* Track the high-water mark of live verifier states for statistics. */
static void update_peak_states(struct bpf_verifier_env *env)
{
	u32 total = env->explored_states_size + env->free_list_size +
		    env->num_backedges;

	if (total > env->peak_states)
		env->peak_states = total;
}
/* Map a state's parent back to its enclosing list entry, or NULL if none. */
static struct bpf_verifier_state_list *state_parent_as_list(struct bpf_verifier_state *st)
{
	struct bpf_verifier_state *parent = st->parent;

	return parent ? container_of(parent, struct bpf_verifier_state_list, state)
		      : NULL;
}
static bool incomplete_read_marks(struct bpf_verifier_env *env,
struct bpf_verifier_state *st);
/*
 * Release a free-list entry, but only once it is truly dead: it must be on
 * the free list, have no branches left to explore, and have no pending SCC
 * backedges that still need its read marks (see incomplete_read_marks()).
 * Otherwise the entry is left alone and freed later.
 */
static void maybe_free_verifier_state(struct bpf_verifier_env *env,
				      struct bpf_verifier_state_list *sl)
{
	if (!sl->in_free_list
	    || sl->state.branches != 0
	    || incomplete_read_marks(env, &sl->state))
		return;
	list_del(&sl->node);
	bpf_free_verifier_state(&sl->state, false);
	kfree(sl);
	env->free_list_size--;
}
/*
 * Build the call chain that leads to the innermost SCC on the verification
 * path of @st: the callsite of every frame up to the first frame whose
 * instruction belongs to an SCC is recorded, followed by that SCC number.
 * Returns false when no frame on the path is inside an SCC (the callchain
 * contents are then meaningless, though zeroed).
 */
static bool compute_scc_callchain(struct bpf_verifier_env *env,
				  struct bpf_verifier_state *st,
				  struct bpf_scc_callchain *callchain)
{
	u32 i, scc, insn_idx;

	memset(callchain, 0, sizeof(*callchain));
	for (i = 0; i <= st->curframe; i++) {
		insn_idx = bpf_frame_insn_idx(st, i);
		scc = env->insn_aux_data[insn_idx].scc;
		if (scc) {
			/* Innermost SCC reached; chain is complete. */
			callchain->scc = scc;
			break;
		} else if (i < st->curframe) {
			callchain->callsites[i] = insn_idx;
		} else {
			/* Reached the current frame without hitting an SCC. */
			return false;
		}
	}
	return true;
}
/*
 * Find the bpf_scc_visit instance for @callchain within the SCC's visit
 * table. Returns NULL when the SCC has no visit info allocated or no visit
 * with an identical callchain exists.
 */
static struct bpf_scc_visit *scc_visit_lookup(struct bpf_verifier_env *env,
					      struct bpf_scc_callchain *callchain)
{
	struct bpf_scc_info *info = env->scc_info[callchain->scc];
	struct bpf_scc_visit *visits;
	u32 i;

	/* Check @info for NULL before touching info->visits: the previous
	 * code computed the member address first, which is undefined
	 * behavior on a NULL pointer.
	 */
	if (!info)
		return NULL;
	visits = info->visits;
	for (i = 0; i < info->num_visits; i++)
		if (memcmp(callchain, &visits[i].callchain, sizeof(*callchain)) == 0)
			return &visits[i];
	return NULL;
}
/*
 * Append a zero-initialized bpf_scc_visit for @callchain to the SCC's visit
 * table, growing the table by one entry. Returns the new entry or NULL on
 * allocation failure; on failure the previously allocated table remains
 * valid in env->scc_info[scc].
 */
static struct bpf_scc_visit *scc_visit_alloc(struct bpf_verifier_env *env,
					     struct bpf_scc_callchain *callchain)
{
	struct bpf_scc_visit *visit;
	struct bpf_scc_info *info;
	u32 scc, num_visits;
	u64 new_sz;

	scc = callchain->scc;
	info = env->scc_info[scc];
	num_visits = info ? info->num_visits : 0;
	/* Header plus one more visit slot (visits is a trailing array). */
	new_sz = sizeof(*info) + sizeof(struct bpf_scc_visit) * (num_visits + 1);
	info = kvrealloc(env->scc_info[scc], new_sz, GFP_KERNEL_ACCOUNT);
	if (!info)
		return NULL;
	env->scc_info[scc] = info;
	info->num_visits = num_visits + 1;
	visit = &info->visits[num_visits];
	memset(visit, 0, sizeof(*visit));
	memcpy(&visit->callchain, callchain, sizeof(*callchain));
	return visit;
}
/*
 * Render @callchain as "(cs1,cs2,...,scc)" into env->tmp_str_buf for log
 * output. Returns env->tmp_str_buf; output is silently truncated if it
 * does not fit in TMP_STR_BUF_LEN.
 */
static char *format_callchain(struct bpf_verifier_env *env, struct bpf_scc_callchain *callchain)
{
	char *buf = env->tmp_str_buf;
	int i, delta = 0;

	delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "(");
	for (i = 0; i < ARRAY_SIZE(callchain->callsites); i++) {
		if (!callchain->callsites[i])
			break;
		/* snprintf() returns the would-be length, so on truncation
		 * delta can exceed the buffer size; stop before the size
		 * argument (TMP_STR_BUF_LEN - delta) would go negative and
		 * wrap to a huge size_t.
		 */
		if (delta >= TMP_STR_BUF_LEN)
			break;
		delta += snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "%u,",
				  callchain->callsites[i]);
	}
	if (delta < TMP_STR_BUF_LEN)
		snprintf(buf + delta, TMP_STR_BUF_LEN - delta, "%u)", callchain->scc);
	return env->tmp_str_buf;
}
/*
 * If @st lies on a verification path that enters an SCC, make sure a visit
 * instance exists for the corresponding call chain and, when this is the
 * first state to enter it, record @st as the SCC entry state.
 * Returns 0 on success or when no SCC is on the path, -ENOMEM on OOM.
 */
static int maybe_enter_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct bpf_scc_callchain *callchain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, callchain))
		return 0;
	visit = scc_visit_lookup(env, callchain);
	/* Allocate lazily on first entry for this call chain. */
	visit = visit ?: scc_visit_alloc(env, callchain);
	if (!visit)
		return -ENOMEM;
	if (!visit->entry_state) {
		visit->entry_state = st;
		if (env->log.level & BPF_LOG_LEVEL2)
			verbose(env, "SCC enter %s\n", format_callchain(env, callchain));
	}
	return 0;
}
static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visit *visit);
/*
 * Counterpart of maybe_enter_scc(), called when all branches of @st have
 * been explored. If @st is the recorded entry state of its SCC visit, the
 * SCC is fully explored: drop its backedge accounting and propagate
 * precision marks along the accumulated backedges.
 */
static int maybe_exit_scc(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct bpf_scc_callchain *callchain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, callchain))
		return 0;
	visit = scc_visit_lookup(env, callchain);
	if (!visit) {
		/* Speculative paths may legitimately lack visit info;
		 * for any other path this is an internal inconsistency.
		 */
		if (!st->speculative) {
			verifier_bug(env, "scc exit: no visit info for call chain %s",
				     format_callchain(env, callchain));
			return -EFAULT;
		}
		return 0;
	}
	/* Only the entry state closes the visit. */
	if (visit->entry_state != st)
		return 0;
	if (env->log.level & BPF_LOG_LEVEL2)
		verbose(env, "SCC exit %s\n", format_callchain(env, callchain));
	visit->entry_state = NULL;
	env->num_backedges -= visit->num_backedges;
	visit->num_backedges = 0;
	update_peak_states(env);
	return propagate_backedges(env, visit);
}
/*
 * Link @backedge into the visit instance of the SCC that @st belongs to.
 * The backedge is later used by propagate_backedges() when the SCC entry
 * state is fully explored. Both a missing SCC on the path and missing
 * visit info are internal errors here (unlike in maybe_exit_scc()).
 */
static int add_scc_backedge(struct bpf_verifier_env *env,
			    struct bpf_verifier_state *st,
			    struct bpf_scc_backedge *backedge)
{
	struct bpf_scc_callchain *callchain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, callchain)) {
		verifier_bug(env, "add backedge: no SCC in verification path, insn_idx %d",
			     st->insn_idx);
		return -EFAULT;
	}
	visit = scc_visit_lookup(env, callchain);
	if (!visit) {
		verifier_bug(env, "add backedge: no visit info for call chain %s",
			     format_callchain(env, callchain));
		return -EFAULT;
	}
	if (env->log.level & BPF_LOG_LEVEL2)
		verbose(env, "SCC backedge %s\n", format_callchain(env, callchain));
	/* Push onto the visit's singly-linked backedge list. */
	backedge->next = visit->backedges;
	visit->backedges = backedge;
	visit->num_backedges++;
	env->num_backedges++;
	update_peak_states(env);
	return 0;
}
/*
 * Does @st belong to an SCC visit that still has pending backedges?
 * If so, its read/precision marks are not final yet and the state must
 * not be freed.
 */
static bool incomplete_read_marks(struct bpf_verifier_env *env,
				  struct bpf_verifier_state *st)
{
	struct bpf_scc_callchain *chain = &env->callchain_buf;
	struct bpf_scc_visit *visit;

	if (!compute_scc_callchain(env, st, chain))
		return false;
	visit = scc_visit_lookup(env, chain);
	return visit && visit->backedges;
}
/*
 * Decrement the branch counter of @st and of every ancestor that becomes
 * fully explored as a result. A state reaching zero branches may trigger
 * SCC exit processing, and may allow an ancestor sitting on the free list
 * to be released. Freeing of a list entry is deferred by one loop
 * iteration so the parent pointer is read before its storage can go away.
 */
int bpf_update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	struct bpf_verifier_state_list *sl = NULL, *parent_sl;
	struct bpf_verifier_state *parent;
	int err;

	while (st) {
		u32 br = --st->branches;

		/* A negative count means branch accounting is broken. */
		verifier_bug_if((int)br < 0, env, "%s:branches_to_explore=%d", __func__, br);
		if (br)
			break;
		err = maybe_exit_scc(env, st);
		if (err)
			return err;
		parent = st->parent;
		parent_sl = state_parent_as_list(st);
		/* Free the previous iteration's entry, now that we no
		 * longer need anything reachable through it.
		 */
		if (sl)
			maybe_free_verifier_state(env, sl);
		st = parent;
		sl = parent_sl;
	}
	return 0;
}
static bool range_within(const struct bpf_reg_state *old,
const struct bpf_reg_state *cur)
{
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
old->smin_value <= cur->smin_value &&
old->smax_value >= cur->smax_value &&
old->u32_min_value <= cur->u32_min_value &&
old->u32_max_value >= cur->u32_max_value &&
old->s32_min_value <= cur->s32_min_value &&
old->s32_max_value >= cur->s32_max_value;
}
/*
 * Check that @old_id and @cur_id form a consistent one-to-one mapping
 * within @idmap. Records the pair on first sight; rejects any id that
 * would need to map to two different peers.
 */
static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{
	struct bpf_id_pair *pairs = idmap->map;
	unsigned int i;

	/* Either both ids are set or neither is. */
	if (!!old_id != !!cur_id)
		return false;
	/* Two zero ids trivially match. */
	if (!old_id)
		return true;
	/* Look for an existing mapping of either id. */
	for (i = 0; i < idmap->cnt; i++) {
		if (pairs[i].old == old_id)
			return pairs[i].cur == cur_id;
		if (pairs[i].cur == cur_id)
			return false;
	}
	/* First time this pair is seen: record it if there is room. */
	if (idmap->cnt >= BPF_ID_MAP_SIZE) {
		WARN_ON_ONCE(1);
		return false;
	}
	pairs[idmap->cnt].old = old_id;
	pairs[idmap->cnt].cur = cur_id;
	idmap->cnt++;
	return true;
}
/*
 * Like check_ids(), but for scalar ids: an old id must be matched by some
 * id in the current state -- when the current register has no id, a fresh
 * temporary id is generated so the mapping stays consistent for the rest
 * of the state comparison.
 */
static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
{
	if (!old_id)
		return true;
	/* Assign a unique temporary id to id-less cur registers. */
	cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen;
	if (!check_ids(old_id, cur_id, idmap))
		return false;
	/* NOTE(review): ids carrying BPF_ADD_CONST are additionally mapped
	 * with the flag stripped -- presumably to keep flagged and
	 * unflagged variants of the same id linked; confirm against the
	 * id allocation scheme.
	 */
	if (old_id & BPF_ADD_CONST) {
		old_id &= ~BPF_ADD_CONST;
		cur_id &= ~BPF_ADD_CONST;
		if (!check_ids(old_id, cur_id, idmap))
			return false;
	}
	return true;
}
/*
 * Forget register and stack state that can never be read again on this
 * path (per liveness info), so that more explored states compare as equal
 * and pruning becomes more effective.
 */
static void __clean_func_state(struct bpf_verifier_env *env,
			       struct bpf_func_state *st,
			       u16 live_regs, int frame)
{
	int i, j;

	/* Registers not live before this insn are unreadable from here on. */
	for (i = 0; i < BPF_REG_FP; i++) {
		if (!(live_regs & BIT(i)))
			bpf_mark_reg_not_init(env, &st->regs[i]);
	}
	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
		/* Liveness is tracked per half-slot (two halves per slot). */
		bool lo_live = bpf_stack_slot_alive(env, frame, i * 2);
		bool hi_live = bpf_stack_slot_alive(env, frame, i * 2 + 1);

		if (!hi_live || !lo_live) {
			int start = !lo_live ? 0 : BPF_REG_SIZE / 2;
			int end = !hi_live ? BPF_REG_SIZE : BPF_REG_SIZE / 2;
			/* The slot's last byte identifies special slots;
			 * was a magic '7', now spelled via BPF_REG_SIZE.
			 */
			u8 stype = st->stack[i].slot_type[BPF_REG_SIZE - 1];

			/* Special slots must stay intact as a whole. */
			if (stype == STACK_DYNPTR || stype == STACK_ITER ||
			    stype == STACK_IRQ_FLAG)
				continue;
			if (!hi_live) {
				struct bpf_reg_state *spill = &st->stack[i].spilled_ptr;

				/* Keep the live lower half of a spilled
				 * scalar readable as MISC (or ZERO when the
				 * spilled value is known to be null/zero).
				 */
				if (lo_live && stype == STACK_SPILL) {
					u8 val = STACK_MISC;

					if (bpf_register_is_null(spill))
						val = STACK_ZERO;
					/* Lower half: magic '4' replaced by
					 * BPF_REG_SIZE / 2 for consistency.
					 */
					for (j = 0; j < BPF_REG_SIZE / 2; j++) {
						u8 *t = &st->stack[i].slot_type[j];

						if (*t == STACK_SPILL)
							*t = val;
					}
				}
				bpf_mark_reg_not_init(env, spill);
			}
			for (j = start; j < end; j++)
				st->stack[i].slot_type[j] = STACK_POISON;
		}
	}
}
/*
 * Apply liveness-based cleanup (__clean_func_state()) to every frame of
 * @st. Returns a negative error when the live-stack query cannot be
 * initialized, 0 otherwise.
 */
static int clean_verifier_state(struct bpf_verifier_env *env,
				struct bpf_verifier_state *st)
{
	int fr, err;

	err = bpf_live_stack_query_init(env, st);
	if (err)
		return err;
	for (fr = 0; fr <= st->curframe; fr++) {
		u32 insn_ip = bpf_frame_insn_idx(st, fr);

		__clean_func_state(env, st->frame[fr],
				   env->insn_aux_data[insn_ip].live_regs_before, fr);
	}
	return 0;
}
static bool regs_exact(const struct bpf_reg_state *rold,
const struct bpf_reg_state *rcur,
struct bpf_idmap *idmap)
{
return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
check_ids(rold->id, rcur->id, idmap) &&
check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
}
/* How strictly two states must match during state comparison. */
enum exact_level {
	NOT_EXACT,	/* old may be a permissive superset of cur */
	EXACT,		/* byte-for-byte register equivalence required */
	RANGE_WITHIN	/* cur's ranges must fit within old's ranges */
};
/*
 * Return true when register @rold is a safe (equal or more permissive)
 * description of @rcur, so a path verified with @rold also covers @rcur.
 * @idmap accumulates id mappings; call order therefore matters across the
 * whole state comparison. @exact selects the comparison strictness.
 */
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
		    struct bpf_reg_state *rcur, struct bpf_idmap *idmap,
		    enum exact_level exact)
{
	if (exact == EXACT)
		return regs_exact(rold, rcur, idmap);

	/* An uninitialized old register imposes no constraints. */
	if (rold->type == NOT_INIT)
		return true;
	if (rold->type != rcur->type)
		return false;
	switch (base_type(rold->type)) {
	case SCALAR_VALUE:
		/* Under ALU-limit exploration, only exact scalars are safe. */
		if (env->explore_alu_limits) {
			return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
			       check_scalar_ids(rold->id, rcur->id, idmap);
		}
		/* Imprecise scalars match anything in non-exact mode. */
		if (!rold->precise && exact == NOT_EXACT)
			return true;
		/* BPF_ADD_CONST flag and delta must agree before range check. */
		if (rold->id &&
		    (rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST))
			return false;
		if ((rold->id & BPF_ADD_CONST) && rold->delta != rcur->delta)
			return false;
		if (!check_scalar_ids(rold->id, rcur->id, idmap))
			return false;
		return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_MAP_KEY:
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MEM:
	case PTR_TO_BUF:
	case PTR_TO_TP_BUFFER:
		/* Fixed part identical, variable offset within bounds. */
		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
		       range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off) &&
		       check_ids(rold->id, rcur->id, idmap) &&
		       check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
		/* Negative range values are special markers: must match
		 * exactly; otherwise old's verified range must not exceed
		 * cur's available range.
		 */
		if (rold->range < 0 || rcur->range < 0) {
			if (rold->range != rcur->range)
				return false;
		} else if (rold->range > rcur->range) {
			return false;
		}
		if (!check_ids(rold->id, rcur->id, idmap))
			return false;
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_STACK:
		/* Stack pointers must also refer to the same frame. */
		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
	case PTR_TO_ARENA:
		return true;
	case PTR_TO_INSN:
		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
		       range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off);
	default:
		return regs_exact(rold, rcur, idmap);
	}
}
/* Template register holding a fully unknown, imprecise scalar; used as a
 * stand-in when comparing STACK_MISC stack contents against spilled
 * scalars (see scalar_reg_for_stack()). Initialized once at boot.
 */
static struct bpf_reg_state unbound_reg;

static __init int unbound_reg_init(void)
{
	bpf_mark_reg_unknown_imprecise(&unbound_reg);
	return 0;
}
late_initcall(unbound_reg_init);
static bool is_spilled_scalar_after(const struct bpf_stack_state *stack, int im)
{
return stack->slot_type[im] == STACK_SPILL &&
stack->spilled_ptr.type == SCALAR_VALUE;
}
static bool is_stack_misc_after(struct bpf_verifier_env *env,
struct bpf_stack_state *stack, int im)
{
u32 i;
for (i = im; i < ARRAY_SIZE(stack->slot_type); ++i) {
if ((stack->slot_type[i] == STACK_MISC) ||
((stack->slot_type[i] == STACK_INVALID || stack->slot_type[i] == STACK_POISON) &&
env->allow_uninit_stack))
continue;
return false;
}
return true;
}
/*
 * View stack contents starting at byte @im as a scalar register: the
 * spilled register for a scalar spill, the shared unbound_reg template
 * for MISC data, or NULL when neither interpretation applies.
 */
static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
						  struct bpf_stack_state *stack, int im)
{
	if (is_spilled_scalar_after(stack, im))
		return &stack->spilled_ptr;
	return is_stack_misc_after(env, stack, im) ? &unbound_reg : NULL;
}
/*
 * Return true when the stack of @old safely covers the stack of @cur:
 * every byte the old (verified) state relied on must have compatible
 * contents in the current state. @idmap keeps id mappings consistent
 * across the whole state comparison.
 */
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
		      struct bpf_func_state *cur, struct bpf_idmap *idmap,
		      enum exact_level exact)
{
	int i, spi;

	/* Walk the old stack byte by byte. */
	for (i = 0; i < old->allocated_stack; i++) {
		struct bpf_reg_state *old_reg, *cur_reg;
		int im = i % BPF_REG_SIZE;

		spi = i / BPF_REG_SIZE;
		if (exact == EXACT) {
			u8 old_type = old->stack[spi].slot_type[i % BPF_REG_SIZE];
			u8 cur_type = i < cur->allocated_stack ?
				      cur->stack[spi].slot_type[i % BPF_REG_SIZE] : STACK_INVALID;

			/* POISON counts as INVALID for exact comparison. */
			if (old_type == STACK_POISON)
				old_type = STACK_INVALID;
			if (cur_type == STACK_POISON)
				cur_type = STACK_INVALID;
			if (i >= cur->allocated_stack || old_type != cur_type)
				return false;
		}
		/* Bytes the old state never relied on are always safe. */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID ||
		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_POISON)
			continue;
		if (env->allow_uninit_stack &&
		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
			continue;
		/* Remaining old bytes need a counterpart in cur. */
		if (i >= cur->allocated_stack)
			return false;
		/* At slot or half-slot boundaries, try comparing the data
		 * as scalar registers (spill or MISC) via regsafe().
		 */
		if (im == 0 || im == 4) {
			old_reg = scalar_reg_for_stack(env, &old->stack[spi], im);
			cur_reg = scalar_reg_for_stack(env, &cur->stack[spi], im);
			if (old_reg && cur_reg) {
				if (!regsafe(env, old_reg, cur_reg, idmap, exact))
					return false;
				/* Skip the rest of the (half-)slot. */
				i += (im == 0 ? BPF_REG_SIZE - 1 : 3);
				continue;
			}
		}
		/* ZERO in cur satisfies MISC in old. */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			return false;
		/* Slot-wide checks below run only on the slot's last byte. */
		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
			continue;
		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			if (!regsafe(env, &old->stack[spi].spilled_ptr,
				     &cur->stack[spi].spilled_ptr, idmap, exact))
				return false;
			break;
		case STACK_DYNPTR:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
				return false;
			break;
		case STACK_ITER:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			if (old_reg->iter.btf != cur_reg->iter.btf ||
			    old_reg->iter.btf_id != cur_reg->iter.btf_id ||
			    old_reg->iter.state != cur_reg->iter.state ||
			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
				return false;
			break;
		case STACK_IRQ_FLAG:
			old_reg = &old->stack[spi].spilled_ptr;
			cur_reg = &cur->stack[spi].spilled_ptr;
			if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap) ||
			    old_reg->irq.kfunc_class != cur_reg->irq.kfunc_class)
				return false;
			break;
		case STACK_MISC:
		case STACK_ZERO:
		case STACK_INVALID:
		case STACK_POISON:
			continue;
		default:
			return false;
		}
	}
	return true;
}
/*
 * Compare the resource/reference tracking portions of two states: counts
 * and kinds of acquired references, lock and IRQ state, RCU/preemption
 * nesting. Uses @idmap so reference ids map consistently with register
 * ids compared elsewhere in the same state comparison.
 */
static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur,
		    struct bpf_idmap *idmap)
{
	int i;

	if (old->acquired_refs != cur->acquired_refs)
		return false;
	if (old->active_locks != cur->active_locks)
		return false;
	if (old->active_preempt_locks != cur->active_preempt_locks)
		return false;
	if (old->active_rcu_locks != cur->active_rcu_locks)
		return false;
	if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
		return false;
	if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) ||
	    old->active_lock_ptr != cur->active_lock_ptr)
		return false;
	/* Pairwise comparison of acquired references. */
	for (i = 0; i < old->acquired_refs; i++) {
		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
		    old->refs[i].type != cur->refs[i].type)
			return false;
		switch (old->refs[i].type) {
		case REF_TYPE_PTR:
		case REF_TYPE_IRQ:
			break;
		case REF_TYPE_LOCK:
		case REF_TYPE_RES_LOCK:
		case REF_TYPE_RES_LOCK_IRQ:
			/* Locks must refer to the same object. */
			if (old->refs[i].ptr != cur->refs[i].ptr)
				return false;
			break;
		default:
			WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type);
			return false;
		}
	}
	return true;
}
/*
 * Compare one frame of two states: registers live before @insn_idx and
 * the whole stack, at strictness @exact. Also rejects old states with a
 * deeper callback nesting than cur, since they verified fewer unrollings.
 */
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
			      struct bpf_func_state *cur, u32 insn_idx, enum exact_level exact)
{
	u16 live_regs = env->insn_aux_data[insn_idx].live_regs_before;
	u16 i;

	if (old->callback_depth > cur->callback_depth)
		return false;

	/* Only registers that can still be read need to match. */
	for (i = 0; i < MAX_BPF_REG; i++)
		if (((1 << i) & live_regs) &&
		    !regsafe(env, &old->regs[i], &cur->regs[i],
			     &env->idmap_scratch, exact))
			return false;

	if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
		return false;

	return true;
}
/* Prepare the shared idmap for a fresh state comparison. */
static void reset_idmap_scratch(struct bpf_verifier_env *env)
{
	env->idmap_scratch.tmp_id_gen = env->id_gen;
	env->idmap_scratch.cnt = 0;
}
/*
 * Top-level state comparison: true when the explored state @old safely
 * covers @cur at strictness @exact. Resets the shared idmap first so that
 * id mappings are consistent across refsafe() and all frames.
 */
static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur,
			 enum exact_level exact)
{
	u32 insn_idx;
	int i;

	if (old->curframe != cur->curframe)
		return false;

	reset_idmap_scratch(env);

	/* A speculative old state cannot cover a non-speculative cur. */
	if (old->speculative && !cur->speculative)
		return false;

	if (old->in_sleepable != cur->in_sleepable)
		return false;

	if (!refsafe(old, cur, &env->idmap_scratch))
		return false;

	/* All frames, including callsites, must match. */
	for (i = 0; i <= old->curframe; i++) {
		insn_idx = bpf_frame_insn_idx(old, i);
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!func_states_equal(env, old->frame[i], cur->frame[i], insn_idx, exact))
			return false;
	}
	return true;
}
/*
 * Collect every precise scalar (registers and spilled stack slots) of the
 * matched @old state into the backtracking set, then run precision
 * backtracking on @cur so the same scalars become precise there as well.
 * @changed, when non-NULL, reports whether any marks were added.
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old,
			       struct bpf_verifier_state *cur,
			       bool *changed)
{
	struct bpf_reg_state *state_reg;
	struct bpf_func_state *state;
	int i, err = 0, fr;
	bool first;

	for (fr = old->curframe; fr >= 0; fr--) {
		state = old->frame[fr];
		state_reg = state->regs;
		first = true;
		/* Precise scalar registers of this frame. */
		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
			if (state_reg->type != SCALAR_VALUE ||
			    !state_reg->precise)
				continue;
			if (env->log.level & BPF_LOG_LEVEL2) {
				if (first)
					verbose(env, "frame %d: propagating r%d", fr, i);
				else
					verbose(env, ",r%d", i);
			}
			bpf_bt_set_frame_reg(&env->bt, fr, i);
			first = false;
		}
		/* Precise scalars spilled to this frame's stack. */
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
			if (!bpf_is_spilled_reg(&state->stack[i]))
				continue;
			state_reg = &state->stack[i].spilled_ptr;
			if (state_reg->type != SCALAR_VALUE ||
			    !state_reg->precise)
				continue;
			if (env->log.level & BPF_LOG_LEVEL2) {
				if (first)
					verbose(env, "frame %d: propagating fp%d",
						fr, (-i - 1) * BPF_REG_SIZE);
				else
					verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE);
			}
			bpf_bt_set_frame_slot(&env->bt, fr, i);
			first = false;
		}
		if (!first && (env->log.level & BPF_LOG_LEVEL2))
			verbose(env, "\n");
	}

	/* -1: backtrack from the current instruction. */
	err = bpf_mark_chain_precision(env, cur, -1, changed);
	if (err < 0)
		return err;
	return 0;
}
/* Cap on fixpoint iterations before falling back to full precision. */
#define MAX_BACKEDGE_ITERS 64

/*
 * Iterate precision propagation over all backedges of @visit until no new
 * marks appear (fixpoint). If the fixpoint does not converge within
 * MAX_BACKEDGE_ITERS, conservatively mark all scalars precise instead.
 * Frees the backedge list when done.
 */
static int propagate_backedges(struct bpf_verifier_env *env, struct bpf_scc_visit *visit)
{
	struct bpf_scc_backedge *backedge;
	struct bpf_verifier_state *st;
	bool changed;
	int i, err;

	i = 0;
	do {
		if (i++ > MAX_BACKEDGE_ITERS) {
			if (env->log.level & BPF_LOG_LEVEL2)
				verbose(env, "%s: too many iterations\n", __func__);
			/* Give up on convergence: make everything precise. */
			for (backedge = visit->backedges; backedge; backedge = backedge->next)
				bpf_mark_all_scalars_precise(env, &backedge->state);
			break;
		}
		changed = false;
		for (backedge = visit->backedges; backedge; backedge = backedge->next) {
			st = &backedge->state;
			err = propagate_precision(env, st->equal_state, st, &changed);
			if (err)
				return err;
		}
	} while (changed);
	bpf_free_backedges(visit);
	return 0;
}
/*
 * Cheap pre-filter for loop detection: true when the top frames of @old
 * and @cur hold byte-identical registers (ignoring trailing bookkeeping
 * fields starting at 'frameno').
 */
static bool states_maybe_looping(struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
{
	struct bpf_func_state *fold, *fcur;
	int fr = cur->curframe;
	int reg;

	if (old->curframe != fr)
		return false;
	fold = old->frame[fr];
	fcur = cur->frame[fr];
	for (reg = 0; reg < MAX_BPF_REG; reg++) {
		if (memcmp(&fold->regs[reg], &fcur->regs[reg],
			   offsetof(struct bpf_reg_state, frameno)) != 0)
			return false;
	}
	return true;
}
/*
 * True when any active iterator slot of @old has a different depth in
 * @cur -- meaning the loop is still making progress rather than repeating.
 */
static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
{
	int fr, slot;

	for (fr = old->curframe; fr >= 0; fr--) {
		struct bpf_func_state *state = old->frame[fr];
		int nr_slots = state->allocated_stack / BPF_REG_SIZE;

		for (slot = 0; slot < nr_slots; slot++) {
			struct bpf_reg_state *old_slot, *cur_slot;

			if (state->stack[slot].slot_type[0] != STACK_ITER)
				continue;
			old_slot = &state->stack[slot].spilled_ptr;
			if (old_slot->iter.state != BPF_ITER_STATE_ACTIVE)
				continue;
			cur_slot = &cur->frame[fr]->stack[slot].spilled_ptr;
			if (cur_slot->iter.depth != old_slot->iter.depth)
				return true;
		}
	}
	return false;
}
/*
 * Clear the precise flag on every scalar in every frame of @st, both in
 * registers and in spilled stack slots. Precision is re-derived lazily
 * by backtracking when actually needed.
 */
static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	int fr, idx;

	for (fr = 0; fr <= st->curframe; fr++) {
		struct bpf_func_state *func = st->frame[fr];
		struct bpf_reg_state *reg;

		for (idx = 0; idx < BPF_REG_FP; idx++) {
			reg = &func->regs[idx];
			if (reg->type == SCALAR_VALUE)
				reg->precise = false;
		}
		for (idx = 0; idx < func->allocated_stack / BPF_REG_SIZE; idx++) {
			if (!bpf_is_spilled_reg(&func->stack[idx]))
				continue;
			reg = &func->stack[idx].spilled_ptr;
			if (reg->type == SCALAR_VALUE)
				reg->precise = false;
		}
	}
}
/*
 * Decide whether verification at @insn_idx can stop because an equivalent
 * state was already explored, and whether the current state should be
 * recorded as a new checkpoint. Returns 1 when the current path is pruned,
 * 0 to continue verification, negative error on failure.
 */
int bpf_is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl;
	struct bpf_verifier_state *cur = env->cur_state, *new;
	bool force_new_state, add_new_state, loop;
	int n, err, states_cnt = 0;
	struct list_head *pos, *tmp, *head;

	force_new_state = env->test_state_freq || bpf_is_force_checkpoint(env, insn_idx) ||
			  cur->jmp_history_cnt > 40;
	add_new_state = force_new_state;
	/* Checkpoint when enough jumps/insns went by since the last one. */
	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
	    env->insn_processed - env->prev_insn_processed >= 8)
		add_new_state = true;

	err = clean_verifier_state(env, cur);
	if (err)
		return err;
	loop = false;
	head = bpf_explored_state(env, insn_idx);
	list_for_each_safe(pos, tmp, head) {
		sl = container_of(pos, struct bpf_verifier_state_list, node);
		states_cnt++;
		if (sl->state.insn_idx != insn_idx)
			continue;

		if (sl->state.branches) {
			/* Old state still has unexplored branches: look for
			 * infinite looping instead of equivalence.
			 */
			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];

			if (frame->in_async_callback_fn &&
			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
				/* Different async callback invocations are independent. */
				goto skip_inf_loop_check;
			}
			if (is_iter_next_insn(env, insn_idx)) {
				if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
					struct bpf_func_state *cur_frame;
					struct bpf_reg_state *iter_state, *iter_reg;
					int spi;

					cur_frame = cur->frame[cur->curframe];
					iter_reg = &cur_frame->regs[BPF_REG_1];
					spi = bpf_get_spi(iter_reg->var_off.value);
					iter_state = &bpf_func(env, iter_reg)->stack[spi].spilled_ptr;
					/* An active iterator makes this a
					 * converging loop, not an error.
					 */
					if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
						loop = true;
						goto hit;
					}
				}
				goto skip_inf_loop_check;
			}
			if (is_may_goto_insn_at(env, insn_idx)) {
				if (sl->state.may_goto_depth != cur->may_goto_depth &&
				    states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
					loop = true;
					goto hit;
				}
			}
			if (bpf_calls_callback(env, insn_idx)) {
				if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
					loop = true;
					goto hit;
				}
				goto skip_inf_loop_check;
			}
			/* No loop-bounding construct: identical states on the
			 * same path mean the program cannot terminate.
			 */
			if (states_maybe_looping(&sl->state, cur) &&
			    states_equal(env, &sl->state, cur, EXACT) &&
			    !iter_active_depths_differ(&sl->state, cur) &&
			    sl->state.may_goto_depth == cur->may_goto_depth &&
			    sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
				verbose_linfo(env, insn_idx, "; ");
				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
				verbose(env, "cur state:");
				print_verifier_state(env, cur, cur->curframe, true);
				verbose(env, "old state:");
				print_verifier_state(env, &sl->state, cur->curframe, true);
				return -EINVAL;
			}
skip_inf_loop_check:
			if (!force_new_state &&
			    env->jmps_processed - env->prev_jmps_processed < 20 &&
			    env->insn_processed - env->prev_insn_processed < 100)
				add_new_state = false;
			goto miss;
		}
		loop = incomplete_read_marks(env, &sl->state);
		if (states_equal(env, &sl->state, cur, loop ? RANGE_WITHIN : NOT_EXACT)) {
hit:
			sl->hit_cnt++;
			err = 0;
			if (bpf_is_jmp_point(env, env->insn_idx))
				err = bpf_push_jmp_history(env, cur, 0, 0);
			/* Carry precision marks over from the matched state. */
			err = err ? : propagate_precision(env, &sl->state, cur, NULL);
			if (err)
				return err;
			if (loop) {
				struct bpf_scc_backedge *backedge;

				backedge = kzalloc_obj(*backedge,
						       GFP_KERNEL_ACCOUNT);
				if (!backedge)
					return -ENOMEM;
				err = bpf_copy_verifier_state(&backedge->state, cur);
				backedge->state.equal_state = &sl->state;
				backedge->state.insn_idx = insn_idx;
				err = err ?: add_scc_backedge(env, &sl->state, backedge);
				if (err) {
					bpf_free_verifier_state(&backedge->state, false);
					kfree(backedge);
					return err;
				}
			}
			return 1;
		}
miss:
		/* Evict states that keep missing: they cost comparison time
		 * without contributing to pruning. Force-checkpoint states
		 * with live branches get a much larger grace factor.
		 */
		if (add_new_state)
			sl->miss_cnt++;
		n = bpf_is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3;
		if (sl->miss_cnt > sl->hit_cnt * n + n) {
			sl->in_free_list = true;
			list_del(&sl->node);
			list_add(&sl->node, &env->free_list);
			env->free_list_size++;
			env->explored_states_size--;
			maybe_free_verifier_state(env, sl);
		}
	}

	if (env->max_states_per_insn < states_cnt)
		env->max_states_per_insn = states_cnt;

	/* Cap state growth for unprivileged programs. */
	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return 0;

	if (!add_new_state)
		return 0;

	new_sl = kzalloc_obj(struct bpf_verifier_state_list, GFP_KERNEL_ACCOUNT);
	if (!new_sl)
		return -ENOMEM;
	env->total_states++;
	env->explored_states_size++;
	update_peak_states(env);
	env->prev_jmps_processed = env->jmps_processed;
	env->prev_insn_processed = env->insn_processed;

	/* Drop precision marks; they are re-derived on demand. */
	if (env->bpf_capable)
		mark_all_scalars_imprecise(env, cur);
	bpf_clear_singular_ids(env, cur);

	new = &new_sl->state;
	err = bpf_copy_verifier_state(new, cur);
	if (err) {
		bpf_free_verifier_state(new, false);
		kfree(new_sl);
		return err;
	}
	new->insn_idx = insn_idx;
	verifier_bug_if(new->branches != 1, env,
			"%s:branches_to_explore=%d insn %d",
			__func__, new->branches, insn_idx);
	err = maybe_enter_scc(env, new);
	if (err) {
		bpf_free_verifier_state(new, false);
		kfree(new_sl);
		return err;
	}

	cur->parent = new;
	cur->first_insn_idx = insn_idx;
	cur->dfs_depth = new->dfs_depth + 1;
	bpf_clear_jmp_history(cur);
	list_add(&new_sl->node, head);
	return 0;
}