#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1
#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
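/* Maximum variable offset umax_value permitted when resolving memory
 * accesses. In practice this is far bigger than any realistic pointer
 * offset; this limit ensures umax_value + (int)off + (int)size cannot
 * overflow a u64.
 */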
#define BPF_MAX_VAR_OFF (1 << 29)
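/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]; ensures
 * that converting umax_value to an int cannot overflow.
 */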
#define BPF_MAX_VAR_SIZ (1 << 29)
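/* Size of bpf_verifier_env::tmp_str_buf; must fit a full stack mask string
 * in its "-8,-16,...,-512" form.
 */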
#define TMP_STR_BUF_LEN 320
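/* maximum number of instructions emitted when patching one instruction */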
#define INSN_BUF_SIZE 32
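/* common name prefix of BTF types backing open-coded iterators, e.g. "bpf_iter_num" */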
#define ITER_PREFIX "bpf_iter_"
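/* Tracked state of an open-coded iterator (struct bpf_iter_<type>) living
 * in stack slots.
 */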
enum bpf_iter_state {
BPF_ITER_STATE_INVALID,
BPF_ITER_STATE_ACTIVE,
BPF_ITER_STATE_DRAINED,
};
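/* Verifier's knowledge about one register (or a register spilled to the
 * stack). Ordering of fields matters, see states_equal().
 */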
struct bpf_reg_state {
enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only. Or constant
	 * delta between "linked" scalar registers with the same ID.
	 */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;
		/* valid for pointers into a map, e.g. CONST_PTR_TO_MAP and
		 * PTR_TO_MAP_VALUE
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish lookups from inner vs. outer maps,
			 * map_uid is non-zero for registers pointing to
			 * inner maps.
			 */
			u32 map_uid;
		};
		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};
		/* for PTR_TO_MEM; dynptr_id is set for dynptr slices */
		struct {
			u32 mem_size;
			u32 dynptr_id;
		};
		/* for dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes and thus spans two slots;
			 * first_slot marks the slot holding the start of
			 * the dynptr.
			 */
			bool first_slot;
		} dynptr;
		/* for open-coded iterator stack slots */
		struct {
			/* BTF container and type ID of the iterator state
			 * type (struct bpf_iter_<type>)
			 */
			struct btf *btf;
			u32 btf_id;
			/* packed so the iter state fits into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;
		/* for saved-IRQ-flag stack slots: which kfunc family saved
		 * the flags
		 */
		struct {
			enum {
				IRQ_NATIVE_KFUNC,
				IRQ_LOCK_KFUNC,
			} kfunc_class;
		} irq;
		/* max size from any of the above */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;
		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value. For pointer types, this represents the variable
	 * part of the offset from the pointed-to object, and is shared with
	 * all bpf_reg_states with the same id.
	 */
	struct tnum var_off;
	/* Value range bounds. These refer to the same value as var_off, not
	 * necessarily the register's actual contents.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
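	/* The top two bits of an ID mark a register as a "linked" copy of
	 * another register plus a constant delta (stored in 'off'), for
	 * 64-bit and 32-bit ALU operations respectively. Example:
	 *   r1 = r2;     r1->id == r2->id == N
	 *   r1 += 10;    r1->id == N | BPF_ADD_CONST64, r1->off == 10
	 */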
#define BPF_ADD_CONST64 (1U << 31)
#define BPF_ADD_CONST32 (1U << 30)
#define BPF_ADD_CONST (BPF_ADD_CONST64 | BPF_ADD_CONST32)
	u32 id; /* shared by registers holding the same value, e.g. to propagate != NULL or range knowledge to copies */
	u32 ref_obj_id; /* ID of the acquired object this register holds a reference to */
	u32 frameno; /* index in bpf_verifier_state->frame[] that this PTR_TO_STACK points into */
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification finishes.
	 */
	s32 subreg_def;
	bool precise; /* if !precise && SCALAR_VALUE, min/max/tnum don't affect safety */
};
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,       /* BPF program wrote some data into this slot */
	STACK_ZERO,       /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,       /* open-coded iterator state */
	STACK_IRQ_FLAG,   /* saved IRQ flags */
};
#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
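/* mask of the caller-saved argument registers R1-R5 */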
#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
(1 << BPF_REG_3) | (1 << BPF_REG_4) | \
(1 << BPF_REG_5))
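/* a struct bpf_dynptr_kern is 16 bytes, so it occupies two stack slots */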
#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
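/* State of one 8-byte stack slot: a per-byte slot type plus, for spills and
 * dynptr/iter/irq slots, the tracked register state.
 */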
struct bpf_stack_state {
struct bpf_reg_state spilled_ptr;
u8 slot_type[BPF_REG_SIZE];
};
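/* One acquired reference, held lock or saved-IRQ entry, tracked per state
 * so leaks and mismatched releases can be rejected.
 */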
struct bpf_reference_state {
enum ref_state_type {
REF_TYPE_PTR = (1 << 1),
REF_TYPE_IRQ = (1 << 2),
REF_TYPE_LOCK = (1 << 3),
REF_TYPE_RES_LOCK = (1 << 4),
REF_TYPE_RES_LOCK_IRQ = (1 << 5),
REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
} type;
	int id; /* unique ID per acquired reference */
	int insn_idx; /* insn where the reference was acquired, used for leak reports */
	void *ptr; /* source object of a lock, matched on unlock */
};
struct bpf_retval_range {
s32 minval;
s32 maxval;
};
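/* State of a single call frame: type of all registers plus stack info */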
struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
	int callsite; /* index of the call insn that called into this func */
	u32 frameno; /* this frame's index in bpf_verifier_state->frame[]; 0 == main function */
	u32 subprogno; /* index within subprog_info[]; zero == main subprog */
	u32 async_entry_cnt; /* number of times an async callback entry was scheduled for this frame */
	struct bpf_retval_range callback_ret_range; /* allowed return value range for a callback */
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For callback-calling helpers that bound the number of simulated
	 * runs (e.g. bpf_loop), the current simulated iteration count of
	 * the callee frame.
	 */
	u32 callback_depth;
	/* The following fields should be last. See copy_func_state() */
	struct bpf_stack_state *stack;
	int allocated_stack; /* size in bytes of the tracked stack */
};
#define MAX_CALL_FRAMES 8 /* maximum depth of nested BPF-to-BPF calls */
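/* Flags for bpf_jmp_history_entry.flags. For stack accesses the low 3 bits
 * hold the frame number (MAX_CALL_FRAMES is 8) and the next 6 bits the stack
 * slot index (MAX_BPF_STACK is 512 and slots are 8 bytes, so spi fits in
 * [0, 63]); 12 bits are used in total.
 */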
enum {
INSN_F_FRAMENO_MASK = 0x7,
INSN_F_SPI_MASK = 0x3f,
INSN_F_SPI_SHIFT = 3,
INSN_F_STACK_ACCESS = BIT(9),
INSN_F_DST_REG_STACK = BIT(10),
INSN_F_SRC_REG_STACK = BIT(11),
};
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
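/* Per-instruction history recorded along the currently explored path; used
 * by precision backtracking.
 */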
struct bpf_jmp_history_entry {
	u32 idx; /* insn idx */
	u32 prev_idx : 20; /* previous insn index; 20 bits suffice, programs have < 1M insns */
	u32 flags : 12; /* special flags, e.g. register spill/fill through stack */
	u64 linked_regs; /* packed list of registers sharing the dst register's ID */
};
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
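/* State of the whole program: one bpf_func_state per active call frame plus
 * path-wide bookkeeping (references, locks, instruction history).
 */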
struct bpf_verifier_state {
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
struct bpf_reference_state *refs;
	/* 'branches' is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or were
	 *     safely pruned
	 * 1 - at least one path is being explored, this state hasn't reached
	 *     bpf_exit
	 * 2 - at least two paths are being explored, this state is an
	 *     immediate parent of two children
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe; /* index of the current (deepest) frame in frame[] */
	u32 acquired_refs; /* number of entries in refs */
	u32 active_locks; /* number of currently held spin locks */
	u32 active_preempt_locks; /* nesting depth of preempt-disable sections */
	u32 active_irq_id; /* ref ID of the saved IRQ state, if IRQs are disabled */
	u32 active_lock_id; /* ID of the most recently acquired lock */
	void *active_lock_ptr; /* source object of the most recently acquired lock */
	u32 active_rcu_locks; /* nesting depth of RCU read-side sections */
	bool speculative; /* state belongs to a speculative (Spectre v1) execution path */
	bool in_sleepable; /* state may execute in a sleepable context */
	bool cleaned;
	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* the cached state this state was found equivalent to, if any */
	struct bpf_verifier_state *equal_state;
	/* instruction history on the current path, for precision backtracking */
	struct bpf_jmp_history_entry *jmp_history;
	u32 jmp_history_cnt;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
};
#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)
#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
iter < frame->allocated_stack / BPF_REG_SIZE; \
iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
({ \
struct bpf_verifier_state *___vstate = __vst; \
int ___i, ___j; \
for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
struct bpf_reg_state *___regs; \
__state = ___vstate->frame[___i]; \
___regs = __state->regs; \
for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
__reg = &___regs[___j]; \
(void)(__expr); \
} \
bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
if (!__reg) \
continue; \
(void)(__expr); \
} \
} \
})
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
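/* Example (a sketch): with struct bpf_func_state *state and
 * struct bpf_reg_state *reg declared by the caller, invalidate every
 * register that still refers to a released object identified by ref_obj_id:
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->ref_obj_id == ref_obj_id)
 *			__mark_reg_unknown(env, reg);
 *	}));
 */
/* linked list of verifier states used to prune search */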
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
struct list_head node;
u32 miss_cnt;
u32 hit_cnt:31;
u32 in_free_list:1;
};
/* Collected while verifying a bpf_loop() call, to decide about inlining it */
struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};
/* resolved map pointer for a map-related instruction */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison; /* the map pointer could not be tracked to a single map */
	bool unpriv; /* Spectre sanitation is needed, e.g. tail-call index masking */
};
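/* flags kept in bpf_insn_aux_data.alu_state for Spectre v1 sanitation of
 * pointer arithmetic, used together with alu_limit
 */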
#define BPF_ALU_SANITIZE_SRC (1U << 0)
#define BPF_ALU_SANITIZE_DST (1U << 1)
#define BPF_ALU_NEG_VALUE (1U << 2)
#define BPF_ALU_NON_POINTER (1U << 3)
#define BPF_ALU_IMMEDIATE (1U << 4)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
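/* variable-sized array of u32 items (e.g. instruction indices), used for
 * jump tables (jt) and instruction successor lists
 */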
struct bpf_iarray {
int cnt;
u32 items[];
};
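/* Auxiliary per-instruction verifier state; env->insn_aux_data holds one
 * entry per program instruction.
 */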
struct bpf_insn_aux_data {
union {
		enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm; /* saved imm field of call insn */
		u32 alu_limit; /* limit for add/sub register with pointer */
		struct {
			u32 map_index; /* index into used_maps[] */
			u32 map_off; /* offset from value base address */
		};
struct {
enum bpf_reg_type reg_type;
union {
struct {
struct btf *btf;
u32 btf_id;
};
u32 mem_size;
};
} btf_var;
struct bpf_loop_inline_state loop_inline_state;
};
	union {
		/* remember the size of the type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of the node field within the type to rewrite */
		u64 insert_off;
	};
	struct bpf_iarray *jt; /* jump table for this insn, if it is an indirect jump */
struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state;
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool nospec; /* insert a speculation barrier before this insn (Spectre v1) */
	bool nospec_result; /* result must not be used under speculation; barrier after the insn (Spectre v4) */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear the upper bits of dst */
	bool non_sleepable;
	bool is_iter_next; /* call to a bpf_iter_<type>_next() kfunc */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	u8 fastcall_pattern:1; /* STX/LDX insn is part of a bpf_fastcall spill/fill pattern */
	u8 fastcall_spills_num:3; /* for CALL insns, number of spill/fill pairs in the pattern */
	u8 arg_prog:4;
	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index, before patching */
	bool jmp_point; /* this insn is a jump target */
	bool prune_point; /* state equivalence may be checked at this insn */
	/* always check state equivalence and save a state checkpoint at this
	 * insn, regardless of heuristics
	 */
	bool force_checkpoint;
	bool calls_callback; /* this insn calls a helper/kfunc that runs a callback */
	u32 scc; /* ID of the CFG strongly connected component containing this insn, or 0 */
	u16 live_regs_before; /* mask of registers alive before this instruction */
};
#define MAX_USED_MAPS 64
#define MAX_USED_BTFS 64
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
struct bpf_verifier_log {
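	/* Logical start and end positions of a "log window" into the verifier
	 * log. start_pos == 0 means nothing was truncated; once truncation
	 * starts, (end_pos - start_pos) gives the amount of useful data in
	 * the buffer (see bpf_vlog_reset()).
	 */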
u64 start_pos;
u64 end_pos;
char __user *ubuf;
u32 level;
u32 len_total;
u32 len_max;
char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};
#define BPF_LOG_LEVEL1 1
#define BPF_LOG_LEVEL2 2
#define BPF_LOG_STATS 4
#define BPF_LOG_FIXED 8
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
return log && log->level;
}
#define BPF_MAX_SUBPROGS 256
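/* description of one argument of a global subprog, derived from BTF */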
struct bpf_subprog_arg_info {
enum bpf_arg_type arg_type;
union {
u32 mem_size;
u32 btf_id;
};
};
enum priv_stack_mode {
	PRIV_STACK_UNKNOWN, /* not yet decided */
	NO_PRIV_STACK, /* use the normal kernel stack */
	PRIV_STACK_ADAPTIVE, /* use a private stack when supported and needed */
};
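/* Per-subprogram verification info; subprog 0 is the main program */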
struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* the idx to the main_prog->aux->linfo */
	u32 postorder_start;
	u32 exit_idx;
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills
	 */
	s16 fastcall_stack_off;
bool has_tail_call: 1;
bool tail_call_reachable: 1;
bool has_ld_abs: 1;
bool is_cb: 1;
bool is_async_cb: 1;
bool is_exception_cb: 1;
bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;
bool changes_pkt_data: 1;
bool might_sleep: 1;
	u8 arg_cnt:3; /* number of described arguments; at most MAX_BPF_FUNC_REG_ARGS (5) */
enum priv_stack_mode priv_stack_mode;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};
struct bpf_verifier_env;
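/* Scratch state for precision backtracking: per-frame masks of registers
 * and stack slots whose values still need to be traced to their origin.
 */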
struct backtrack_state {
struct bpf_verifier_env *env;
u32 frame;
u32 reg_masks[MAX_CALL_FRAMES];
u64 stack_masks[MAX_CALL_FRAMES];
};
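/* Scratch structures used by states_equal() to build a mapping between
 * register IDs of an old (cached) state and the current one (idmap), and to
 * collect ID usage counts (idset).
 */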
struct bpf_id_pair {
u32 old;
u32 cur;
};
struct bpf_idmap {
u32 tmp_id_gen;
u32 cnt;
struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};
struct bpf_idset {
u32 num_ids;
struct {
u32 id;
u32 cnt;
} entries[BPF_ID_MAP_SIZE];
};
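/* Bookkeeping for strongly connected components (loops) of the instruction
 * CFG. A callchain (the call sites leading to an SCC plus the SCC id) names
 * one instance of a component; for each instance the verifier keeps an entry
 * state and the backedge states that must be verified to a fixed point.
 */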
struct bpf_scc_callchain {
u32 callsites[MAX_CALL_FRAMES - 1];
u32 scc;
};
struct bpf_scc_backedge {
struct bpf_scc_backedge *next;
struct bpf_verifier_state state;
};
struct bpf_scc_visit {
struct bpf_scc_callchain callchain;
struct bpf_verifier_state *entry_state;
struct bpf_scc_backedge *backedges;
u32 num_backedges;
};
struct bpf_scc_info {
u32 num_visits;
struct bpf_scc_visit visits[];
};
struct bpf_liveness;
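/* single container for all data used during verification;
 * one verifier_env per bpf_check() call
 */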
struct bpf_verifier_env {
u32 insn_idx;
u32 prev_insn_idx;
	struct bpf_prog *prog; /* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod; /* the owning module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size; /* number of states to be processed */
	bool strict_alignment; /* perform strict pointer alignment checks */
	bool test_state_freq; /* test verifier with different pruning frequency */
	bool test_reg_invariants; /* fail verification on register invariant violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	/* search pruning optimization: lists of explored states, keyed by insn idx */
	struct list_head *explored_states;
	struct list_head free_list; /* list of state list entries kept for freeing */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by the program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF objects used by the program */
	struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* insn-array maps referenced by the program, e.g. for indirect jumps */
	u32 used_map_cnt; /* number of used maps */
	u32 used_btf_cnt; /* number of used BTF objects */
	u32 insn_array_map_cnt; /* number of used insn-array maps */
	u32 id_gen; /* used to generate unique reg IDs */
	u32 hidden_subprog_cnt; /* number of hidden subprogs */
	int exception_callback_subprog; /* index of the exception callback subprog, if any */
bool explore_alu_limits;
bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset
	 * are always allowed; this refers to reads (fixed or variable offset),
	 * writes with variable offset and indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
bool bpf_capable;
bool bypass_spec_v1;
bool bypass_spec_v4;
bool seen_direct_write;
bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo; /* last line info printed to the log */
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
union {
struct bpf_idmap idmap_scratch;
struct bpf_idset idset_scratch;
};
	struct {
		int *insn_state;
		int *insn_stack;
		/* vector of instruction indexes sorted in post-order */
		int *insn_postorder;
		int cur_stack;
		/* current position in the insn_postorder vector */
		int cur_postorder;
	} cfg; /* scratch data for the CFG check */
struct backtrack_state bt;
struct bpf_jmp_history_entry *cur_hist_ent;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	u64 verification_time; /* total verification time */
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	u32 total_states; /* total number of allocated verifier states */
	/* Some states are freed during program analysis; this is the peak
	 * number of states, which dominates kernel memory consumption
	 * during verification.
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	u32 free_list_size; /* current number of states on the free list */
	u32 explored_states_size; /* current number of cached explored states */
u32 num_backedges;
bpfptr_t fd_array;
	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* scratch registers, e.g. to materialize constants while checking
	 * conditional jumps
	 */
	struct bpf_reg_state fake_reg[2];
	/* buffer used to generate temporary string representations,
	 * e.g. in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE]; /* scratch buffer for insn rewrites */
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE]; /* scratch buffer for generated epilogues */
	struct bpf_scc_callchain callchain_buf; /* scratch buffer for the current callchain */
	struct bpf_liveness *liveness; /* stack liveness tracking state */
	struct bpf_scc_info **scc_info; /* per-SCC bookkeeping, indexed by SCC id */
	u32 scc_cnt; /* number of SCCs in the program CFG */
	struct bpf_iarray *succ; /* scratch successor list for bpf_insn_successors() */
	struct bpf_iarray *gotox_tmp_buf; /* scratch buffer for indirect-jump target sets */
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
return &env->prog->aux->func_info_aux[subprog];
}
static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
return &env->subprog_info[subprog];
}
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...);
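/* Report an inconsistency in the verifier's own logic: warn once and write
 * to the log. verifier_bug_if() evaluates to the condition, so callers can
 * bail out, e.g. (a sketch):
 *
 *	if (verifier_bug_if(spi < 0, env, "invalid spi %d", spi))
 *		return -EFAULT;
 */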
#define verifier_bug_if(cond, env, fmt, args...) \
({ \
bool __cond = (cond); \
if (unlikely(__cond)) \
verifier_bug(env, fmt " (" #cond ")", ##args); \
(__cond); \
})
#define verifier_bug(env, fmt, args...) \
({ \
BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args); \
bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \
})
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
return cur->frame[cur->curframe];
}
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
return cur_func(env)->regs;
}
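/* hooks for verifying programs offloaded to a device */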
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
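/* Build a 64-bit key identifying an attach target: the target prog ID or
 * BTF object ID in the upper 32 bits and the BTF type ID in the lower bits,
 * with bit 31 set for BTF-object targets so the two key spaces cannot
 * collide.
 */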
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
struct btf *btf, u32 btf_id)
{
if (tgt_prog)
return ((u64)tgt_prog->aux->id << 32) | btf_id;
else
return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
if (obj_id)
*obj_id = key >> 32;
if (btf_id)
*btf_id = key & 0x7FFFFFFF;
}
int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *prog,
const struct bpf_prog *tgt_prog,
u32 btf_id,
struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
int mark_chain_precision(struct bpf_verifier_env *env, int regno);
#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
static inline u32 base_type(u32 type)
{
return type & BPF_BASE_TYPE_MASK;
}
static inline u32 type_flag(u32 type)
{
return type & ~BPF_BASE_TYPE_MASK;
}
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
prog->aux->saved_dst_prog_type : prog->type;
}
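/* whether programs of this type must be protected against recursion when
 * invoked through a trampoline
 */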
static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
switch (resolve_prog_type(prog)) {
case BPF_PROG_TYPE_TRACING:
return prog->expected_attach_type != BPF_TRACE_ITER;
case BPF_PROG_TYPE_STRUCT_OPS:
return prog->aux->jits_use_priv_stack;
case BPF_PROG_TYPE_LSM:
case BPF_PROG_TYPE_SYSCALL:
return false;
default:
return true;
}
}
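/* type flags that do not make a trusted PTR_TO_BTF_ID pointer unsafe */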
#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}
static inline bool type_is_ptr_alloc_obj(u32 type)
{
return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}
static inline bool type_is_non_owning_ref(u32 type)
{
return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}
static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
type = base_type(type);
return type == PTR_TO_PACKET ||
type == PTR_TO_PACKET_META;
}
static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_SOCKET ||
type == PTR_TO_SOCK_COMMON ||
type == PTR_TO_TCP_SOCK ||
type == PTR_TO_XDP_SOCK;
}
static inline bool type_may_be_null(u32 type)
{
return type & PTR_MAYBE_NULL;
}
static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
env->scratched_regs |= 1U << regno;
}
static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
env->scratched_stack_slots |= 1ULL << spi;
}
static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
return (env->scratched_regs >> regno) & 1;
}
static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
return (env->scratched_stack_slots >> regno) & 1;
}
static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
return env->scratched_regs || env->scratched_stack_slots;
}
static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
env->scratched_regs = 0U;
env->scratched_stack_slots = 0ULL;
}
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
env->scratched_regs = ~0U;
env->scratched_stack_slots = ~0ULL;
}
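/* A narrow fill (read) of a wider spill is only tracked precisely when it
 * reads the value's low bytes. On little-endian those sit at the start of
 * the 8-byte slot; on big-endian they sit at the end, hence the offset
 * adjustment below.
 */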
static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
off -= spill_size - fill_size;
#endif
return !(off % BPF_REG_SIZE);
}
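/* an indirect jump ("gotox"): BPF_JMP|BPF_JA with an X (register) source */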
static inline bool insn_is_gotox(struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_JMP &&
BPF_OP(insn->code) == BPF_JA &&
BPF_SRC(insn->code) == BPF_X;
}
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
u32 frameno);
struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
int bpf_jmp_offset(struct bpf_insn *insn);
struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
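/* Callchain-sensitive tracking of stack slot liveness: write marks are
 * collected per instruction and committed with
 * bpf_commit_stack_write_marks(); read marks propagate up the callchain;
 * bpf_stack_slot_alive() then tells state pruning which slots may still be
 * read. A summary of intended use; details live with the implementation.
 */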
int bpf_stack_liveness_init(struct bpf_verifier_env *env);
void bpf_stack_liveness_free(struct bpf_verifier_env *env);
int bpf_update_live_stack(struct bpf_verifier_env *env);
int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */