#ifndef __BPF_EXPERIMENTAL__
#define __BPF_EXPERIMENTAL__
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
/* Annotate a bpf_list_head/bpf_rb_root so BTF records which type and which
 * field the collection contains.
 */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
/* Description
 *	Allocates an object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'meta' parameter is reserved/unused and will be automatically passed
 *	when the macro below is used.
 * Returns
 *	A pointer to an object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_new_impl */
#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is reserved/unused and will be automatically passed
 *	when the macro below is used.
 * Returns
 *	Void.
 */
extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_drop_impl */
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

/* Description
 *	Increment the refcount on a refcounted local kptr, turning the
 *	non-owning reference input into an owning reference in the process.
 *
 *	The 'meta' parameter is reserved/unused and will be automatically passed
 *	when the macro below is used.
 * Returns
 *	An owning reference to the object pointed to by 'kptr'.
 */
extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_refcount_acquire_impl */
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
/* Description
 *	Add a new entry to the beginning of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are reserved for internal use and will
 *	be automatically passed when the macro below is used.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_front_impl(struct bpf_list_head *head,
				    struct bpf_list_node *node,
				    void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_front_impl */
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

/* Description
 *	Add a new entry to the end of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are reserved for internal use and will
 *	be automatically passed when the macro below is used.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_back_impl(struct bpf_list_head *head,
				   struct bpf_list_node *node,
				   void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

/* Description
 *	Remove the entry at the beginning of the BPF linked list.
 * Returns
 *	Pointer to bpf_list_node of deleted entry, or NULL if the list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;

/* Description
 *	Remove the entry at the end of the BPF linked list.
 * Returns
 *	Pointer to bpf_list_node of deleted entry, or NULL if the list is empty.
 */
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
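/* Usage sketch (illustrative, not part of this header): the list head and
 * its spin lock must live in the same map value or global data section, and
 * pushes/pops must happen under that lock. private() stands for the
 * selftests' section-placement helper, and container_of for the selftests'
 * definition in bpf_misc.h; both are assumptions here.
 *
 *	struct elem {
 *		long key;
 *		struct bpf_list_node node;
 *	};
 *
 *	private(LOCK) struct bpf_spin_lock glock;
 *	private(LOCK) struct bpf_list_head ghead __contains(elem, node);
 *
 *	struct elem *e = bpf_obj_new(typeof(*e));
 *	struct bpf_list_node *n;
 *
 *	if (!e)
 *		return 0;
 *	e->key = 42;
 *	bpf_spin_lock(&glock);
 *	bpf_list_push_back(&ghead, &e->node);
 *	n = bpf_list_pop_front(&ghead);
 *	bpf_spin_unlock(&glock);
 *	if (n)
 *		bpf_obj_drop(container_of(n, struct elem, node));
 */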
/* Description
 *	Remove 'node' from rbtree with root 'root'.
 * Returns
 *	Pointer to the removed node, or NULL if 'root' didn't contain 'node'.
 */
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
					     struct bpf_rb_node *node) __ksym;

/* Description
 *	Add 'node' to rbtree with root 'root' using comparator 'less'.
 *
 *	The 'meta' and 'off' parameters are reserved for internal use and will
 *	be automatically passed when the macro below is used.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a tree
 */
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			       bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			       void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_rbtree_add_impl */
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

/* Description
 *	Return the first (leftmost) node in input tree.
 * Returns
 *	Pointer to the node, which is _not_ removed from the tree. If the tree
 *	contains no nodes, returns NULL.
 */
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
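/* Usage sketch (illustrative): same locking rules as the list example above;
 * the less() callback defines the ordering.
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	private(TREE) struct bpf_spin_lock tlock;
 *	private(TREE) struct bpf_rb_root groot __contains(node_data, node);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *	if (!n)
 *		return 0;
 *	n->key = 5;
 *	bpf_spin_lock(&tlock);
 *	bpf_rbtree_add(&groot, &n->node, less);
 *	bpf_spin_unlock(&tlock);
 */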
/* Description
 *	Allocates a percpu object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'meta' parameter is reserved/unused and will be automatically passed
 *	when the macro below is used.
 * Returns
 *	A pointer to a percpu object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated percpu object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is reserved/unused and will be automatically passed
 *	when the macro below is used.
 * Returns
 *	Void.
 */
extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)
/* Open-coded iterator over the VMAs of 'task', starting from address 'addr'.
 * Every _new() must be paired with a _destroy().
 */
struct bpf_iter_task_vma;

extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				 struct task_struct *task,
				 __u64 addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
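/* Usage sketch (illustrative; 'task' must be a trusted task pointer and error
 * handling is trimmed). The bpf_for_each() macro from bpf_helpers.h wraps the
 * same new/next/destroy triple.
 *
 *	struct bpf_iter_task_vma vma_it;
 *	struct vm_area_struct *vma;
 *
 *	bpf_iter_task_vma_new(&vma_it, task, 0);
 *	while ((vma = bpf_iter_task_vma_next(&vma_it)))
 *		bpf_printk("vma %lx-%lx", vma->vm_start, vma->vm_end);
 *	bpf_iter_task_vma_destroy(&vma_it);
 */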
/* Description
 *	Throw a BPF exception from the program, immediately terminating its
 *	execution and unwinding the stack. The supplied 'cookie' parameter
 *	will be the return value of the program when an exception is thrown
 *	and the default exception callback is used. Otherwise, if an exception
 *	callback is set using the '__exception_cb(callback)' declaration tag
 *	on the main program, the 'cookie' parameter will be the callback's only
 *	input argument.
 *
 *	Thus, in case of the default exception callback, 'cookie' is subjected
 *	to the constraints on the program's return value (as with R0 on exit).
 *	Otherwise, the return value of the marked exception callback will be
 *	subjected to the same checks.
 *
 *	Note that throwing an exception with lingering resources (locks,
 *	references, etc.) will lead to a verification error.
 *
 *	Note that callbacks *cannot* call this helper.
 * Returns
 *	Never.
 * Throws
 *	An exception with the specified 'cookie' value.
 */
extern void bpf_throw(u64 cookie) __ksym;
/* Description
 *	Acquire a reference on the exe_file member field belonging to the
 *	mm_struct that is nested within the supplied task_struct. The supplied
 *	task_struct must be trusted/referenced.
 * Returns
 *	A referenced file pointer to the exe_file, or NULL if one is not present.
 */
extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;

/* Description
 *	Release a reference on the supplied file. The supplied file must be
 *	acquired.
 */
extern void bpf_put_file(struct file *file) __ksym;

/* Description
 *	Resolve a pathname for the supplied path and store it within the
 *	supplied buffer. The supplied path must be trusted/referenced.
 * Returns
 *	A positive integer corresponding to the length of the resolved pathname,
 *	including the NULL termination character, stored within the supplied
 *	buffer. On error, a negative integer is returned.
 */
extern int bpf_path_d_path(const struct path *path, char *buf, size_t buf__sz) __ksym;
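/* Usage sketch (illustrative): resolve the path of the current task's
 * executable. Note these kfuncs may be restricted to certain program types
 * (e.g. sleepable LSM) depending on kernel version.
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	struct file *exe;
 *	char buf[256];
 *
 *	exe = bpf_get_task_exe_file(task);
 *	if (!exe)
 *		return 0;
 *	bpf_path_d_path(&exe->f_path, buf, sizeof(buf));
 *	bpf_put_file(exe);
 */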
/* Mark 'name' as this program's exception callback: its return value becomes
 * the program's return value whenever bpf_throw() fires.
 */
#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))
/* Compile-time probe for the signedness of the 64-bit assert operand */
#define __bpf_assert_signed(x) _Generic((x), \
	unsigned long: 0, \
	unsigned long long: 0, \
	signed long: 1, \
	signed long long: 1 \
)

/* Compile-time sanity checks on assert arguments */
#define __bpf_assert_check(LHS, op, RHS) \
	_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
	_Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n"); \
	_Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \
	_Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")
/* Emit a conditional branch around an inline bpf_throw() call, with VAL
 * passed in r1 as the exception cookie.
 */
#define __bpf_assert(LHS, op, cons, RHS, VAL) \
	({ \
		(void)bpf_throw; /* reference the declaration so the ksym call resolves */ \
		asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \
			      : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \
	})
/* Pick the signed ("s"-prefixed) or unsigned form of the branch based on the
 * signedness of LHS.
 */
#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign) \
	({ \
		__bpf_assert_check(LHS, op, RHS); \
		if (__bpf_assert_signed(LHS) && !(supp_sign)) \
			__bpf_assert(LHS, "s" #op, cons, RHS, VAL); \
		else \
			__bpf_assert(LHS, #op, cons, RHS, VAL); \
	})

#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign) \
	({ \
		if (sizeof(typeof(RHS)) == 8) { \
			/* "i" would truncate a 64-bit constant to s32, so use "r" for 64-bit RHS */ \
			const typeof(RHS) rhs_var = (RHS); \
			__bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \
		} else { \
			__bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \
		} \
	})
/* Comparisons whose result is the same for signed and unsigned operands */
#define __cmp_cannot_be_signed(x) \
	__builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
	__builtin_strcmp(#x, "&") == 0

/* True when 'type' is a signed integer type */
#define __is_signed_type(type) (((type)(-1)) < (type)1)
/* Emit the comparison as a single conditional-jump insn so the verifier sees
 * it directly. The (short) cast nudges the compiler to skip a redundant
 * zero/sign extension of LHS; the "r" constraint still passes the full
 * register to the asm.
 */
#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT) \
	({ \
		__label__ l_true; \
		bool ret = DEFAULT; \
		asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]" \
				  :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \
		ret = !DEFAULT; \
l_true: \
		ret; \
	})
/* C type conversions coupled with comparison operators are tricky.
 * Make sure the BPF program is compiled with -Wsign-compare, then the
 * (void)(__lhs OP __rhs) below will catch signedness mistakes.
 * Be aware that only __lhs is checked to figure out the sign of the compare.
 */
#define _bpf_cmp(LHS, OP, RHS, UNLIKELY) \
	({ \
		typeof(LHS) __lhs = (LHS); \
		typeof(RHS) __rhs = (RHS); \
		bool ret; \
		_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
		(void)(__lhs OP __rhs); \
		if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \
			if (sizeof(__rhs) == 8) \
				/* "i" would truncate a 64-bit constant to s32, so use "r" for 64-bit RHS */ \
				ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY); \
			else \
				ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY); \
		} else { \
			if (sizeof(__rhs) == 8) \
				ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY); \
			else \
				ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY); \
		} \
		ret; \
	})
/* bpf_cmp_unlikely() evaluates LHS OP RHS as a single conditional jump whose
 * taken branch is the "true" case; use it when the condition is expected to
 * be rare.
 */
#ifndef bpf_cmp_unlikely
#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
#endif

/* bpf_cmp_likely() is the same comparison with the branch sense inverted, so
 * the fall-through path is the "true" case.
 */
#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS) \
	({ \
		bool ret = 0; \
		if (__builtin_strcmp(#OP, "==") == 0) \
			ret = _bpf_cmp(LHS, !=, RHS, false); \
		else if (__builtin_strcmp(#OP, "!=") == 0) \
			ret = _bpf_cmp(LHS, ==, RHS, false); \
		else if (__builtin_strcmp(#OP, "<=") == 0) \
			ret = _bpf_cmp(LHS, >, RHS, false); \
		else if (__builtin_strcmp(#OP, "<") == 0) \
			ret = _bpf_cmp(LHS, >=, RHS, false); \
		else if (__builtin_strcmp(#OP, ">") == 0) \
			ret = _bpf_cmp(LHS, <=, RHS, false); \
		else if (__builtin_strcmp(#OP, ">=") == 0) \
			ret = _bpf_cmp(LHS, <, RHS, false); \
		else \
			/* unsupported operator: force an assembler error */ \
			asm volatile("r0 " #OP " invalid compare"); \
		ret; \
	})
#endif
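/* Usage sketch (illustrative): keep a bounds check as one conditional jump so
 * the verifier tracks the refined range on the expected path. ARRAY_SIZE and
 * the surrounding variables are hypothetical.
 *
 *	__u64 i = get_index();
 *
 *	if (bpf_cmp_unlikely(i, >=, ARRAY_SIZE))
 *		return 0;
 *	val = array[i];
 */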
/* can_loop and cond_break are built on the may_goto instruction: the jump is
 * taken once the verifier-maintained iteration budget is exhausted, which
 * lets the verifier accept otherwise-unbounded loops. Use the compiler's
 * native may_goto support when available.
 */
#ifdef __BPF_FEATURE_MAY_GOTO
#define can_loop \
	({ __label__ l_break, l_continue; \
	bool ret = true; \
	asm volatile goto("may_goto %l[l_break]" \
			  :::: l_break); \
	goto l_continue; \
l_break: ret = false; \
l_continue:; \
	ret; \
	})

#define __cond_break(expr) \
	({ __label__ l_break, l_continue; \
	asm volatile goto("may_goto %l[l_break]" \
			  :::: l_break); \
	goto l_continue; \
l_break: expr; \
l_continue:; \
	})
#else
/* Compiler has no may_goto support: emit the instruction by hand. 0xe5 is
 * the may_goto opcode; the 16-bit jump offset sits in a different half of
 * the 32-bit field depending on target endianness.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop \
	({ __label__ l_break, l_continue; \
	bool ret = true; \
	asm volatile goto("1:.byte 0xe5; \
			   .byte 0; \
			   .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
			   .short 0" \
			  :::: l_break); \
	goto l_continue; \
l_break: ret = false; \
l_continue:; \
	ret; \
	})

#define __cond_break(expr) \
	({ __label__ l_break, l_continue; \
	asm volatile goto("1:.byte 0xe5; \
			   .byte 0; \
			   .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
			   .short 0" \
			  :::: l_break); \
	goto l_continue; \
l_break: expr; \
l_continue:; \
	})
#else
#define can_loop \
	({ __label__ l_break, l_continue; \
	bool ret = true; \
	asm volatile goto("1:.byte 0xe5; \
			   .byte 0; \
			   .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
			   .short 0" \
			  :::: l_break); \
	goto l_continue; \
l_break: ret = false; \
l_continue:; \
	ret; \
	})

#define __cond_break(expr) \
	({ __label__ l_break, l_continue; \
	asm volatile goto("1:.byte 0xe5; \
			   .byte 0; \
			   .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
			   .short 0" \
			  :::: l_break); \
	goto l_continue; \
l_break: expr; \
l_continue:; \
	})
#endif
#endif
/* Break out of the enclosing loop once the may_goto budget is exhausted */
#define cond_break __cond_break(break)
/* Same, but jump to 'label' instead of breaking */
#define cond_break_label(label) __cond_break(goto label)
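/* Usage sketch (illustrative): either form bounds an otherwise-unbounded
 * loop without an explicit iteration cap.
 *
 *	int i = 0;
 *
 *	while (can_loop) {
 *		if (buf[i] == 0)
 *			break;
 *		i++;
 *	}
 *
 *	for (i = 0; i < n; i++) {
 *		cond_break;
 *		sum += arr[i];
 *	}
 */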
/* Emit a register-to-register move of 'var': a no-op at runtime that can be
 * used to influence how the verifier tracks the value.
 */
#ifndef bpf_nop_mov
#define bpf_nop_mov(var) \
	asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
#endif
/* Emit the instruction:
 *	rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
 * i.e. cast pointer 'var' from address space 'src_as' to 'dst_as' (used with
 * BPF arenas). The .ifc ladder encodes the register number by name.
 */
#ifndef bpf_addr_space_cast
#define bpf_addr_space_cast(var, dst_as, src_as)\
	asm volatile(".byte 0xBF; \
		      .ifc %[reg], r0; \
		      .byte 0x00; \
		      .endif; \
		      .ifc %[reg], r1; \
		      .byte 0x11; \
		      .endif; \
		      .ifc %[reg], r2; \
		      .byte 0x22; \
		      .endif; \
		      .ifc %[reg], r3; \
		      .byte 0x33; \
		      .endif; \
		      .ifc %[reg], r4; \
		      .byte 0x44; \
		      .endif; \
		      .ifc %[reg], r5; \
		      .byte 0x55; \
		      .endif; \
		      .ifc %[reg], r6; \
		      .byte 0x66; \
		      .endif; \
		      .ifc %[reg], r7; \
		      .byte 0x77; \
		      .endif; \
		      .ifc %[reg], r8; \
		      .byte 0x88; \
		      .endif; \
		      .ifc %[reg], r9; \
		      .byte 0x99; \
		      .endif; \
		      .short %[off]; \
		      .long %[as]" \
		      : [reg]"+r"(var) \
		      : [off]"i"(BPF_ADDR_SPACE_CAST) \
		      , [as]"i"((dst_as << 16) | src_as));
#endif
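/* Usage sketch (illustrative, assumes a BPF arena map): cast an arena pointer
 * from the arena address space (1) to the default one (0) before
 * dereferencing it; the selftests' cast_kern() helper wraps exactly this.
 *
 *	int *p = get_arena_slot();   hypothetical pointer into an arena
 *
 *	bpf_addr_space_cast(p, 0, 1);
 *	*p = 42;
 */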
void bpf_preempt_disable(void) __weak __ksym;
void bpf_preempt_enable(void) __weak __ksym;

/* Scope-based preemption guard: the empty struct is constructed and
 * destroyed via the cleanup attribute, so preemption is re-enabled
 * automatically when the enclosing scope exits.
 */
typedef struct {
} __bpf_preempt_t;

static inline __bpf_preempt_t __bpf_preempt_constructor(void)
{
	__bpf_preempt_t ret = {};

	bpf_preempt_disable();
	return ret;
}

static inline void __bpf_preempt_destructor(__bpf_preempt_t *t)
{
	bpf_preempt_enable();
}

/* Disable preemption for the remainder of the current scope;
 * ___bpf_apply() from bpf_helpers.h generates a unique variable name.
 */
#define bpf_guard_preempt() \
	__bpf_preempt_t ___bpf_apply(preempt, __COUNTER__) \
	__attribute__((__unused__, __cleanup__(__bpf_preempt_destructor))) = \
	__bpf_preempt_constructor()
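/* Usage sketch (illustrative): preemption is disabled from the macro until
 * the enclosing scope ends.
 *
 *	{
 *		bpf_guard_preempt();
 *		update_per_cpu_state();   hypothetical per-CPU work
 *	}
 */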
/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert(cond) if (!(cond)) bpf_throw(0);

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both).
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_range(LHS, BEG, END) \
	({ \
		_Static_assert(BEG <= END, "BEG must be <= END"); \
		barrier_var(LHS); \
		__bpf_assert_op(LHS, >=, BEG, 0, false); \
		__bpf_assert_op(LHS, <=, END, 0, false); \
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both).
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_range_with(LHS, BEG, END, value) \
	({ \
		_Static_assert(BEG <= END, "BEG must be <= END"); \
		barrier_var(LHS); \
		__bpf_assert_op(LHS, >=, BEG, value, false); \
		__bpf_assert_op(LHS, <=, END, value, false); \
	})
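/* Usage sketch (illustrative): the exception callback's return value becomes
 * the program's return value when an assertion throws.
 *
 *	static __noinline int exc_cb(u64 cookie)
 *	{
 *		return cookie == 42 ? 1 : 0;
 *	}
 *
 *	SEC("tc")
 *	__exception_cb(exc_cb)
 *	int prog(struct __sk_buff *ctx)
 *	{
 *		bpf_assert_with(ctx->len <= 1500, 42);
 *		return 1;
 *	}
 */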
/* Open-coded iterators over css tasks, task hierarchies and css hierarchies;
 * each _new() must be paired with a _destroy(). 'flags' select the traversal
 * mode (values such as BPF_TASK_ITER_ALL_PROCS come from vmlinux.h).
 */
struct bpf_iter_css_task;
struct cgroup_subsys_state;

extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
				 struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;

struct bpf_iter_task;
extern int bpf_iter_task_new(struct bpf_iter_task *it,
			     struct task_struct *task, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;

struct bpf_iter_css;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
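/* Usage sketch (illustrative): walk every process; 'task' may be NULL for
 * the whole-system modes. Depending on program type the iterator may need to
 * run under bpf_rcu_read_lock().
 *
 *	struct bpf_iter_task task_it;
 *	struct task_struct *cur;
 *
 *	bpf_iter_task_new(&task_it, NULL, BPF_TASK_ITER_ALL_PROCS);
 *	while ((cur = bpf_iter_task_next(&task_it)))
 *		bpf_printk("pid=%d", cur->pid);
 *	bpf_iter_task_destroy(&task_it);
 */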
/* BPF workqueue: initialize a bpf_wq embedded in a value of map 'p__map',
 * then schedule it with bpf_wq_start(). The callback is attached separately
 * (e.g. via bpf_wq_set_callback_impl(), which is not declared here).
 */
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;

/* Open-coded iterator over the slab kmem_caches */
struct bpf_iter_kmem_cache;
extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;

/* Open-coded iterator over the system's dma-buf objects */
struct bpf_iter_dmabuf;
extern int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) __weak __ksym;
extern struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) __weak __ksym;
extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym;

/* Read the xattr 'name__str' of 'cgroup' into the dynptr 'value_p' */
extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
				 struct bpf_dynptr *value_p) __weak __ksym;
/* Mirror of the kernel's preempt_count layout (include/linux/preempt.h):
 * bits 0-7 hold the preempt count, 8-15 the softirq count, 16-19 the
 * hardirq count and 20-23 the NMI count.
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
extern bool CONFIG_PREEMPT_RT __kconfig __weak;

#ifdef bpf_target_x86
/* Newer x86 kernels expose the per-CPU preempt count as __preempt_count;
 * older ones keep it inside struct pcpu_hot, matched here via CO-RE.
 */
extern const int __preempt_count __ksym __weak;

struct pcpu_hot___local {
	int preempt_count;
} __attribute__((preserve_access_index));

extern struct pcpu_hot___local pcpu_hot __ksym __weak;
#endif

/* CO-RE shadow of task_struct for the softirq_disable_cnt field, which only
 * exists on PREEMPT_RT kernels.
 */
struct task_struct___preempt_rt {
	int softirq_disable_cnt;
} __attribute__((preserve_access_index));
/* Read the current CPU's preempt count, or 0 on architectures where we don't
 * know how to find it.
 */
static inline int get_preempt_count(void)
{
#if defined(bpf_target_x86)
	if (bpf_ksym_exists(&__preempt_count))
		return *(int *) bpf_this_cpu_ptr(&__preempt_count);
	if (bpf_core_field_exists(pcpu_hot.preempt_count))
		return ((struct pcpu_hot___local *)bpf_this_cpu_ptr(&pcpu_hot))->preempt_count;
#elif defined(bpf_target_arm64)
	return bpf_get_current_task_btf()->thread_info.preempt.count;
#endif
	return 0;
}
/* Non-zero when running in any interrupt context (NMI, hardirq or softirq).
 * On PREEMPT_RT kernels softirq serving is tracked per task in
 * softirq_disable_cnt rather than in the preempt count.
 */
static inline int bpf_in_interrupt(void)
{
	struct task_struct___preempt_rt *tsk;
	int pcnt;

	pcnt = get_preempt_count();
	if (!CONFIG_PREEMPT_RT)
		return pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK);

	tsk = (void *) bpf_get_current_task_btf();
	return (pcnt & (NMI_MASK | HARDIRQ_MASK)) |
	       (tsk->softirq_disable_cnt & SOFTIRQ_MASK);
}
/* Non-zero when running in NMI context */
static inline int bpf_in_nmi(void)
{
	return get_preempt_count() & NMI_MASK;
}

/* Non-zero when running in hardirq context */
static inline int bpf_in_hardirq(void)
{
	return get_preempt_count() & HARDIRQ_MASK;
}
/* Non-zero when currently serving a softirq */
static inline int bpf_in_serving_softirq(void)
{
	struct task_struct___preempt_rt *tsk;
	int pcnt;

	pcnt = get_preempt_count();
	if (!CONFIG_PREEMPT_RT)
		return (pcnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET;

	tsk = (void *) bpf_get_current_task_btf();
	return (tsk->softirq_disable_cnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET;
}
/* Non-zero when running in plain task (process) context */
static inline int bpf_in_task(void)
{
	struct task_struct___preempt_rt *tsk;
	int pcnt;

	pcnt = get_preempt_count();
	if (!CONFIG_PREEMPT_RT)
		return !(pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	tsk = (void *) bpf_get_current_task_btf();
	return !((pcnt & (NMI_MASK | HARDIRQ_MASK)) |
		 ((tsk->softirq_disable_cnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET));
}
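/* Usage sketch (illustrative): gate work on execution context.
 *
 *	if (bpf_in_nmi())
 *		return 0;               too fragile a context, bail out
 *	if (bpf_in_task())
 *		do_task_only_work();    hypothetical task-context-only helper
 */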
#endif /* __BPF_EXPERIMENTAL__ */