__arena
void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
void __arena *__mptr = (void __arena *)(ptr); \
void __arena *scx_alloc_from_pool(struct sdt_pool *pool)
void __arena *slab;
void __arena *ptr;
ptr = (void __arena *)((__u64) pool->slab + elem_size * pool->idx);
struct sdt_chunk __arena *chunk;
__u64 __arena *allocated = desc->allocated;
sdt_desc_t * __arena *desc_children;
struct sdt_chunk __arena *chunk;
struct sdt_data __arena *data;
desc_children = (sdt_desc_t * __arena *)chunk->descs;
sdt_desc_t * __arena *desc_children;
struct sdt_chunk __arena *chunk;
stat_inc_##metric(struct scx_stats __arena *stats) \
desc_children = (sdt_desc_t * __arena *)chunk->descs;
void __arena *scx_alloc(struct scx_allocator *alloc)
struct sdt_data __arena *data = NULL;
struct sdt_chunk __arena *chunk;
struct sdt_data __arena *data;
void __arena *scx_task_alloc(struct task_struct *p)
struct sdt_data __arena *data = NULL;
return (void __arena *)data->payload;
void __arena *scx_task_data(struct task_struct *p)
struct sdt_data __arena *data;
return (void __arena *)data->payload;
scx_stat_global_update(struct scx_stats __arena *stats)
struct scx_stats __arena *stats;
struct scx_stats __arena *stats;
struct scx_stats __arena *stats;
struct scx_stats __arena *stats;
void __arena *scx_task_data(struct task_struct *p);
void __arena *scx_task_alloc(struct task_struct *p);
void __arena *slab;
struct sdt_chunk __arena *chunk;
struct sdt_data __arena *data[SDT_TASK_ENTS_PER_CHUNK];
static void __arena * __arena page_frag_cur_page[NR_CPUS];
static int __arena page_frag_cur_offset[NR_CPUS];
static inline void __arena* bpf_alloc(unsigned int size)
__u64 __arena *obj_cnt;
void __arena *page = page_frag_cur_page[cpu];
int __arena *cur_offset = &page_frag_cur_offset[cpu];
static inline void bpf_free(void __arena *addr)
__u64 __arena *obj_cnt;
addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1));
/* Stub allocator that always fails (returns NULL) — presumably the fallback
 * used when the real arena allocator is compiled out; TODO confirm against
 * the enclosing #if/#else in the original file. */
static inline void __arena* bpf_alloc(unsigned int size) { return NULL; }
/* No-op free matching the always-failing bpf_alloc stub above in the original
 * file — presumably the fallback when arena support is compiled out; TODO
 * confirm against the enclosing #if/#else. */
static inline void bpf_free(void __arena *addr) {}
void __arena *__mptr = (void __arena *)(ptr); \
void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
int bpf_arena_reserve_pages(void *map, void __arena *addr, __u32 page_cnt) __ksym __weak;
void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
#define arena_base(map) ((void __arena *)((struct bpf_arena *)(map))->user_vm_start)
static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt)
void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
arena_list_node_t * __arena *pprev;
struct arena_list_node __arena *first;
pos = list_entry_safe((void __arena *)___tmp, typeof(*(pos)), member))
arena_list_node_t *first = h->first, * __arena *tmp;
arena_list_node_t * __arena *pprev = n->pprev;
#define LIST_POISON1 ((void __arena *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void __arena *) 0x122 + POISON_POINTER_DELTA)
__noinline bool glob_match(char const __arena *pat __arg_arena, char const __arena *str __arg_arena)
char const __arena *back_pat = NULL, *back_str;
__noinline int bpf_arena_strlen(const char __arena *s __arg_arena)
char const __arena *class = pat + inverted;
const char __arena *sc;
struct elem __arena *n;
__u32 __arena *page32;
__u64 __arena *page64;
void __arena *page;
page32 = (__u32 __arena *)page;
page64 = (__u64 __arena *)page;
void __arena *htab_for_user;
char __arena arr1[100000];
struct htab __arena *htab;
char __arena *arr = arr1;
struct arena_list_head __arena *list_head;
long __arena arena_sum;
int __arena test_val = 1;
struct arena_list_head __arena global_head;
struct elem __arena *n = bpf_alloc(sizeof(*n));
struct elem __arena *n;
arena_spinlock_t __arena lock;
char const __arena *p = glob_tests;
char const __arena *pat = p;
char const __arena *pat, *str;
static bool test(char const __arena *pat, char const __arena *str, bool expected)
static const char __arena glob_tests[] =
struct arena_qnode __arena qnodes[_Q_MAX_CPUS][_Q_MAX_NODES];
static inline struct arena_mcs_spinlock __arena *decode_tail(u32 tail)
struct arena_mcs_spinlock __arena *grab_mcs_node(struct arena_mcs_spinlock __arena *base, int idx)
return &((struct arena_qnode __arena *)base + idx)->mcs;
static __always_inline u32 xchg_tail(arena_spinlock_t __arena *lock, u32 tail)
static __always_inline void clear_pending(arena_spinlock_t __arena *lock)
#ifndef __arena
static __always_inline void clear_pending_set_locked(arena_spinlock_t __arena *lock)
static __always_inline void set_locked(arena_spinlock_t __arena *lock)
u32 arena_fetch_set_pending_acquire(arena_spinlock_t __arena *lock)
static __always_inline int arena_spin_trylock(arena_spinlock_t __arena *lock)
int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val)
struct arena_mcs_spinlock __arena *prev, *next, *node0, *node;
static __always_inline int arena_spin_lock(arena_spinlock_t __arena *lock)
static __always_inline void arena_spin_unlock(arena_spinlock_t __arena *lock)
struct arena_mcs_spinlock __arena *next;
int __arena *addr = (int __arena *)0xdeadbeef;
int __arena *addr = (int __arena *)0xdeadbeef;
volatile char __arena *page1, *page2, *page3, *page4;
bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
volatile char __arena *page1, *page2, *page3, *page4;
bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
volatile char __arena *pages;
volatile char __arena *pages;
char __arena *page;
char __arena *page;
char __arena *page;
char __arena *page;
volatile int __arena *page1, *page2, *no_page;
char __arena *page;
char __arena *page;
char __arena *page;
char __arena *page;
char __arena *page;
bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
volatile int __arena *page1, *page2, *no_page, *page3;
bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
volatile char __arena global_data[GLOBAL_PAGES][PAGE_SIZE];
__u8 __arena *guard, *globals;
volatile char __arena *ptr;
guard = (void __arena *)arena_base(&arena);
globals = (void __arena *)(arena_base(&arena) + (ARENA_PAGES - GLOBAL_PAGES) * PAGE_SIZE);
u8 __arena *ptr;
ptr = (u8 __arena *)((u64)(ARENA_PAGES * PAGE_SIZE - PAGE_SIZE / 2));
char __arena global_data[ARENA_PAGES][PAGE_SIZE];
void __arena *guard;
guard = (void __arena *)arena_base(&arena);
page = (volatile char __arena *)(base + i * PAGE_SIZE);
volatile char __arena *page;
char __arena *base;
char __arena *addr;
char __arena *page;
__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
__u8 __arena *base;
__u8 __arena *pg;
__u8 __arena *pg;
bpf_arena_free_pages(&arena, (void __arena *)base, 1);
bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
volatile char __arena *page1, *page2, *no_page, *page3;
bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
char __arena *pages;
page2 = bpf_arena_alloc_pages(&arena, (void __arena *)(ARENA_SIZE - 2 * PAGE_SIZE),
no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE - PAGE_SIZE,
no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE,
bpf_arena_free_pages(&arena, (void __arena *)page1, 1);
volatile char __arena *page;
char __arena *base;