__PAGE_SIZE
size = round_up(size, __PAGE_SIZE);
/* Alias the conventional PAGE_SIZE name to __PAGE_SIZE — presumably supplied
 * externally (e.g. a -D compiler flag or an earlier header); TODO confirm where
 * __PAGE_SIZE is defined, and note this define appears twice in a row here. */
#define PAGE_SIZE __PAGE_SIZE
#define PAGE_SIZE __PAGE_SIZE
const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info);
if (likely(off < __PAGE_SIZE - size)) \
if (likely(off < __PAGE_SIZE - size)) { \
/* Maximum number of struct tld_metadata entries that fit in one page,
 * less one slot — presumably reserved for a header/count field sharing
 * the page; NOTE(review): confirm against the page layout at the use site. */
#define TLD_MAX_DATA_CNT (__PAGE_SIZE / sizeof(struct tld_metadata) - 1)
char data[__PAGE_SIZE - sizeof(__u64)];
/* Alias PAGE_SIZE to the externally provided __PAGE_SIZE — TODO confirm the
 * definition site of __PAGE_SIZE; this define is duplicated on the next line. */
#define PAGE_SIZE __PAGE_SIZE
#define PAGE_SIZE __PAGE_SIZE
sinfo_sz = __PAGE_SIZE - XDP_PACKET_HEADROOM -
page2 = page1 + __PAGE_SIZE;
page3 = page1 + __PAGE_SIZE * 2;
page4 = page1 - __PAGE_SIZE;
page2 = page1 + __PAGE_SIZE;
page3 = page1 + __PAGE_SIZE * 2;
page4 = page1 - __PAGE_SIZE;
__ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
page += __PAGE_SIZE;
__ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
page += __PAGE_SIZE;
/* Number of __PAGE_SIZE pages needed to span a full 32-bit (4 GiB) range:
 * __builtin_ffs(__PAGE_SIZE) is the 1-based position of the (single) set bit,
 * so 32 - ffs + 1 == 32 - log2(__PAGE_SIZE); e.g. for a 4 KiB page this is
 * 1UL << 20 == 1,048,576 pages. Assumes __PAGE_SIZE is a power of two. */
#define ARENA_PAGES (1UL<< (32 - __builtin_ffs(__PAGE_SIZE) + 1))
__ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
__ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
__ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
__ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * ARENA_PAGES + 1));
page = base = arena_base(&arena) + 4096 * __PAGE_SIZE;
ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4);
addr = arena_base(&arena) + 32768 * __PAGE_SIZE;
ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2);
bpf_arena_free_pages(&arena, addr + __PAGE_SIZE , 2);
page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0);