vmlinux
void *vmlinux;

/* Map the whole image read-only; the size comes from a prior fstat(). */
vmlinux = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, vmlinux_fd, 0);
if (vmlinux == MAP_FAILED) {
eh = vmlinux;	/* the ELF header sits at offset 0 of the mapping */
/* i-th section header, converting the little-endian on-disk fields */
sh = vmlinux + le64toh(eh->e_shoff) + (i * le16toh(eh->e_shentsize));
vmlinux + le64toh(sh->sh_offset),	/* start of the section's contents */
munmap(vmlinux, st.st_size);
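/*
 * A self-contained sketch of the mmap-and-walk pattern above, assuming a
 * 64-bit little-endian vmlinux. Only the mapping and section-header
 * arithmetic come from the fragment; main() and the prints are invented
 * for illustration.
 */
#include <elf.h>
#include <endian.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0) {
		perror(argv[1]);
		return 1;
	}

	void *vmlinux = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (vmlinux == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	Elf64_Ehdr *eh = vmlinux;	/* ELF header at offset 0 */
	for (int i = 0; i < le16toh(eh->e_shnum); i++) {
		/* i-th section header, honouring the on-disk endianness */
		Elf64_Shdr *sh = (Elf64_Shdr *)((char *)vmlinux +
				le64toh(eh->e_shoff) +
				i * le16toh(eh->e_shentsize));
		printf("section %d: offset %#lx size %#lx\n", i,
		       (unsigned long)le64toh(sh->sh_offset),
		       (unsigned long)le64toh(sh->sh_size));
	}
	munmap(vmlinux, st.st_size);
	close(fd);
	return 0;
}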
/* powerpc boot wrapper: move the initrd if the loader put it on top of the
 * unpacked kernel. */
static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
if (initrd_addr < vmlinux.size) {
/* Same overlap check for the secure-VM ESM blob ... */
static void prep_esm_blob(struct addr_range vmlinux, void *chosen)
if (esm_blob_addr < vmlinux.size) {
/* ... with an empty stub when ESM support is compiled out. */
static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { }
/* Entry flow: unpack the kernel, fix up initrd/ESM, then jump to it. */
struct addr_range vmlinux, initrd;
vmlinux = prep_kernel();
initrd = prep_initrd(vmlinux, chosen,
prep_esm_blob(vmlinux, chosen);
kentry = (kernel_entry_t) vmlinux.addr;
platform_ops.kentry(ft_addr, vmlinux.addr);
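/*
 * A hedged, self-contained model of the wrapper flow above, not the real
 * arch/powerpc/boot code: prep_kernel() and prep_initrd() are stubs that
 * fabricate address ranges so the handoff sequence can be followed and run.
 */
#include <stdio.h>

struct addr_range { void *addr; unsigned long size; };

static struct addr_range prep_kernel(void)
{
	/* Real code unpacks vmlinux and returns where it landed. */
	return (struct addr_range){ (void *)0x0, 0x1400000 };
}

static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen)
{
	struct addr_range initrd = { (void *)0x2000000, 0x800000 };

	(void)chosen;	/* real code records the initrd in /chosen */
	/* Relocate the initrd if the loader dropped it on top of the kernel. */
	if ((unsigned long)initrd.addr < vmlinux.size)
		printf("initrd overlaps the kernel; would relocate it\n");
	return initrd;
}

int main(void)
{
	struct addr_range vmlinux, initrd;
	void *chosen = NULL;

	vmlinux = prep_kernel();
	initrd = prep_initrd(vmlinux, chosen);
	/* Real code now casts vmlinux.addr to kernel_entry_t and jumps. */
	printf("would enter the kernel at %p (initrd %p, %lu bytes)\n",
	       vmlinux.addr, initrd.addr, initrd.size);
	return 0;
}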
/* Tail of a __decompress() call: inflate the payload into `output`. */
NULL, NULL, output, vmlinux.image_size, NULL, decompress_error);
/* Already-unpacked image: slide it into place and scrub the staging copy. */
memmove(output, uncompressed_start, vmlinux.image_size);
memset(uncompressed_start, 0, vmlinux.image_size);
/* Sanity-check that the linker-provided boot data matches the recorded
 * sizes before copying it to its runtime location. */
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
/* Rebase every 64-bit GOT entry after the image has moved. */
for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
/* .bss follows the image proper and must start out zeroed. */
memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
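/*
 * Sketch of the GOT fix-up pattern in the loop above: once the image has
 * moved by a KASLR offset, each 64-bit GOT slot still holds a link-time
 * address and must be rebased. A toy array stands in for the real .got
 * section; the function name mirrors the source but the rest is invented.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static void kaslr_adjust_got(u64 *got_start, u64 *got_end, unsigned long offset)
{
	for (u64 *entry = got_start; entry < got_end; entry++)
		*entry += offset;	/* rebase one GOT slot */
}

int main(void)
{
	u64 got[3] = { 0x1000, 0x2000, 0x3000 };	/* fake link-time addresses */

	kaslr_adjust_got(got, got + 3, 0x80000000UL);
	for (int i = 0; i < 3; i++)
		printf("got[%d] = %#lx\n", i, (unsigned long)got[i]);
	return 0;
}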
/* KASLR: rebase every absolute address recorded in the vmlinux info block. */
vmlinux.bootdata_off += offset;
vmlinux.bootdata_preserved_off += offset;
vmlinux.got_start += offset;
vmlinux.got_end += offset;
vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset;
vmlinux.alt_instructions += offset;
vmlinux.alt_instructions_end += offset;
vmlinux.stack_prot_start += offset;
vmlinux.stack_prot_end += offset;
vmlinux.kasan_early_shadow_page_off += offset;
vmlinux.kasan_early_shadow_pte_off += offset;
vmlinux.kasan_early_shadow_pmd_off += offset;
vmlinux.kasan_early_shadow_pud_off += offset;
vmlinux.kasan_early_shadow_p4d_off += offset;
/* s390 startup: the footprint is the loaded image plus the zeroed .bss. */
unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
/* Try a randomized spot for the amode31 section first ... */
amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
/* ... and fall back to just below the kernel text if that fails. */
amode31_lma = text_lma - vmlinux.amode31_size;
physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
/* Patch relocations for the new load address, then point the PSW at the
 * relocated entry point. */
kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
psw.addr = __kaslr_offset + vmlinux.entry;
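/*
 * The amode31 placement above tries a randomized address and falls back to
 * a fixed spot below the kernel text. A toy version of that pattern;
 * randomize_within_range() here is a stand-in that can "fail" by returning
 * 0, matching how the fallback branch in the fragment is reached.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define SZ_2G (2UL << 30)

static unsigned long randomize_within_range(unsigned long size,
					    unsigned long align,
					    unsigned long min,
					    unsigned long max)
{
	unsigned long slots;

	if (max < min + size)
		return 0;	/* no room: signal failure */
	slots = (max - min - size) / align + 1;
	return min + (rand() % slots) * align;	/* aligned pick in [min, max) */
}

int main(void)
{
	unsigned long amode31_size = 0x10000, text_lma = 0x40000000;
	unsigned long amode31_min = PAGE_SIZE;
	unsigned long amode31_lma;

	amode31_lma = randomize_within_range(amode31_size, PAGE_SIZE,
					     amode31_min, SZ_2G);
	if (!amode31_lma)
		amode31_lma = text_lma - amode31_size;	/* deterministic fallback */
	printf("amode31 at %#lx\n", amode31_lma);
	return 0;
}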
/*
 * Decompressor-side aliases: code shared with the kernel keeps using the
 * familiar symbol names, but here they resolve to the relocated addresses
 * recorded in the vmlinux info block.
 */
#define init_mm (*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir vmlinux.swapper_pg_dir_off
#define invalid_pg_dir vmlinux.invalid_pg_dir_off
#define kasan_early_shadow_page vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte ((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd ((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud ((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d ((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
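/*
 * A minimal demonstration of the aliasing trick above, with an invented,
 * much-simplified info struct: shared code reads like normal kernel code,
 * while the macro silently redirects the name to a recorded address.
 */
#include <stdio.h>

struct {
	unsigned long init_mm_off;
	unsigned long swapper_pg_dir_off;
} vmlinux = { 0x1000, 0x2000 };

#define swapper_pg_dir vmlinux.swapper_pg_dir_off

static void shared_code(void)
{
	/* Looks like a plain symbol reference, resolves through the macro. */
	printf("swapper_pg_dir at %#lx\n", swapper_pg_dir);
}

int main(void)
{
	shared_code();
	return 0;
}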
/* Bounds of the recorded range, cast for word-wise iteration. */
start = (unsigned long *)vmlinux.stack_prot_start;
end = (unsigned long *)vmlinux.stack_prot_end;
/* Livepatch test: the object being patched is the core kernel, vmlinux. */
#define KLP_OBJNAME vmlinux
/* Detect the linked vmlinux.o by suffix; its function symbols are handled
 * specially. */
bool vmlinux = str_ends_with(objname, "vmlinux.o");
if (vmlinux && sym->type == STT_FUNC) {
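/*
 * The vmlinux.o test above hinges on a suffix check. Only the call site is
 * visible in the fragment, so here is a from-scratch str_ends_with() with
 * the obvious semantics.
 */
#include <stdbool.h>
#include <string.h>

static bool str_ends_with(const char *s, const char *sub)
{
	const size_t slen = strlen(s), sublen = strlen(sub);

	if (sublen > slen)
		return false;
	return !memcmp(s + slen - sublen, sub, sublen);
}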
/* perf: model the host kernel as a "machine", build its maps, and pull
 * symbols in via the vmlinux search path. */
struct machine vmlinux;
machine__init(&vmlinux, "", HOST_KERNEL_ID);
maps = machine__kernel_maps(&vmlinux);
if (machine__create_kernel_maps(&vmlinux) < 0) {
args.vmlinux_map = machine__kernel_map(&vmlinux);
if (machine__load_vmlinux_path(&vmlinux) <= 0) {
machine__exit(&vmlinux);	/* tear the machine down again */
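/*
 * The perf fragment above, reassembled into one hedged sketch. It uses
 * only calls already visible in the fragment and assumes it is built
 * inside tools/perf against its internal headers; the error-path shapes
 * are inferred from the <0 / <=0 checks shown, not from the real file.
 */
#include "util/machine.h"	/* assumption: tools/perf internal header */
#include "util/map.h"

static int load_host_kernel_symbols(void)
{
	struct machine vmlinux;
	struct map *kmap;
	int ret = -1;

	machine__init(&vmlinux, "", HOST_KERNEL_ID);
	if (machine__create_kernel_maps(&vmlinux) < 0)
		goto out;
	/* >0 means symbols were found along the vmlinux search path */
	if (machine__load_vmlinux_path(&vmlinux) <= 0)
		goto out;
	kmap = machine__kernel_map(&vmlinux);
	ret = kmap ? 0 : -1;
out:
	machine__exit(&vmlinux);
	return ret;
}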
/* perf symbol loading: an absolute vmlinux path is taken as-is; a relative
 * one is joined with the --symfs root first. */
const char *vmlinux, bool vmlinux_allocated)
if (vmlinux[0] == '/')
snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
symbol__join_symfs(symfs_vmlinux, vmlinux);
free((char *) vmlinux);	/* ownership follows the vmlinux_allocated flag */
dso__set_long_name(dso, vmlinux, vmlinux_allocated);
/* ... and the matching prototype: */
const char *vmlinux, bool vmlinux_allocated);
/* BPF selftests: parse the test module's BTF as split BTF on top of the
 * vmlinux base, so references into kernel types resolve. */
static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
btf = btf__load_from_kernel_by_id_split(id, vmlinux);
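/*
 * Hedged sketch of the split-BTF pattern in the last fragment, using real
 * libbpf calls: btf__load_vmlinux_btf() fetches the base BTF, and the
 * by-id loader parses a module's BTF on top of it. The hard-coded id is an
 * assumption; in practice it would come from iterating kernel BTF objects
 * with bpf_btf_get_next_id().
 */
#include <bpf/btf.h>
#include <stdio.h>

int main(void)
{
	struct btf *vmlinux, *module;
	__u32 id = 42;	/* assumption: id of some module BTF object */

	vmlinux = btf__load_vmlinux_btf();
	if (!vmlinux) {
		fprintf(stderr, "failed to load vmlinux BTF\n");
		return 1;
	}
	module = btf__load_from_kernel_by_id_split(id, vmlinux);
	if (!module)
		fprintf(stderr, "failed to load split BTF for id %u\n", id);
	else
		printf("module BTF has %u types\n", btf__type_cnt(module));

	btf__free(module);	/* btf__free(NULL) is a no-op */
	btf__free(vmlinux);
	return 0;
}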