Symbol: kaslr_offset
arch/arm64/kernel/kaslr.c:27: if (kaslr_offset() < MIN_KIMG_ALIGN) {
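
The check above relies on how the offset is assembled (see the pi/map_kernel.c lines below): the bits below MIN_KIMG_ALIGN only record the image's physical misalignment, so an offset smaller than MIN_KIMG_ALIGN means no randomization seed was mixed in. The accessor itself does not appear in this listing; on recent arm64 kernels it is an inline in arch/arm64/include/asm/memory.h, roughly (a sketch from memory, not verified against a tree):

/* Sketch: the slide is the distance between the runtime and the
 * link-time text base. */
static inline u64 kaslr_offset(void)
{
	return (u64)&_text - KIMAGE_VADDR;
}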
arch/arm64/kernel/pi/map_kernel.c:39: static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
arch/arm64/kernel/pi/map_kernel.c:103: relocate_kernel(kaslr_offset);
arch/arm64/kernel/pi/map_kernel.c:245: u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;
arch/arm64/kernel/pi/map_kernel.c:281: kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);
arch/arm64/kernel/pi/map_kernel.c:287: va_base = KIMAGE_VADDR + kaslr_offset;
arch/arm64/kernel/pi/map_kernel.c:288: map_kernel(kaslr_offset, va_base - pa_base, root_level);
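
Read together, lines 245-288 show how early boot assembles the offset: the bits below MIN_KIMG_ALIGN come from wherever the bootloader physically placed the image, and the bits above come from the KASLR seed. A condensed sketch of the surrounding early_map_kernel(), hedged: the kaslr_early_init() signature is assumed, and error handling plus unrelated MMU setup are omitted:

asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
{
	u64 va_base, pa_base = (u64)&_text;
	u64 kaslr_offset = pa_base % MIN_KIMG_ALIGN;	/* low bits: load misalignment */
	int root_level = 4 - CONFIG_PGTABLE_LEVELS;
	int chosen = fdt_path_offset(fdt, "/chosen");

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		/* assumed helper: extracts the seed from the /chosen node */
		u64 kaslr_seed = kaslr_early_init(fdt, chosen);

		kaslr_offset |= kaslr_seed & ~(MIN_KIMG_ALIGN - 1);	/* high bits: seed */
	}

	va_base = KIMAGE_VADDR + kaslr_offset;
	map_kernel(kaslr_offset, va_base - pa_base, root_level);
}

map_kernel() (line 39) then passes the same offset to relocate_kernel() (line 103), so the image's relocations are patched for the address it was actually mapped at.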
arch/arm64/kernel/setup.c:398: const unsigned long offset = kaslr_offset();
arch/arm64/kernel/vmcore_info.c:35: vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
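
The KERNELOFFSET= export recurs on powerpc, s390 and x86 below: each architecture publishes the slide from its arch_crash_save_vmcoreinfo() so that crash/makedumpfile can un-slide symbol addresses when reading a dump. The shared pattern, sketched with the per-arch field list abbreviated:

void arch_crash_save_vmcoreinfo(void)
{
	/* dump tools subtract this from runtime addresses */
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}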
arch/arm64/kvm/handle_exit.c:513: (void *)(panic_addr + kaslr_offset()));
arch/arm64/kvm/handle_exit.c:529: u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
arch/arm64/kvm/handle_exit.c:568: dump_kernel_instr(panic_addr + kaslr_offset());
arch/arm64/kvm/stacktrace.c:154: kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
arch/powerpc/kernel/setup-common.c:760: kaslr_offset(), KERNELBASE);
arch/powerpc/kernel/setup-common.c:798: if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
arch/powerpc/kexec/vmcore_info.c:31: vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
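
powerpc's accessor is a header inline as well; if memory serves, arch/powerpc/include/asm/page.h derives the slide from the randomized kernel start address:

static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}

which also explains why the print at setup-common.c:760 pairs the offset with KERNELBASE.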
arch/riscv/include/asm/page.h:193: unsigned long kaslr_offset(void);
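
riscv only declares the function here; the definition lives out of line (in arch/riscv/mm/init.c, as an assumption) and returns the virtual offset recorded while the kernel mapping was set up, roughly:

unsigned long kaslr_offset(void)
{
	return kernel_map.virt_offset;
}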
arch/s390/include/asm/page.h:189: unsigned long kaslr_offset;
arch/s390/include/asm/page.h:197: #define __kaslr_offset vm_layout.kaslr_offset
arch/s390/kernel/os_info.c:81: os_info_entry_add_val(OS_INFO_KASLR_OFFSET, kaslr_offset());
arch/s390/kernel/vmcore_info.c:19: vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
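
Note that page.h:189 is a struct member, not a function: s390 keeps the offset in a global vm_layout structure and wires it to the common kaslr_offset() name via the macro at line 197. How the pieces fit, sketched (the sibling field and the accessor body are assumptions):

struct vm_layout {
	unsigned long kaslr_offset;		/* page.h:189 above */
	unsigned long kaslr_offset_phys;	/* assumed sibling field */
};

extern struct vm_layout vm_layout;

#define __kaslr_offset	vm_layout.kaslr_offset	/* page.h:197 above */

static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

Beyond vmcoreinfo, os_info.c:81 also records the offset in the s390 os_info block, which the dump tooling reads on that platform.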
arch/x86/kernel/setup.c:836: kaslr_offset(),
arch/x86/kernel/vmcore_info_64.c:21: vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
arch/x86/lib/kaslr.c:26: #define get_boot_seed() kaslr_offset()
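
On x86 the accessor compares the runtime address of _text with the link-time base; arch/x86/include/asm/setup.h has it roughly as below (a sketch from memory). x86/lib/kaslr.c then reuses the already-random slide as a cheap boot-time entropy source through the get_boot_seed() macro at line 26.

static inline unsigned long kaslr_offset(void)
{
	return (unsigned long)&_text - __START_KERNEL;
}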
kernel/kcov.c:200: ip -= kaslr_offset();
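
The one generic consumer: kcov subtracts the slide from every recorded PC so coverage traces compare equal across randomized boots. The surrounding helper in kernel/kcov.c is, from memory:

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}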