Symbol: kernel_map
arch/riscv/include/asm/page.h:118: extern struct kernel_mapping kernel_map;
arch/riscv/include/asm/page.h:123: ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
arch/riscv/include/asm/page.h:129: #define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
arch/riscv/include/asm/page.h:138: (void *)(_y + kernel_map.va_kernel_xip_text_pa_offset) : \
arch/riscv/include/asm/page.h:139: (void *)(_y + kernel_map.va_kernel_xip_data_pa_offset); \
arch/riscv/include/asm/page.h:142: #define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))
arch/riscv/include/asm/page.h:148: #define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
arch/riscv/include/asm/page.h:156: (_y < kernel_map.virt_addr + kernel_map.xiprom_sz) ? \
arch/riscv/include/asm/page.h:157: (_y - kernel_map.va_kernel_xip_text_pa_offset) : \
arch/riscv/include/asm/page.h:158: (_y - kernel_map.va_kernel_xip_data_pa_offset); \
arch/riscv/include/asm/page.h:161: #define kernel_mapping_va_to_pa(y) ((unsigned long)(y) - kernel_map.va_kernel_pa_offset)
arch/riscv/include/asm/page.h:35: #define PAGE_OFFSET kernel_map.page_offset
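
The page.h macros above come in two families: linear_mapping_pa_to_va()/linear_mapping_va_to_pa() translate through the linear (direct) map using kernel_map.va_pa_offset, while kernel_mapping_pa_to_va()/kernel_mapping_va_to_pa() translate addresses inside the kernel image using kernel_map.va_kernel_pa_offset (the XIP variants at page.h:138-158 additionally distinguish text in flash, via xiprom, from data in RAM). The following is a minimal user-space sketch of the non-XIP conversions, built only from field names visible in this listing; the struct and all addresses are illustrative stand-ins, not the kernel's definitions or real constants.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Reduced model of struct kernel_mapping: only the fields exercised by the
 * page.h conversion macros listed above. Values are examples, not the real
 * RISC-V layout.
 */
struct kernel_mapping_model {
	uint64_t virt_addr;            /* VA the kernel image is mapped at        */
	uint64_t phys_addr;            /* PA the kernel was loaded at             */
	uint64_t size;                 /* size of the kernel image                */
	uint64_t va_pa_offset;         /* linear map: PAGE_OFFSET - RAM base      */
	uint64_t va_kernel_pa_offset;  /* kernel map: virt_addr - phys_addr       */
};

/* Mirrors linear_mapping_pa_to_va() / linear_mapping_va_to_pa(). */
static uint64_t linear_pa_to_va(const struct kernel_mapping_model *km, uint64_t pa)
{
	return pa + km->va_pa_offset;
}

static uint64_t linear_va_to_pa(const struct kernel_mapping_model *km, uint64_t va)
{
	return va - km->va_pa_offset;
}

/* Mirrors kernel_mapping_pa_to_va() / kernel_mapping_va_to_pa(), non-XIP case. */
static uint64_t kernel_pa_to_va(const struct kernel_mapping_model *km, uint64_t pa)
{
	return pa + km->va_kernel_pa_offset;
}

static uint64_t kernel_va_to_pa(const struct kernel_mapping_model *km, uint64_t va)
{
	return va - km->va_kernel_pa_offset;
}

int main(void)
{
	/* Illustrative layout: linear map base, link address, load address. */
	const uint64_t page_offset = 0xffffffd800000000ULL; /* example only */
	const uint64_t ram_base    = 0x80000000ULL;          /* example only */

	struct kernel_mapping_model km = {
		.virt_addr = 0xffffffff80000000ULL, /* example only */
		.phys_addr = 0x80200000ULL,
		.size      = 0x01400000ULL,
	};
	km.va_pa_offset        = page_offset - ram_base;     /* cf. init.c:257  */
	km.va_kernel_pa_offset = km.virt_addr - km.phys_addr; /* cf. init.c:1130 */

	uint64_t pa = km.phys_addr + 0x1000; /* some page inside the image */

	printf("kernel map: pa %#" PRIx64 " -> va %#" PRIx64 " -> pa %#" PRIx64 "\n",
	       pa, kernel_pa_to_va(&km, pa),
	       kernel_va_to_pa(&km, kernel_pa_to_va(&km, pa)));
	printf("linear map: pa %#" PRIx64 " -> va %#" PRIx64 " -> pa %#" PRIx64 "\n",
	       pa, linear_pa_to_va(&km, pa),
	       linear_va_to_pa(&km, linear_pa_to_va(&km, pa)));
	return 0;
}
```

Both directions are plain offset arithmetic; which offset applies depends only on whether the address belongs to the linear map or to the kernel image mapping (the range check at page.h:123 is what tells them apart).
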
arch/riscv/kernel/machine_kexec.c:180: this_hart_id, kernel_map.va_pa_offset);
arch/riscv/kernel/setup.c:396: kernel_map.virt_offset,
arch/riscv/kernel/vmcore_info.c:29: kernel_map.va_kernel_pa_offset);
arch/riscv/mm/init.c:1084: return kernel_map.virt_offset;
arch/riscv/mm/init.c:1107: kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
arch/riscv/mm/init.c:1111: kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
arch/riscv/mm/init.c:1114: kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
arch/riscv/mm/init.c:1115: kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
arch/riscv/mm/init.c:1121: kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
arch/riscv/mm/init.c:1122: kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
arch/riscv/mm/init.c:1124: kernel_map.va_kernel_xip_text_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
arch/riscv/mm/init.c:1125: kernel_map.va_kernel_xip_data_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr
arch/riscv/mm/init.c:1128: kernel_map.phys_addr = (uintptr_t)(&_start);
arch/riscv/mm/init.c:1129: kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
arch/riscv/mm/init.c:1130: kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
arch/riscv/mm/init.c:1150: kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
arch/riscv/mm/init.c:1151: 0UL : PAGE_OFFSET - kernel_map.phys_addr;
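
The init.c:1107-1151 entries above are where setup_vm() populates these fields: the image VA is KERNEL_LINK_ADDR plus the KASLR-derived virt_offset, phys_addr and size come from the _start/_end link symbols, va_kernel_pa_offset is simply virt_addr - phys_addr, and on 64-bit va_pa_offset is left at 0 here and filled in later (init.c:257) as PAGE_OFFSET - phys_ram_base once the RAM base is known. The snippet below redoes that arithmetic with made-up inputs; the link address, page offset, physical addresses, and KASLR numbers are illustrative assumptions, not the real constants.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All inputs below are made-up examples, not the real constants. */
	const uint64_t kernel_link_addr = 0xffffffff80000000ULL; /* KERNEL_LINK_ADDR stand-in */
	const uint64_t pmd_size         = 0x200000ULL;           /* 2 MiB PMD_SIZE stand-in   */
	const uint64_t page_offset      = 0xffffffd800000000ULL; /* PAGE_OFFSET stand-in      */
	const uint64_t phys_ram_base    = 0x80000000ULL;         /* start of RAM (example)    */
	const uint64_t load_pa          = 0x80200000ULL;         /* where _start landed       */
	const uint64_t image_size       = 0x01400000ULL;         /* _end - _start (example)   */
	const uint64_t kaslr_seed       = 0x1234;
	const uint64_t nr_pos           = 0x100;                 /* candidate KASLR slots     */

	/* Mirrors the assignments listed at init.c:1107-1130 (non-XIP case). */
	uint64_t virt_offset         = (kaslr_seed % nr_pos) * pmd_size;
	uint64_t virt_addr           = kernel_link_addr + virt_offset;
	uint64_t va_kernel_pa_offset = virt_addr - load_pa;

	/* On 64-bit this is derived later (init.c:257) from the RAM base. */
	uint64_t va_pa_offset = page_offset - phys_ram_base;

	printf("virt_offset         = %#" PRIx64 "\n", virt_offset);
	printf("virt_addr           = %#" PRIx64 "\n", virt_addr);
	printf("va_kernel_pa_offset = %#" PRIx64 "\n", va_kernel_pa_offset);
	printf("va_pa_offset        = %#" PRIx64 "\n", va_pa_offset);
	printf("kernel image VA span: [%#" PRIx64 ", %#" PRIx64 ")\n",
	       virt_addr, virt_addr + image_size);
	return 0;
}
```

The create_pgd/p4d/pud/pmd_mapping() calls listed further down (init.c:1198-1216) then consume exactly these values, mapping kernel_map.virt_addr to kernel_map.phys_addr (or kernel_map.xiprom in the XIP case) when building the trampoline and kernel page tables.
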
arch/riscv/mm/init.c:1157: BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
arch/riscv/mm/init.c:1164: BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
arch/riscv/mm/init.c:1175: BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
arch/riscv/mm/init.c:1198: create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
arch/riscv/mm/init.c:1201: create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
arch/riscv/mm/init.c:1204: create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
arch/riscv/mm/init.c:1207: create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
arch/riscv/mm/init.c:1208: kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:1210: create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
arch/riscv/mm/init.c:1211: kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:1215: create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
arch/riscv/mm/init.c:1216: kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
arch/riscv/mm/init.c:1381: kernel_map.virt_addr = (uintptr_t)_start;
arch/riscv/mm/init.c:1382: kernel_map.phys_addr = (uintptr_t)_start;
arch/riscv/mm/init.c:161: print_ml("kernel", (unsigned long)kernel_map.virt_addr,
arch/riscv/mm/init.c:257: kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
arch/riscv/mm/init.c:326: uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
arch/riscv/mm/init.c:331: uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
arch/riscv/mm/init.c:42: struct kernel_mapping kernel_map __ro_after_init;
arch/riscv/mm/init.c:43: EXPORT_SYMBOL(kernel_map);
arch/riscv/mm/init.c:45: #define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
arch/riscv/mm/init.c:511: BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);
arch/riscv/mm/init.c:576: BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
arch/riscv/mm/init.c:614: BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
arch/riscv/mm/init.c:819: kernel_map.page_offset = PAGE_OFFSET_L4;
arch/riscv/mm/init.c:826: kernel_map.page_offset = PAGE_OFFSET_L3;
arch/riscv/mm/init.c:865: kernel_map.page_offset = PAGE_OFFSET_L5;
arch/riscv/mm/init.c:944: end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
arch/riscv/mm/init.c:945: for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
arch/riscv/mm/init.c:947: kernel_map.xiprom + (va - kernel_map.virt_addr),
arch/riscv/mm/init.c:951: start_va = kernel_map.virt_addr + (uintptr_t)&_sdata - (uintptr_t)&_start;
arch/riscv/mm/init.c:952: end_va = kernel_map.virt_addr + kernel_map.size;
arch/riscv/mm/init.c:955: kernel_map.phys_addr + (va - start_va),
arch/riscv/mm/init.c:963: end_va = kernel_map.virt_addr + kernel_map.size;
arch/riscv/mm/init.c:964: for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
arch/riscv/mm/init.c:966: kernel_map.phys_addr + (va - kernel_map.virt_addr),
arch/riscv/mm/physaddr.c:24: unsigned long kernel_start = kernel_map.virt_addr;
arch/riscv/mm/physaddr.c:25: unsigned long kernel_end = kernel_start + kernel_map.size;
arch/riscv/mm/physaddr.c:39: BUG_ON(!kernel_map.va_pa_offset);
arch/riscv/mm/physaddr.c:41: return ((unsigned long)(x) - kernel_map.va_pa_offset);
arch/riscv/mm/physaddr.c:47: BUG_ON(!kernel_map.va_pa_offset);
arch/riscv/mm/physaddr.c:49: return ((void *)((unsigned long)(x) + kernel_map.va_pa_offset));
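
physaddr.c:24-25 build a [kernel_start, kernel_end) window from kernel_map so the checked address-translation helpers in that file can verify that an address really lies inside the kernel image before converting it, and physaddr.c:39/47 refuse to translate through the linear map while kernel_map.va_pa_offset is still zero (i.e. before it has been initialized). Below is a hedged user-space model of both checks; the function names and example values are mine, not the kernel's API.

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Model of the bounds check seen at physaddr.c:24-25: a symbol address must
 * fall inside [kernel_map.virt_addr, kernel_map.virt_addr + size) before it
 * is converted with va_kernel_pa_offset. Illustrative only.
 */
static uint64_t checked_symbol_va_to_pa(uint64_t va, uint64_t virt_addr,
					uint64_t size, uint64_t va_kernel_pa_offset)
{
	uint64_t kernel_start = virt_addr;
	uint64_t kernel_end   = kernel_start + size;

	assert(va >= kernel_start && va < kernel_end); /* range check */
	return va - va_kernel_pa_offset;
}

/*
 * Model of physaddr.c:39/47: reject linear-map translation while
 * va_pa_offset is still zero, i.e. before it has been set up.
 */
static uint64_t checked_linear_va_to_pa(uint64_t va, uint64_t va_pa_offset)
{
	assert(va_pa_offset != 0);
	return va - va_pa_offset;
}

int main(void)
{
	const uint64_t virt_addr = 0xffffffff80000000ULL; /* example only */
	const uint64_t phys_addr = 0x80200000ULL;
	const uint64_t size      = 0x01400000ULL;
	const uint64_t kern_off  = virt_addr - phys_addr;
	const uint64_t lin_off   = 0xffffffd800000000ULL - 0x80000000ULL; /* example */

	uint64_t sym_va = virt_addr + 0x123456; /* pretend kernel symbol address */
	printf("symbol va %#" PRIx64 " -> pa %#" PRIx64 "\n",
	       sym_va, checked_symbol_va_to_pa(sym_va, virt_addr, size, kern_off));
	printf("linear va -> pa %#" PRIx64 "\n",
	       checked_linear_va_to_pa(lin_off + phys_addr, lin_off));
	return 0;
}
```
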
arch/riscv/mm/ptdump.c:443: address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
tools/perf/builtin-kmem.c:346: struct map *kernel_map;
tools/perf/builtin-kmem.c:363: kernel_map = machine__kernel_map(machine);
tools/perf/builtin-kmem.c:364: if (map__load(kernel_map) < 0) {
tools/perf/builtin-kmem.c:369: map__for_each_symbol(kernel_map, sym, node) {
tools/perf/builtin-report.c:622: struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
tools/perf/builtin-report.c:623: struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
tools/perf/builtin-report.c:628: if (kernel_map == NULL ||
tools/perf/builtin-report.c:629: (dso__hit(map__dso(kernel_map)) &&
tools/perf/builtin-report.c:636: if (kernel_map && map__has_symbols(kernel_map)) {
tools/perf/util/map.c:543: struct map *kernel_map = machine__kernel_map(machine);
tools/perf/util/map.c:545: if (kernel_map)
tools/perf/util/map.c:546: map = kernel_map;