VM_MIN_KERNEL_ADDRESS
return (((addr - VM_MIN_KERNEL_ADDRESS) >> KASAN_SHADOW_SCALE_SHIFT) +
kernmin = vm_page_array == NULL ? VM_MIN_KERNEL_ADDRESS :
return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_SHAD_MIN_ADDRESS);
return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_ORIG_MIN_ADDRESS);
return (addr < VM_MIN_KERNEL_ADDRESS || addr >= KERNBASE);
addr >= VM_MIN_KERNEL_ADDRESS
if (state->registers[PC] < VM_MIN_KERNEL_ADDRESS) {
if (va >= VM_MIN_KERNEL_ADDRESS) {
if (va >= VM_MIN_KERNEL_ADDRESS) {
/* True iff va lies in the kernel portion of the address space (>= kernel minimum VA). */
#define INKERNEL(va) (((vm_offset_t)(va)) >= VM_MIN_KERNEL_ADDRESS)
#ifndef VM_MIN_KERNEL_ADDRESS
VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
PRELOAD_PUSH_VALUE(uint64_t, VM_MIN_KERNEL_ADDRESS);
(size_t)((vm_offset_t)&end - VM_MIN_KERNEL_ADDRESS));
for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += L2_SIZE) {
mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += L2_SIZE) {
else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
eva = va + (virtual_avail - VM_MIN_KERNEL_ADDRESS) / scale;
if (virtual_avail - VM_MIN_KERNEL_ADDRESS > L1_SIZE)
KASSERT(sva >= VM_MIN_KERNEL_ADDRESS,
unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
if (va < VM_MIN_KERNEL_ADDRESS)
!(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
(va >= VM_MIN_KERNEL_ADDRESS && va < kernel_vm_end),
if (value >= VM_MIN_KERNEL_ADDRESS) {
KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
return (((addr - VM_MIN_KERNEL_ADDRESS) >> KASAN_SHADOW_SCALE_SHIFT) +
return (addr < VM_MIN_KERNEL_ADDRESS || addr >= virtual_end);
return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_SHAD_MIN_ADDRESS);
return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_ORIG_MIN_ADDRESS);
return (addr < VM_MIN_KERNEL_ADDRESS || addr >= kernel_vm_end);
((va) >= VM_MIN_KERNEL_ADDRESS && (va) <= VM_MAX_KERNEL_ADDRESS)
/* Kernel base virtual address: an alias for the minimum kernel VA. */
#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
(*func)(0, (uintptr_t)VM_MIN_KERNEL_ADDRESS);
(x) >= VM_MIN_KERNEL_ADDRESS) || \
(*func)(0, (uintptr_t)VM_MIN_KERNEL_ADDRESS);
trampaddr = VM_MIN_KERNEL_ADDRESS;
if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
return ((void *)(uintptr_t)(VM_MIN_KERNEL_ADDRESS +
if (base < VM_MIN_KERNEL_ADDRESS)
VM_MIN_KERNEL_ADDRESS) >> (PDRSHIFT - 2)) + 5) / 10) << PDRSHIFT)
mapbase = VM_MIN_KERNEL_ADDRESS;
KASSERT((vm_offset_t)addr >= VM_MIN_KERNEL_ADDRESS &&
if (maxpipekva > (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 64)
maxpipekva = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
virtual_avail = VM_MIN_KERNEL_ADDRESS;
if (va < VM_MIN_KERNEL_ADDRESS)
if (va < VM_MIN_KERNEL_ADDRESS) {
if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
virtual_avail = VM_MIN_KERNEL_ADDRESS;
virtual_avail = VM_MIN_KERNEL_ADDRESS;
if (va < VM_MIN_KERNEL_ADDRESS)
pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS);
pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS);
pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS);
kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE;
if (VM_MIN_KERNEL_ADDRESS < addr &&
addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE))
mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i;
KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva));
if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
start = VM_MIN_KERNEL_ADDRESS;
if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
(addr > DMAP_MAX_ADDRESS && addr < VM_MIN_KERNEL_ADDRESS) ||
(addr >= tlb1_map_base && addr < VM_MIN_KERNEL_ADDRESS) ||
KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
va = VM_MIN_KERNEL_ADDRESS;
vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
/*
 * Highest "safe" kernel VA: two segments above the minimum, inclusive
 * (hence the trailing "- 1").  Spacing normalized to match the sibling
 * VM_MAX_KERNEL_ADDRESS definition ("- 1", not "-1"); the token stream
 * is unchanged, so the expansion is identical.
 */
#define VM_MAX_SAFE_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH - 1)
#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 3*SEGMENT_LENGTH - 1)
VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
/* Minimum address for this range: aliases the minimum kernel VA.
 * NOTE(review): no parentheses around the expansion — presumably safe because
 * VM_MIN_KERNEL_ADDRESS is itself a parenthesized constant; confirm in its header. */
#define PA_MIN_ADDRESS VM_MIN_KERNEL_ADDRESS
md_offset = VM_MIN_KERNEL_ADDRESS - kernload;
((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end))) {
if (((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) &&
mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
VM_MAX_USER_ADDRESS_SV48) || (va) >= VM_MIN_KERNEL_ADDRESS)
#define INKERNEL(va) ((va) >= VM_MIN_KERNEL_ADDRESS && \
/* Kernel base virtual address: an alias for the minimum kernel VA. */
#define KERNBASE (VM_MIN_KERNEL_ADDRESS)
VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
for (va = VM_MIN_KERNEL_ADDRESS; va < kva_max; va += L2_SIZE) {
for (va = VM_MIN_KERNEL_ADDRESS; va < kva_max; va += L2_SIZE) {
KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
for (i = pmap_l1_index(VM_MIN_KERNEL_ADDRESS);
i = pmap_l0_index(VM_MIN_KERNEL_ADDRESS);
unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
!(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
sva = VM_MIN_KERNEL_ADDRESS;
else if (i == pmap_l1_index(VM_MIN_KERNEL_ADDRESS))
if (value >= VM_MIN_KERNEL_ADDRESS) {
if (stval >= VM_MIN_KERNEL_ADDRESS) {
KASSERT((addr - VM_MIN_KERNEL_ADDRESS) % size == 0,
rem = atop(*addrp - VM_MIN_KERNEL_ADDRESS) % kpages;
KASSERT((addr - VM_MIN_KERNEL_ADDRESS) % kpages == 0,
(void *)addr, (void *)VM_MIN_KERNEL_ADDRESS));
vm_pindex_t pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);
atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS,
offset = addr - VM_MIN_KERNEL_ADDRESS;
offset = addr - VM_MIN_KERNEL_ADDRESS;
offset = addr - VM_MIN_KERNEL_ADDRESS;
offset = addr - VM_MIN_KERNEL_ADDRESS;
vm_map_init_system(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
VM_MIN_KERNEL_ADDRESS,
VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);