VM_MAX_KERNEL_ADDRESS
return (addr < kernmin || addr >= VM_MAX_KERNEL_ADDRESS);
#define VM_KMEM_SIZE_MAX ((VM_MAX_KERNEL_ADDRESS - \
KASSERT(va >= VM_MAX_KERNEL_ADDRESS,
for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L1_SIZE)
for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L2_SIZE)
virtual_end = VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE;
pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
!(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
((va) >= VM_MIN_KERNEL_ADDRESS && (va) <= VM_MAX_KERNEL_ADDRESS)
#define VM_KMEM_SIZE_MAX ((VM_MAX_KERNEL_ADDRESS - \
/* Upper bound of the early device-map VA region: top of the kernel address space. */
#define DEVMAP_MAX_VADDR VM_MAX_KERNEL_ADDRESS
#define INKERNEL(x) (((x) <= VM_MAX_KERNEL_ADDRESS && \
if (addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) {
_Static_assert(VM_MAX_KERNEL_ADDRESS <= VADDR(PTDPTDI, 0),
KASSERT((pmap == kernel_pmap && va < VM_MAX_KERNEL_ADDRESS) ||
virtual_end = VM_MAX_KERNEL_ADDRESS;
ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
#define VM_KMEM_SIZE_MAX (((((VM_MAX_KERNEL_ADDRESS - \
(vm_offset_t)addr < VM_MAX_KERNEL_ADDRESS,
KASSERT(va >= (VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE),
if (maxpipekva > (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 64)
maxpipekva = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
(va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
(va <= VM_MAX_KERNEL_ADDRESS)),
(va <= VM_MAX_KERNEL_ADDRESS)),
(va <= VM_MAX_KERNEL_ADDRESS)),
if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
(addr > VM_MAX_KERNEL_ADDRESS))
virtual_end = VM_MAX_KERNEL_ADDRESS;
kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
(va) <= VM_MAX_KERNEL_ADDRESS)
#define VM_KMEM_SIZE_MAX ((VM_MAX_KERNEL_ADDRESS - \
/* Upper bound of the early device-map VA region: top of the kernel address space. */
#define DEVMAP_MAX_VADDR VM_MAX_KERNEL_ADDRESS
/* NOTE(review): presumably exports VM_MAX_KERNEL_ADDRESS as an assembler-visible
 * constant via the genassym mechanism — ASSYM is defined elsewhere; confirm. */
ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
i < pmap_l1_index(VM_MAX_KERNEL_ADDRESS); i++)
unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
!(base >= VM_MIN_KERNEL_ADDRESS && base < VM_MAX_KERNEL_ADDRESS))
atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -