L1_SIZE
va += L1_SIZE - L2_SIZE;
va += L1_SIZE - L2_SIZE;
sva += L1_SIZE;
sva += L1_SIZE;
(physmap[i + 1] - bs_state.pa) >= L1_SIZE;
bs_state.va += L1_SIZE, bs_state.pa += L1_SIZE) {
for (; bs_state.va < VM_MAX_KERNEL_ADDRESS; bs_state.va += L1_SIZE)
virtual_avail = roundup2(virtual_avail, L1_SIZE);
pmap_bootstrap_l2(KERNBASE + L1_SIZE);
if (virtual_avail - VM_MIN_KERNEL_ADDRESS > L1_SIZE)
pagesizes[3] = L1_SIZE;
sva = (sva & ~L1_OFFSET) + L1_SIZE;
va_next = (sva + L1_SIZE) & ~L1_OFFSET;
pmap_resident_count_dec(pmap, L1_SIZE / PAGE_SIZE);
va_next = (sva + L1_SIZE) & ~L1_OFFSET;
KASSERT(pagesizes[psind] == L1_SIZE,
va_next = (sva + L1_SIZE) & ~L1_OFFSET;
pmap->pm_stats.wired_count -= L1_SIZE / PAGE_SIZE;
va_next = (addr + L1_SIZE) & ~L1_OFFSET;
pmap_resident_count_inc(dst_pmap, L1_SIZE / PAGE_SIZE);
va_next = (sva + L1_SIZE) & ~L1_OFFSET;
tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
(base + size - tmpva) >= L1_SIZE) {
pte_size = L1_SIZE;
if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
/* Mask of the byte offset within an L1 mapping (low bits below the L1_SIZE boundary). */
#define L1_OFFSET (L1_SIZE - 1)
((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
/* Mask of the byte offset within an L1 mapping; assumes L1_SIZE is a power of two — TODO confirm at definition site. */
#define L1_OFFSET (L1_SIZE - 1)
va_next = (sva + L1_SIZE) & ~L1_OFFSET;
va_next = (sva + L1_SIZE) & ~L1_OFFSET;
va_next = (sva + L1_SIZE) & ~L1_OFFSET;
tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
tmpva + L1_SIZE <= base + size) {
tmpva += L1_SIZE;
tmpva += L1_SIZE;
PHYS_TO_DMAP(phys), L1_SIZE, mode);
tmpva += L1_SIZE;
sva += L1_SIZE;
sva += L1_SIZE;
dmap_phys_base = rounddown(min_pa, L1_SIZE);
if (roundup(pa, L1_SIZE) + L1_SIZE > endpa)
while (pa + L1_SIZE - 1 < endpa) {
pa += L1_SIZE;
va += L1_SIZE;