NBPDP
_Static_assert(NBPDP == AMD64_NBPDP, "NBPDP mismatch");
pt2_1[i] = (pd_entry_t)NBPDP + i * NBPDR;
pt2_2[i] = (pd_entry_t)2 * NBPDP + i * NBPDR;
pt2_3[i] = (pd_entry_t)3 * NBPDP + i * NBPDR;
pages += howmany(end - (start & ~PDPMASK), NBPDP);
pages += howmany(end - (start & ~PDPMASK), NBPDP) - 1;
pages += howmany(source_total, NBPDP);
pages += howmany(pages * PAGE_SIZE, NBPDP);
va += NBPDP;
va += NBPDP;
for (va = kva_layout.km_low; va < kva_end; va += NBPDP) {
v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
NBPDP) >= roundup2(spa, NBPDP) + NBPDP)
error = pmap_large_map_getva(len, NBPDP, spa & PDPMASK,
if ((amd_feature & AMDID_PAGE1GB) != 0 && len >= NBPDP &&
inc = NBPDP;
KASSERT(va + NBPDP <= sva + len,
inc = NBPDP;
inc = NBPDP;
va_next = (va + NBPDP) & ~PDPMASK;
sva = rounddown2(sva, NBPDP);
sva += NBPDP;
sva = rounddown2(sva, NBPDP);
sva += NBPDP;
sva += NBPDP;
ndmpdp = howmany(ptoa(Maxmem), NBPDP);
kernphys - rounddown2(kernphys, NBPDP), NBPDP);
for (i = 0, pax = rounddown2(kernphys, NBPDP);
j = rounddown2(kernphys, NBPDP) >> PDPSHIFT;
pagesizes[2] = NBPDP;
npdpg = howmany(size, NBPDP);
KASSERT(start % NBPDP == 0, ("unaligned page array start address"));
va_next = (sva + NBPDP) & ~PDPMASK;
pmap_resident_count_adj(pmap, -NBPDP / PAGE_SIZE);
va_next = (sva + NBPDP) & ~PDPMASK;
va_next = (sva + NBPDP) & ~PDPMASK;
pmap->pm_stats.wired_count -= NBPDP / PAGE_SIZE;
va_next = (addr + NBPDP) & ~PDPMASK;
pmap_resident_count_adj(dst_pmap, NBPDP / PAGE_SIZE);
va_next = (sva + NBPDP) & ~PDPMASK;
tmpva = trunc_1gpage(tmpva) + NBPDP;
tmpva += NBPDP;
pa_end = pa_start + NBPDP;
pa_end += NBPDP;
pa_end = pa_start + NBPDP;
tmpva = trunc_1gpage(tmpva) + NBPDP;
if (len < NBPDP && base < dmaplimit) {
/*
 * Mask covering the offset within a 1GB page-directory-pointer (PDP) page:
 * NBPDP is the number of bytes mapped by one PDPE, so PDPMASK selects the
 * low-order address bits inside that region (and ~PDPMASK truncates an
 * address down to a 1GB boundary, as used throughout this file).
 *
 * NBPDP is fully parenthesized in the expansion so the macro stays correct
 * even if NBPDP is ever (re)defined as a compound expression such as a
 * shift without surrounding parentheses.
 */
#define PDPMASK ((NBPDP)-1)