NPML4EPG
_Static_assert(NPML4EPG == AMD64_NPML4EPG, "NPML4EPG mismatch");
PT3_l = &PT4[NPML4EPG * 1];
PT3_u = &PT4[NPML4EPG * 2];
PT2_l0 = &PT4[NPML4EPG * 3];
PT2_l1 = &PT4[NPML4EPG * 4];
PT2_l2 = &PT4[NPML4EPG * 5];
PT2_l3 = &PT4[NPML4EPG * 6];
PT2_u0 = &PT4[NPML4EPG * 7];
PT2_u1 = &PT4[NPML4EPG * 8];
PT4[NPML4EPG - 1] = (pml4_entry_t)PT3_u | PG_V | PG_RW;
PT3_l = &PT4[NPML4EPG * 1];
PT3_u = &PT4[NPML4EPG * 2];
PT2_l0 = &PT4[NPML4EPG * 3];
PT2_l1 = &PT4[NPML4EPG * 4];
PT2_l2 = &PT4[NPML4EPG * 5];
PT2_l3 = &PT4[NPML4EPG * 6];
PT2_u0 = &PT4[NPML4EPG * 7];
PT2_u1 = &PT4[NPML4EPG * 8];
PT4[NPML4EPG - 1] = (pml4_entry_t)PT3_u | PG_V | PG_RW;
PT3_l = &PT4[NPML4EPG * 1];
PT3_u = &PT4[NPML4EPG * 2];
PT2_l0 = &PT4[NPML4EPG * 3];
PT2_l1 = &PT4[NPML4EPG * 4];
PT2_l2 = &PT4[NPML4EPG * 5];
PT2_l3 = &PT4[NPML4EPG * 6];
PT2_u0 = &PT4[NPML4EPG * 7];
PT2_u1 = &PT4[NPML4EPG * 8];
PT4[NPML4EPG - 1] = (pml4_entry_t)pa_pt3_u | PG_V | PG_RW;
NPML4EPG + NPML4EPG * NPDPEPG + NPML4EPG * NPDPEPG * NPDEPG),
ptpages_show_pml4(pg, NPML4EPG, PG_V);
pt[NPML4EPG - 1] = KPML4phys | X86_PG_V | X86_PG_RW | X86_PG_A |
ndmpml4phys = howmany(ndmpdpphys, NPML4EPG);
ndmpdpphys = ndmpml4phys * NPML4EPG;
for (i = 0; i < NPML4EPG; i++)
.kva_max = KV4ADDR(NPML4EPG - 1, NPDPEPG - 1,
.kva_max = KV5ADDR(NPML5EPG - 1, NPML4EPG - 1, NPDPEPG - 1,
#define NUPML4E (NUPML5E * NPML4EPG) /* number of userland PML4
#define NUP4ML4E (NPML4EPG / 2) /* lower half of the PML4 entries — presumably the userland (4-level) span; confirm against the VA-layout comments in pmap.h */
#define PML4PML4I (NPML4EPG / 2) /* Index of recursive pml4 mapping */
#define KPML4BASE (NPML4EPG-NKPML4E) /* KVM at highest addresses */
#define KPML4I (NPML4EPG-1) /* index of the last PML4 entry (highest-addressed slot) */