#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/mutex.h>
#include <uvm/uvm.h>
#include <machine/specialreg.h>
#include <dev/isa/isareg.h>
#include <i386/isa/isa_machdep.h>
#include "ksyms.h"
#ifdef PMAPAE_DEBUG
#define DPRINTF(x...) do { printf(x); } while(0)
#else
#define DPRINTF(x...)
#endif
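/*
 * PAE-mode MMU layout.  Entries are 64 bits wide: each of the four
 * page directory pages holds 512 entries mapping 2MB apiece
 * (PDSHIFT = 21), and a 4-entry page directory pointer table (PDPT)
 * selects among the directory pages.  PDSLOT_PTE and PDSLOT_APTE are
 * the recursive slots that make the page tables visible at PTE_BASE
 * and APTE_BASE; PDSLOT_KERN is the first slot of kernel VM.
 * PG_FRAME and PG_LGFRAME mask the physical frame out of a 4K and a
 * 2M entry respectively.  The *86 variants describe the pre-PAE
 * 32-bit layout, still needed while converting from it at bootstrap.
 */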
#define PG_FRAME 0xffffff000ULL
#define PG_LGFRAME 0xfffe00000ULL
#undef PDSHIFT
#define PDSHIFT 21
#undef NBPD
#define NBPD (1U << PDSHIFT)
#define PDSHIFT86 22
#undef PDSLOT_PTE
#define PDSLOT_PTE (1660U)
#undef PDSLOT_KERN
#define PDSLOT_KERN (1664U)
#undef PDSLOT_APTE
#define PDSLOT_APTE (2044U)
#define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD))
#define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD))
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE (PDP_BASE + PDSLOT_PTE)
#define APDP_PDE (PDP_BASE + PDSLOT_APTE)
#define PD_MASK 0xffe00000
#define PT_MASK 0x001ff000
#define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
#define PD_MASK86 0xffc00000
#define PT_MASK86 0x003ff000
#define i386_round_pdr(x) ((((unsigned)(x)) + ~PD_MASK) & PD_MASK)
#define vtopte(VA) (PTE_BASE + atop((vaddr_t)VA))
#define ptp_i2o(I) ((I) * NBPG)
#define ptp_o2i(O) ((O) / NBPG)
#define ptp_i2v(I) ((I) * NBPD)
#define ptp_v2i(V) ((V) / NBPD)
#define PDE(pm,i) (((pd_entry_t *)(pm)->pm_pdir)[(i)])
typedef u_int64_t pd_entry_t;
typedef u_int64_t pt_entry_t;
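/* no-execute bit: bit 63 of a 64-bit PAE PTE/PDE */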
#define PG_NX 0x8000000000000000ULL
#define NPTECL 8
extern u_int32_t protection_codes[];
extern int pmap_initialized;
extern vaddr_t kernel_text, etext, __rodata_start, erodata, __data_start;
extern vaddr_t edata, __bss_start, end, ssym, esym, PTmap;
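/*
 * Special VAs (and the PTEs that map them) used by pmap_copy_page,
 * pmap_zero_phys, the temporary PTP map and pmap_flush_page.  On
 * MULTIPROCESSOR kernels each CPU is slewed to its own cache line of
 * NPTECL entries so the per-CPU temporary mappings do not false-share.
 */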
#ifdef MULTIPROCESSOR
#define PTESLEW(pte, id) ((pte)+(id)*NPTECL)
#define VASLEW(va,id) ((va)+(id)*NPTECL*NBPG)
#else
#define PTESLEW(pte, id) (pte)
#define VASLEW(va,id) (va)
#endif
static pt_entry_t *csrc_pte, *cdst_pte, *zero_pte, *ptp_pte, *flsh_pte;
extern caddr_t pmap_csrcp, pmap_cdstp, pmap_zerop, pmap_ptpp, pmap_flshp;
extern int pmap_pg_g;
extern int pmap_pg_wc;
extern struct pmap_head pmaps;
extern struct mutex pmaps_lock;
extern uint32_t cpu_meltdown;
struct vm_page *pmap_alloc_ptp_pae(struct pmap *, int, pt_entry_t);
struct vm_page *pmap_get_ptp_pae(struct pmap *, int);
void pmap_drop_ptp_pae(struct pmap *, vaddr_t, struct vm_page *,
pt_entry_t *);
pt_entry_t *pmap_map_ptes_pae(struct pmap *);
void pmap_unmap_ptes_pae(struct pmap *);
void pmap_do_remove_pae(struct pmap *, vaddr_t, vaddr_t, int);
void pmap_remove_ptes_pae(struct pmap *, struct vm_page *,
vaddr_t, vaddr_t, vaddr_t, int, struct pv_entry **);
void pmap_sync_flags_pte_pae(struct vm_page *, pt_entry_t);
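/* convert a PTE's PG_U/PG_M bits into the pg_flags referenced/modified bits */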
static __inline u_int
pmap_pte2flags(pt_entry_t pte)
{
return (((pte & PG_U) ? PG_PMAP_REF : 0) |
((pte & PG_M) ? PG_PMAP_MOD : 0));
}
void
pmap_sync_flags_pte_pae(struct vm_page *pg, pt_entry_t pte)
{
if (pte & (PG_U|PG_M)) {
atomic_setbits_int(&pg->pg_flags, pmap_pte2flags(pte));
}
}
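/*
 * pmap_map_ptes_pae: map a pmap's PTEs into KVM and lock them in.
 *
 * The kernel pmap and the current pmap are already visible through
 * the recursive slot at PTE_BASE.  Any other pmap is loaded into the
 * alternate recursive slot (APDP_PDE) of the current pmap and read
 * through APTE_BASE; pm_apte_mtx serializes users of that slot.
 * Must be undone with pmap_unmap_ptes_pae().
 */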
pt_entry_t *
pmap_map_ptes_pae(struct pmap *pmap)
{
pd_entry_t opde;
if (pmap == pmap_kernel()) {
return(PTE_BASE);
}
mtx_enter(&pmap->pm_mtx);
if (pmap_is_curpmap(pmap)) {
return(PTE_BASE);
}
mtx_enter(&curcpu()->ci_curpmap->pm_apte_mtx);
opde = *APDP_PDE;
#if defined(MULTIPROCESSOR) && defined(DIAGNOSTIC)
if (pmap_valid_entry(opde))
panic("pmap_map_ptes_pae: APTE valid");
#endif
/* compare frames only: pm_pdidx[] entries carry PG_V */
if (!pmap_valid_entry(opde) ||
    (opde & PG_FRAME) != (pmap->pm_pdidx[0] & PG_FRAME)) {
APDP_PDE[0] = pmap->pm_pdidx[0] | PG_RW | PG_V | PG_U | PG_M;
APDP_PDE[1] = pmap->pm_pdidx[1] | PG_RW | PG_V | PG_U | PG_M;
APDP_PDE[2] = pmap->pm_pdidx[2] | PG_RW | PG_V | PG_U | PG_M;
APDP_PDE[3] = pmap->pm_pdidx[3] | PG_RW | PG_V | PG_U | PG_M;
if (pmap_valid_entry(opde))
pmap_apte_flush();
}
return(APTE_BASE);
}
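/* pmap_unmap_ptes_pae: drop the locks taken by pmap_map_ptes_pae */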
void
pmap_unmap_ptes_pae(struct pmap *pmap)
{
if (pmap == pmap_kernel())
return;
if (!pmap_is_curpmap(pmap)) {
#if defined(MULTIPROCESSOR)
APDP_PDE[0] = 0;
APDP_PDE[1] = 0;
APDP_PDE[2] = 0;
APDP_PDE[3] = 0;
pmap_apte_flush();
#endif
mtx_leave(&curcpu()->ci_curpmap->pm_apte_mtx);
}
mtx_leave(&pmap->pm_mtx);
}
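/*
 * Low-level PTE accessors behind the pmap function-pointer switch:
 * install a PTE (adding PG_NX unless PG_X was requested), set/clear
 * bits in one, and read back its flag bits or physical address.
 */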
u_int32_t
pmap_pte_set_pae(vaddr_t va, paddr_t pa, u_int32_t bits)
{
pt_entry_t pte, *ptep = vtopte(va);
uint64_t nx;
pa &= PMAP_PA_MASK;
if (bits & PG_X)
nx = 0;
else
nx = PG_NX;
pte = i386_atomic_testset_uq(ptep, pa | bits | nx);
return (pte & ~PG_FRAME);
}
u_int32_t
pmap_pte_setbits_pae(vaddr_t va, u_int32_t set, u_int32_t clr)
{
pt_entry_t *ptep = vtopte(va);
pt_entry_t pte = *ptep;
i386_atomic_testset_uq(ptep, (pte | set) & ~(pt_entry_t)clr);
return (pte & ~PG_FRAME);
}
u_int32_t
pmap_pte_bits_pae(vaddr_t va)
{
pt_entry_t *ptep = vtopte(va);
return (*ptep & ~PG_FRAME);
}
paddr_t
pmap_pte_paddr_pae(vaddr_t va)
{
pt_entry_t *ptep = vtopte(va);
return (*ptep & PG_FRAME);
}
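/*
 * pmap_alloc_pdir_intel_pae: allocate the separate U-K page directory
 * used on CPUs vulnerable to Meltdown, so userland can run without
 * kernel mappings.  pm_pdidx_intel becomes the PDPT for the four new
 * directory pages and pm_pdirpa_intel its physical address.
 */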
void
pmap_alloc_pdir_intel_pae(struct pmap *pmap)
{
vaddr_t va;
int i;
KASSERT(pmap->pm_pdir_intel == 0);
va = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_zero, &kd_waitok);
if (va == 0)
panic("kernel_map out of virtual space");
pmap->pm_pdir_intel = va;
if (!pmap_extract(pmap_kernel(), (vaddr_t)&pmap->pm_pdidx_intel,
&pmap->pm_pdirpa_intel))
panic("can't locate PDPT");
for (i = 0; i < 4; i++) {
pmap->pm_pdidx_intel[i] = 0;
if (!pmap_extract(pmap, va + i * NBPG,
(paddr_t *)&pmap->pm_pdidx_intel[i]))
panic("can't locate PD page");
pmap->pm_pdidx_intel[i] |= PG_V;
DPRINTF("%s: pm_pdidx_intel[%d] = 0x%llx\n", __func__,
i, pmap->pm_pdidx_intel[i]);
}
}
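/*
 * pmap_bootstrap_pae: switch the kernel pmap from 32-bit to PAE
 * paging.  Requires both PAE and NX support.  Builds the PAE PDPT and
 * directory pages, converts every existing 86-mode mapping (doubling
 * nkpde, since PAE PDEs span 2MB instead of 4MB), flips the pmap
 * function pointers to the *_pae versions once cpu_paenable()
 * succeeds, and finally re-applies NX and W^X section protections.
 */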
void
pmap_bootstrap_pae(void)
{
extern int nkpde;
struct pmap *kpm = pmap_kernel();
struct vm_page *ptp;
paddr_t ptaddr;
u_int32_t bits, *pd = NULL;
vaddr_t va, eva;
pt_entry_t pte;
if ((cpu_feature & CPUID_PAE) == 0 ||
(ecpu_feature & CPUID_NXE) == 0)
return;
cpu_pae = 1;
DPRINTF("%s: pm_pdir 0x%x pm_pdirpa 0x%x pm_pdirsize %d\n", __func__,
(uint32_t)kpm->pm_pdir, (uint32_t)kpm->pm_pdirpa,
kpm->pm_pdirsize);
va = (vaddr_t)kpm->pm_pdir;
kpm->pm_pdidx[0] = (va + 0*NBPG - KERNBASE) | PG_V;
kpm->pm_pdidx[1] = (va + 1*NBPG - KERNBASE) | PG_V;
kpm->pm_pdidx[2] = (va + 2*NBPG - KERNBASE) | PG_V;
kpm->pm_pdidx[3] = (va + 3*NBPG - KERNBASE) | PG_V;
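/* map the four PD pages into the recursive slots so they show up at PDP_BASE */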
PDE(kpm, PDSLOT_PTE+0) = kpm->pm_pdidx[0] | PG_KW | PG_M | PG_U;
PDE(kpm, PDSLOT_PTE+1) = kpm->pm_pdidx[1] | PG_KW | PG_M | PG_U;
PDE(kpm, PDSLOT_PTE+2) = kpm->pm_pdidx[2] | PG_KW | PG_M | PG_U;
PDE(kpm, PDSLOT_PTE+3) = kpm->pm_pdidx[3] | PG_KW | PG_M | PG_U;
if (kpm->pm_pdir_intel) {
pd = (uint32_t *)kpm->pm_pdir_intel;
kpm->pm_pdir_intel = kpm->pm_pdirpa_intel = 0;
pmap_alloc_pdir_intel_pae(kpm);
}
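/* transfer all existing kernel mappings over into PAE format */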
for (va = KERNBASE, eva = va + (nkpde << PDSHIFT86);
va < eva; va += PAGE_SIZE) {
if (!pmap_valid_entry(PDE(kpm, pdei(va)))) {
ptp = uvm_pagealloc(&kpm->pm_obj, va, NULL,
UVM_PGA_ZERO);
if (ptp == NULL)
panic("%s: uvm_pagealloc() failed", __func__);
ptaddr = VM_PAGE_TO_PHYS(ptp);
PDE(kpm, pdei(va)) = ptaddr | PG_KW | PG_V |
PG_U | PG_M;
pmap_pte_set_86((vaddr_t)vtopte(va),
ptaddr, PG_KW | PG_V | PG_U | PG_M);
kpm->pm_stats.resident_count++;
}
bits = pmap_pte_bits_86(va) | pmap_pg_g;
if ((va >= (vaddr_t)&kernel_text && va <= (vaddr_t)&etext) ||
(va >= (vaddr_t)atdevbase && va <=
(vaddr_t)(atdevbase + IOM_SIZE)))
bits |= PG_X;
else
bits &= ~PG_X;
if (pmap_valid_entry(bits))
pmap_pte_set_pae(va, pmap_pte_paddr_86(va), bits);
}
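/* rebuild the Meltdown U-K directory, if one existed, in PAE format */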
if (pd) {
uint32_t *ptp;
uint32_t l1idx, l2idx;
paddr_t npa;
struct vm_page *ptppg;
for (va = KERNBASE, eva = va + (nkpde << PDSHIFT86); va < eva;
va += PAGE_SIZE) {
l1idx = ((va & PT_MASK86) >> PGSHIFT);
l2idx = ((va & PD_MASK86) >> PDSHIFT86);
if (!pmap_valid_entry(pd[l2idx]))
continue;
npa = pd[l2idx] & PMAP_PA_MASK;
ptppg = PHYS_TO_VM_PAGE(npa);
mtx_enter(&ptppg->mdpage.pv_mtx);
ptp = (uint32_t *)pmap_tmpmap_pa_86(npa);
if (!pmap_valid_entry(ptp[l1idx])) {
mtx_leave(&ptppg->mdpage.pv_mtx);
pmap_tmpunmap_pa_86();
continue;
}
DPRINTF("%s: va 0x%x l2idx %u 0x%x lx1idx %u 0x%x\n",
__func__, (uint32_t)va, l2idx, (uint32_t)pd[l2idx],
l1idx, (uint32_t)ptp[l1idx]);
bits = ptp[l1idx] & (PG_PROT|PG_N|PG_WT);
npa = ptp[l1idx] & PMAP_PA_MASK;
pmap_tmpunmap_pa_86();
mtx_leave(&ptppg->mdpage.pv_mtx);
cpu_pae = 0;
pmap_enter_special_pae(va, npa, 0, bits);
cpu_pae = 1;
if (--ptppg->wire_count == 1) {
ptppg->wire_count = 0;
uvm_pagerealloc(ptppg, NULL, 0);
DPRINTF("%s: freeing PT page 0x%x\n", __func__,
(uint32_t)VM_PAGE_TO_PHYS(ptppg));
}
}
DPRINTF("%s: freeing PDP 0x%x\n", __func__, (uint32_t)pd);
km_free(pd, NBPG, &kv_any, &kp_dirty);
}
if (!cpu_paenable(&kpm->pm_pdidx[0])) {
extern struct user *proc0paddr;
proc0paddr->u_pcb.pcb_cr3 = kpm->pm_pdirpa =
(vaddr_t)kpm - KERNBASE;
kpm->pm_pdirsize = 4 * NBPG;
cpu_update_nmi_cr3(kpm->pm_pdirpa);
DPRINTF("%s: pm_pdir 0x%x pm_pdirpa 0x%x pm_pdirsize %d\n",
__func__, (uint32_t)kpm->pm_pdir, (uint32_t)kpm->pm_pdirpa,
kpm->pm_pdirsize);
csrc_pte = vtopte(pmap_csrcp);
cdst_pte = vtopte(pmap_cdstp);
zero_pte = vtopte(pmap_zerop);
ptp_pte = vtopte(pmap_ptpp);
flsh_pte = vtopte(pmap_flshp);
nkpde *= 2;
nkptp_max = 2048 - PDSLOT_KERN - 4;
pmap_pte_set_p = pmap_pte_set_pae;
pmap_pte_setbits_p = pmap_pte_setbits_pae;
pmap_pte_bits_p = pmap_pte_bits_pae;
pmap_pte_paddr_p = pmap_pte_paddr_pae;
pmap_clear_attrs_p = pmap_clear_attrs_pae;
pmap_enter_p = pmap_enter_pae;
pmap_enter_special_p = pmap_enter_special_pae;
pmap_extract_p = pmap_extract_pae;
pmap_growkernel_p = pmap_growkernel_pae;
pmap_page_remove_p = pmap_page_remove_pae;
pmap_do_remove_p = pmap_do_remove_pae;
pmap_test_attrs_p = pmap_test_attrs_pae;
pmap_unwire_p = pmap_unwire_pae;
pmap_write_protect_p = pmap_write_protect_pae;
pmap_pinit_pd_p = pmap_pinit_pd_pae;
pmap_zero_phys_p = pmap_zero_phys_pae;
pmap_copy_page_p = pmap_copy_page_pae;
bzero((void *)kpm->pm_pdir + 8, (PDSLOT_PTE-1) * 8);
}
for (va = (vaddr_t)&PTmap; va < KERNBASE; va += NBPD) {
pte = PDE(kpm, pdei(va));
PDE(kpm, pdei(va)) = pte | PG_NX;
}
va = (vaddr_t)APTE_BASE;
pte = PDE(kpm, pdei(va));
PDE(kpm, pdei(va)) = pte | PG_NX;
pmap_write_protect(kpm, (vaddr_t)&kernel_text, (vaddr_t)&etext,
PROT_READ | PROT_EXEC);
pmap_write_protect(kpm, (vaddr_t)&__rodata_start,
(vaddr_t)&erodata, PROT_READ);
pmap_write_protect(kpm, (vaddr_t)&__data_start, (vaddr_t)&edata,
PROT_READ | PROT_WRITE);
pmap_write_protect(kpm, (vaddr_t)&__bss_start, (vaddr_t)&end,
PROT_READ | PROT_WRITE);
#if defined(DDB) || NKSYMS > 0
pmap_write_protect(kpm, ssym, esym, PROT_READ);
#endif
}
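/*
 * pmap_alloc_ptp_pae: allocate a zeroed PT page and install it at
 * pde_index; userland PDEs are mirrored into the U-K directory when
 * one exists.  Returns NULL if the page cannot be allocated.
 */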
struct vm_page *
pmap_alloc_ptp_pae(struct pmap *pmap, int pde_index, pt_entry_t pde_flags)
{
struct vm_page *ptp;
pd_entry_t *pva_intel;
ptp = uvm_pagealloc(&pmap->pm_obj, ptp_i2o(pde_index), NULL,
UVM_PGA_USERESERVE|UVM_PGA_ZERO);
if (ptp == NULL)
return (NULL);
atomic_clearbits_int(&ptp->pg_flags, PG_BUSY);
ptp->wire_count = 1;
PDE(pmap, pde_index) = (pd_entry_t)(VM_PAGE_TO_PHYS(ptp) |
PG_RW | PG_V | PG_M | PG_U | pde_flags);
if (pmap->pm_pdir_intel && ptp_i2v(pde_index) < VM_MAXUSER_ADDRESS) {
pva_intel = (pd_entry_t *)pmap->pm_pdir_intel;
pva_intel[pde_index] = PDE(pmap, pde_index);
DPRINTF("%s: copying usermode PDE (content=0x%llx) pde_index "
"%d from 0x%llx -> 0x%llx\n", __func__,
PDE(pmap, pde_index), pde_index,
(uint64_t)&PDE(pmap, pde_index),
(uint64_t)&(pva_intel[pde_index]));
}
pmap->pm_stats.resident_count++;
pmap->pm_ptphint = ptp;
return(ptp);
}
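/*
 * pmap_get_ptp_pae: get the PTP for a PDE slot, allocating it if it
 * does not yet exist; pm_ptphint caches the last PTP used.
 */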
struct vm_page *
pmap_get_ptp_pae(struct pmap *pmap, int pde_index)
{
struct vm_page *ptp;
if (pmap_valid_entry(PDE(pmap, pde_index))) {
if (pmap->pm_ptphint &&
(PDE(pmap, pde_index) & PG_FRAME) ==
VM_PAGE_TO_PHYS(pmap->pm_ptphint))
return(pmap->pm_ptphint);
ptp = uvm_pagelookup(&pmap->pm_obj, ptp_i2o(pde_index));
#ifdef DIAGNOSTIC
if (ptp == NULL)
panic("pmap_get_ptp_pae: unmanaged user PTP");
#endif
pmap->pm_ptphint = ptp;
return(ptp);
}
return (pmap_alloc_ptp_pae(pmap, pde_index, PG_u));
}
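/*
 * pmap_drop_ptp_pae: the PTP's last user mapping is gone.  Zap its
 * PDE (and its U-K twin), shoot the self-mapping out of the TLBs and
 * hand the page back to UVM.
 */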
void
pmap_drop_ptp_pae(struct pmap *pm, vaddr_t va, struct vm_page *ptp,
pt_entry_t *ptes)
{
pd_entry_t *pva_intel;
i386_atomic_testset_uq(&PDE(pm, pdei(va)), 0);
pmap_tlb_shootpage(curcpu()->ci_curpmap, ((vaddr_t)ptes) + ptp->offset);
#ifdef MULTIPROCESSOR
pmap_tlb_shootpage(pm, ((vaddr_t)PTE_BASE) + ptp->offset);
#endif
pm->pm_stats.resident_count--;
if (pm->pm_ptphint == ptp)
pm->pm_ptphint = RBT_ROOT(uvm_objtree, &pm->pm_obj.memt);
ptp->wire_count = 0;
uvm_pagerealloc(ptp, NULL, 0);
if (pm->pm_pdir_intel) {
KASSERT(va < VM_MAXUSER_ADDRESS);
pva_intel = (pd_entry_t *)pm->pm_pdir_intel;
i386_atomic_testset_uq(&pva_intel[pdei(va)], 0);
DPRINTF("%s: cleared meltdown PDE @ index %lu "
"(va range start 0x%x)\n", __func__, pdei(va),
(uint32_t)va);
}
}
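/*
 * pmap_pinit_pd_pae: initialize a new pmap's page directory: four
 * directory pages plus the in-pmap PDPT, the recursive PTE slots, a
 * copy of the kernel PDEs, and (when cpu_meltdown) a matching U-K
 * directory.
 */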
void
pmap_pinit_pd_pae(struct pmap *pmap)
{
extern int nkpde;
vaddr_t va;
paddr_t pdidx[4];
pmap->pm_pdir = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_dirty,
&kd_waitok);
if (pmap->pm_pdir == 0)
panic("kernel_map out of virtual space");
pmap_extract(pmap_kernel(), (vaddr_t)pmap, &pmap->pm_pdirpa);
va = (vaddr_t)pmap->pm_pdir;
pmap_extract(pmap_kernel(), va + 0*NBPG, &pdidx[0]);
pmap_extract(pmap_kernel(), va + 1*NBPG, &pdidx[1]);
pmap_extract(pmap_kernel(), va + 2*NBPG, &pdidx[2]);
pmap_extract(pmap_kernel(), va + 3*NBPG, &pdidx[3]);
pmap->pm_pdidx[0] = (uint64_t)pdidx[0];
pmap->pm_pdidx[1] = (uint64_t)pdidx[1];
pmap->pm_pdidx[2] = (uint64_t)pdidx[2];
pmap->pm_pdidx[3] = (uint64_t)pdidx[3];
pmap->pm_pdidx[0] |= PG_V;
pmap->pm_pdidx[1] |= PG_V;
pmap->pm_pdidx[2] |= PG_V;
pmap->pm_pdidx[3] |= PG_V;
pmap->pm_pdirsize = 4 * NBPG;
bzero((void *)pmap->pm_pdir, PDSLOT_PTE * sizeof(pd_entry_t));
PDE(pmap, PDSLOT_PTE+0) = pmap->pm_pdidx[0] | PG_KW | PG_U |
PG_M | PG_V | PG_NX;
PDE(pmap, PDSLOT_PTE+1) = pmap->pm_pdidx[1] | PG_KW | PG_U |
PG_M | PG_V | PG_NX;
PDE(pmap, PDSLOT_PTE+2) = pmap->pm_pdidx[2] | PG_KW | PG_U |
PG_M | PG_V | PG_NX;
PDE(pmap, PDSLOT_PTE+3) = pmap->pm_pdidx[3] | PG_KW | PG_U |
PG_M | PG_V | PG_NX;
bcopy(&PDP_BASE[PDSLOT_KERN], &PDE(pmap, PDSLOT_KERN),
nkpde * sizeof(pd_entry_t));
bzero(&PDE(pmap, PDSLOT_KERN + nkpde), pmap->pm_pdirsize -
((PDSLOT_KERN + nkpde) * sizeof(pd_entry_t)));
if (cpu_meltdown) {
pmap_alloc_pdir_intel_pae(pmap);
bcopy((void *)pmap_kernel()->pm_pdir_intel,
(void *)pmap->pm_pdir_intel, 4 * NBPG);
DPRINTF("%s: pmap %p pm_pdir 0x%lx pm_pdirpa 0x%lx "
"pdir_intel 0x%lx pdirpa_intel 0x%lx\n",
__func__, pmap, pmap->pm_pdir, pmap->pm_pdirpa,
pmap->pm_pdir_intel, pmap->pm_pdirpa_intel);
}
mtx_enter(&pmaps_lock);
LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
mtx_leave(&pmaps_lock);
}
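/* pmap_extract_pae: extract the physical address mapped at va, if any */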
int
pmap_extract_pae(struct pmap *pmap, vaddr_t va, paddr_t *pap)
{
pt_entry_t *ptes, pte;
ptes = pmap_map_ptes_pae(pmap);
if (pmap_valid_entry(PDE(pmap, pdei(va)))) {
pte = ptes[atop(va)];
pmap_unmap_ptes_pae(pmap);
if (!pmap_valid_entry(pte))
return 0;
if (pap != NULL)
*pap = (pte & PG_FRAME) | (va & ~PG_FRAME);
return 1;
}
pmap_unmap_ptes_pae(pmap);
return 0;
}
extern void (*pagezero)(void *, size_t);
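/* pmap_zero_phys_pae: zero a physical page via the per-CPU zero mapping */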
void
pmap_zero_phys_pae(paddr_t pa)
{
#ifdef MULTIPROCESSOR
int id = cpu_number();
#endif
pt_entry_t *zpte = PTESLEW(zero_pte, id);
caddr_t zerova = VASLEW(pmap_zerop, id);
#ifdef DIAGNOSTIC
if (*zpte)
panic("pmap_zero_phys_pae: lock botch");
#endif
*zpte = (pa & PG_FRAME) | PG_V | PG_RW;
pmap_update_pg((vaddr_t)zerova);
pagezero(zerova, PAGE_SIZE);
*zpte = 0;
}
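/* pmap_copy_page_pae: copy a page via the per-CPU source/destination mappings */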
void
pmap_copy_page_pae(struct vm_page *srcpg, struct vm_page *dstpg)
{
paddr_t srcpa = VM_PAGE_TO_PHYS(srcpg);
paddr_t dstpa = VM_PAGE_TO_PHYS(dstpg);
#ifdef MULTIPROCESSOR
int id = cpu_number();
#endif
pt_entry_t *spte = PTESLEW(csrc_pte, id);
pt_entry_t *dpte = PTESLEW(cdst_pte, id);
caddr_t csrcva = VASLEW(pmap_csrcp, id);
caddr_t cdstva = VASLEW(pmap_cdstp, id);
#ifdef DIAGNOSTIC
if (*spte || *dpte)
panic("pmap_copy_page_pae: lock botch");
#endif
*spte = (srcpa & PG_FRAME) | PG_V | PG_RW;
*dpte = (dstpa & PG_FRAME) | PG_V | PG_RW;
pmap_update_2pg((vaddr_t)csrcva, (vaddr_t)cdstva);
bcopy(csrcva, cdstva, PAGE_SIZE);
*spte = *dpte = 0;
pmap_update_2pg((vaddr_t)csrcva, (vaddr_t)cdstva);
}
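/*
 * pmap_remove_ptes_pae: remove the PTEs in [startva, endva) within
 * the PTP mapped at ptpva, updating stats, wire counts and PV lists.
 * The caller holds the PTE mapping and issues the TLB shootdowns.
 */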
void
pmap_remove_ptes_pae(struct pmap *pmap, struct vm_page *ptp, vaddr_t ptpva,
vaddr_t startva, vaddr_t endva, int flags, struct pv_entry **free_pvs)
{
struct pv_entry *pve;
pt_entry_t *pte = (pt_entry_t *) ptpva;
struct vm_page *pg;
pt_entry_t opte;
for (; startva < endva && (ptp == NULL || ptp->wire_count > 1)
; pte++, startva += NBPG) {
if (!pmap_valid_entry(*pte))
continue;
if ((flags & PMAP_REMOVE_SKIPWIRED) && (*pte & PG_W))
continue;
opte = i386_atomic_testset_uq(pte, 0);
if (opte & PG_W)
pmap->pm_stats.wired_count--;
pmap->pm_stats.resident_count--;
if (ptp)
ptp->wire_count--;
pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
if ((opte & PG_PVLIST) == 0) {
#ifdef DIAGNOSTIC
if (pg != NULL)
panic("pmap_remove_ptes_pae: managed page "
"without PG_PVLIST for 0x%lx", startva);
#endif
continue;
}
#ifdef DIAGNOSTIC
if (pg == NULL)
panic("pmap_remove_ptes_pae: unmanaged page marked "
"PG_PVLIST, va = 0x%lx, pa = 0x%lx",
startva, (u_long)(opte & PG_FRAME));
#endif
pmap_sync_flags_pte_pae(pg, opte);
pve = pmap_remove_pv(pg, pmap, startva);
if (pve) {
pve->pv_next = *free_pvs;
*free_pvs = pve;
}
}
}
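/*
 * pmap_do_remove_pae: remove a range of mappings one 2MB block at a
 * time, skipping the recursive PTE slots.  Emptied PTPs and unlinked
 * PV entries are freed only after the TLB shootdowns complete.  For
 * user ranges larger than 32 pages the whole TLB is flushed instead.
 */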
void
pmap_do_remove_pae(struct pmap *pmap, vaddr_t sva, vaddr_t eva, int flags)
{
pt_entry_t *ptes;
paddr_t ptppa;
vaddr_t blkendva;
struct vm_page *ptp;
struct pv_entry *pve;
struct pv_entry *free_pvs = NULL;
TAILQ_HEAD(, vm_page) empty_ptps;
int shootall;
vaddr_t va;
TAILQ_INIT(&empty_ptps);
ptes = pmap_map_ptes_pae(pmap);
if ((eva - sva > 32 * PAGE_SIZE) && pmap != pmap_kernel())
shootall = 1;
else
shootall = 0;
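/* remove the range one PD block at a time */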
for (va = sva ; va < eva ; va = blkendva) {
blkendva = i386_round_pdr(va + 1);
if (blkendva > eva)
blkendva = eva;
if (pdei(va) >= PDSLOT_PTE && pdei(va) <= (PDSLOT_PTE + 3))
continue;
if (!pmap_valid_entry(PDE(pmap, pdei(va))))
continue;
ptppa = PDE(pmap, pdei(va)) & PG_FRAME;
if (pmap == pmap_kernel()) {
ptp = NULL;
} else {
if (pmap->pm_ptphint &&
VM_PAGE_TO_PHYS(pmap->pm_ptphint) == ptppa) {
ptp = pmap->pm_ptphint;
} else {
ptp = PHYS_TO_VM_PAGE(ptppa);
#ifdef DIAGNOSTIC
if (ptp == NULL)
panic("pmap_do_remove_pae: unmanaged "
"PTP detected");
#endif
}
}
pmap_remove_ptes_pae(pmap, ptp, (vaddr_t)&ptes[atop(va)],
va, blkendva, flags, &free_pvs);
if (ptp && ptp->wire_count <= 1) {
pmap_drop_ptp_pae(pmap, va, ptp, ptes);
TAILQ_INSERT_TAIL(&empty_ptps, ptp, pageq);
}
if (!shootall)
pmap_tlb_shootrange(pmap, va, blkendva);
}
if (shootall)
pmap_tlb_shoottlb();
pmap_unmap_ptes_pae(pmap);
pmap_tlb_shootwait();
while ((pve = free_pvs) != NULL) {
free_pvs = pve->pv_next;
pool_put(&pmap_pv_pool, pve);
}
while ((ptp = TAILQ_FIRST(&empty_ptps)) != NULL) {
TAILQ_REMOVE(&empty_ptps, ptp, pageq);
uvm_pagefree(ptp);
}
}
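/*
 * pmap_page_remove_pae: remove every mapping of a managed page.  The
 * pv_mtx cannot be held across pmap_map_ptes_pae(), so each pmap is
 * referenced first and the list head is re-checked after relocking.
 */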
void
pmap_page_remove_pae(struct vm_page *pg)
{
struct pv_entry *pve;
struct pmap *pm;
pt_entry_t *ptes, opte;
TAILQ_HEAD(, vm_page) empty_ptps;
struct vm_page *ptp;
if (pg->mdpage.pv_list == NULL)
return;
TAILQ_INIT(&empty_ptps);
mtx_enter(&pg->mdpage.pv_mtx);
while ((pve = pg->mdpage.pv_list) != NULL) {
pmap_reference(pve->pv_pmap);
pm = pve->pv_pmap;
mtx_leave(&pg->mdpage.pv_mtx);
ptes = pmap_map_ptes_pae(pm);
mtx_enter(&pg->mdpage.pv_mtx);
if ((pve = pg->mdpage.pv_list) == NULL ||
pve->pv_pmap != pm) {
mtx_leave(&pg->mdpage.pv_mtx);
pmap_unmap_ptes_pae(pm);
pmap_destroy(pm);
mtx_enter(&pg->mdpage.pv_mtx);
continue;
}
pg->mdpage.pv_list = pve->pv_next;
mtx_leave(&pg->mdpage.pv_mtx);
#ifdef DIAGNOSTIC
if (pve->pv_ptp && (PDE(pve->pv_pmap, pdei(pve->pv_va)) &
PG_FRAME)
!= VM_PAGE_TO_PHYS(pve->pv_ptp)) {
printf("pmap_page_remove_pae: pg=%p: va=%lx, "
"pv_ptp=%p\n",
pg, pve->pv_va, pve->pv_ptp);
printf("pmap_page_remove_pae: PTP's phys addr: "
"actual=%llx, recorded=%lx\n",
(PDE(pve->pv_pmap, pdei(pve->pv_va)) &
PG_FRAME), VM_PAGE_TO_PHYS(pve->pv_ptp));
panic("pmap_page_remove_pae: mapped managed page has "
"invalid pv_ptp field");
}
#endif
opte = i386_atomic_testset_uq(&ptes[atop(pve->pv_va)], 0);
if (opte & PG_W)
pve->pv_pmap->pm_stats.wired_count--;
pve->pv_pmap->pm_stats.resident_count--;
pmap_sync_flags_pte_pae(pg, opte);
if (pve->pv_ptp && --pve->pv_ptp->wire_count <= 1) {
pmap_drop_ptp_pae(pve->pv_pmap, pve->pv_va,
pve->pv_ptp, ptes);
TAILQ_INSERT_TAIL(&empty_ptps, pve->pv_ptp, pageq);
}
pmap_tlb_shootpage(pve->pv_pmap, pve->pv_va);
pmap_unmap_ptes_pae(pve->pv_pmap);
pmap_destroy(pve->pv_pmap);
pool_put(&pmap_pv_pool, pve);
mtx_enter(&pg->mdpage.pv_mtx);
}
mtx_leave(&pg->mdpage.pv_mtx);
pmap_tlb_shootwait();
while ((ptp = TAILQ_FIRST(&empty_ptps)) != NULL) {
TAILQ_REMOVE(&empty_ptps, ptp, pageq);
uvm_pagefree(ptp);
}
}
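/* pmap_test_attrs_pae: test PTE attribute bits, caching hits in pg_flags */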
int
pmap_test_attrs_pae(struct vm_page *pg, int testbits)
{
struct pv_entry *pve;
pt_entry_t *ptes, pte;
u_long mybits, testflags;
paddr_t ptppa;
testflags = pmap_pte2flags(testbits);
if (pg->pg_flags & testflags)
return 1;
mybits = 0;
mtx_enter(&pg->mdpage.pv_mtx);
for (pve = pg->mdpage.pv_list; pve != NULL && mybits == 0;
pve = pve->pv_next) {
ptppa = PDE(pve->pv_pmap, pdei(pve->pv_va)) & PG_FRAME;
ptes = (pt_entry_t *)pmap_tmpmap_pa(ptppa);
pte = ptes[ptei(pve->pv_va)];
pmap_tmpunmap_pa();
mybits |= (pte & testbits);
}
mtx_leave(&pg->mdpage.pv_mtx);
if (mybits == 0)
return 0;
atomic_setbits_int(&pg->pg_flags, pmap_pte2flags(mybits));
return 1;
}
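/*
 * pmap_clear_attrs_pae: clear the given attribute bits in every
 * mapping of the page; returns whether any bit was set.
 */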
int
pmap_clear_attrs_pae(struct vm_page *pg, int clearbits)
{
struct pv_entry *pve;
pt_entry_t *ptes, npte, opte;
u_long clearflags;
paddr_t ptppa;
int result;
clearflags = pmap_pte2flags(clearbits);
result = pg->pg_flags & clearflags;
if (result)
atomic_clearbits_int(&pg->pg_flags, clearflags);
mtx_enter(&pg->mdpage.pv_mtx);
for (pve = pg->mdpage.pv_list; pve != NULL; pve = pve->pv_next) {
ptppa = PDE(pve->pv_pmap, pdei(pve->pv_va)) & PG_FRAME;
ptes = (pt_entry_t *)pmap_tmpmap_pa(ptppa);
#ifdef DIAGNOSTIC
if (!pmap_valid_entry(PDE(pve->pv_pmap, pdei(pve->pv_va))))
panic("pmap_clear_attrs_pae: mapping without PTP "
"detected");
#endif
opte = ptes[ptei(pve->pv_va)];
if (opte & clearbits) {
result = 1;
npte = opte & ~clearbits;
opte = i386_atomic_testset_uq(
&ptes[ptei(pve->pv_va)], npte);
pmap_tlb_shootpage(pve->pv_pmap, pve->pv_va);
}
pmap_tmpunmap_pa();
}
mtx_leave(&pg->mdpage.pv_mtx);
pmap_tlb_shootwait();
return (result != 0);
}
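/*
 * pmap_write_protect_pae: change the protection of all valid mappings
 * in [sva, eva) to prot, setting PG_NX whenever PROT_EXEC is absent.
 */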
void
pmap_write_protect_pae(struct pmap *pmap, vaddr_t sva, vaddr_t eva,
vm_prot_t prot)
{
pt_entry_t *ptes, *spte, *epte, npte, opte;
vaddr_t blkendva;
u_int64_t md_prot;
vaddr_t va;
int shootall = 0;
ptes = pmap_map_ptes_pae(pmap);
if ((eva - sva > 32 * PAGE_SIZE) && pmap != pmap_kernel())
shootall = 1;
for (va = sva; va < eva; va = blkendva) {
blkendva = i386_round_pdr(va + 1);
if (blkendva > eva)
blkendva = eva;
if (pdei(va) >= PDSLOT_PTE && pdei(va) <= (PDSLOT_PTE + 3))
continue;
if (!pmap_valid_entry(PDE(pmap, pdei(va))))
continue;
md_prot = protection_codes[prot];
if (!(prot & PROT_EXEC))
md_prot |= PG_NX;
if (va < VM_MAXUSER_ADDRESS)
md_prot |= PG_u;
else if (va < VM_MAX_ADDRESS)
md_prot |= PG_RW;
spte = &ptes[atop(va)];
epte = &ptes[atop(blkendva)];
for (; spte < epte ; spte++, va += PAGE_SIZE) {
if (!pmap_valid_entry(*spte))
continue;
opte = *spte;
npte = (opte & ~(pt_entry_t)PG_PROT) | md_prot;
if (npte != opte) {
pmap_exec_account(pmap, va, *spte, npte);
i386_atomic_testset_uq(spte, npte);
}
}
}
if (shootall)
pmap_tlb_shoottlb();
else
pmap_tlb_shootrange(pmap, sva, eva);
pmap_unmap_ptes_pae(pmap);
pmap_tlb_shootwait();
}
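/* pmap_unwire_pae: clear the wired bit of a mapping and adjust the stats */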
void
pmap_unwire_pae(struct pmap *pmap, vaddr_t va)
{
pt_entry_t *ptes;
if (pmap_valid_entry(PDE(pmap, pdei(va)))) {
ptes = pmap_map_ptes_pae(pmap);
#ifdef DIAGNOSTIC
if (!pmap_valid_entry(ptes[atop(va)]))
panic("pmap_unwire_pae: invalid (unmapped) va "
"0x%lx", va);
#endif
if ((ptes[atop(va)] & PG_W) != 0) {
i386_atomic_testset_uq(&ptes[atop(va)],
ptes[atop(va)] & ~PG_W);
pmap->pm_stats.wired_count--;
}
#ifdef DIAGNOSTIC
else {
printf("pmap_unwire_pae: wiring for pmap %p va 0x%lx "
"didn't change!\n", pmap, va);
}
#endif
pmap_unmap_ptes_pae(pmap);
}
#ifdef DIAGNOSTIC
else {
panic("pmap_unwire_pae: invalid PDE");
}
#endif
}
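/*
 * pmap_enter_pae: enter a mapping of pa at va with protection prot.
 * Handles replacement of an existing mapping, PV list bookkeeping and
 * wired/cache-control bits; with PMAP_CANFAIL it returns ENOMEM
 * instead of panicking when a PTP or PV entry cannot be allocated.
 */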
int
pmap_enter_pae(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
int flags)
{
pt_entry_t *ptes, opte, npte;
struct vm_page *ptp;
struct pv_entry *pve, *opve = NULL;
int wired = (flags & PMAP_WIRED) != 0;
int nocache = (pa & PMAP_NOCACHE) != 0;
int wc = (pa & PMAP_WC) != 0;
struct vm_page *pg = NULL;
int error, wired_count, resident_count, ptp_count;
KASSERT(!(wc && nocache));
pa &= PMAP_PA_MASK;
#ifdef DIAGNOSTIC
if (va >= VM_MAX_KERNEL_ADDRESS)
panic("pmap_enter_pae: too big");
if (va == (vaddr_t) PDP_BASE || va == (vaddr_t) APDP_BASE)
panic("pmap_enter_pae: trying to map over PDP/APDP!");
if (va >= VM_MIN_KERNEL_ADDRESS &&
!pmap_valid_entry(PDE(pmap, pdei(va))))
panic("pmap_enter_pae: missing kernel PTP!");
#endif
if (pmap_initialized)
pve = pool_get(&pmap_pv_pool, PR_NOWAIT);
else
pve = NULL;
wired_count = resident_count = ptp_count = 0;
ptes = pmap_map_ptes_pae(pmap);
if (pmap == pmap_kernel()) {
ptp = NULL;
} else {
ptp = pmap_get_ptp_pae(pmap, pdei(va));
if (ptp == NULL) {
if (flags & PMAP_CANFAIL) {
error = ENOMEM;
pmap_unmap_ptes_pae(pmap);
goto out;
}
panic("pmap_enter_pae: get ptp failed");
}
}
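/* is there currently a mapping at our VA, and does it map our PA? */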
opte = ptes[atop(va)];
if (pmap_valid_entry(opte)) {
if (wired && (opte & PG_W) == 0)
wired_count++;
else if (!wired && (opte & PG_W) != 0)
wired_count--;
if ((opte & PG_FRAME) == pa) {
if (opte & PG_PVLIST) {
pg = PHYS_TO_VM_PAGE(pa);
#ifdef DIAGNOSTIC
if (pg == NULL)
panic("pmap_enter_pae: same pa "
"PG_PVLIST mapping with "
"unmanaged page "
"pa = 0x%lx (0x%lx)", pa,
atop(pa));
#endif
pmap_sync_flags_pte_pae(pg, opte);
}
goto enter_now;
}
if (opte & PG_PVLIST) {
pg = PHYS_TO_VM_PAGE(opte & PG_FRAME);
#ifdef DIAGNOSTIC
if (pg == NULL)
panic("pmap_enter_pae: PG_PVLIST mapping with "
"unmanaged page "
"pa = 0x%lx (0x%lx)", pa, atop(pa));
#endif
pmap_sync_flags_pte_pae(pg, opte);
opve = pmap_remove_pv(pg, pmap, va);
pg = NULL;
}
} else {
resident_count++;
if (wired)
wired_count++;
if (ptp)
ptp_count++;
}
if (pmap_initialized && pg == NULL)
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL) {
if (pve == NULL) {
pve = opve;
opve = NULL;
}
if (pve == NULL) {
if (flags & PMAP_CANFAIL) {
pmap_unmap_ptes_pae(pmap);
error = ENOMEM;
goto out;
}
panic("pmap_enter_pae: no pv entries available");
}
pmap_enter_pv(pg, pve, pmap, va, ptp);
pve = NULL;
}
enter_now:
npte = pa | protection_codes[prot] | PG_V;
if (!(prot & PROT_EXEC))
npte |= PG_NX;
pmap_exec_account(pmap, va, opte, npte);
if (wired)
npte |= PG_W;
if (nocache)
npte |= PG_N;
if (va < VM_MAXUSER_ADDRESS)
npte |= PG_u;
else if (va < VM_MAX_ADDRESS)
npte |= PG_RW;
if (pmap == pmap_kernel())
npte |= pmap_pg_g;
if (flags & PROT_READ)
npte |= PG_U;
if (flags & PROT_WRITE)
npte |= PG_M;
if (pg) {
npte |= PG_PVLIST;
if (pg->pg_flags & PG_PMAP_WC) {
KASSERT(nocache == 0);
wc = 1;
}
pmap_sync_flags_pte_pae(pg, npte);
}
if (wc)
npte |= pmap_pg_wc;
opte = i386_atomic_testset_uq(&ptes[atop(va)], npte);
if (ptp)
ptp->wire_count += ptp_count;
pmap->pm_stats.resident_count += resident_count;
pmap->pm_stats.wired_count += wired_count;
if (pmap_valid_entry(opte)) {
if (nocache && (opte & PG_N) == 0)
wbinvd_on_all_cpus();
pmap_tlb_shootpage(pmap, va);
}
pmap_unmap_ptes_pae(pmap);
pmap_tlb_shootwait();
error = 0;
out:
if (pve)
pool_put(&pmap_pv_pool, pve);
if (opve)
pool_put(&pmap_pv_pool, opve);
return error;
}
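/*
 * pmap_enter_special_pae: enter a mapping into the special U-K page
 * directory used on Meltdown-affected CPUs, allocating intermediate
 * PT pages as needed.  The matching U+K entry is marked global
 * (PG_G) when the CPU supports it.
 */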
void
pmap_enter_special_pae(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int32_t flags)
{
struct pmap *pmap = pmap_kernel();
struct vm_page *ptppg = NULL;
pd_entry_t *pd, *ptp;
pt_entry_t *ptes;
uint32_t l2idx, l1idx;
paddr_t npa;
if (!cpu_meltdown)
return;
if (va < VM_MIN_KERNEL_ADDRESS)
panic("invalid special mapping va 0x%lx requested", va);
KASSERT(pmap->pm_pdir_intel != 0);
DPRINTF("%s: pm_pdir_intel 0x%x pm_pdirpa_intel 0x%x\n", __func__,
(uint32_t)pmap->pm_pdir_intel, (uint32_t)pmap->pm_pdirpa_intel);
l2idx = pdei(va);
l1idx = ptei(va);
DPRINTF("%s: va 0x%08lx pa 0x%08lx prot 0x%08lx flags 0x%08x "
"l2idx %u l1idx %u\n", __func__, va, pa, (unsigned long)prot,
flags, l2idx, l1idx);
if ((pd = (pd_entry_t *)pmap->pm_pdir_intel) == 0)
panic("%s: PD not initialized for pmap @ %p", __func__, pmap);
npa = pd[l2idx] & PMAP_PA_MASK;
if (!npa) {
ptppg = uvm_pagealloc(&pmap->pm_obj, ptp_i2o(l2idx + 2048),
NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
if (ptppg == NULL)
panic("%s: failed to allocate PT page", __func__);
atomic_clearbits_int(&ptppg->pg_flags, PG_BUSY);
ptppg->wire_count = 1;
npa = VM_PAGE_TO_PHYS(ptppg);
pd[l2idx] = (npa | PG_RW | PG_V | PG_M | PG_U);
DPRINTF("%s: allocated new PT page at phys 0x%x, "
"setting PDE[%d] = 0x%llx\n", __func__, (uint32_t)npa,
l2idx, pd[l2idx]);
}
if (ptppg == NULL && (ptppg = PHYS_TO_VM_PAGE(npa)) == NULL)
panic("%s: no vm_page for PT page", __func__);
mtx_enter(&ptppg->mdpage.pv_mtx);
ptp = (pd_entry_t *)pmap_tmpmap_pa(npa);
ptp[l1idx] = (pa | protection_codes[prot] | PG_V | PG_M | PG_U | flags);
DPRINTF("%s: setting PTE[%d] = 0x%llx\n", __func__, l1idx, ptp[l1idx]);
pmap_tmpunmap_pa();
mtx_leave(&ptppg->mdpage.pv_mtx);
if (!(cpu_feature & CPUID_PGE))
return;
ptes = pmap_map_ptes_pae(pmap);
if (pmap_valid_entry(ptes[atop(va)]))
ptes[atop(va)] |= PG_G;
else
DPRINTF("%s: no U+K mapping for special mapping?\n", __func__);
pmap_unmap_ptes_pae(pmap);
}
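/*
 * pmap_growkernel_pae: grow kernel VM to cover maxkvaddr.  Before UVM
 * is fully initialized pages are stolen with uvm_page_physget();
 * afterwards PTPs are allocated normally and the new kernel PDEs are
 * propagated to every pmap on the pmaps list.
 */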
vaddr_t
pmap_growkernel_pae(vaddr_t maxkvaddr)
{
extern int nkpde;
struct pmap *kpm = pmap_kernel(), *pm;
int needed_kpde;
int s;
paddr_t ptaddr;
needed_kpde = (int)(maxkvaddr - VM_MIN_KERNEL_ADDRESS + (NBPD-1))
/ NBPD;
if (needed_kpde <= nkpde)
goto out;
s = splhigh();
for ( ; nkpde < needed_kpde ; nkpde++) {
if (uvm.page_init_done == 0) {
if (uvm_page_physget(&ptaddr) == 0)
panic("pmap_growkernel: out of memory");
pmap_zero_phys_pae(ptaddr);
PDE(kpm, PDSLOT_KERN + nkpde) =
ptaddr | PG_RW | PG_V | PG_U | PG_M;
kpm->pm_stats.resident_count++;
continue;
}
while (!pmap_alloc_ptp_pae(kpm, PDSLOT_KERN + nkpde, 0))
uvm_wait("pmap_growkernel");
mtx_enter(&pmaps_lock);
LIST_FOREACH(pm, &pmaps, pm_list) {
PDE(pm, PDSLOT_KERN + nkpde) =
PDE(kpm, PDSLOT_KERN + nkpde);
}
mtx_leave(&pmaps_lock);
}
splx(s);
out:
return (VM_MIN_KERNEL_ADDRESS + (nkpde * NBPD));
}
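/*
 * pmap_prealloc_lowmem_ptp_pae: enter the page at PTP0_PA as the page
 * table page for VA 0 and clear it before use.
 */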
void
pmap_prealloc_lowmem_ptp_pae(void)
{
pt_entry_t *pte, npte;
vaddr_t ptpva = (vaddr_t)vtopte(0);
pte = vtopte(ptpva);
npte = PTP0_PA | PG_RW | PG_V | PG_U | PG_M;
i386_atomic_testset_uq(pte, npte);
memset((void *)ptpva, 0, NBPG);
}
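/* pmap_tmpmap_pa_pae: map a physical page at this CPU's temporary PTP slot */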
vaddr_t
pmap_tmpmap_pa_pae(paddr_t pa)
{
#ifdef MULTIPROCESSOR
int id = cpu_number();
#endif
pt_entry_t *ptpte = PTESLEW(ptp_pte, id);
caddr_t ptpva = VASLEW(pmap_ptpp, id);
#if defined(DIAGNOSTIC)
if (*ptpte)
panic("pmap_tmpmap_pa_pae: ptp_pte in use?");
#endif
*ptpte = PG_V | PG_RW | pa;
return((vaddr_t)ptpva);
}
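/* pmap_tmpunmap_pa_pae: unmap the page mapped by pmap_tmpmap_pa_pae */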
void
pmap_tmpunmap_pa_pae(void)
{
#ifdef MULTIPROCESSOR
int id = cpu_number();
#endif
pt_entry_t *ptpte = PTESLEW(ptp_pte, id);
caddr_t ptpva = VASLEW(pmap_ptpp, id);
#if defined(DIAGNOSTIC)
if (!pmap_valid_entry(*ptpte))
panic("pmap_tmpunmap_pa_pae: our pte invalid?");
#endif
*ptpte = 0;
pmap_update_pg((vaddr_t)ptpva);
#ifdef MULTIPROCESSOR
/* the mapping is per-CPU, so no remote TLB shootdown is needed */
#endif
}
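/* vtophys_pae: kernel virtual to physical translation via the recursive map */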
paddr_t
vtophys_pae(vaddr_t va)
{
return ((*vtopte(va) & PG_FRAME) | (va & ~PG_FRAME));
}
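/*
 * pmap_flush_page_pae: flush a physical page from the CPU caches
 * through a temporary per-CPU mapping.
 */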
void
pmap_flush_page_pae(paddr_t pa)
{
#ifdef MULTIPROCESSOR
int id = cpu_number();
#endif
pt_entry_t *pte = PTESLEW(flsh_pte, id);
caddr_t va = VASLEW(pmap_flshp, id);
KDASSERT(PHYS_TO_VM_PAGE(pa) != NULL);
#ifdef DIAGNOSTIC
if (*pte)
panic("pmap_flush_page_pae: lock botch");
#endif
*pte = (pa & PG_FRAME) | PG_V | PG_RW;
pmap_update_pg(va);
pmap_flush_cache((vaddr_t)va, PAGE_SIZE);
*pte = 0;
pmap_update_pg(va);
}