#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <uvm/uvm.h>

#include <machine/cpufunc.h>
#include <machine/pmap.h>

#include <machine/db_machdep.h>
#include <ddb/db_extern.h>
#include <ddb/db_output.h>
void pmap_setttb(struct proc *p);
void pmap_allocate_asid(pmap_t);
void pmap_free_asid(pmap_t pm);
#define ASID_USER 1
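/*
 * Invalidate the TLB entries for a single page.  The TLBI operand
 * encodes VA[55:12] in bits [43:0] and the ASID in bits [63:48].
 * A user pmap owns a pair of ASIDs: pm_asid while the full kernel
 * is mapped and pm_asid | ASID_USER while only the trampoline is
 * (see pmap_tramp below), so both are flushed.  Kernel pages may be
 * cached under any ASID and are flushed from all of them.
 */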
static inline void
ttlb_flush(pmap_t pm, vaddr_t va)
{
vaddr_t resva;
if (!pm->pm_active)
return;
resva = ((va >> PAGE_SHIFT) & ((1ULL << 44) - 1));
if (pm == pmap_kernel()) {
cpu_tlb_flush_all_asid(resva);
} else {
resva |= (uint64_t)pm->pm_asid << 48;
cpu_tlb_flush_asid(resva);
resva |= (uint64_t)ASID_USER << 48;
cpu_tlb_flush_asid(resva);
}
}
struct pmap kernel_pmap_;
struct pmap pmap_tramp;
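/*
 * Physical-to-virtual (PV) tracking: every managed mapping of a
 * page is linked onto that page's mdpage pv_list so that
 * pmap_page_protect() and the mod/ref operations can find all
 * mappings of the page.
 */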
LIST_HEAD(pted_pv_head, pte_desc);
struct pte_desc {
LIST_ENTRY(pte_desc) pted_pv_list;
uint64_t pted_pte;
pmap_t pted_pmap;
vaddr_t pted_va;
};
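/*
 * Shadow translation tables, one structure per table page.  The
 * lX[] half is the hardware table holding physical addresses and is
 * what the MMU walks; the vp[] half holds the kernel virtual
 * pointers to the next level down (pte_desc pointers at L3) so the
 * tables can also be walked in software.  All levels are the same
 * size, so one pool serves them all (see the CTASSERTs below).
 */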
struct pmapvp0 {
uint64_t l0[VP_IDX0_CNT];
struct pmapvp1 *vp[VP_IDX0_CNT];
};
struct pmapvp1 {
uint64_t l1[VP_IDX1_CNT];
struct pmapvp2 *vp[VP_IDX1_CNT];
};
struct pmapvp2 {
uint64_t l2[VP_IDX2_CNT];
struct pmapvp3 *vp[VP_IDX2_CNT];
};
struct pmapvp3 {
uint64_t l3[VP_IDX3_CNT];
struct pte_desc *vp[VP_IDX3_CNT];
};
CTASSERT(sizeof(struct pmapvp0) == sizeof(struct pmapvp1));
CTASSERT(sizeof(struct pmapvp0) == sizeof(struct pmapvp2));
CTASSERT(sizeof(struct pmapvp0) == sizeof(struct pmapvp3));
void pmap_vp_destroy(pmap_t pm);
void *pmap_vp_page_alloc(struct pool *, int, int *);
void pmap_vp_page_free(struct pool *, void *);
struct pool_allocator pmap_vp_allocator = {
pmap_vp_page_alloc, pmap_vp_page_free, sizeof(struct pmapvp0)
};
void pmap_remove_pted(pmap_t, struct pte_desc *);
void pmap_kremove_pg(vaddr_t);
void pmap_set_l1(struct pmap *, uint64_t, struct pmapvp1 *);
void pmap_set_l2(struct pmap *, uint64_t, struct pmapvp1 *,
struct pmapvp2 *);
void pmap_set_l3(struct pmap *, uint64_t, struct pmapvp2 *,
struct pmapvp3 *);
void pmap_fill_pte(pmap_t, vaddr_t, paddr_t, struct pte_desc *,
vm_prot_t, int, int);
void pmap_icache_sync_page(struct pmap *, paddr_t);
void pmap_pte_insert(struct pte_desc *);
void pmap_pte_remove(struct pte_desc *, int);
void pmap_pte_update(struct pte_desc *, uint64_t *);
void pmap_release(pmap_t);
paddr_t pmap_steal_avail(size_t, int, void **);
void pmap_remove_avail(paddr_t, paddr_t);
vaddr_t pmap_map_stolen(vaddr_t);
vaddr_t vmmap;
vaddr_t zero_page;
vaddr_t copy_src_page;
vaddr_t copy_dst_page;
struct pool pmap_pmap_pool;
struct pool pmap_pted_pool;
struct pool pmap_vp_pool;
int pmap_initialized = 0;
struct mem_region {
vaddr_t start;
vsize_t size;
};
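/*
 * Physical memory regions tracked during bootstrap: pmap_avail is
 * what is still free for stealing, pmap_allocated is what has been
 * stolen and must be mapped by pmap_map_stolen().
 */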
struct mem_region pmap_avail_regions[10];
struct mem_region pmap_allocated_regions[10];
struct mem_region *pmap_avail = &pmap_avail_regions[0];
struct mem_region *pmap_allocated = &pmap_allocated_regions[0];
int pmap_cnt_avail, pmap_cnt_allocated;
uint64_t pmap_avail_kvo;
static inline void
pmap_lock(struct pmap *pmap)
{
if (pmap != pmap_kernel())
mtx_enter(&pmap->pm_mtx);
}
static inline void
pmap_unlock(struct pmap *pmap)
{
if (pmap != pmap_kernel())
mtx_leave(&pmap->pm_mtx);
}
#define PMAP_ASSERT_LOCKED(pmap) \
if ((pmap) != pmap_kernel()) \
MUTEX_ASSERT_LOCKED(&(pmap)->pm_mtx);
static inline int
VP_IDX0(vaddr_t va)
{
return (va >> VP_IDX0_POS) & VP_IDX0_MASK;
}
static inline int
VP_IDX1(vaddr_t va)
{
return (va >> VP_IDX1_POS) & VP_IDX1_MASK;
}
static inline int
VP_IDX2(vaddr_t va)
{
return (va >> VP_IDX2_POS) & VP_IDX2_MASK;
}
static inline int
VP_IDX3(vaddr_t va)
{
return (va >> VP_IDX3_POS) & VP_IDX3_MASK;
}
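/*
 * Access permissions for the translation tables, indexed by PROT_*
 * combination.  AP(0) is kernel-RW, AP(1) RW for both, AP(2)
 * kernel-RO, AP(3) RO for both; execute permission is granted by
 * leaving the corresponding XN bit clear.  Note that user PROT_EXEC
 * without PROT_READ uses AP(2) with UXN clear: EL0 can fetch
 * instructions from the page but not read it, giving execute-only
 * mappings.
 */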
const uint64_t ap_bits_user[8] = {
[PROT_NONE] = 0,
[PROT_READ] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(3),
[PROT_WRITE] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(1),
[PROT_WRITE|PROT_READ] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(1),
[PROT_EXEC] = ATTR_PXN|ATTR_AF|ATTR_AP(2),
[PROT_EXEC|PROT_READ] = ATTR_PXN|ATTR_AF|ATTR_AP(3),
[PROT_EXEC|PROT_WRITE] = ATTR_PXN|ATTR_AF|ATTR_AP(1),
[PROT_EXEC|PROT_WRITE|PROT_READ] = ATTR_PXN|ATTR_AF|ATTR_AP(1),
};
const uint64_t ap_bits_kern[8] = {
[PROT_NONE] = 0,
[PROT_READ] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(2),
[PROT_WRITE] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(0),
[PROT_WRITE|PROT_READ] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(0),
[PROT_EXEC] = ATTR_UXN|ATTR_AF|ATTR_AP(2),
[PROT_EXEC|PROT_READ] = ATTR_UXN|ATTR_AF|ATTR_AP(2),
[PROT_EXEC|PROT_WRITE] = ATTR_UXN|ATTR_AF|ATTR_AP(0),
[PROT_EXEC|PROT_WRITE|PROT_READ] = ATTR_UXN|ATTR_AF|ATTR_AP(0),
};
#define PMAP_MAX_NASID (1 << 16)
#define PMAP_ASID_MASK (PMAP_MAX_NASID - 1)
int pmap_nasid = (1 << 8);
uint32_t pmap_asid[PMAP_MAX_NASID / 32];
unsigned long pmap_asid_gen = PMAP_MAX_NASID;
struct mutex pmap_asid_mtx = MUTEX_INITIALIZER(IPL_HIGH);
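/*
 * ASIDs are allocated in even/odd pairs, the odd one (ASID_USER)
 * being used while the kernel is unmapped, so the allocation bitmap
 * keeps two bits per pmap.  Find a free pair: first try to re-use
 * the pmap's previous ASID, then a few random picks, then a linear
 * scan.  Returns -1 when the current generation is exhausted.
 */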
int
pmap_find_asid(pmap_t pm)
{
uint32_t bits;
int asid, bit;
int retry;
MUTEX_ASSERT_LOCKED(&pmap_asid_mtx);
asid = pm->pm_asid & PMAP_ASID_MASK;
bit = asid & (32 - 1);
bits = pmap_asid[asid / 32];
if ((bits & (3U << bit)) == 0)
return asid;
for (retry = 5; retry > 0; retry--) {
asid = arc4random() & (pmap_nasid - 2);
bit = (asid & (32 - 1));
bits = pmap_asid[asid / 32];
if ((bits & (3U << bit)) == 0)
return asid;
}
for (asid = 0; asid < pmap_nasid; asid += 32) {
bits = pmap_asid[asid / 32];
if (bits == ~0)
continue;
for (bit = 0; bit < 32; bit += 2) {
if ((bits & (3U << bit)) == 0)
return asid + bit;
}
}
return -1;
}
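/*
 * The ASID space is exhausted; start a new generation.  Bump
 * pmap_asid_gen, clear the bitmap, re-reserve the kernel's pair and
 * the pairs of all pmaps currently running on a CPU (migrating them
 * into the new generation), and flush the whole TLB before any
 * ASID can be re-used.
 */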
int
pmap_rollover_asid(pmap_t pm)
{
struct cpu_info *ci;
CPU_INFO_ITERATOR cii;
unsigned long gen;
int asid, bit;
MUTEX_ASSERT_LOCKED(&pmap_asid_mtx);
gen = atomic_add_long_nv(&pmap_asid_gen, PMAP_MAX_NASID);
memset(pmap_asid, 0, (pmap_nasid / 32) * sizeof(uint32_t));
pmap_asid[0] |= (3U << 0);
CPU_INFO_FOREACH(cii, ci) {
asid = ci->ci_curpm->pm_asid & PMAP_ASID_MASK;
ci->ci_curpm->pm_asid = asid | gen;
bit = (asid & (32 - 1));
pmap_asid[asid / 32] |= (3U << bit);
}
cpu_tlb_flush();
if ((pm->pm_asid & ~PMAP_ASID_MASK) == gen)
return pm->pm_asid & PMAP_ASID_MASK;
return pmap_find_asid(pm);
}
void
pmap_allocate_asid(pmap_t pm)
{
int asid, bit;
mtx_enter(&pmap_asid_mtx);
asid = pmap_find_asid(pm);
if (asid == -1) {
asid = pmap_rollover_asid(pm);
}
KASSERT(asid > 0 && asid < pmap_nasid);
bit = asid & (32 - 1);
pmap_asid[asid / 32] |= (3U << bit);
pm->pm_asid = asid | pmap_asid_gen;
mtx_leave(&pmap_asid_mtx);
}
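/*
 * Release a pmap's ASID pair, flushing any remaining TLB entries
 * tagged with it first.  The bitmap bits are only cleared if the
 * ASID still belongs to the current generation.
 */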
void
pmap_free_asid(pmap_t pm)
{
int asid, bit;
KASSERT(pm != curcpu()->ci_curpm);
cpu_tlb_flush_asid_all((uint64_t)pm->pm_asid << 48);
cpu_tlb_flush_asid_all((uint64_t)(pm->pm_asid | ASID_USER) << 48);
mtx_enter(&pmap_asid_mtx);
if ((pm->pm_asid & ~PMAP_ASID_MASK) == pmap_asid_gen) {
asid = pm->pm_asid & PMAP_ASID_MASK;
bit = (asid & (32 - 1));
pmap_asid[asid / 32] &= ~(3U << bit);
}
mtx_leave(&pmap_asid_mtx);
}
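/*
 * Look up the pte_desc for va, returning NULL if any intermediate
 * table is missing.  If pl3entry is non-NULL it is pointed at the
 * hardware L3 slot for va.
 */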
struct pte_desc *
pmap_vp_lookup(pmap_t pm, vaddr_t va, uint64_t **pl3entry)
{
struct pmapvp1 *vp1;
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
struct pte_desc *pted;
if (pm->have_4_level_pt) {
if (pm->pm_vp.l0 == NULL) {
return NULL;
}
vp1 = pm->pm_vp.l0->vp[VP_IDX0(va)];
} else {
vp1 = pm->pm_vp.l1;
}
if (vp1 == NULL) {
return NULL;
}
vp2 = vp1->vp[VP_IDX1(va)];
if (vp2 == NULL) {
return NULL;
}
vp3 = vp2->vp[VP_IDX2(va)];
if (vp3 == NULL) {
return NULL;
}
pted = vp3->vp[VP_IDX3(va)];
if (pl3entry != NULL)
*pl3entry = &(vp3->l3[VP_IDX3(va)]);
return pted;
}
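/*
 * Link a pte_desc into the shadow tables for va, allocating any
 * missing intermediate tables with PR_NOWAIT.  With PMAP_CANFAIL an
 * allocation failure returns ENOMEM instead of panicking.
 */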
int
pmap_vp_enter(pmap_t pm, vaddr_t va, struct pte_desc *pted, int flags)
{
struct pmapvp1 *vp1;
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
PMAP_ASSERT_LOCKED(pm);
if (pm->have_4_level_pt) {
vp1 = pm->pm_vp.l0->vp[VP_IDX0(va)];
if (vp1 == NULL) {
vp1 = pool_get(&pmap_vp_pool, PR_NOWAIT | PR_ZERO);
if (vp1 == NULL) {
if ((flags & PMAP_CANFAIL) == 0)
panic("%s: unable to allocate L1",
__func__);
return ENOMEM;
}
pmap_set_l1(pm, va, vp1);
}
} else {
vp1 = pm->pm_vp.l1;
}
vp2 = vp1->vp[VP_IDX1(va)];
if (vp2 == NULL) {
vp2 = pool_get(&pmap_vp_pool, PR_NOWAIT | PR_ZERO);
if (vp2 == NULL) {
if ((flags & PMAP_CANFAIL) == 0)
panic("%s: unable to allocate L2", __func__);
return ENOMEM;
}
pmap_set_l2(pm, va, vp1, vp2);
}
vp3 = vp2->vp[VP_IDX2(va)];
if (vp3 == NULL) {
vp3 = pool_get(&pmap_vp_pool, PR_NOWAIT | PR_ZERO);
if (vp3 == NULL) {
if ((flags & PMAP_CANFAIL) == 0)
panic("%s: unable to allocate L3", __func__);
return ENOMEM;
}
pmap_set_l3(pm, va, vp2, vp3);
}
vp3->vp[VP_IDX3(va)] = pted;
return 0;
}
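/*
 * Pre-populate the tables for va so that a subsequent pmap_enter()
 * of va cannot fail for lack of memory.  The allocations sleep
 * (PR_WAITOK), so the pmap lock is dropped around each one; every
 * level is re-checked after relocking and anything that turned out
 * to be unnecessary is returned to its pool.
 */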
void
pmap_vp_populate(pmap_t pm, vaddr_t va)
{
struct pte_desc *pted;
struct pmapvp1 *vp1;
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
void *vp;
pted = pool_get(&pmap_pted_pool, PR_WAITOK | PR_ZERO);
vp = pool_get(&pmap_vp_pool, PR_WAITOK | PR_ZERO);
pmap_lock(pm);
if (pm->have_4_level_pt) {
vp1 = pm->pm_vp.l0->vp[VP_IDX0(va)];
if (vp1 == NULL) {
			vp1 = vp;
			vp = NULL;
pmap_set_l1(pm, va, vp1);
}
} else {
vp1 = pm->pm_vp.l1;
}
if (vp == NULL) {
pmap_unlock(pm);
vp = pool_get(&pmap_vp_pool, PR_WAITOK | PR_ZERO);
pmap_lock(pm);
}
vp2 = vp1->vp[VP_IDX1(va)];
if (vp2 == NULL) {
		vp2 = vp;
		vp = NULL;
pmap_set_l2(pm, va, vp1, vp2);
}
if (vp == NULL) {
pmap_unlock(pm);
vp = pool_get(&pmap_vp_pool, PR_WAITOK | PR_ZERO);
pmap_lock(pm);
}
vp3 = vp2->vp[VP_IDX2(va)];
if (vp3 == NULL) {
		vp3 = vp;
		vp = NULL;
pmap_set_l3(pm, va, vp2, vp3);
}
if (vp3->vp[VP_IDX3(va)] == NULL) {
vp3->vp[VP_IDX3(va)] = pted;
pted = NULL;
}
pmap_unlock(pm);
if (vp)
pool_put(&pmap_vp_pool, vp);
if (pted)
pool_put(&pmap_pted_pool, pted);
}
void *
pmap_vp_page_alloc(struct pool *pp, int flags, int *slowdown)
{
struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
kd.kd_waitok = ISSET(flags, PR_WAITOK);
kd.kd_trylock = ISSET(flags, PR_NOWAIT);
kd.kd_slowdown = slowdown;
return km_alloc(pp->pr_pgsize, &kv_any, &kp_dirty, &kd);
}
void
pmap_vp_page_free(struct pool *pp, void *v)
{
km_free(v, pp->pr_pgsize, &kv_any, &kp_dirty);
}
static inline u_int32_t
PTED_MANAGED(struct pte_desc *pted)
{
return (pted->pted_va & PTED_VA_MANAGED_M);
}
static inline u_int32_t
PTED_WIRED(struct pte_desc *pted)
{
return (pted->pted_va & PTED_VA_WIRED_M);
}
static inline u_int32_t
PTED_VALID(struct pte_desc *pted)
{
return (pted->pted_pte != 0);
}
void
pmap_enter_pv(struct pte_desc *pted, struct vm_page *pg)
{
if (__predict_false(!pmap_initialized))
return;
mtx_enter(&pg->mdpage.pv_mtx);
LIST_INSERT_HEAD(&(pg->mdpage.pv_list), pted, pted_pv_list);
pted->pted_va |= PTED_VA_MANAGED_M;
mtx_leave(&pg->mdpage.pv_mtx);
}
void
pmap_remove_pv(struct pte_desc *pted)
{
struct vm_page *pg = PHYS_TO_VM_PAGE(pted->pted_pte & PTE_RPGN);
mtx_enter(&pg->mdpage.pv_mtx);
LIST_REMOVE(pted, pted_pv_list);
mtx_leave(&pg->mdpage.pv_mtx);
}
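/*
 * Enter a mapping from va to pa with protection prot.  Re-entering
 * an identical mapping is a no-op.  Unless flags carries access
 * bits (or PMAP_WIRED), the hardware PTE is left unpopulated and is
 * filled in on first use by pmap_fault_fixup(), which emulates the
 * referenced/modified bits.
 */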
int
pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
struct pte_desc *pted;
struct vm_page *pg;
int error;
int cache = PMAP_CACHE_WB;
if (pa & PMAP_NOCACHE)
cache = PMAP_CACHE_CI;
if (pa & PMAP_DEVICE)
cache = PMAP_CACHE_DEV_NGNRNE;
pg = PHYS_TO_VM_PAGE(pa);
pmap_lock(pm);
pted = pmap_vp_lookup(pm, va, NULL);
if (pted && PTED_VALID(pted)) {
if ((pted->pted_pte & PTE_RPGN) == (pa & PTE_RPGN) &&
(pted->pted_va & PROT_MASK) == (prot & PROT_MASK) &&
(pted->pted_va & PMAP_CACHE_BITS) == cache) {
pmap_unlock(pm);
return 0;
}
pmap_remove_pted(pm, pted);
if (pm != pmap_kernel())
pted = pmap_vp_lookup(pm, va, NULL);
}
	if (pted == NULL) {
		pted = pool_get(&pmap_pted_pool, PR_NOWAIT | PR_ZERO);
		if (pted == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("%s: failed to allocate pted", __func__);
			error = ENOMEM;
			goto out;
		}
		if (pmap_vp_enter(pm, va, pted, flags)) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("%s: failed to allocate L2/L3", __func__);
			error = ENOMEM;
			pool_put(&pmap_pted_pool, pted);
			goto out;
		}
	}
	/* Account for the mapping only once allocation can no longer fail. */
	pm->pm_stats.resident_count++;
if (pg != NULL &&
((flags & PROT_MASK) || (pg->pg_flags & PG_PMAP_REF))) {
atomic_setbits_int(&pg->pg_flags, PG_PMAP_REF);
if ((prot & PROT_WRITE) && (flags & PROT_WRITE)) {
atomic_setbits_int(&pg->pg_flags, PG_PMAP_MOD);
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
}
}
pmap_fill_pte(pm, va, pa, pted, prot, flags, cache);
if (pg != NULL) {
pmap_enter_pv(pted, pg);
}
if (pg != NULL && (flags & PROT_EXEC)) {
if ((pg->pg_flags & PG_PMAP_EXE) == 0)
pmap_icache_sync_page(pm, pa);
atomic_setbits_int(&pg->pg_flags, PG_PMAP_EXE);
}
if (flags & (PROT_READ|PROT_WRITE|PROT_EXEC|PMAP_WIRED)) {
pmap_pte_insert(pted);
ttlb_flush(pm, va & ~PAGE_MASK);
}
error = 0;
out:
pmap_unlock(pm);
return error;
}
void
pmap_populate(pmap_t pm, vaddr_t va)
{
pmap_vp_populate(pm, va);
}
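/*
 * Remove all mappings in the range [sva, eva), unwiring any wired
 * pages in the process.
 */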
void
pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
struct pte_desc *pted;
vaddr_t va;
pmap_lock(pm);
for (va = sva; va < eva; va += PAGE_SIZE) {
pted = pmap_vp_lookup(pm, va, NULL);
if (pted == NULL)
continue;
if (PTED_WIRED(pted)) {
pm->pm_stats.wired_count--;
pted->pted_va &= ~PTED_VA_WIRED_M;
}
if (PTED_VALID(pted))
pmap_remove_pted(pm, pted);
}
pmap_unlock(pm);
}
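/*
 * Tear down a single mapping: clear the hardware PTE, flush the
 * TLB, unlink the pte_desc from its page's pv_list and, for user
 * pmaps, also remove the pte_desc from the shadow table and free
 * it.
 */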
void
pmap_remove_pted(pmap_t pm, struct pte_desc *pted)
{
pm->pm_stats.resident_count--;
if (PTED_WIRED(pted)) {
pm->pm_stats.wired_count--;
pted->pted_va &= ~PTED_VA_WIRED_M;
}
pmap_pte_remove(pted, pm != pmap_kernel());
ttlb_flush(pm, pted->pted_va & ~PAGE_MASK);
if (PTED_MANAGED(pted))
pmap_remove_pv(pted);
pted->pted_pte = 0;
pted->pted_va = 0;
if (pm != pmap_kernel())
pool_put(&pmap_pted_pool, pted);
}
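/*
 * Enter a kernel mapping.  Kernel pte_descs are preallocated by
 * pmap_bootstrap() and pmap_growkernel(), so this never sleeps and
 * never fails; the PTE is installed immediately rather than lazily.
 */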
void
_pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
{
pmap_t pm = pmap_kernel();
struct pte_desc *pted;
struct vm_page *pg;
pted = pmap_vp_lookup(pm, va, NULL);
if (pted == NULL) {
panic("pted not preallocated in pmap_kernel() va %lx pa %lx",
va, pa);
}
if (pted && PTED_VALID(pted))
pmap_kremove_pg(va);
pm->pm_stats.resident_count++;
flags |= PMAP_WIRED;
pmap_fill_pte(pm, va, pa, pted, prot, flags, cache);
pmap_pte_insert(pted);
ttlb_flush(pm, va & ~PAGE_MASK);
pg = PHYS_TO_VM_PAGE(pted->pted_pte & PTE_RPGN);
if (pg && (cache == PMAP_CACHE_CI || cache == PMAP_CACHE_DEV_NGNRNE))
cpu_idcache_wbinv_range(va & ~PAGE_MASK, PAGE_SIZE);
}
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
int cache = PMAP_CACHE_WB;
if (pa & PMAP_NOCACHE)
cache = PMAP_CACHE_CI;
if (pa & PMAP_DEVICE)
cache = PMAP_CACHE_DEV_NGNRNE;
_pmap_kenter_pa(va, pa, prot, prot, cache);
}
void
pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable)
{
_pmap_kenter_pa(va, pa, prot, prot, cacheable);
}
void
pmap_kremove_pg(vaddr_t va)
{
pmap_t pm = pmap_kernel();
struct pte_desc *pted;
int s;
pted = pmap_vp_lookup(pm, va, NULL);
if (pted == NULL)
return;
if (!PTED_VALID(pted))
return;
s = splvm();
pm->pm_stats.resident_count--;
pmap_pte_remove(pted, 0);
ttlb_flush(pm, pted->pted_va & ~PAGE_MASK);
if (PTED_MANAGED(pted))
pmap_remove_pv(pted);
if (PTED_WIRED(pted))
pm->pm_stats.wired_count--;
pted->pted_pte = 0;
pted->pted_va = 0;
splx(s);
}
void
pmap_kremove(vaddr_t va, vsize_t len)
{
	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE)
pmap_kremove_pg(va);
}
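/*
 * Initialize a pte_desc for a new mapping.  The cache mode,
 * protection and wiring are kept in the low bits of pted_va;
 * pted_pte holds the physical page number together with the access
 * bits actually granted so far (the rest are granted lazily by
 * pmap_fault_fixup()).
 */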
void
pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
vm_prot_t prot, int flags, int cache)
{
pted->pted_va = va;
pted->pted_pmap = pm;
switch (cache) {
case PMAP_CACHE_WB:
break;
case PMAP_CACHE_WT:
break;
case PMAP_CACHE_CI:
break;
case PMAP_CACHE_DEV_NGNRNE:
break;
case PMAP_CACHE_DEV_NGNRE:
break;
default:
panic("%s: invalid cache mode", __func__);
}
pted->pted_va |= cache;
pted->pted_va |= prot & PROT_MASK;
if (flags & PMAP_WIRED) {
pted->pted_va |= PTED_VA_WIRED_M;
pm->pm_stats.wired_count++;
}
pted->pted_pte = pa & PTE_RPGN;
pted->pted_pte |= flags & PROT_MASK;
}
void
pmap_zero_page(struct vm_page *pg)
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
vaddr_t va = zero_page + cpu_number() * PAGE_SIZE;
KASSERT(curcpu()->ci_idepth == 0);
pmap_kenter_pa(va, pa, PROT_READ|PROT_WRITE);
pagezero_cache(va);
pmap_kremove_pg(va);
}
void
pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
{
paddr_t srcpa = VM_PAGE_TO_PHYS(srcpg);
paddr_t dstpa = VM_PAGE_TO_PHYS(dstpg);
vaddr_t srcva = copy_src_page + cpu_number() * PAGE_SIZE;
vaddr_t dstva = copy_dst_page + cpu_number() * PAGE_SIZE;
int s;
s = splbio();
pmap_kenter_pa(srcva, srcpa, PROT_READ);
pmap_kenter_pa(dstva, dstpa, PROT_READ|PROT_WRITE);
memcpy((void *)dstva, (void *)srcva, PAGE_SIZE);
pmap_kremove_pg(srcva);
pmap_kremove_pg(dstva);
splx(s);
}
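/*
 * Allocate and install the top-level shadow table of a pmap (L0
 * with 4-level translation, L1 otherwise) and record its physical
 * address for later use in TTBR0.
 */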
void
pmap_pinit(pmap_t pm)
{
vaddr_t l0va;
if (pm->have_4_level_pt) {
while (pm->pm_vp.l0 == NULL) {
pm->pm_vp.l0 = pool_get(&pmap_vp_pool,
PR_WAITOK | PR_ZERO);
}
l0va = (vaddr_t)pm->pm_vp.l0->l0;
} else {
while (pm->pm_vp.l1 == NULL) {
pm->pm_vp.l1 = pool_get(&pmap_vp_pool,
PR_WAITOK | PR_ZERO);
}
l0va = (vaddr_t)pm->pm_vp.l1->l1;
}
pmap_extract(pmap_kernel(), l0va, (paddr_t *)&pm->pm_pt0pa);
pmap_reference(pm);
}
int pmap_vp_poolcache = 0;
pmap_t
pmap_create(void)
{
pmap_t pmap;
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK | PR_ZERO);
mtx_init(&pmap->pm_mtx, IPL_VM);
pmap_pinit(pmap);
if (pmap_vp_poolcache == 0) {
pool_setlowat(&pmap_vp_pool, 20);
pmap_vp_poolcache = 20;
}
return (pmap);
}
void
pmap_reference(pmap_t pm)
{
atomic_inc_int(&pm->pm_refs);
}
void
pmap_destroy(pmap_t pm)
{
int refs;
refs = atomic_dec_int_nv(&pm->pm_refs);
if (refs > 0)
return;
pmap_release(pm);
pmap_free_asid(pm);
pool_put(&pmap_pmap_pool, pm);
}
void
pmap_release(pmap_t pm)
{
pmap_vp_destroy(pm);
}
void
pmap_vp_destroy_l2_l3(pmap_t pm, struct pmapvp1 *vp1)
{
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
struct pte_desc *pted;
int j, k, l;
for (j = 0; j < VP_IDX1_CNT; j++) {
vp2 = vp1->vp[j];
if (vp2 == NULL)
continue;
vp1->vp[j] = NULL;
for (k = 0; k < VP_IDX2_CNT; k++) {
vp3 = vp2->vp[k];
if (vp3 == NULL)
continue;
vp2->vp[k] = NULL;
for (l = 0; l < VP_IDX3_CNT; l++) {
pted = vp3->vp[l];
if (pted == NULL)
continue;
vp3->vp[l] = NULL;
pool_put(&pmap_pted_pool, pted);
}
pool_put(&pmap_vp_pool, vp3);
}
pool_put(&pmap_vp_pool, vp2);
}
}
void
pmap_vp_destroy(pmap_t pm)
{
struct pmapvp0 *vp0;
struct pmapvp1 *vp1;
int i;
if (!pm->have_4_level_pt) {
pmap_vp_destroy_l2_l3(pm, pm->pm_vp.l1);
pool_put(&pmap_vp_pool, pm->pm_vp.l1);
pm->pm_vp.l1 = NULL;
return;
}
vp0 = pm->pm_vp.l0;
for (i = 0; i < VP_IDX0_CNT; i++) {
vp1 = vp0->vp[i];
if (vp1 == NULL)
continue;
vp0->vp[i] = NULL;
pmap_vp_destroy_l2_l3(pm, vp1);
pool_put(&pmap_vp_pool, vp1);
}
pool_put(&pmap_vp_pool, vp0);
pm->pm_vp.l0 = NULL;
}
vaddr_t virtual_avail;
int pmap_virtual_space_called;
static inline uint64_t
VP_Lx(paddr_t pa)
{
return pa | Lx_TYPE_PT;
}
vaddr_t pmap_maxkvaddr = VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024;
struct vm_map *pmap_kvp_map;
const struct kmem_va_mode kv_kvp = {
.kv_map = &pmap_kvp_map,
.kv_wait = 0
};
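/*
 * Allocate a page-table page for pmap_growkernel().  Early in boot,
 * before UVM is up, pages are taken straight from the physical
 * allocator and mapped at virtual_avail; later they come from the
 * dedicated pmap_kvp_map submap sized by pmap_postinit().
 */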
void *
pmap_kvp_alloc(void)
{
void *kvp;
if (!uvm.page_init_done && !pmap_virtual_space_called) {
paddr_t pa[2];
vaddr_t va;
if (!uvm_page_physget(&pa[0]) || !uvm_page_physget(&pa[1]))
panic("%s: out of memory", __func__);
va = virtual_avail;
virtual_avail += 2 * PAGE_SIZE;
KASSERT(virtual_avail <= pmap_maxkvaddr);
kvp = (void *)va;
pmap_kenter_pa(va, pa[0], PROT_READ|PROT_WRITE);
pmap_kenter_pa(va + PAGE_SIZE, pa[1], PROT_READ|PROT_WRITE);
pagezero_cache(va);
pagezero_cache(va + PAGE_SIZE);
} else {
kvp = km_alloc(sizeof(struct pmapvp0), &kv_kvp, &kp_zero,
&kd_nowait);
}
return kvp;
}
struct pte_desc *
pmap_kpted_alloc(void)
{
static struct pte_desc *pted;
static int npted;
if (npted == 0) {
if (!uvm.page_init_done && !pmap_virtual_space_called) {
paddr_t pa;
vaddr_t va;
if (!uvm_page_physget(&pa))
panic("%s: out of memory", __func__);
va = virtual_avail;
virtual_avail += PAGE_SIZE;
KASSERT(virtual_avail <= pmap_maxkvaddr);
pted = (struct pte_desc *)va;
pmap_kenter_pa(va, pa, PROT_READ|PROT_WRITE);
pagezero_cache(va);
} else {
pted = km_alloc(PAGE_SIZE, &kv_kvp, &kp_zero,
&kd_nowait);
if (pted == NULL)
return NULL;
}
npted = PAGE_SIZE / sizeof(struct pte_desc);
}
npted--;
return pted++;
}
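/*
 * Grow the kernel page tables to cover maxkvaddr: allocate any
 * missing L2/L3 shadow tables and preallocate a pte_desc for every
 * page of the new range, so that pmap_kenter_pa() can never fail.
 */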
vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
struct pmapvp1 *vp1 = pmap_kernel()->pm_vp.l1;
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
struct pte_desc *pted;
paddr_t pa;
int lb_idx2, ub_idx2;
int i, j, k;
int s;
if (maxkvaddr <= pmap_maxkvaddr)
return pmap_maxkvaddr;
s = splvm();
for (i = VP_IDX1(pmap_maxkvaddr); i <= VP_IDX1(maxkvaddr - 1); i++) {
vp2 = vp1->vp[i];
if (vp2 == NULL) {
vp2 = pmap_kvp_alloc();
if (vp2 == NULL)
goto fail;
pmap_extract(pmap_kernel(), (vaddr_t)vp2, &pa);
vp1->vp[i] = vp2;
vp1->l1[i] = VP_Lx(pa);
}
if (i == VP_IDX1(pmap_maxkvaddr)) {
lb_idx2 = VP_IDX2(pmap_maxkvaddr);
} else {
lb_idx2 = 0;
}
if (i == VP_IDX1(maxkvaddr - 1)) {
ub_idx2 = VP_IDX2(maxkvaddr - 1);
} else {
ub_idx2 = VP_IDX2_CNT - 1;
}
for (j = lb_idx2; j <= ub_idx2; j++) {
vp3 = vp2->vp[j];
if (vp3 == NULL) {
vp3 = pmap_kvp_alloc();
if (vp3 == NULL)
goto fail;
pmap_extract(pmap_kernel(), (vaddr_t)vp3, &pa);
vp2->vp[j] = vp3;
vp2->l2[j] = VP_Lx(pa);
}
for (k = 0; k <= VP_IDX3_CNT - 1; k++) {
if (vp3->vp[k] == NULL) {
pted = pmap_kpted_alloc();
if (pted == NULL)
goto fail;
vp3->vp[k] = pted;
pmap_maxkvaddr += PAGE_SIZE;
}
}
}
}
KASSERT(pmap_maxkvaddr >= maxkvaddr);
fail:
splx(s);
return pmap_maxkvaddr;
}
void pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo);
void pmap_avail_fixup(void);
CTASSERT(sizeof(struct pmapvp0) == 2 * PAGE_SIZE);
int mappings_allocated = 0;
int pted_allocated = 0;
extern char __text_start[], _etext[];
extern char __rodata_start[], _erodata[];
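/*
 * Early bootstrap, entered with the MMU running on the bootloader's
 * tables.  Carve the kernel image out of the available physical
 * memory, steal pages for the kernel and trampoline L1 tables,
 * build the shadow tables covering the initial kernel VA range and
 * preallocate their pte_descs, map the stolen memory, then switch
 * to the new tables.  kvo is the offset such that pa = va + kvo.
 */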
vaddr_t
pmap_bootstrap(long kvo, paddr_t lpt1, long kernelstart, long kernelend,
long ram_start, long ram_end)
{
void *va;
paddr_t pa, pt1pa;
struct pmapvp1 *vp1;
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
struct pte_desc *pted;
vaddr_t vstart;
uint64_t id_aa64mmfr0;
int i, j, k;
int lb_idx2, ub_idx2;
pmap_setup_avail(ram_start, ram_end, kvo);
printf("removing %lx-%lx\n", ram_start, kernelstart+kvo);
pmap_remove_avail(ram_start, kernelstart+kvo);
printf("removing %lx-%lx\n", kernelstart+kvo, kernelend+kvo);
pmap_remove_avail(kernelstart+kvo, kernelend+kvo);
pt1pa = pmap_steal_avail(2 * sizeof(struct pmapvp1), Lx_TABLE_ALIGN,
&va);
vp1 = (struct pmapvp1 *)pt1pa;
pmap_kernel()->pm_vp.l1 = (struct pmapvp1 *)va;
pmap_kernel()->pm_privileged = 1;
pmap_kernel()->pm_active = 1;
pmap_kernel()->pm_guarded = ATTR_GP;
pmap_kernel()->pm_asid = 0;
mtx_init(&pmap_tramp.pm_mtx, IPL_VM);
pmap_tramp.pm_vp.l1 = (struct pmapvp1 *)va + 1;
pmap_tramp.pm_privileged = 1;
pmap_tramp.pm_active = 1;
pmap_tramp.pm_guarded = ATTR_GP;
pmap_tramp.pm_asid = 0;
pmap_asid[0] |= (3U << 0);
for (i = VP_IDX1(VM_MIN_KERNEL_ADDRESS);
i <= VP_IDX1(pmap_maxkvaddr - 1);
i++) {
mappings_allocated++;
pa = pmap_steal_avail(sizeof(struct pmapvp2), Lx_TABLE_ALIGN,
&va);
vp2 = (struct pmapvp2 *)pa;
vp1->vp[i] = va;
vp1->l1[i] = VP_Lx(pa);
if (i == VP_IDX1(VM_MIN_KERNEL_ADDRESS)) {
lb_idx2 = VP_IDX2(VM_MIN_KERNEL_ADDRESS);
} else {
lb_idx2 = 0;
}
if (i == VP_IDX1(pmap_maxkvaddr - 1)) {
ub_idx2 = VP_IDX2(pmap_maxkvaddr - 1);
} else {
ub_idx2 = VP_IDX2_CNT - 1;
}
for (j = lb_idx2; j <= ub_idx2; j++) {
mappings_allocated++;
pa = pmap_steal_avail(sizeof(struct pmapvp3),
Lx_TABLE_ALIGN, &va);
vp3 = (struct pmapvp3 *)pa;
vp2->vp[j] = va;
vp2->l2[j] = VP_Lx(pa);
}
}
for (i = VP_IDX1(VM_MIN_KERNEL_ADDRESS);
i <= VP_IDX1(pmap_maxkvaddr - 1);
i++) {
vp2 = (void *)((long)vp1->vp[i] + kvo);
if (i == VP_IDX1(VM_MIN_KERNEL_ADDRESS)) {
lb_idx2 = VP_IDX2(VM_MIN_KERNEL_ADDRESS);
} else {
lb_idx2 = 0;
}
if (i == VP_IDX1(pmap_maxkvaddr - 1)) {
ub_idx2 = VP_IDX2(pmap_maxkvaddr - 1);
} else {
ub_idx2 = VP_IDX2_CNT - 1;
}
for (j = lb_idx2; j <= ub_idx2; j++) {
vp3 = (void *)((long)vp2->vp[j] + kvo);
for (k = 0; k <= VP_IDX3_CNT - 1; k++) {
pted_allocated++;
pa = pmap_steal_avail(sizeof(struct pte_desc),
4, &va);
pted = va;
vp3->vp[k] = pted;
}
}
}
pa = pmap_steal_avail(Lx_TABLE_ALIGN, Lx_TABLE_ALIGN, &va);
memset((void *)pa, 0, Lx_TABLE_ALIGN);
pmap_kernel()->pm_pt0pa = pa;
pmap_avail_fixup();
vstart = pmap_map_stolen(kernelstart);
void (switch_mmu_kernel)(long);
void (*switch_mmu_kernel_table)(long) =
(void *)((long)&switch_mmu_kernel + kvo);
switch_mmu_kernel_table(pt1pa);
printf("all mapped\n");
curcpu()->ci_curpm = pmap_kernel();
id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
if (ID_AA64MMFR0_ASID_BITS(id_aa64mmfr0) == ID_AA64MMFR0_ASID_BITS_16)
pmap_nasid = (1 << 16);
vmmap = vstart;
vstart += PAGE_SIZE;
return vstart;
}
void
pmap_set_l1(struct pmap *pm, uint64_t va, struct pmapvp1 *l1_va)
{
uint64_t pg_entry;
paddr_t l1_pa;
int idx0;
if (pmap_extract(pmap_kernel(), (vaddr_t)l1_va, &l1_pa) == 0)
panic("unable to find vp pa mapping %p", l1_va);
if (l1_pa & (Lx_TABLE_ALIGN-1))
panic("misaligned L2 table");
pg_entry = VP_Lx(l1_pa);
idx0 = VP_IDX0(va);
pm->pm_vp.l0->vp[idx0] = l1_va;
pm->pm_vp.l0->l0[idx0] = pg_entry;
}
void
pmap_set_l2(struct pmap *pm, uint64_t va, struct pmapvp1 *vp1,
struct pmapvp2 *l2_va)
{
uint64_t pg_entry;
paddr_t l2_pa;
int idx1;
if (pmap_extract(pmap_kernel(), (vaddr_t)l2_va, &l2_pa) == 0)
panic("unable to find vp pa mapping %p", l2_va);
if (l2_pa & (Lx_TABLE_ALIGN-1))
panic("misaligned L2 table");
pg_entry = VP_Lx(l2_pa);
idx1 = VP_IDX1(va);
vp1->vp[idx1] = l2_va;
vp1->l1[idx1] = pg_entry;
}
void
pmap_set_l3(struct pmap *pm, uint64_t va, struct pmapvp2 *vp2,
struct pmapvp3 *l3_va)
{
uint64_t pg_entry;
paddr_t l3_pa;
int idx2;
if (pmap_extract(pmap_kernel(), (vaddr_t)l3_va, &l3_pa) == 0)
panic("unable to find vp pa mapping %p", l3_va);
if (l3_pa & (Lx_TABLE_ALIGN-1))
panic("misaligned L2 table");
pg_entry = VP_Lx(l3_pa);
idx2 = VP_IDX2(va);
vp2->vp[idx2] = l3_va;
vp2->l2[idx2] = pg_entry;
}
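/*
 * pm_active counts the threads/CPUs that may be using the pmap;
 * ttlb_flush() skips pmaps with no active users.
 */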
void
pmap_activate(struct proc *p)
{
pmap_t pm = p->p_vmspace->vm_map.pmap;
atomic_inc_int(&pm->pm_active);
if (p == curproc && pm != curcpu()->ci_curpm)
pmap_setttb(p);
}
void
pmap_deactivate(struct proc *p)
{
pmap_t pm = p->p_vmspace->vm_map.pmap;
KASSERT(p == curproc);
if (pm->pm_active == 0)
return;
WRITE_SPECIALREG(ttbr0_el1, pmap_kernel()->pm_pt0pa);
__asm volatile("isb");
atomic_dec_int(&pm->pm_active);
}
void
pmap_purge(struct proc *p)
{
pmap_t pm = p->p_vmspace->vm_map.pmap;
KASSERT(p->p_p->ps_threadcnt == 0);
KASSERT(p == curproc);
while (pm->pm_active != 1)
CPU_BUSY_CYCLE();
WRITE_SPECIALREG(ttbr0_el1, pmap_kernel()->pm_pt0pa);
__asm volatile("isb");
cpu_tlb_flush_asid_all((uint64_t)pm->pm_asid << 48);
cpu_tlb_flush_asid_all((uint64_t)(pm->pm_asid | ASID_USER) << 48);
pm->pm_pt0pa = pmap_kernel()->pm_pt0pa;
pm->pm_active = 0;
}
int
pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
{
struct pte_desc *pted;
pmap_lock(pm);
pted = pmap_vp_lookup(pm, va, NULL);
if (!pted || !PTED_VALID(pted)) {
pmap_unlock(pm);
return 0;
}
if (pap != NULL)
*pap = (pted->pted_pte & PTE_RPGN) | (va & PAGE_MASK);
pmap_unlock(pm);
return 1;
}
void
pmap_page_ro(pmap_t pm, vaddr_t va, vm_prot_t prot)
{
struct pte_desc *pted;
uint64_t *pl3;
pted = pmap_vp_lookup(pm, va, &pl3);
if (!pted || !PTED_VALID(pted)) {
return;
}
pted->pted_va &= ~PROT_WRITE;
pted->pted_pte &= ~PROT_WRITE;
if ((prot & PROT_READ) == 0) {
pted->pted_va &= ~PROT_READ;
pted->pted_pte &= ~PROT_READ;
}
if ((prot & PROT_EXEC) == 0) {
pted->pted_va &= ~PROT_EXEC;
pted->pted_pte &= ~PROT_EXEC;
}
pmap_pte_update(pted, pl3);
ttlb_flush(pm, pted->pted_va & ~PAGE_MASK);
}
#ifdef DDB
void
pmap_page_rw(pmap_t pm, vaddr_t va)
{
struct pte_desc *pted;
uint64_t *pl3;
pted = pmap_vp_lookup(pm, va, &pl3);
if (!pted || !PTED_VALID(pted)) {
return;
}
pted->pted_va |= PROT_WRITE;
pted->pted_pte |= PROT_WRITE;
pmap_pte_update(pted, pl3);
ttlb_flush(pm, pted->pted_va & ~PAGE_MASK);
}
#endif
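/*
 * Lower the protection of all mappings of pg; PROT_NONE removes
 * them outright.  The lock order is pmap lock before pv_mtx, so for
 * removal we take a reference on the owning pmap, drop pv_mtx, lock
 * the pmap, and re-check that the list head is still the mapping we
 * meant to remove.
 */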
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
struct pte_desc *pted;
struct pmap *pm;
if (prot != PROT_NONE) {
mtx_enter(&pg->mdpage.pv_mtx);
LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
pmap_page_ro(pted->pted_pmap, pted->pted_va, prot);
}
mtx_leave(&pg->mdpage.pv_mtx);
return;
}
mtx_enter(&pg->mdpage.pv_mtx);
while ((pted = LIST_FIRST(&(pg->mdpage.pv_list))) != NULL) {
pmap_reference(pted->pted_pmap);
pm = pted->pted_pmap;
mtx_leave(&pg->mdpage.pv_mtx);
pmap_lock(pm);
mtx_enter(&pg->mdpage.pv_mtx);
pted = LIST_FIRST(&(pg->mdpage.pv_list));
if (pted == NULL || pted->pted_pmap != pm) {
mtx_leave(&pg->mdpage.pv_mtx);
pmap_unlock(pm);
pmap_destroy(pm);
mtx_enter(&pg->mdpage.pv_mtx);
continue;
}
mtx_leave(&pg->mdpage.pv_mtx);
pmap_remove_pted(pm, pted);
pmap_unlock(pm);
pmap_destroy(pm);
mtx_enter(&pg->mdpage.pv_mtx);
}
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
mtx_leave(&pg->mdpage.pv_mtx);
}
void
pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
if (prot & (PROT_READ | PROT_EXEC)) {
pmap_lock(pm);
while (sva < eva) {
pmap_page_ro(pm, sva, prot);
sva += PAGE_SIZE;
}
pmap_unlock(pm);
return;
}
pmap_remove(pm, sva, eva);
}
void
pmap_init(void)
{
uint64_t tcr;
WRITE_SPECIALREG(ttbr0_el1, pmap_kernel()->pm_pt0pa);
__asm volatile("isb");
tcr = READ_SPECIALREG(tcr_el1);
tcr &= ~TCR_T0SZ(0x3f);
tcr |= TCR_T0SZ(64 - USER_SPACE_BITS);
tcr |= TCR_A1;
WRITE_SPECIALREG(tcr_el1, tcr);
cpu_tlb_flush();
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, IPL_NONE, 0,
"pmap", NULL);
pool_setlowat(&pmap_pmap_pool, 2);
pool_init(&pmap_pted_pool, sizeof(struct pte_desc), 0, IPL_VM, 0,
"pted", NULL);
pool_setlowat(&pmap_pted_pool, 20);
pool_init(&pmap_vp_pool, sizeof(struct pmapvp0), PAGE_SIZE, IPL_VM, 0,
"vp", &pmap_vp_allocator);
pmap_initialized = 1;
}
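/*
 * Synchronize the instruction cache for a range of another process'
 * address space.  If it is not the current process, each page is
 * temporarily mapped at this CPU's scratch va and flushed there.
 */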
void
pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
{
struct pmap *pm = vm_map_pmap(&pr->ps_vmspace->vm_map);
vaddr_t kva = zero_page + cpu_number() * PAGE_SIZE;
paddr_t pa;
vsize_t clen;
vsize_t off;
if (pr == curproc->p_p) {
cpu_icache_sync_range(va, len);
return;
}
while (len > 0) {
clen = round_page(va + 1) - va;
if (clen > len)
clen = len;
off = va - trunc_page(va);
if (pmap_extract(pm, trunc_page(va), &pa)) {
pmap_kenter_pa(kva, pa, PROT_READ|PROT_WRITE);
cpu_icache_sync_range(kva + off, clen);
pmap_kremove_pg(kva);
}
len -= clen;
va += clen;
}
}
void
pmap_icache_sync_page(struct pmap *pm, paddr_t pa)
{
vaddr_t kva = zero_page + cpu_number() * PAGE_SIZE;
pmap_kenter_pa(kva, pa, PROT_READ|PROT_WRITE);
cpu_icache_sync_range(kva, PAGE_SIZE);
pmap_kremove_pg(kva);
}
void
pmap_pte_insert(struct pte_desc *pted)
{
pmap_t pm = pted->pted_pmap;
uint64_t *pl3;
if (pmap_vp_lookup(pm, pted->pted_va, &pl3) == NULL) {
panic("%s: have a pted, but missing a vp"
" for %lx va pmap %p", __func__, pted->pted_va, pm);
}
pmap_pte_update(pted, pl3);
}
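/*
 * Assemble the L3 descriptor for a mapping and store it in *pl3:
 * memory-attribute index and shareability from the cache mode,
 * AP/XN bits from the protection tables, and ATTR_nG so every entry
 * (kernel entries included) is tagged with an ASID, as the
 * trampoline relies on per-ASID kernel mappings.  A mapping with no
 * granted access bits gets an invalid (zero) descriptor instead.
 */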
void
pmap_pte_update(struct pte_desc *pted, uint64_t *pl3)
{
uint64_t pte, access_bits;
pmap_t pm = pted->pted_pmap;
uint64_t attr = ATTR_nG;
switch (pted->pted_va & PMAP_CACHE_BITS) {
case PMAP_CACHE_WB:
attr |= ATTR_IDX(PTE_ATTR_WB);
attr |= ATTR_SH(SH_INNER);
break;
case PMAP_CACHE_WT:
attr |= ATTR_IDX(PTE_ATTR_WT);
attr |= ATTR_SH(SH_INNER);
break;
case PMAP_CACHE_CI:
attr |= ATTR_IDX(PTE_ATTR_CI);
attr |= ATTR_SH(SH_INNER);
break;
case PMAP_CACHE_DEV_NGNRNE:
attr |= ATTR_IDX(PTE_ATTR_DEV_NGNRNE);
attr |= ATTR_SH(SH_INNER);
break;
case PMAP_CACHE_DEV_NGNRE:
attr |= ATTR_IDX(PTE_ATTR_DEV_NGNRE);
attr |= ATTR_SH(SH_INNER);
break;
default:
panic("%s: invalid cache mode", __func__);
}
if (pm->pm_privileged)
access_bits = ap_bits_kern[pted->pted_pte & PROT_MASK];
else
access_bits = ap_bits_user[pted->pted_pte & PROT_MASK];
#ifndef SMALL_KERNEL
access_bits |= pm->pm_guarded;
#endif
pte = (pted->pted_pte & PTE_RPGN) | attr | access_bits | L3_P;
*pl3 = access_bits ? pte : 0;
}
void
pmap_pte_remove(struct pte_desc *pted, int remove_pted)
{
struct pmapvp1 *vp1;
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
pmap_t pm = pted->pted_pmap;
if (pm->have_4_level_pt)
vp1 = pm->pm_vp.l0->vp[VP_IDX0(pted->pted_va)];
else
vp1 = pm->pm_vp.l1;
if (vp1 == NULL) {
panic("have a pted, but missing the l1 for %lx va pmap %p",
pted->pted_va, pm);
}
vp2 = vp1->vp[VP_IDX1(pted->pted_va)];
if (vp2 == NULL) {
panic("have a pted, but missing the l2 for %lx va pmap %p",
pted->pted_va, pm);
}
vp3 = vp2->vp[VP_IDX2(pted->pted_va)];
if (vp3 == NULL) {
panic("have a pted, but missing the l3 for %lx va pmap %p",
pted->pted_va, pm);
}
vp3->l3[VP_IDX3(pted->pted_va)] = 0;
if (remove_pted)
vp3->vp[VP_IDX3(pted->pted_va)] = NULL;
}
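/*
 * Referenced/modified-bit emulation.  pmap_enter() may install the
 * PTE with fewer access bits than the mapping permits; on the
 * resulting fault we record PG_PMAP_REF/PG_PMAP_MOD, grant the
 * permitted bits and reload the PTE.  Returns 1 if the fault was
 * fixed up here, 0 if it must be resolved by uvm_fault().
 */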
int
pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype)
{
struct pte_desc *pted;
struct vm_page *pg;
paddr_t pa;
uint64_t *pl3 = NULL;
int retcode = 0;
pmap_lock(pm);
pted = pmap_vp_lookup(pm, va, &pl3);
if (!pted || !PTED_VALID(pted))
goto done;
pa = (pted->pted_pte & PTE_RPGN);
pg = PHYS_TO_VM_PAGE(pa);
if (pg == NULL)
goto done;
if ((ftype & PROT_WRITE) &&
!(pted->pted_pte & PROT_WRITE) &&
(pted->pted_va & PROT_WRITE)) {
atomic_setbits_int(&pg->pg_flags, PG_PMAP_MOD|PG_PMAP_REF);
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
pted->pted_pte |=
(pted->pted_va & (PROT_READ|PROT_WRITE|PROT_EXEC));
} else if ((ftype & PROT_EXEC) &&
!(pted->pted_pte & PROT_EXEC) &&
(pted->pted_va & PROT_EXEC)) {
atomic_setbits_int(&pg->pg_flags, PG_PMAP_REF);
pted->pted_pte |= (pted->pted_va & (PROT_READ|PROT_EXEC));
} else if ((ftype & PROT_READ) &&
!(pted->pted_pte & PROT_READ) &&
(pted->pted_va & PROT_READ)) {
atomic_setbits_int(&pg->pg_flags, PG_PMAP_REF);
pted->pted_pte |= (pted->pted_va & (PROT_READ|PROT_EXEC));
} else {
goto done;
}
if (pted->pted_va & PROT_EXEC) {
if ((pg->pg_flags & PG_PMAP_EXE) == 0)
pmap_icache_sync_page(pm, pa);
atomic_setbits_int(&pg->pg_flags, PG_PMAP_EXE);
}
pmap_pte_update(pted, pl3);
ttlb_flush(pm, va & ~PAGE_MASK);
retcode = 1;
done:
pmap_unlock(pm);
return retcode;
}
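/*
 * Finish pmap setup: map the vector trampoline into pmap_tramp and
 * size the pmap_kvp_map submap so pmap_growkernel() can always
 * allocate the tables and pte_descs needed to reach
 * VM_MAX_KERNEL_ADDRESS.
 */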
void
pmap_postinit(void)
{
extern char trampoline_vectors[];
extern char trampoline_vectors_end[];
paddr_t pa;
vaddr_t minaddr, maxaddr;
u_long npteds, npages;
memset(pmap_tramp.pm_vp.l1, 0, sizeof(struct pmapvp1));
pmap_extract(pmap_kernel(), (vaddr_t)trampoline_vectors, &pa);
minaddr = (vaddr_t)trampoline_vectors;
maxaddr = (vaddr_t)trampoline_vectors_end;
while (minaddr < maxaddr) {
pmap_enter(&pmap_tramp, minaddr, pa,
PROT_READ | PROT_EXEC, PROT_READ | PROT_EXEC | PMAP_WIRED);
minaddr += PAGE_SIZE;
pa += PAGE_SIZE;
}
npteds = (VM_MAX_KERNEL_ADDRESS - pmap_maxkvaddr + 1) / PAGE_SIZE;
npteds = roundup(npteds, VP_IDX3_CNT);
npages = howmany(npteds, PAGE_SIZE / (sizeof(struct pte_desc)));
npages += 2 * howmany(npteds, VP_IDX3_CNT);
npages += 2 * howmany(npteds, VP_IDX3_CNT * VP_IDX2_CNT);
npages += 2 * howmany(npteds, VP_IDX3_CNT * VP_IDX2_CNT * VP_IDX1_CNT);
minaddr = vm_map_min(kernel_map);
pmap_kvp_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
npages * PAGE_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
}
void
pmap_init_percpu(void)
{
pool_cache_init(&pmap_pted_pool);
pool_cache_init(&pmap_vp_pool);
}
void
pmap_update(pmap_t pm)
{
}
int
pmap_is_referenced(struct vm_page *pg)
{
return ((pg->pg_flags & PG_PMAP_REF) != 0);
}
int
pmap_is_modified(struct vm_page *pg)
{
return ((pg->pg_flags & PG_PMAP_MOD) != 0);
}
int
pmap_clear_modify(struct vm_page *pg)
{
struct pte_desc *pted;
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_MOD);
mtx_enter(&pg->mdpage.pv_mtx);
LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
pted->pted_pte &= ~PROT_WRITE;
pmap_pte_insert(pted);
ttlb_flush(pted->pted_pmap, pted->pted_va & ~PAGE_MASK);
}
mtx_leave(&pg->mdpage.pv_mtx);
return 0;
}
int
pmap_clear_reference(struct vm_page *pg)
{
struct pte_desc *pted;
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_REF);
mtx_enter(&pg->mdpage.pv_mtx);
LIST_FOREACH(pted, &(pg->mdpage.pv_list), pted_pv_list) {
pted->pted_pte &= ~PROT_MASK;
pmap_pte_insert(pted);
ttlb_flush(pted->pted_pmap, pted->pted_va & ~PAGE_MASK);
}
mtx_leave(&pg->mdpage.pv_mtx);
return 0;
}
void
pmap_unwire(pmap_t pm, vaddr_t va)
{
struct pte_desc *pted;
pmap_lock(pm);
pted = pmap_vp_lookup(pm, va, NULL);
if (pted != NULL && PTED_WIRED(pted)) {
pm->pm_stats.wired_count--;
pted->pted_va &= ~PTED_VA_WIRED_M;
}
pmap_unlock(pm);
}
void
pmap_remove_holes(struct vmspace *vm)
{
}
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
*start = virtual_avail;
*end = VM_MAX_KERNEL_ADDRESS;
pmap_virtual_space_called = 1;
}
void
pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo)
{
pmap_avail_kvo = kvo;
pmap_avail[0].start = ram_start;
pmap_avail[0].size = ram_end-ram_start;
physmem = atop(pmap_avail[0].size);
pmap_cnt_avail = 1;
pmap_avail_fixup();
}
void
pmap_avail_fixup(void)
{
struct mem_region *mp;
vaddr_t align;
vaddr_t end;
mp = pmap_avail;
	while (mp->size != 0) {
align = round_page(mp->start);
if (mp->start != align) {
pmap_remove_avail(mp->start, align);
mp = pmap_avail;
continue;
}
end = mp->start+mp->size;
align = trunc_page(end);
if (end != align) {
pmap_remove_avail(align, end);
mp = pmap_avail;
continue;
}
mp++;
}
}
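/*
 * Remove the physical range [base, end) from the list of available
 * memory, splitting regions as needed, and record it in the list of
 * allocated regions that pmap_map_stolen() maps later.
 */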
void
pmap_remove_avail(paddr_t base, paddr_t end)
{
struct mem_region *mp;
int i;
long mpend;
for (mp = pmap_avail; mp->size; mp++) {
mpend = mp->start + mp->size;
if (base > mpend) {
continue;
}
if (base <= mp->start) {
if (end <= mp->start)
break;
if (end >= mpend) {
for (i = mp - pmap_avail;
i < pmap_cnt_avail;
i++) {
pmap_avail[i] = pmap_avail[i+1];
}
pmap_cnt_avail--;
pmap_avail[pmap_cnt_avail].size = 0;
} else {
mp->start = end;
mp->size = mpend - end;
}
} else {
if (end >= mpend) {
mp->size = base - mp->start;
} else {
for (i = pmap_cnt_avail;
i > (mp - pmap_avail);
i--) {
pmap_avail[i] = pmap_avail[i - 1];
}
pmap_cnt_avail++;
mp->size = base - mp->start;
mp++;
mp->start = end;
mp->size = mpend - end;
}
}
}
for (mp = pmap_allocated; mp->size != 0; mp++) {
if (base < mp->start) {
if (end == mp->start) {
mp->start = base;
mp->size += end - base;
break;
}
for (i = pmap_cnt_allocated; i > (mp - pmap_allocated);
i--) {
pmap_allocated[i] = pmap_allocated[i - 1];
}
pmap_cnt_allocated++;
mp->start = base;
mp->size = end - base;
return;
}
if (base == (mp->start + mp->size)) {
mp->size += end - base;
return;
}
}
if (mp->size == 0) {
mp->start = base;
mp->size = end - base;
pmap_cnt_allocated++;
}
}
paddr_t
pmap_steal_avail(size_t size, int align, void **kva)
{
struct mem_region *mp;
long start;
long remsize;
	for (mp = pmap_avail; mp->size; mp++) {
		if (mp->size > size) {
			start = (mp->start + (align - 1)) & ~(align - 1);
			remsize = mp->size - (start - mp->start);
			/* remsize is signed; require room for size after alignment. */
			if (remsize >= (long)size) {
				pmap_remove_avail(start, start + size);
				if (kva != NULL)
					*kva = (void *)(start - pmap_avail_kvo);
				bzero((void *)start, size);
				return start;
			}
		}
	}
	panic("unable to allocate region with size %lx align %x",
	    size, align);
}
vaddr_t
pmap_map_stolen(vaddr_t kernel_start)
{
struct mem_region *mp;
paddr_t pa;
vaddr_t va;
uint64_t e;
for (mp = pmap_allocated; mp->size; mp++) {
for (e = 0; e < mp->size; e += PAGE_SIZE) {
int prot = PROT_READ | PROT_WRITE;
pa = mp->start + e;
va = pa - pmap_avail_kvo;
if (va < VM_MIN_KERNEL_ADDRESS ||
va >= VM_MAX_KERNEL_ADDRESS)
continue;
if (va >= (vaddr_t)__text_start &&
va < (vaddr_t)_etext)
prot = PROT_READ | PROT_EXEC;
else if (va >= (vaddr_t)__rodata_start &&
va < (vaddr_t)_erodata)
prot = PROT_READ;
pmap_kenter_cache(va, pa, prot, PMAP_CACHE_WB);
}
}
return va + PAGE_SIZE;
}
void
pmap_physload_avail(void)
{
struct mem_region *mp;
uint64_t start, end;
for (mp = pmap_avail; mp->size; mp++) {
if (mp->size < PAGE_SIZE) {
printf(" skipped - too small\n");
continue;
}
start = mp->start;
if (start & PAGE_MASK) {
start = PAGE_SIZE + (start & PMAP_PA_MASK);
}
end = mp->start + mp->size;
if (end & PAGE_MASK) {
end = (end & PMAP_PA_MASK);
}
uvm_page_physload(atop(start), atop(end),
atop(start), atop(end), 0);
}
}
void
pmap_show_mapping(uint64_t va)
{
struct pmapvp1 *vp1;
struct pmapvp2 *vp2;
struct pmapvp3 *vp3;
struct pte_desc *pted;
struct pmap *pm;
uint64_t ttbr0, tcr;
printf("showing mapping of %llx\n", va);
if (va & 1ULL << 63)
pm = pmap_kernel();
else
pm = curproc->p_vmspace->vm_map.pmap;
if (pm->have_4_level_pt) {
printf(" vp0 = %p off %x\n", pm->pm_vp.l0, VP_IDX0(va)*8);
vp1 = pm->pm_vp.l0->vp[VP_IDX0(va)];
if (vp1 == NULL)
return;
} else {
vp1 = pm->pm_vp.l1;
}
__asm volatile ("mrs %x0, ttbr0_el1" : "=r"(ttbr0));
__asm volatile ("mrs %x0, tcr_el1" : "=r"(tcr));
printf(" ttbr0 %llx %llx tcr %llx\n", ttbr0, pm->pm_pt0pa, tcr);
printf(" vp1 = %p\n", vp1);
vp2 = vp1->vp[VP_IDX1(va)];
printf(" vp2 = %p lp2 = %llx idx1 off %x\n",
vp2, vp1->l1[VP_IDX1(va)], VP_IDX1(va)*8);
if (vp2 == NULL)
return;
vp3 = vp2->vp[VP_IDX2(va)];
printf(" vp3 = %p lp3 = %llx idx2 off %x\n",
vp3, vp2->l2[VP_IDX2(va)], VP_IDX2(va)*8);
if (vp3 == NULL)
return;
pted = vp3->vp[VP_IDX3(va)];
printf(" pted = %p lp3 = %llx idx3 off %x\n",
pted, vp3->l3[VP_IDX3(va)], VP_IDX3(va)*8);
}
__attribute__((target("+pauth")))
void
pmap_setpauthkeys(struct pmap *pm)
{
if (ID_AA64ISAR1_APA(cpu_id_aa64isar1) >= ID_AA64ISAR1_APA_PAC ||
ID_AA64ISAR1_API(cpu_id_aa64isar1) >= ID_AA64ISAR1_API_PAC ||
ID_AA64ISAR2_APA3(cpu_id_aa64isar2) >= ID_AA64ISAR2_APA3_PAC) {
__asm volatile ("msr apiakeylo_el1, %0"
:: "r"(pm->pm_apiakey[0]));
__asm volatile ("msr apiakeyhi_el1, %0"
:: "r"(pm->pm_apiakey[1]));
__asm volatile ("msr apdakeylo_el1, %0"
:: "r"(pm->pm_apdakey[0]));
__asm volatile ("msr apdakeyhi_el1, %0"
:: "r"(pm->pm_apdakey[1]));
__asm volatile ("msr apibkeylo_el1, %0"
:: "r"(pm->pm_apibkey[0]));
__asm volatile ("msr apibkeyhi_el1, %0"
:: "r"(pm->pm_apibkey[1]));
__asm volatile ("msr apdbkeylo_el1, %0"
:: "r"(pm->pm_apdbkey[0]));
__asm volatile ("msr apdbkeyhi_el1, %0"
:: "r"(pm->pm_apdbkey[1]));
}
if (ID_AA64ISAR1_GPA(cpu_id_aa64isar1) >= ID_AA64ISAR1_GPA_IMPL ||
ID_AA64ISAR1_GPI(cpu_id_aa64isar1) >= ID_AA64ISAR1_GPI_IMPL ||
ID_AA64ISAR2_GPA3(cpu_id_aa64isar2) >= ID_AA64ISAR2_GPA3_IMPL) {
__asm volatile ("msr apgakeylo_el1, %0"
:: "r"(pm->pm_apgakey[0]));
__asm volatile ("msr apgakeyhi_el1, %0"
:: "r"(pm->pm_apgakey[1]));
}
}
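/*
 * Switch the user address space.  If an ASID rollover has retired
 * the pmap's generation, allocate a fresh ASID first.  TTBR0 is
 * pointed at the kernel's empty table across the switch so no
 * translations can be formed while the ASID and table are changed;
 * cpu_setttb() then installs the new ASID/table pair and the
 * branch predictor is flushed.
 */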
void
pmap_setttb(struct proc *p)
{
struct cpu_info *ci = curcpu();
pmap_t pm = p->p_vmspace->vm_map.pmap;
if (pm != pmap_kernel() &&
(pm->pm_asid & ~PMAP_ASID_MASK) != READ_ONCE(pmap_asid_gen))
pmap_allocate_asid(pm);
if (pm != pmap_kernel())
pmap_setpauthkeys(pm);
WRITE_SPECIALREG(ttbr0_el1, pmap_kernel()->pm_pt0pa);
__asm volatile("isb");
cpu_setttb(pm->pm_asid, pm->pm_pt0pa);
ci->ci_curpm = pm;
ci->ci_flush_bp();
}