#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <uvm/uvm.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/cpufunc.h>
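/*
 * Cache attribute bits used below: normal memory and page-table memory
 * are both mapped Bufferable + Cacheable (write-back).
 */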
#define PTE_L1_S_CACHE_MODE (L1_S_B | L1_S_C)
#define PTE_L1_S_CACHE_MODE_PT (L1_S_B | L1_S_C)
#define PTE_L2_L_CACHE_MODE (L2_B | L2_C)
#define PTE_L2_S_CACHE_MODE (L2_B | L2_C)
#define PTE_L2_L_CACHE_MODE_PT (L2_B | L2_C)
#define PTE_L2_S_CACHE_MODE_PT (L2_B | L2_C)
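/*
 * Debug tracing, enabled with the PMAP_DEBUG kernel option. All trace
 * levels are turned on once the kernel has finished cold boot.
 */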
#ifdef PMAP_DEBUG
#define PDB_FOLLOW 0x0001
#define PDB_INIT 0x0002
#define PDB_ENTER 0x0004
#define PDB_REMOVE 0x0008
#define PDB_CREATE 0x0010
#define PDB_PTPAGE 0x0020
#define PDB_GROWKERN 0x0040
#define PDB_BITS 0x0080
#define PDB_COLLECT 0x0100
#define PDB_PROTECT 0x0200
#define PDB_MAP_L1 0x0400
#define PDB_BOOTSTRAP 0x1000
#define PDB_PARANOIA 0x2000
#define PDB_WIRING 0x4000
#define PDB_PVDUMP 0x8000
#define PDB_KENTER 0x20000
#define PDB_KREMOVE 0x40000
#define pmapdebug (cold ? 0 : 0xffffffff)
#define NPDEBUG(_lev_,_stat_) \
do { \
	if (pmapdebug & (_lev_)) \
		((_stat_)); \
} while (0)
#else
#define NPDEBUG(_lev_,_stat_)
#endif
struct pmap kernel_pmap_store;
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;
void *pmap_pv_page_alloc(struct pool *, int, int *);
void pmap_pv_page_free(struct pool *, void *);
struct pool_allocator pmap_pv_allocator = {
pmap_pv_page_alloc, pmap_pv_page_free
};
struct pool pmap_l2dtable_pool;
vaddr_t pmap_kernel_l2dtable_kva;
struct pool pmap_l2ptp_pool;
vaddr_t pmap_kernel_l2ptp_kva;
paddr_t pmap_kernel_l2ptp_phys;
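/*
 * Reserved kernel VAs and their PTE slots: csrcp/cdstp are the mapping
 * windows used by pmap_copy_page() and pmap_zero_page().
 */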
pt_entry_t *csrc_pte, *cdst_pte;
vaddr_t csrcp, cdstp;
char *memhook;
extern caddr_t msgbufaddr;
int pmap_initialized;
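/*
 * An l1_ttable describes one L1 translation table: its physical address
 * and the kernel VA through which it is accessed. All tables are kept
 * on l1_list so pmap_growkernel() can update every one of them.
 */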
struct l1_ttable {
TAILQ_ENTRY(l1_ttable) l1_link;
paddr_t l1_physaddr;
pd_entry_t *l1_kva;
};
#define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT)
pd_entry_t l1_c_pxn;
TAILQ_HEAD(, l1_ttable) l1_list;
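/*
 * An l2_dtable tracks a group of L2 page tables belonging to a pmap.
 * Each l2_bucket records one table's kernel VA, physical address and
 * the number of PTEs in use, so empty tables can be reclaimed.
 */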
struct l2_dtable {
u_int l2_occupancy;
struct l2_bucket {
pt_entry_t *l2b_kva;
paddr_t l2b_phys;
u_short l2b_l1idx;
u_short l2b_occupancy;
} l2_bucket[L2_BUCKET_SIZE];
};
#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \
(L2_SIZE - 1))
#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1))
#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE)
#define pmap_alloc_l2_dtable() \
pool_get(&pmap_l2dtable_pool, PR_NOWAIT|PR_ZERO)
#define pmap_free_l2_dtable(l2) \
pool_put(&pmap_l2dtable_pool, (l2))
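/*
 * Nonzero if PTE updates must be cleaned from the cache for the benefit
 * of the hardware table walker; see pmap_pte_init_armv7().
 */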
int pmap_needs_pte_sync;
struct pv_entry {
struct pv_entry *pv_next;
pmap_t pv_pmap;
vaddr_t pv_va;
u_int pv_flags;
};
#define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) != 0)
void pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
pt_entry_t **);
static int pmap_is_current(pmap_t);
void pmap_enter_pv(struct vm_page *, struct pv_entry *,
pmap_t, vaddr_t, u_int);
static struct pv_entry *pmap_find_pv(struct vm_page *, pmap_t, vaddr_t);
struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vaddr_t);
u_int pmap_modify_pv(struct vm_page *, pmap_t, vaddr_t,
u_int, u_int);
void pmap_alloc_l1(pmap_t);
void pmap_free_l1(pmap_t);
struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
void pmap_clearbit(struct vm_page *, u_int);
void pmap_clean_page(struct vm_page *);
void pmap_page_remove(struct vm_page *);
void pmap_init_l1(struct l1_ttable *, pd_entry_t *);
vaddr_t kernel_pt_lookup(paddr_t);
extern void bzero_page(vaddr_t);
extern void bcopy_page(vaddr_t, vaddr_t);
vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;
extern pv_addr_t systempage;
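/*
 * Returns true if the given pmap is the kernel pmap or the pmap of the
 * currently running process, i.e. its mappings may be live in the TLB.
 */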
static __inline int
pmap_is_current(pmap_t pm)
{
if (pm == pmap_kernel() ||
(curproc && curproc->p_vmspace->vm_map.pmap == pm))
return 1;
return 0;
}
static __inline void
pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va)
{
if (pmap_is_current(pm))
cpu_tlb_flushID_SE(va);
}
static __inline void
pmap_tlb_flushID(pmap_t pm)
{
if (pmap_is_current(pm))
cpu_tlb_flushID();
}
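/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 page table exists for the address.
 */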
struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
{
struct l2_dtable *l2;
struct l2_bucket *l2b;
u_short l1idx;
l1idx = L1_IDX(va);
if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
(l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
return (NULL);
return (l2b);
}
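/*
 * Link a pv_entry for the mapping <pm,va> onto the page's PV list and
 * record its flags, bumping the wired count if necessary.
 */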
void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
vaddr_t va, u_int flags)
{
NPDEBUG(PDB_PVDUMP,
printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags));
pve->pv_pmap = pm;
pve->pv_va = va;
pve->pv_flags = flags;
pve->pv_next = pg->mdpage.pvh_list;
pg->mdpage.pvh_list = pve;
pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
if (pve->pv_flags & PVF_WIRED)
++pm->pm_stats.wired_count;
}
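/*
 * Find the pv_entry for the mapping <pm,va> on the page's PV list,
 * or NULL if there is none.
 */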
static __inline struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
struct pv_entry *pv;
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
if (pm == pv->pv_pmap && va == pv->pv_va)
break;
}
return (pv);
}
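/*
 * Unlink the pv_entry for the mapping <pm,va> from the page's PV list
 * and return it to the caller, who is responsible for freeing it.
 */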
struct pv_entry *
pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
struct pv_entry *pve, **prevptr;
NPDEBUG(PDB_PVDUMP,
printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va));
prevptr = &pg->mdpage.pvh_list;
pve = *prevptr;
while (pve) {
if (pve->pv_pmap == pm && pve->pv_va == va) {
NPDEBUG(PDB_PVDUMP,
printf("pmap_remove_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, pve->pv_flags));
*prevptr = pve->pv_next;
if (pve->pv_flags & PVF_WIRED)
--pm->pm_stats.wired_count;
break;
}
prevptr = &pve->pv_next;
pve = pve->pv_next;
}
	return (pve);
}
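/*
 * Update the flags of an existing mapping of the page. Returns the
 * previous flags, or 0 if the mapping was not found.
 */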
u_int
pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
u_int clr_mask, u_int set_mask)
{
struct pv_entry *npv;
u_int flags, oflags;
if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
return (0);
NPDEBUG(PDB_PVDUMP,
printf("pmap_modify_pv: pm %p, pg %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, pg, clr_mask, set_mask, npv->pv_flags));
if (clr_mask & (PVF_REF | PVF_MOD))
pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
oflags = npv->pv_flags;
npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
if ((flags ^ oflags) & PVF_WIRED) {
if (flags & PVF_WIRED)
++pm->pm_stats.wired_count;
else
--pm->pm_stats.wired_count;
}
return (oflags);
}
#ifdef PMAP_DEBUG
uint nl1;	/* number of L1 tables allocated, for the trace below */
#endif
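/*
 * Allocate a new L1 translation table for a pmap: KVA from km_alloc(),
 * physically contiguous L1_TABLE_SIZE-aligned pages from
 * uvm_pglistalloc(), retrying until both succeed.
 */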
void
pmap_alloc_l1(pmap_t pm)
{
struct l1_ttable *l1;
struct pglist plist;
struct vm_page *m;
pd_entry_t *pl1pt;
vaddr_t va, eva;
int error;
#ifdef PMAP_DEBUG
printf("%s: %d\n", __func__, ++nl1);
#endif
l1 = malloc(sizeof(*l1), M_VMPMAP, M_WAITOK);
for (;;) {
va = (vaddr_t)km_alloc(L1_TABLE_SIZE, &kv_any, &kp_none,
&kd_nowait);
if (va != 0)
break;
uvm_wait("alloc_l1_va");
}
for (;;) {
TAILQ_INIT(&plist);
error = uvm_pglistalloc(L1_TABLE_SIZE, 0, (paddr_t)-1,
L1_TABLE_SIZE, 0, &plist, 1, UVM_PLA_WAITOK);
if (error == 0)
break;
uvm_wait("alloc_l1_pg");
}
pl1pt = (pd_entry_t *)va;
m = TAILQ_FIRST(&plist);
for (eva = va + L1_TABLE_SIZE; va < eva; va += PAGE_SIZE) {
paddr_t pa = VM_PAGE_TO_PHYS(m);
pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
m = TAILQ_NEXT(m, pageq);
}
pmap_init_l1(l1, pl1pt);
pm->pm_l1 = l1;
}
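/*
 * Release a pmap's L1 table: unlink it from l1_list, gather and free
 * the physical pages backing it, then its KVA and the descriptor.
 */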
void
pmap_free_l1(pmap_t pm)
{
struct l1_ttable *l1 = pm->pm_l1;
struct pglist mlist;
struct vm_page *pg;
struct l2_bucket *l2b;
pt_entry_t *ptep;
vaddr_t va;
uint npg;
pm->pm_l1 = NULL;
TAILQ_REMOVE(&l1_list, l1, l1_link);
TAILQ_INIT(&mlist);
va = (vaddr_t)l1->l1_kva;
for (npg = atop(L1_TABLE_SIZE); npg != 0; npg--) {
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
ptep = &l2b->l2b_kva[l2pte_index(va)];
pg = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
TAILQ_INSERT_TAIL(&mlist, pg, pageq);
va += PAGE_SIZE;
}
pmap_kremove((vaddr_t)l1->l1_kva, L1_TABLE_SIZE);
uvm_pglistfree(&mlist);
km_free(l1->l1_kva, L1_TABLE_SIZE, &kv_any, &kp_none);
	free(l1, M_VMPMAP, sizeof(*l1));
}
static __inline void
pmap_free_l2_ptp(pt_entry_t *l2)
{
pool_put(&pmap_l2ptp_pool, (void *)l2);
}
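/*
 * Return (allocating it first if necessary) the L2 bucket covering va.
 * May fail if the l2_dtable or L2 page-table pools are exhausted, in
 * which case any freshly allocated l2_dtable is backed out again.
 */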
struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
{
struct l2_dtable *l2;
struct l2_bucket *l2b;
u_short l1idx;
l1idx = L1_IDX(va);
if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
if ((l2 = pmap_alloc_l2_dtable()) == NULL)
return (NULL);
pm->pm_l2[L2_IDX(l1idx)] = l2;
}
l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
if (l2b->l2b_kva == NULL) {
pt_entry_t *ptep;
ptep = pool_get(&pmap_l2ptp_pool, PR_NOWAIT|PR_ZERO);
if (ptep == NULL) {
if (l2->l2_occupancy == 0) {
pm->pm_l2[L2_IDX(l1idx)] = NULL;
pmap_free_l2_dtable(l2);
}
return (NULL);
}
PTE_SYNC_RANGE(ptep, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
pmap_extract(pmap_kernel(), (vaddr_t)ptep, &l2b->l2b_phys);
l2->l2_occupancy++;
l2b->l2b_kva = ptep;
l2b->l2b_l1idx = l1idx;
}
return (l2b);
}
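/*
 * Drop `count` mappings from an L2 bucket. Once a user pmap's bucket
 * becomes empty, invalidate the L1 slot pointing at it, free the L2
 * page table and, if the whole l2_dtable is now unused, that as well.
 */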
void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
struct l2_dtable *l2;
pd_entry_t *pl1pd;
pt_entry_t *ptep;
u_short l1idx;
KDASSERT(count <= l2b->l2b_occupancy);
l2b->l2b_occupancy -= count;
if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
return;
l1idx = l2b->l2b_l1idx;
ptep = l2b->l2b_kva;
l2b->l2b_kva = NULL;
pl1pd = &pm->pm_l1->l1_kva[l1idx];
*pl1pd = L1_TYPE_INV;
PTE_SYNC(pl1pd);
pmap_tlb_flushID_SE(pm, l1idx << L1_S_SHIFT);
pmap_free_l2_ptp(ptep);
l2 = pm->pm_l2[L2_IDX(l1idx)];
if (--l2->l2_occupancy > 0)
return;
pm->pm_l2[L2_IDX(l1idx)] = NULL;
pmap_free_l2_dtable(l2);
}
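/*
 * Clear the given PVF_* bits on every mapping of a page; used to
 * implement pmap_clear_modify(), pmap_clear_reference() and
 * pmap_page_protect().
 */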
void
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
struct l2_bucket *l2b;
struct pv_entry *pv;
pt_entry_t *ptep, npte, opte;
pmap_t pm;
vaddr_t va;
u_int oflags;
NPDEBUG(PDB_BITS,
printf("pmap_clearbit: pg %p (0x%08lx) mask 0x%x\n",
pg, pg->phys_addr, maskbits));
pg->mdpage.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
if (pg->mdpage.pvh_list == NULL)
return;
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
va = pv->pv_va;
pm = pv->pv_pmap;
oflags = pv->pv_flags;
pv->pv_flags &= ~maskbits;
l2b = pmap_get_l2_bucket(pm, va);
KDASSERT(l2b != NULL);
ptep = &l2b->l2b_kva[l2pte_index(va)];
npte = opte = *ptep;
NPDEBUG(PDB_BITS,
printf(
"pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n",
pv, pv->pv_pmap, pv->pv_va, oflags));
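		/* Revoking write access: set AP[2], making the PTE read-only. */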
if (maskbits & (PVF_WRITE|PVF_MOD)) {
npte |= L2_V7_AP(0x4);
}
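		/* Clearing the referenced bit: clear the Access Flag. */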
if (maskbits & PVF_REF) {
npte &= ~L2_V7_AF;
}
if (npte != opte) {
*ptep = npte;
PTE_SYNC(ptep);
if (opte & L2_V7_AF)
pmap_tlb_flushID_SE(pm, pv->pv_va);
}
NPDEBUG(PDB_BITS,
printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
pm, va, opte, npte));
}
}
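/*
 * Synchronize the instruction cache for any executable mapping of the
 * page that is visible through the current (or kernel) pmap.
 */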
void
pmap_clean_page(struct vm_page *pg)
{
pmap_t pm;
struct pv_entry *pv;
if (curproc)
pm = curproc->p_vmspace->vm_map.pmap;
else
pm = pmap_kernel();
for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
if (pv->pv_pmap != pmap_kernel() && pv->pv_pmap != pm)
continue;
if (PV_BEEN_EXECD(pv->pv_flags))
cpu_icache_sync_range(pv->pv_va, PAGE_SIZE);
}
}
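/*
 * Remove all mappings of a page: invalidate each PTE, fix up the
 * owning pmap's statistics, flush live TLB entries and free the pv
 * entries and any L2 buckets that become empty.
 */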
void
pmap_page_remove(struct vm_page *pg)
{
struct l2_bucket *l2b;
struct pv_entry *pv, *npv;
pmap_t pm, curpm;
pt_entry_t *ptep, opte;
int flush;
NPDEBUG(PDB_FOLLOW,
printf("pmap_page_remove: pg %p (0x%08lx)\n", pg, pg->phys_addr));
pv = pg->mdpage.pvh_list;
if (pv == NULL)
return;
flush = 0;
if (curproc)
curpm = curproc->p_vmspace->vm_map.pmap;
else
curpm = pmap_kernel();
while (pv) {
pm = pv->pv_pmap;
l2b = pmap_get_l2_bucket(pm, pv->pv_va);
KDASSERT(l2b != NULL);
ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
opte = *ptep;
if (opte != L2_TYPE_INV) {
if ((opte & L2_V7_AF) &&
(pm == curpm || pm == pmap_kernel())) {
if (PV_BEEN_EXECD(pv->pv_flags))
cpu_icache_sync_range(pv->pv_va, PAGE_SIZE);
flush = 1;
}
--pm->pm_stats.resident_count;
if (pv->pv_flags & PVF_WIRED)
--pm->pm_stats.wired_count;
*ptep = L2_TYPE_INV;
PTE_SYNC(ptep);
if (flush)
cpu_tlb_flushID_SE(pv->pv_va);
pmap_free_l2_bucket(pm, l2b, 1);
}
npv = pv->pv_next;
pool_put(&pmap_pv_pool, pv);
pv = npv;
}
pg->mdpage.pvh_list = NULL;
}
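/*
 * Create a pmap with a reference count of 1 and a private L1 table.
 */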
pmap_t
pmap_create(void)
{
pmap_t pm;
pm = pool_get(&pmap_pmap_pool, PR_WAITOK|PR_ZERO);
pm->pm_refs = 1;
pm->pm_stats.wired_count = 0;
pmap_alloc_l1(pm);
return (pm);
}
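/*
 * Insert or update the mapping va -> pa in the given pmap. Referenced
 * and modified state of managed pages is emulated: the Access Flag is
 * set only once the page is known referenced, and write permission is
 * granted only once it is known modified, so the first touch traps.
 */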
int
pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
struct l2_bucket *l2b;
struct vm_page *pg, *opg;
struct pv_entry *pve;
pt_entry_t *ptep, npte, opte;
u_int nflags;
u_int oflags;
int mapped = 1;
	NPDEBUG(PDB_ENTER,
	    printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n",
	    pm, va, pa, prot, flags));
KDASSERT((flags & PMAP_WIRED) == 0 || (flags & PROT_MASK) != 0);
KDASSERT(((va | pa) & PGOFSET) == 0);
pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
nflags = 0;
if (prot & PROT_WRITE)
nflags |= PVF_WRITE;
if (prot & PROT_EXEC)
nflags |= PVF_EXEC;
if (flags & PMAP_WIRED)
nflags |= PVF_WIRED;
if (pm == pmap_kernel())
l2b = pmap_get_l2_bucket(pm, va);
else
l2b = pmap_alloc_l2_bucket(pm, va);
if (l2b == NULL) {
if (flags & PMAP_CANFAIL)
return (ENOMEM);
panic("pmap_enter: failed to allocate L2 bucket");
}
ptep = &l2b->l2b_kva[l2pte_index(va)];
opte = *ptep;
npte = L2_S_PROTO | pa;
if (opte != L2_TYPE_INV) {
if (l2pte_pa(opte) != pa)
opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
else
opg = pg;
} else
opg = NULL;
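	/*
	 * A managed page: emulate the referenced/modified bits. The PTE
	 * gets the Access Flag only if the page has been referenced, and
	 * stays read-only until the page is known to be modified.
	 */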
if (pg) {
if ((flags & PROT_MASK) ||
(pg->mdpage.pvh_attrs & PVF_REF)) {
nflags |= PVF_REF;
npte |= L2_V7_AF;
if ((flags & PROT_WRITE) ||
(pg->mdpage.pvh_attrs & PVF_MOD)) {
nflags |= PVF_MOD;
} else {
prot &= ~PROT_WRITE;
}
} else {
prot &= ~PROT_WRITE;
mapped = 0;
}
npte |= PTE_L2_S_CACHE_MODE;
if (pg == opg) {
oflags = pmap_modify_pv(pg, pm, va,
PVF_WRITE | PVF_EXEC | PVF_WIRED |
PVF_MOD | PVF_REF, nflags);
} else {
if (opg) {
pve = pmap_remove_pv(opg, pm, va);
} else
if ((pve = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){
if ((flags & PMAP_CANFAIL) == 0)
panic("pmap_enter: no pv entries");
if (pm != pmap_kernel())
pmap_free_l2_bucket(pm, l2b, 0);
NPDEBUG(PDB_ENTER,
printf("pmap_enter: ENOMEM\n"));
return (ENOMEM);
}
pmap_enter_pv(pg, pve, pm, va, nflags);
}
} else {
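		/* Unmanaged pages are mapped with the Access Flag preset. */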
npte |= L2_V7_AF;
if (opg) {
pve = pmap_remove_pv(opg, pm, va);
pool_put(&pmap_pv_pool, pve);
}
}
npte |= L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER, prot);
if (opte == L2_TYPE_INV) {
l2b->l2b_occupancy++;
pm->pm_stats.resident_count++;
}
NPDEBUG(PDB_ENTER,
printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte));
if (npte != opte) {
*ptep = npte;
PTE_SYNC(ptep);
if (npte & L2_V7_AF) {
pd_entry_t *pl1pd, l1pd;
pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)];
l1pd = L1_C_PROTO | l2b->l2b_phys | l1_c_pxn;
if (*pl1pd != l1pd) {
*pl1pd = l1pd;
PTE_SYNC(pl1pd);
}
}
if (opte & L2_V7_AF)
pmap_tlb_flushID_SE(pm, va);
}
if (mapped && (prot & PROT_EXEC) != 0 && pmap_is_current(pm))
cpu_icache_sync_range(va, PAGE_SIZE);
return (0);
}
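/*
 * Remove all mappings in the range [sva, eva) from the pmap, working
 * one L2 bucket at a time so empty buckets can be freed as we go.
 */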
void
pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
struct l2_bucket *l2b;
vaddr_t next_bucket;
pt_entry_t *ptep;
u_int mappings;
NPDEBUG(PDB_REMOVE, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
pm, sva, eva));
while (sva < eva) {
next_bucket = L2_NEXT_BUCKET(sva);
if (next_bucket > eva)
next_bucket = eva;
l2b = pmap_get_l2_bucket(pm, sva);
if (l2b == NULL) {
sva = next_bucket;
continue;
}
ptep = &l2b->l2b_kva[l2pte_index(sva)];
mappings = 0;
while (sva < next_bucket) {
struct vm_page *pg;
pt_entry_t pte;
paddr_t pa;
pte = *ptep;
if (pte == L2_TYPE_INV) {
sva += PAGE_SIZE;
ptep++;
continue;
}
pm->pm_stats.resident_count--;
pa = l2pte_pa(pte);
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL) {
struct pv_entry *pve;
pve = pmap_remove_pv(pg, pm, sva);
if (pve != NULL)
pool_put(&pmap_pv_pool, pve);
}
if (pg != NULL)
pmap_clean_page(pg);
*ptep = L2_TYPE_INV;
PTE_SYNC(ptep);
if (pte & L2_V7_AF)
pmap_tlb_flushID_SE(pm, sva);
sva += PAGE_SIZE;
ptep++;
mappings++;
}
if (!pmap_is_current(pm))
cpu_idcache_wbinv_all();
pmap_free_l2_bucket(pm, l2b, mappings);
}
}
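/*
 * Enter an unmanaged (no PV tracking) kernel mapping. The PMAP_DEVICE
 * and PMAP_NOCACHE flags encoded in pa select device or uncached
 * memory attributes instead of the default write-back mode.
 */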
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
struct l2_bucket *l2b;
pt_entry_t *ptep, opte, npte;
pt_entry_t cache_mode = PTE_L2_S_CACHE_MODE;
NPDEBUG(PDB_KENTER,
printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
va, pa, prot));
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
KDASSERT(l2b != NULL);
ptep = &l2b->l2b_kva[l2pte_index(va)];
opte = *ptep;
if (opte == L2_TYPE_INV)
l2b->l2b_occupancy++;
if (pa & PMAP_DEVICE)
cache_mode = L2_B | L2_V7_S_XN;
else if (pa & PMAP_NOCACHE)
cache_mode = L2_V7_S_TEX(1);
npte = L2_S_PROTO | (pa & PMAP_PA_MASK) | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, prot) | cache_mode;
*ptep = npte;
PTE_SYNC(ptep);
if (opte & L2_V7_AF)
cpu_tlb_flushD_SE(va);
if (pa & PMAP_NOCACHE) {
cpu_dcache_wbinv_range(va, PAGE_SIZE);
cpu_sdcache_wbinv_range(va, (pa & PMAP_PA_MASK), PAGE_SIZE);
}
}
void
pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable)
{
if (cacheable == 0)
pa |= PMAP_NOCACHE;
pmap_kenter_pa(va, pa, prot);
}
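/*
 * Remove len bytes of unmanaged kernel mappings starting at va.
 */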
void
pmap_kremove(vaddr_t va, vsize_t len)
{
struct l2_bucket *l2b;
pt_entry_t *ptep, *sptep, opte;
vaddr_t next_bucket, eva;
u_int mappings;
NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n",
va, len));
eva = va + len;
while (va < eva) {
next_bucket = L2_NEXT_BUCKET(va);
if (next_bucket > eva)
next_bucket = eva;
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
KDASSERT(l2b != NULL);
sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
mappings = 0;
while (va < next_bucket) {
opte = *ptep;
if (opte != L2_TYPE_INV) {
*ptep = L2_TYPE_INV;
PTE_SYNC(ptep);
mappings++;
}
if (opte & L2_V7_AF)
cpu_tlb_flushD_SE(va);
va += PAGE_SIZE;
ptep++;
}
KDASSERT(mappings <= l2b->l2b_occupancy);
l2b->l2b_occupancy -= mappings;
}
}
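/*
 * Look up the physical address backing va, handling L1 sections as
 * well as L2 large and small pages. Returns 0 if va is not mapped,
 * otherwise 1 with the address stored through pap.
 */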
int
pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
{
struct l2_dtable *l2;
pd_entry_t *pl1pd, l1pd;
pt_entry_t *ptep, pte;
paddr_t pa;
u_int l1idx;
l1idx = L1_IDX(va);
pl1pd = &pm->pm_l1->l1_kva[l1idx];
l1pd = *pl1pd;
if (l1pte_section_p(l1pd)) {
KDASSERT(pm == pmap_kernel());
pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
} else {
l2 = pm->pm_l2[L2_IDX(l1idx)];
if (l2 == NULL ||
(ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
return 0;
}
ptep = &ptep[l2pte_index(va)];
pte = *ptep;
if (pte == L2_TYPE_INV)
return 0;
switch (pte & L2_TYPE_MASK) {
case L2_TYPE_L:
pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
break;
default:
pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
break;
}
}
if (pap != NULL)
*pap = pa;
return 1;
}
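/*
 * Reduce the permissions of every mapping in [sva, eva). Asking for
 * write+exec is a no-op since protections only ever tighten here, and
 * PROT_NONE is delegated to pmap_remove(). For small ranges in the
 * current pmap the TLB is flushed per page, otherwise once at the end.
 */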
void
pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
struct l2_bucket *l2b;
pt_entry_t *ptep, opte, npte;
vaddr_t next_bucket;
int flush;
NPDEBUG(PDB_PROTECT,
printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x",
pm, sva, eva, prot));
if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC))
return;
if (prot == PROT_NONE) {
pmap_remove(pm, sva, eva);
return;
}
if (pmap_is_current(pm))
flush = ((eva - sva) > (PAGE_SIZE * 4)) ? -1 : 0;
else
flush = -1;
while (sva < eva) {
next_bucket = L2_NEXT_BUCKET(sva);
if (next_bucket > eva)
next_bucket = eva;
l2b = pmap_get_l2_bucket(pm, sva);
if (l2b == NULL) {
sva = next_bucket;
continue;
}
ptep = &l2b->l2b_kva[l2pte_index(sva)];
while (sva < next_bucket) {
npte = opte = *ptep;
if (opte != L2_TYPE_INV) {
struct vm_page *pg;
if ((prot & PROT_WRITE) == 0)
npte |= L2_V7_AP(0x4);
if ((prot & PROT_EXEC) == 0)
npte |= L2_V7_S_XN;
*ptep = npte;
PTE_SYNC(ptep);
pg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
if (pg != NULL && (prot & PROT_WRITE) == 0)
pmap_modify_pv(pg, pm, sva,
PVF_WRITE, 0);
if (flush >= 0) {
flush++;
if (opte & L2_V7_AF)
cpu_tlb_flushID_SE(sva);
}
}
sva += PAGE_SIZE;
ptep++;
}
}
if (flush < 0)
pmap_tlb_flushID(pm);
NPDEBUG(PDB_PROTECT, printf("\n"));
}
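/*
 * Lower the protection of all mappings of a page: either revoke write
 * access or, when no access is to remain, remove the mappings outright.
 */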
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
NPDEBUG(PDB_PROTECT,
printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n",
pg, pg->phys_addr, prot));
switch(prot) {
case PROT_READ | PROT_WRITE | PROT_EXEC:
case PROT_READ | PROT_WRITE:
return;
case PROT_READ:
case PROT_READ | PROT_EXEC:
pmap_clearbit(pg, PVF_WRITE);
break;
default:
pmap_page_remove(pg);
break;
}
}
int
pmap_clear_modify(struct vm_page *pg)
{
int rv;
if (pg->mdpage.pvh_attrs & PVF_MOD) {
rv = 1;
pmap_clearbit(pg, PVF_MOD);
} else
rv = 0;
return (rv);
}
int
pmap_clear_reference(struct vm_page *pg)
{
int rv;
if (pg->mdpage.pvh_attrs & PVF_REF) {
rv = 1;
pmap_clearbit(pg, PVF_REF);
} else
rv = 0;
return (rv);
}
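/*
 * Handle an ARMv7 Access Flag fault: mark the page and its pv_entry
 * referenced and set L2_V7_AF in the PTE so the access can restart.
 * This is the other half of the referenced emulation in pmap_enter().
 */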
int
dab_access(trapframe_t *tf, u_int fsr, u_int far, struct proc *p)
{
struct pmap *pm = p->p_vmspace->vm_map.pmap;
vaddr_t va = trunc_page(far);
struct l2_dtable *l2;
struct l2_bucket *l2b;
pt_entry_t *ptep, pte;
struct pv_entry *pv;
struct vm_page *pg;
paddr_t pa;
u_int l1idx;
if (!TRAP_USERMODE(tf) && far >= VM_MIN_KERNEL_ADDRESS)
pm = pmap_kernel();
l1idx = L1_IDX(va);
l2 = pm->pm_l2[L2_IDX(l1idx)];
KASSERT(l2 != NULL);
l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
KASSERT(l2b->l2b_kva != NULL);
ptep = &l2b->l2b_kva[l2pte_index(va)];
pte = *ptep;
KASSERT(pte != L2_TYPE_INV);
pa = l2pte_pa(pte);
KASSERT((pte & L2_V7_AF) == 0);
pg = PHYS_TO_VM_PAGE(pa);
KASSERT(pg != NULL);
pv = pmap_find_pv(pg, pm, va);
KASSERT(pv != NULL);
pg->mdpage.pvh_attrs |= PVF_REF;
pv->pv_flags |= PVF_REF;
pte |= L2_V7_AF;
*ptep = pte;
PTE_SYNC(ptep);
return 0;
}
void
pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
{
if (pr == curproc->p_p)
cpu_icache_sync_range(va, len);
}
void
pmap_unwire(pmap_t pm, vaddr_t va)
{
struct l2_bucket *l2b;
pt_entry_t *ptep, pte;
struct vm_page *pg;
paddr_t pa;
NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));
l2b = pmap_get_l2_bucket(pm, va);
KDASSERT(l2b != NULL);
ptep = &l2b->l2b_kva[l2pte_index(va)];
pte = *ptep;
pa = l2pte_pa(pte);
if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
(void) pmap_modify_pv(pg, pm, va, PVF_WIRED, 0);
}
}
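/*
 * Activate the pmap of the given process: record its L1 in the PCB
 * and, for curproc, switch the translation table base unless it is
 * already installed. Interrupts are masked across the TTB switch.
 */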
void
pmap_activate(struct proc *p)
{
pmap_t pm;
struct pcb *pcb;
pm = p->p_vmspace->vm_map.pmap;
pcb = &p->p_addr->u_pcb;
pmap_set_pcb_pagedir(pm, pcb);
if (p == curproc) {
u_int cur_ttb;
__asm volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
cur_ttb &= ~(L1_TABLE_SIZE - 1);
if (cur_ttb == (u_int)pcb->pcb_pagedir) {
return;
}
__asm volatile("cpsid if");
cpu_setttb(pcb->pcb_pagedir);
__asm volatile("cpsie if");
}
}
void
pmap_update(pmap_t pm)
{
}
void
pmap_destroy(pmap_t pm)
{
u_int count;
count = --pm->pm_refs;
if (count > 0)
return;
pmap_free_l1(pm);
pool_put(&pmap_pmap_pool, pm);
}
void
pmap_reference(pmap_t pm)
{
if (pm == NULL)
return;
pm->pm_refs++;
}
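/*
 * Zero a physical page through the reserved cdstp mapping window.
 */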
void
pmap_zero_page(struct vm_page *pg)
{
paddr_t phys = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
if (pg->mdpage.pvh_list != NULL)
panic("pmap_zero_page: page has mappings");
#endif
*cdst_pte = L2_S_PROTO | phys | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, PROT_WRITE) | PTE_L2_S_CACHE_MODE;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(cdstp);
bzero_page(cdstp);
}
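/*
 * Copy a physical page using the reserved csrcp/cdstp mapping windows.
 */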
void
pmap_copy_page(struct vm_page *src_pg, struct vm_page *dst_pg)
{
paddr_t src = VM_PAGE_TO_PHYS(src_pg);
paddr_t dst = VM_PAGE_TO_PHYS(dst_pg);
#ifdef DEBUG
if (dst_pg->mdpage.pvh_list != NULL)
panic("pmap_copy_page: dst page has mappings");
#endif
*csrc_pte = L2_S_PROTO | src | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, PROT_READ) | PTE_L2_S_CACHE_MODE;
PTE_SYNC(csrc_pte);
*cdst_pte = L2_S_PROTO | dst | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, PROT_WRITE) | PTE_L2_S_CACHE_MODE;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(csrcp);
cpu_tlb_flushD_SE(cdstp);
bcopy_page(csrcp, cdstp);
}
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
*start = virtual_avail;
*end = virtual_end;
}
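/*
 * Back one page of kernel VA with physical memory: stolen with
 * uvm_page_physget() before the page allocator is up, allocated with
 * uvm_pagealloc() afterwards. Returns nonzero on failure.
 */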
static __inline int
pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
{
struct l2_bucket *l2b;
pt_entry_t *ptep;
paddr_t pa;
KASSERT((va & PAGE_MASK) == 0);
if (uvm.page_init_done == 0) {
if (uvm_page_physget(&pa) == 0)
return (1);
} else {
struct vm_page *pg;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg == NULL)
return (1);
pa = VM_PAGE_TO_PHYS(pg);
}
if (pap)
*pap = pa;
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
KDASSERT(l2b != NULL);
ptep = &l2b->l2b_kva[l2pte_index(va)];
*ptep = L2_S_PROTO | pa | L2_V7_AF | cache_mode |
L2_S_PROT(PTE_KERNEL, PROT_READ | PROT_WRITE);
PTE_SYNC(ptep);
cpu_tlb_flushD_SE(va);
memset((void *)va, 0, PAGE_SIZE);
return (0);
}
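/*
 * The kernel-pmap counterpart of pmap_alloc_l2_bucket(): used from
 * pmap_growkernel(), it carves l2_dtables and L2 page tables out of
 * KVA reserved at bootstrap so that growing the kernel map cannot
 * fail for lack of pool memory.
 */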
static __inline struct l2_bucket *
pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
{
struct l2_dtable *l2;
struct l2_bucket *l2b;
u_short l1idx;
vaddr_t nva;
l1idx = L1_IDX(va);
if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
nva = pmap_kernel_l2dtable_kva;
if ((nva & PGOFSET) == 0) {
if (pmap_grow_map(nva, PTE_L2_S_CACHE_MODE, NULL))
return (NULL);
}
l2 = (struct l2_dtable *)nva;
nva += sizeof(struct l2_dtable);
if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) {
if (pmap_grow_map(trunc_page(nva),
PTE_L2_S_CACHE_MODE, NULL))
return (NULL);
}
pmap_kernel_l2dtable_kva = nva;
pm->pm_l2[L2_IDX(l1idx)] = l2;
}
l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
if (l2b->l2b_kva == NULL) {
pt_entry_t *ptep;
nva = pmap_kernel_l2ptp_kva;
ptep = (pt_entry_t *)nva;
if ((nva & PGOFSET) == 0) {
if (pmap_grow_map(nva, PTE_L2_S_CACHE_MODE_PT,
&pmap_kernel_l2ptp_phys))
return (NULL);
PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
}
l2->l2_occupancy++;
l2b->l2b_kva = ptep;
l2b->l2b_l1idx = l1idx;
l2b->l2b_phys = pmap_kernel_l2ptp_phys;
pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
}
return (l2b);
}
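/*
 * Grow the kernel VA space up to maxkvaddr, one section at a time,
 * mirroring each new L1 entry into every L1 table on l1_list so all
 * pmaps share the enlarged kernel mappings.
 */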
vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
pmap_t kpm = pmap_kernel();
struct l1_ttable *l1;
struct l2_bucket *l2b;
pd_entry_t *pl1pd;
int s;
if (maxkvaddr <= pmap_curmaxkvaddr)
goto out;
NPDEBUG(PDB_GROWKERN,
printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
pmap_curmaxkvaddr, maxkvaddr));
KDASSERT(maxkvaddr <= virtual_end);
s = splhigh();
for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) {
l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
KDASSERT(l2b != NULL);
TAILQ_FOREACH(l1, &l1_list, l1_link) {
pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)];
*pl1pd = L1_C_PROTO | l2b->l2b_phys;
PTE_SYNC(pl1pd);
}
}
cpu_dcache_wbinv_all();
cpu_sdcache_wbinv_all();
cpu_tlb_flushD();
splx(s);
out:
return (pmap_curmaxkvaddr);
}
void
vector_page_setprot(int prot)
{
struct l2_bucket *l2b;
pt_entry_t *ptep;
l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
KDASSERT(l2b != NULL);
ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
*ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
PTE_SYNC(ptep);
cpu_tlb_flushD_SE(vector_page);
}
void
pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
{
KDASSERT(pm->pm_l1);
pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
}
int
pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
{
struct l2_dtable *l2;
pd_entry_t *pl1pd, l1pd;
pt_entry_t *ptep;
u_short l1idx;
if (pm->pm_l1 == NULL)
return 0;
l1idx = L1_IDX(va);
*pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
l1pd = *pl1pd;
if (l1pte_section_p(l1pd)) {
*ptp = NULL;
return 1;
}
l2 = pm->pm_l2[L2_IDX(l1idx)];
if (l2 == NULL ||
(ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
return 0;
}
*ptp = &ptep[l2pte_index(va)];
return 1;
}
void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
l1->l1_kva = l1pt;
if (pmap_initialized)
memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, &l1->l1_physaddr) == 0)
panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
TAILQ_INSERT_TAIL(&l1_list, l1, l1_link);
}
#define PMAP_STATIC_L2_SIZE 16
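/*
 * Bootstrap the kernel pmap from the L1 table set up by the early
 * startup code: rebuild the l2_dtable/l2_bucket bookkeeping for the
 * existing L2 tables, reserve the special VAs (csrcp/cdstp, memhook,
 * msgbuf, growkernel KVA) and initialize the allocation pools.
 */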
void
pmap_bootstrap(pd_entry_t *kernel_l1pt, vaddr_t vstart, vaddr_t vend)
{
static struct l1_ttable static_l1;
static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
struct l1_ttable *l1 = &static_l1;
struct l2_dtable *l2;
struct l2_bucket *l2b;
pmap_t pm = pmap_kernel();
pd_entry_t pde;
pt_entry_t *ptep;
paddr_t pa;
vsize_t size;
int l1idx, l2idx, l2next = 0;
pm->pm_l1 = l1;
pm->pm_refs = 1;
for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
pde = kernel_l1pt[l1idx];
if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
continue;
pa = (paddr_t)(pde & L1_C_ADDR_MASK);
ptep = (pt_entry_t *)kernel_pt_lookup(pa);
if (ptep == NULL) {
panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
(u_int)l1idx << L1_S_SHIFT, pa);
}
if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
if (l2next == PMAP_STATIC_L2_SIZE)
panic("pmap_bootstrap: out of static L2s");
pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++];
}
l2->l2_occupancy++;
l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
l2b->l2b_kva = ptep;
l2b->l2b_phys = pa;
l2b->l2b_l1idx = l1idx;
for (l2idx = 0;
l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
l2idx++) {
if (ptep[l2idx] != L2_TYPE_INV)
l2b->l2b_occupancy++;
}
}
cpu_idcache_wbinv_all();
cpu_sdcache_wbinv_all();
cpu_tlb_flushID();
virtual_avail = vstart;
virtual_end = vend;
pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
pmap_alloc_specials(&virtual_avail, 1, (void *)&memhook, NULL);
pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
(void *)&msgbufaddr, NULL);
size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
pmap_alloc_specials(&virtual_avail,
round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
&pmap_kernel_l2ptp_kva, NULL);
size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
pmap_alloc_specials(&virtual_avail,
round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
&pmap_kernel_l2dtable_kva, NULL);
TAILQ_INIT(&l1_list);
pmap_init_l1(l1, kernel_l1pt);
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, IPL_NONE, 0,
"pmappl", &pool_allocator_single);
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, IPL_VM, 0,
"pvepl", &pmap_pv_allocator);
pool_init(&pmap_l2dtable_pool, sizeof(struct l2_dtable), 0, IPL_VM, 0,
"l2dtblpl", NULL);
pool_init(&pmap_l2ptp_pool, L2_TABLE_SIZE_REAL, L2_TABLE_SIZE_REAL,
IPL_VM, 0, "l2ptppl", &pool_allocator_single);
cpu_dcache_wbinv_all();
cpu_sdcache_wbinv_all();
}
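/*
 * Reserve `pages` pages of KVA from *availp, returning the start VA
 * through vap and, if ptep is not NULL, the PTE slot mapping it.
 */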
void
pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep)
{
vaddr_t va = *availp;
struct l2_bucket *l2b;
	if (ptep) {
		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		if (l2b == NULL)
			panic("pmap_alloc_specials: no l2b for 0x%lx", va);
		*ptep = &l2b->l2b_kva[l2pte_index(va)];
	}
*vap = va;
*availp = va + (PAGE_SIZE * pages);
}
void
pmap_init(void)
{
pool_setlowat(&pmap_pv_pool, (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
pmap_initialized = 1;
}
void *
pmap_pv_page_alloc(struct pool *pp, int flags, int *slowdown)
{
struct kmem_dyn_mode kd = KMEM_DYN_INITIALIZER;
kd.kd_waitok = ISSET(flags, PR_WAITOK);
kd.kd_slowdown = slowdown;
return (km_alloc(pp->pr_pgsize,
pmap_initialized ? &kv_page : &kv_any, pp->pr_crange, &kd));
}
void
pmap_pv_page_free(struct pool *pp, void *v)
{
km_free(v, pp->pr_pgsize, &kv_page, pp->pr_crange);
}
void
pmap_postinit(void)
{
pool_setlowat(&pmap_l2ptp_pool,
(PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4);
pool_setlowat(&pmap_l2dtable_pool,
(PAGE_SIZE / sizeof(struct l2_dtable)) * 2);
}
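/*
 * Bootstrap page tables registered by pmap_link_l2pt(), searched by
 * physical address in kernel_pt_lookup().
 */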
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
vaddr_t
kernel_pt_lookup(paddr_t pa)
{
pv_addr_t *pv;
SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
#ifndef ARM32_NEW_VM_LAYOUT
if (pv->pv_pa == (pa & ~PGOFSET))
return (pv->pv_va | (pa & PGOFSET));
#else
if (pv->pv_pa == pa)
return (pv->pv_va);
#endif
}
return (0);
}
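/*
 * Map a 1MB section during bootstrap, directly in the L1 table.
 */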
void
pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
pd_entry_t fl;
switch (cache) {
case PTE_NOCACHE:
default:
fl = 0;
break;
case PTE_CACHE:
fl = PTE_L1_S_CACHE_MODE;
break;
case PTE_PAGETABLE:
fl = PTE_L1_S_CACHE_MODE_PT;
break;
}
pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | L1_S_V7_AF |
L1_S_PROT(PTE_KERNEL, prot) | fl;
PTE_SYNC(&pde[va >> L1_S_SHIFT]);
}
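/*
 * Map a single page during bootstrap, through one of the registered
 * bootstrap L2 tables.
 */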
void
pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
pt_entry_t fl;
pt_entry_t *pte;
switch (cache) {
case PTE_NOCACHE:
default:
fl = 0;
break;
case PTE_CACHE:
fl = PTE_L2_S_CACHE_MODE;
break;
case PTE_PAGETABLE:
fl = PTE_L2_S_CACHE_MODE_PT;
break;
}
if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
#ifndef ARM32_NEW_VM_LAYOUT
pte = (pt_entry_t *)
kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
if (pte == NULL)
panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
#ifndef ARM32_NEW_VM_LAYOUT
pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, prot) | fl;
PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]);
#else
pte[l2pte_index(va)] = L2_S_PROTO | pa | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, prot) | fl;
PTE_SYNC(&pte[l2pte_index(va)]);
#endif
}
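/*
 * Link a page of L2 page tables into the L1 table at va (the old VM
 * layout fills four consecutive L1 slots, one per 1KB L2 table) and
 * register it for kernel_pt_lookup().
 */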
void
pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
u_int slot = va >> L1_S_SHIFT;
pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
#ifdef ARM32_NEW_VM_LAYOUT
PTE_SYNC(&pde[slot]);
#else
pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
PTE_SYNC_RANGE(&pde[slot + 0], 4);
#endif
SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}
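/*
 * Map a chunk of memory during bootstrap using the largest pieces
 * possible: 1MB sections, then 64KB large pages, then 4KB small pages.
 * Returns the page-rounded size actually mapped.
 */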
vsize_t
pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
int prot, int cache)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
pt_entry_t *pte, f1, f2s, f2l;
vsize_t resid;
int i;
resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
if (l1pt == 0)
panic("pmap_map_chunk: no L1 table provided");
#ifdef VERBOSE_INIT_ARM
printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
"prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif
switch (cache) {
case PTE_NOCACHE:
default:
f1 = 0;
f2l = 0;
f2s = 0;
break;
case PTE_CACHE:
f1 = PTE_L1_S_CACHE_MODE;
f2l = PTE_L2_L_CACHE_MODE;
f2s = PTE_L2_S_CACHE_MODE;
break;
case PTE_PAGETABLE:
f1 = PTE_L1_S_CACHE_MODE_PT;
f2l = PTE_L2_L_CACHE_MODE_PT;
f2s = PTE_L2_S_CACHE_MODE_PT;
break;
}
size = resid;
while (resid > 0) {
if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
printf("S");
#endif
pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
L1_S_V7_AF | L1_S_PROT(PTE_KERNEL, prot) | f1;
PTE_SYNC(&pde[va >> L1_S_SHIFT]);
va += L1_S_SIZE;
pa += L1_S_SIZE;
resid -= L1_S_SIZE;
continue;
}
if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
#ifndef ARM32_NEW_VM_LAYOUT
pte = (pt_entry_t *)
kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
pte = (pt_entry_t *) kernel_pt_lookup(
pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
if (pte == NULL)
panic("pmap_map_chunk: can't find L2 table for VA"
"0x%08lx", va);
if (L2_L_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
printf("L");
#endif
for (i = 0; i < 16; i++) {
#ifndef ARM32_NEW_VM_LAYOUT
pte[((va >> PGSHIFT) & 0x3f0) + i] =
L2_L_PROTO | pa | L2_V7_AF |
L2_L_PROT(PTE_KERNEL, prot) | f2l;
PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]);
#else
pte[l2pte_index(va) + i] =
L2_L_PROTO | pa | L2_V7_AF |
L2_L_PROT(PTE_KERNEL, prot) | f2l;
PTE_SYNC(&pte[l2pte_index(va) + i]);
#endif
}
va += L2_L_SIZE;
pa += L2_L_SIZE;
resid -= L2_L_SIZE;
continue;
}
#ifdef VERBOSE_INIT_ARM
printf("P");
#endif
#ifndef ARM32_NEW_VM_LAYOUT
pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, prot) | f2s;
PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]);
#else
pte[l2pte_index(va)] = L2_S_PROTO | pa | L2_V7_AF |
L2_S_PROT(PTE_KERNEL, prot) | f2s;
PTE_SYNC(&pte[l2pte_index(va)]);
#endif
va += PAGE_SIZE;
pa += PAGE_SIZE;
resid -= PAGE_SIZE;
}
#ifdef VERBOSE_INIT_ARM
printf("\n");
#endif
return (size);
}
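/*
 * Probe the MMU feature registers: enable the L1 PXN bit where the
 * VMSA supports it, and skip PTE cache cleaning (pmap_needs_pte_sync)
 * when ID_MMFR3 reports a cache-coherent table walker.
 */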
void
pmap_pte_init_armv7(void)
{
uint32_t id_mmfr0, id_mmfr3;
pmap_needs_pte_sync = 1;
__asm volatile("mrc p15, 0, %0, c0, c1, 4" : "=r"(id_mmfr0));
if ((id_mmfr0 & ID_MMFR0_VMSA_MASK) >= VMSA_V7_PXN)
l1_c_pxn = L1_C_V7_PXN;
__asm volatile("mrc p15, 0, %0, c0, c1, 7" : "=r"(id_mmfr3));
if ((id_mmfr3 & 0x00f00000) == 0x00100000)
pmap_needs_pte_sync = 0;
}