#include <sys/types.h>
#include <vm/hat.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <sys/cpu_module.h>
#include <vm/mach_kpm.h>
static caddr_t sfmmu_kpm_mapin(page_t *);
static void sfmmu_kpm_mapout(page_t *, caddr_t);
static int sfmmu_kpme_lookup(struct kpme *, page_t *);
static void sfmmu_kpme_add(struct kpme *, page_t *);
static void sfmmu_kpme_sub(struct kpme *, page_t *);
static caddr_t sfmmu_kpm_getvaddr(page_t *, int *);
static int sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
static int sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
static void sfmmu_kpm_vac_conflict(page_t *, caddr_t);
void sfmmu_kpm_pageunload(page_t *);
void sfmmu_kpm_vac_unload(page_t *, caddr_t);
static void sfmmu_kpm_demap_large(caddr_t);
static void sfmmu_kpm_demap_small(caddr_t);
static void sfmmu_kpm_demap_tlbs(caddr_t);
void sfmmu_kpm_hme_unload(page_t *);
kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
void sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
void sfmmu_kpm_page_cache(page_t *, int, int);
extern uint_t vac_colors;
/*
 * Kernel Physical Mapping (kpm) facility - machine dependent
 * initialization.  Nothing to do on this platform; kpm setup is
 * performed elsewhere during startup.
 */
void
mach_kpm_init(void)
{}
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
kmutex_t *pml;
caddr_t vaddr;
if (kpm_enable == 0) {
cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
return ((caddr_t)NULL);
}
if (pp == NULL || PAGE_LOCKED(pp) == 0) {
cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
return ((caddr_t)NULL);
}
pml = sfmmu_mlist_enter(pp);
ASSERT(pp->p_kpmref >= 0);
vaddr = (pp->p_kpmref == 0) ?
sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);
if (kpme != NULL) {
if ((sfmmu_kpme_lookup(kpme, pp)) == 0)
sfmmu_kpme_add(kpme, pp);
ASSERT(pp->p_kpmref > 0);
} else {
pp->p_kpmref++;
}
sfmmu_mlist_exit(pml);
return (vaddr);
}
/*
 * Release a kpm mapping for page pp at vaddr.  Counterpart of
 * hat_kpm_mapin(): with a kpme cookie the entry is unlinked from the
 * page's kpme list (which drops one p_kpmref), otherwise p_kpmref is
 * decremented directly.  The mapping itself is torn down when the
 * last reference goes away.
 */
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
	kmutex_t	*mlist_lock;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}

	if (kpme == NULL) {
		mlist_lock = sfmmu_mlist_enter(pp);
		pp->p_kpmref--;
	} else {
		ASSERT(pp == kpme->kpe_page);
		pp = kpme->kpe_page;
		mlist_lock = sfmmu_mlist_enter(pp);
		if (sfmmu_kpme_lookup(kpme, pp) == 0) {
			panic("hat_kpm_mapout: kpme not found pp=%p",
			    (void *)pp);
		}
		ASSERT(pp->p_kpmref > 0);
		sfmmu_kpme_sub(kpme, pp);	/* also drops one p_kpmref */
	}

	ASSERT(pp->p_kpmref >= 0);
	if (pp->p_kpmref == 0)
		sfmmu_kpm_mapout(pp, vaddr);

	sfmmu_mlist_exit(mlist_lock);
}
/*
 * Map in a pfn that lies outside of the memsegs and return its kpm
 * address.  Only supported on platforms without VAC coloring
 * (vac_colors == 1); returns NULL when kpm is disabled, the platform
 * has multiple VAC colors, or the pfn belongs to a memseg.
 */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	caddr_t		paddr, vaddr;
	tte_t		tte;
	uint_t		szc, shift;

	if (kpm_smallpages) {
		szc = TTE8K;
		shift = MMU_PAGESHIFT;
	} else {
		szc = TTE4M;
		shift = MMU_PAGESHIFT4M;
	}

	if (kpm_enable == 0 || vac_colors > 1 ||
	    page_numtomemseg_nolock(pfn) != NULL)
		return ((caddr_t)NULL);

	paddr = (caddr_t)ptob(pfn);
	vaddr = (uintptr_t)kpm_vbase + paddr;

	KPM_TTE_VCACHED(tte.ll, pfn, szc);
	sfmmu_kpm_load_tsb(vaddr, &tte, shift);

	return (vaddr);
}
/*
 * Counterpart of hat_kpm_mapin_pfn().  Translations loaded for pfns
 * outside of the memsegs are not torn down individually on this
 * platform, so this is a no-op.
 */
/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
}
/*
 * Return the kpm virtual address for page pp.  If checkswap is set
 * and the page belongs to swapfs, the page's assigned virtual color
 * is used (the physical color when the page is uncached); otherwise
 * the color comes from the backing-store offset.
 *
 * When virtual and physical color differ, the page is addressed via
 * a kpm VAC alias range: the page-color bits of the address are
 * shifted from vcolor_pa to vcolor and a kpm_size-sized alias range
 * is selected (vcolor_pa for "downward" color changes, the color
 * delta otherwise) so that the physical address can be reconstructed
 * from the alias address later (see SFMMU_KPM_VTOP).
 */
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	int vcolor, vcolor_pa;
	uintptr_t paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	/* swapfs pages carry their color in the page; others use p_offset */
	if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode))
		vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp);
	else
		vcolor = addr_to_vcolor(pp->p_offset);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	if (vcolor_pa != vcolor) {
		/* adjust the page color bits ... */
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		/* ... and select the alias range for this color pair */
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
	}

	return ((caddr_t)vaddr);
}
/*
 * Return the page_t for the kpm virtual address vaddr, or NULL when
 * no page structure exists for the underlying pfn.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t	paddr;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);

	return (page_numtopp_nolock((pfn_t)btop(paddr)));
}
/*
 * PP2KPMPG: find the kpm_page_t describing the (large) kpm range the
 * page pp falls into.  kpm_pages entries are indexed by kpm page
 * number relative to the memseg's kpm base.
 */
#define	PP2KPMPG(pp, kp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);		\
	ASSERT(inx < mseg->kpm_nkpmpgs);				\
	kp = &mseg->kpm_pages[inx];					\
}
/*
 * PP2KPMSPG: find the kpm_spage_t for page pp (kpm_smallpages mode).
 * kpm_spages entries are indexed per 8K page relative to kpm_pbase.
 */
#define	PP2KPMSPG(pp, ksp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = pfn - mseg->kpm_pbase;					\
	ksp = &mseg->kpm_spages[inx];					\
}
/*
 * kpm fault handler: resolve a TSB miss on a kpm virtual address by
 * reloading the appropriate tte.  Returns 0 on success, ENOTSUP when
 * kpm is disabled, EFAULT when no valid mapping can be established.
 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	int		error;
	uintptr_t	paddr;
	pfn_t		pfn;
	struct memseg	*mseg;
	page_t		*pp;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set");
		return (ENOTSUP);
	}

	ASSERT(hat == ksfmmup);
	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	if ((mseg = page_numtomemseg_nolock(pfn)) != NULL) {
		pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)];
		ASSERT((pfn_t)pp->p_pagenum == pfn);
	}

	/*
	 * hat_kpm_mapin_pfn may install kpm translations for memory that
	 * falls outside of any memseg (VAC-less platforms only); reload
	 * such a translation here.
	 */
	if (vac_colors == 1 && mseg == NULL) {
		tte_t tte;
		uint_t szc = kpm_smallpages ? TTE8K : TTE4M;
		uint_t shift = kpm_smallpages ? MMU_PAGESHIFT : MMU_PAGESHIFT4M;

		ASSERT(address_in_memlist(phys_install, paddr, 1));
		KPM_TTE_VCACHED(tte.ll, pfn, szc);
		sfmmu_kpm_load_tsb(vaddr, &tte, shift);
		error = 0;
	} else if (mseg == NULL || !PAGE_LOCKED(pp))
		error = EFAULT;
	else if (kpm_smallpages == 0)
		error = sfmmu_kpm_fault(vaddr, mseg, pp);
	else
		error = sfmmu_kpm_fault_small(vaddr, mseg, pp);

	return (error);
}
void
hat_kpm_mseghash_clear(int nentries)
{
pgcnt_t i;
if (kpm_enable == 0)
return;
for (i = 0; i < nentries; i++)
memseg_phash[i] = MSEG_NULLPTR_PA;
}
/*
 * Update one slot of the memseg physical-address hash; a NULL msp
 * stores the null-pointer sentinel.
 */
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	if (msp == NULL)
		memseg_phash[inx] = MSEG_NULLPTR_PA;
	else
		memseg_phash[inx] = va_to_pa(msp);
}
/*
 * Initialize the kpm fields of a memseg being added: the kpm_pages
 * array lives at kpm_pages_off past the page array, and the
 * physical-address mirrors of the virtual pointers are refreshed.
 */
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{
	kpm_page_t	*kpages;

	if (kpm_enable == 0)
		return;

	kpages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off);
	msp->kpm_pages = kpages;
	msp->kpm_nkpmpgs = nkpmpgs;
	msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base));
	msp->kpm_pagespa = va_to_pa(kpages);
	msp->pagespa = va_to_pa(msp->pages);
	msp->epagespa = va_to_pa(msp->epages);
}
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{
if (kpm_enable == 0)
return;
ASSERT(memsegs_lock_held());
msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA;
}
/*
 * Point the physical-address head of the memseg list at the newly
 * inserted memseg msp.  Caller holds the memsegs lock.
 */
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(memsegs);

	memsegspa = va_to_pa(msp);
}
/*
 * Return the first address past the kpm metadata (kpm_pages or
 * kpm_spages array) of memseg msp.
 */
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	if (kpm_smallpages)
		return ((caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs));

	return ((caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs));
}
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
struct memseg *lmsp;
if (kpm_enable == 0)
return;
ASSERT(memsegs_lock_held());
if (mspp == &memsegs) {
memsegspa = (msp->next) ?
va_to_pa(msp->next) : MSEG_NULLPTR_PA;
} else {
lmsp = (struct memseg *)
((uint64_t)mspp - offsetof(struct memseg, next));
lmsp->nextpa = (msp->next) ?
va_to_pa(msp->next) : MSEG_NULLPTR_PA;
}
}
/*
 * Update kpm bookkeeping when memseg msp is split into up to three
 * parts (lo, mid, hi).  Each part's kpm base and kpm page count are
 * recomputed, its kpm_pages/kpm_spages pointer is aimed into the
 * original msp metadata, and the physical-address mirrors
 * (pagespa/epagespa/nextpa/kpm_pagespa) are refreshed.  mspp is the
 * virtual link that pointed at msp; the pa chain is relinked to the
 * first surviving part.  Caller holds the memsegs lock.
 */
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{
	pgcnt_t start, end, kbase, kstart, num;
	struct memseg *lmsp;

	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(msp && mid && msp->kpm_pages);

	kbase = ptokpmp(msp->kpm_pbase);

	if (lo) {
		/* lo shares msp's start, so it keeps msp's kpm_pages base */
		num = lo->pages_end - lo->pages_base;
		start = kpmptop(ptokpmp(lo->pages_base));
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		lo->kpm_pbase = start;
		lo->kpm_nkpmpgs = ptokpmp(end - start);
		lo->kpm_pages = msp->kpm_pages;
		lo->kpm_pagespa = va_to_pa(lo->kpm_pages);
		lo->pagespa = va_to_pa(lo->pages);
		lo->epagespa = va_to_pa(lo->epages);
		lo->nextpa = va_to_pa(lo->next);
	}

	/* mid always exists; offset its kpm metadata into msp's arrays */
	num = mid->pages_end - mid->pages_base;
	kstart = ptokpmp(mid->pages_base);
	start = kpmptop(kstart);
	end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
	mid->kpm_pbase = start;
	mid->kpm_nkpmpgs = ptokpmp(end - start);
	if (kpm_smallpages == 0) {
		mid->kpm_pages = msp->kpm_pages + (kstart - kbase);
	} else {
		mid->kpm_spages = msp->kpm_spages + (kstart - kbase);
	}
	mid->kpm_pagespa = va_to_pa(mid->kpm_pages);
	mid->pagespa = va_to_pa(mid->pages);
	mid->epagespa = va_to_pa(mid->epages);
	mid->nextpa = (mid->next) ? va_to_pa(mid->next) : MSEG_NULLPTR_PA;

	if (hi) {
		num = hi->pages_end - hi->pages_base;
		kstart = ptokpmp(hi->pages_base);
		start = kpmptop(kstart);
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		hi->kpm_pbase = start;
		hi->kpm_nkpmpgs = ptokpmp(end - start);
		if (kpm_smallpages == 0) {
			hi->kpm_pages = msp->kpm_pages + (kstart - kbase);
		} else {
			hi->kpm_spages = msp->kpm_spages + (kstart - kbase);
		}
		hi->kpm_pagespa = va_to_pa(hi->kpm_pages);
		hi->pagespa = va_to_pa(hi->pages);
		hi->epagespa = va_to_pa(hi->epages);
		hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA;
	}

	/* relink the pa chain to the first surviving part */
	if (mspp == &memsegs) {
		memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	} else {
		lmsp = (struct memseg *)
		    ((uint64_t)mspp - offsetof(struct memseg, next));
		lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	}
}
/*
 * Walk the kpm address space: invoke func(arg, base, size) for every
 * memseg's kpm range, once per virtual color (the per-color alias
 * ranges are kpm_size apart).
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	struct memseg	*msp;
	int		vcolor;
	void		*base;
	size_t		size;

	for (msp = memsegs; msp != NULL; msp = msp->next) {
		size = ptob(msp->pages_end - msp->pages_base);
		for (vcolor = 0; vcolor < vac_colors; vcolor++) {
			base = ptob(msp->pages_base) + kpm_vbase +
			    kpm_size * vcolor;
			func(arg, base, size);
		}
	}
}
/*
 * Return the pfn backing kpm address vaddr, or PFN_INVALID when the
 * page either has no page structure or holds no kpm references.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;
	page_t		*pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);

	if (pp == NULL || pp->p_kpmref == 0)
		return ((pfn_t)PFN_INVALID);

	return (pfn);
}
/*
 * Return 1 when kpme is already on page pp's kpme list, 0 otherwise.
 */
static int
sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp)
{
	struct kpme	*walk;

	for (walk = pp->p_kpmelist; walk != NULL; walk = walk->kpe_next) {
		if (walk == kpme)
			return (1);
	}

	return (0);
}
/*
 * Push kpme onto the head of page pp's kpme list and take a kpm
 * reference on the page.
 */
static void
sfmmu_kpme_add(struct kpme *kpme, page_t *pp)
{
	struct kpme	*head = pp->p_kpmelist;

	ASSERT(pp->p_kpmref >= 0);

	kpme->kpe_prev = NULL;
	kpme->kpe_next = head;
	if (head != NULL)
		head->kpe_prev = kpme;
	pp->p_kpmelist = kpme;

	kpme->kpe_page = pp;
	pp->p_kpmref++;
}
/*
 * Unlink kpme from page pp's kpme list and drop one kpm reference.
 */
static void
sfmmu_kpme_sub(struct kpme *kpme, page_t *pp)
{
	struct kpme	*prev = kpme->kpe_prev;
	struct kpme	*next = kpme->kpe_next;

	ASSERT(pp->p_kpmref > 0);

	if (prev == NULL) {
		ASSERT(pp->p_kpmelist == kpme);
		pp->p_kpmelist = next;
	} else {
		ASSERT(pp->p_kpmelist != kpme);
		ASSERT(prev->kpe_page == pp);
		prev->kpe_next = next;
	}

	if (next != NULL) {
		ASSERT(next->kpe_page == pp);
		next->kpe_prev = prev;
	}

	kpme->kpe_next = kpme->kpe_prev = NULL;
	kpme->kpe_page = NULL;
	pp->p_kpmref--;
}
/*
 * Create the kpm mapping for page pp and return its kpm virtual
 * address.  Called with the page's mapping-list lock held and no kpm
 * reference yet (p_kpmref == 0).
 *
 * Regular mode maintains, per kpm_page (protected by the hashed
 * khl_mutex):
 *	kp_refcnt	pages covered by the shared large mapping
 *	kp_refcnts	pages mapped small due to VAC conflicts
 *	kp_refcntc	conflict pages; -1 marks an active large mapping
 *			handled via the kpm tsbmiss handler
 *	kp_refcnta	pages mapped within a VAC alias range
 * In kpm_smallpages mode every page is mapped with an 8K tte and the
 * state lives in a per-page kpm_spage_t instead.
 */
static caddr_t
sfmmu_kpm_mapin(page_t *pp)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		vaddr;
	int		kpm_vac_range;
	pfn_t		pfn;
	tte_t		tte;
	kmutex_t	*pmtx;
	int		uncached;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range);

	ASSERT(IS_KPM_ADDR(vaddr));

	uncached = PP_ISNC(pp);
	pfn = pp->p_pagenum;

	if (kpm_smallpages)
		goto smallpages_mapin;

	PP2KPMPG(pp, kp);

	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	ASSERT(PP_ISKPMC(pp) == 0);
	ASSERT(PP_ISKPMS(pp) == 0);

	if (uncached) {
		/*
		 * Uncached page: only non alias-range pages count as
		 * conflict pages within the kpm_page.
		 */
		if (kpm_vac_range == 0) {
			if (kp->kp_refcnts == 0) {
				/*
				 * Stop the kpm tsbmiss handler before the
				 * first conflict page invalidates the
				 * shared large mapping.
				 */
				if (kp->kp_refcntc == -1) {
					sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
					    &kpmp->khl_lock, KPMTSBM_STOP);
				}
				/* drop the large mapping if it is loaded */
				if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0)
					sfmmu_kpm_demap_large(vaddr);
			}
			ASSERT(kp->kp_refcntc >= 0);
			kp->kp_refcntc++;
		}
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
	}

	if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) {
		/*
		 * The kpm_page is already in small-mapping mode; map this
		 * page with an 8K tte (cached or uncached as appropriate).
		 */
		if (uncached == 0)
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		else
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		kp->kp_refcnts++;
		ASSERT(kp->kp_refcnts > 0);
		goto exit;
	}

	if (kpm_vac_range == 0) {
		/*
		 * Regular range: use (or establish) the shared large
		 * mapping and hand TSB loading over to the kpm tsbmiss
		 * handler (kp_refcntc == -1).
		 */
		if (kp->kp_refcnt == 0) {
			KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);
			if (kp->kp_refcntc == 0)
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_START);
			ASSERT(kp->kp_refcntc == -1);
		}
		kp->kp_refcnt++;
		ASSERT(kp->kp_refcnt);
	} else {
		/*
		 * VAC alias range: always map small and stop the tsbmiss
		 * handler if it was active for this kpm_page.
		 */
		if (uncached == 0) {
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		} else {
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
		}
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
		kp->kp_refcnta++;
		if (kp->kp_refcntc == -1) {
			ASSERT(kp->kp_refcnt > 0);
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
			    KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc >= 0);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return (vaddr);

smallpages_mapin:
	if (uncached == 0) {
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
	} else {
		/* drop any stale cached tte before mapping uncached */
		sfmmu_kpm_demap_small(vaddr);
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
	}
	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
	    (uncached) ? (KPM_MAPPED_GO | KPM_MAPPEDSC) :
	    (KPM_MAPPED_GO | KPM_MAPPEDS));

	if (oldval != 0)
		panic("sfmmu_kpm_mapin: stale smallpages mapping");

	return (vaddr);
}
/*
 * Tear down the kpm mapping for page pp at vaddr.  Counterpart of
 * sfmmu_kpm_mapin(): called with the mapping-list lock held after the
 * last kpm reference has gone away (p_kpmref == 0).  Adjusts the
 * kpm_page counters (regular mode) or the kpm_spage state
 * (kpm_smallpages mode) and unloads/demap the ttes as required.
 */
static void
sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	int		alias_range;
	kmutex_t	*pmtx;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	alias_range = IS_KPM_ALIAS_RANGE(vaddr);

	if (kpm_smallpages)
		goto smallpages_mapout;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (alias_range) {
		/* alias-range pages are always mapped small */
		ASSERT(PP_ISKPMS(pp) == 0);
		if (kp->kp_refcnta <= 0) {
			panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
			    (void *)kp);
		}
		if (PP_ISTNC(pp)) {
			if (PP_ISKPMC(pp) == 0) {
				panic("sfmmu_kpm_mapout: uncached page not "
				    "kpm marked");
			}
			sfmmu_kpm_demap_small(vaddr);
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			/* try to return the page to cacheable state */
			conv_tnc(pp, TTE8K);
		} else if (PP_ISKPMC(pp) == 0) {
			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
		} else {
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
		}
		kp->kp_refcnta--;
		goto exit;
	}

	if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) {
		/*
		 * No conflict pages and no small mappings: this page was
		 * covered by the shared large mapping.
		 */
		ASSERT(kp->kp_refcntc >= -1);
		ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC)));

		if (kp->kp_refcnt <= 0)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);

		if (--kp->kp_refcnt == 0) {
			/* last reference: tear down the large mapping */
			if (kp->kp_refcntc == -1) {
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_STOP);
			}
			ASSERT(kp->kp_refcntc == 0);

			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
#ifdef	DEBUG
			if (kpm_tlb_flush)
				sfmmu_kpm_demap_tlbs(vaddr);
#endif
		}
	} else {
		/*
		 * The kpm_page is in conflict/small-mapping mode; undo
		 * whatever small/conflict state this page contributed.
		 */
		if (PP_ISKPMS(pp)) {
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
				    (void *)kp);
			}
			sfmmu_kpm_demap_small(vaddr);

			if (PP_ISTNC(pp)) {
				if (!PP_ISKPMC(pp)) {
					panic("sfmmu_kpm_mapout: uncached "
					    "page not kpm marked");
				}
				conv_tnc(pp, TTE8K);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMS(pp);
			sfmmu_page_exit(pmtx);
		}

		if (PP_ISKPMC(pp)) {
			if (kp->kp_refcntc < 1) {
				panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
				    (void *)kp);
			}
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			kp->kp_refcntc--;
		}

		if (kp->kp_refcnt-- < 1)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_mapout:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDS) {
			/*
			 * When called after sfmmu_kpm_hme_unload,
			 * KPM_MAPPEDSC is valid too.
			 */
			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_mapout: incorrect mapping");
		}
		sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
#ifdef	DEBUG
		if (kpm_tlb_flush)
			sfmmu_kpm_demap_tlbs(vaddr);
#endif
	} else if (PP_ISTNC(pp)) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0)
			panic("sfmmu_kpm_mapout: inconsistent TNC mapping");

		sfmmu_kpm_demap_small(vaddr);

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);

		/* try to return the page to cacheable state */
		conv_tnc(pp, TTE8K);
	} else {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC)
			panic("sfmmu_kpm_mapout: inconsistent mapping");

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
	}
}
/* integer absolute value; NOTE: evaluates x twice, use side-effect free args */
#define	abs(x)  ((x) < 0 ? -(x) : (x))
/*
 * Determine the kpm virtual address for page pp.  *kpm_vac_rangep is
 * set to 0 when the page maps into the regular kpm range and to the
 * color distance |vcolor - vcolor_pa| when it must live in a VAC
 * alias range.  Resolves VAC conflicts with existing mappings before
 * returning.  Called with the mapping-list lock held.
 */
static caddr_t
sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep)
{
	int	vcolor, vcolor_pa;
	caddr_t	vaddr;
	uintptr_t paddr;


	ASSERT(sfmmu_mlist_held(pp));

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	/* swapfs pages use their assigned color unless new or uncached */
	if (pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) {
		vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ?
		    vcolor_pa : PP_GET_VCOLOR(pp);
	} else {
		vcolor = addr_to_vcolor(pp->p_offset);
	}

	vaddr = kpm_vbase + paddr;
	*kpm_vac_rangep = 0;

	if (vcolor_pa != vcolor) {
		*kpm_vac_rangep = abs(vcolor - vcolor_pa);
		/* shift the page color bits from vcolor_pa to vcolor ... */
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		/* ... and select the alias range (see hat_kpm_page2va) */
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);

		ASSERT(!PP_ISMAPPED_LARGE(pp));
	}

	if (PP_ISNC(pp))
		return (vaddr);

	if (PP_NEWPAGE(pp)) {
		PP_SET_VCOLOR(pp, vcolor);
		return (vaddr);
	}

	if (PP_GET_VCOLOR(pp) == vcolor)
		return (vaddr);

	ASSERT(!PP_ISMAPPED_KPM(pp));
	sfmmu_kpm_vac_conflict(pp, vaddr);

	return (vaddr);
}
/*
 * kpm fault case encoding:
 *	KPM_KC	the kpm_page has conflict pages (kp_refcntc > 0)
 *	KPM_KS	the kpm_page has small mappings (kp_refcnts > 0)
 *	KPM_C	this page is marked as a conflict page (P_KPMC)
 *	KPM_S	this page is mapped small (P_KPMS)
 * The KPM_TSBM_* combinations below name the states handled in
 * sfmmu_kpm_fault().
 */
#define	KPM_KC	0x00000008
#define	KPM_C	0x00000004
#define	KPM_KS	0x00000002
#define	KPM_S	0x00000001

#define	KPM_TSBM_CONFL_GONE	(0)
#define	KPM_TSBM_MAPS_RASM	(KPM_KS)
#define	KPM_TSBM_RPLS_RASM	(KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKO	(KPM_KC)
#define	KPM_TSBM_MAPS		(KPM_KC | KPM_KS)
#define	KPM_TSBM_RPLS		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKT	(KPM_KC | KPM_C)
#define	KPM_TSBM_MAPS_CONFL	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_TSBM_RPLS_CONFL	(KPM_KC | KPM_C | KPM_KS | KPM_S)
/*
 * kpm fault handler for the regular (non kpm_smallpages) mode.
 * Called from hat_kpm_fault() when the kpm tsbmiss handler could not
 * resolve a miss because of VAC conflict bookkeeping.  The tsbmcase
 * value combines the kpm_page state (KPM_KC/KPM_KS) with the page
 * state (KPM_C/KPM_S); each case re-establishes either the shared
 * large mapping (largeexit) or a per-page small mapping (smallexit).
 * Returns 0 on success, EFAULT when the page has no kpm mapping.
 */
int
sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
{
	int		error;
	pgcnt_t		inx;
	kpm_page_t	*kp;
	tte_t		tte;
	pfn_t		pfn = pp->p_pagenum;
	kpm_hlk_t	*kpmp;
	kmutex_t	*pml;
	int		alias_range;
	int		uncached = 0;
	kmutex_t	*pmtx;
	int		badstate;
	uint_t		tsbmcase;

	alias_range = IS_KPM_ALIAS_RANGE(vaddr);

	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);
	if (inx >= mseg->kpm_nkpmpgs) {
		cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg "
		    "0x%p pp 0x%p", (void *)mseg, (void *)pp);
	}

	kp = &mseg->kpm_pages[inx];
	kpmp = KPMP_HASH(kp);

	pml = sfmmu_mlist_enter(pp);

	if (!PP_ISMAPPED_KPM(pp)) {
		sfmmu_mlist_exit(pml);
		return (EFAULT);
	}

	mutex_enter(&kpmp->khl_mutex);

	if (alias_range) {
		ASSERT(!PP_ISMAPPED_LARGE(pp));
		if (kp->kp_refcnta > 0) {
			if (PP_ISKPMC(pp)) {
				pmtx = sfmmu_page_enter(pp);
				PP_CLRKPMC(pp);
				sfmmu_page_exit(pmtx);
			}
			/*
			 * Check for vcolor conflicts.  Return here
			 * w/ either no conflict (fast path), removed hme
			 * mapping chains (unload conflict) or uncached
			 * (uncache conflict).  VACaches are cleaned and
			 * p_vcolor and PP_TNC are set accordingly.  Drop
			 * the kpm lock as sfmmu_kpm_vac_conflict() may
			 * take page locks.
			 */
			mutex_exit(&kpmp->khl_mutex);
			sfmmu_kpm_vac_conflict(pp, vaddr);
			mutex_enter(&kpmp->khl_mutex);

			if (PP_ISNC(pp)) {
				uncached = 1;
				pmtx = sfmmu_page_enter(pp);
				PP_SETKPMC(pp);
				sfmmu_page_exit(pmtx);
			}
			goto smallexit;

		} else {
			error = EFAULT;
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if the kpm mapping for this
		 * kpm_page is valid (tsbmiss handler active); nothing
		 * of the page state may then be set.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
		if (badstate == 0)
			goto largeexit;
	}
	if (badstate || kp->kp_refcntc < 0)
		goto badstate_exit;

	tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	switch (tsbmcase) {
	case KPM_TSBM_CONFL_GONE:
		/* the conflict on this kpm_page is gone; go large again */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
			    "state, pp=%p", (void *)pp);
		}
		goto largeexit;

	case KPM_TSBM_MAPS_RASM:
	case KPM_TSBM_MAPS:
		/* kpm_page has small mappings; map this page small too */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent MAPS state, "
			    "pp=%p", (void *)pp);
		}
		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_RPLS_RASM:
	case KPM_TSBM_RPLS:
		/* page already mapped small; just reload the tte */
		if (PP_ISNC(pp) ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent RPLS state, "
			    "pp=%p", (void *)pp);
		}
		goto smallexit;

	case KPM_TSBM_MAPS_BRKO:
		/* kpm_page has conflict pages only; break out small */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, "
			    "pp=%p", (void *)pp);
		}
		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_MAPS_BRKT:
	case KPM_TSBM_MAPS_CONFL:
		/* this page is a conflict page itself */
		if (!PP_ISMAPPED(pp)) {
			/*
			 * A conflict page must still have other (hme)
			 * mappings, otherwise the conflict state is stale.
			 */
			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
			    (void *)pp);
		}
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (kp->kp_refcnt <= 0)
			panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp);

		if (PP_ISNC(pp)) {
			uncached = 1;
		} else {
			/* conflict resolved; clear the conflict state */
			ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp));
			ASSERT(kp->kp_refcntc > 0);
			kp->kp_refcntc--;
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			ASSERT(PP_ISKPMS(pp) == 0);
			if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0)
				goto largeexit;
		}

		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_RPLS_CONFL:
		/* uncached conflict page mapped small; reload uncached */
		if (!PP_ISMAPPED(pp)) {
			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
			    (void *)pp);
		}
		if (!PP_ISNC(pp)) {
			panic("sfmmu_kpm_fault: page not uncached, pp=%p",
			    (void *)pp);
		}
		uncached = 1;
		goto smallexit;

	default:
badstate_exit:
		panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
		    "pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
	}

smallexit:
	/* tte assembly */
	if (uncached == 0)
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
	else
		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

	/* tsb dropin */
	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
	error = 0;
	goto exit;

largeexit:
	if (kp->kp_refcnt > 0) {
		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);

		if (kp->kp_refcntc == 0) {
			/* hand future misses over to the tsbmiss handler */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
			    KPMTSBM_START);
		}
		ASSERT(kp->kp_refcntc == -1);
		error = 0;
	} else
		error = EFAULT;
exit:
	mutex_exit(&kpmp->khl_mutex);
	sfmmu_mlist_exit(pml);
	return (error);
}
/*
 * kpm fault handler for kpm_smallpages mode: reload the 8K tte for
 * vaddr according to the kpm_spage state.  KPM_MAPPEDS means a plain
 * cached reload; KPM_MAPPEDSC means a VAC conflict has to be
 * resolved first and the page then goes back to either cached or
 * uncached mapping.  Returns 0 on success, EFAULT when the page has
 * no kpm mapping.
 */
int
sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
{
	int		error = 0;
	pgcnt_t		inx;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	kmutex_t	*pml;
	pfn_t		pfn = pp->p_pagenum;
	tte_t		tte;
	kmutex_t	*pmtx;
	int		oldval;

	inx = pfn - mseg->kpm_pbase;
	ksp = &mseg->kpm_spages[inx];
	kpmsp = KPMP_SHASH(ksp);

	pml = sfmmu_mlist_enter(pp);

	if (!PP_ISMAPPED_KPM(pp)) {
		sfmmu_mlist_exit(pml);
		return (EFAULT);
	}

	if (ksp->kp_mapped == KPM_MAPPEDS) {
		/* regular mapping: just reload the cached tte */
		ASSERT(!PP_ISKPMC(pp));
		ASSERT(!PP_ISNC(pp));

		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	} else if (ksp->kp_mapped == KPM_MAPPEDSC) {
		/* conflict mapping: resolve the VAC conflict first */
		sfmmu_kpm_vac_conflict(pp, vaddr);

		if (PP_ISNC(pp)) {
			/* page stays uncached */
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
			    &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDSC));
			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_fault_small: "
				    "stale smallpages mapping");
		} else {
			/* conflict resolved; back to a cached mapping */
			if (PP_ISKPMC(pp)) {
				pmtx = sfmmu_page_enter(pp);
				PP_CLRKPMC(pp);
				sfmmu_page_exit(pmtx);
			}

			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
			    &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDS));

			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_fault_small: "
				    "stale smallpages mapping");
		}
	} else {
		error = EFAULT;
	}

	sfmmu_mlist_exit(pml);
	return (error);
}
/*
 * Resolve a VAC conflict for a kpm mapin of page pp at virtual color
 * addr_to_vcolor(vaddr).  On return either no conflict exists (the
 * page was flushed and recolored, or all conflicting small hme
 * mappings were unloaded) or the page has been marked temporarily
 * non-cacheable (when a kernel or locked mapping prevents unloading).
 * Called with the mapping-list lock held.
 */
static void
sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
{
	int		vcolor;
	struct sf_hment	*sfhmep;
	struct hat	*tmphat;
	struct sf_hment	*tmphme = NULL;
	struct hme_blk	*hmeblkp;
	tte_t		tte;

	ASSERT(sfmmu_mlist_held(pp));

	if (PP_ISNC(pp))
		return;

	vcolor = addr_to_vcolor(vaddr);
	if (PP_GET_VCOLOR(pp) == vcolor)
		return;

	ASSERT(!PP_ISMAPPED_LARGE(pp));

	if (!PP_ISMAPPED(pp)) {
		/* no mappings at all: flush the cache and recolor */
		SFMMU_STAT(sf_pgcolor_conflict);
		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
		PP_SET_VCOLOR(pp, vcolor);
		return;
	}

	/*
	 * A kernel or locked mapping can't be unloaded; fall back to
	 * making the page temporarily non-cacheable instead.
	 */
	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		if (IS_PAHME(sfhmep))
			continue;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		tmphat = hblktosfmmu(hmeblkp);
		sfmmu_copytte(&sfhmep->hme_tte, &tte);
		ASSERT(TTE_IS_VALID(&tte));
		if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) {
			SFMMU_STAT(sf_uncache_conflict);
			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
			return;
		}
	}

	/* unload all user small mappings, then flush and recolor */
	SFMMU_STAT(sf_unload_conflict);

	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		if (IS_PAHME(sfhmep))
			continue;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
	}

	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
	PP_SET_VCOLOR(pp, vcolor);
}
/*
 * Unload the kpm mapping of page pp: unlink every kpme from the
 * page's kpme list (each drops one p_kpmref) and tear down the kpm
 * mapping.  Called with the mapping-list lock held and at least one
 * kpm reference present.
 */
void
sfmmu_kpm_pageunload(page_t *pp)
{
	caddr_t		vaddr;
	struct kpme	*kpme, *nkpme;

	ASSERT(pp != NULL);
	ASSERT(pp->p_kpmref);
	ASSERT(sfmmu_mlist_held(pp));

	vaddr = hat_kpm_page2va(pp, 1);

	for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) {
		ASSERT(kpme->kpe_page == pp);

		if (pp->p_kpmref == 0)
			panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
			    "kpme=%p", (void *)pp, (void *)kpme);

		nkpme = kpme->kpe_next;

		sfmmu_kpme_sub(kpme, pp);
	}

	if (pp->p_kpmref != 0)
		panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp);

	sfmmu_kpm_mapout(pp, vaddr);
}
/*
 * Remove the large (4M) kpm mapping for vaddr from the kernel TSB and
 * flush it from all TLBs.
 */
static void
sfmmu_kpm_demap_large(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
	sfmmu_kpm_demap_tlbs(vaddr);
}
/*
 * Remove the small (8K) kpm mapping for vaddr from the kernel TSB and
 * flush it from all TLBs.
 */
static void
sfmmu_kpm_demap_small(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
	sfmmu_kpm_demap_tlbs(vaddr);
}
/*
 * Flush the kpm tte for vaddr from all TLBs: cross-call the other
 * ready CPUs that may hold the kernel context and demap locally as
 * well.  Preemption is disabled around the cross-call so the local
 * CPU stays out of its own target set.
 */
static void
sfmmu_kpm_demap_tlbs(caddr_t vaddr)
{
	cpuset_t cpuset;

	kpreempt_disable();
	cpuset = ksfmmup->sfmmu_cpusran;
	CPUSET_AND(cpuset, cpu_ready_set);
	CPUSET_DEL(cpuset, CPU->cpu_id);
	SFMMU_XCALL_STATS(ksfmmup);

	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr,
	    (uint64_t)ksfmmup);
	vtag_flushpage(vaddr, (uint64_t)ksfmmup);

	kpreempt_enable();
}
/*
 * sfmmu_kpm_vac_unload case encoding; same KPM_KC/KPM_KS/KPM_C/KPM_S
 * bit meanings as for the KPM_TSBM_* cases above.
 */
#define	KPM_VUL_BIG		(0)
#define	KPM_VUL_CONFL_INCR1	(KPM_KS)
#define	KPM_VUL_UNMAP_SMALL1	(KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_INCR2	(KPM_KC)
#define	KPM_VUL_CONFL_INCR3	(KPM_KC | KPM_KS)
#define	KPM_VUL_UNMAP_SMALL2	(KPM_KC | KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_DECR1	(KPM_KC | KPM_C)
#define	KPM_VUL_CONFL_DECR2	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_VUL_TNC		(KPM_KC | KPM_C | KPM_KS | KPM_S)
/*
 * Adjust the kpm mapping of page pp on a VAC unload at virtual
 * address vaddr (NOTE(review): presumably invoked when a non-kpm
 * mapping at a different color is established/unloaded — confirm
 * against callers).  newcolor is nonzero when the kpm address and
 * vaddr differ in virtual color; in that case the kpm mapping is
 * demapped and the page marked as a conflict page (P_KPMC), otherwise
 * an existing conflict mark may be cleared.  Called with the page
 * locked, the mapping-list lock held and the page cacheable.
 */
void
sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		kpmvaddr = hat_kpm_page2va(pp, 1);
	int		newcolor;
	kmutex_t	*pmtx;
	uint_t		vacunlcase;
	int		badstate = 0;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;

	ASSERT(PAGE_LOCKED(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(!PP_ISNC(pp));

	newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr);

	if (kpm_smallpages)
		goto smallpages_vac_unload;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}

		if (PP_ISKPMC(pp) == 0) {
			if (newcolor == 0)
				goto exit;
			sfmmu_kpm_demap_small(kpmvaddr);
			pmtx = sfmmu_page_enter(pp);
			PP_SETKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else if (newcolor == 0) {
			/* conflict resolved; clear the mark */
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else {
			badstate++;
		}

		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if the kpm mapping for this
		 * kpm_page is valid (tsbmiss handler active); nothing
		 * of the page state may then be set.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	if (PP_ISKPMC(pp) == 0 && newcolor == 0) {
		ASSERT(PP_ISKPMS(pp) == 0);
		goto exit;
	}

	/*
	 * Combine the per-kpm_page and per-page state bits to select
	 * the unload action.
	 */
	vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	switch (vacunlcase) {
	case KPM_VUL_BIG:
		/* break up the shared large mapping */
		if (kp->kp_refcntc == -1) {
			/* disable the kpm tsbmiss handler first */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		sfmmu_kpm_demap_large(kpmvaddr);

		ASSERT(kp->kp_refcntc == 0);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_UNMAP_SMALL1:
	case KPM_VUL_UNMAP_SMALL2:
		/* demap the small mapping, page becomes a conflict page */
		ASSERT(newcolor);
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_INCR1:
	case KPM_VUL_CONFL_INCR2:
	case KPM_VUL_CONFL_INCR3:
		/* page becomes an additional conflict page */
		ASSERT(newcolor);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_DECR1:
	case KPM_VUL_CONFL_DECR2:
		/* conflict for this page resolved */
		ASSERT(newcolor == 0);
		kp->kp_refcntc--;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_TNC:
		cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
		    "page not in NC state");
		/* FALLTHROUGH */

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
		    "kpmvaddr=%p kp=%p pp=%p",
		    (void *)kpmvaddr, (void *)kp, (void *)pp);
	}
	mutex_exit(&kpmp->khl_mutex);

	return;

smallpages_vac_unload:
	if (newcolor == 0)
		return;

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		if (ksp->kp_mapped == KPM_MAPPEDS) {
			/*
			 * Stop TL tsbmiss handling before demapping.
			 */
			(void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
			    &kpmsp->kshl_lock, KPM_MAPPEDSC);

			sfmmu_kpm_demap_small(kpmvaddr);

		} else if (ksp->kp_mapped != KPM_MAPPEDSC) {
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
		}

		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);

	} else {
		if (ksp->kp_mapped != KPM_MAPPEDSC)
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
	}
}
/*
 * Clear the kpm conflict mark (P_KPMC) of page pp and, outside the
 * VAC alias range, drop the kpm_page conflict count.  NOTE(review):
 * presumably called when the (last) conflicting hme mapping of the
 * page is unloaded — confirm against callers.  A page both conflict
 * marked and mapped small (P_KPMC | P_KPMS) is inconsistent and
 * panics; a purely small-mapped page needs no adjustment.  Expects
 * the mapping-list lock to be held and a kpm mapping to exist.
 */
void
sfmmu_kpm_hme_unload(page_t *pp)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		vaddr;
	kmutex_t	*pmtx;
	uint_t		flags;
	kpm_spage_t	*ksp;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(PP_ISMAPPED_KPM(pp));

	flags = pp->p_nrm & (P_KPMC | P_KPMS);

	if (kpm_smallpages)
		goto smallpages_hme_unload;

	if (flags == (P_KPMC | P_KPMS)) {
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	} else if (flags == P_KPMS) {
		/* page mapped small; nothing to do */
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);
	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(vaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}
	} else {
		if (kp->kp_refcntc < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
			    (void *)kp);
		}
		kp->kp_refcntc--;
	}

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);

	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_hme_unload:
	if (flags != P_KPMC)
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	vaddr = hat_kpm_page2va(pp, 1);
	PP2KPMSPG(pp, ksp);

	if (ksp->kp_mapped != KPM_MAPPEDSC)
		panic("sfmmu_kpm_hme_unload: inconsistent mapping");

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);
}
/*
 * Acquire and return the kpm hash lock for the kpm_page covering
 * page pp.  Returns NULL (no lock taken) in kpm_smallpages mode or
 * when the page has no kpm mapping.  Caller holds the mapping-list
 * lock; release via sfmmu_kpm_kpmp_exit().
 */
kpm_hlk_t *
sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;

	ASSERT(sfmmu_mlist_held(pp));

	if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0)
		return (NULL);

	ASSERT(npages <= kpmpnpgs);

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	return (kpmp);
}
/*
 * Release the kpm hash lock obtained from sfmmu_kpm_kpmp_enter().
 */
void
sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp)
{
	if (kpm_smallpages || kpmp == NULL)
		return;

	mutex_exit(&kpmp->khl_mutex);
}
/*
 * sfmmu_kpm_page_cache case encodings (same KPM_* bit meanings as
 * above): KPM_UNC_* select the uncache (HAT_TMPNC) action and
 * KPM_CACHE_* the re-cache (HAT_CACHE) action.
 */
#define	KPM_UNC_BIG		(0)
#define	KPM_UNC_NODEMAP1	(KPM_KS)
#define	KPM_UNC_SMALL1		(KPM_KS | KPM_S)
#define	KPM_UNC_NODEMAP2	(KPM_KC)
#define	KPM_UNC_NODEMAP3	(KPM_KC | KPM_KS)
#define	KPM_UNC_SMALL2		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_UNC_NOP1		(KPM_KC | KPM_C)
#define	KPM_UNC_NOP2		(KPM_KC | KPM_C | KPM_KS)

#define	KPM_CACHE_NOMAP		(KPM_KC | KPM_C)
#define	KPM_CACHE_NOMAPO	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_CACHE_MAPS		(KPM_KC | KPM_C | KPM_KS | KPM_S)
/*
 * Transition the kpm mapping of page pp between cacheable (HAT_CACHE)
 * and temporarily non-cacheable (HAT_TMPNC) state, updating the
 * kpm_page counters / kpm_spage state accordingly.  When
 * cache_flush_tag is CACHE_FLUSH the page is flushed from all VACs
 * first.  Caller holds the mapping-list lock, the page spl lock and
 * (regular mode) the kpm hash lock.
 */
void
sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		kpmvaddr;
	int		badstate = 0;
	uint_t		pgcacase;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(PP_ISMAPPED_KPM(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(sfmmu_page_spl_held(pp));

	if (flags != HAT_TMPNC && flags != HAT_CACHE)
		panic("sfmmu_kpm_page_cache: bad flags");

	kpmvaddr = hat_kpm_page2va(pp, 1);

	if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) {
		/* flush the page from the VAC of all ready CPUs */
		pfn_t pfn = pp->p_pagenum;
		int vcolor = addr_to_vcolor(kpmvaddr);
		cpuset_t cpuset = cpu_ready_set;

		/* Flush cache of local CPU separately (outside the xcall) */
		CPUSET_DEL(cpuset, CPU->cpu_id);

		SFMMU_XCALL_STATS(ksfmmup);
		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
		vac_flushpage(pfn, vcolor);
	}

	if (kpm_smallpages)
		goto smallpages_page_cache;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	ASSERT(MUTEX_HELD(&kpmp->khl_mutex));

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnta "
			    "kpm_page=%p\n", (void *)kp);
		}
		sfmmu_kpm_demap_small(kpmvaddr);
		if (flags == HAT_TMPNC) {
			PP_SETKPMC(pp);
			ASSERT(!PP_ISKPMS(pp));
		} else {
			ASSERT(PP_ISKPMC(pp));
			PP_CLRKPMC(pp);
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if the kpm mapping for this
		 * kpm_page is valid (tsbmiss handler active); nothing
		 * of the page state may then be set.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	/* combine kpm_page and page state to select the action */
	pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	if (flags == HAT_CACHE) {
		switch (pgcacase) {
		case KPM_CACHE_MAPS:
			/* small mapping exists; demap it first ... */
			sfmmu_kpm_demap_small(kpmvaddr);
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_page_cache: bad refcnts "
				    "kpm_page=%p\n", (void *)kp);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			PP_CLRKPMS(pp);
			/* FALLTHROUGH */

		case KPM_CACHE_NOMAP:
		case KPM_CACHE_NOMAPO:
			/* ... then drop the conflict state */
			kp->kp_refcntc--;
			PP_CLRKPMC(pp);
			break;

		default:
			badstate++;
		}
		goto exit;
	}

	/* flags == HAT_TMPNC: make the kpm mapping uncached */
	switch (pgcacase) {
	case KPM_UNC_BIG:
		if (kp->kp_refcnt < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnt "
			    "kpm_page=%p\n", (void *)kp);
		}

		/*
		 * Stop the tsbmiss handler before breaking the large
		 * mapping.
		 */
		if (kp->kp_refcntc == -1) {
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc == 0);
		sfmmu_kpm_demap_large(kpmvaddr);
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_SMALL1:
	case KPM_UNC_SMALL2:
		/* demap the small mapping; page becomes a conflict page */
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcntc++;
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NODEMAP1:
	case KPM_UNC_NODEMAP2:
	case KPM_UNC_NODEMAP3:
		/* no tte loaded for this page; just mark the conflict */
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NOP1:
	case KPM_UNC_NOP2:
		/* already marked as a conflict page */
		break;

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_page_cache: inconsistent VAC state "
		    "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
		    (void *)kp, (void *)pp);
	}
	return;

smallpages_page_cache:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	/*
	 * marked as nogo for we will fault in and resolve it
	 * through sfmmu_kpm_fault_small
	 */
	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
	    KPM_MAPPEDSC);

	if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
		panic("smallpages_page_cache: inconsistent mapping");

	sfmmu_kpm_demap_small(kpmvaddr);

	if (flags == HAT_TMPNC) {
		PP_SETKPMC(pp);
		ASSERT(!PP_ISKPMS(pp));

	} else {
		ASSERT(PP_ISKPMC(pp));
		PP_CLRKPMC(pp);
	}
}