#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <vm/vm_dep.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/panic.h>
/*
 * Large-page policy state.  Each *_disable_large_pages mask has a bit
 * set (1 << TTE<size>) for every page size that must NOT be used for
 * the corresponding class of mapping; mmu_large_pages_disabled()
 * returns the appropriate mask.
 */
static int panther_only = 0;	/* nonzero when every CPU is a Panther */

/* HAT_LOAD mask when all CPUs are Panther: only 256M is disabled */
static uint_t pan_disable_large_pages = (1 << TTE256M);
/* HAT_LOAD mask for Cheetah/Jaguar mixes: 32M and 256M disabled */
static uint_t chjag_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));

/*
 * ISM and auto-data/text defaults: everything except 8K/4M disabled.
 * mmu_init_large_pages() relaxes the ISM and auto-data masks according
 * to the configured ISM page size; the auto-text mask is further
 * relaxed at lookup time by the use_text_pgsz* tunables.
 */
static uint_t mmu_disable_ism_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static uint_t mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
static uint_t mmu_disable_auto_text_large_pages = ((1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
/*
 * Return the mask of large page sizes (1 << TTE<size> bits) that are
 * disabled for mappings of the given class.  flag is one of HAT_LOAD
 * (regular mappings), HAT_LOAD_SHARE (ISM), HAT_AUTO_DATA or
 * HAT_AUTO_TEXT (automatic large-page selection); any other value
 * yields 0 (nothing disabled).
 */
uint_t
mmu_large_pages_disabled(uint_t flag)
{
	uint_t disabled = 0;
	extern int use_text_pgsz64K;
	extern int use_text_pgsz512K;

	switch (flag) {
	case HAT_LOAD:
		/* Panther-only systems support more DTLB page sizes */
		disabled = panther_only ?
		    pan_disable_large_pages : chjag_disable_large_pages;
		break;
	case HAT_LOAD_SHARE:
		disabled = mmu_disable_ism_large_pages;
		break;
	case HAT_AUTO_DATA:
		disabled = mmu_disable_auto_data_large_pages;
		break;
	case HAT_AUTO_TEXT:
		disabled = mmu_disable_auto_text_large_pages;
		/* honor the text large-page tunables */
		if (use_text_pgsz512K)
			disabled &= ~(1 << TTE512K);
		if (use_text_pgsz64K)
			disabled &= ~(1 << TTE64K);
		break;
	default:
		break;
	}

	return (disabled);
}
#if defined(CPU_IMP_DUAL_PAGESIZE)
/*
 * Panther DTLB usage restrictions (see mmu_setup_page_sizes()).
 * NOTE(review): when MIXEDCPU_DR_SUPPORTED is defined, a Panther may
 * presumably be DR'd into a non-Panther domain at any time, so the
 * restrictions are enabled unconditionally; otherwise they are only
 * turned on once mmu_init_mmu_page_sizes() actually sees a Panther.
 */
#ifdef MIXEDCPU_DR_SUPPORTED
int panther_dtlb_restrictions = 1;
#else
int panther_dtlb_restrictions = 0;
#endif

/* set once mmu_init_mmu_page_sizes() has completed its first call */
int init_mmu_page_sizes = 0;
/*
 * Re-derive the large-page disable masks for the configured ISM page
 * size (ism_pagesize).  Larger ISM page sizes (32M/256M) enable the
 * corresponding size for ISM and widen the auto-data limit via
 * adjust_data_maxlpsize(); note they also disable 4M for auto-data.
 * When the CPU lacks dual page-size support, only the conservative
 * defaults are (re)applied and the ISM size is ignored.
 */
void
mmu_init_large_pages(size_t ism_pagesize)
{
	if (cpu_impl_dual_pgsz == 0) {
		/* defaults: no 32M/256M anywhere, ISM/auto-data 4M only */
		pan_disable_large_pages = ((1 << TTE32M) | (1 << TTE256M));
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		return;
	}

	switch (ism_pagesize) {
	case MMU_PAGESIZE4M:
		/* 4M ISM: same shape as the built-in defaults */
		pan_disable_large_pages = (1 << TTE256M);
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
		break;
	case MMU_PAGESIZE32M:
		/* enable 32M for ISM and auto-data (4M auto-data off) */
		pan_disable_large_pages = (1 << TTE256M);
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE256M));
		mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE4M) | (1 << TTE256M));
		adjust_data_maxlpsize(ism_pagesize);
		break;
	case MMU_PAGESIZE256M:
		/* enable 256M for ISM and auto-data (4M auto-data off) */
		pan_disable_large_pages = (1 << TTE32M);
		mmu_disable_ism_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE32M));
		mmu_disable_auto_data_large_pages = ((1 << TTE64K) |
		    (1 << TTE512K) | (1 << TTE4M) | (1 << TTE32M));
		adjust_data_maxlpsize(ism_pagesize);
		break;
	default:
		/* unsupported size: warn and leave current masks alone */
		cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
		    ism_pagesize);
		break;
	}
}
/*
 * One-time page size setup based on the CPU census: cinfo is the
 * number of Panther CPUs found.  When every CPU is a Panther, export
 * the full set of page sizes (including 32M/256M) and enable the
 * Panther DTLB restrictions; when only some are, just enable the
 * restrictions.  Returns 0 if this call performed the initialization,
 * 1 if it had already been done.
 */
int
mmu_init_mmu_page_sizes(int cinfo)
{
	if (init_mmu_page_sizes)
		return (1);

	if (cinfo == ncpunode) {
		/* all-Panther system: expose every page size */
		mmu_page_sizes = MMU_PAGE_SIZES;
		mmu_hashcnt = MAX_HASHCNT;
		mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;
		mmu_exported_pagesize_mask = (1 << TTE8K) |
		    (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
		    (1 << TTE32M) | (1 << TTE256M);
		panther_dtlb_restrictions = 1;
		panther_only = 1;
	} else if (cinfo > 0) {
		/* mixed system with at least one Panther */
		panther_dtlb_restrictions = 1;
	}

	init_mmu_page_sizes = 1;
	return (0);
}
/*
 * DTLB budget used to decide whether a page size is worth one of the
 * two programmable TLB page-size slots.  NOTE(review): the totals
 * presumably describe the fully-associative DTLB (T16) with some
 * entries reserved/locked - confirm against the CPU module.
 */
#ifndef LOCKED_DTLB_ENTRIES
#define	LOCKED_DTLB_ENTRIES	5
#endif

#define	TOTAL_DTLB_ENTRIES	16
#define	AVAIL_32M_ENTRIES	0
#define	AVAIL_256M_ENTRIES	0
#define	AVAIL_DTLB_ENTRIES	(TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)

/*
 * Minimum TTE count, per page size, before that size is kept as a
 * preferred size (see mmu_fixup_large_pages()/mmu_setup_page_sizes()).
 * The 32M/256M thresholds are 0, so those sizes are never rejected
 * on count grounds once selected.
 */
static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
	AVAIL_32M_ENTRIES, AVAIL_256M_ENTRIES };
/*
 * Panther-only adjustment of the two preferred page sizes in
 * tmp_pgsz[0]/tmp_pgsz[1] (set by mmu_check_page_sizes()):
 *  - if 32M (or else 256M) mappings are present/flagged, force that
 *    size into slot 1, moving slot 1's previous choice to slot 0 if
 *    slot 0 held the same size;
 *  - otherwise, 512K/4M may not occupy slot 1: swap it into slot 0,
 *    or drop it to the base size if slot 0 already holds 512K/4M.
 *    NOTE(review): slot restriction inferred from the code - confirm
 *    against the Panther TLB programming model.
 * Finally, a slot whose TTE count is below ttecnt_threshold[] falls
 * back to the base (8K) size.
 */
static void
mmu_fixup_large_pages(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
	uint_t pgsz0 = tmp_pgsz[0];
	uint_t pgsz1 = tmp_pgsz[1];
	uint_t spgsz;

	/* user hat only; 32M and 256M are mutually exclusive per hat */
	ASSERT(hat->sfmmu_ismhat == 0);
	ASSERT(hat != ksfmmup);
	ASSERT(cpu_impl_dual_pgsz == 1);
	ASSERT(!SFMMU_TTEFLAGS_ISSET(hat, HAT_32M_FLAG) ||
	    !SFMMU_TTEFLAGS_ISSET(hat, HAT_256M_FLAG));
	ASSERT(!SFMMU_TTEFLAGS_ISSET(hat, HAT_256M_FLAG) ||
	    !SFMMU_TTEFLAGS_ISSET(hat, HAT_32M_FLAG));
	ASSERT(!SFMMU_FLAGS_ISSET(hat, HAT_32M_ISM) ||
	    !SFMMU_FLAGS_ISSET(hat, HAT_256M_ISM));
	ASSERT(!SFMMU_FLAGS_ISSET(hat, HAT_256M_ISM) ||
	    !SFMMU_FLAGS_ISSET(hat, HAT_32M_ISM));

	if (SFMMU_TTEFLAGS_ISSET(hat, HAT_32M_FLAG) ||
	    (ttecnt[TTE32M] != 0) ||
	    SFMMU_FLAGS_ISSET(hat, HAT_32M_ISM)) {
		/* 32M in use: it must live in slot 1 */
		spgsz = pgsz1;
		pgsz1 = TTE32M;
		if (pgsz0 == TTE32M)
			pgsz0 = spgsz;
	} else if (SFMMU_TTEFLAGS_ISSET(hat, HAT_256M_FLAG) ||
	    (ttecnt[TTE256M] != 0) ||
	    SFMMU_FLAGS_ISSET(hat, HAT_256M_ISM)) {
		/* 256M in use: it must live in slot 1 */
		spgsz = pgsz1;
		pgsz1 = TTE256M;
		if (pgsz0 == TTE256M)
			pgsz0 = spgsz;
	} else if ((pgsz1 == TTE512K) || (pgsz1 == TTE4M)) {
		if ((pgsz0 != TTE512K) && (pgsz0 != TTE4M)) {
			/* move 512K/4M out of slot 1 into slot 0 */
			spgsz = pgsz0;
			pgsz0 = pgsz1;
			pgsz1 = spgsz;
		} else {
			/* slot 0 already 512K/4M: demote slot 1 to 8K */
			pgsz1 = page_szc(MMU_PAGESIZE);
		}
	}

	/* too few TTEs of a size: not worth a slot, fall back to 8K */
	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
		pgsz0 = page_szc(MMU_PAGESIZE);
	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
		pgsz1 = page_szc(MMU_PAGESIZE);

	tmp_pgsz[0] = pgsz0;
	tmp_pgsz[1] = pgsz1;
}
/*
 * Adjust the two preferred page sizes in tmp_pgsz[0]/tmp_pgsz[1] (set
 * by mmu_check_page_sizes()) for non-Panther-only configurations.
 * With Panther DTLB restrictions in effect, 512K/4M may not occupy
 * slot 1 (it is swapped into slot 0 or demoted to 8K); otherwise the
 * two sizes are simply ordered smallest-first.  A slot whose TTE count
 * is below ttecnt_threshold[] falls back to the base (8K) size.
 */
static void
mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt, uint8_t *tmp_pgsz)
{
	uint_t pgsz0, pgsz1;

	/*
	 * sfmmu_ismhat is an integer flag: compare against 0, not the
	 * pointer constant NULL (matches mmu_fixup_large_pages()).
	 */
	ASSERT(hat->sfmmu_ismhat == 0);
	ASSERT(hat != ksfmmup);

	if (cpu_impl_dual_pgsz == 0)
		return;

	if (panther_dtlb_restrictions) {
		if ((tmp_pgsz[1] == TTE512K) || (tmp_pgsz[1] == TTE4M)) {
			if ((tmp_pgsz[0] != TTE512K) &&
			    (tmp_pgsz[0] != TTE4M)) {
				/* move 512K/4M out of slot 1 */
				pgsz1 = tmp_pgsz[0];
				pgsz0 = tmp_pgsz[1];
			} else {
				/* both slots 512K/4M: demote slot 1 */
				pgsz0 = tmp_pgsz[0];
				pgsz1 = page_szc(MMU_PAGESIZE);
			}
		} else {
			pgsz0 = tmp_pgsz[0];
			pgsz1 = tmp_pgsz[1];
		}
	} else {
		pgsz0 = MIN(tmp_pgsz[0], tmp_pgsz[1]);
		pgsz1 = MAX(tmp_pgsz[0], tmp_pgsz[1]);
	}

	/* too few TTEs of a size: not worth a slot, fall back to 8K */
	if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
		pgsz0 = page_szc(MMU_PAGESIZE);
	if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
		pgsz1 = page_szc(MMU_PAGESIZE);

	tmp_pgsz[0] = pgsz0;
	tmp_pgsz[1] = pgsz1;
}
/*
 * Encode the hat's two preferred page sizes (sfmmu_pgsz[0]/[1]) into
 * the context extension value (sfmmu_cext) used when this address
 * space's context is programmed.  No-op on CPUs without dual page-size
 * support.  Caller must hold the hat lock; when cext actually changes,
 * all of the hat's per-MMU contexts must already be invalid.
 */
void
mmu_set_ctx_page_sizes(struct hat *hat)
{
	uint_t pgsz0, pgsz1;
	uint_t new_cext;

	ASSERT(sfmmu_hat_lock_held(hat));
	ASSERT(hat != ksfmmup);

	if (cpu_impl_dual_pgsz == 0)
		return;

	pgsz0 = hat->sfmmu_pgsz[0];
	pgsz1 = hat->sfmmu_pgsz[1];

	ASSERT(pgsz0 < mmu_page_sizes);
	ASSERT(pgsz1 < mmu_page_sizes);

#ifdef DEBUG
	/* invariants established by mmu_setup/mmu_fixup_large_pages() */
	if (panther_dtlb_restrictions) {
		ASSERT(pgsz1 != TTE512K);
		ASSERT(pgsz1 != TTE4M);
	}
	if (panther_only) {
		ASSERT(pgsz0 != TTE32M);
		ASSERT(pgsz0 != TTE256M);
	}
#endif

	new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
	if (hat->sfmmu_cext != new_cext) {
#ifdef DEBUG
		int i;
		/* changing cext is only safe with no live contexts */
		for (i = 0; i < max_mmu_ctxdoms; i++) {
			ASSERT(hat->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
		}
#endif
		hat->sfmmu_cext = new_cext;
	}
}
/*
 * Recompute the process's two preferred page sizes from its per-size
 * TTE counts (ttecnt).  Page sizes are ranked by descending count via
 * a selection sort (ties resolve to the larger size, since scanning
 * runs from the largest index down); the top two are then adjusted for
 * hardware restrictions and, if the resulting pair differs from the
 * current sfmmu_pgsz[] pair, reprogrammed into the hat.
 */
void
mmu_check_page_sizes(sfmmu_t *sfmmup, uint64_t *ttecnt)
{
	uint64_t sortcnt[MMU_PAGE_SIZES];
	uint8_t tmp_pgsz[MMU_PAGE_SIZES];
	uint8_t i, j, max;
	uint16_t oldval, newval;

	/* only bother when large pages are in use for this process */
	if (SFMMU_LGPGS_INUSE(sfmmup)) {
		/* work on a copy: the sort destroys the counts */
		for (i = 0; i < mmu_page_sizes; i++) {
			sortcnt[i] = ttecnt[i];
		}
		/* selection sort: tmp_pgsz[] = sizes by descending count */
		for (j = 0; j < mmu_page_sizes; j++) {
			for (i = mmu_page_sizes - 1, max = 0; i > 0; i--) {
				if (sortcnt[i] > sortcnt[max])
					max = i;
			}
			tmp_pgsz[j] = max;
			sortcnt[max] = 0;
		}

		/* pack the old pair into one value for a cheap compare */
		oldval = sfmmup->sfmmu_pgsz[0] << 8 | sfmmup->sfmmu_pgsz[1];

		if (panther_only) {
			mmu_fixup_large_pages(sfmmup, ttecnt, tmp_pgsz);
		} else {
			mmu_setup_page_sizes(sfmmup, ttecnt, tmp_pgsz);
		}

		/* reprogram only if the preferred pair changed */
		newval = tmp_pgsz[0] << 8 | tmp_pgsz[1];
		if (newval != oldval) {
			sfmmu_reprog_pgsz_arr(sfmmup, tmp_pgsz);
		}
	}
}
#endif
/*
 * One candidate kernel-heap large page configuration: a CPU
 * implementation, the TTE page-size code to use for the heap, and
 * whether the DT512 (512-entry DTLB) should be programmed for it
 * (NOTE(review): use_dt512 meaning inferred from the name - confirm).
 */
struct heap_lp_page_size {
	int impl;		/* CPU implementation number */
	uint_t tte;		/* heap page size (TTE code) */
	int use_dt512;		/* program DT512 for this size? */
};
/*
 * Kernel-heap large page preference table, grouped by CPU
 * implementation.  mmu_get_kernel_lpsize() scans in order: the first
 * row for the running implementation is that CPU's default (taken
 * when lpsize == 0), and an explicit lpsize must match a row's size
 * (and any already-settled use_dt512 value) to be accepted.
 */
struct heap_lp_page_size heap_lp_pgsz[] = {
	{ CHEETAH_IMPL, TTE8K, 0 },		/* default */
	{ CHEETAH_IMPL, TTE64K, 0 },
	{ CHEETAH_IMPL, TTE4M, 0 },

	{ CHEETAH_PLUS_IMPL, TTE4M, 1 },	/* default */
	{ CHEETAH_PLUS_IMPL, TTE4M, 0 },
	{ CHEETAH_PLUS_IMPL, TTE64K, 1 },
	{ CHEETAH_PLUS_IMPL, TTE64K, 0 },
	{ CHEETAH_PLUS_IMPL, TTE8K, 0 },

	{ JALAPENO_IMPL, TTE4M, 1 },		/* default */
	{ JALAPENO_IMPL, TTE4M, 0 },
	{ JALAPENO_IMPL, TTE64K, 1 },
	{ JALAPENO_IMPL, TTE64K, 0 },
	{ JALAPENO_IMPL, TTE8K, 0 },

	{ JAGUAR_IMPL, TTE4M, 1 },		/* default */
	{ JAGUAR_IMPL, TTE4M, 0 },
	{ JAGUAR_IMPL, TTE64K, 1 },
	{ JAGUAR_IMPL, TTE64K, 0 },
	{ JAGUAR_IMPL, TTE8K, 0 },

	{ SERRANO_IMPL, TTE4M, 1 },		/* default */
	{ SERRANO_IMPL, TTE4M, 0 },
	{ SERRANO_IMPL, TTE64K, 1 },
	{ SERRANO_IMPL, TTE64K, 0 },
	{ SERRANO_IMPL, TTE8K, 0 },

	{ PANTHER_IMPL, TTE4M, 1 },		/* default */
	{ PANTHER_IMPL, TTE4M, 0 },
	{ PANTHER_IMPL, TTE64K, 1 },
	{ PANTHER_IMPL, TTE64K, 0 },
	{ PANTHER_IMPL, TTE8K, 0 }
};

/*
 * Selected use_dt512 setting for the kernel heap: -1 = not yet
 * decided; settled by mmu_get_kernel_lpsize() and consulted by
 * mmu_init_kernel_pgsz().
 */
int heaplp_use_dt512 = -1;
void
mmu_init_kernel_pgsz(struct hat *hat)
{
uint_t tte = page_szc(segkmem_lpsize);
uchar_t new_cext_primary, new_cext_nucleus;
if (heaplp_use_dt512 == 0 || tte > TTE4M) {
tte = TTE8K;
}
new_cext_nucleus = TAGACCEXT_MKSZPAIR(tte, TTE8K);
new_cext_primary = TAGACCEXT_MKSZPAIR(TTE8K, tte);
hat->sfmmu_cext = new_cext_primary;
kcontextreg = ((uint64_t)new_cext_nucleus << CTXREG_NEXT_SHIFT) |
((uint64_t)new_cext_primary << CTXREG_EXT_SHIFT);
}
/*
 * Validate/choose the kernel heap large page size.  lpsize == 0 asks
 * for the running CPU implementation's default (first matching row of
 * heap_lp_pgsz[]); a nonzero lpsize is accepted only if a row for this
 * implementation matches both the size and any previously settled
 * heaplp_use_dt512 value.  Unsupported requests, unknown
 * implementations, and CPUs without dual page-size support all fall
 * back to the base page size.  Sets heaplp_use_dt512 as a side effect.
 * Returns the page size (in bytes) to use for the kernel heap.
 */
size_t
mmu_get_kernel_lpsize(size_t lpsize)
{
	struct heap_lp_page_size *p_lpgsz, *pend_lpgsz;
	int impl = cpunodes[getprocessorid()].implementation;
	uint_t tte = TTE8K;

	if (cpu_impl_dual_pgsz == 0) {
		heaplp_use_dt512 = 0;
		return (MMU_PAGESIZE);
	}

	/* one past the last table entry, computed without char casts */
	pend_lpgsz =
	    &heap_lp_pgsz[sizeof (heap_lp_pgsz) / sizeof (heap_lp_pgsz[0])];

	for (p_lpgsz = heap_lp_pgsz; p_lpgsz < pend_lpgsz; p_lpgsz++) {
		if (impl != p_lpgsz->impl)
			continue;

		if (lpsize == 0) {
			/* first row for this impl is its default */
			tte = p_lpgsz->tte;
			heaplp_use_dt512 = p_lpgsz->use_dt512;
			break;
		}

		/* explicit request: size and dt512 setting must agree */
		if (lpsize == TTEBYTES(p_lpgsz->tte) &&
		    (heaplp_use_dt512 == -1 ||
		    heaplp_use_dt512 == p_lpgsz->use_dt512)) {
			tte = p_lpgsz->tte;
			heaplp_use_dt512 = p_lpgsz->use_dt512;
			break;
		}
	}

	if (p_lpgsz == pend_lpgsz) {
		/* no match: disable heap large pages */
		tte = TTE8K;
		heaplp_use_dt512 = 0;
	}

	lpsize = TTEBYTES(tte);
	return (lpsize);
}