#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <vm/vm_dep.h>
#include <sys/cpuvar.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/vmsystm.h>
#include <sys/prom_plat.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/dtrace.h>
#include <sys/simulate.h>
#include <sys/fault.h>
#include <sys/niagara2regs.h>
#include <sys/hsvc.h>
#include <sys/trapstat.h>
#include <sys/mutex_impl.h>
/*
 * All 32 low-order bits of a root-node physical address are significant.
 * NOTE(review): consumers of this mask live outside this file — confirm.
 */
uint_t root_phys_addr_lo_mask = 0xffffffffU;

/*
 * CPU module name for this implementation; also used as the hsvc_modname
 * in cpu_hsvc below and in the cpu_setup() warning message.
 */
#if defined(NIAGARA2_IMPL)
char cpu_module_name[] = "SUNW,UltraSPARC-T2";
#elif defined(VFALLS_IMPL)
char cpu_module_name[] = "SUNW,UltraSPARC-T2+";
#elif defined(KT_IMPL)
char cpu_module_name[] = "SPARC-T3";
#endif

/*
 * Set to B_FALSE by cpu_setup() if hypervisor service negotiation fails;
 * cpu_init_private()/cpu_uninit_private() only touch the niagara kstats
 * when this is B_TRUE.
 */
static boolean_t cpu_hsvc_available = B_TRUE;

/* Minor version actually granted by hsvc_register() in cpu_setup(). */
static uint64_t cpu_sup_minor;

/* CPU-specific hypervisor services group to negotiate, per implementation. */
#if defined(NIAGARA2_IMPL)
static hsvc_info_t cpu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIAGARA2_CPU, NIAGARA2_HSVC_MAJOR,
	NIAGARA2_HSVC_MINOR, cpu_module_name
};
#elif defined(VFALLS_IMPL)
static hsvc_info_t cpu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_VFALLS_CPU, VFALLS_HSVC_MAJOR,
	VFALLS_HSVC_MINOR, cpu_module_name
};
#elif defined(KT_IMPL)
static hsvc_info_t cpu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_KT_CPU, KT_HSVC_MAJOR,
	KT_HSVC_MINOR, cpu_module_name
};
#endif
/*
 * Boot-time CPU module setup: negotiate the CPU-specific hypervisor
 * services group, run the common sun4v setup, establish default hardware
 * capability flags, validate the MD page-size mask, and set address-space
 * hole boundaries and large-page size limits.
 */
void
cpu_setup(void)
{
	extern int mmu_exported_pagesize_mask;
	extern int cpc_has_overflow_intr;
	extern size_t contig_mem_prealloc_base_size;
	int status;

	/*
	 * Negotiate the API version for CPU-specific hypervisor services.
	 * Failure is not fatal — only the hv kstats depend on it.
	 */
	status = hsvc_register(&cpu_hsvc, &cpu_sup_minor);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
		    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d",
		    cpu_hsvc.hsvc_modname, cpu_hsvc.hsvc_group,
		    cpu_hsvc.hsvc_major, cpu_hsvc.hsvc_minor, status);
		cpu_hsvc_available = B_FALSE;
	}

	cpu_setup_common(NULL);

	/*
	 * Fall back to default hwcap flags if cpu_setup_common() did not
	 * obtain them (e.g. from machine description info).
	 *
	 * BUG FIX: the original code asserted cpu_hwcap_flags != 0 inside
	 * this branch, which is guarded by cpu_hwcap_flags == 0, so the
	 * assertion always fired on DEBUG kernels; warn instead and use
	 * the defaults.
	 */
	if (cpu_hwcap_flags == 0) {
#ifdef KT_IMPL
		cmn_err(CE_WARN, "hwcap info not found, "
		    "using default hwcap flags");
		cpu_hwcap_flags |= AV_SPARC_VIS3 | AV_SPARC_HPC | AV_SPARC_FMAF;
#endif /* KT_IMPL */
		cpu_hwcap_flags |= AV_SPARC_VIS | AV_SPARC_VIS2 |
		    AV_SPARC_ASI_BLK_INIT | AV_SPARC_POPC;
	}

	cache |= (CACHE_PTAG | CACHE_IOCOHERENT);

	/* The required sun4v page sizes (8K, 64K, 4M) must all be present. */
	if ((mmu_exported_pagesize_mask &
	    DEFAULT_SUN4V_MMU_PAGESIZE_MASK) !=
	    DEFAULT_SUN4V_MMU_PAGESIZE_MASK)
		cmn_err(CE_PANIC, "machine description"
		    " does not have required sun4v page sizes"
		    " 8K, 64K and 4M: MD mask is 0x%x",
		    mmu_exported_pagesize_mask);

	/*
	 * Boundaries of the VA hole, centered on the middle of the VA range
	 * and leaving 4GB usable at each end of each half.
	 */
	hole_start = (caddr_t)((1ull << (va_bits - 1)) - (1ull << 32));
	hole_end = (caddr_t)((0ull - (1ull << (va_bits - 1))) + (1ull << 32));

	/* CPU performance counters deliver an overflow interrupt. */
	cpc_has_overflow_intr = 1;

	/* Largest page size usable for heap, stack, and private mappings. */
	max_uheap_lpsize = MMU_PAGESIZE4M;
	max_ustack_lpsize = MMU_PAGESIZE4M;
	max_privmap_lpsize = MMU_PAGESIZE4M;

#ifdef SUN4V_CONTIG_MEM_PREALLOC_SIZE_MB
	/* Platform-specified contiguous-memory preallocation size. */
	contig_mem_prealloc_base_size = MB(SUN4V_CONTIG_MEM_PREALLOC_SIZE_MB);
#endif
}
/*
 * Fill in default L2 (external) cache geometry for any property the
 * platform did not supply for this cpu node.
 */
void
cpu_fiximp(struct cpu_node *cpunode)
{
	cpunode->ecache_size = (cpunode->ecache_size != 0) ?
	    cpunode->ecache_size : L2CACHE_SIZE;
	cpunode->ecache_linesize = (cpunode->ecache_linesize != 0) ?
	    cpunode->ecache_linesize : L2CACHE_LINESIZE;
	cpunode->ecache_associativity = (cpunode->ecache_associativity != 0) ?
	    cpunode->ecache_associativity : L2CACHE_ASSOCIATIVITY;
}
void
cpu_map_exec_units(struct cpu *cp)
{
ASSERT(MUTEX_HELD(&cpu_lock));
cp->cpu_m.cpu_ipipe = cpunodes[cp->cpu_id].exec_unit_mapping;
if (cp->cpu_m.cpu_ipipe == NO_EU_MAPPING_FOUND)
cp->cpu_m.cpu_ipipe = (id_t)(cp->cpu_id);
cp->cpu_m.cpu_fpu = cpunodes[cp->cpu_id].fpu_mapping;
if (cp->cpu_m.cpu_fpu == NO_EU_MAPPING_FOUND)
cp->cpu_m.cpu_fpu = (id_t)(cp->cpu_id);
cp->cpu_m.cpu_core = cp->cpu_m.cpu_fpu;
cp->cpu_m.cpu_mpipe = cpunodes[cp->cpu_id].l2_cache_mapping;
if (cp->cpu_m.cpu_mpipe == NO_L2_CACHE_MAPPING_FOUND)
cp->cpu_m.cpu_mpipe = CPU_L2_CACHEID_INVALID;
cp->cpu_m.cpu_chip = cpunodes[cp->cpu_id].l2_cache_mapping;
if (cp->cpu_m.cpu_chip == NO_L2_CACHE_MAPPING_FOUND)
cp->cpu_m.cpu_chip = CPU_CHIPID_INVALID;
}
/* Count of CPUs initialized via cpu_init_private(); protected by cpu_lock. */
static int cpucnt;
/*
 * Per-CPU private initialization: set up execution-unit mappings and, on
 * the very first CPU only, the niagara hypervisor kstats (if the hv
 * services group was negotiated).  Caller holds cpu_lock.
 */
void
cpu_init_private(struct cpu *cp)
{
	extern void niagara_kstat_init(void);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cpu_map_exec_units(cp);

	if (cpucnt == 0 && cpu_hsvc_available == B_TRUE)
		(void) niagara_kstat_init();
	cpucnt++;

	/* Use the %ccr-read based delay for adaptive mutex spins. */
	mutex_delay = rdccr_delay;
}
/*
 * Per-CPU private teardown: when the last CPU goes away, remove the
 * niagara hypervisor kstats (if they were set up).  Caller holds cpu_lock.
 */
void
cpu_uninit_private(struct cpu *cp)
{
	extern void niagara_kstat_fini(void);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cpucnt--;
	if (cpucnt == 0 && cpu_hsvc_available == B_TRUE)
		(void) niagara_kstat_fini();
}
/*
 * Flush instruction memory after DTrace has patched code.
 * The address argument is unused here: doflush(0) is sufficient on this
 * CPU (presumably the flush is not line-granular — confirm against the
 * doflush implementation).
 */
void
dtrace_flush_sec(uintptr_t addr)
{
	doflush(0);
}
/*
 * Configure trapstat for this CPU.  All recognized commands are no-ops on
 * this implementation; anything else is rejected with EINVAL.
 */
int
cpu_trapstat_conf(int cmd)
{
	if (cmd == CPU_TSTATCONF_INIT || cmd == CPU_TSTATCONF_FINI ||
	    cmd == CPU_TSTATCONF_ENABLE || cmd == CPU_TSTATCONF_DISABLE)
		return (0);
	return (EINVAL);
}
/*
 * Report per-page-size TLB miss statistics into the caller's buffer.
 * This CPU provides no such statistics, so every count and time field
 * (kernel/user x itlb/dtlb) is zeroed.
 */
void
cpu_trapstat_data(void *buf, uint_t tstat_pgszs)
{
	tstat_pgszdata_t *tp = (tstat_pgszdata_t *)buf;
	tstat_pgszdata_t *end = tp + tstat_pgszs;

	while (tp < end) {
		tp->tpgsz_kernel.tmode_itlb.ttlb_tlb.tmiss_count = 0;
		tp->tpgsz_kernel.tmode_itlb.ttlb_tlb.tmiss_time = 0;
		tp->tpgsz_user.tmode_itlb.ttlb_tlb.tmiss_count = 0;
		tp->tpgsz_user.tmode_itlb.ttlb_tlb.tmiss_time = 0;
		tp->tpgsz_kernel.tmode_dtlb.ttlb_tlb.tmiss_count = 0;
		tp->tpgsz_kernel.tmode_dtlb.ttlb_tlb.tmiss_time = 0;
		tp->tpgsz_user.tmode_dtlb.ttlb_tlb.tmiss_count = 0;
		tp->tpgsz_user.tmode_dtlb.ttlb_tlb.tmiss_time = 0;
		tp++;
	}
}
/*
 * Page-coloring hash translation.  Per page size, n2color describes how
 * memory-node (interleave) id bits are folded into and out of the hashed
 * page color (see n2_color2hash()/n2_hash2color()).  Initialized by
 * page_coloring_init_cpu().
 */
typedef struct n2color {
	uchar_t nnbits;	/* number of node id bits folded into the color */
	uchar_t nnmask;	/* node id mask: (1 << nnbits) - 1 */
	uchar_t lomask;	/* mask of color bits kept below the node bits */
	uchar_t lobits;	/* bit position where the node id is inserted */
} n2color_t;

n2color_t n2color[MMU_PAGE_SIZES];

/* number of hashed color bits per page size (indexed by szc, 8K..256M) */
static uchar_t nhbits[] = {7, 7, 6, 5, 5, 5};
/*
 * Convert a hashed (physical) color back into a free-list color by
 * removing the memory-node id bits that n2_color2hash() inserted.
 * Identity when no node bits overlap the color for this page size.
 */
static inline uint_t
n2_hash2color(uint_t color, uchar_t szc)
{
	n2color_t map = n2color[szc];

	if (map.nnbits == 0)
		return (color);

	color = ((color >> map.nnbits) & ~map.lomask) | (color & map.lomask);
	ASSERT((color & ~(hw_page_array[szc].hp_colors - 1)) == 0);
	return (color);
}
/*
 * Convert a free-list color into its hashed (physical) form by opening a
 * gap at bit position lobits and inserting the memory-node id bits there.
 * Identity when no node bits overlap the color for this page size.
 */
static inline uint_t
n2_color2hash(uint_t color, uchar_t szc, uint_t node)
{
	n2color_t map = n2color[szc];
	uint_t hashed = color;

	if (map.nnbits != 0) {
		hashed = ((color & ~map.lomask) << map.nnbits) |
		    (color & map.lomask);
		hashed |= (node & map.nnmask) << map.lobits;
	}
	return (hashed);
}
/*
 * Return the free-list color of the page at pfn for page size szc.
 *
 * The cookie selects how pfn is translated to a physical-address pfn:
 *   (void *)-1          : pfn is a real-address pfn; use
 *                         plat_rapfn_to_papfn()
 *   non-NULL iterator   : pfn must lie in the iterator's current mblock;
 *                         add the iterator's RA-to-PA delta
 *   NULL                : pfn is used as-is
 *
 * The raw hashed color is (pfn bits <19:15>) XOR (pfn bits <4:0>) of the
 * size-aligned base pfn; for page sizes below 4M, pfn bits <6:5> are
 * appended as the low color bits (with the lowest bit dropped for 512K).
 * The result is mapped back through n2_hash2color().
 */
uint_t
page_pfn_2_color_cpu(pfn_t pfn, uchar_t szc, void *cookie)
{
	mem_node_iterator_t *it = cookie;
	uint_t color;

	ASSERT(szc <= TTE256M);

	if (it == ((mem_node_iterator_t *)(-1))) {
		pfn = plat_rapfn_to_papfn(pfn);
	} else if (it != NULL) {
		ASSERT(pfn >= it->mi_mblock_base && pfn <= it->mi_mblock_end);
		pfn = pfn + it->mi_ra_to_pa;
	}
	/* Work from the size-aligned base pfn of the large page. */
	pfn = PFN_BASE(pfn, szc);
	color = ((pfn >> 15) ^ pfn) & 0x1f;
	if (szc < TTE4M) {
		/* 19:15 pfn hash with pfn bits 6:5 appended */
		color = (color << 2) | ((pfn >> 5) & 0x3);
		if (szc > TTE64K)
			color >>= 1;
	}
	return (n2_hash2color(color, szc));
}
/*
 * Return the raw hashed color of a physical-address pfn at page size szc.
 * Same computation as page_pfn_2_color_cpu() but without any RA-to-PA
 * translation and without the n2_hash2color() back-mapping.
 */
static uint_t
page_papfn_2_color_cpu(pfn_t papfn, uchar_t szc)
{
	pfn_t base;
	uint_t color;

	ASSERT(szc <= TTE256M);

	base = PFN_BASE(papfn, szc);
	color = ((base >> 15) ^ base) & 0x1f;
	if (szc >= TTE4M)
		return (color);

	/* append pfn bits <6:5>; 512K drops the lowest bit */
	color = (color << 2) | ((base >> 5) & 0x3);
	return ((szc > TTE64K) ? (color >> 1) : color);
}
#if TTE256M != 5
#error TTE256M is not 5
#endif

/*
 * Convert a color equivalence mask at page size szc into the equivalent
 * mask for the next larger page size (szc + 1).
 */
uint_t
page_get_nsz_color_mask_cpu(uchar_t szc, uint_t mask)
{
	/* per-size mask of hashed color bits that survive the promotion */
	static uint_t ni2_color_masks[5] = {0x63, 0x1e, 0x3e, 0x1f, 0x1f};
	uint_t hashed;

	ASSERT(szc < TTE256M);

	hashed = n2_color2hash(mask, szc, 0) & ni2_color_masks[szc];
	/* 64K and 512K colors have one extra low bit; drop it */
	if (szc == TTE64K || szc == TTE512K)
		hashed >>= 1;
	return (n2_hash2color(hashed, szc + 1));
}
/*
 * Convert a color at page size szc into the corresponding color for the
 * next larger page size (szc + 1).
 */
uint_t
page_get_nsz_color_cpu(uchar_t szc, uint_t color)
{
	uint_t hashed;

	ASSERT(szc < TTE256M);

	hashed = n2_color2hash(color, szc, 0);
	/* 64K and 512K colors have one extra low bit; drop it */
	if (szc == TTE64K || szc == TTE512K)
		hashed >>= 1;
	return (n2_hash2color(hashed, szc + 1));
}
/*
 * Return the difference in effective color-bit count between page size
 * szc and the larger size nszc, accounting for node bits folded into the
 * hashed color at each size.
 */
uint_t
page_get_color_shift_cpu(uchar_t szc, uchar_t nszc)
{
	ASSERT(nszc >= szc);
	ASSERT(nszc <= TTE256M);

	return ((nhbits[szc] - n2color[szc].nnbits) -
	    (nhbits[nszc] - n2color[nszc].nnbits));
}
/*
 * Convert a color at the larger page size nszc down to the corresponding
 * color at the smaller page size szc.
 */
uint_t
page_convert_color_cpu(uint_t ncolor, uchar_t szc, uchar_t nszc)
{
	uint_t hashed;

	ASSERT(nszc > szc);
	ASSERT(nszc <= TTE256M);

	hashed = n2_color2hash(ncolor, nszc, 0) <<
	    (nhbits[szc] - nhbits[nszc]);
	return (n2_hash2color(hashed, szc));
}
/*
 * Extract the memory-node id from a physical-address pfn.
 * NOTE: non-hygienic macro — it expands a reference to a local variable
 * named `it' (mem_node_iterator_t *), which must be in scope at every
 * use site.
 */
#define PAPFN_2_MNODE(pfn) \
	(((pfn) & it->mi_mnode_pfn_mask) >> it->mi_mnode_pfn_shift)
/*
 * Find the next pfn after `pfn' whose hashed color at page size szc
 * matches `color' under the equivalence mask `ceq_mask', within the
 * caller's memory node.
 *
 * `cookie' must be a mem_node_iterator_t and `pfn' a real-address pfn
 * inside the iterator's current mblock.  The search runs in physical-
 * address space (pfn + mi_ra_to_pa); when the result falls past
 * mi_mblock_end the iterator is advanced with plat_mem_node_iterator_init()
 * and the search continues in the next mblock.  Returns (pfn_t)-1 when
 * the iterator is exhausted.  `color_mask' is unused on this CPU.
 *
 * The color of a pfn is (bits <19:15> XOR bits <4:0>) of its size-aligned
 * base, with bits <6:5> appended for sizes below 4M — see
 * page_papfn_2_color_cpu().  The arithmetic below advances pfn while
 * holding the color-significant bits (and the mnode id bits) fixed via
 * ADD_MASKED.
 */
pfn_t
page_next_pfn_for_color_cpu(pfn_t pfn, uchar_t szc, uint_t color,
    uint_t ceq_mask, uint_t color_mask, void *cookie)
{
	mem_node_iterator_t *it = cookie;
	pfn_t pstep = PNUM_SIZE(szc);
	pfn_t npfn, pfn_ceq_mask, pfn_color;
	pfn_t tmpmask, mask = (pfn_t)-1;
	uint_t pfnmn;

	ASSERT((color & ~ceq_mask) == 0);
	ASSERT(pfn >= it->mi_mblock_base && pfn <= it->mi_mblock_end);

	/*
	 * Convert the caller's color/mask to hashed (physical) form once
	 * and cache them in the iterator; subsequent calls with the same
	 * iterator must pass the same color/mask.
	 */
	if (it->mi_init) {
		it->mi_hash_ceq_mask =
		    n2_color2hash(ceq_mask, szc, it->mi_mnode_mask);
		it->mi_hash_color =
		    n2_color2hash(color, szc, it->mi_mnode);
		it->mi_init = 0;
	} else {
		ASSERT(it->mi_hash_ceq_mask ==
		    n2_color2hash(ceq_mask, szc, it->mi_mnode_mask));
		ASSERT(it->mi_hash_color ==
		    n2_color2hash(color, szc, it->mi_mnode));
	}
	ceq_mask = it->mi_hash_ceq_mask;
	color = it->mi_hash_color;

	/* Switch to physical-address pfns for the search. */
	pfn += it->mi_ra_to_pa;

next_mem_block:
	pfnmn = PAPFN_2_MNODE(pfn);

	/*
	 * Case 1: the starting pfn already has the requested color and
	 * mnode — step forward while keeping the color bits fixed.
	 */
	if ((((page_papfn_2_color_cpu(pfn, szc) ^ color) & ceq_mask) == 0) &&
	    (pfnmn == it->mi_mnode)) {
		if (szc >= TTE512K) {
			if (szc >= TTE4M) {
				/* color lives in pfn bits <19:15> only */
				pfn_ceq_mask = ceq_mask << 15;
			} else {
				/* low color bit is pfn bit <6> */
				pfn_ceq_mask = ((ceq_mask & 1) << 6) |
				    ((ceq_mask >> 1) << 15);
			}
			pfn_ceq_mask |= it->mi_mnode_pfn_mask;
			npfn = ADD_MASKED(pfn, pstep, pfn_ceq_mask, mask);
			goto done;
		} else {
			/*
			 * 8K/64K: first try advancing without disturbing
			 * pfn bits <19:15> (the XOR half of the hash).
			 */
			pfn_ceq_mask = ((ceq_mask & 3) << 5) | (ceq_mask >> 2);
			pfn_ceq_mask |= it->mi_mnode_pfn_mask;
			npfn = ADD_MASKED(pfn, pstep, pfn_ceq_mask, mask);
			if ((((npfn ^ pfn) >> 15) & 0x1f) == 0)
				goto done;
			/*
			 * Bits <19:15> changed, breaking the hash; restart
			 * at the next <19:15> boundary and re-derive the
			 * low color bits from the new high bits.
			 */
			npfn = (pfn >> 15) << 15;
			npfn |= (ceq_mask & color & 3) << 5;
			pfn_ceq_mask = (szc == TTE8K) ? 0 :
			    (ceq_mask & 0x1c) << 13;
			pfn_ceq_mask |= it->mi_mnode_pfn_mask;
			npfn = ADD_MASKED(npfn, (1 << 15), pfn_ceq_mask, mask);
			npfn |= ((npfn >> 15) ^ (color >> 2)) & (ceq_mask >> 2);
			goto done;
		}
	}

	/*
	 * Case 2: wrong color or mnode at the start — construct the first
	 * candidate with the requested color, then step past pfn.
	 */
	if (szc >= TTE512K) {
		if (szc >= TTE4M) {
			/* place the color directly in pfn bits <19:15> */
			npfn = ((pfn >> 20) << 20) | (color << 15);
			pfn_ceq_mask = (ceq_mask << 15) | 0x7fff;
		} else {
			/* 512K: a single step may already fix the color */
			npfn = pfn + pstep;
			pfnmn = PAPFN_2_MNODE(npfn);
			if ((((page_papfn_2_color_cpu(npfn, szc) ^ color) &
			    ceq_mask) == 0) && (pfnmn == it->mi_mnode))
				goto done;
			pfn_ceq_mask = ((ceq_mask & 1) << 6) |
			    ((ceq_mask >> 1) << 15) | (0xff << 7);
			pfn_color = ((color & 1) << 6) | ((color >> 1) << 15);
			npfn = ((pfn >> 20) << 20) | pfn_color;
		}

		/* steer the candidate into the right memory node */
		if ((pfnmn = PAPFN_2_MNODE(npfn)) != it->mi_mnode)
			npfn += ((it->mi_mnode - pfnmn) & it->mi_mnode_mask) <<
			    it->mi_mnode_pfn_shift;

		pfn_ceq_mask |= it->mi_mnode_pfn_mask;
		while (npfn <= pfn) {
			npfn = ADD_MASKED(npfn, pstep, pfn_ceq_mask, mask);
		}
		goto done;
	}

	/*
	 * 8K/64K with wrong color/mnode.  Split the color into its pfn-bit
	 * placement: low two bits go to pfn bits <6:5>, the rest to the
	 * XOR term over bits <19:15>.
	 */
	pfn_ceq_mask = ((ceq_mask & 3) << 5) | (ceq_mask >> 2);
	pfn_color = ((color & 3) << 5) | (color >> 2);
	if (pfnmn == it->mi_mnode) {
		/* try to fix the color without moving bits <19:15> */
		npfn = (pfn & ~(pfn_t)0x7f);
		npfn |= (((pfn >> 15) & 0x1f) ^ pfn_color) & pfn_ceq_mask;
		npfn = (szc == TTE64K) ? (npfn & ~(pfn_t)0x7) : npfn;
		if (((page_papfn_2_color_cpu(npfn, szc) ^ color) &
		    ceq_mask) == 0) {
			/* the color is fixed - advance if needed */
			pfn_ceq_mask |= it->mi_mnode_pfn_mask;
			while (npfn <= pfn) {
				npfn = ADD_MASKED(npfn, pstep, pfn_ceq_mask,
				    mask);
			}
			if ((((npfn ^ pfn) >> 15) & 0x1f) == 0)
				goto done;
		}
	}

	/*
	 * Fall back: move to the next <19:15> boundary (8K) or <17:0>
	 * region (64K), fix the mnode, then re-derive the low color bits.
	 */
	npfn = (szc == TTE8K) ? ((pfn >> 15) << 15) :
	    (((pfn >> 18) << 18) | ((color & 0x1c) << 13));
	if ((pfnmn = PAPFN_2_MNODE(npfn)) != it->mi_mnode) {
		npfn += ((it->mi_mnode - pfnmn) & it->mi_mnode_mask) <<
		    it->mi_mnode_pfn_shift;
	}
	tmpmask = (szc == TTE8K) ? 0 : (ceq_mask & 0x1c) << 13;
	tmpmask |= it->mi_mnode_pfn_mask;
	while (npfn <= pfn) {
		npfn = ADD_MASKED(npfn, (1 << 15), tmpmask, mask);
	}
	npfn |= (((npfn >> 15) & 0x1f) ^ pfn_color) & pfn_ceq_mask;
	npfn = (szc == TTE64K) ? (npfn & ~(pfn_t)0x7) : npfn;

done:
	ASSERT(((page_papfn_2_color_cpu(npfn, szc) ^ color) & ceq_mask) == 0);
	ASSERT(PAPFN_2_MNODE(npfn) == it->mi_mnode);

	/* Convert back to a real-address pfn. */
	npfn -= it->mi_ra_to_pa;

	/* Ran past this mblock: advance the iterator and search again. */
	if (npfn > it->mi_mblock_end) {
		pfn = plat_mem_node_iterator_init(npfn, it->mi_mnode, szc, it,
		    0);
		if (pfn == (pfn_t)-1)
			return (pfn);
		ASSERT(pfn >= it->mi_mblock_base && pfn <= it->mi_mblock_end);
		pfn += it->mi_ra_to_pa;
		goto next_mem_block;
	}
	return (npfn);
}
/*
 * Initialize the n2color[] hash-translation table and the number of
 * free-list colors (hw_page_array[].hp_colors) for each page size, based
 * on how the memory-node id bits (from a platform iterator's
 * mi_mnode_pfn_mask) overlap the hashed color bits.
 *
 * Changes from the original: old-style `()' definition replaced with a
 * `(void)' prototype, and the bare memset() call now uses the file's
 * `(void) memset' convention.
 */
void
page_coloring_init_cpu(void)
{
	int i;
	uchar_t id;
	uchar_t lo;
	uchar_t hi;
	n2color_t m;
	mem_node_iterator_t it;
	static uchar_t idmask[] = {0, 0x7, 0x1f, 0x1f, 0x1f, 0x1f};

	/* Find the first memory node with a valid iterator. */
	for (i = 0; i < max_mem_nodes; i++) {
		(void) memset(&it, 0, sizeof (it));
		if (plat_mem_node_iterator_init(0, i, 0, &it, 1) != (pfn_t)-1)
			break;
	}
	ASSERT(i < max_mem_nodes);

	for (i = 0; i < mmu_page_sizes; i++) {
		(void) memset(&m, 0, sizeof (m));

		/* node id bits that coincide with pfn color bits <19:15> */
		id = it.mi_mnode_pfn_mask >> 15;
		id &= idmask[i];

		lo = lowbit(id);
		if (lo > 0) {
			/* node bits overlap the color: record the fold */
			hi = highbit(id);
			m.nnbits = hi - lo + 1;
			m.nnmask = (1 << m.nnbits) - 1;
			lo += nhbits[i] - 5;
			m.lomask = (1 << (lo - 1)) - 1;
			m.lobits = lo - 1;
		}
		/* node bits folded into the hash reduce the color count */
		hw_page_array[i].hp_colors = 1 << (nhbits[i] - m.nnbits);
		n2color[i] = m;
	}
}
/*
 * Adjust the global colorequivszc[] array based on the colorequiv tunable,
 * encoding each page size's equivalence shift as (high nibble: extra
 * shift, low nibble: shade shift), capped by the per-size shade table.
 */
void
page_set_colorequiv_arr_cpu(void)
{
	static uint_t nequiv_shades_log2[MMU_PAGE_SIZES] = {2, 5, 0, 0, 0, 0};

	/*
	 * NOTE(review): this static table is mutated in place on every
	 * call — presumably this function runs exactly once at startup;
	 * confirm before invoking it again.
	 */
	nequiv_shades_log2[1] -= n2color[1].nnbits;
	if (colorequiv > 1) {
		int i;
		uint_t sv_a = lowbit(colorequiv) - 1;

		/* cap the requested equivalence shift at 15 */
		if (sv_a > 15)
			sv_a = 15;

		for (i = 0; i < MMU_PAGE_SIZES; i++) {
			uint_t colors;
			uint_t a = sv_a;

			/* skip page sizes with at most one color */
			if ((colors = hw_page_array[i].hp_colors) <= 1)
				continue;
			/* shrink the shift until >= 1 color remains */
			while ((colors >> a) == 0)
				a--;
			/* only raise the encoded total, never lower it */
			if (a > (colorequivszc[i] & 0xf) +
			    (colorequivszc[i] >> 4)) {
				if (a <= nequiv_shades_log2[i]) {
					colorequivszc[i] = (uchar_t)a;
				} else {
					colorequivszc[i] =
					    ((a - nequiv_shades_log2[i]) << 4) |
					    nequiv_shades_log2[i];
				}
			}
		}
	}
}