#include <sys/types.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/cpu.h>
#include <sys/cmp.h>
#include <sys/elf_SPARC.h>
#include <vm/vm_dep.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kpm.h>
#include <sys/cpuvar.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/prom_debug.h>
#include <sys/prom_plat.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/clock.h>
#include <sys/platform_module.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/panic.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/taskq.h>
#include <sys/note.h>
#include <sys/ndifm.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/cpu/UltraSPARC-III.h>
#include <sys/fpras_impl.h>
#include <sys/dtrace.h>
#include <sys/watchpoint.h>
#include <sys/plat_ecc_unum.h>
#include <sys/cyclic.h>
#include <sys/errorq.h>
#include <sys/errclassify.h>
#include <sys/pghw.h>
#include <sys/clock_impl.h>
#ifdef CHEETAHPLUS_ERRATUM_25
#include <sys/xc_impl.h>
#endif
/*
 * Debug aids for E$ flush retries: logout snapshots captured before and
 * after a flush, and a count of retries performed.
 */
ch_cpu_logout_t clop_before_flush;
ch_cpu_logout_t clop_after_flush;
uint_t flush_retries_done = 0;
/*
 * Per-CPU physical addresses of the TL>0 error logout areas, a single
 * fallback logout structure (used when no per-CPU area has been set up --
 * see cpu_tl1_error), per-CPU "TL1 error pending" flags, and the taskq
 * used to re-check correctable errors after a delay.
 */
uint64_t ch_err_tl1_paddrs[NCPU];
ch_err_tl1_data_t ch_err_tl1_data;
uchar_t ch_err_tl1_pending[NCPU];
taskq_t *ch_check_ce_tq;
/*
 * Forward declarations for the error-logging, E$ probing, syndrome
 * decoding, scrubbing and FMA-payload helpers defined later in this file.
 */
static int cpu_async_log_err(void *flt, errorq_elem_t *eqep);
static void cpu_log_diag_info(ch_async_flt_t *ch_flt);
static void cpu_queue_one_event(ch_async_flt_t *ch_flt, char *reason,
ecc_type_to_info_t *eccp, ch_diag_data_t *cdp);
static int cpu_flt_in_memory_one_event(ch_async_flt_t *ch_flt,
uint64_t t_afsr_bit);
static int clear_ecc(struct async_flt *ecc);
#if defined(CPU_IMP_ECACHE_ASSOC)
static int cpu_ecache_line_valid(ch_async_flt_t *ch_flt);
#endif
int cpu_ecache_set_size(struct cpu *cp);
static int cpu_ectag_line_invalid(int cachesize, uint64_t tag);
int cpu_ectag_pa_to_subblk(int cachesize, uint64_t subaddr);
uint64_t cpu_ectag_to_pa(int setsize, uint64_t tag);
int cpu_ectag_pa_to_subblk_state(int cachesize,
uint64_t subaddr, uint64_t tag);
static void cpu_flush_ecache_line(ch_async_flt_t *ch_flt);
static int afsr_to_afar_status(uint64_t afsr, uint64_t afsr_bit);
static int afsr_to_esynd_status(uint64_t afsr, uint64_t afsr_bit);
static int afsr_to_msynd_status(uint64_t afsr, uint64_t afsr_bit);
static int afsr_to_synd_status(uint_t cpuid, uint64_t afsr, uint64_t afsr_bit);
static int synd_to_synd_code(int synd_status, ushort_t synd, uint64_t afsr_bit);
static int cpu_get_mem_unum_synd(int synd_code, struct async_flt *, char *buf);
static void cpu_uninit_ecache_scrub_dr(struct cpu *cp);
static void cpu_scrubphys(struct async_flt *aflt);
static void cpu_payload_add_aflt(struct async_flt *, nvlist_t *, nvlist_t *,
int *, int *);
static void cpu_payload_add_ecache(struct async_flt *, nvlist_t *);
static void cpu_ereport_init(struct async_flt *aflt);
static int cpu_check_secondary_errors(ch_async_flt_t *, uint64_t, uint64_t);
static uint8_t cpu_flt_bit_to_plat_error(struct async_flt *aflt);
static void cpu_log_fast_ecc_error(caddr_t tpc, int priv, int tl, uint64_t ceen,
uint64_t nceen, ch_cpu_logout_t *clop);
static int cpu_ce_delayed_ec_logout(uint64_t);
static int cpu_matching_ecache_line(uint64_t, void *, int, int *);
static int cpu_error_is_ecache_data(int, uint64_t);
static void cpu_fmri_cpu_set(nvlist_t *, int);
static int cpu_error_to_resource_type(struct async_flt *aflt);
/* Send-mondo livelock (Cheetah+ erratum 25) recovery helpers. */
#ifdef CHEETAHPLUS_ERRATUM_25
static int mondo_recover_proc(uint16_t, int);
static void cheetah_nudge_init(void);
static void cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr,
cyc_time_t *when);
static void cheetah_nudge_buddy(void);
#endif
/* L1 (D$/I$/P$) parity-error probing and FMA payload helpers. */
#if defined(CPU_IMP_L1_CACHE_PARITY)
static void cpu_dcache_parity_info(ch_async_flt_t *ch_flt);
static void cpu_dcache_parity_check(ch_async_flt_t *ch_flt, int index);
static void cpu_record_dc_data_parity(ch_async_flt_t *ch_flt,
ch_dc_data_t *dest_dcp, ch_dc_data_t *src_dcp, int way, int word);
static void cpu_icache_parity_info(ch_async_flt_t *ch_flt);
static void cpu_icache_parity_check(ch_async_flt_t *ch_flt, int index);
static void cpu_pcache_parity_info(ch_async_flt_t *ch_flt);
static void cpu_pcache_parity_check(ch_async_flt_t *ch_flt, int index);
static void cpu_payload_add_dcache(struct async_flt *, nvlist_t *);
static void cpu_payload_add_icache(struct async_flt *, nvlist_t *);
#endif
/*
 * Hook filled in by the platform/memory-controller driver to translate a
 * syndrome code + paddr into memory bank/segment information.
 */
int (*p2get_mem_info)(int synd_code, uint64_t paddr,
uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
int *segsp, int *banksp, int *mcidp);
/*
 * Special symbolic values stored in ecc_syndrome_tab[].  Entries 0-127
 * name the single data bit in error; the values below (>= 128) encode
 * check-bit errors (C0-C8), mtag bit errors (MT0-MT2), mtag check-bit
 * errors (MTC0-MTC3), multi-bit errors (M2/M3/M4/M), "not applicable"
 * (NA), and the special syndromes S003/S071/S11C.
 */
#define C0 128
#define C1 129
#define C2 130
#define C3 131
#define C4 132
#define C5 133
#define C6 134
#define C7 135
#define C8 136
#define MT0 137
#define MT1 138
#define MT2 139
#define MTC0 140
#define MTC1 141
#define MTC2 142
#define MTC3 143
#define M2 144
#define M3 145
#define M4 146
#define M 147
#define NA 148
#if defined(JALAPENO) || defined(SERRANO)
#define S003 149
#define S003MEM 150
#define SLAST S003MEM
#else
#define S003 149
#define S071 150
#define S11C 151
#define SLAST S11C
#endif
/* Jalapeno/Serrano-only bus-parity syndrome range. */
#if defined(JALAPENO) || defined(SERRANO)
#define BPAR0 152
#define BPAR15 167
#endif
/*
 * ECC syndrome decode table, indexed by the 9-bit E-syndrome.  Each
 * entry is either the data bit in error (0-127) or one of the symbolic
 * codes defined above.  Two rows differ between the Jalapeno/Serrano
 * and Cheetah/Cheetah+ ECC codes (S071 / S11C positions).
 */
static uint8_t ecc_syndrome_tab[] =
{
NA, C0, C1, S003, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
#if defined(JALAPENO) || defined(SERRANO)
116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
#else
116, S071, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
#endif
C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
#if defined(JALAPENO) || defined(SERRANO)
M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
#else
M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, S11C, M, M3, M,
#endif
M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
#define ESYND_TBL_SIZE (sizeof (ecc_syndrome_tab) / sizeof (uint8_t))
/*
 * Mtag syndrome decode table (Cheetah/Cheetah+ only); Jalapeno/Serrano
 * have no mtags, but do have 16 bus-parity syndromes (BSYND_TBL_SIZE).
 */
#if !(defined(JALAPENO) || defined(SERRANO))
short mtag_syndrome_tab[] =
{
NA, MTC0, MTC1, M2, MTC2, M2, M2, MT0, MTC3, M2, M2, MT1, M2, MT2, M2, M2
};
#define MSYND_TBL_SIZE (sizeof (mtag_syndrome_tab) / sizeof (short))
#else
#define BSYND_TBL_SIZE 16
#endif
/* Virtual-address bit 13 (as a shifted flag value, not a mask of VA). */
#define VA13 INT64_C(0x0000000000000002)
/* Classification of where a fault occurred, for resource-type mapping. */
#define ERRTYPE_UNKNOWN 0
#define ERRTYPE_CPU 1
#define ERRTYPE_MEMORY 2
#define ERRTYPE_ECACHE_DATA 3
/*
 * CE classification/disposition table, populated once in cpu_setup()
 * via CE_INITDISPTBL_POPULATE, plus extended-diagnosis (xdiag) knobs
 * and drop counters.
 */
static ce_dispact_t ce_disp_table[CE_INITDISPTBL_SIZE];
static int ce_disp_inited;
int ce_xdiag_off;
static int ce_xdiag_drops;
static int ce_xdiag_lkydrops;
static int ce_xdiag_ptnrdrops;
static int ce_xdiag_bad;
/* Context carried across a deferred "leaky" CE re-check callback. */
typedef struct {
struct async_flt *lkycb_aflt;
errorq_t *lkycb_eqp;
errorq_elem_t *lkycb_eqep;
} ce_lkychk_cb_t;
/* E$ flush scope selectors. */
#define ECACHE_FLUSH_LINE 1
#define ECACHE_FLUSH_ALL 2
/* STICK-synchronization tuning: iterations and max tolerated skew. */
#define STICK_ITERATION 10
#define MAX_TSKEW 1
/* Indices into timestamp[] for the master/slave sync events. */
#define EV_A_START 0
#define EV_A_END 1
#define EV_B_START 2
#define EV_B_END 3
#define EVENTS 4
static int64_t stick_iter = STICK_ITERATION;
static int64_t stick_tsk = MAX_TSKEW;
/* Handshake commands between master and slave during STICK sync. */
typedef enum {
EVENT_NULL = 0,
SLAVE_START,
SLAVE_CONT,
MASTER_START
} event_cmd_t;
static volatile event_cmd_t stick_sync_cmd = EVENT_NULL;
static int64_t timestamp[EVENTS];
static volatile int slave_done;
#ifdef DEBUG
/* Per-CPU history of measured skew values, DEBUG kernels only. */
#define DSYNC_ATTEMPTS 64
typedef struct {
int64_t skew_val[DSYNC_ATTEMPTS];
} ss_t;
ss_t stick_sync_stats[NCPU];
#endif
/* Nonzero once dual-pagesize TLB support is enabled (see cpu_setup). */
uint_t cpu_impl_dual_pgsz = 0;
#if defined(CPU_IMP_DUAL_PAGESIZE)
/* /etc/system tunable to force dual-pagesize support off. */
uint_t disable_dual_pgsz = 0;
#endif
/* DCU cache-enable bits captured at boot, restored after error handling. */
uint64_t cache_boot_state;
/* Mask of the low bits of root physical addresses (platform dependent). */
uint_t root_phys_addr_lo_mask = 0x7fffffu;
/*
 * Bus clock-divisor table, terminated by a {0, 0} entry.  Jalapeno and
 * Serrano use JBus divisors; other US-III parts use Safari divisors.
 */
bus_config_eclk_t bus_config_eclk[] = {
#if defined(JALAPENO) || defined(SERRANO)
{JBUS_CONFIG_ECLK_1_DIV, JBUS_CONFIG_ECLK_1},
{JBUS_CONFIG_ECLK_2_DIV, JBUS_CONFIG_ECLK_2},
{JBUS_CONFIG_ECLK_32_DIV, JBUS_CONFIG_ECLK_32},
#else
{SAFARI_CONFIG_ECLK_1_DIV, SAFARI_CONFIG_ECLK_1},
{SAFARI_CONFIG_ECLK_2_DIV, SAFARI_CONFIG_ECLK_2},
{SAFARI_CONFIG_ECLK_32_DIV, SAFARI_CONFIG_ECLK_32},
#endif
{0, 0}
};
/* Seconds to leave CEEN disabled after a CE storm; tunables below. */
int cpu_ceen_delay_secs = CPU_CEEN_DELAY_SECS;
int cpu_berr_to_verbose = 0;
/* AFSR bits whose CEs are never deferred (normal and extended AFSR). */
uint64_t cpu_ce_not_deferred = CPU_CE_NOT_DEFERRED;
uint64_t cpu_ce_not_deferred_ext = CPU_CE_NOT_DEFERRED_EXT;
cpuset_t cpu_offline_set;
static void cpu_delayed_check_ce_errors(void *);
static void cpu_check_ce_errors(void *);
void cpu_error_ecache_flush(ch_async_flt_t *);
static int cpu_error_ecache_flush_required(ch_async_flt_t *);
static void cpu_log_and_clear_ce(ch_async_flt_t *);
void cpu_ce_detected(ch_cpu_errors_t *, int);
/* Leaky-CE recheck timeout and partner-check cache lifetime. */
clock_t cpu_ce_lkychk_timeout_usec = 128000;
int cpu_ce_ptnr_cachetime_sec = 60;
/*
 * Overwrite a trap-table entry: copy the 32-byte (eight-instruction)
 * handler `ttlabel' over `ttentry', then flush the I$ so the new
 * instructions are fetched.  Wrapped in do/while (0) so the macro
 * expands to a single statement and is safe inside an unbraced
 * if/else; the original two-statement expansion was not.
 */
#define	CH_SET_TRAP(ttentry, ttlabel)				\
	do {							\
		bcopy((const void *)&ttlabel, &ttentry, 32);	\
		flush_instr_mem((caddr_t)&ttentry, 32);		\
	} while (0)
/* Smallest E$ size seen across CPUs, and saved %hcl scrub properties. */
static int min_ecache_size;
static uint_t priv_hcl_1;
static uint_t priv_hcl_2;
static uint_t priv_hcl_4;
static uint_t priv_hcl_8;
/*
 * One-time CPU-module setup, called early in boot: installs trap
 * handlers, advertises cache/ISA capabilities, configures page
 * coloring and KPM, and initializes the CE disposition table.
 * Order matters: later boot code consumes the globals set here.
 */
void
cpu_setup(void)
{
extern int at_flags;
extern int cpc_has_overflow_intr;
cpu_init_trap();
/* Cache attributes: virtually-addressed, physically tagged, I/O coherent. */
cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);
at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;
/* Capture the boot-time DCU cache-enable state for later restoration. */
cache_boot_state = get_dcu() & DCU_CACHE;
pp_slots = MIN(8, MAXPP_SLOTS);
pp_consistent_coloring = PPAGE_STORE_VCOLORING | PPAGE_LOADS_POLLUTE;
if (use_page_coloring) {
do_pg_coloring = 1;
}
/* ISA list and hwcap flags exported to userland (most capable first). */
isa_list =
"sparcv9+vis2 sparcv9+vis sparcv9 "
"sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
"sparcv8 sparcv8-fsmuld sparcv7 sparc";
cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2;
/* No VA hole on this implementation; 8TB KPM segment, small pages. */
hole_start = hole_end = 0;
kpm_size = (size_t)(8ull * 1024 * 1024 * 1024 * 1024);
kpm_size_shift = 43;
kpm_vbase = (caddr_t)0x8000000000000000ull;
kpm_smallpages = 1;
/* Use %stick for trap tracing; CPC overflow interrupt is available. */
traptrace_use_stick = 1;
cpc_has_overflow_intr = 1;
#if defined(CPU_IMP_DUAL_PAGESIZE)
if (!disable_dual_pgsz) {
cpu_impl_dual_pgsz = 1;
}
#endif
fpras_implemented = 1;
/* Build the CE classification table used by the CE handling code. */
CE_INITDISPTBL_POPULATE(ce_disp_table);
ce_disp_inited = 1;
}
/*
 * Derive the tick (%stick) frequency from the system clock frequency
 * discovered at boot.  A zero system_clock_freq is unrecoverable.
 */
void
cpu_init_tick_freq(void)
{
	if (system_clock_freq != 0) {
		sys_tick_freq = system_clock_freq;
		return;
	}
	cmn_err(CE_PANIC, "setcpudelay: invalid system_clock_freq");
}
#ifdef CHEETAHPLUS_ERRATUM_25
/*
 * Send-mondo livelock recovery (Cheetah+ erratum 25) tunables:
 * cheetah_bpe_off, recovery enable, full-memory-scan enable, and the
 * nudge-cyclic delay in seconds (clamped to the minimum below).
 */
int cheetah_bpe_off = 0;
int cheetah_sendmondo_recover = 1;
int cheetah_sendmondo_fullscan = 0;
int cheetah_sendmondo_recover_delay = 5;
#define CHEETAH_LIVELOCK_MIN_DELAY 1
/* One history record per recovery attempt. */
typedef struct cheetah_livelock_entry {
int cpuid;
int buddy;
clock_t lbolt;
hrtime_t recovery_time;
} cheetah_livelock_entry_t;
#define CHEETAH_LIVELOCK_NENTRY 32
cheetah_livelock_entry_t cheetah_livelock_hist[CHEETAH_LIVELOCK_NENTRY];
int cheetah_livelock_entry_nxt;
/* Grab the next history slot (circular buffer, no locking). */
#define CHEETAH_LIVELOCK_ENTRY_NEXT(statp) { \
statp = cheetah_livelock_hist + cheetah_livelock_entry_nxt; \
if (++cheetah_livelock_entry_nxt >= CHEETAH_LIVELOCK_NENTRY) { \
cheetah_livelock_entry_nxt = 0; \
} \
}
#define CHEETAH_LIVELOCK_ENTRY_SET(statp, item, val) statp->item = val
/* Cumulative counters for the recovery code paths (debug/observability). */
struct {
hrtime_t hrt;
int recovery;
int full_claimed;
int proc_entry;
int proc_tsb_scan;
int proc_tsb_partscan;
int proc_tsb_fullscan;
int proc_claimed;
int proc_user;
int proc_kernel;
int proc_onflt;
int proc_cpu;
int proc_thread;
int proc_proc;
int proc_as;
int proc_hat;
int proc_hat_inval;
int proc_hat_busy;
int proc_tsb_reloc;
int proc_cnum_bad;
int proc_cnum;
tte_t proc_tte;
} cheetah_livelock_stat;
#define CHEETAH_LIVELOCK_STAT(item) cheetah_livelock_stat.item++
#define CHEETAH_LIVELOCK_STATSET(item, value) \
cheetah_livelock_stat.item = value
/* Record `value' only if it exceeds the current maximum. */
#define CHEETAH_LIVELOCK_MAXSTAT(item, value) { \
if (value > cheetah_livelock_stat.item) \
cheetah_livelock_stat.item = value; \
}
/*
 * Attempt to recover from a suspected send-mondo livelock (Cheetah+
 * erratum 25) by claiming, on behalf of target CPU `cpuid', the E$
 * lines backing the target's current TSB entries.  Walks the target's
 * user TSB chain (or the kernel TSB if the target is in the kernel),
 * claiming lines for each valid, cacheable, non-invalid entry and
 * re-shipping the mondo on bus `bn' whenever the busy bit clears,
 * until the dispatch status register shows neither busy nor nack.
 *
 * All accesses to the remote CPU's thread/proc/hat structures are made
 * without locks under on_fault() protection: any fault or inconsistent
 * structure aborts via `badstruct'.
 *
 * Returns 1 if the mondo appears to have been accepted (busy and nack
 * clear), 0 otherwise.
 */
int
mondo_recover_proc(uint16_t cpuid, int bn)
{
	label_t ljb;
	cpu_t *cp;
	kthread_t *t;
	proc_t *p;
	struct as *as;
	struct hat *hat;
	uint_t cnum;
	struct tsb_info *tsbinfop;
	struct tsbe *tsbep;
	caddr_t tsbp;
	caddr_t end_tsbp;
	uint64_t paddr;
	uint64_t idsr;
	u_longlong_t pahi, palo;
	int pages_claimed = 0;
	tte_t tsbe_tte;
	int tried_kernel_tsb = 0;
	mmu_ctx_t *mmu_ctxp;

	CHEETAH_LIVELOCK_STAT(proc_entry);

	/* Any fault while chasing remote structures aborts the scan. */
	if (on_fault(&ljb)) {
		CHEETAH_LIVELOCK_STAT(proc_onflt);
		goto badstruct;
	}

	if ((cp = cpu[cpuid]) == NULL) {
		CHEETAH_LIVELOCK_STAT(proc_cpu);
		goto badstruct;
	}

	if ((t = cp->cpu_thread) == NULL) {
		CHEETAH_LIVELOCK_STAT(proc_thread);
		goto badstruct;
	}

	if ((p = ttoproc(t)) == NULL) {
		CHEETAH_LIVELOCK_STAT(proc_proc);
		goto badstruct;
	}

	if ((as = p->p_as) == NULL) {
		CHEETAH_LIVELOCK_STAT(proc_as);
		goto badstruct;
	}

	if ((hat = as->a_hat) == NULL) {
		CHEETAH_LIVELOCK_STAT(proc_hat);
		goto badstruct;
	}

	if (hat != ksfmmup) {
		/* Target is in userland: use its first TSB. */
		CHEETAH_LIVELOCK_STAT(proc_user);
		if (hat->sfmmu_flags & (HAT_BUSY | HAT_SWAPPED | HAT_SWAPIN)) {
			CHEETAH_LIVELOCK_STAT(proc_hat_busy);
			goto badstruct;
		}
		tsbinfop = hat->sfmmu_tsb;
		if (tsbinfop == NULL) {
			CHEETAH_LIVELOCK_STAT(proc_hat_inval);
			goto badstruct;
		}
		tsbp = tsbinfop->tsb_va;
		end_tsbp = tsbp + TSB_BYTES(tsbinfop->tsb_szc);
	} else {
		/* Target is in the kernel: scan the kernel TSB. */
		CHEETAH_LIVELOCK_STAT(proc_kernel);
		tsbinfop = NULL;
		tsbp = ktsb_base;
		end_tsbp = tsbp + TSB_BYTES(ktsb_sz);
	}

	if (hat->sfmmu_as != as) {
		CHEETAH_LIVELOCK_STAT(proc_hat_inval);
		goto badstruct;
	}

	mmu_ctxp = CPU_MMU_CTXP(cp);
	ASSERT(mmu_ctxp);
	cnum = hat->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
	CHEETAH_LIVELOCK_STATSET(proc_cnum, cnum);

	/*
	 * Validate the context number.  cnum is unsigned, so a negative
	 * check would be vacuous (the original `cnum < 0' test could
	 * never fire); check only against INVALID_CONTEXT and the
	 * context-domain limit.
	 */
	if ((cnum == INVALID_CONTEXT) || (cnum >= mmu_ctxp->mmu_nctxs)) {
		CHEETAH_LIVELOCK_STAT(proc_cnum_bad);
		goto badstruct;
	}

	do {
		CHEETAH_LIVELOCK_STAT(proc_tsb_scan);

		/* Skip a user TSB that is being relocated. */
		if (hat != ksfmmup &&
		    (tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
			CHEETAH_LIVELOCK_STAT(proc_tsb_reloc);
			goto next_tsbinfo;
		}

		for (tsbep = (struct tsbe *)tsbp;
		    tsbep < (struct tsbe *)end_tsbp; tsbep++) {
			tsbe_tte = tsbep->tte_data;

			if (tsbe_tte.tte_val == 0) {
				continue;
			}
			if (tsbe_tte.tte_se) {
				continue;
			}
			if (tsbe_tte.tte_cp == 0) {
				continue;
			}
			if (tsbep->tte_tag.tag_invalid != 0) {
				continue;
			}
			CHEETAH_LIVELOCK_STATSET(proc_tte, tsbe_tte);

			/* Done as soon as the dispatch register is clear. */
			idsr = getidsr();
			if ((idsr & (IDSR_NACK_BIT(bn) |
			    IDSR_BUSY_BIT(bn))) == 0) {
				CHEETAH_LIVELOCK_STAT(proc_tsb_partscan);
				goto done;
			}

			pahi = tsbe_tte.tte_pahi;
			palo = tsbe_tte.tte_palo;
			paddr = (uint64_t)((pahi << 32) |
			    (palo << MMU_PAGESHIFT));

			/*
			 * Claim the E$ lines for this mapping, then
			 * re-ship the mondo if the busy bit dropped.
			 */
			claimlines(paddr, TTEBYTES(TTE_CSZ(&tsbe_tte)),
			    CH_ECACHE_SUBBLK_SIZE);
			if ((idsr & IDSR_BUSY_BIT(bn)) == 0) {
				shipit(cpuid, bn);
			}
			pages_claimed++;
		}
next_tsbinfo:
		/*
		 * Advance to the next user TSB, or fall back to the
		 * kernel TSB once the user chain is exhausted.
		 */
		if (tsbinfop != NULL)
			tsbinfop = tsbinfop->tsb_next;
		if (tsbinfop != NULL) {
			tsbp = tsbinfop->tsb_va;
			end_tsbp = tsbp + TSB_BYTES(tsbinfop->tsb_szc);
		} else if (tsbp == ktsb_base) {
			tried_kernel_tsb = 1;
		} else if (!tried_kernel_tsb) {
			tsbp = ktsb_base;
			end_tsbp = tsbp + TSB_BYTES(ktsb_sz);
			hat = ksfmmup;
			tsbinfop = NULL;
		}
	} while (tsbinfop != NULL ||
	    ((tsbp == ktsb_base) && !tried_kernel_tsb));

	CHEETAH_LIVELOCK_STAT(proc_tsb_fullscan);
	CHEETAH_LIVELOCK_MAXSTAT(proc_claimed, pages_claimed);
	no_fault();
	/* Full scan finished; report success only if the IDSR is clear. */
	idsr = getidsr();
	if ((idsr & (IDSR_NACK_BIT(bn) |
	    IDSR_BUSY_BIT(bn))) == 0) {
		return (1);
	} else {
		return (0);
	}

done:
	no_fault();
	CHEETAH_LIVELOCK_MAXSTAT(proc_claimed, pages_claimed);
	return (1);

badstruct:
	no_fault();
	return (0);
}
/*
 * Top-level send-mondo livelock recovery.  Serialized through the
 * sendmondo_in_recover flag: a second caller spins until the first
 * finishes and then reports failure (retval 0) so its own send loop
 * retries.  Tries the targeted TSB-based scan first; optionally falls
 * back to claiming every page of physical memory if
 * cheetah_sendmondo_fullscan is set.  Returns 1 on presumed success.
 */
int
mondo_recover(uint16_t cpuid, int bn)
{
struct memseg *seg;
uint64_t begin_pa, end_pa, cur_pa;
hrtime_t begin_hrt, end_hrt;
int retval = 0;
int pages_claimed = 0;
cheetah_livelock_entry_t *histp;
uint64_t idsr;
/* Only one recovery at a time; losers wait, then let caller retry. */
if (atomic_cas_32(&sendmondo_in_recover, 0, 1) != 0) {
while (sendmondo_in_recover) {
drv_usecwait(1);
}
return (retval);
}
/* Record this attempt in the circular history buffer. */
CHEETAH_LIVELOCK_ENTRY_NEXT(histp);
CHEETAH_LIVELOCK_ENTRY_SET(histp, lbolt, LBOLT_WAITFREE);
CHEETAH_LIVELOCK_ENTRY_SET(histp, cpuid, cpuid);
CHEETAH_LIVELOCK_ENTRY_SET(histp, buddy, CPU->cpu_id);
begin_hrt = gethrtime_waitfree();
/* Fast path: claim lines backing the target's TSB entries. */
if (mondo_recover_proc(cpuid, bn) == 1) {
goto done;
}
if (cheetah_sendmondo_fullscan == 0) {
retval = 1;
goto done;
}
/* Slow path: sweep all physical memory, re-shipping as we go. */
for (seg = memsegs; seg; seg = seg->next) {
begin_pa = (uint64_t)(seg->pages_base) << MMU_PAGESHIFT;
end_pa = (uint64_t)(seg->pages_end) << MMU_PAGESHIFT;
for (cur_pa = begin_pa; cur_pa < end_pa;
cur_pa += MMU_PAGESIZE) {
idsr = getidsr();
if ((idsr & (IDSR_NACK_BIT(bn) |
IDSR_BUSY_BIT(bn))) == 0) {
goto done;
}
claimlines(cur_pa, MMU_PAGESIZE,
CH_ECACHE_SUBBLK_SIZE);
if ((idsr & IDSR_BUSY_BIT(bn)) == 0) {
shipit(cpuid, bn);
}
pages_claimed++;
}
}
retval = 1;
done:
/* Update stats/history, then release the recovery lock. */
end_hrt = gethrtime_waitfree();
CHEETAH_LIVELOCK_STAT(recovery);
CHEETAH_LIVELOCK_MAXSTAT(hrt, (end_hrt - begin_hrt));
CHEETAH_LIVELOCK_MAXSTAT(full_claimed, pages_claimed);
CHEETAH_LIVELOCK_ENTRY_SET(histp, recovery_time, \
(end_hrt - begin_hrt));
while (atomic_cas_32(&sendmondo_in_recover, 1, 0) != 1)
;
return (retval);
}
/*
 * Omni-cyclic online callback: program a low-level cyclic that fires
 * cheetah_nudge_buddy() on this CPU every
 * cheetah_sendmondo_recover_delay seconds (clamped to the minimum),
 * with the start time staggered by CPU id so CPUs don't nudge in
 * lock-step.
 */
static void
cheetah_nudge_onln(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
{
	/* Clamp the tunable before using it to derive the interval. */
	if (cheetah_sendmondo_recover_delay < CHEETAH_LIVELOCK_MIN_DELAY)
		cheetah_sendmondo_recover_delay = CHEETAH_LIVELOCK_MIN_DELAY;

	hdlr->cyh_arg = NULL;
	hdlr->cyh_level = CY_LOW_LEVEL;
	hdlr->cyh_func = (cyc_func_t)cheetah_nudge_buddy;

	when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
	when->cyt_interval = cheetah_sendmondo_recover_delay * NANOSEC;
}
/*
 * Install the per-CPU "nudge" omni-cyclic used to break send-mondo
 * livelocks.  Pointless on a single-CPU system, so skip it there.
 */
static void
cheetah_nudge_init(void)
{
	cyc_omni_handler_t omni;

	if (max_ncpus == 1)
		return;

	omni.cyo_online = cheetah_nudge_onln;
	omni.cyo_offline = NULL;
	omni.cyo_arg = NULL;

	/* cyclic_add_omni requires cpu_lock to be held. */
	mutex_enter(&cpu_lock);
	(void) cyclic_add_omni(&omni);
	mutex_exit(&cpu_lock);
}
void
cheetah_nudge_buddy(void)
{
kpreempt_disable();
if ((CPU->cpu_next_onln != CPU) && (sendmondo_in_recover == 0)) {
xt_one(CPU->cpu_next_onln->cpu_id, (xcfunc_t *)xt_sync_tl1,
0, 0);
}
kpreempt_enable();
}
#endif
#ifdef SEND_MONDO_STATS
/* Histograms of cross-call delivery times and retry counts (debug). */
uint32_t x_one_stimes[64];
uint32_t x_one_ltimes[16];
uint32_t x_set_stimes[64];
uint32_t x_set_ltimes[16];
uint32_t x_set_cpus[NCPU];
uint32_t x_nack_stimes[64];
#endif
/*
 * Deliver a mondo (cross-call) to a single CPU and spin until the
 * dispatch status register reports it accepted.  Extends the timeout
 * across observed %tick jumps, attempts erratum-25 livelock recovery
 * once before giving up, and panics on a hard timeout (unless a panic
 * is already quiescing the system).
 */
void
send_one_mondo(int cpuid)
{
int busy, nack;
uint64_t idsr, starttick, endtick, tick, lasttick;
uint64_t busymask;
#ifdef CHEETAHPLUS_ERRATUM_25
int recovered = 0;
#endif
CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
starttick = lasttick = gettick();
shipit(cpuid, 0);
endtick = starttick + xc_tick_limit;
busy = nack = 0;
/* Jalapeno/Serrano have per-target busy bits; others a single bit. */
#if defined(JALAPENO) || defined(SERRANO)
busymask = IDSR_BUSY_BIT(cpuid);
#else
busymask = IDSR_BUSY;
#endif
for (;;) {
idsr = getidsr();
if (idsr == 0)
break;
tick = gettick();
/*
 * If %tick jumped (e.g. OBP or kmdb stole cycles), stretch
 * the deadline by the same amount rather than timing out.
 */
if (tick > (lasttick + xc_tick_jump_limit))
endtick += (tick - lasttick);
lasttick = tick;
if (tick > endtick) {
if (panic_quiesce)
return;
#ifdef CHEETAHPLUS_ERRATUM_25
/* One shot at livelock recovery before panicking. */
if (cheetah_sendmondo_recover && recovered == 0) {
if (mondo_recover(cpuid, 0)) {
recovered++;
}
tick = gettick();
endtick = tick + xc_tick_limit;
lasttick = tick;
continue;
} else
#endif
{
cmn_err(CE_PANIC, "send mondo timeout "
"(target 0x%x) [%d NACK %d BUSY]",
cpuid, nack, busy);
}
}
if (idsr & busymask) {
busy++;
continue;
}
/* NACKed: brief pause, then re-ship the mondo. */
drv_usecwait(1);
shipit(cpuid, 0);
nack++;
busy = 0;
}
#ifdef SEND_MONDO_STATS
{
int n = gettick() - starttick;
if (n < 8192)
x_one_stimes[n >> 7]++;
else
x_one_ltimes[(n >> 13) & 0xf]++;
}
#endif
}
/*
 * FPU sync is not needed on this CPU implementation; this stub exists
 * to satisfy the common-code interface.
 */
void
syncfpu(void)
{
}
/*
 * Return the size of this CPU module's async-fault structure, so
 * common code can allocate error-queue elements of the right size.
 */
int
cpu_aflt_size(void)
{
return (sizeof (ch_async_flt_t));
}
/* Tunable: set to 0 to skip scanning other CPUs' logout areas at panic. */
int enable_check_other_cpus_logout = 1;
/*
 * Examine one logout structure belonging to CPU `cpuid'.  If it holds
 * valid data, build a ch_async_flt from it, queue ereports for any
 * recognized AFSR error bits (dispatching an INVALID_AFSR ereport if
 * none match), then invalidate the logout area for reuse.
 */
static void
cpu_check_cpu_logout(int cpuid, caddr_t tpc, int tl, int ecc_type,
ch_cpu_logout_t *clop)
{
struct async_flt *aflt;
ch_async_flt_t ch_flt;
uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
/* Nothing captured here. */
if (clop == NULL || clop->clo_data.chd_afar == LOGOUT_INVALID) {
return;
}
bzero(&ch_flt, sizeof (ch_async_flt_t));
t_afar = clop->clo_data.chd_afar;
t_afsr = clop->clo_data.chd_afsr;
t_afsr_ext = clop->clo_data.chd_afsr_ext;
#if defined(SERRANO)
ch_flt.afar2 = clop->clo_data.chd_afar2;
#endif
t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
(t_afsr & C_AFSR_ALL_ERRS);
/* Populate the fault record from the captured register values. */
aflt = (struct async_flt *)&ch_flt;
aflt->flt_id = gethrtime_waitfree();
ch_flt.afsr_ext = t_afsr_ext;
ch_flt.afsr_errs = t_afsr_errs;
aflt->flt_stat = t_afsr;
aflt->flt_addr = t_afar;
aflt->flt_bus_id = cpuid;
aflt->flt_inst = cpuid;
aflt->flt_pc = tpc;
aflt->flt_prot = AFLT_PROT_NONE;
aflt->flt_class = CPU_FAULT;
aflt->flt_priv = ((t_afsr & C_AFSR_PRIV) != 0);
aflt->flt_tl = tl;
aflt->flt_status = ecc_type;
aflt->flt_panic = C_AFSR_PANIC(t_afsr_errs);
/* No recognizable error bits: report the AFSR itself as invalid. */
if (cpu_queue_events(&ch_flt, NULL, t_afsr_errs, clop) == 0) {
ch_flt.flt_type = CPU_INV_AFSR;
cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_INVALID_AFSR,
(void *)&ch_flt, sizeof (ch_async_flt_t), ue_queue,
aflt->flt_panic);
}
/* Mark the logout area free for the next capture. */
bzero(clop, sizeof (ch_cpu_logout_t));
clop->clo_data.chd_afar = LOGOUT_INVALID;
}
/*
 * Sweep every other existing CPU's private logout areas (TL1, fast-ECC,
 * correctable and deferred) and process any captured-but-unreported
 * error data via cpu_check_cpu_logout().  Our own CPU is skipped, as
 * are CPUs without a private area.
 */
static void
cpu_check_other_cpus_logout(void)
{
int i, j;
processorid_t myid;
struct cpu *cp;
ch_err_tl1_data_t *cl1p;
myid = CPU->cpu_id;
for (i = 0; i < NCPU; i++) {
cp = cpu[i];
if ((cp == NULL) || !(cp->cpu_flags & CPU_EXISTS) ||
(cp->cpu_id == myid) || (CPU_PRIVATE(cp) == NULL)) {
continue;
}
/* One TL1 logout structure per trap level. */
cl1p = CPU_PRIVATE_PTR(cp, chpr_tl1_err_data[0]);
for (j = 0; j < CH_ERR_TL1_TLMAX; j++, cl1p++) {
if (cl1p->ch_err_tl1_flags == 0)
continue;
cpu_check_cpu_logout(i, (caddr_t)cl1p->ch_err_tl1_tpc,
1, ECC_F_TRAP, &cl1p->ch_err_tl1_logout);
}
cpu_check_cpu_logout(i, NULL, 0, ECC_F_TRAP,
CPU_PRIVATE_PTR(cp, chpr_fecctl0_logout));
cpu_check_cpu_logout(i, NULL, 0, ECC_C_TRAP,
CPU_PRIVATE_PTR(cp, chpr_cecc_logout));
cpu_check_cpu_logout(i, NULL, 0, ECC_D_TRAP,
CPU_PRIVATE_PTR(cp, chpr_async_logout));
}
}
/*
 * Fast-ECC (trap type 0x70) handler entry, called from the trap code.
 * Picks up the logout area and saved CEEN/NCEEN state -- from the
 * CPU-private area if one exists, otherwise from the flags word the
 * trap handler passed in -- and hands off to cpu_log_fast_ecc_error().
 */
void
cpu_fast_ecc_error(struct regs *rp, ulong_t p_clo_flags)
{
ch_cpu_logout_t *clop;
uint64_t ceen, nceen;
if (CPU_PRIVATE(CPU) == NULL) {
clop = NULL;
ceen = p_clo_flags & EN_REG_CEEN;
nceen = p_clo_flags & EN_REG_NCEEN;
} else {
clop = CPU_PRIVATE_PTR(CPU, chpr_fecctl0_logout);
ceen = clop->clo_flags & EN_REG_CEEN;
nceen = clop->clo_flags & EN_REG_NCEEN;
}
cpu_log_fast_ecc_error((caddr_t)rp->r_pc,
(rp->r_tstate & TSTATE_PRIV) ? 1 : 0, 0, ceen, nceen, clop);
}
/*
 * Common fast-ECC error logging, used by both the TL=0 entry above and
 * the TL>0 path in cpu_tl1_error().  Gathers AFSR/AFAR data (from the
 * logout area if available, else directly from the error registers),
 * builds and queues the fault record, applies the UCU/WDU "panic if
 * the writeback also failed" heuristics, re-enables error traps, and
 * finally flushes the E$.
 */
static void
cpu_log_fast_ecc_error(caddr_t tpc, int priv, int tl, uint64_t ceen,
uint64_t nceen, ch_cpu_logout_t *clop)
{
struct async_flt *aflt;
ch_async_flt_t ch_flt;
uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
char pr_reason[MAX_REASON_STRING];
ch_cpu_errors_t cpu_error_regs;
bzero(&ch_flt, sizeof (ch_async_flt_t));
/*
 * No logout area: read the error registers directly and write them
 * back (which clears the logged error bits).
 */
if (clop == NULL) {
ch_flt.flt_diag_data.chd_afar = LOGOUT_INVALID;
get_cpu_error_state(&cpu_error_regs);
set_cpu_error_state(&cpu_error_regs);
t_afar = cpu_error_regs.afar;
t_afsr = cpu_error_regs.afsr;
t_afsr_ext = cpu_error_regs.afsr_ext;
#if defined(SERRANO)
ch_flt.afar2 = cpu_error_regs.afar2;
#endif
} else {
t_afar = clop->clo_data.chd_afar;
t_afsr = clop->clo_data.chd_afsr;
t_afsr_ext = clop->clo_data.chd_afsr_ext;
#if defined(SERRANO)
ch_flt.afar2 = clop->clo_data.chd_afar2;
#endif
}
t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
(t_afsr & C_AFSR_ALL_ERRS);
pr_reason[0] = '\0';
/* Build the common async-fault record. */
aflt = (struct async_flt *)&ch_flt;
aflt->flt_id = gethrtime_waitfree();
ch_flt.afsr_ext = t_afsr_ext;
ch_flt.afsr_errs = t_afsr_errs;
aflt->flt_stat = t_afsr;
aflt->flt_addr = t_afar;
aflt->flt_bus_id = getprocessorid();
aflt->flt_inst = CPU->cpu_id;
aflt->flt_pc = tpc;
aflt->flt_prot = AFLT_PROT_NONE;
aflt->flt_class = CPU_FAULT;
aflt->flt_priv = priv;
aflt->flt_tl = tl;
aflt->flt_status = ECC_F_TRAP;
aflt->flt_panic = C_AFSR_PANIC(t_afsr_errs);
/*
 * For an otherwise-survivable UCU in privileged code with no
 * on_trap/lofault protection, panic if the corresponding writeback
 * (WDU, and L3_WDU on Panther) also failed at the same AFAR --
 * the bad line cannot be recovered.
 */
if ((t_afsr_errs & (C_AFSR_UCU | C_AFSR_L3_UCU)) &&
aflt->flt_panic == 0 && aflt->flt_priv != 0 &&
curthread->t_ontrap == NULL &&
curthread->t_lofault == (uintptr_t)NULL) {
get_cpu_error_state(&cpu_error_regs);
if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
aflt->flt_panic |=
((cpu_error_regs.afsr & C_AFSR_WDU) &&
(cpu_error_regs.afsr_ext & C_AFSR_L3_WDU) &&
(cpu_error_regs.afar == t_afar));
aflt->flt_panic |= ((clop == NULL) &&
(t_afsr_errs & C_AFSR_WDU) &&
(t_afsr_errs & C_AFSR_L3_WDU));
} else {
aflt->flt_panic |=
((cpu_error_regs.afsr & C_AFSR_WDU) &&
(cpu_error_regs.afar == t_afar));
aflt->flt_panic |= ((clop == NULL) &&
(t_afsr_errs & C_AFSR_WDU));
}
}
/*
 * Queue events for the recognized error bits; if none matched (or
 * no fast-ECC bit was set at all) report an invalid AFSR.
 */
if (cpu_queue_events(&ch_flt, pr_reason, t_afsr_errs, clop) == 0 ||
((t_afsr_errs & (C_AFSR_FECC_ERRS | C_AFSR_EXT_FECC_ERRS)) == 0)) {
ch_flt.flt_type = CPU_INV_AFSR;
cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_INVALID_AFSR,
(void *)&ch_flt, sizeof (ch_async_flt_t), ue_queue,
aflt->flt_panic);
}
/* Release the logout area for the next capture. */
if (clop) {
bzero(clop, sizeof (ch_cpu_logout_t));
clop->clo_data.chd_afar = LOGOUT_INVALID;
}
/* Restore CEEN/NCEEN, then pick up any secondary errors. */
set_error_enable(get_error_enable() | (nceen | ceen));
if (clear_errors(&ch_flt)) {
aflt->flt_panic |= ((ch_flt.afsr_errs &
(C_AFSR_EXT_ASYNC_ERRS | C_AFSR_ASYNC_ERRS)) != 0);
(void) cpu_queue_events(&ch_flt, pr_reason, ch_flt.afsr_errs,
NULL);
}
if (aflt->flt_panic)
fm_panic("%sError(s)", pr_reason);
/* Flush the E$ to displace the (possibly bad) line. */
cpu_flush_ecache();
}
/*
 * Handle errors that were detected at TL>0 and recorded by the TL1
 * trap handlers.  Walks the pending per-trap-level logout structures
 * (or the single fallback structure if no per-CPU area exists), copies
 * each one out and invalidates it, then logs fast-ECC and L1 parity
 * errors both from the captured data and from the "multiple events"
 * flags (where only the fact of the error, not its data, survived).
 */
void
cpu_tl1_error(struct regs *rp, int panic)
{
ch_err_tl1_data_t *cl1p, cl1;
int i, ncl1ps;
uint64_t me_flags;
uint64_t ceen, nceen;
if (ch_err_tl1_paddrs[CPU->cpu_id] == 0) {
/* No per-CPU area yet (early boot): use the shared fallback. */
cl1p = &ch_err_tl1_data;
ncl1ps = 1;
} else if (CPU_PRIVATE(CPU) != NULL) {
cl1p = CPU_PRIVATE_PTR(CPU, chpr_tl1_err_data[0]);
ncl1ps = CH_ERR_TL1_TLMAX;
} else {
ncl1ps = 0;
}
for (i = 0; i < ncl1ps; i++, cl1p++) {
if (cl1p->ch_err_tl1_flags == 0)
continue;
/* Snapshot and release the logout slot before processing. */
cl1 = *cl1p;
bzero(cl1p, sizeof (ch_err_tl1_data_t));
cl1p->ch_err_tl1_logout.clo_data.chd_afar = LOGOUT_INVALID;
me_flags = CH_ERR_ME_FLAGS(cl1.ch_err_tl1_flags);
if (cl1.ch_err_tl1_flags & CH_ERR_FECC) {
ceen = get_error_enable() & EN_REG_CEEN;
nceen = get_error_enable() & EN_REG_NCEEN;
cpu_log_fast_ecc_error((caddr_t)cl1.ch_err_tl1_tpc, 1,
1, ceen, nceen, &cl1.ch_err_tl1_logout);
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
if (cl1.ch_err_tl1_flags & (CH_ERR_IPE | CH_ERR_DPE)) {
cpu_parity_error(rp, cl1.ch_err_tl1_flags,
(caddr_t)cl1.ch_err_tl1_tpc);
}
#endif
/*
 * Multiple-event flags: another error of the same class hit
 * while the slot was in use, so log from live registers.
 */
if (me_flags & CH_ERR_FECC) {
ch_cpu_errors_t cpu_error_regs;
uint64_t t_afsr_errs;
get_cpu_error_state(&cpu_error_regs);
t_afsr_errs = (cpu_error_regs.afsr_ext &
C_AFSR_EXT_ALL_ERRS) |
(cpu_error_regs.afsr & C_AFSR_ALL_ERRS);
if (t_afsr_errs != 0) {
ceen = get_error_enable() & EN_REG_CEEN;
nceen = get_error_enable() & EN_REG_NCEEN;
cpu_log_fast_ecc_error((caddr_t)NULL, 1,
1, ceen, nceen, NULL);
}
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
if (me_flags & (CH_ERR_IPE | CH_ERR_DPE)) {
cpu_parity_error(rp, me_flags, (caddr_t)NULL);
}
#endif
}
}
/*
 * Called from the trap code for an unsurvivable TL>0 error: log
 * whatever was captured, then panic.  Never returns.
 */
void
cpu_tl1_err_panic(struct regs *rp, ulong_t flags)
{
cpu_tl1_error(rp, 1);
fm_panic("Unsurvivable ECC Error at TL>0");
}
/*
 * Handle a disrupting (correctable, CEEN-gated) error trap.  Gathers
 * AFSR/AFAR data from the CPU-private CE logout area (or directly from
 * the error registers if no private area exists), decides whether CE
 * reporting may be deferred, logs and clears the CE via
 * cpu_log_and_clear_ce(), then picks up any secondary errors.
 */
void
cpu_disrupting_error(struct regs *rp, ulong_t p_clo_flags)
{
	struct async_flt *aflt;
	ch_async_flt_t ch_flt;
	char pr_reason[MAX_REASON_STRING];
	ch_cpu_logout_t *clop;
	uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
	ch_cpu_errors_t cpu_error_regs;

	bzero(&ch_flt, sizeof (ch_async_flt_t));
	/*
	 * Get logout data; with no private area, read the registers
	 * directly and write them back to clear the logged bits.
	 */
	if (CPU_PRIVATE(CPU) == NULL) {
		clop = NULL;
		ch_flt.flt_diag_data.chd_afar = LOGOUT_INVALID;
		get_cpu_error_state(&cpu_error_regs);
		set_cpu_error_state(&cpu_error_regs);
		t_afar = cpu_error_regs.afar;
		t_afsr = cpu_error_regs.afsr;
		t_afsr_ext = cpu_error_regs.afsr_ext;
#if defined(SERRANO)
		ch_flt.afar2 = cpu_error_regs.afar2;
#endif
	} else {
		clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
		t_afar = clop->clo_data.chd_afar;
		t_afsr = clop->clo_data.chd_afsr;
		t_afsr_ext = clop->clo_data.chd_afsr_ext;
#if defined(SERRANO)
		ch_flt.afar2 = clop->clo_data.chd_afar2;
#endif
	}

	t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
	    (t_afsr & C_AFSR_ALL_ERRS);

	pr_reason[0] = '\0';
	aflt = (struct async_flt *)&ch_flt;
	/*
	 * Fully initialize the fault record, matching the sibling trap
	 * handlers in this file.  (flt_id, flt_bus_id, flt_inst,
	 * flt_prot, flt_class and flt_status were previously left at
	 * their bzero'd values here, producing incomplete ereports.)
	 */
	aflt->flt_id = gethrtime_waitfree();
	aflt->flt_bus_id = getprocessorid();
	aflt->flt_inst = CPU->cpu_id;
	ch_flt.afsr_ext = t_afsr_ext;
	ch_flt.afsr_errs = t_afsr_errs;
	aflt->flt_stat = t_afsr;
	aflt->flt_addr = t_afar;
	aflt->flt_pc = (caddr_t)rp->r_pc;
	aflt->flt_priv = (rp->r_tstate & TSTATE_PRIV) ? 1 : 0;
	aflt->flt_tl = 0;
	aflt->flt_prot = AFLT_PROT_NONE;
	aflt->flt_class = CPU_FAULT;
	aflt->flt_status = ECC_C_TRAP;
	aflt->flt_panic = C_AFSR_PANIC(t_afsr_errs);

	/*
	 * Defer CEEN re-enable only when every error present is a
	 * deferrable CE and we are not panicking.
	 */
	if (!(t_afsr_errs & (cpu_ce_not_deferred | cpu_ce_not_deferred_ext)) &&
	    !aflt->flt_panic)
		ch_flt.flt_trapped_ce = CE_CEEN_DEFER | CE_CEEN_TRAPPED;
	else
		ch_flt.flt_trapped_ce = CE_CEEN_NODEFER | CE_CEEN_TRAPPED;

	cpu_log_and_clear_ce(&ch_flt);
	if (ch_flt.flt_trapped_ce & CE_CEEN_NODEFER)
		set_error_enable(get_error_enable() | EN_REG_CEEN);

	/* Pick up and queue any errors that occurred while logging. */
	if (clear_errors(&ch_flt)) {
		(void) cpu_queue_events(&ch_flt, pr_reason, ch_flt.afsr_errs,
		    NULL);
	}

	if (aflt->flt_panic)
		fm_panic("%sError(s)", pr_reason);
}
void
cpu_deferred_error(struct regs *rp, ulong_t p_clo_flags)
{
ushort_t ttype, tl;
ch_async_flt_t ch_flt;
struct async_flt *aflt;
int trampolined = 0;
char pr_reason[MAX_REASON_STRING];
ch_cpu_logout_t *clop;
uint64_t ceen, clo_flags;
uint64_t log_afsr;
uint64_t t_afar, t_afsr, t_afsr_ext, t_afsr_errs;
ch_cpu_errors_t cpu_error_regs;
int expected = DDI_FM_ERR_UNEXPECTED;
ddi_acc_hdl_t *hp;
uint_t pflag = ttoproc(curthread)->p_flag;
bzero(&ch_flt, sizeof (ch_async_flt_t));
if (CPU_PRIVATE(CPU) == NULL) {
clop = NULL;
ch_flt.flt_diag_data.chd_afar = LOGOUT_INVALID;
get_cpu_error_state(&cpu_error_regs);
set_cpu_error_state(&cpu_error_regs);
t_afar = cpu_error_regs.afar;
t_afsr = cpu_error_regs.afsr;
t_afsr_ext = cpu_error_regs.afsr_ext;
#if defined(SERRANO)
ch_flt.afar2 = cpu_error_regs.afar2;
#endif
clo_flags = p_clo_flags;
} else {
clop = CPU_PRIVATE_PTR(CPU, chpr_async_logout);
t_afar = clop->clo_data.chd_afar;
t_afsr = clop->clo_data.chd_afsr;
t_afsr_ext = clop->clo_data.chd_afsr_ext;
#if defined(SERRANO)
ch_flt.afar2 = clop->clo_data.chd_afar2;
#endif
clo_flags = clop->clo_flags;
}
t_afsr_errs = (t_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
(t_afsr & C_AFSR_ALL_ERRS);
pr_reason[0] = '\0';
ceen = clo_flags & EN_REG_CEEN;
tl = (clo_flags & CLO_FLAGS_TL_MASK) >> CLO_FLAGS_TL_SHIFT;
ttype = (clo_flags & CLO_FLAGS_TT_MASK) >> CLO_FLAGS_TT_SHIFT;
aflt = (struct async_flt *)&ch_flt;
aflt->flt_id = gethrtime_waitfree();
aflt->flt_bus_id = getprocessorid();
aflt->flt_inst = CPU->cpu_id;
ch_flt.afsr_ext = t_afsr_ext;
ch_flt.afsr_errs = t_afsr_errs;
aflt->flt_stat = t_afsr;
aflt->flt_addr = t_afar;
aflt->flt_pc = (caddr_t)rp->r_pc;
aflt->flt_prot = AFLT_PROT_NONE;
aflt->flt_class = CPU_FAULT;
aflt->flt_priv = (rp->r_tstate & TSTATE_PRIV) ? 1 : 0;
aflt->flt_tl = (uchar_t)tl;
aflt->flt_panic = ((tl != 0) || (aft_testfatal != 0) ||
C_AFSR_PANIC(t_afsr_errs));
aflt->flt_core = (pflag & SDOCORE) ? 1 : 0;
aflt->flt_status = ((ttype == T_DATA_ERROR) ? ECC_D_TRAP : ECC_I_TRAP);
if (aflt->flt_priv && tl == 0) {
if (curthread->t_ontrap != NULL) {
on_trap_data_t *otp = curthread->t_ontrap;
if (otp->ot_prot & OT_DATA_EC) {
aflt->flt_prot = AFLT_PROT_EC;
otp->ot_trap |= OT_DATA_EC;
rp->r_pc = otp->ot_trampoline;
rp->r_npc = rp->r_pc + 4;
trampolined = 1;
}
if ((t_afsr & (C_AFSR_TO | C_AFSR_BERR)) &&
(otp->ot_prot & OT_DATA_ACCESS)) {
aflt->flt_prot = AFLT_PROT_ACCESS;
otp->ot_trap |= OT_DATA_ACCESS;
rp->r_pc = otp->ot_trampoline;
rp->r_npc = rp->r_pc + 4;
trampolined = 1;
hp = (ddi_acc_hdl_t *)otp->ot_handle;
if (!hp)
expected = DDI_FM_ERR_PEEK;
else if (hp->ah_acc.devacc_attr_access ==
DDI_CAUTIOUS_ACC)
expected = DDI_FM_ERR_EXPECTED;
}
} else if (curthread->t_lofault) {
aflt->flt_prot = AFLT_PROT_COPY;
rp->r_g1 = EFAULT;
rp->r_pc = curthread->t_lofault;
rp->r_npc = rp->r_pc + 4;
trampolined = 1;
}
}
if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
if (t_afsr_errs &
((C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS) &
~(C_AFSR_BERR | C_AFSR_TO)))
aflt->flt_panic |= aft_panic;
} else if (!trampolined) {
aflt->flt_panic = 1;
}
log_afsr = t_afsr_errs;
if (trampolined) {
log_afsr &= ~(C_AFSR_TO | C_AFSR_BERR);
} else if (!aflt->flt_priv) {
if (!cpu_berr_to_verbose)
log_afsr &= ~(C_AFSR_TO | C_AFSR_BERR);
}
if (((log_afsr &
((C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS) & ~C_AFSR_ME)) &&
cpu_queue_events(&ch_flt, pr_reason, log_afsr, clop) == 0) ||
(t_afsr_errs & (C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS)) == 0) {
ch_flt.flt_type = CPU_INV_AFSR;
cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_INVALID_AFSR,
(void *)&ch_flt, sizeof (ch_async_flt_t), ue_queue,
aflt->flt_panic);
}
if (clop) {
bzero(clop, sizeof (ch_cpu_logout_t));
clop->clo_data.chd_afar = LOGOUT_INVALID;
}
#if defined(JALAPENO) || defined(SERRANO)
if (t_afsr & (C_AFSR_UE|C_AFSR_RUE|C_AFSR_TO|C_AFSR_BERR)) {
cpu_run_bus_error_handlers(aflt, expected);
}
if (t_afsr & (C_AFSR_UE|C_AFSR_RUE)) {
if (bus_func_invoke(BF_TYPE_UE) == BF_FATAL)
aflt->flt_panic = 1;
if (aflt->flt_panic && cpu_flt_in_memory(&ch_flt, C_AFSR_UE)) {
panic_aflt = *aflt;
}
}
if (t_afsr & (C_AFSR_UE | C_AFSR_RUE | C_AFSR_EDU | C_AFSR_BERR))
cpu_error_ecache_flush(&ch_flt);
#else
if (t_afsr & (C_AFSR_UE|C_AFSR_TO|C_AFSR_BERR)) {
cpu_run_bus_error_handlers(aflt, expected);
}
if (t_afsr & C_AFSR_UE) {
if (bus_func_invoke(BF_TYPE_UE) == BF_FATAL)
aflt->flt_panic = 1;
if (aflt->flt_panic && cpu_flt_in_memory(&ch_flt, C_AFSR_UE)) {
panic_aflt = *aflt;
}
}
if (t_afsr_errs &
(C_AFSR_UE | C_AFSR_EDU | C_AFSR_BERR | C_AFSR_L3_EDU))
cpu_error_ecache_flush(&ch_flt);
#endif
set_error_enable(get_error_enable() | (EN_REG_NCEEN | ceen));
if (clear_errors(&ch_flt)) {
if (cpu_check_secondary_errors(&ch_flt, t_afsr_errs,
t_afar) == 0) {
aflt->flt_panic |= ((ch_flt.afsr_errs &
(C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS)) != 0);
}
(void) cpu_queue_events(&ch_flt, pr_reason, ch_flt.afsr_errs,
NULL);
}
if (aflt->flt_panic)
fm_panic("%sError(s)", pr_reason);
if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
int pcb_flag = 0;
if (t_afsr_errs &
(C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS &
~(C_AFSR_BERR | C_AFSR_TO)))
pcb_flag |= ASYNC_HWERR;
if (t_afsr & C_AFSR_BERR)
pcb_flag |= ASYNC_BERR;
if (t_afsr & C_AFSR_TO)
pcb_flag |= ASYNC_BTO;
ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
aston(curthread);
}
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
/*
 * Handle a D$ or I$ parity error trap (CPU_IMP_L1_CACHE_PARITY).
 *
 * flags encodes the trap circumstances:
 *	CH_ERR_TL    - error taken at trap level > 0
 *	CH_ERR_IPE   - I$ parity error (otherwise D$)
 *	CH_ERR_PANIC - the trap handler already decided to panic
 * tpc is the trap PC; for an I$ error it also serves as the fault address.
 *
 * Builds an async fault record, identifies the faulting way/word,
 * dispatches an ereport, corrects/invalidates the affected cache(s),
 * restores the boot-time cache enables and panics if required.
 */
void
cpu_parity_error(struct regs *rp, uint_t flags, caddr_t tpc)
{
    ch_async_flt_t ch_flt;
    struct async_flt *aflt;
    uchar_t tl = ((flags & CH_ERR_TL) != 0);
    uchar_t iparity = ((flags & CH_ERR_IPE) != 0);
    uchar_t panic = ((flags & CH_ERR_PANIC) != 0);
    char *error_class;
    int index, way, word;
    ch_dc_data_t tmp_dcp;
    int dc_set_size = dcache_size / CH_DCACHE_NWAY;
    uint64_t parity_bits, pbits;
    /* parity of a 2-bit pattern: 00->0, 01->1, 10->1, 11->0 */
    static int parity_bits_popc[] = { 0, 1, 1, 0 };

    bzero(&ch_flt, sizeof (ch_async_flt_t));
    aflt = (struct async_flt *)&ch_flt;
    aflt->flt_id = gethrtime_waitfree();
    aflt->flt_bus_id = getprocessorid();
    aflt->flt_inst = CPU->cpu_id;
    aflt->flt_pc = tpc;
    /* only an I$ error has a meaningful fault address (the PC itself) */
    aflt->flt_addr = iparity ? (uint64_t)tpc : AFLT_INV_ADDR;
    aflt->flt_prot = AFLT_PROT_NONE;
    aflt->flt_class = CPU_FAULT;
    aflt->flt_priv = (tl || (rp->r_tstate & TSTATE_PRIV)) ? 1 : 0;
    aflt->flt_tl = tl;
    aflt->flt_panic = panic;
    aflt->flt_status = iparity ? ECC_IP_TRAP : ECC_DP_TRAP;
    ch_flt.flt_type = iparity ? CPU_IC_PARITY : CPU_DC_PARITY;

    if (iparity) {
        /*
         * Pick the most specific I$ ereport class: data parity,
         * tag parity, or unknown parity error.
         */
        cpu_icache_parity_info(&ch_flt);
        if (ch_flt.parity_data.ipe.cpl_off != -1)
            error_class = FM_EREPORT_CPU_USIII_IDSPE;
        else if (ch_flt.parity_data.ipe.cpl_way != -1)
            error_class = FM_EREPORT_CPU_USIII_ITSPE;
        else
            error_class = FM_EREPORT_CPU_USIII_IPE;
        aflt->flt_payload = FM_EREPORT_PAYLOAD_ICACHE_PE;
    } else {
        cpu_dcache_parity_info(&ch_flt);
        if (ch_flt.parity_data.dpe.cpl_off != -1) {
            way = ch_flt.parity_data.dpe.cpl_way;
            /*
             * NOTE(review): Jalapeno-only special case.  Re-check the
             * flagged way; if the only mismatch is on a line whose tag
             * has VA13 set, rewrite D$ parity and resume without
             * logging anything.  Presumably a documented Jalapeno
             * erratum workaround - confirm against the PRM errata.
             */
            if ((tl == 0) && (way != 0) &&
                IS_JALAPENO(cpunodes[CPU->cpu_id].implementation)) {
                for (index = 0; index < dc_set_size;
                    index += dcache_linesize) {
                    get_dcache_dtag(index + way *
                        dc_set_size,
                        (uint64_t *)&tmp_dcp);
                    /* utag bits <15:8> carry the parity bits */
                    parity_bits = tmp_dcp.dc_utag >> 8;
                    for (word = 0; word < 4; word++) {
                        pbits = (parity_bits >>
                            (6 - word * 2)) & 3;
                        if (((popc64(
                            tmp_dcp.dc_data[word]) +
                            parity_bits_popc[pbits]) &
                            1) && (tmp_dcp.dc_tag &
                            VA13)) {
                            correct_dcache_parity(
                                dcache_size,
                                dcache_linesize);
                            if (cache_boot_state &
                                DCU_DC) {
                                flush_dcache();
                            }
                            set_dcu(get_dcu() |
                                cache_boot_state);
                            return;
                        }
                    }
                }
            }
            error_class = FM_EREPORT_CPU_USIII_DDSPE;
        } else if (ch_flt.parity_data.dpe.cpl_way != -1)
            error_class = FM_EREPORT_CPU_USIII_DTSPE;
        else
            error_class = FM_EREPORT_CPU_USIII_DPE;
        aflt->flt_payload = FM_EREPORT_PAYLOAD_DCACHE_PE;
        /*
         * On Panther the P$ is also parity protected; re-classify
         * if the fault was actually found there.
         */
        if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
            cpu_pcache_parity_info(&ch_flt);
            if (ch_flt.parity_data.dpe.cpl_cache == CPU_PC_PARITY) {
                error_class = FM_EREPORT_CPU_USIII_PDSPE;
                aflt->flt_payload =
                    FM_EREPORT_PAYLOAD_PCACHE_PE;
            }
        }
    }

    cpu_errorq_dispatch(error_class, (void *)&ch_flt,
        sizeof (ch_async_flt_t), ue_queue, aflt->flt_panic);

    if (iparity) {
        /* invalidate the I$ if it was enabled at boot */
        if (cache_boot_state & DCU_IC) {
            flush_icache();
        }
        /*
         * NOTE(review): extra Panther-only recovery steps; presumably
         * required by the Panther I$-parity recovery procedure -
         * confirm against the Panther PRM.
         */
        if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
            flush_ipb();
            correct_dcache_parity(dcache_size,
                dcache_linesize);
            flush_pcache();
        }
    } else {
        /* rewrite correct parity across the whole D$ */
        correct_dcache_parity(dcache_size, dcache_linesize);
        if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
            flush_icache();
            flush_ipb();
            flush_pcache();
        }
    }

    /* flush the D$ if it was enabled at boot */
    if (cache_boot_state & DCU_DC) {
        flush_dcache();
    }

    /* restore the boot-time cache enable bits in the DCU */
    set_dcu(get_dcu() | cache_boot_state);

    if (aflt->flt_panic)
        fm_panic("%sError(s)", iparity ? "IPE " : "DPE ");

    /* at TL>0, additionally flush the E$ before returning */
    if (tl) {
        cpu_flush_ecache();
    }
}
/*
 * Record I$ way information for an icache parity fault prior to
 * ereport generation.  When the detection code identified a specific
 * faulting way (tag or data mismatch), only that way is marked as
 * logged; otherwise every captured way is marked.
 */
void
cpu_async_log_ic_parity_err(ch_async_flt_t *ch_flt)
{
    struct async_flt *aflt = (struct async_flt *)ch_flt;
    int firstway, lastway, w;

    if ((ch_flt->parity_data.ipe.cpl_off != -1) ||
        (ch_flt->parity_data.ipe.cpl_way != -1)) {
        /* a single faulty way was identified */
        firstway = lastway = ch_flt->parity_data.ipe.cpl_way;
    } else {
        /* no specific way found: mark all captured ways */
        firstway = 0;
        lastway = CH_ICACHE_NWAY - 1;
    }

    for (w = firstway; w <= lastway; w++) {
        ch_ic_data_t *icp = &ch_flt->parity_data.ipe.cpl_ic[w];

        /* Panther encodes the way in the index differently */
        if (IS_PANTHER(cpunodes[aflt->flt_inst].implementation))
            icp->ic_way = PN_ICIDX_TO_WAY(icp->ic_idx);
        else
            icp->ic_way = CH_ICIDX_TO_WAY(icp->ic_idx);
        icp->ic_logflag = IC_LOGFLAG_MAGIC;
    }
}
/*
 * Record D$ (or P$) way information for a dcache parity fault prior
 * to ereport generation.  Nothing is marked when the detection code
 * identified neither an offset nor a way.
 */
void
cpu_async_log_dc_parity_err(ch_async_flt_t *ch_flt)
{
    int way = ch_flt->parity_data.dpe.cpl_way;
    int offset = ch_flt->parity_data.dpe.cpl_off;
    int tag_index;

    if (offset == -1 && way == -1)
        return;     /* no faulting way identified; nothing to log */

    if (offset != -1 &&
        ch_flt->parity_data.dpe.cpl_cache == CPU_PC_PARITY) {
        /* P$ data parity: mark the faulting P$ way */
        tag_index = ch_flt->parity_data.dpe.cpl_pc[way].pc_idx;
        ch_flt->parity_data.dpe.cpl_pc[way].pc_way =
            CH_PCIDX_TO_WAY(tag_index);
        ch_flt->parity_data.dpe.cpl_pc[way].pc_logflag =
            PC_LOGFLAG_MAGIC;
    } else {
        /* D$ tag or data parity: mark the faulting D$ way */
        tag_index = ch_flt->parity_data.dpe.cpl_dc[way].dc_idx;
        ch_flt->parity_data.dpe.cpl_dc[way].dc_way =
            CH_DCIDX_TO_WAY(tag_index);
        ch_flt->parity_data.dpe.cpl_dc[way].dc_logflag =
            DC_LOGFLAG_MAGIC;
    }
}
#endif
/*
 * Common logging for errors drained from the CE/UE error queues.
 * Returns nonzero if the caller should go on to post an ereport,
 * zero if the event was consumed here (recirculated, absorbed as a
 * scrub-induced error, or discarded).
 */
static int
cpu_async_log_err(void *flt, errorq_elem_t *eqep)
{
    ch_async_flt_t *ch_flt = (ch_async_flt_t *)flt;
    struct async_flt *aflt = (struct async_flt *)flt;
    uint64_t errors;
    extern void memscrub_induced_error(void);

    switch (ch_flt->flt_type) {
    case CPU_INV_AFSR:
        /*
         * Drop the event if it came in via a C-trap and the AFSR
         * shows no error bits at all; otherwise pass it on.
         */
        if ((aflt->flt_status & ECC_C_TRAP) &&
            (!(aflt->flt_stat & C_AFSR_MASK)))
            return (0);
        else
            return (1);
    case CPU_TO:
    case CPU_BERR:
    case CPU_FATAL:
    case CPU_FPUERR:
        /* always reported as-is */
        return (1);
    case CPU_UE_ECACHE_RETIRE:
        cpu_log_err(aflt);
        cpu_page_retire(ch_flt);
        return (1);
    case CPU_CE:
    case CPU_EMC:
        /*
         * A pure CE taken under AFLT_PROT_EC protection: if the page
         * is retirable, treat it as a scrub-induced error, arrange
         * the deferred CEEN re-enable if requested, and consume it.
         */
        if ((ch_flt->afsr_errs &
            (C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS)) == C_AFSR_CE &&
            aflt->flt_prot == AFLT_PROT_EC) {
            if (page_retire_check(aflt->flt_addr, NULL) == 0) {
                if (ch_flt->flt_trapped_ce & CE_CEEN_DEFER) {
                    (void) timeout(cpu_delayed_check_ce_errors,
                        (void *)(uintptr_t)aflt->flt_inst,
                        drv_usectohz((clock_t)cpu_ceen_delay_secs
                        * MICROSEC));
                }
                memscrub_induced_error();
                return (0);
            }
        }
        /*
         * Record why extended diagnosis is skipped, or recirculate
         * the event through the CE scrub/classify algorithm.
         */
        if (page_retire_check(aflt->flt_addr, &errors) == EINVAL) {
            CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
                CE_XDIAG_SKIP_NOPP);
        } else {
            if (errors != PR_OK) {
                CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
                    CE_XDIAG_SKIP_PAGEDET);
            } else if (ce_scrub_xdiag_recirc(aflt, ce_queue, eqep,
                offsetof(ch_async_flt_t, cmn_asyncflt))) {
                return (0);
            }
        }
        /*FALLTHROUGH*/
    case CPU_CE_ECACHE:
    case CPU_UE_ECACHE:
    case CPU_IV:
    case CPU_ORPH:
        cpu_log_err(aflt);
        return (1);
    case CPU_UE:
        /*
         * A pure UE taken under AFLT_PROT_EC protection: if the
         * page is retirable, zero it via softcall and consume the
         * event as scrub-induced.
         */
        if (!panicstr && (ch_flt->afsr_errs &
            (C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS)) == C_AFSR_UE &&
            aflt->flt_prot == AFLT_PROT_EC) {
            if (page_retire_check(aflt->flt_addr, NULL) == 0) {
                softcall(ecc_page_zero, (void *)aflt->flt_addr);
                memscrub_induced_error();
                return (0);
            }
        }
        cpu_log_err(aflt);
        break;      /* fall into the common UE tail below */
    default:
        /* give CPU-model-specific code a chance to handle it */
        switch (cpu_impl_async_log_err(flt, eqep)) {
        case CH_ASYNC_LOG_DONE:
            return (1);
        case CH_ASYNC_LOG_RECIRC:
            return (0);
        case CH_ASYNC_LOG_CONTINUE:
            break;  /* continue with the UE tail below */
        default:
            cmn_err(CE_WARN, "discarding error 0x%p with "
                "invalid fault type (0x%x)",
                (void *)aflt, ch_flt->flt_type);
            return (0);
        }
    }

    /*
     * UE tail: retire the faulting memory page, or while panicking
     * just clear the error from the line and AFSR so the dump path
     * is not disturbed.
     */
    if (aflt->flt_addr != AFLT_INV_ADDR && aflt->flt_in_memory) {
        if (!panicstr) {
            cpu_page_retire(ch_flt);
        } else {
            cpu_clearphys(aflt);
            (void) clear_errors(NULL);
        }
    }

    return (1);
}
/*
 * Retire the physical page backing the fault address of an
 * uncorrectable error.
 */
void
cpu_page_retire(ch_async_flt_t *ch_flt)
{
    uint64_t pa = ((struct async_flt *)ch_flt)->flt_addr;

    (void) page_retire(pa, PR_UE);
}
/*
 * Return nonzero if the given AFSR bits indicate an E$ (or, on
 * Panther, L3$) data error.  Jalapeno/Serrano have no such AFSR
 * bits, so always report zero there.
 */
static int
cpu_error_is_ecache_data(int cpuid, uint64_t t_afsr)
{
#if defined(JALAPENO) || defined(SERRANO)
    return (0);
#elif defined(CHEETAH_PLUS)
    /* Panther reports external-cache data errors via the ext AFSR */
    if (IS_PANTHER(cpunodes[cpuid].implementation))
        return ((t_afsr & C_AFSR_EXT_L3_DATA_ERRS) != 0);
    return ((t_afsr & C_AFSR_EC_DATA_ERRS) != 0);
#else
    return ((t_afsr & C_AFSR_EC_DATA_ERRS) != 0);
#endif
}
/*
 * Common logging for an async fault: classify syndrome/AFAR validity,
 * look up the memory unum, notify the platform FRU logger and any
 * per-fault callback, and capture diagnostic cache state.
 */
void
cpu_log_err(struct async_flt *aflt)
{
    char unum[UNUM_NAMLEN];
    int synd_status, synd_code, afar_status;
    ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;

    /* flag whether the error originated in E$ data */
    if (cpu_error_is_ecache_data(aflt->flt_inst, ch_flt->flt_bit))
        aflt->flt_status |= ECC_ECACHE;
    else
        aflt->flt_status &= ~ECC_ECACHE;

    /* determine whether the captured syndrome is usable */
    synd_status = afsr_to_synd_status(aflt->flt_inst,
        ch_flt->afsr_errs, ch_flt->flt_bit);

    /* the AFAR is only evaluated when it points at real memory */
    if (pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT))
        afar_status = afsr_to_afar_status(ch_flt->afsr_errs,
            ch_flt->flt_bit);
    else
        afar_status = AFLT_STAT_INVALID;

    synd_code = synd_to_synd_code(synd_status,
        aflt->flt_synd, ch_flt->flt_bit);

    if (afar_status != AFLT_STAT_INVALID) {
        (void) cpu_get_mem_unum_synd(synd_code, aflt, unum);
    } else {
        unum[0] = '\0';
    }

    /*
     * Forward FRU identification to the platform/service processor
     * when those (weak) platform hooks exist and are enabled.
     */
    if (&plat_ecc_capability_sc_get &&
        plat_ecc_capability_sc_get(PLAT_ECC_ERROR_MESSAGE)) {
        if (&plat_log_fruid_error)
            plat_log_fruid_error(synd_code, aflt, unum,
                ch_flt->flt_bit);
    }

    if (aflt->flt_func != NULL)
        aflt->flt_func(aflt, unum);

    if (afar_status != AFLT_STAT_INVALID)
        cpu_log_diag_info(ch_flt);

    /*
     * The trap handler deferred re-enabling CEEN; schedule the
     * delayed re-enable/check after cpu_ceen_delay_secs.
     */
    if (ch_flt->flt_trapped_ce & CE_CEEN_DEFER) {
        (void) timeout(cpu_delayed_check_ce_errors,
            (void *)(uintptr_t)aflt->flt_inst,
            drv_usectohz((clock_t)cpu_ceen_delay_secs * MICROSEC));
    }
}
/*
 * One-time error-subsystem initialization: create the single-threaded
 * taskq used by the delayed CE checking machinery, pre-populated with
 * the caller-specified number of entries.
 */
void
cpu_error_init(int items)
{
    ch_check_ce_tq = taskq_create("cheetah_check_ce",
        1 /* nthreads */, minclsyspri,
        items /* minalloc */, items /* maxalloc */,
        TASKQ_PREPOPULATE);
}
/*
 * Drain routine for the CE queue.  The RECIRC_* classes are events
 * coming back around after the extended-diagnosis scrub pass.
 */
void
cpu_ce_log_err(struct async_flt *aflt, errorq_elem_t *eqep)
{
    char unum[UNUM_NAMLEN];
    int len;

    switch (aflt->flt_class) {
    case CPU_FAULT:
        cpu_ereport_init(aflt);
        if (cpu_async_log_err(aflt, eqep))
            cpu_ereport_post(aflt);
        break;

    case BUS_FAULT:
        if (aflt->flt_func != NULL) {
            (void) cpu_get_mem_unum_aflt(AFLT_STAT_VALID, aflt,
                unum, UNUM_NAMLEN, &len);
            aflt->flt_func(aflt, unum);
        }
        break;

    case RECIRC_CPU_FAULT:
        /* recirculated event: log and post without re-init */
        aflt->flt_class = CPU_FAULT;
        cpu_log_err(aflt);
        cpu_ereport_post(aflt);
        break;

    case RECIRC_BUS_FAULT:
        /* bus faults are never recirculated */
        ASSERT(aflt->flt_class != RECIRC_BUS_FAULT);
        /*FALLTHROUGH*/
    default:
        cmn_err(CE_WARN, "discarding CE error 0x%p with invalid "
            "fault class (0x%x)", (void *)aflt, aflt->flt_class);
        return;
    }
}
/*
 * Core of the CE extended-diagnosis algorithm.  With CEEN off and
 * preemption disabled: scrub the faulting line, check whether the
 * error reasserts (CE1), re-read the location and check again (CE2),
 * and try to match the AFAR against the E$ logout to record the
 * line's MESI state.  Returns the disposition bits (CE_XDIAG_*).
 */
static uchar_t
cpu_ce_scrub_mem_err_common(struct async_flt *ecc, boolean_t logout_tried)
{
    uchar_t disp = CE_XDIAG_EXTALG;
    on_trap_data_t otd;
    uint64_t orig_err;
    ch_cpu_logout_t *clop;

    kpreempt_disable();

    /* run the whole sequence with CEEN off so our accesses don't trap */
    orig_err = get_error_enable();
    if (orig_err & EN_REG_CEEN)
        set_error_enable(orig_err & ~EN_REG_CEEN);

    /* capture a fresh E$ logout for the address, if not already done */
    if (logout_tried == B_FALSE) {
        if (!cpu_ce_delayed_ec_logout(ecc->flt_addr))
            disp |= CE_XDIAG_NOLOGOUT;
    }

    /* scrub the line; bail out (restoring state) if the access traps */
    if (!on_trap(&otd, OT_DATA_ACCESS)) {
        cpu_scrubphys(ecc);
    } else {
        no_trap();
        if (orig_err & EN_REG_CEEN)
            set_error_enable(orig_err);
        kpreempt_enable();
        return (disp);
    }
    no_trap();

    /* CE1: the error was seen again right after the scrub */
    if (clear_ecc(ecc))
        disp |= CE_XDIAG_CE1;

    /* re-read the (8-byte aligned) scrubbed location */
    if (!on_trap(&otd, OT_DATA_ACCESS)) {
        (void) lddphys(P2ALIGN(ecc->flt_addr, 8));
    } else {
        no_trap();
        if (orig_err & EN_REG_CEEN)
            set_error_enable(orig_err);
        kpreempt_enable();
        return (disp);
    }
    no_trap();

    /* CE2: the error was seen again on the post-scrub re-read */
    if (clear_ecc(ecc))
        disp |= CE_XDIAG_CE2;

    /*
     * If a valid logout is available, look for the AFAR among the
     * captured E$/L2 tags and record the matching line's MESI state.
     */
    clop = CPU_PRIVATE(CPU) ? CPU_PRIVATE_PTR(CPU, chpr_cecc_logout) : NULL;
    if (!(disp & CE_XDIAG_NOLOGOUT) && clop &&
        clop->clo_data.chd_afar != LOGOUT_INVALID) {
        int hit, level;
        int state;
        int totalsize;
        ch_ec_data_t *ecp;

        hit = cpu_matching_ecache_line(ecc->flt_addr, &clop->clo_data,
            0, &level);
        if (hit) {
            --hit;      /* convert 1-based way to array index */
            disp |= CE_XDIAG_AFARMATCH;

            /*
             * Panther keeps L2 data separately; other CPUs (and
             * Panther's non-L2 case) use the chd_ec_data array.
             */
            if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
                if (level == 2)
                    ecp = &clop->clo_data.chd_l2_data[hit];
                else
                    ecp = &clop->clo_data.chd_ec_data[hit];
            } else {
                ASSERT(level == 2);
                ecp = &clop->clo_data.chd_ec_data[hit];
            }
            totalsize = cpunodes[CPU->cpu_id].ecache_size;
            state = cpu_ectag_pa_to_subblk_state(totalsize,
                ecc->flt_addr, ecp->ec_tag);
            switch (state) {
            case CH_ECSTATE_MOD:
                disp |= EC_STATE_M;
                break;
            case CH_ECSTATE_OWN:
            case CH_ECSTATE_OWS:
                disp |= EC_STATE_O;
                break;
            case CH_ECSTATE_EXL:
                disp |= EC_STATE_E;
                break;
            case CH_ECSTATE_SHR:
                disp |= EC_STATE_S;
                break;
            default:
                disp |= EC_STATE_I;
                break;
            }
        }

        /* invalidate the logout area we consumed ourselves */
        if (logout_tried == B_FALSE) {
            bzero(clop, sizeof (ch_cpu_logout_t));
            clop->clo_data.chd_afar = LOGOUT_INVALID;
        }
    }

    /* restore CEEN and preemption */
    if (orig_err & EN_REG_CEEN)
        set_error_enable(orig_err);
    kpreempt_enable();

    return (disp);
}
/*
 * Entry point for CE scrub/classification on the detecting CPU:
 * reset any stale persistence classification, normalize IO-bus CEs,
 * then run the common scrub algorithm and record its disposition.
 */
void
cpu_ce_scrub_mem_err(struct async_flt *ecc, boolean_t logout_tried)
{
    /* discard stale persistence classification bits */
    ecc->flt_status &= ~(ECC_STICKY | ECC_PERSISTENT | ECC_INTERMITTENT);

    /* an IO-bus detected CE: force the AFSR status to the memory bit */
    if ((ecc->flt_status & ECC_IOBUS) != 0)
        ecc->flt_stat = C_AFSR_MEMORY;

    ecc->flt_disp = cpu_ce_scrub_mem_err_common(ecc, logout_tried);
}
#define PTNR_SIBLINGOK 0x1
#define PTNR_SELFOK 0x2
/*
 * Select a partner CPU to re-check a CE detected by the CPU recorded
 * in aflt->flt_inst.  Preference order: a CPU in a different lgroup
 * (remote), then a different chip in the same lgroup (local), then a
 * sibling core on the same chip (only with PTNR_SIBLINGOK), then the
 * detector itself (only with PTNR_SELFOK).  A recent selection is
 * cached per-CPU and reused within cpu_ce_ptnr_cachetime_sec.
 * Returns the chosen cpu_t (with *typep set to CE_XDIAG_PTNR_*), or
 * NULL if no acceptable partner exists.  Caller must hold preemption
 * off (or be at high PIL) so cpu_list stays stable.
 */
static cpu_t *
ce_ptnr_select(struct async_flt *aflt, int flags, int *typep)
{
    cpu_t *sp, *dtcr, *ptnr, *locptnr, *sibptnr;
    hrtime_t lasttime, thistime;

    ASSERT(curthread->t_preempt > 0 || getpil() >= DISP_LEVEL);

    dtcr = cpu[aflt->flt_inst];

    /* the detector must still be present and active */
    if (dtcr == NULL || !cpu_flagged_active(dtcr->cpu_flags)) {
        return (NULL);
    }

    /* uniprocessor (or single-cpu partition): self is the only option */
    if (ncpus == 1 || dtcr->cpu_part->cp_ncpus == 1) {
        if (flags & PTNR_SELFOK) {
            *typep = CE_XDIAG_PTNR_SELF;
            return (dtcr);
        } else {
            return (NULL);
        }
    }

    thistime = gethrtime();
    lasttime = CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime);

    if (!lasttime) {
        /* first selection ever: start scanning at the next cpu */
        sp = dtcr->cpu_next_part;
    } else if (thistime - lasttime < cpu_ce_ptnr_cachetime_sec * NANOSEC) {
        /*
         * Recent cached partner: reuse it if it is still in the
         * same partition, still active, and still acceptable under
         * the current flags; otherwise fall back to a fresh scan.
         */
        sp = cpu[CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id)];
        if (sp == NULL || sp->cpu_part != dtcr->cpu_part ||
            !cpu_flagged_active(sp->cpu_flags) ||
            (sp == dtcr && !(flags & PTNR_SELFOK)) ||
            (pg_plat_cpus_share(sp, dtcr, PGHW_CHIP) &&
            !(flags & PTNR_SIBLINGOK))) {
            sp = dtcr->cpu_next_part;
        } else {
            /* classify the cached partner for the caller */
            if (sp->cpu_lpl->lpl_lgrp != dtcr->cpu_lpl->lpl_lgrp) {
                *typep = CE_XDIAG_PTNR_REMOTE;
            } else if (sp == dtcr) {
                *typep = CE_XDIAG_PTNR_SELF;
            } else if (pg_plat_cpus_share(sp, dtcr, PGHW_CHIP)) {
                *typep = CE_XDIAG_PTNR_SIBLING;
            } else {
                *typep = CE_XDIAG_PTNR_LOCAL;
            }
            return (sp);
        }
    } else {
        /*
         * Cache expired: resume the scan just past the previous
         * partner, wrapping to the detector if we left the
         * partition.
         */
        sp = cpu[CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id)];
        if (sp == NULL) {
            sp = dtcr->cpu_next_part;
        } else {
            sp = sp->cpu_next_part;     /* advance the scan point */
            if (sp->cpu_part != dtcr->cpu_part)
                sp = dtcr;
        }
    }

    /* ensure the scan start point itself is usable */
    if (!cpu_flagged_active(sp->cpu_flags)) {
        sp = dtcr;
    }

    /*
     * Walk the partition ring once, remembering the first local
     * (different chip) and first sibling candidates; a remote
     * candidate wins immediately.
     */
    ptnr = sp;
    locptnr = NULL;
    sibptnr = NULL;
    do {
        if (ptnr == dtcr || !cpu_flagged_active(ptnr->cpu_flags))
            continue;
        if (ptnr->cpu_lpl->lpl_lgrp != dtcr->cpu_lpl->lpl_lgrp) {
            CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = ptnr->cpu_id;
            CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
            *typep = CE_XDIAG_PTNR_REMOTE;
            return (ptnr);
        }
        if (pg_plat_cpus_share(ptnr, dtcr, PGHW_CHIP)) {
            if (sibptnr == NULL)
                sibptnr = ptnr;
            continue;
        }
        if (locptnr == NULL)
            locptnr = ptnr;
    } while ((ptnr = ptnr->cpu_next_part) != sp);

    /* no remote partner: fall back in preference order */
    if (locptnr) {
        CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = locptnr->cpu_id;
        CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
        *typep = CE_XDIAG_PTNR_LOCAL;
        return (locptnr);
    } else if (sibptnr && flags & PTNR_SIBLINGOK) {
        CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = sibptnr->cpu_id;
        CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
        *typep = CE_XDIAG_PTNR_SIBLING;
        return (sibptnr);
    } else if (flags & PTNR_SELFOK) {
        CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_id) = dtcr->cpu_id;
        CPU_PRIVATE_VAL(dtcr, chpr_ceptnr_seltime) = thistime;
        *typep = CE_XDIAG_PTNR_SELF;
        return (dtcr);
    }

    return (NULL);
}
/*
 * Cross-call target: run the common CE scrub/classify sequence on the
 * selected partner CPU and hand the disposition back through *dispp.
 */
static void
ce_ptnrchk_xc(struct async_flt *aflt, uchar_t *dispp)
{
    *dispp = cpu_ce_scrub_mem_err_common(aflt, B_FALSE);
}
/*
 * Timeout callback for the "leaky" CE check: pick a partner (self and
 * siblings allowed), cross-call it to rescrub the address, record the
 * outcome in the fault disposition, then recommit the duplicated
 * errorq element and free the callback argument.
 */
static void
ce_lkychk_cb(ce_lkychk_cb_t *cbarg)
{
    struct async_flt *aflt = cbarg->lkycb_aflt;
    uchar_t disp;
    cpu_t *cp;
    int ptnrtype;

    kpreempt_disable();
    if (cp = ce_ptnr_select(aflt, PTNR_SIBLINGOK | PTNR_SELFOK,
        &ptnrtype)) {
        xc_one(cp->cpu_id, (xcfunc_t *)ce_ptnrchk_xc, (uint64_t)aflt,
            (uint64_t)&disp);
        CE_XDIAG_SETLKYINFO(aflt->flt_disp, disp);
        CE_XDIAG_SETPTNRID(aflt->flt_disp, cp->cpu_id);
        CE_XDIAG_SETPTNRTYPE(aflt->flt_disp, ptnrtype);
    } else {
        /* no usable partner: count and record the drop */
        ce_xdiag_lkydrops++;
        if (ncpus > 1)
            CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
                CE_XDIAG_SKIP_NOPTNR);
    }
    kpreempt_enable();

    /* push the duplicated event back through the CE queue */
    errorq_commit(cbarg->lkycb_eqp, cbarg->lkycb_eqep, ERRORQ_ASYNC);
    kmem_free(cbarg, sizeof (ce_lkychk_cb_t));
}
#ifdef DEBUG
static int ce_xdiag_forceaction;
#endif
/*
 * Decide and perform the next extended-diagnosis action for a CE that
 * has been through the detector's scrub pass.  Returns 1 if the event
 * was recirculated (caller must not post it now), 0 if processing of
 * the current event should continue.  afltoffset locates the
 * async_flt within the errorq element payload.
 */
int
ce_scrub_xdiag_recirc(struct async_flt *aflt, errorq_t *eqp,
    errorq_elem_t *eqep, size_t afltoffset)
{
    ce_dispact_t dispact, action;
    cpu_t *cp;
    uchar_t dtcrinfo, disp;
    int ptnrtype;

    /* extended diagnosis unavailable or disabled: drop out */
    if (!ce_disp_inited || panicstr || ce_xdiag_off) {
        ce_xdiag_drops++;
        return (0);
    } else if (!aflt->flt_in_memory) {
        ce_xdiag_drops++;
        CE_XDIAG_SETSKIPCODE(aflt->flt_disp, CE_XDIAG_SKIP_NOTMEM);
        return (0);
    }

    /* the detector must have completed the scrub algorithm */
    dtcrinfo = CE_XDIAG_DTCRINFO(aflt->flt_disp);
    if (!CE_XDIAG_EXT_ALG_APPLIED(dtcrinfo)) {
        ce_xdiag_drops++;
        CE_XDIAG_SETSKIPCODE(aflt->flt_disp, CE_XDIAG_SKIP_NOSCRUB);
        return (0);
    }

    /* look up the next action from the disposition table */
    dispact = CE_DISPACT(ce_disp_table,
        CE_XDIAG_AFARMATCHED(dtcrinfo),
        CE_XDIAG_STATE(dtcrinfo),
        CE_XDIAG_CE1SEEN(dtcrinfo),
        CE_XDIAG_CE2SEEN(dtcrinfo));

    action = CE_ACT(dispact);

#ifdef DEBUG
    /* test hook: force a particular action */
    if (ce_xdiag_forceaction != 0)
        action = ce_xdiag_forceaction;
#endif

    switch (action) {
    case CE_ACT_LKYCHK: {
        /*
         * Leaky check: duplicate the errorq element, reclassify the
         * copy as RECIRC_*, and schedule a delayed partner recheck.
         */
        caddr_t ndata;
        errorq_elem_t *neqep;
        struct async_flt *ecc;
        ce_lkychk_cb_t *cbargp;

        if ((ndata = errorq_elem_dup(eqp, eqep, &neqep)) == NULL) {
            ce_xdiag_lkydrops++;
            CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
                CE_XDIAG_SKIP_DUPFAIL);
            break;
        }
        ecc = (struct async_flt *)(ndata + afltoffset);

        ASSERT(ecc->flt_class == CPU_FAULT ||
            ecc->flt_class == BUS_FAULT);
        ecc->flt_class = (ecc->flt_class == CPU_FAULT) ?
            RECIRC_CPU_FAULT : RECIRC_BUS_FAULT;

        cbargp = kmem_alloc(sizeof (ce_lkychk_cb_t), KM_SLEEP);
        cbargp->lkycb_aflt = ecc;
        cbargp->lkycb_eqp = eqp;
        cbargp->lkycb_eqep = neqep;

        (void) timeout((void (*)(void *))ce_lkychk_cb,
            (void *)cbargp, drv_usectohz(cpu_ce_lkychk_timeout_usec));
        return (1);
    }

    case CE_ACT_PTNRCHK:
        /* immediate partner recheck via cross-call */
        kpreempt_disable();
        if ((cp = ce_ptnr_select(aflt, 0, &ptnrtype)) != NULL) {
            xc_one(cp->cpu_id, (xcfunc_t *)ce_ptnrchk_xc,
                (uint64_t)aflt, (uint64_t)&disp);
            CE_XDIAG_SETPTNRINFO(aflt->flt_disp, disp);
            CE_XDIAG_SETPTNRID(aflt->flt_disp, cp->cpu_id);
            CE_XDIAG_SETPTNRTYPE(aflt->flt_disp, ptnrtype);
        } else if (ncpus > 1) {
            ce_xdiag_ptnrdrops++;
            CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
                CE_XDIAG_SKIP_NOPTNR);
        } else {
            ce_xdiag_ptnrdrops++;
            CE_XDIAG_SETSKIPCODE(aflt->flt_disp,
                CE_XDIAG_SKIP_UNIPROC);
        }
        kpreempt_enable();
        break;

    case CE_ACT_DONE:
        break;

    case CE_DISP_BAD:
    default:
#ifdef DEBUG
        cmn_err(CE_PANIC, "ce_scrub_post: Bad action '%d'", action);
#endif
        ce_xdiag_bad++;
        CE_XDIAG_SETSKIPCODE(aflt->flt_disp, CE_XDIAG_SKIP_ACTBAD);
        break;
    }

    return (0);
}
/*
 * Post-processing for an event drained from the UE queue: build and
 * post the appropriate ereport according to the fault class.
 */
void
cpu_ue_log_err(struct async_flt *aflt)
{
    switch (aflt->flt_class) {
    case CPU_FAULT:
        cpu_ereport_init(aflt);
        if (cpu_async_log_err(aflt, NULL) != 0)
            cpu_ereport_post(aflt);
        break;

    case BUS_FAULT:
        bus_async_log_err(aflt);
        break;

    default:
        cmn_err(CE_WARN, "discarding async error %p with invalid "
            "fault class (0x%x)", (void *)aflt, aflt->flt_class);
        break;
    }
}
/*
 * Panic callback: sample this CPU's error registers and, if any error
 * bits are set, synthesize a fault record and queue the corresponding
 * events so they appear in the crash dump / ereport stream.
 */
void
cpu_async_panic_callb(void)
{
    ch_async_flt_t ch_flt;
    struct async_flt *aflt;
    ch_cpu_errors_t cpu_error_regs;
    uint64_t afsr_errs;

    get_cpu_error_state(&cpu_error_regs);

    afsr_errs = (cpu_error_regs.afsr & C_AFSR_ALL_ERRS) |
        (cpu_error_regs.afsr_ext & C_AFSR_EXT_ALL_ERRS);

    if (afsr_errs) {
        bzero(&ch_flt, sizeof (ch_async_flt_t));
        aflt = (struct async_flt *)&ch_flt;
        aflt->flt_id = gethrtime_waitfree();
        aflt->flt_bus_id = getprocessorid();
        aflt->flt_inst = CPU->cpu_id;
        aflt->flt_stat = cpu_error_regs.afsr;
        aflt->flt_addr = cpu_error_regs.afar;
        aflt->flt_prot = AFLT_PROT_NONE;
        aflt->flt_class = CPU_FAULT;
        aflt->flt_priv = ((cpu_error_regs.afsr & C_AFSR_PRIV) != 0);
        aflt->flt_panic = 1;    /* we are already panicking */
        ch_flt.afsr_ext = cpu_error_regs.afsr_ext;
        ch_flt.afsr_errs = afsr_errs;
#if defined(SERRANO)
        ch_flt.afar2 = cpu_error_regs.afar2;
#endif
        (void) cpu_queue_events(&ch_flt, NULL, afsr_errs, NULL);
    }
}
/*
 * Translate a raw syndrome into a syndrome code via the appropriate
 * lookup table, chosen by the AFSR bit: mtag-syndrome errors use the
 * mtag table (bus-parity codes on Jalapeno/Serrano), all other
 * ECC-syndrome errors use the ECC table.  Returns -1 when the
 * syndrome is invalid, zero, or out of table range.
 */
static int
synd_to_synd_code(int synd_status, ushort_t synd, uint64_t afsr_bit)
{
    if (synd_status == AFLT_STAT_INVALID)
        return (-1);

    /* only syndrome-bearing error bits have a translation */
    if (afsr_bit &
        (C_AFSR_MSYND_ERRS | C_AFSR_ESYND_ERRS | C_AFSR_EXT_ESYND_ERRS)) {
        if (afsr_bit & C_AFSR_MSYND_ERRS) {
#if defined(JALAPENO) || defined(SERRANO)
            /* Jalapeno/Serrano report bus parity, not mtag ECC */
            if ((synd == 0) || (synd >= BSYND_TBL_SIZE))
                return (-1);
            else
                return (BPAR0 + synd);
#else
            if ((synd == 0) || (synd >= MSYND_TBL_SIZE))
                return (-1);
            else
                return (mtag_syndrome_tab[synd]);
#endif
        } else {
            if ((synd == 0) || (synd >= ESYND_TBL_SIZE))
                return (-1);
            else
                return (ecc_syndrome_tab[synd]);
        }
    } else {
        return (-1);
    }
}
int
cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
if (&plat_get_mem_sid)
return (plat_get_mem_sid(unum, buf, buflen, lenp));
else
return (ENOTSUP);
}
/*
 * Translate a fault address into a memory-component offset via the
 * (weak) platform hook; ENOTSUP when the platform provides none.
 */
int
cpu_get_mem_offset(uint64_t flt_addr, uint64_t *offp)
{
    /* weak symbol: non-NULL only on platforms that implement it */
    if (&plat_get_mem_offset == NULL)
        return (ENOTSUP);

    return (plat_get_mem_offset(flt_addr, offp));
}
/*
 * Translate a unum/serial-id/offset triple back into a physical
 * address via the (weak) platform hook; ENOTSUP when absent.
 */
int
cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
    /* weak symbol: non-NULL only on platforms that implement it */
    if (&plat_get_mem_addr == NULL)
        return (ENOTSUP);

    return (plat_get_mem_addr(unum, sid, offset, addrp));
}
/*
 * Translate a syndrome/AFSR/AFAR tuple into a memory unum string via
 * the (weak) platform handler.  On handler failure the output buffer
 * and length are blanked.  Returns ENOTSUP when no handler exists.
 */
int
cpu_get_mem_unum(int synd_status, ushort_t flt_synd, uint64_t flt_stat,
    uint64_t flt_addr, int flt_bus_id, int flt_in_memory,
    ushort_t flt_status, char *buf, int buflen, int *lenp)
{
    int synd_code;
    int rc;

    if (&plat_get_mem_unum == NULL)
        return (ENOTSUP);

    /* a wildcard AFSR is treated as a correctable memory error */
    if (flt_stat == (uint64_t)-1)
        flt_stat = C_AFSR_CE;

    synd_code = synd_to_synd_code(synd_status, flt_synd, flt_stat);
    if (synd_code < 0 || synd_code >= M2)
        synd_code = -1;     /* outside the table: unknown syndrome */

    rc = plat_get_mem_unum(synd_code, flt_addr, flt_bus_id,
        flt_in_memory, flt_status, buf, buflen, lenp);
    if (rc != 0) {
        buf[0] = '\0';
        *lenp = 0;
    }
    return (rc);
}
int
cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt,
char *buf, int buflen, int *lenp)
{
return (cpu_get_mem_unum(synd_status, aflt->flt_synd,
(aflt->flt_class == BUS_FAULT) ?
(uint64_t)-1 : ((ch_async_flt_t *)aflt)->flt_bit,
aflt->flt_addr, aflt->flt_bus_id, aflt->flt_in_memory,
aflt->flt_status, buf, buflen, lenp));
}
static int
cpu_get_mem_unum_synd(int synd_code, struct async_flt *aflt, char *buf)
{
int ret, len;
if (synd_code < 0 || synd_code >= M2)
synd_code = -1;
if (&plat_get_mem_unum) {
if ((ret = plat_get_mem_unum(synd_code, aflt->flt_addr,
aflt->flt_bus_id, aflt->flt_in_memory,
aflt->flt_status, buf, UNUM_NAMLEN, &len)) != 0) {
buf[0] = '\0';
}
return (ret);
}
buf[0] = '\0';
return (ENOTSUP);
}
/*
 * External interface (e.g. for memory controller drivers): translate
 * a syndrome/AFSR/AFAR into a memory name string.  afsr points at the
 * AFSR value, optionally followed by the extended AFSR (consulted on
 * Panther).  Wildcard (-1) syndrome/AFSR values are tolerated; a
 * wildcard AFAR is not (ENXIO).
 */
int
cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
    char *buf, int buflen, int *lenp)
{
    int rc, synd_status, flt_in_memory;
    ushort_t flt_status = 0;
    uint64_t t_afsr_errs;
    char unum[UNUM_NAMLEN];

    if (afar == (uint64_t)-1)
        return (ENXIO);     /* no address to translate */

    synd_status = (synd == (uint64_t)-1) ?
        AFLT_STAT_INVALID : AFLT_STAT_VALID;

    /* memory fault iff the AFSR memory bit and the pf map agree */
    flt_in_memory = (*afsr & C_AFSR_MEMORY) &&
        pf_is_memory(afar >> MMU_PAGESHIFT);

    if (*afsr == (uint64_t)-1) {
        t_afsr_errs = C_AFSR_CE;    /* wildcard AFSR: assume CE */
    } else {
        t_afsr_errs = (*afsr & C_AFSR_ALL_ERRS);
#if defined(CHEETAH_PLUS)
        /* Panther: merge in the extended AFSR error bits */
        if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
            t_afsr_errs |= (*(afsr + 1) & C_AFSR_EXT_ALL_ERRS);
#endif
    }

    if (cpu_error_is_ecache_data(CPU->cpu_id, t_afsr_errs))
        flt_status |= ECC_ECACHE;

    rc = cpu_get_mem_unum(synd_status, (ushort_t)synd, t_afsr_errs, afar,
        CPU->cpu_id, flt_in_memory, flt_status, unum, UNUM_NAMLEN, lenp);
    if (rc != 0)
        return (rc);

    if (*lenp >= buflen)
        return (ENAMETOOLONG);

    (void) strncpy(buf, unum, buflen);
    return (0);
}
/*
 * External interface: fetch memory-layout information (sizes, segment
 * and bank identifiers, memory-controller id) for a fault address via
 * the platform's p2get_mem_info hook; ENOTSUP when absent.
 */
int
cpu_get_mem_info(uint64_t synd, uint64_t afar,
    uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
    int *segsp, int *banksp, int *mcidp)
{
    int synd_status, synd_code;

    if (afar == (uint64_t)-1)
        return (ENXIO);     /* no address to look up */

    synd_status = (synd == (uint64_t)-1) ?
        AFLT_STAT_INVALID : AFLT_STAT_VALID;
    synd_code = synd_to_synd_code(synd_status, synd, C_AFSR_CE);

    if (p2get_mem_info == NULL)
        return (ENOTSUP);

    return ((p2get_mem_info)(synd_code, afar, mem_sizep, seg_sizep,
        bank_sizep, segsp, banksp, mcidp));
}
/*
 * External interface: fetch the unum for the given CPU id via the
 * (weak) platform hook.  ENOTSUP when the platform provides none,
 * ENAMETOOLONG when the result does not fit in buf.
 */
int
cpu_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
{
    char unum[UNUM_NAMLEN];
    int rc;

    if (&plat_get_cpu_unum == NULL)
        return (ENOTSUP);   /* weak symbol not present */

    rc = plat_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, lenp);
    if (rc != 0)
        return (rc);

    if (*lenp >= buflen)
        return (ENAMETOOLONG);

    (void) strncpy(buf, unum, buflen);
    return (0);
}
/*
 * Size callers must allocate for buffers passed to the unum/name
 * lookup routines above.
 */
size_t
cpu_get_name_bufsize(void)
{
    return (UNUM_NAMLEN);
}
/*
 * No-op on this CPU family; retained to satisfy the common CPU-module
 * interface.
 */
/*ARGSUSED*/
void
cpu_read_paddr(struct async_flt *ecc, short verbose, short ce_err)
{}
/*
 * No-op on this CPU family; retained to satisfy the common CPU-module
 * interface.
 */
/*ARGSUSED*/
void
read_ecc_data(struct async_flt *aflt, short verbose, short ce_err)
{}
/*
 * Sample this CPU's error state (AFSR/ext-AFSR/AFAR), optionally
 * record it into the given fault structure, then write the sampled
 * values back to clear the error registers.  Returns nonzero if any
 * error bits were set.  ch_flt may be NULL to just clear and test.
 */
int
clear_errors(ch_async_flt_t *ch_flt)
{
    struct async_flt *aflt = (struct async_flt *)ch_flt;
    ch_cpu_errors_t cpu_error_regs;

    get_cpu_error_state(&cpu_error_regs);

    if (ch_flt != NULL) {
        /* capture the raw registers into the fault record */
        aflt->flt_stat = cpu_error_regs.afsr & C_AFSR_MASK;
        aflt->flt_addr = cpu_error_regs.afar;
        ch_flt->afsr_ext = cpu_error_regs.afsr_ext;
        ch_flt->afsr_errs = (cpu_error_regs.afsr & C_AFSR_ALL_ERRS) |
            (cpu_error_regs.afsr_ext & C_AFSR_EXT_ALL_ERRS);
#if defined(SERRANO)
        ch_flt->afar2 = cpu_error_regs.afar2;
#endif
    }

    /* write the sampled values back, clearing the error state */
    set_cpu_error_state(&cpu_error_regs);

    return (((cpu_error_regs.afsr & C_AFSR_ALL_ERRS) |
        (cpu_error_regs.afsr_ext & C_AFSR_EXT_ALL_ERRS)) != 0);
}
/*
 * Clear this CPU's error state and report whether the pending error,
 * if any, matches the given fault: a memory-class AFSR bit the fault
 * carries, at the same AFAR.  Used by the CE scrub algorithm to see
 * whether its own accesses re-triggered the error.
 */
static int
clear_ecc(struct async_flt *aflt)
{
    ch_cpu_errors_t cpu_error_regs;

    /* sample, then write back to clear (see clear_errors) */
    get_cpu_error_state(&cpu_error_regs);
    set_cpu_error_state(&cpu_error_regs);

    return ((cpu_error_regs.afsr & (C_AFSR_MEMORY & aflt->flt_stat)) != 0 &&
        cpu_error_regs.afar == aflt->flt_addr);
}
/*
 * Disable error reporting on every CPU via cross-trap.
 */
void
cpu_disable_errors(void)
{
    xt_all(set_error_enable_tl1, EN_REG_DISABLE, EER_SET_ABSOLUTE);

    /*
     * NOTE(review): the double call is deliberate in the original
     * source - presumably the first pass can itself trigger logout
     * activity that the second pass must pick up.  Confirm before
     * "simplifying" this to a single call.
     */
    if (enable_check_other_cpus_logout) {
        cpu_check_other_cpus_logout();
        cpu_check_other_cpus_logout();
    }
}
/*
 * Enable error reporting on every CPU via cross-trap.
 */
void
cpu_enable_errors(void)
{
    xt_all(set_error_enable_tl1, EN_REG_ENABLE, EER_SET_ABSOLUTE);
}
/*
 * Flush this CPU's entire E$ using displacement loads from the
 * dedicated flush region.
 */
void
cpu_flush_ecache(void)
{
    flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size,
        cpunodes[CPU->cpu_id].ecache_linesize);
}
/*
 * Return the E$ set size (bytes per way) for the given CPU: prefer
 * the value cached in the CPU-private area, falling back to the
 * PROM-reported total size divided by the way count.
 */
int
cpu_ecache_set_size(struct cpu *cp)
{
    if (CPU_PRIVATE(cp))
        return (CPU_PRIVATE_VAL(cp, chpr_ec_set_size));

    return (cpunodes[cp->cpu_id].ecache_size / cpu_ecache_nway());
}
/*
 * Flush the single E$ line containing the fault address.
 */
static void
cpu_flush_ecache_line(ch_async_flt_t *ch_flt)
{
    struct async_flt *aflt = (struct async_flt *)ch_flt;
    int ec_set_size = cpu_ecache_set_size(CPU);

    ecache_flush_line(aflt->flt_addr, ec_set_size);
}
/*
 * Scrub the memory at the fault address (read + rewrite, correcting
 * single-bit errors on the way through).
 */
static void
cpu_scrubphys(struct async_flt *aflt)
{
    int ec_set_size = cpu_ecache_set_size(CPU);

    scrubphys(aflt->flt_addr, ec_set_size);
}
/*
 * Clear the error at the fault address by overwriting the physical
 * location (used at panic time so the dump path sees clean memory).
 */
void
cpu_clearphys(struct async_flt *aflt)
{
    int lsize = cpunodes[CPU->cpu_id].ecache_linesize;
    int ec_set_size = cpu_ecache_set_size(CPU);

    clearphys(aflt->flt_addr, ec_set_size, lsize);
}
#if defined(CPU_IMP_ECACHE_ASSOC)
/*
 * Search the captured E$ diagnostic data for a valid line matching
 * the fault address.  Returns way+1 on a match, 0 if no valid
 * matching line exists.
 */
static int
cpu_ecache_line_valid(ch_async_flt_t *ch_flt)
{
    struct async_flt *aflt = (struct async_flt *)ch_flt;
    int totalsize = cpunodes[CPU->cpu_id].ecache_size;
    int ec_set_size = cpu_ecache_set_size(CPU);
    ch_ec_data_t *ecp = &ch_flt->flt_diag_data.chd_ec_data[0];
    int nway = cpu_ecache_nway();
    int i;

    for (i = 0; i < nway; i++, ecp++) {
        /* compare set-aligned PAs; skip invalid lines */
        if (!cpu_ectag_line_invalid(totalsize, ecp->ec_tag) &&
            (aflt->flt_addr & P2ALIGN(C_AFAR_PA, ec_set_size)) ==
            cpu_ectag_to_pa(ec_set_size, ecp->ec_tag))
            return (i+1);
    }

    return (0);
}
#endif
/*
 * Search captured E$ logout data for a line matching faddr.  On
 * Panther the L2 is searched first, then the L3; other CPUs search
 * only the E$.  If reqval is set, only lines with a valid tag count.
 * Returns way+1 and sets *level to the matching cache level (2 or 3),
 * or returns 0 when nothing matches.
 */
static int
cpu_matching_ecache_line(uint64_t faddr, void *data, int reqval, int *level)
{
    ch_diag_data_t *cdp = data;
    ch_ec_data_t *ecp;
    int totalsize, ec_set_size;
    int i, ways;
    int match = 0;
    int tagvalid;
    uint64_t addr, tagpa;
    int ispanther = IS_PANTHER(cpunodes[CPU->cpu_id].implementation);

    /*
     * First (or only) pass: Panther L2, or the plain E$ elsewhere.
     * Note: totalsize is only needed (and only set) in the
     * non-Panther case.
     */
    if (ispanther) {
        ecp = &cdp->chd_l2_data[0];
        ec_set_size = PN_L2_SET_SIZE;
        ways = PN_L2_NWAYS;
    } else {
        ecp = &cdp->chd_ec_data[0];
        ec_set_size = cpu_ecache_set_size(CPU);
        ways = cpu_ecache_nway();
        totalsize = cpunodes[CPU->cpu_id].ecache_size;
    }
    /* set-size-aligned PA used for tag comparison */
    addr = faddr & P2ALIGN(C_AFAR_PA, ec_set_size);
    for (i = 0; i < ways; i++, ecp++) {
        if (ispanther) {
            tagpa = PN_L2TAG_TO_PA(ecp->ec_tag);
            tagvalid = !PN_L2_LINE_INVALID(ecp->ec_tag);
        } else {
            tagpa = cpu_ectag_to_pa(ec_set_size, ecp->ec_tag);
            tagvalid = !cpu_ectag_line_invalid(totalsize,
                ecp->ec_tag);
        }
        if (tagpa == addr && (!reqval || tagvalid)) {
            match = i + 1;
            *level = 2;
            break;
        }
    }

    if (match || !ispanther)
        return (match);

    /* Panther second pass: search the L3 (kept in chd_ec_data) */
    ecp = &cdp->chd_ec_data[0];
    ec_set_size = PN_L3_SET_SIZE;
    ways = PN_L3_NWAYS;
    addr = faddr & P2ALIGN(C_AFAR_PA, ec_set_size);

    for (i = 0; i < ways; i++, ecp++) {
        if (PN_L3TAG_TO_PA(ecp->ec_tag) == addr && (!reqval ||
            !PN_L3_LINE_INVALID(ecp->ec_tag))) {
            match = i + 1;
            *level = 3;
            break;
        }
    }

    return (match);
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
/*
 * Locate a D$ parity error by examining every line of every way.
 * The way/offset markers are only reset when no fault has been
 * counted yet, so information from an earlier pass is preserved.
 */
static void
cpu_dcache_parity_info(ch_async_flt_t *ch_flt)
{
    int dc_set_size = dcache_size / CH_DCACHE_NWAY;
    int index;

    if (ch_flt->parity_data.dpe.cpl_lcnt == 0) {
        ch_flt->parity_data.dpe.cpl_way = -1;
        ch_flt->parity_data.dpe.cpl_off = -1;
    }

    /* walk every line index; the check routine covers all ways */
    for (index = 0; index < dc_set_size; index += dcache_linesize)
        cpu_dcache_parity_check(ch_flt, index);
}
/*
 * Check one D$ line index across all ways for tag and data parity
 * mismatches, recording the first fault found (way, cause, snapshot)
 * and counting every mismatch.  Panther carries per-byte data parity
 * in a dedicated register; older CPUs pack 2-bit per-word parity into
 * the utag.
 */
static void
cpu_dcache_parity_check(ch_async_flt_t *ch_flt, int index)
{
    int dc_set_size = dcache_size / CH_DCACHE_NWAY;
    uint64_t parity_bits, pbits, data_word;
    /* parity of a 2-bit pattern: 00->0, 01->1, 10->1, 11->0 */
    static int parity_bits_popc[] = { 0, 1, 1, 0 };
    int way, word, data_byte;
    ch_dc_data_t *dcp = &ch_flt->parity_data.dpe.cpl_dc[0];
    ch_dc_data_t tmp_dcp;

    for (way = 0; way < CH_DCACHE_NWAY; way++, dcp++) {
        /* snapshot this way's tag/data via diagnostic ASI access */
        get_dcache_dtag(index + way * dc_set_size,
            (uint64_t *)&tmp_dcp);

        /* physical-tag parity check */
        if (popc64(tmp_dcp.dc_tag & CHP_DCTAG_PARMASK) & 1) {
            if (ch_flt->parity_data.dpe.cpl_lcnt == 0) {
                ch_flt->parity_data.dpe.cpl_way = way;
                ch_flt->parity_data.dpe.cpl_cache =
                    CPU_DC_PARITY;
                ch_flt->parity_data.dpe.cpl_tag |= CHP_DC_TAG;

                /* also check the snoop tag while we're here */
                if (popc64(tmp_dcp.dc_sntag &
                    CHP_DCSNTAG_PARMASK) & 1) {
                    ch_flt->parity_data.dpe.cpl_tag |=
                        CHP_DC_SNTAG;
                    ch_flt->parity_data.dpe.cpl_lcnt++;
                }

                bcopy(&tmp_dcp, dcp, sizeof (ch_dc_data_t));
            }

            ch_flt->parity_data.dpe.cpl_lcnt++;
        }

        if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
            /*
             * Panther: one parity bit per data byte, packed in
             * dc_pn_data_parity; walk each byte of each word.
             */
            parity_bits = tmp_dcp.dc_pn_data_parity;
            for (word = 0; word < 4; word++) {
                data_word = tmp_dcp.dc_data[word];
                pbits = parity_bits & PN_DC_DATA_PARITY_MASK;
                for (data_byte = 0; data_byte < 8;
                    data_byte++) {
                    if (((popc64(data_word &
                        PN_DC_DATA_PARITY_MASK)) & 1) ^
                        (pbits & 1)) {
                        cpu_record_dc_data_parity(
                            ch_flt, dcp, &tmp_dcp, way,
                            word);
                    }
                    pbits >>= 1;
                    data_word >>= 8;
                }
                parity_bits >>= 8;
            }
        } else {
            /*
             * Pre-Panther: 2 parity bits per 8-byte word live in
             * utag bits <15:8>.
             */
            parity_bits = tmp_dcp.dc_utag >> 8;
            for (word = 0; word < 4; word++) {
                pbits = (parity_bits >> (6 - word * 2)) & 3;

                if ((popc64(tmp_dcp.dc_data[word]) +
                    parity_bits_popc[pbits]) & 1) {
                    cpu_record_dc_data_parity(ch_flt, dcp,
                        &tmp_dcp, way, word);
                }
            }
        }
    }
}
/*
 * Note a D$ data-word parity mismatch: capture the first faulting
 * way/word (and a snapshot of the line) for the ereport, and count
 * every mismatch seen.
 */
static void
cpu_record_dc_data_parity(ch_async_flt_t *ch_flt,
    ch_dc_data_t *dest_dcp, ch_dc_data_t *src_dcp, int way, int word)
{
    /* only the first detected fault is recorded in detail */
    if (ch_flt->parity_data.dpe.cpl_lcnt == 0) {
        ch_flt->parity_data.dpe.cpl_cache = CPU_DC_PARITY;
        ch_flt->parity_data.dpe.cpl_way = way;
        ch_flt->parity_data.dpe.cpl_off = word * 8;     /* byte offset */
        bcopy(src_dcp, dest_dcp, sizeof (*dest_dcp));
    }

    /* always count the fault */
    ch_flt->parity_data.dpe.cpl_lcnt++;
}
/*
 * Locate an I$ parity error by examining every line of every way.
 * Geometry comes from the CPU-private area when available (it may
 * differ from the generic globals), otherwise from the globals.
 */
static void
cpu_icache_parity_info(ch_async_flt_t *ch_flt)
{
    int ic_set_size;
    int ic_linesize;
    int index;

    if (CPU_PRIVATE(CPU)) {
        ic_set_size = CPU_PRIVATE_VAL(CPU, chpr_icache_size) /
            CH_ICACHE_NWAY;
        ic_linesize = CPU_PRIVATE_VAL(CPU, chpr_icache_linesize);
    } else {
        ic_set_size = icache_size / CH_ICACHE_NWAY;
        ic_linesize = icache_linesize;
    }

    ch_flt->parity_data.ipe.cpl_way = -1;
    ch_flt->parity_data.ipe.cpl_off = -1;

    /* walk every line index; the check routine covers all ways */
    for (index = 0; index < ic_set_size; index += ic_linesize)
        cpu_icache_parity_check(ch_flt, index);
}
/*
 * Check all ways of the I$ at the given line index for tag and
 * instruction parity errors.  Every error bumps cpl_lcnt; when the
 * checked index matches the faulting address's index, the way, tag
 * flags, instruction offset and raw line state are also captured
 * into the ipe logout area.
 */
static void
cpu_icache_parity_check(ch_async_flt_t *ch_flt, int index)
{
	uint64_t parmask, pn_inst_parity;
	int ic_set_size;
	int ic_linesize;
	int flt_index, way, instr, num_instr;
	struct async_flt *aflt = (struct async_flt *)ch_flt;
	ch_ic_data_t *icp = &ch_flt->parity_data.ipe.cpl_ic[0];
	ch_ic_data_t tmp_icp;

	/* I$ geometry: per-CPU values if available, else the globals. */
	if (CPU_PRIVATE(CPU)) {
		ic_set_size = CPU_PRIVATE_VAL(CPU, chpr_icache_size) /
		    CH_ICACHE_NWAY;
		ic_linesize = CPU_PRIVATE_VAL(CPU, chpr_icache_linesize);
	} else {
		ic_set_size = icache_size / CH_ICACHE_NWAY;
		ic_linesize = icache_linesize;
	}

	/*
	 * Panther has a larger I$ data capture area and an extra
	 * parity bit folded into the per-instruction parity mask.
	 */
	if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
		num_instr = PN_IC_DATA_REG_SIZE / sizeof (uint64_t);
		pn_inst_parity = PN_ICDATA_PARITY_BIT_MASK;
	} else {
		num_instr = CH_IC_DATA_REG_SIZE / sizeof (uint64_t);
		pn_inst_parity = 0;
	}

	/* Line index (within a set) of the faulting address. */
	flt_index = P2ALIGN(aflt->flt_addr % ic_set_size, ic_linesize);

	for (way = 0; way < CH_ICACHE_NWAY; way++, icp++) {
		/*
		 * NOTE(review): the factor of 2 presumably converts the
		 * byte index into the diagnostic-access addressing used
		 * by get_icache_dtag() -- confirm against that routine.
		 */
		get_icache_dtag(2 * (index + way * ic_set_size),
		    (uint64_t *)&tmp_icp);

		/* Snapshot the raw line when this is the faulting index. */
		if (flt_index == index)
			bcopy(&tmp_icp, icp, sizeof (ch_ic_data_t));

		/* Physical-address tag parity check. */
		if (popc64(tmp_icp.ic_patag & CHP_ICPATAG_PARMASK) & 1) {
			if (flt_index == index) {
				ch_flt->parity_data.ipe.cpl_way = way;
				ch_flt->parity_data.ipe.cpl_tag |= CHP_IC_TAG;

				/* Also check the snoop tag's parity. */
				if (popc64(tmp_icp.ic_sntag &
				    CHP_ICSNTAG_PARMASK) & 1) {
					ch_flt->parity_data.ipe.cpl_tag |=
					    CHP_IC_SNTAG;
					ch_flt->parity_data.ipe.cpl_lcnt++;
				}
			}
			ch_flt->parity_data.ipe.cpl_lcnt++;
			/* Tag is bad; skip this way's data words. */
			continue;
		}

		/*
		 * Check each instruction word.  The parity mask differs
		 * for PC-relative vs. non-PC-relative instructions; the
		 * predecode bit selects which one applies.
		 */
		for (instr = 0; instr < num_instr; instr++) {
			parmask = (tmp_icp.ic_data[instr] &
			    CH_ICDATA_PRED_ISPCREL) ?
			    (CHP_ICDATA_PCREL_PARMASK | pn_inst_parity) :
			    (CHP_ICDATA_NPCREL_PARMASK | pn_inst_parity);
			if (popc64(tmp_icp.ic_data[instr] & parmask) & 1) {
				if (flt_index == index) {
					ch_flt->parity_data.ipe.cpl_way = way;
					/* 4 bytes per instruction */
					ch_flt->parity_data.ipe.cpl_off =
					    instr * 4;
				}
				ch_flt->parity_data.ipe.cpl_lcnt++;
				continue;
			}
		}
	}
}
/*
 * Scan every P$ line index for parity errors.  The P$ shares the dpe
 * logout area with the D$, so the way/offset markers are only reset
 * when no D$ error has been captured yet.
 */
static void
cpu_pcache_parity_info(ch_async_flt_t *ch_flt)
{
	const int set_sz = CH_PCACHE_SIZE / CH_PCACHE_NWAY;
	int idx;

	if (ch_flt->parity_data.dpe.cpl_lcnt == 0) {
		ch_flt->parity_data.dpe.cpl_way = -1;
		ch_flt->parity_data.dpe.cpl_off = -1;
	}

	for (idx = 0; idx < set_sz; idx += CH_PCACHE_LSIZE)
		cpu_pcache_parity_check(ch_flt, idx);
}
/*
 * Check all P$ ways at the given index for data parity errors.  On
 * the first error found the way, cache id, word offset and raw line
 * state are captured into the dpe logout area; every error bumps
 * cpl_lcnt.
 */
static void
cpu_pcache_parity_check(ch_async_flt_t *ch_flt, int index)
{
	int pc_set_size = CH_PCACHE_SIZE / CH_PCACHE_NWAY;
	int pc_data_words = CH_PC_DATA_REG_SIZE / sizeof (uint64_t);
	int way, word, pbit, parity_bits;
	ch_pc_data_t *pcp = &ch_flt->parity_data.dpe.cpl_pc[0];
	ch_pc_data_t tmp_pcp;

	for (way = 0; way < CH_PCACHE_NWAY; way++, pcp++) {
		get_pcache_dtag(index + way * pc_set_size,
		    (uint64_t *)&tmp_pcp);
		/* The stored parity bits live in the status word. */
		parity_bits = PN_PC_PARITY_BITS(tmp_pcp.pc_status);
		for (word = 0; word < pc_data_words; word++) {
			/* Parity bits are ordered MSB-first wrt the words. */
			pbit = (parity_bits >> (pc_data_words - word - 1)) & 1;
			/* Computed parity must match the stored bit. */
			if ((popc64(tmp_pcp.pc_data[word]) & 1) ^ pbit) {
				if (ch_flt->parity_data.dpe.cpl_lcnt == 0) {
					ch_flt->parity_data.dpe.cpl_way = way;
					ch_flt->parity_data.dpe.cpl_cache =
					    CPU_PC_PARITY;
					ch_flt->parity_data.dpe.cpl_off =
					    word * sizeof (uint64_t);
					bcopy(&tmp_pcp, pcp,
					    sizeof (ch_pc_data_t));
				}
				ch_flt->parity_data.dpe.cpl_lcnt++;
			}
		}
	}
}
/*
 * Add the captured D$ diagnostic data to the ereport payload.  Only
 * ways whose logflag carries the magic value were actually captured.
 */
static void
cpu_payload_add_dcache(struct async_flt *aflt, nvlist_t *nvl)
{
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
	ch_dc_data_t dcdata[CH_DCACHE_NWAY];
	uint_t nelem;
	int way, nways, logged = 0;

	/*
	 * A D$ parity fault may have capture state in every way; other
	 * fault types carry only the single diagnostic snapshot.
	 */
	nways = (ch_flt->flt_type == CPU_DC_PARITY) ? CH_DCACHE_NWAY : 1;

	for (way = 0; way < nways; way++) {
		ch_dc_data_t *dcp;

		if (ch_flt->flt_type == CPU_DC_PARITY)
			dcp = &ch_flt->parity_data.dpe.cpl_dc[way];
		else
			dcp = &ch_flt->flt_diag_data.chd_dc_data;

		/* Copy out only the ways marked as captured. */
		if (dcp->dc_logflag == DC_LOGFLAG_MAGIC)
			bcopy(dcp, &dcdata[logged++], sizeof (ch_dc_data_t));
	}

	fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L1D_WAYS,
	    DATA_TYPE_UINT8, (uint8_t)logged, NULL);

	if (logged != 0) {
		nelem = sizeof (ch_dc_data_t) / sizeof (uint64_t) * logged;
		fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L1D_DATA,
		    DATA_TYPE_UINT64_ARRAY, nelem, (uint64_t *)dcdata, NULL);
	}
}
/*
 * Add the captured I$ diagnostic data to the ereport payload.  Only
 * ways whose logflag carries the magic value were actually captured.
 */
static void
cpu_payload_add_icache(struct async_flt *aflt, nvlist_t *nvl)
{
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
	ch_ic_data_t icdata[CH_ICACHE_NWAY];
	uint_t nelem;
	int way, nways, logged = 0;

	/*
	 * An I$ parity fault may have capture state in every way; other
	 * fault types carry only the single diagnostic snapshot.
	 */
	nways = (ch_flt->flt_type == CPU_IC_PARITY) ? CH_ICACHE_NWAY : 1;

	for (way = 0; way < nways; way++) {
		ch_ic_data_t *icp;

		if (ch_flt->flt_type == CPU_IC_PARITY)
			icp = &ch_flt->parity_data.ipe.cpl_ic[way];
		else
			icp = &ch_flt->flt_diag_data.chd_ic_data;

		/* Copy out only the ways marked as captured. */
		if (icp->ic_logflag == IC_LOGFLAG_MAGIC)
			bcopy(icp, &icdata[logged++], sizeof (ch_ic_data_t));
	}

	fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L1I_WAYS,
	    DATA_TYPE_UINT8, (uint8_t)logged, NULL);

	if (logged != 0) {
		nelem = sizeof (ch_ic_data_t) / sizeof (uint64_t) * logged;
		fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L1I_DATA,
		    DATA_TYPE_UINT64_ARRAY, nelem, (uint64_t *)icdata, NULL);
	}
}
#endif
/*
 * Add the captured external-cache diagnostic data to the ereport
 * payload.  On Panther, chd_ec_data[] is reported as the L3 and
 * chd_l2_data[] as the L2; on all other CPUs chd_ec_data[] is
 * reported as the L2.  Only ways whose logflag carries the magic
 * value were actually captured.
 */
static void
cpu_payload_add_ecache(struct async_flt *aflt, nvlist_t *nvl)
{
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
	ch_ec_data_t *ecp;
	ch_ec_data_t ecdata[CHD_EC_DATA_SETS];
	uint_t nelem;
	int i, ways_logged = 0;

	/* Collect the captured external-cache ways. */
	for (i = 0; i < CHD_EC_DATA_SETS; i++) {
		ecp = &ch_flt->flt_diag_data.chd_ec_data[i];
		if (ecp->ec_logflag == EC_LOGFLAG_MAGIC) {
			bcopy(ecp, &ecdata[ways_logged],
			    sizeof (ch_ec_data_t));
			ways_logged++;
		}
	}

	/*
	 * Panther: emit the above as L3 data, then re-collect the
	 * separately captured L2 ways.
	 */
	if (IS_PANTHER(cpunodes[aflt->flt_inst].implementation)) {
		fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L3_WAYS,
		    DATA_TYPE_UINT8, (uint8_t)ways_logged, NULL);
		if (ways_logged != 0) {
			nelem = sizeof (ch_ec_data_t) /
			    sizeof (uint64_t) * ways_logged;
			fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L3_DATA,
			    DATA_TYPE_UINT64_ARRAY, nelem,
			    (uint64_t *)ecdata, NULL);
		}

		ways_logged = 0;
		for (i = 0; i < PN_L2_NWAYS; i++) {
			ecp = &ch_flt->flt_diag_data.chd_l2_data[i];
			if (ecp->ec_logflag == EC_LOGFLAG_MAGIC) {
				bcopy(ecp, &ecdata[ways_logged],
				    sizeof (ch_ec_data_t));
				ways_logged++;
			}
		}
	}

	/* Emit the L2 data (whichever set was collected above). */
	fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L2_WAYS,
	    DATA_TYPE_UINT8, (uint8_t)ways_logged, NULL);
	if (ways_logged != 0) {
		nelem = sizeof (ch_ec_data_t) /
		    sizeof (uint64_t) * ways_logged;
		fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L2_DATA,
		    DATA_TYPE_UINT64_ARRAY, nelem, (uint64_t *)ecdata, NULL);
	}
}
/*
 * Build a cpu-scheme FMRI for the given CPU, using its mask version
 * and its device id formatted as an upper-case hex serial string.
 */
static void
cpu_fmri_cpu_set(nvlist_t *cpu_fmri, int cpuid)
{
	uint8_t mask = cpunodes[cpuid].version;
	char serialbuf[21];	/* fits a 64-bit hex value plus NUL */

	(void) snprintf(serialbuf, sizeof (serialbuf), "%llX",
	    (u_longlong_t)cpunodes[cpuid].device_id);
	(void) fm_fmri_cpu_set(cpu_fmri, FM_CPU_SCHEME_VERSION, NULL,
	    cpuid, &mask, (const char *)serialbuf);
}
/*
 * Classify a fault into the resource type used to build its FMRI:
 * E$ data, memory, CPU, or unknown.
 */
static int
cpu_error_to_resource_type(struct async_flt *aflt)
{
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;

	switch (ch_flt->flt_type) {

	case CPU_CE_ECACHE:
	case CPU_UE_ECACHE:
	case CPU_UE_ECACHE_RETIRE:
	case CPU_ORPH:
		/*
		 * External-cache faults are E$ data errors only when the
		 * AFSR bit says so; otherwise they count against the CPU.
		 */
		return (cpu_error_is_ecache_data(aflt->flt_inst,
		    ch_flt->flt_bit) ? ERRTYPE_ECACHE_DATA : ERRTYPE_CPU);

	case CPU_CE:
	case CPU_UE:
	case CPU_EMC:
	case CPU_DUE:
	case CPU_RCE:
	case CPU_RUE:
	case CPU_FRC:
	case CPU_FRU:
		/* Memory-originated errors. */
		return (ERRTYPE_MEMORY);

	case CPU_IC_PARITY:
	case CPU_DC_PARITY:
	case CPU_FPUERR:
	case CPU_PC_PARITY:
	case CPU_ITLB_PARITY:
	case CPU_DTLB_PARITY:
		/* On-chip structures: attribute to the CPU. */
		return (ERRTYPE_CPU);
	}

	return (ERRTYPE_UNKNOWN);
}
/*
 * Build the ereport payload for the given fault according to its
 * payload flag word, and optionally attach a resource FMRI.  On
 * return *afar_status and *synd_status report the validity of the
 * captured AFAR and syndrome (AFLT_STAT_INVALID unless the matching
 * payload flags were set).
 */
static void
cpu_payload_add_aflt(struct async_flt *aflt, nvlist_t *payload,
    nvlist_t *resource, int *afar_status, int *synd_status)
{
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;

	*synd_status = AFLT_STAT_INVALID;
	*afar_status = AFLT_STAT_INVALID;

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_AFSR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AFSR,
		    DATA_TYPE_UINT64, aflt->flt_stat, NULL);
	}

	/* The extended AFSR is only meaningful on Panther. */
	if ((aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_AFSR_EXT) &&
	    IS_PANTHER(cpunodes[aflt->flt_inst].implementation)) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AFSR_EXT,
		    DATA_TYPE_UINT64, ch_flt->afsr_ext, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_AFAR_STATUS) {
		*afar_status = afsr_to_afar_status(ch_flt->afsr_errs,
		    ch_flt->flt_bit);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AFAR_STATUS,
		    DATA_TYPE_UINT8, (uint8_t)*afar_status, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_AFAR) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AFAR,
		    DATA_TYPE_UINT64, aflt->flt_addr, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PC) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PC,
		    DATA_TYPE_UINT64, (uint64_t)aflt->flt_pc, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TL) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TL,
		    DATA_TYPE_UINT8, (uint8_t)aflt->flt_tl, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TT) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TT,
		    DATA_TYPE_UINT8, flt_to_trap_type(aflt), NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PRIV) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PRIV,
		    DATA_TYPE_BOOLEAN_VALUE,
		    (aflt->flt_priv ? B_TRUE : B_FALSE), NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_ME) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ME,
		    DATA_TYPE_BOOLEAN_VALUE,
		    (aflt->flt_stat & C_AFSR_ME) ? B_TRUE : B_FALSE, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SYND_STATUS) {
		*synd_status = afsr_to_synd_status(aflt->flt_inst,
		    ch_flt->afsr_errs, ch_flt->flt_bit);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SYND_STATUS,
		    DATA_TYPE_UINT8, (uint8_t)*synd_status, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SYND) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SYND,
		    DATA_TYPE_UINT16, (uint16_t)aflt->flt_synd, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_ERR_TYPE) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERR_TYPE,
		    DATA_TYPE_STRING, flt_to_error_type(aflt), NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_ERR_DISP) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERR_DISP,
		    DATA_TYPE_UINT64, aflt->flt_disp, NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAGS_L2)
		cpu_payload_add_ecache(aflt, payload);

	/* The copy function is packed into the low byte of flt_status. */
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_COPYFUNCTION) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_COPYFUNCTION,
		    DATA_TYPE_UINT8, (uint8_t)aflt->flt_status & 0xff, NULL);
	}

	/* The how-detected code occupies the next byte of flt_status. */
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_HOWDETECTED) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_HOWDETECTED,
		    DATA_TYPE_UINT8, (uint8_t)(aflt->flt_status >> 8), NULL);
	}

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_INSTRBLOCK) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_INSTRBLOCK,
		    DATA_TYPE_UINT32_ARRAY, 16,
		    (uint32_t *)&ch_flt->flt_fpdata, NULL);
	}

#if defined(CPU_IMP_L1_CACHE_PARITY)
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAGS_L1D)
		cpu_payload_add_dcache(aflt, payload);
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAGS_L1I)
		cpu_payload_add_icache(aflt, payload);
#endif	/* CPU_IMP_L1_CACHE_PARITY */

#if defined(CHEETAH_PLUS)
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAGS_L1P)
		cpu_payload_add_pcache(aflt, payload);
	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAGS_TLB)
		cpu_payload_add_tlb(aflt, payload);
#endif	/* CHEETAH_PLUS */

	if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_RESOURCE) {
		char unum[UNUM_NAMLEN] = "";
		char sid[DIMM_SERIAL_ID_LEN] = "";
		int len, ret, rtype, synd_code;
		uint64_t offset = (uint64_t)-1;

		rtype = cpu_error_to_resource_type(aflt);
		switch (rtype) {

		case ERRTYPE_MEMORY:
		case ERRTYPE_ECACHE_DATA:
			/* A memory-scheme FMRI requires a valid AFAR. */
			if (*afar_status == AFLT_STAT_INVALID)
				break;

			if (rtype == ERRTYPE_ECACHE_DATA)
				aflt->flt_status |= ECC_ECACHE;
			else
				aflt->flt_status &= ~ECC_ECACHE;

			synd_code = synd_to_synd_code(*synd_status,
			    aflt->flt_synd, ch_flt->flt_bit);

			/* No unum, no FMRI. */
			if (cpu_get_mem_unum_synd(synd_code, aflt, unum) != 0)
				break;

			ret = cpu_get_mem_sid(unum, sid, DIMM_SERIAL_ID_LEN,
			    &len);

			/* Only look up an offset when we got a serial id. */
			if (ret == 0) {
				(void) cpu_get_mem_offset(aflt->flt_addr,
				    &offset);
			}

			fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION,
			    NULL, unum, (ret == 0) ? sid : NULL, offset);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_RESOURCE,
			    DATA_TYPE_NVLIST, resource, NULL);
			break;

		case ERRTYPE_CPU:
			/* On-chip errors are attributed to the CPU itself. */
			cpu_fmri_cpu_set(resource, aflt->flt_inst);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_RESOURCE,
			    DATA_TYPE_NVLIST, resource, NULL);
			break;
		}
	}
}
/*
 * Initialize the diagnostic logout areas of a fault structure with
 * their way numbers so later capture code can tell which cache way
 * each entry describes.
 */
void
cpu_ereport_init(struct async_flt *aflt)
{
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
	int way;

	for (way = 0; way < CHD_EC_DATA_SETS; way++)
		ch_flt->flt_diag_data.chd_ec_data[way].ec_way = way;

	for (way = 0; way < PN_L2_NWAYS; way++)
		ch_flt->flt_diag_data.chd_l2_data[way].ec_way = way;
}
/*
 * Returns nonzero if the error bit refers to a memory error: it must
 * be a memory-capable error, the captured AFAR must be valid for it,
 * and the AFAR must point at real memory.
 */
int
cpu_flt_in_memory(ch_async_flt_t *ch_flt, uint64_t t_afsr_bit)
{
	struct async_flt *aflt = (struct async_flt *)ch_flt;

	if (!(t_afsr_bit & C_AFSR_MEMORY))
		return (0);

	if (afsr_to_afar_status(ch_flt->afsr_errs, t_afsr_bit) !=
	    AFLT_STAT_VALID)
		return (0);

	return (pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT) != 0);
}
/*
 * Like cpu_flt_in_memory(), but for a single queued event: an
 * AMBIGUOUS AFAR is still accepted when every error sharing the
 * capture's overwrite-priority class is itself a memory error, since
 * the AFAR then holds a memory address whichever error captured it.
 */
static int
cpu_flt_in_memory_one_event(ch_async_flt_t *ch_flt, uint64_t t_afsr_bit)
{
	struct async_flt *aflt = (struct async_flt *)ch_flt;
	int afar_status;
	uint64_t afsr_errs, afsr_ow, *ow_bits;

	if (!(t_afsr_bit & C_AFSR_MEMORY) ||
	    !pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT))
		return (0);

	afsr_errs = ch_flt->afsr_errs;
	afar_status = afsr_to_afar_status(afsr_errs, t_afsr_bit);

	switch (afar_status) {
	case AFLT_STAT_VALID:
		return (1);

	case AFLT_STAT_AMBIGUOUS:
		/*
		 * Find the overwrite-priority class containing our bit;
		 * if any other asserted error in that class is not a
		 * memory error, we can't trust the AFAR.
		 */
		ow_bits = afar_overwrite;
		while ((afsr_ow = *ow_bits++) != 0) {
			if (afsr_ow & t_afsr_bit) {
				if ((afsr_errs & afsr_ow) & ~C_AFSR_MEMORY)
					return (0);
				else
					return (1);
			}
		}
		/* FALLTHRU - bit not found in any class */

	default:
		return (0);
	}
}
/*
 * Decide which captured diagnostic entries (E$/L2 ways, D$ line, I$
 * line) are valid for this fault and mark them with their logflag
 * magic values so the payload-building code emits them.  Bails out
 * if the logout area is invalid or the capture was incomplete.
 */
static void
cpu_log_diag_info(ch_async_flt_t *ch_flt)
{
	struct async_flt *aflt = (struct async_flt *)ch_flt;
	ch_dc_data_t *dcp = &ch_flt->flt_diag_data.chd_dc_data;
	ch_ic_data_t *icp = &ch_flt->flt_diag_data.chd_ic_data;
	ch_ec_data_t *ecp = &ch_flt->flt_diag_data.chd_ec_data[0];
#if defined(CPU_IMP_ECACHE_ASSOC)
	int i, nway;
#endif	/* CPU_IMP_ECACHE_ASSOC */

	if (ch_flt->flt_diag_data.chd_afar == LOGOUT_INVALID ||
	    ch_flt->flt_data_incomplete)
		return;

#if defined(CPU_IMP_ECACHE_ASSOC)
	/*
	 * If exactly one E$ way matched the fault (1-based result),
	 * log just that way; otherwise log all of them.
	 */
	nway = cpu_ecache_nway();
	i = cpu_ecache_line_valid(ch_flt);
	if (i == 0 || i > nway) {
		for (i = 0; i < nway; i++)
			ecp[i].ec_logflag = EC_LOGFLAG_MAGIC;
	} else
		ecp[i - 1].ec_logflag = EC_LOGFLAG_MAGIC;
#else	/* CPU_IMP_ECACHE_ASSOC */
	ecp->ec_logflag = EC_LOGFLAG_MAGIC;
#endif	/* CPU_IMP_ECACHE_ASSOC */

#if defined(CHEETAH_PLUS)
	pn_cpu_log_diag_l2_info(ch_flt);
#endif	/* CHEETAH_PLUS */

	/* The D$ entry is valid only if its tag matches the fault addr. */
	if (CH_DCTAG_MATCH(dcp->dc_tag, aflt->flt_addr)) {
		dcp->dc_way = CH_DCIDX_TO_WAY(dcp->dc_idx);
		dcp->dc_logflag = DC_LOGFLAG_MAGIC;
	}

	/* Likewise for the I$; Panther encodes the way differently. */
	if (CH_ICTAG_MATCH(icp, aflt->flt_addr)) {
		if (IS_PANTHER(cpunodes[aflt->flt_inst].implementation))
			icp->ic_way = PN_ICIDX_TO_WAY(icp->ic_idx);
		else
			icp->ic_way = CH_ICIDX_TO_WAY(icp->ic_idx);
		icp->ic_logflag = IC_LOGFLAG_MAGIC;
	}
}
/*
 * Masks used to compute the 9 ECC check bits over a 128-bit data
 * word supplied as two 64-bit halves: for check bit i, row [i][0]
 * selects bits of the low half and [i][1] bits of the high half
 * (consumed by us3_gen_ecc() below).
 */
static uint64_t ch_ecc_table[9][2] = {
	{ 0x46bffffeccd1177f, 0x488800022100014c },
	{ 0x42fccc81331ff77f, 0x14424f1010249184 },
	{ 0x8898827c222f1ffe, 0x22c1222808184aaf },
	{ 0xf7632203e131ccf1, 0xe1241121848292b8 },
	{ 0x7f5511421b113809, 0x901c88d84288aafe },
	{ 0x1d49412184882487, 0x8f338c87c044c6ef },
	{ 0xf552181014448344, 0x7ff8f4443e411911 },
	{ 0x2189240808f24228, 0xfeeff8cc81333f42 },
	{ 0x3280008440001112, 0xfee88b337ffffd62 },
};
/*
 * Return the population count (number of one bits) of val.
 */
int
popc64(uint64_t val)
{
	int bits = 0;

	/* Examine each bit position from LSB upward. */
	while (val != 0) {
		bits += (int)(val & 1);
		val >>= 1;
	}
	return (bits);
}
/*
 * Generate the 9-bit ECC check value for a 128-bit data word given
 * as two 64-bit halves: check bit i is the parity of the data bits
 * selected by row i of ch_ecc_table above.
 */
uint32_t
us3_gen_ecc(uint64_t data_low, uint64_t data_high)
{
	uint32_t ecc = 0;
	int bit;

	for (bit = 0; bit < 9; bit++) {
		int parity = (popc64(data_low & ch_ecc_table[bit][0]) +
		    popc64(data_high & ch_ecc_table[bit][1])) & 1;
		ecc |= (uint32_t)parity << bit;
	}
	return (ecc);
}
/*
 * Finish filling in the fault structure for one error bit (append
 * the reason string, record the error bit, fault type, diagnostic
 * data, in-memory flag, syndrome and payload id) and dispatch it:
 * panics and level-1 errors go to the UE queue, the rest to the CE
 * queue.
 */
static void
cpu_queue_one_event(ch_async_flt_t *ch_flt, char *reason,
    ecc_type_to_info_t *eccp, ch_diag_data_t *cdp)
{
	struct async_flt *aflt = (struct async_flt *)ch_flt;

	/* Append this error's reason text if it still fits. */
	if (reason &&
	    strlen(reason) + strlen(eccp->ec_reason) < MAX_REASON_STRING) {
		(void) strcat(reason, eccp->ec_reason);
	}

	ch_flt->flt_bit = eccp->ec_afsr_bit;
	ch_flt->flt_type = eccp->ec_flt_type;
	if (cdp != NULL && cdp->chd_afar != LOGOUT_INVALID)
		ch_flt->flt_diag_data = *cdp;
	else
		ch_flt->flt_diag_data.chd_afar = LOGOUT_INVALID;
	aflt->flt_in_memory =
	    cpu_flt_in_memory_one_event(ch_flt, ch_flt->flt_bit);

	/* Pick the syndrome matching this class of error bit. */
	if (ch_flt->flt_bit & C_AFSR_MSYND_ERRS)
		aflt->flt_synd = GET_M_SYND(aflt->flt_stat);
	else if (ch_flt->flt_bit & (C_AFSR_ESYND_ERRS | C_AFSR_EXT_ESYND_ERRS))
		aflt->flt_synd = GET_E_SYND(aflt->flt_stat);
	else
		aflt->flt_synd = 0;	/* no syndrome for this error */

	aflt->flt_payload = eccp->ec_err_payload;

	/* Panics and level-1 errors use the UE queue; others the CE queue. */
	if (aflt->flt_panic || (eccp->ec_afsr_bit &
	    (C_AFSR_LEVEL1 | C_AFSR_EXT_LEVEL1)))
		cpu_errorq_dispatch(eccp->ec_err_class,
		    (void *)ch_flt, sizeof (ch_async_flt_t), ue_queue,
		    aflt->flt_panic);
	else
		cpu_errorq_dispatch(eccp->ec_err_class,
		    (void *)ch_flt, sizeof (ch_async_flt_t), ce_queue,
		    aflt->flt_panic);
}
/*
 * Queue one ereport per error bit set in t_afsr_errs.  On Cheetah+
 * class CPUs the shadow (TL>0) logout data is consumed first, then
 * the primary data; a final sweep queues any leftover bits whose
 * ec_flags don't match flt_status so nothing recorded is dropped.
 * Returns the number of events queued.
 */
int
cpu_queue_events(ch_async_flt_t *ch_flt, char *reason, uint64_t t_afsr_errs,
    ch_cpu_logout_t *clop)
{
	struct async_flt *aflt = (struct async_flt *)ch_flt;
	ecc_type_to_info_t *eccp;
	int nevents = 0;
	uint64_t primary_afar = aflt->flt_addr, primary_afsr = aflt->flt_stat;
#if defined(CHEETAH_PLUS)
	uint64_t orig_t_afsr_errs;
#endif	/* CHEETAH_PLUS */
	uint64_t primary_afsr_ext = ch_flt->afsr_ext;
	uint64_t primary_afsr_errs = ch_flt->afsr_errs;
	ch_diag_data_t *cdp = NULL;

	/* Only consider known error bits; ME is handled separately. */
	t_afsr_errs &= ((C_AFSR_ALL_ERRS & ~C_AFSR_ME) | C_AFSR_EXT_ALL_ERRS);

#if defined(CHEETAH_PLUS)
	orig_t_afsr_errs = t_afsr_errs;

	/* Process the shadow (TL>0) AFSR/AFAR capture first, if any. */
	if (clop != NULL) {
		cdp = &clop->clo_sdw_data;
		aflt->flt_addr = ch_flt->flt_sdw_afar = cdp->chd_afar;
		aflt->flt_stat = ch_flt->flt_sdw_afsr = cdp->chd_afsr;
		ch_flt->afsr_ext = ch_flt->flt_sdw_afsr_ext = cdp->chd_afsr_ext;
		ch_flt->afsr_errs = (cdp->chd_afsr_ext & C_AFSR_EXT_ALL_ERRS) |
		    (cdp->chd_afsr & C_AFSR_ALL_ERRS);

		/*
		 * Flag the shadow capture when it differs from the
		 * primary one.
		 */
		if ((primary_afar != cdp->chd_afar) ||
		    (primary_afsr_errs != ch_flt->afsr_errs)) {
			aflt->flt_stat |= (1ull << C_AFSR_FIRSTFLT_SHIFT);
		}

		/*
		 * Queue events for shadow error bits that also match
		 * the trap status; the diag data rides along with the
		 * first event only (cdp is cleared after use).
		 */
		for (eccp = ecc_type_to_info; eccp->ec_desc != NULL; eccp++) {
			if ((eccp->ec_afsr_bit &
			    (ch_flt->afsr_errs & t_afsr_errs)) &&
			    ((eccp->ec_flags & aflt->flt_status) != 0)) {
				cpu_queue_one_event(ch_flt, reason, eccp, cdp);
				cdp = NULL;
				t_afsr_errs &= ~eccp->ec_afsr_bit;
				nevents++;
			}
		}

		/*
		 * With ME set, restore the multiply-reported error bits
		 * for the primary pass below.
		 */
		if ((primary_afsr & C_AFSR_ME) != 0)
			t_afsr_errs = (orig_t_afsr_errs & C_AFSR_ALL_ME_ERRS);
	}
#endif	/* CHEETAH_PLUS */

	if (clop != NULL)
		cdp = &clop->clo_data;

	/*
	 * Queue events from the primary AFSR/AFAR for error bits whose
	 * ec_flags match the trap status.
	 */
	for (eccp = ecc_type_to_info; t_afsr_errs != 0 && eccp->ec_desc != NULL;
	    eccp++) {
		if ((eccp->ec_afsr_bit & t_afsr_errs) != 0 &&
		    (eccp->ec_flags & aflt->flt_status) != 0) {
#if defined(SERRANO)
			/*
			 * Serrano keeps FRC/FRU captures in AFAR2 and
			 * the shadow logout area.
			 */
			if (eccp->ec_afsr_bit & (C_AFSR_FRC | C_AFSR_FRU)) {
				if (clop != NULL)
					cdp = &clop->clo_sdw_data;
				aflt->flt_addr = ch_flt->afar2;
			} else {
				if (clop != NULL)
					cdp = &clop->clo_data;
				aflt->flt_addr = primary_afar;
			}
#else	/* SERRANO */
			aflt->flt_addr = primary_afar;
#endif	/* SERRANO */
			aflt->flt_stat = primary_afsr;
			ch_flt->afsr_ext = primary_afsr_ext;
			ch_flt->afsr_errs = primary_afsr_errs;
			cpu_queue_one_event(ch_flt, reason, eccp, cdp);
			cdp = NULL;
			t_afsr_errs &= ~eccp->ec_afsr_bit;
			nevents++;
		}
	}

	/*
	 * Final sweep: queue any remaining error bits even without an
	 * ec_flags/flt_status match so no recorded error is lost.
	 */
	for (eccp = ecc_type_to_info; t_afsr_errs != 0 && eccp->ec_desc != NULL;
	    eccp++) {
		if (eccp->ec_afsr_bit & t_afsr_errs) {
#if defined(SERRANO)
			if (eccp->ec_afsr_bit & (C_AFSR_FRC | C_AFSR_FRU)) {
				if (clop != NULL)
					cdp = &clop->clo_sdw_data;
				aflt->flt_addr = ch_flt->afar2;
			} else {
				if (clop != NULL)
					cdp = &clop->clo_data;
				aflt->flt_addr = primary_afar;
			}
#else	/* SERRANO */
			aflt->flt_addr = primary_afar;
#endif	/* SERRANO */
			aflt->flt_stat = primary_afsr;
			ch_flt->afsr_ext = primary_afsr_ext;
			ch_flt->afsr_errs = primary_afsr_errs;
			cpu_queue_one_event(ch_flt, reason, eccp, cdp);
			cdp = NULL;
			t_afsr_errs &= ~eccp->ec_afsr_bit;
			nevents++;
		}
	}
	return (nevents);
}
uint8_t
flt_to_trap_type(struct async_flt *aflt)
{
if (aflt->flt_status & ECC_I_TRAP)
return (TRAP_TYPE_ECC_I);
if (aflt->flt_status & ECC_D_TRAP)
return (TRAP_TYPE_ECC_D);
if (aflt->flt_status & ECC_F_TRAP)
return (TRAP_TYPE_ECC_F);
if (aflt->flt_status & ECC_C_TRAP)
return (TRAP_TYPE_ECC_C);
if (aflt->flt_status & ECC_DP_TRAP)
return (TRAP_TYPE_ECC_DP);
if (aflt->flt_status & ECC_IP_TRAP)
return (TRAP_TYPE_ECC_IP);
if (aflt->flt_status & ECC_ITLB_TRAP)
return (TRAP_TYPE_ECC_ITLB);
if (aflt->flt_status & ECC_DTLB_TRAP)
return (TRAP_TYPE_ECC_DTLB);
return (TRAP_TYPE_UNKNOWN);
}
/*
 * Descriptive strings for correctable-error dispositions, indexed by
 * the CE_DISP_* value (see flt_to_error_type() below).
 */
static char *cetypes[] = {
	CE_DISP_DESC_U,
	CE_DISP_DESC_I,
	CE_DISP_DESC_PP,
	CE_DISP_DESC_P,
	CE_DISP_DESC_L,
	CE_DISP_DESC_PS,
	CE_DISP_DESC_S
};
/*
 * Map a correctable-error disposition (aflt->flt_disp) to its
 * descriptive string.  Starts from the detector's verdict in the
 * disposition table and refines "possibly persistent" / "possibly
 * sticky" results using the leaky-test and partner-test info bytes.
 */
char *
flt_to_error_type(struct async_flt *aflt)
{
	ce_dispact_t dispact, disp;
	uchar_t dtcrinfo, ptnrinfo, lkyinfo;

	/* Nothing useful without an extended diagnosis. */
	if (!ce_disp_inited || !aflt->flt_in_memory || aflt->flt_disp == 0)
		return (cetypes[CE_DISP_UNKNOWN]);

	dtcrinfo = CE_XDIAG_DTCRINFO(aflt->flt_disp);
	if (!CE_XDIAG_EXT_ALG_APPLIED(dtcrinfo))
		return (cetypes[CE_DISP_UNKNOWN]);

	/* Look up the detector's verdict in the disposition table. */
	dispact = CE_DISPACT(ce_disp_table,
	    CE_XDIAG_AFARMATCHED(dtcrinfo),
	    CE_XDIAG_STATE(dtcrinfo),
	    CE_XDIAG_CE1SEEN(dtcrinfo),
	    CE_XDIAG_CE2SEEN(dtcrinfo));

	ASSERT(dispact != CE_DISP_BAD);
	if (dispact == CE_DISP_BAD)
		return (cetypes[CE_DISP_UNKNOWN]);

	disp = CE_DISP(dispact);

	switch (disp) {
	case CE_DISP_UNKNOWN:
	case CE_DISP_INTERMITTENT:
		break;

	case CE_DISP_POSS_PERS:
		/* A valid leaky test refines this to leaky or persistent. */
		lkyinfo = CE_XDIAG_LKYINFO(aflt->flt_disp);
		if (CE_XDIAG_TESTVALID(lkyinfo)) {
			if (CE_XDIAG_CE1SEEN(lkyinfo) ||
			    CE_XDIAG_CE2SEEN(lkyinfo))
				disp = CE_DISP_LEAKY;
			else
				disp = CE_DISP_PERS;
		}
		break;

	case CE_DISP_POSS_STICKY:
		/* Both partner-test reads seeing the CE confirms sticky. */
		ptnrinfo = CE_XDIAG_PTNRINFO(aflt->flt_disp);
		if (CE_XDIAG_TESTVALID(ptnrinfo) &&
		    CE_XDIAG_CE1SEEN(ptnrinfo) && CE_XDIAG_CE2SEEN(ptnrinfo))
			disp = CE_DISP_STICKY;

		/*
		 * Fix: the skip code is recorded in the partner-test
		 * info byte, not in the disposition value itself, so
		 * extract it from ptnrinfo.  A partner test skipped
		 * because the system is a uniprocessor is treated as
		 * sticky.
		 */
		if (disp == CE_DISP_POSS_STICKY &&
		    CE_XDIAG_SKIPCODE(ptnrinfo) == CE_XDIAG_SKIP_UNIPROC)
			disp = CE_DISP_STICKY;
		break;

	default:
		disp = CE_DISP_UNKNOWN;
		break;
	}

	return (cetypes[disp]);
}
/*
 * Walk the overwrite-priority list to decide whether a captured
 * register value (AFAR or syndrome) belongs to afsr_bit.  Each entry
 * of ow_bits is a mask of error bits at one priority level, highest
 * priority first; the list is zero-terminated.
 *
 * Returns:
 *   AFLT_STAT_VALID     - afsr_bit is at the highest asserted level
 *                         with no companions, so the capture is its.
 *   AFLT_STAT_AMBIGUOUS - other errors at the same level are also
 *                         asserted; can't tell whose capture it is.
 *   AFLT_STAT_INVALID   - a higher-priority error overwrote it.
 */
int
afsr_to_overw_status(uint64_t afsr, uint64_t afsr_bit, uint64_t *ow_bits)
{
	uint64_t afsr_ow;

	while ((afsr_ow = *ow_bits++) != 0) {
		if (afsr_ow & afsr_bit) {
			/* Our level: any sibling asserted => ambiguous. */
			if (afsr & (afsr_ow & ~afsr_bit))
				return (AFLT_STAT_AMBIGUOUS);
			return (AFLT_STAT_VALID);
		} else if (afsr & afsr_ow)
			/* A higher-priority error already captured. */
			break;
	}
	return (AFLT_STAT_INVALID);
}
/*
 * Determine whether the captured AFAR is valid for the given error
 * bit.  Serrano FRC/FRU errors capture into AFAR2, which has its own
 * overwrite-priority table.
 */
static int
afsr_to_afar_status(uint64_t afsr, uint64_t afsr_bit)
{
#if defined(SERRANO)
	if (afsr_bit & (C_AFSR_FRC | C_AFSR_FRU))
		return (afsr_to_overw_status(afsr, afsr_bit, afar2_overwrite));
	else
#endif	/* SERRANO */
		return (afsr_to_overw_status(afsr, afsr_bit, afar_overwrite));
}
/*
 * Determine whether the captured E-syndrome is valid for afsr_bit.
 */
static int
afsr_to_esynd_status(uint64_t afsr, uint64_t afsr_bit)
{
	return (afsr_to_overw_status(afsr, afsr_bit, esynd_overwrite));
}
/*
 * Determine whether the captured M-syndrome is valid for afsr_bit.
 */
static int
afsr_to_msynd_status(uint64_t afsr, uint64_t afsr_bit)
{
	return (afsr_to_overw_status(afsr, afsr_bit, msynd_overwrite));
}
/*
 * Determine whether the captured syndrome (M-syndrome for system-bus
 * errors, E-syndrome otherwise) is valid for the given error bit.
 * Panther applies its own overwrite policies.  Note the closing
 * brace of the else-if chain is shared across the #if arms, so the
 * bodies below must stay textually aligned with each other.
 */
static int
afsr_to_synd_status(uint_t cpuid, uint64_t afsr, uint64_t afsr_bit)
{
#ifdef lint
	cpuid = cpuid;	/* quiet lint when cpuid is unused below */
#endif
#if defined(CHEETAH_PLUS)
	if (afsr_bit & C_AFSR_MSYND_ERRS) {
		if (IS_PANTHER(cpunodes[cpuid].implementation))
			return (afsr_to_msynd_status(afsr, afsr_bit));
		else
			return (afsr_to_esynd_status(afsr, afsr_bit));
	} else if (afsr_bit & (C_AFSR_ESYND_ERRS | C_AFSR_EXT_ESYND_ERRS)) {
		if (IS_PANTHER(cpunodes[cpuid].implementation))
			return (afsr_to_pn_esynd_status(afsr, afsr_bit));
		else
			return (afsr_to_esynd_status(afsr, afsr_bit));
#else	/* CHEETAH_PLUS */
	if (afsr_bit & C_AFSR_MSYND_ERRS) {
		return (afsr_to_msynd_status(afsr, afsr_bit));
	} else if (afsr_bit & (C_AFSR_ESYND_ERRS | C_AFSR_EXT_ESYND_ERRS)) {
		return (afsr_to_esynd_status(afsr, afsr_bit));
#endif	/* CHEETAH_PLUS */
	} else {
		return (AFLT_STAT_INVALID);
	}
}
/*
 * Slave side of STICK synchronization.  Spins (with preemption
 * disabled) until the master signals SLAVE_START, then runs rounds
 * of stick_iter timestamp exchanges with the master.  The skew of
 * this CPU's STICK is computed as half the difference of the two
 * one-way intervals and corrected with stick_adj() until the
 * average skew falls within stick_tsk.
 *
 * Fix: the stick_timestamp() arguments had been mangled by a bad
 * character conversion ("&times;tamp[...]"); restored to
 * "&timestamp[...]".
 */
void
sticksync_slave(void)
{
	int i;
	int tries = 0;
	int64_t tskew;
	int64_t av_tskew;

	kpreempt_disable();

	/* wait for the master side to start */
	while (stick_sync_cmd != SLAVE_START)
		;

	while (slave_done == 0) {
		av_tskew = tskew = 0;

		for (i = 0; i < stick_iter; i++) {
			/* make the location hot in the cache */
			timestamp[EV_A_START] = 0;
			stick_timestamp(&timestamp[EV_A_START]);

			/* tell the master we're ready */
			stick_sync_cmd = MASTER_START;

			/* and wait for it to respond */
			while (stick_sync_cmd != SLAVE_CONT)
				;

			stick_timestamp(&timestamp[EV_B_END]);

			/*
			 * Skew is half the difference of the two
			 * one-way intervals measured on each side.
			 */
			tskew = ((timestamp[EV_B_END] - timestamp[EV_B_START])
			    - (timestamp[EV_A_END] - timestamp[EV_A_START]))
			    / 2;

			av_tskew += tskew;
		}

		/*
		 * Adjust STICK by the average measured skew, unless it
		 * is already within tolerance.
		 */
		if (stick_iter != 0)
			av_tskew = av_tskew/stick_iter;
		if (ABS(av_tskew) > stick_tsk) {
			/*
			 * NOTE(review): a skew of exactly 1 is bumped
			 * to 2 before adjusting -- presumably an
			 * adjustment of one tick is ineffective;
			 * confirm against stick_adj().
			 */
			if (av_tskew == 1)
				av_tskew++;
			stick_adj(-av_tskew);
		} else
			slave_done = 1;
#ifdef DEBUG
		if (tries < DSYNC_ATTEMPTS)
			stick_sync_stats[CPU->cpu_id].skew_val[tries] =
			    av_tskew;
		++tries;
#endif	/* DEBUG */
#ifdef lint
		tries = tries;
#endif
	}

	/* let the master know we're done */
	stick_sync_cmd = EVENT_NULL;
	kpreempt_enable();
}
/*
 * Master side of STICK synchronization: start the slave, then for
 * each of its stick_iter exchanges take the "A end" timestamp on
 * request, publish a fresh "B start" timestamp, and hand control
 * back; repeat until the slave signals completion.
 *
 * Fix: the stick_timestamp() arguments had been mangled by a bad
 * character conversion ("&times;tamp[...]"); restored to
 * "&timestamp[...]".
 */
void
sticksync_master(void)
{
	int i;

	kpreempt_disable();

	/* tell the slave we're ready */
	slave_done = 0;
	stick_sync_cmd = SLAVE_START;

	while (slave_done == 0) {
		for (i = 0; i < stick_iter; i++) {
			/* wait for the slave's request */
			while (stick_sync_cmd != MASTER_START)
				;

			stick_timestamp(&timestamp[EV_A_END]);

			/* make the location hot in the cache */
			timestamp[EV_B_START] = 0;
			stick_timestamp(&timestamp[EV_B_START]);

			/* let the slave take its end timestamp */
			stick_sync_cmd = SLAVE_CONT;
		}
		/* wait while the slave evaluates (and possibly retries) */
		while (stick_sync_cmd == SLAVE_CONT)
			;
	}
	kpreempt_enable();
}
/*
 * Cross-CPU fault check hook: intentionally a no-op on this
 * platform.
 */
void
cpu_check_allcpus(struct async_flt *aflt)
{}
/* kmem cache backing the per-CPU cheetah_private_t allocations */
struct kmem_cache *ch_private_cache;
/*
 * Tear down a CPU's private data: stop the scrubber bookkeeping,
 * detach the private area, clear the TL1 error buffer pointer, free
 * the private structure back to its cache and remove the CPU from
 * its CMP mapping.
 */
void
cpu_uninit_private(struct cpu *cp)
{
	cheetah_private_t *chprp = CPU_PRIVATE(cp);

	ASSERT(chprp);
	cpu_uninit_ecache_scrub_dr(cp);
	CPU_PRIVATE(cp) = NULL;
	ch_err_tl1_paddrs[cp->cpu_id] = 0;
	kmem_cache_free(ch_private_cache, chprp);
	cmp_delete_cpu(cp->cpu_id);
}
/* D$ scrubbing enabled by default. */
int dcache_scrub_enable = 1;

/* Softint PILs at which each scrubber handler runs. */
uint_t ecache_scrub_pil = PIL_9;
uint_t dcache_scrub_pil = PIL_9;
uint_t icache_scrub_pil = PIL_9;

/* E$ scrubbing defaults off on Jalapeno, on elsewhere. */
#if defined(JALAPENO)
int ecache_scrub_enable = 0;
#else	/* JALAPENO */
int ecache_scrub_enable = 1;
#endif	/* JALAPENO */

/*
 * Scrubber tunables: calls per second and scan rates.  A scan rate
 * is consumed as (nlines * rate) / (1000 * calls_a_sec) lines per
 * call (see the scrub_*_line_intr() handlers), i.e. thousandths of
 * the cache scanned per second.
 */
#if defined(CHEETAH_PLUS) || defined(JALAPENO) || defined(SERRANO)
int icache_scrub_enable = 0;
int ecache_calls_a_sec = 1;
int dcache_calls_a_sec = 2;
int icache_calls_a_sec = 2;
int ecache_scan_rate_idle = 1;
int ecache_scan_rate_busy = 1;
int dcache_scan_rate_idle = 1;
int dcache_scan_rate_busy = 1;
int icache_scan_rate_idle = 1;
int icache_scan_rate_busy = 1;
#else	/* CHEETAH_PLUS || JALAPENO || SERRANO */
int icache_scrub_enable = 1;
int ecache_calls_a_sec = 100;
int dcache_calls_a_sec = 100;
int icache_calls_a_sec = 100;
int ecache_scan_rate_idle = 100;
int ecache_scan_rate_busy = 100;
int dcache_scan_rate_idle = 100;
int dcache_scan_rate_busy = 100;
int icache_scan_rate_idle = 100;
int icache_scan_rate_busy = 100;
#endif	/* CHEETAH_PLUS || JALAPENO || SERRANO */
/* TL1 cross-trap handler used to poke offline CPUs' scrubbers. */
extern xcfunc_t cache_scrubreq_tl1;

/* Softint handlers that do the actual per-cache scrubbing. */
static uint_t scrub_ecache_line_intr(caddr_t arg1, caddr_t arg2);
static uint_t scrub_dcache_line_intr(caddr_t arg1, caddr_t arg2);
static uint_t scrub_icache_line_intr(caddr_t arg1, caddr_t arg2);
/*
 * Per-scrubber bookkeeping: enable knob, call frequency, index into
 * the per-CPU arrays, softint number, cyclic ids, and a short name
 * for console messages.
 */
struct scrub_info {
	int		*csi_enable;	/* points at the enable tunable */
	int		csi_freq;	/* scrubber calls per second */
	int		csi_index;	/* CACHE_SCRUBBER_INFO_* index */
	uint64_t	csi_inum;	/* softint number */
	cyclic_id_t	csi_omni_cyc_id;	/* per-CPU omni cyclic */
	cyclic_id_t	csi_offline_cyc_id;	/* offline-CPU cyclic */
	char		csi_name[3];	/* cache name, e.g. "E$" */
} cache_scrub_info[] = {
	{ &ecache_scrub_enable, 0, CACHE_SCRUBBER_INFO_E, 0, 0, 0, "E$"},
	{ &dcache_scrub_enable, 0, CACHE_SCRUBBER_INFO_D, 0, 0, 0, "D$"},
	{ &icache_scrub_enable, 0, CACHE_SCRUBBER_INFO_I, 0, 0, 0, "I$"}
};
/*
 * Cyclic handler (runs on every CPU): post a scrub request for the
 * given scrubber by bumping the outstanding count, and raise the
 * softint only on the 0 -> 1 transition since the handler drains
 * the whole count.
 */
static void
do_scrub(struct scrub_info *csi)
{
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
	int idx = csi->csi_index;
	uint32_t *pending = &csmp->chsm_outstanding[idx];

	/* Skip if this scrubber is disabled globally or for this CPU. */
	if (!*(csi->csi_enable) || !csmp->chsm_enable[idx])
		return;

	if (atomic_inc_32_nv(pending) == 1) {
		xt_one_unchecked(CPU->cpu_id, setsoftint_tl1,
		    csi->csi_inum, 0);
	}
}
/*
 * Cyclic handler that drives scrubbing on offline CPUs by cross-
 * trapping the scrub request to every member of cpu_offline_set.
 */
static void
do_scrub_offline(struct scrub_info *csi)
{
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);

	/* No offline CPUs: nothing to do. */
	if (CPUSET_ISNULL(cpu_offline_set))
		return;

	if (*(csi->csi_enable) != 0 &&
	    csmp->chsm_enable[csi->csi_index] != 0) {
		xt_some(cpu_offline_set, cache_scrubreq_tl1, csi->csi_inum,
		    csi->csi_index);
	}
}
/*
 * Omni-cyclic online callback: configure a per-CPU cyclic that fires
 * do_scrub() for this scrubber, csi_freq times per second at low PIL.
 */
static void
cpu_scrub_cyclic_setup(void *arg, cpu_t *cpu, cyc_handler_t *hdlr,
    cyc_time_t *when)
{
	struct scrub_info *csi = (struct scrub_info *)arg;

	ASSERT(csi != NULL);

	hdlr->cyh_arg = arg;
	hdlr->cyh_level = CY_LOW_LEVEL;
	hdlr->cyh_func = (cyc_func_t)do_scrub;

	when->cyt_interval = NANOSEC / csi->csi_freq;
	when->cyt_when = 0;	/* start firing immediately */
}
/*
 * Set up the cache scrubbers: register a softint per cache, clamp
 * each scrubber's call frequency to [1, hz], and install both an
 * omni cyclic (one instance per online CPU) and an offline-CPU
 * cyclic for every enabled scrubber.
 */
void
cpu_init_cache_scrub(void)
{
	int i;
	struct scrub_info *csi;
	cyc_omni_handler_t omni_hdlr;
	cyc_handler_t offline_hdlr;
	cyc_time_t when;

	dcache_nlines = dcache_size / dcache_linesize;

	/* Register the scrubber softints and record their inums. */
	cache_scrub_info[CACHE_SCRUBBER_INFO_E].csi_inum =
	    add_softintr(ecache_scrub_pil, scrub_ecache_line_intr,
	    (caddr_t)&cache_scrub_info[CACHE_SCRUBBER_INFO_E], SOFTINT_MT);
	cache_scrub_info[CACHE_SCRUBBER_INFO_E].csi_freq = ecache_calls_a_sec;

	cache_scrub_info[CACHE_SCRUBBER_INFO_D].csi_inum =
	    add_softintr(dcache_scrub_pil, scrub_dcache_line_intr,
	    (caddr_t)&cache_scrub_info[CACHE_SCRUBBER_INFO_D], SOFTINT_MT);
	cache_scrub_info[CACHE_SCRUBBER_INFO_D].csi_freq = dcache_calls_a_sec;

	cache_scrub_info[CACHE_SCRUBBER_INFO_I].csi_inum =
	    add_softintr(icache_scrub_pil, scrub_icache_line_intr,
	    (caddr_t)&cache_scrub_info[CACHE_SCRUBBER_INFO_I], SOFTINT_MT);
	cache_scrub_info[CACHE_SCRUBBER_INFO_I].csi_freq = icache_calls_a_sec;

	/* cpu_lock is held across the cyclic registrations. */
	mutex_enter(&cpu_lock);
	for (i = 0; i < CACHE_SCRUBBER_COUNT; i++) {
		csi = &cache_scrub_info[i];

		if (!(*csi->csi_enable))
			continue;

		/* Clamp the call frequency to a sane [1, hz] range. */
		if (csi->csi_freq > hz) {
			cmn_err(CE_NOTE, "%s scrub calls_a_sec set too high "
			    "(%d); resetting to hz (%d)", csi->csi_name,
			    csi->csi_freq, hz);
			csi->csi_freq = hz;
		} else if (csi->csi_freq < 1) {
			cmn_err(CE_NOTE, "%s scrub calls_a_sec set too low "
			    "(%d); resetting to 1", csi->csi_name,
			    csi->csi_freq);
			csi->csi_freq = 1;
		}

		omni_hdlr.cyo_online = cpu_scrub_cyclic_setup;
		omni_hdlr.cyo_offline = NULL;
		omni_hdlr.cyo_arg = (void *)csi;

		offline_hdlr.cyh_func = (cyc_func_t)do_scrub_offline;
		offline_hdlr.cyh_arg = (void *)csi;
		offline_hdlr.cyh_level = CY_LOW_LEVEL;

		when.cyt_when = 0;	/* start immediately */
		when.cyt_interval = NANOSEC / csi->csi_freq;

		csi->csi_omni_cyc_id = cyclic_add_omni(&omni_hdlr);
		csi->csi_offline_cyc_id = cyclic_add(&offline_hdlr, &when);
	}
	register_cpu_setup_func(cpu_scrub_cpu_setup, NULL);
	mutex_exit(&cpu_lock);
}
/*
 * Note that a CPU has gone idle so the scrubbers use their idle
 * scan rates.  Harmless if the CPU's private data isn't set up yet.
 */
void
cpu_idle_ecache_scrub(struct cpu *cp)
{
	ch_scrub_misc_t *csmp;

	if (CPU_PRIVATE(cp) == NULL)
		return;

	csmp = CPU_PRIVATE_PTR(cp, chpr_scrub_misc);
	csmp->chsm_ecache_busy = ECACHE_CPU_IDLE;
}
/*
 * Note that a CPU has become busy so the scrubbers use their busy
 * scan rates.  Harmless if the CPU's private data isn't set up yet.
 */
void
cpu_busy_ecache_scrub(struct cpu *cp)
{
	ch_scrub_misc_t *csmp;

	if (CPU_PRIVATE(cp) == NULL)
		return;

	csmp = CPU_PRIVATE_PTR(cp, chpr_scrub_misc);
	csmp->chsm_ecache_busy = ECACHE_CPU_BUSY;
}
/*
 * Set up scrubber bookkeeping for a newly attached CPU: compute its
 * E$ and I$ line counts, enable all three scrubbers for the CPU and
 * mark it busy so the busy scan rates apply initially.
 */
void
cpu_init_ecache_scrub_dr(struct cpu *cp)
{
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(cp, chpr_scrub_misc);
	int cpuid = cp->cpu_id;

	/* Line counts for the external and instruction caches. */
	csmp->chsm_ecache_nlines = cpunodes[cpuid].ecache_size /
	    cpunodes[cpuid].ecache_linesize;
	csmp->chsm_icache_nlines = CPU_PRIVATE_VAL(cp, chpr_icache_size) /
	    CPU_PRIVATE_VAL(cp, chpr_icache_linesize);

	/*
	 * do_scrub()/do_scrub_offline() check these per-CPU flags in
	 * addition to the global enable tunables.
	 */
	csmp->chsm_enable[CACHE_SCRUBBER_INFO_E] = 1;
	csmp->chsm_enable[CACHE_SCRUBBER_INFO_D] = 1;
	csmp->chsm_enable[CACHE_SCRUBBER_INFO_I] = 1;

	cpu_busy_ecache_scrub(cp);
}
/*
 * Reset a detaching CPU's scrubber bookkeeping and mark it idle.
 */
static void
cpu_uninit_ecache_scrub_dr(struct cpu *cp)
{
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(cp, chpr_scrub_misc);

	bzero(csmp, sizeof (ch_scrub_misc_t));
	cpu_idle_ecache_scrub(cp);
}
/*
 * Invalidate how_many D$ lines starting at the saved rotating index,
 * wrapping around the cache (the mask arithmetic assumes
 * dcache_nlines is a power of two).
 */
static void
scrub_dcache(int how_many)
{
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
	int line = csmp->chsm_flush_index[CACHE_SCRUBBER_INFO_D];
	int n;

	for (n = 0; n < how_many; n++) {
		dcache_inval_line(line);
		line = (line + 1) & (dcache_nlines - 1);
	}

	/* Remember where to resume next time. */
	csmp->chsm_flush_index[CACHE_SCRUBBER_INFO_D] = line;
}
/*
 * Softint handler for the D$ scrubber.  Computes how many lines to
 * scrub per posted request from the idle/busy scan rate and the call
 * frequency, then drains the outstanding-request count, re-checking
 * it atomically to catch requests posted while scrubbing.
 */
static uint_t
scrub_dcache_line_intr(caddr_t arg1, caddr_t arg2)
{
	int i;
	int how_many;
	int outstanding;
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
	uint32_t *countp = &csmp->chsm_outstanding[CACHE_SCRUBBER_INFO_D];
	struct scrub_info *csi = (struct scrub_info *)arg1;
	int scan_rate = (csmp->chsm_ecache_busy == ECACHE_CPU_IDLE) ?
	    dcache_scan_rate_idle : dcache_scan_rate_busy;

	/*
	 * Lines per call: the scan rate is thousandths of the cache
	 * per second, divided over csi_freq calls.
	 */
	how_many = (dcache_nlines * scan_rate) / (1000 * csi->csi_freq);

	do {
		outstanding = *countp;
		for (i = 0; i < outstanding; i++) {
			scrub_dcache(how_many);
		}
	} while (atomic_add_32_nv(countp, -outstanding));

	return (DDI_INTR_CLAIMED);
}
/*
 * Advance the I$ scrub index by how_many lines, invalidating the
 * entire I$ each time the rotating index wraps to line 0 (the mask
 * arithmetic assumes the line count is a power of two).
 */
static void
scrub_icache(int how_many)
{
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
	int line = csmp->chsm_flush_index[CACHE_SCRUBBER_INFO_I];
	int nlines = csmp->chsm_icache_nlines;
	int n;

	for (n = 0; n < how_many; n++) {
		if (line == 0)
			icache_inval_all();
		line = (line + 1) & (nlines - 1);
	}

	/* Remember where to resume next time. */
	csmp->chsm_flush_index[CACHE_SCRUBBER_INFO_I] = line;
}
/*
 * Softint handler for the I$ scrubber; same drain pattern as the D$
 * handler, using the I$ line count and scan rates.
 */
static uint_t
scrub_icache_line_intr(caddr_t arg1, caddr_t arg2)
{
	int i;
	int how_many;
	int outstanding;
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
	uint32_t *countp = &csmp->chsm_outstanding[CACHE_SCRUBBER_INFO_I];
	struct scrub_info *csi = (struct scrub_info *)arg1;
	int scan_rate = (csmp->chsm_ecache_busy == ECACHE_CPU_IDLE) ?
	    icache_scan_rate_idle : icache_scan_rate_busy;
	int icache_nlines = csmp->chsm_icache_nlines;

	/* Lines per call, from the scan rate and call frequency. */
	how_many = (icache_nlines * scan_rate) / (1000 * csi->csi_freq);

	do {
		outstanding = *countp;
		for (i = 0; i < outstanding; i++) {
			scrub_icache(how_many);
		}
	} while (atomic_add_32_nv(countp, -outstanding));

	return (DDI_INTR_CLAIMED);
}
/*
 * Scrub how_many E$ lines by displacement-flushing from
 * ecache_flushaddr at the rotating line index.
 * NOTE(review): the index wraps at twice the line count
 * ((nlines << 1) - 1 mask), so the flush address walks a region
 * twice the cache size -- presumably to cover the whole flush area;
 * confirm against ecache_flush_line().
 */
static void
scrub_ecache(int how_many)
{
	ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
	int i;
	int cpuid = CPU->cpu_id;
	int index = csmp->chsm_flush_index[CACHE_SCRUBBER_INFO_E];
	int nlines = csmp->chsm_ecache_nlines;
	int linesize = cpunodes[cpuid].ecache_linesize;
	int ec_set_size = cpu_ecache_set_size(CPU);

	for (i = 0; i < how_many; i++) {
		ecache_flush_line(ecache_flushaddr + (index * linesize),
		    ec_set_size);
		index = (index + 1) & ((nlines << 1) - 1);
	}
	csmp->chsm_flush_index[CACHE_SCRUBBER_INFO_E] = index;
}
/*
 * Softint-level handler for the E$ scrubber: drains this CPU's count of
 * outstanding scrub requests, flushing a slice of the E-cache for each
 * one.  arg1 is the per-scrubber scrub_info; arg2 is unused.
 */
static uint_t
scrub_ecache_line_intr(caddr_t arg1, caddr_t arg2)
{
int i;
int how_many;
int outstanding;
ch_scrub_misc_t *csmp = CPU_PRIVATE_PTR(CPU, chpr_scrub_misc);
uint32_t *countp = &csmp->chsm_outstanding[CACHE_SCRUBBER_INFO_E];
struct scrub_info *csi = (struct scrub_info *)arg1;
/* Scan faster when the E$ has been observed idle. */
int scan_rate = (csmp->chsm_ecache_busy == ECACHE_CPU_IDLE) ?
ecache_scan_rate_idle : ecache_scan_rate_busy;
int ecache_nlines = csmp->chsm_ecache_nlines;
/*
 * Lines per request so that the whole E$ is covered at 'scan_rate'
 * given the scrubber's firing frequency (csi_freq).
 */
how_many = (ecache_nlines * scan_rate) / (1000 * csi->csi_freq);
do {
outstanding = *countp;
for (i = 0; i < outstanding; i++) {
scrub_ecache(how_many);
}
/* Loop again if more requests arrived while we were scrubbing. */
} while (atomic_add_32_nv(countp, -outstanding));
return (DDI_INTR_CLAIMED);
}
/*
 * Timeout(9F) callback used to defer the CE re-check: hand the work to
 * the ch_check_ce_tq taskq so it runs in kernel (PIL 0) context.  If
 * the TQ_NOSLEEP dispatch fails, re-arm the timeout and retry after
 * cpu_ceen_delay_secs.  arg carries the target cpu id.
 */
static void
cpu_delayed_check_ce_errors(void *arg)
{
if (taskq_dispatch(ch_check_ce_tq, cpu_check_ce_errors, arg,
TQ_NOSLEEP) == TASKQID_INVALID) {
(void) timeout(cpu_delayed_check_ce_errors, arg,
drv_usectohz((clock_t)cpu_ceen_delay_secs * MICROSEC));
}
}
/*
 * Taskq worker that re-examines a CPU for correctable errors so CEEN
 * can be re-enabled (see cpu_check_ce with TIMEOUT_CEEN_CHECK).  If
 * the target is the current CPU, check directly; if it can take
 * cross-calls, xcall into it; otherwise re-arm the deferred timeout.
 */
static void
cpu_check_ce_errors(void *arg)
{
int cpuid = (int)(uintptr_t)arg;
cpu_t *cp;
/* Must run at base PIL: we take cpu_lock (an adaptive mutex). */
ASSERT(curthread->t_pil == 0);
mutex_enter(&cpu_lock);
cp = cpu_get(cpuid);
if (cp == NULL) {
/* CPU no longer present; nothing to re-check. */
mutex_exit(&cpu_lock);
return;
}
/* Pin ourselves so the CPU-identity comparison below stays valid. */
kpreempt_disable();
if (cp->cpu_id == CPU->cpu_id) {
mutex_exit(&cpu_lock);
cpu_check_ce(TIMEOUT_CEEN_CHECK, 0, 0, 0);
kpreempt_enable();
return;
}
kpreempt_enable();
if (CPU_XCALL_READY(cp->cpu_id) && (!(cp->cpu_flags & CPU_QUIESCED))) {
xc_one(cp->cpu_id, (xcfunc_t *)cpu_check_ce,
TIMEOUT_CEEN_CHECK, 0);
mutex_exit(&cpu_lock);
} else {
/* Target can't take xcalls yet; retry after the CEEN delay. */
mutex_exit(&cpu_lock);
(void) timeout(cpu_delayed_check_ce_errors,
(void *)(uintptr_t)cp->cpu_id,
drv_usectohz((clock_t)cpu_ceen_delay_secs * MICROSEC));
}
}
/*
 * Examine this CPU's AFSR/AFSR_EXT for latched correctable-ECC errors.
 *
 * 'flag' is TIMEOUT_CEEN_CHECK (deferred re-check, pa/va/psz unused)
 * or SCRUBBER_CEEN_CHECK (called by the memory scrubber with the
 * physical/virtual address and size of the page just scanned).  If no
 * CECC bits are set, just ensure CEEN is back on (timeout case) and
 * return.  Otherwise CEEN is masked while the state is handed to
 * cpu_ce_detected(), and for the scrubber case the 64-byte block
 * around the AFAR is re-read so the error is corrected in memory.
 */
void
cpu_check_ce(int flag, uint64_t pa, caddr_t va, uint_t psz)
{
ch_cpu_errors_t cpu_error_regs;
uint64_t ec_err_enable;
uint64_t page_offset;
get_cpu_error_state(&cpu_error_regs);
/* No correctable-ECC bits latched in AFSR or AFSR_EXT? */
if (((cpu_error_regs.afsr & C_AFSR_CECC_ERRS) |
(cpu_error_regs.afsr_ext & C_AFSR_EXT_CECC_ERRS)) == 0) {
/* Timeout path: restore CEEN if it is still off. */
if (flag == TIMEOUT_CEEN_CHECK &&
!((ec_err_enable = get_error_enable()) & EN_REG_CEEN))
set_error_enable(ec_err_enable | EN_REG_CEEN);
return;
}
/* Errors present: mask CEEN while we log and scrub. */
if ((ec_err_enable = get_error_enable()) & EN_REG_CEEN)
set_error_enable(ec_err_enable & ~EN_REG_CEEN);
/*
 * If only deferred-class CE bits are set, tell cpu_ce_detected()
 * that re-enabling CEEN must itself be deferred.
 */
if (!((cpu_error_regs.afsr & cpu_ce_not_deferred) |
(cpu_error_regs.afsr_ext & cpu_ce_not_deferred_ext)))
cpu_ce_detected(&cpu_error_regs,
CE_CEEN_DEFER | CE_CEEN_TIMEOUT);
else
cpu_ce_detected(&cpu_error_regs, CE_CEEN_TIMEOUT);
/*
 * Scrubber path: if the AFAR falls inside the page just scanned,
 * re-read from the 64-byte-aligned offset to the end of the page so
 * the faulty location is rewritten with corrected data.
 */
if (flag == SCRUBBER_CEEN_CHECK && va) {
if ((cpu_error_regs.afar >= pa) &&
(cpu_error_regs.afar < (pa + psz))) {
page_offset = (uint64_t)(cpu_error_regs.afar &
(psz - 1));
va = (caddr_t)(va + (P2ALIGN(page_offset, 64)));
psz -= (uint_t)(P2ALIGN(page_offset, 64));
cpu_check_block((caddr_t)(P2ALIGN((uint64_t)va, 64)),
psz);
}
}
/*
 * Timeout path with non-deferred CEs: safe to restore CEEN now.
 * ec_err_enable was captured above, before the bit was cleared.
 */
if ((flag == TIMEOUT_CEEN_CHECK) &&
(cpu_error_regs.afsr & cpu_ce_not_deferred))
set_error_enable(ec_err_enable | EN_REG_CEEN);
}
/*
 * Try to claim this CPU's delayed-CE logout area for 'afar'.  The area
 * is free when chd_afar holds LOGOUT_INVALID; the atomic cas both
 * tests and claims it in one step.  Returns 1 if the logout was
 * captured (cpu_delayed_logout fills it in), 0 if there is no CPU
 * private area or the area is already in use.
 */
static int
cpu_ce_delayed_ec_logout(uint64_t afar)
{
ch_cpu_logout_t *clop;
if (CPU_PRIVATE(CPU) == NULL)
return (0);
clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
if (atomic_cas_64(&clop->clo_data.chd_afar, LOGOUT_INVALID, afar) !=
LOGOUT_INVALID)
return (0);
cpu_delayed_logout(afar, clop);
return (1);
}
/*
 * Entry point for correctable errors discovered outside a CE trap
 * (from a CEEN timeout or the scrubber -- 'flag' carries the
 * CE_CEEN_DEFER/CE_CEEN_TIMEOUT bits).  Builds a ch_async_flt_t from
 * the captured error registers, logs and clears the CE state, and if
 * clear_errors() indicates more state was latched meanwhile, queues
 * events for that too.
 */
void
cpu_ce_detected(ch_cpu_errors_t *cpu_error_regs, int flag)
{
ch_async_flt_t ch_flt;
struct async_flt *aflt;
char pr_reason[MAX_REASON_STRING];
bzero(&ch_flt, sizeof (ch_async_flt_t));
ch_flt.flt_trapped_ce = flag;
aflt = (struct async_flt *)&ch_flt;
aflt->flt_stat = cpu_error_regs->afsr & C_AFSR_MASK;
ch_flt.afsr_ext = cpu_error_regs->afsr_ext;
/* Merge primary and extended AFSR error bits into one word. */
ch_flt.afsr_errs = (cpu_error_regs->afsr_ext & C_AFSR_EXT_ALL_ERRS) |
(cpu_error_regs->afsr & C_AFSR_ALL_ERRS);
aflt->flt_addr = cpu_error_regs->afar;
#if defined(SERRANO)
ch_flt.afar2 = cpu_error_regs->afar2;
#endif
/* Not taken from a trap: no PC, TL 0, and no panic from here. */
aflt->flt_pc = NULL;
aflt->flt_priv = ((cpu_error_regs->afsr & C_AFSR_PRIV) != 0);
aflt->flt_tl = 0;
aflt->flt_panic = 0;
cpu_log_and_clear_ce(&ch_flt);
/*
 * Nonzero from clear_errors() presumably means further errors were
 * latched while logging -- queue events for those as well.
 */
if (clear_errors(&ch_flt)) {
pr_reason[0] = '\0';
(void) cpu_queue_events(&ch_flt, pr_reason, ch_flt.afsr_errs,
NULL);
}
}
/*
 * Log a correctable error: fill in common async_flt bookkeeping,
 * synthesize logout data for timeout-detected CEs, scrub the memory
 * location when the CE was in memory, queue ereport events, and flush
 * the E$ when the error implicates cache data.
 */
static void
cpu_log_and_clear_ce(ch_async_flt_t *ch_flt)
{
struct async_flt *aflt;
uint64_t afsr, afsr_errs;
ch_cpu_logout_t *clop;
char pr_reason[MAX_REASON_STRING];
on_trap_data_t *otp = curthread->t_ontrap;
aflt = (struct async_flt *)ch_flt;
afsr = aflt->flt_stat;
afsr_errs = ch_flt->afsr_errs;
/* Common fault identification. */
aflt->flt_id = gethrtime_waitfree();
aflt->flt_bus_id = getprocessorid();
aflt->flt_inst = CPU->cpu_id;
aflt->flt_prot = AFLT_PROT_NONE;
aflt->flt_class = CPU_FAULT;
aflt->flt_status = ECC_C_TRAP;
pr_reason[0] = '\0';
/* Without CPU private state there is no logout area to consult. */
if (CPU_PRIVATE(CPU) == NULL) {
clop = NULL;
ch_flt->flt_diag_data.chd_afar = LOGOUT_INVALID;
} else {
clop = CPU_PRIVATE_PTR(CPU, chpr_cecc_logout);
}
/*
 * Timeout-detected CE: no trap captured logout data, so build it now
 * from the live error registers (including the shadow copies), mark
 * the fault data incomplete, and clear what we just read via
 * set_cpu_error_state().
 */
if (clop && ch_flt->flt_trapped_ce & CE_CEEN_TIMEOUT) {
ch_cpu_errors_t cpu_error_regs;
get_cpu_error_state(&cpu_error_regs);
(void) cpu_ce_delayed_ec_logout(cpu_error_regs.afar);
clop->clo_data.chd_afsr = cpu_error_regs.afsr;
clop->clo_data.chd_afar = cpu_error_regs.afar;
clop->clo_data.chd_afsr_ext = cpu_error_regs.afsr_ext;
clop->clo_sdw_data.chd_afsr = cpu_error_regs.shadow_afsr;
clop->clo_sdw_data.chd_afar = cpu_error_regs.shadow_afar;
clop->clo_sdw_data.chd_afsr_ext =
cpu_error_regs.shadow_afsr_ext;
#if defined(SERRANO)
clop->clo_data.chd_afar2 = cpu_error_regs.afar2;
#endif
ch_flt->flt_data_incomplete = 1;
set_cpu_error_state(&cpu_error_regs);
}
/* If the CE hit memory, rewrite the location to scrub it. */
#if defined(JALAPENO) || defined(SERRANO)
if ((afsr & (C_AFSR_CE|C_AFSR_RCE)) &&
(cpu_flt_in_memory(ch_flt, (afsr & C_AFSR_CE)) ||
cpu_flt_in_memory(ch_flt, (afsr & C_AFSR_RCE)))) {
cpu_ce_scrub_mem_err(aflt, B_TRUE);
}
#else
if (afsr & (C_AFSR_CE|C_AFSR_EMC)) {
if (cpu_flt_in_memory(ch_flt, (afsr & C_AFSR_CE)) ||
cpu_flt_in_memory(ch_flt, (afsr & C_AFSR_EMC))) {
cpu_ce_scrub_mem_err(aflt, B_TRUE);
}
}
#endif
/* Record on_trap() OT_DATA_EC protection for recovery decisions. */
if (otp != NULL && (otp->ot_prot & OT_DATA_EC))
aflt->flt_prot = AFLT_PROT_EC;
/*
 * Queue an event per error bit; if nothing was queued, or no CECC
 * bits are present at all, emit an invalid-AFSR ereport instead.
 */
if (cpu_queue_events(ch_flt, pr_reason, afsr_errs, clop) == 0 ||
(afsr_errs & (C_AFSR_CECC_ERRS | C_AFSR_EXT_CECC_ERRS)) == 0) {
ch_flt->flt_type = CPU_INV_AFSR;
cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_INVALID_AFSR,
(void *)ch_flt, sizeof (ch_async_flt_t), ue_queue,
aflt->flt_panic);
}
/* Release the logout area for the next CE. */
if (clop) {
bzero(clop, sizeof (ch_cpu_logout_t));
clop->clo_data.chd_afar = LOGOUT_INVALID;
}
/*
 * Cache-data correctable errors leave a bad line behind; flush the
 * E$ so it is evicted.
 */
#if defined(JALAPENO) || defined(SERRANO)
if (afsr & (C_AFSR_EDC | C_AFSR_CPC | C_AFSR_CPU | C_AFSR_WDC))
#else
if (afsr_errs & (C_AFSR_EDC | C_AFSR_CPC | C_AFSR_WDC | C_AFSR_L3_EDC |
C_AFSR_L3_CPC | C_AFSR_L3_WDC))
#endif
cpu_error_ecache_flush(ch_flt);
}
/*
 * Decide how much E$ flushing the fault requires.  Returns
 * ECACHE_FLUSH_ALL, ECACHE_FLUSH_LINE, or 0 (no flush needed).
 * A solitary UE, EDU, or BERR can be handled by flushing just the
 * afflicted line; multiple simultaneous errors, cache-data errors,
 * or multiple-error (ME) status force a full flush.
 */
static int
cpu_error_ecache_flush_required(ch_async_flt_t *ch_flt)
{
struct async_flt *aflt;
uint64_t afsr;
uint64_t afsr_errs = ch_flt->afsr_errs;
aflt = (struct async_flt *)ch_flt;
afsr = aflt->flt_stat;
/* Multiple errors recorded: be conservative, flush everything. */
if (afsr & C_AFSR_ME) {
return (ECACHE_FLUSH_ALL);
}
/* Any cache-data error: full flush. */
#if defined(JALAPENO) || defined(SERRANO)
if (afsr & (C_AFSR_CPC | C_AFSR_CPU | C_AFSR_EDC | C_AFSR_WDC)) {
#else
if (afsr_errs & (C_AFSR_CPC | C_AFSR_EDC | C_AFSR_WDC | C_AFSR_L3_CPC |
C_AFSR_L3_EDC | C_AFSR_L3_WDC)) {
#endif
return (ECACHE_FLUSH_ALL);
}
/* A lone UE (or RUE) can be cleared by flushing just its line. */
#if defined(JALAPENO) || defined(SERRANO)
if (afsr & (C_AFSR_UE|C_AFSR_RUE)) {
if ((afsr & C_AFSR_ALL_ERRS) == C_AFSR_UE ||
(afsr & C_AFSR_ALL_ERRS) == C_AFSR_RUE) {
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
#else
if (afsr_errs & C_AFSR_UE) {
if ((afsr_errs & (C_AFSR_ALL_ERRS | C_AFSR_EXT_ALL_ERRS)) ==
C_AFSR_UE) {
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
#endif
/* A lone EDU (L2 or L3): flush just the line. */
if (afsr_errs & (C_AFSR_EDU | C_AFSR_L3_EDU)) {
if (((afsr_errs & ~C_AFSR_EDU) == 0) ||
((afsr_errs & ~C_AFSR_L3_EDU) == 0)) {
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
/* A lone bus error: flush just the line. */
if (afsr_errs & C_AFSR_BERR) {
if ((afsr_errs & ~C_AFSR_BERR) == 0) {
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
return (0);
}
/*
 * Perform whatever E$ flushing the fault calls for: the entire cache,
 * a single line, or nothing at all.
 */
void
cpu_error_ecache_flush(ch_async_flt_t *ch_flt)
{
	switch (cpu_error_ecache_flush_required(ch_flt)) {
	case ECACHE_FLUSH_ALL:
		cpu_flush_ecache();
		break;
	case ECACHE_FLUSH_LINE:
		cpu_flush_ecache_line(ch_flt);
		break;
	default:
		/* No flush required. */
		break;
	}
}
/*
 * Convert an E$ tag to a physical address, dispatching on the current
 * CPU's implementation (Jaguar, Panther, or classic Cheetah layout).
 */
uint64_t
cpu_ectag_to_pa(int setsize, uint64_t tag)
{
	int impl = cpunodes[CPU->cpu_id].implementation;

	if (IS_JAGUAR(impl))
		return (JG_ECTAG_TO_PA(setsize, tag));
	if (IS_PANTHER(impl))
		return (PN_L3TAG_TO_PA(tag));
	return (CH_ECTAG_TO_PA(setsize, tag));
}
/*
 * Map a physical address to its E$ subblock number for the current
 * CPU implementation.  Panther has no subblocks, so it yields 0.
 */
int
cpu_ectag_pa_to_subblk(int cachesize, uint64_t subaddr)
{
	int impl = cpunodes[CPU->cpu_id].implementation;

	if (IS_JAGUAR(impl))
		return (JG_ECTAG_PA_TO_SUBBLK(cachesize, subaddr));
	if (IS_PANTHER(impl))
		return (0);
	return (CH_ECTAG_PA_TO_SUBBLK(cachesize, subaddr));
}
/*
 * Report whether an E$ tag describes an invalid line, using the
 * tag layout of the current CPU implementation.
 */
int
cpu_ectag_line_invalid(int cachesize, uint64_t tag)
{
	int impl = cpunodes[CPU->cpu_id].implementation;

	if (IS_JAGUAR(impl))
		return (JG_ECTAG_LINE_INVALID(cachesize, tag));
	if (IS_PANTHER(impl))
		return (PN_L3_LINE_INVALID(tag));
	return (CH_ECTAG_LINE_INVALID(cachesize, tag));
}
/*
 * Extract the E$ subblock state for a physical address from a tag,
 * per the current CPU implementation.  Panther keeps a single state
 * field in the tag rather than per-subblock state.
 */
int
cpu_ectag_pa_to_subblk_state(int cachesize, uint64_t subaddr, uint64_t tag)
{
	int impl = cpunodes[CPU->cpu_id].implementation;

	if (IS_JAGUAR(impl))
		return (JG_ECTAG_PA_TO_SUBBLK_STATE(cachesize, subaddr, tag));
	if (IS_PANTHER(impl))
		return (tag & CH_ECSTATE_MASK);
	return (CH_ECTAG_PA_TO_SUBBLK_STATE(cachesize, subaddr, tag));
}
/*
 * MP startup hook.  When the Cheetah+ erratum-25 workaround is built
 * in and enabled, arm the send-mondo recovery nudge machinery.
 */
void
cpu_mp_init(void)
{
#ifdef CHEETAHPLUS_ERRATUM_25
	if (cheetah_sendmondo_recover != 0)
		cheetah_nudge_init();
#endif
}
/*
 * Assemble and post an FMA ereport for the fault.  In panic context
 * the nvlists are carved out of a pre-reserved errorq element (no
 * allocation is safe); otherwise they come from the FM nvlist
 * allocator and are posted and destroyed inline.  Afterwards, if the
 * platform supports ERROR2 messaging, forward a summary to the
 * platform service.
 */
void
cpu_ereport_post(struct async_flt *aflt)
{
char *cpu_type, buf[FM_MAX_CLASS];
nv_alloc_t *nva = NULL;
nvlist_t *ereport, *detector, *resource;
errorq_elem_t *eqep;
ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
char unum[UNUM_NAMLEN];
int synd_code;
uint8_t msg_type;
plat_ecc_ch_async_flt_t plat_ecc_ch_flt;
/*
 * Panic path: reserve an errorq element and allocate all nvlists
 * out of its nv allocator; if reservation fails we must give up.
 */
if (aflt->flt_panic || panicstr) {
eqep = errorq_reserve(ereport_errorq);
if (eqep == NULL)
return;
ereport = errorq_elem_nvl(ereport_errorq, eqep);
nva = errorq_elem_nva(ereport_errorq, eqep);
} else {
ereport = fm_nvlist_create(nva);
}
detector = fm_nvlist_create(nva);
resource = fm_nvlist_create(nva);
/* Choose the ereport class prefix from the CPU implementation. */
switch (cpunodes[aflt->flt_inst].implementation) {
case CHEETAH_IMPL:
cpu_type = FM_EREPORT_CPU_USIII;
break;
case CHEETAH_PLUS_IMPL:
cpu_type = FM_EREPORT_CPU_USIIIplus;
break;
case JALAPENO_IMPL:
cpu_type = FM_EREPORT_CPU_USIIIi;
break;
case SERRANO_IMPL:
cpu_type = FM_EREPORT_CPU_USIIIiplus;
break;
case JAGUAR_IMPL:
cpu_type = FM_EREPORT_CPU_USIV;
break;
case PANTHER_IMPL:
cpu_type = FM_EREPORT_CPU_USIVplus;
break;
default:
cpu_type = FM_EREPORT_CPU_UNSUPPORTED;
break;
}
cpu_fmri_cpu_set(detector, aflt->flt_inst);
/* Class string: ereport.cpu.<cpu_type>.<fault class>. */
(void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s",
FM_ERROR_CPU, cpu_type, aflt->flt_erpt_class);
fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
fm_ena_generate_cpu(aflt->flt_id, aflt->flt_inst, FM_ENA_FMT1),
detector, NULL);
/* Also captures AFAR/syndrome validity for the ERROR2 path below. */
cpu_payload_add_aflt(aflt, ereport, resource,
&plat_ecc_ch_flt.ecaf_afar_status,
&plat_ecc_ch_flt.ecaf_synd_status);
if (aflt->flt_panic || panicstr) {
errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
} else {
(void) fm_ereport_post(ereport, EVCH_TRYHARD);
fm_nvlist_destroy(ereport, FM_NVA_FREE);
fm_nvlist_destroy(detector, FM_NVA_FREE);
fm_nvlist_destroy(resource, FM_NVA_FREE);
}
/*
 * If the platform exposes ERROR2 messaging, translate the fault and
 * (when the AFAR was valid) its unum, then hand it off.
 */
if (&plat_ecc_capability_sc_get &&
plat_ecc_capability_sc_get(PLAT_ECC_ERROR2_MESSAGE)) {
msg_type = cpu_flt_bit_to_plat_error(aflt);
if (msg_type != PLAT_ECC_ERROR2_NONE) {
if (plat_ecc_ch_flt.ecaf_afar_status !=
AFLT_STAT_INVALID) {
synd_code = synd_to_synd_code(
plat_ecc_ch_flt.ecaf_synd_status,
aflt->flt_synd, ch_flt->flt_bit);
(void) cpu_get_mem_unum_synd(synd_code,
aflt, unum);
} else {
unum[0] = '\0';
}
plat_ecc_ch_flt.ecaf_sdw_afar = ch_flt->flt_sdw_afar;
plat_ecc_ch_flt.ecaf_sdw_afsr = ch_flt->flt_sdw_afsr;
plat_ecc_ch_flt.ecaf_afsr_ext = ch_flt->afsr_ext;
plat_ecc_ch_flt.ecaf_sdw_afsr_ext =
ch_flt->flt_sdw_afsr_ext;
if (&plat_log_fruid_error2)
plat_log_fruid_error2(msg_type, unum, aflt,
&plat_ecc_ch_flt);
}
}
}
/*
 * Offer the fault to registered bus-nexus FM error handlers under the
 * root node.  If nothing protects the access (AFLT_PROT_NONE) and a
 * handler declares the error fatal, mark the fault for panic.
 */
void
cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
{
	ddi_fm_error_t de;
	int disposition;

	bzero(&de, sizeof (de));
	de.fme_version = DDI_FME_VERSION;
	de.fme_ena = fm_ena_generate_cpu(aflt->flt_id, aflt->flt_inst,
	    FM_ENA_FMT1);
	de.fme_flag = expected;
	de.fme_bus_specific = (void *)aflt->flt_addr;

	disposition = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);
	if (disposition == DDI_FM_FATAL && aflt->flt_prot == AFLT_PROT_NONE)
		aflt->flt_panic = 1;
}
/*
 * Tag the fault payload with its ereport class string, then hand it
 * to the given error queue for asynchronous processing.
 */
void
cpu_errorq_dispatch(char *error_class, void *payload, size_t payload_sz,
errorq_t *eqp, uint_t flag)
{
	struct async_flt *faultp = (struct async_flt *)payload;

	faultp->flt_erpt_class = error_class;
	errorq_dispatch(eqp, payload, payload_sz, flag);
}
/*
 * Intentionally empty: this CPU module does not track per-unum CE
 * counts through this hook.
 */
void
cpu_ce_count_unum(struct async_flt *ecc, int len, char *unum)
{}
/*
 * Tune the VIS hardware-copy thresholds as each CPU's E$ size is
 * discovered.  On the first call (min_ecache_size still 0) any limit
 * the platform left at 0 ("unset") gets a default; on later calls the
 * 8-unit limit is re-derived for smaller E$ sizes, but only while we
 * still own the value (priv_hcl_8 tracks the last value we wrote, so
 * an administrator override is never clobbered).
 *
 * Fix: the original contained two if/else-if chains whose branches all
 * assigned the identical value (4 * VIS_COPY_THRESHOLD in every arm of
 * the equal-size case; 8 * VIS_COPY_THRESHOLD for every size <= 4MB in
 * the smaller-size case).  The redundant branches are collapsed; the
 * resulting limits are unchanged for every input.
 */
void
adjust_hw_copy_limits(int ecache_size)
{
	if (min_ecache_size == 0 && use_hw_bcopy) {
		/*
		 * First CPU seen: install defaults for any limit that
		 * is still unset (0).
		 */
		if (hw_copy_limit_1 == 0) {
			hw_copy_limit_1 = VIS_COPY_THRESHOLD;
			priv_hcl_1 = hw_copy_limit_1;
		}
		if (hw_copy_limit_2 == 0) {
			hw_copy_limit_2 = 2 * VIS_COPY_THRESHOLD;
			priv_hcl_2 = hw_copy_limit_2;
		}
		if (hw_copy_limit_4 == 0) {
			hw_copy_limit_4 = 4 * VIS_COPY_THRESHOLD;
			priv_hcl_4 = hw_copy_limit_4;
		}
		if (hw_copy_limit_8 == 0) {
			hw_copy_limit_8 = 4 * VIS_COPY_THRESHOLD;
			priv_hcl_8 = hw_copy_limit_8;
		}
		min_ecache_size = ecache_size;
	} else if (ecache_size == min_ecache_size) {
		/*
		 * Same E$ size as the smallest seen so far.  Only
		 * rewrite the 8-unit limit when it still holds the
		 * default and has not been overridden.  (Every size
		 * bracket maps to the same value here, so this is a
		 * no-op rewrite kept for the priv_hcl_8 bookkeeping.)
		 */
		if (hw_copy_limit_8 == 4 * VIS_COPY_THRESHOLD &&
		    ecache_size > 1048576 &&
		    priv_hcl_8 == hw_copy_limit_8) {
			hw_copy_limit_8 = 4 * VIS_COPY_THRESHOLD;
			priv_hcl_8 = hw_copy_limit_8;
		}
	} else if (ecache_size < min_ecache_size) {
		/*
		 * A smaller E$ than any seen before: raise the 8-unit
		 * limit (if we still own it) and record the new
		 * minimum E$ size.
		 */
		if (priv_hcl_8 == hw_copy_limit_8) {
			if (ecache_size <= 4194304)
				hw_copy_limit_8 = 8 * VIS_COPY_THRESHOLD;
			else
				hw_copy_limit_8 = 10 * VIS_COPY_THRESHOLD;
			priv_hcl_8 = hw_copy_limit_8;
			min_ecache_size = ecache_size;
		}
	}
}
/*
 * Trap-time hook for fpRAS: if the trapping PC lies inside one of this
 * CPU's fpras check functions (anywhere from its blk0 up to its
 * chkresult member), redirect the saved PC/nPC to that function's
 * trampoline and return 1 so the trap handler resumes there.
 * Returns 0 when fpRAS is not set up or the PC is elsewhere.
 */
int
fpras_chktrap(struct regs *rp)
{
int op;
struct fpras_chkfngrp *cgp;
uintptr_t tpc = (uintptr_t)rp->r_pc;
if (fpras_chkfngrps == NULL)
return (0);
cgp = &fpras_chkfngrps[CPU->cpu_id];
/* Find which copy operation's check function covers the PC. */
for (op = 0; op < FPRAS_NCOPYOPS; ++op) {
if (tpc >= (uintptr_t)&cgp->fpras_fn[op].fpras_blk0 &&
tpc < (uintptr_t)&cgp->fpras_fn[op].fpras_chkresult)
break;
}
if (op == FPRAS_NCOPYOPS)
return (0);
/* Resume at the trampoline; nPC is the following instruction. */
rp->r_pc = (uintptr_t)&cgp->fpras_fn[op].fpras_trampoline;
rp->r_npc = rp->r_pc + 4;
return (1);
}
/*
 * Handle an fpRAS checksum failure on this CPU.  'op' names the copy
 * operation that failed and 'how' how the failure was detected; both
 * are encoded into flt_status.  Hardware copy/zero are disabled
 * globally while the ereport is generated and restored on the way out.
 * The payload carries the xor difference between the CPU's check
 * function and the type-1 reference image (presumably the pristine
 * copy -- fpras_chkfn_type1).  If no lofault protection is active the
 * fault panics; otherwise the lwp is flagged with an async hardware
 * error.  Always returns 1.
 */
int
fpras_failure(int op, int how)
{
int use_hw_bcopy_orig, use_hw_bzero_orig;
uint_t hcl1_orig, hcl2_orig, hcl4_orig, hcl8_orig;
ch_async_flt_t ch_flt;
struct async_flt *aflt = (struct async_flt *)&ch_flt;
struct fpras_chkfn *sfp, *cfp;
uint32_t *sip, *cip;
int i;
/* Disable hw copy/zero while we report; restore before returning. */
use_hw_bcopy_orig = use_hw_bcopy;
use_hw_bzero_orig = use_hw_bzero;
hcl1_orig = hw_copy_limit_1;
hcl2_orig = hw_copy_limit_2;
hcl4_orig = hw_copy_limit_4;
hcl8_orig = hw_copy_limit_8;
use_hw_bcopy = use_hw_bzero = 0;
hw_copy_limit_1 = hw_copy_limit_2 = hw_copy_limit_4 =
hw_copy_limit_8 = 0;
bzero(&ch_flt, sizeof (ch_async_flt_t));
aflt->flt_id = gethrtime_waitfree();
aflt->flt_class = CPU_FAULT;
aflt->flt_inst = CPU->cpu_id;
/* Encode detection method and operation into the status word. */
aflt->flt_status = (how << 8) | op;
aflt->flt_payload = FM_EREPORT_PAYLOAD_FPU_HWCOPY;
ch_flt.flt_type = CPU_FPUERR;
/* Panic only when no lofault handler protects the caller. */
aflt->flt_panic = (curthread->t_lofault == (uintptr_t)NULL);
/* Diff the failing check function against the reference image. */
sfp = (struct fpras_chkfn *)fpras_chkfn_type1;
cfp = &fpras_chkfngrps[CPU->cpu_id].fpras_fn[op];
if (op == FPRAS_BCOPY || op == FPRAS_COPYOUT) {
sip = &sfp->fpras_blk0[0];
cip = &cfp->fpras_blk0[0];
} else {
sip = &sfp->fpras_blk1[0];
cip = &cfp->fpras_blk1[0];
}
for (i = 0; i < 16; ++i, ++sip, ++cip)
ch_flt.flt_fpdata[i] = *sip ^ *cip;
cpu_errorq_dispatch(FM_EREPORT_CPU_USIII_FPU_HWCOPY, (void *)&ch_flt,
sizeof (ch_async_flt_t), ue_queue, aflt->flt_panic);
if (aflt->flt_panic)
fm_panic("FPU failure on CPU %d", CPU->cpu_id);
/* Survivable: flag the lwp and post an AST to deliver it. */
ttolwp(curthread)->lwp_pcb.pcb_flags |= ASYNC_HWERR;
aston(curthread);
use_hw_bcopy = use_hw_bcopy_orig;
use_hw_bzero = use_hw_bzero_orig;
hw_copy_limit_1 = hcl1_orig;
hw_copy_limit_2 = hcl2_orig;
hw_copy_limit_4 = hcl4_orig;
hw_copy_limit_8 = hcl8_orig;
return (1);
}
#define VIS_BLOCKSIZE 64
/*
 * Error-path variant of dtrace_blksuword32(): retry the block store
 * with any watchpoints covering the target range temporarily disabled,
 * re-arming them afterwards.  Returns the store's result.
 */
int
dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
{
	int was_watched;
	int rv;

	was_watched = watch_disable_addr((void *)addr, VIS_BLOCKSIZE,
	    S_WRITE);
	rv = dtrace_blksuword32(addr, data, 0);
	if (was_watched != 0)
		watch_enable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);

	return (rv);
}
/*
 * A CPU is entering the faulted state: cross-trap it to clear the
 * CEEN bit in its error-enable register, suppressing further
 * correctable-error reporting while it is faulted.
 */
void
cpu_faulted_enter(struct cpu *cp)
{
	xt_one(cp->cpu_id, set_error_enable_tl1, EN_REG_CEEN,
	    EER_SET_CLRBITS);
}
/*
 * A CPU is leaving the faulted state: cross-call it to clear any
 * latched correctable-ECC status from its AFSR (and, on Panther, the
 * extended AFSR), then cross-trap to turn CEEN back on.
 *
 * Fix: cpu_error_regs was previously left uninitialized and the
 * Panther branch applied "afsr_ext &= C_AFSR_EXT_CECC_ERRS" to the
 * indeterminate field -- reading an uninitialized automatic object is
 * undefined behavior, and the struct was then handed to
 * set_cpu_error_state() with garbage in its remaining fields.  Zero
 * the structure first and assign the extended mask outright.
 */
void
cpu_faulted_exit(struct cpu *cp)
{
	ch_cpu_errors_t cpu_error_regs;

	bzero(&cpu_error_regs, sizeof (cpu_error_regs));
	cpu_error_regs.afsr = C_AFSR_CECC_ERRS;
	if (IS_PANTHER(cpunodes[cp->cpu_id].implementation))
		cpu_error_regs.afsr_ext = C_AFSR_EXT_CECC_ERRS;
	xc_one(cp->cpu_id, (xcfunc_t *)set_cpu_error_state,
	    (uint64_t)&cpu_error_regs, 0);

	xt_one(cp->cpu_id, set_error_enable_tl1, EN_REG_CEEN,
	    EER_SET_SETBITS);
}
/*
 * Decide whether the trap-time errors (t_afsr_errs/t_afar) can be
 * dismissed as secondary effects of errors already being handled.
 * Returns 1 when both the trap bits and the accumulated fault bits
 * are limited to BERR/TO/ME; on non-Cheetah+ builds, also when a
 * lone UE matches an in-flight EDU/WDU on the same 64-byte line.
 * Returns 0 when the errors must be processed.
 */
static int
cpu_check_secondary_errors(ch_async_flt_t *ch_flt, uint64_t t_afsr_errs,
uint64_t t_afar)
{
#if defined(CHEETAH_PLUS)
#else
struct async_flt *aflt = (struct async_flt *)ch_flt;
#endif
/* Only BERR/TO (plus ME) everywhere: treat as secondary. */
if ((t_afsr_errs & ~(C_AFSR_BERR | C_AFSR_TO | C_AFSR_ME)) == 0) {
if ((ch_flt->afsr_errs &
~(C_AFSR_BERR | C_AFSR_TO | C_AFSR_ME)) == 0)
return (1);
}
#if defined(CHEETAH_PLUS)
return (0);
#else
/*
 * Non-Cheetah+: a solitary trap-time UE may be a side effect of an
 * EDU/WDU already captured for the same 64-byte aligned address.
 */
if (t_afsr_errs != C_AFSR_UE)
return (0);
if ((ch_flt->afsr_errs & ~(C_AFSR_EDU|C_AFSR_WDU)) != 0)
return (0);
if (P2ALIGN(aflt->flt_addr, 64) != P2ALIGN(t_afar, 64))
return (0);
return (1);
#endif
}
/*
 * Map the fault's AFSR bit (flt_bit) -- or, for parity-style faults
 * with no AFSR bit, its flt_type -- to a PLAT_ECC_ERROR2_* code for
 * the platform's ERROR2 messaging service.  Jalapeno builds have no
 * ERROR2 support and always report PLAT_ECC_ERROR2_NONE.
 */
static uint8_t
cpu_flt_bit_to_plat_error(struct async_flt *aflt)
{
#if defined(JALAPENO)
return (PLAT_ECC_ERROR2_NONE);
#else
ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
switch (ch_flt->flt_bit) {
case C_AFSR_CE:
return (PLAT_ECC_ERROR2_CE);
/* L2 correctable data/tag errors. */
case C_AFSR_UCC:
case C_AFSR_EDC:
case C_AFSR_WDC:
case C_AFSR_CPC:
return (PLAT_ECC_ERROR2_L2_CE);
case C_AFSR_EMC:
return (PLAT_ECC_ERROR2_EMC);
case C_AFSR_IVC:
return (PLAT_ECC_ERROR2_IVC);
case C_AFSR_UE:
return (PLAT_ECC_ERROR2_UE);
/* L2 uncorrectable data errors. */
case C_AFSR_UCU:
case C_AFSR_EDU:
case C_AFSR_WDU:
case C_AFSR_CPU:
return (PLAT_ECC_ERROR2_L2_UE);
case C_AFSR_IVU:
return (PLAT_ECC_ERROR2_IVU);
case C_AFSR_TO:
return (PLAT_ECC_ERROR2_TO);
case C_AFSR_BERR:
return (PLAT_ECC_ERROR2_BERR);
#if defined(CHEETAH_PLUS)
/* L3 correctable errors (Cheetah+ family only). */
case C_AFSR_L3_EDC:
case C_AFSR_L3_UCC:
case C_AFSR_L3_CPC:
case C_AFSR_L3_WDC:
return (PLAT_ECC_ERROR2_L3_CE);
case C_AFSR_IMC:
return (PLAT_ECC_ERROR2_IMC);
case C_AFSR_TSCE:
return (PLAT_ECC_ERROR2_L2_TSCE);
case C_AFSR_THCE:
return (PLAT_ECC_ERROR2_L2_THCE);
case C_AFSR_L3_MECC:
return (PLAT_ECC_ERROR2_L3_MECC);
case C_AFSR_L3_THCE:
return (PLAT_ECC_ERROR2_L3_THCE);
/* L3 uncorrectable errors. */
case C_AFSR_L3_CPU:
case C_AFSR_L3_EDU:
case C_AFSR_L3_UCU:
case C_AFSR_L3_WDU:
return (PLAT_ECC_ERROR2_L3_UE);
case C_AFSR_DUE:
return (PLAT_ECC_ERROR2_DUE);
case C_AFSR_DTO:
return (PLAT_ECC_ERROR2_DTO);
case C_AFSR_DBERR:
return (PLAT_ECC_ERROR2_DBERR);
#endif
default:
/* No AFSR bit: classify by fault type (parity-style faults). */
switch (ch_flt->flt_type) {
#if defined(CPU_IMP_L1_CACHE_PARITY)
case CPU_IC_PARITY:
return (PLAT_ECC_ERROR2_IPE);
case CPU_DC_PARITY:
/* Panther reports P$ parity through the D$ path. */
if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
if (ch_flt->parity_data.dpe.cpl_cache ==
CPU_PC_PARITY) {
return (PLAT_ECC_ERROR2_PCACHE);
}
}
return (PLAT_ECC_ERROR2_DPE);
#endif
case CPU_ITLB_PARITY:
return (PLAT_ECC_ERROR2_ITLB);
case CPU_DTLB_PARITY:
return (PLAT_ECC_ERROR2_DTLB);
default:
return (PLAT_ECC_ERROR2_NONE);
}
}
#endif
}