#include "assym.h"
#include <sys/asm_linkage.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/privregs.h>
#include <sys/asm_linkage.h>
#include <sys/machasi.h>
#include <sys/trap.h>
#include <sys/spitregs.h>
#include <sys/xc_impl.h>
#include <sys/intreg.h>
#include <sys/async.h>
#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif
/*
 * DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3)
 *
 * Flush one page's worth of lines from the data cache, choosing the
 * strategy at run time from the global dflush_type:
 *   FLUSHPAGE_TYPE  -> branch to "2:": walk one page (MMU_PAGESIZE) of
 *                      D$ lines at the page color given by arg2, and
 *                      invalidate only lines whose tag matches arg1
 *                      (arg1 is shifted by SF_DC_VBIT_SHIFT to tag form
 *                      before the compare; presumably arg1 is the pfn —
 *                      confirm against callers vac_flushpage*).
 *   FLUSHMATCH_TYPE -> branch to "3:": walk the entire D$ (dcache_size)
 *                      and invalidate every line whose tag matches arg1.
 *   anything else   -> fall through to "4:": unconditionally invalidate
 *                      every line in the D$.
 * The whole macro is skipped (branch to "1:") when the D$ is disabled
 * (LSU_DC clear in the LSU control register).  All registers passed in
 * are clobbered.  Invalidation is done by storing zero to ASI_DC_TAG
 * followed by membar #Sync.
 * (No per-line "!" comments inside the macro: they would comment out the
 * cpp continuation backslashes.)
 */
#define DCACHE_FLUSHPAGE(arg1, arg2, tmp1, tmp2, tmp3) \
ldxa [%g0]ASI_LSU, tmp1 ;\
btst LSU_DC, tmp1 ;\
bz,pn %icc, 1f ;\
sethi %hi(dcache_linesize), tmp1 ;\
ld [tmp1 + %lo(dcache_linesize)], tmp1 ;\
sethi %hi(dflush_type), tmp2 ;\
ld [tmp2 + %lo(dflush_type)], tmp2 ;\
cmp tmp2, FLUSHPAGE_TYPE ;\
be,pt %icc, 2f ;\
sllx arg1, SF_DC_VBIT_SHIFT, arg1 ;\
sethi %hi(dcache_size), tmp3 ;\
ld [tmp3 + %lo(dcache_size)], tmp3 ;\
cmp tmp2, FLUSHMATCH_TYPE ;\
be,pt %icc, 3f ;\
nop ;\
\
sub tmp3, tmp1, tmp2 ;\
4: \
stxa %g0, [tmp2]ASI_DC_TAG ;\
membar #Sync ;\
cmp %g0, tmp2 ;\
bne,pt %icc, 4b ;\
sub tmp2, tmp1, tmp2 ;\
ba,pt %icc, 1f ;\
nop ;\
\
2: \
set MMU_PAGESIZE, tmp3 ;\
sllx arg2, MMU_PAGESHIFT, arg2 ;\
sub tmp3, tmp1, tmp3 ;\
4: \
ldxa [arg2 + tmp3]ASI_DC_TAG, tmp2 ;\
btst SF_DC_VBIT_MASK, tmp2 ;\
bz,pn %icc, 5f ;\
andn tmp2, SF_DC_VBIT_MASK, tmp2 ;\
cmp tmp2, arg1 ;\
bne,pn %icc, 5f ;\
nop ;\
stxa %g0, [arg2 + tmp3]ASI_DC_TAG ;\
membar #Sync ;\
5: \
cmp %g0, tmp3 ;\
bnz,pt %icc, 4b ;\
sub tmp3, tmp1, tmp3 ;\
ba,pt %icc, 1f ;\
nop ;\
\
3: \
sub tmp3, tmp1, arg2 ;\
4: \
ldxa [arg2]ASI_DC_TAG, tmp2 ;\
btst SF_DC_VBIT_MASK, tmp2 ;\
bz,pn %icc, 5f ;\
andn tmp2, SF_DC_VBIT_MASK, tmp2 ;\
cmp tmp2, arg1 ;\
bne,pn %icc, 5f ;\
nop ;\
stxa %g0, [arg2]ASI_DC_TAG ;\
membar #Sync ;\
5: \
cmp %g0, arg2 ;\
bne,pt %icc, 4b ;\
sub arg2, tmp1, arg2 ;\
1:
/*
 * DCACHE_FLUSHCOLOR(arg, tmp1, tmp2)
 *
 * Unconditionally invalidate one page's worth of D$ lines at the page
 * color 'arg' (arg is shifted left by MMU_PAGESHIFT to form the base
 * D$ offset).  Walks MMU_PAGESIZE bytes backwards in dcache_linesize
 * steps, writing zero tags via ASI_DC_TAG with membar #Sync after each.
 * Skipped entirely (branch to "1:") when the D$ is disabled (LSU_DC
 * clear).  Clobbers arg, tmp1, tmp2.
 */
#define DCACHE_FLUSHCOLOR(arg, tmp1, tmp2) \
ldxa [%g0]ASI_LSU, tmp1; \
btst LSU_DC, tmp1; \
bz,pn %icc, 1f; \
sethi %hi(dcache_linesize), tmp1; \
ld [tmp1 + %lo(dcache_linesize)], tmp1; \
set MMU_PAGESIZE, tmp2; \
\
sllx arg, MMU_PAGESHIFT, arg; \
sub tmp2, tmp1, tmp2; \
2: \
stxa %g0, [arg + tmp2]ASI_DC_TAG; \
membar #Sync; \
cmp %g0, tmp2; \
bne,pt %icc, 2b; \
sub tmp2, tmp1, tmp2; \
1:
/*
 * DCACHE_FLUSHALL(size, linesize, tmp)
 *
 * Invalidate the entire data cache: walk from (size - linesize) down to
 * 0 in linesize steps, zeroing each tag via ASI_DC_TAG with a
 * membar #Sync per line.  No-op (branch to "1:") when the D$ is
 * disabled (LSU_DC clear).  'size' and 'linesize' are read-only;
 * 'tmp' is clobbered.
 */
#define DCACHE_FLUSHALL(size, linesize, tmp) \
ldxa [%g0]ASI_LSU, tmp; \
btst LSU_DC, tmp; \
bz,pn %icc, 1f; \
\
sub size, linesize, tmp; \
2: \
stxa %g0, [tmp]ASI_DC_TAG; \
membar #Sync; \
cmp %g0, tmp; \
bne,pt %icc, 2b; \
sub tmp, linesize, tmp; \
1:
/*
 * ICACHE_FLUSHALL(size, linesize, tmp)
 *
 * Invalidate the entire instruction cache, same walk as DCACHE_FLUSHALL
 * but gated on LSU_IC and storing through ASI_IC_TAG.  'tmp' is
 * clobbered; 'size' and 'linesize' are read-only.
 */
#define ICACHE_FLUSHALL(size, linesize, tmp) \
ldxa [%g0]ASI_LSU, tmp; \
btst LSU_IC, tmp; \
bz,pn %icc, 1f; \
\
sub size, linesize, tmp; \
2: \
stxa %g0, [tmp]ASI_IC_TAG; \
membar #Sync; \
cmp %g0, tmp; \
bne,pt %icc, 2b; \
sub tmp, linesize, tmp; \
1:
/*
 * SF_WORKAROUND(tmp1, tmp2)
 *
 * Spitfire erratum 32 workaround: force the primary context register
 * (MMU_PCONTEXT) to 0 (KCONTEXT) and synchronize with a flush before a
 * TLB diagnostic access.  Compiles to nothing when SF_ERRATA_32 is not
 * defined.  Clobbers tmp1, tmp2.
 */
#ifdef SF_ERRATA_32
#define SF_WORKAROUND(tmp1, tmp2) \
sethi %hi(FLUSH_ADDR), tmp2 ;\
set MMU_PCONTEXT, tmp1 ;\
stxa %g0, [tmp1]ASI_DMMU ;\
flush tmp2 ;
#else
#define SF_WORKAROUND(tmp1, tmp2)
#endif
/*
 * VTAG_FLUSHUPAGE(lbl, arg1, arg2, tmp1, tmp2, tmp3, tmp4)
 *
 * Demap one user page from both TLBs:
 *   arg1 = page virtual address (DEMAP_SECOND|DEMAP_PAGE_TYPE is OR'd in)
 *   arg2 = context number to demap in
 *   lbl  = unique label prefix so the macro can be expanded more than
 *          once (labels lbl##4 / lbl##5 are generated)
 * Disables interrupts (PSTATE_IE) for the duration.  If the secondary
 * context register already holds arg2 the write/restore of MMU_SCONTEXT
 * is skipped — note the conditional at lbl##5 reuses the %icc result of
 * the "cmp tmp4, arg2" above (stxa/flush do not modify the condition
 * codes).  Clobbers arg1 and all tmps; restores %pstate on exit.
 */
#define VTAG_FLUSHUPAGE(lbl, arg1, arg2, tmp1, tmp2, tmp3, tmp4) \
rdpr %pstate, tmp1 ;\
andn tmp1, PSTATE_IE, tmp2 ;\
wrpr tmp2, 0, %pstate ;\
sethi %hi(FLUSH_ADDR), tmp2 ;\
set MMU_SCONTEXT, tmp3 ;\
ldxa [tmp3]ASI_DMMU, tmp4 ;\
or DEMAP_SECOND | DEMAP_PAGE_TYPE, arg1, arg1 ;\
cmp tmp4, arg2 ;\
be,a,pt %icc, lbl##4 ;\
nop ;\
stxa arg2, [tmp3]ASI_DMMU ;\
lbl##4: ;\
stxa %g0, [arg1]ASI_DTLB_DEMAP ;\
stxa %g0, [arg1]ASI_ITLB_DEMAP ;\
flush tmp2 ;\
be,a,pt %icc, lbl##5 ;\
nop ;\
stxa tmp4, [tmp3]ASI_DMMU ;\
flush tmp2 ;\
lbl##5: ;\
wrpr %g0, tmp1, %pstate
/*
 * DTLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1..tmp6)
 *
 * Walk the DTLB from entry arg1 down to 0 and demap every VALID,
 * UNLOCKED entry belonging to a user context (context != KCONTEXT).
 * Per entry: read the TTE via ASI_DTLB_ACCESS, skip if the lock bit
 * (bit 6) is set or the valid bit (bit 63) is clear, read the tag via
 * ASI_DTLB_TAGREAD, split it into context (tmp6) and page va (tmp5),
 * skip kernel-context entries, then demap via VTAG_FLUSHUPAGE.
 * arg1 is consumed (counts down to 0); all tmps are clobbered.
 * SF_WORKAROUND is applied before each diagnostic read.
 */
#define DTLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
tmp4, tmp5, tmp6) \
lbl##0: ;\
sllx arg1, 3, tmp3 ;\
SF_WORKAROUND(tmp1, tmp2) ;\
ldxa [tmp3]ASI_DTLB_ACCESS, tmp4 ;\
srlx tmp4, 6, tmp4 ;\
andcc tmp4, 1, %g0 ;\
bnz,pn %xcc, lbl##1 ;\
srlx tmp4, 57, tmp4 ;\
andcc tmp4, 1, %g0 ;\
beq,pn %xcc, lbl##1 ;\
nop ;\
set TAGREAD_CTX_MASK, tmp1 ;\
ldxa [tmp3]ASI_DTLB_TAGREAD, tmp2 ;\
and tmp2, tmp1, tmp6 ;\
andn tmp2, tmp1, tmp5 ;\
set KCONTEXT, tmp4 ;\
cmp tmp6, tmp4 ;\
be lbl##1 ;\
nop ;\
VTAG_FLUSHUPAGE(VD##lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
lbl##1: ;\
brgz,pt arg1, lbl##0 ;\
sub arg1, 1, arg1
/*
 * ITLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1..tmp6)
 *
 * ITLB twin of DTLB_FLUSH_UNLOCKED_UCTXS: walk ITLB entries arg1..0 and
 * demap every valid, unlocked, non-kernel-context entry (diagnostic
 * reads via ASI_ITLB_ACCESS / ASI_ITLB_TAGREAD, demap via
 * VTAG_FLUSHUPAGE with the VI##lbl label prefix).  arg1 is consumed;
 * all tmps are clobbered.
 */
#define ITLB_FLUSH_UNLOCKED_UCTXS(lbl, arg1, tmp1, tmp2, tmp3, \
tmp4, tmp5, tmp6) \
lbl##0: ;\
sllx arg1, 3, tmp3 ;\
SF_WORKAROUND(tmp1, tmp2) ;\
ldxa [tmp3]ASI_ITLB_ACCESS, tmp4 ;\
srlx tmp4, 6, tmp4 ;\
andcc tmp4, 1, %g0 ;\
bnz,pn %xcc, lbl##1 ;\
srlx tmp4, 57, tmp4 ;\
andcc tmp4, 1, %g0 ;\
beq,pn %xcc, lbl##1 ;\
nop ;\
set TAGREAD_CTX_MASK, tmp1 ;\
ldxa [tmp3]ASI_ITLB_TAGREAD, tmp2 ;\
and tmp2, tmp1, tmp6 ;\
andn tmp2, tmp1, tmp5 ;\
set KCONTEXT, tmp4 ;\
cmp tmp6, tmp4 ;\
be lbl##1 ;\
nop ;\
VTAG_FLUSHUPAGE(VI##lbl, tmp5, tmp6, tmp1, tmp2, tmp3, tmp4) ;\
lbl##1: ;\
brgz,pt arg1, lbl##0 ;\
sub arg1, 1, arg1
/*
 * GET_CPU_PRIVATE_PTR(r_or_s, scr1, scr2, label)
 *
 * Load this CPU's cpu_m.cpu_private pointer and advance it by the
 * register-or-symbol offset r_or_s, leaving the result in scr1.
 * Branches to 'label' (leaving scr1 == 0) when the CPU has no private
 * area set up yet.  scr2 is scratch for CPU_ADDR.
 *
 * BUG FIX: the last macro line previously ended in "; \" with no
 * following blank line, so the cpp continuation swallowed the
 * "#ifdef HUMMINGBIRD" below into the macro body — the #ifdef was never
 * processed as a directive and every expansion of this macro would have
 * emitted it as (bogus) assembler text.  The trailing backslash is
 * removed so the macro ends here.
 */
#define GET_CPU_PRIVATE_PTR(r_or_s, scr1, scr2, label) \
CPU_ADDR(scr1, scr2); \
ldn [scr1 + CPU_PRIVATE], scr1; \
cmp scr1, 0; \
be label; \
nop; \
add scr1, r_or_s, scr1

#ifdef HUMMINGBIRD
#define HB_ECACHE_FLUSH_CNT 2
#define HB_PHYS_FLUSH_CNT 10
#endif
/*
 * vtag_flushpage(caddr_t vaddr [%o0], sfmmu_t *sfmmup [%o1])
 *
 * Demap one page from both the D- and I-TLB.  Interrupts are disabled
 * for the duration (saved %pstate in %o5, restored in the retl delay
 * slot).  Kernel-hat pages (sfmmup == ksfmmup) are demapped directly;
 * user pages are demapped in the hat's context number, temporarily
 * writing MMU_SCONTEXT if it does not already hold that cnum.
 * Clobbers %o0-%o5, %g1, %g2.
 */
ENTRY_NP(vtag_flushpage)
rdpr %pstate, %o5
#ifdef DEBUG
PANIC_IF_INTR_DISABLED_PSTR(%o5, sfdi_label1, %g1)
#endif
andn %o5, PSTATE_IE, %o4
wrpr %o4, 0, %pstate
sethi %hi(ksfmmup), %o3
ldx [%o3 + %lo(ksfmmup)], %o3
cmp %o3, %o1
bne,pt %xcc, 1f ! if not kernel as, go to 1
sethi %hi(FLUSH_ADDR), %o3
stxa %g0, [%o0]ASI_DTLB_DEMAP ! kernel: vaddr demaps in current ctx
stxa %g0, [%o0]ASI_ITLB_DEMAP
flush %o3
b 5f
nop
1:
SFMMU_CPU_CNUM(%o1, %g1, %g2) ! %g1 = sfmmu cnum on this CPU
set MMU_SCONTEXT, %o4
ldxa [%o4]ASI_DMMU, %o2 ! %o2 = old secondary context
or DEMAP_SECOND | DEMAP_PAGE_TYPE, %o0, %o0
cmp %o2, %g1
be,pt %icc, 4f ! already in the right context?
nop
stxa %g1, [%o4]ASI_DMMU ! switch secondary ctx to cnum
4:
stxa %g0, [%o0]ASI_DTLB_DEMAP
stxa %g0, [%o0]ASI_ITLB_DEMAP
flush %o3
! %icc still holds the "cmp %o2, %g1" result (stxa/flush leave ccr alone)
be,pt %icc, 5f ! ctx was untouched: nothing to restore
nop
stxa %o2, [%o4]ASI_DMMU ! restore old secondary context
flush %o3
5:
retl
wrpr %g0, %o5, %pstate ! restore interrupts
SET_SIZE(vtag_flushpage)
.seg ".text"
.flushallmsg:
.asciz "sfmmu_asm: unimplemented flush operation"
/*
 * vtag_flushall(void)
 *
 * Whole-TLB flush is not implemented for this CPU family; any call is
 * a programming error, so panic with .flushallmsg.
 */
ENTRY_NP(vtag_flushall)
sethi %hi(.flushallmsg), %o0
call panic
or %o0, %lo(.flushallmsg), %o0 ! delay: finish panic message pointer
SET_SIZE(vtag_flushall)
/*
 * vtag_flushall_uctxs(void)
 *
 * Demap every valid, unlocked user-context entry from both TLBs.
 * Looks up this CPU's cpunodes entry to get the ITLB/DTLB entry counts,
 * then walks each TLB with the *_FLUSH_UNLOCKED_UCTXS macros.
 * Clobbers %g1-%g4, %o2-%o5 (macro scratch).
 */
ENTRY_NP(vtag_flushall_uctxs)
CPU_INDEX(%g1, %g2)
mulx %g1, CPU_NODE_SIZE, %g1
set cpunodes, %g2
add %g1, %g2, %g1 ! %g1 = &cpunodes[cpuid]
lduh [%g1 + ITLB_SIZE], %g2 ! %g2 = # entries in ITLB
lduh [%g1 + DTLB_SIZE], %g1 ! %g1 = # entries in DTLB
sub %g2, 1, %g2 ! %g2 = # entries in ITLB - 1
sub %g1, 1, %g1 ! %g1 = # entries in DTLB - 1
!
! Flush itlb's
!
ITLB_FLUSH_UNLOCKED_UCTXS(I, %g2, %g3, %g4, %o2, %o3, %o4, %o5)
!
! Flush dtlb's
!
DTLB_FLUSH_UNLOCKED_UCTXS(D, %g1, %g3, %g4, %o2, %o3, %o4, %o5)
membar #Sync
retl
nop
SET_SIZE(vtag_flushall_uctxs)
/*
 * vtag_flushpage_tl1(vaddr [%g1], sfmmup [%g2])
 *
 * TL1 (cross-call) version of vtag_flushpage.  Runs at trap level 1, so
 * only globals are used and it ends with retry.  Unconditionally writes
 * the hat's cnum into MMU_SCONTEXT, demaps, and restores the old
 * secondary context.  Clobbers %g1-%g5.
 */
ENTRY_NP(vtag_flushpage_tl1)
srln %g1, MMU_PAGESHIFT, %g1
slln %g1, MMU_PAGESHIFT, %g1 ! align vaddr to page boundary
SFMMU_CPU_CNUM(%g2, %g3, %g4) ! %g3 = sfmmu cnum on this CPU
set MMU_SCONTEXT, %g4
ldxa [%g4]ASI_DMMU, %g5 ! %g5 = old secondary context
or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
stxa %g3, [%g4]ASI_DMMU
stxa %g0, [%g1]ASI_DTLB_DEMAP
stxa %g0, [%g1]ASI_ITLB_DEMAP
stxa %g5, [%g4]ASI_DMMU ! restore old secondary context
membar #Sync
retry
SET_SIZE(vtag_flushpage_tl1)
/*
 * vtag_flush_pgcnt_tl1(vaddr [%g1], sfmmup|pgcnt [%g2])
 *
 * TL1 cross-call handler that demaps a run of pages.  %g2 packs the
 * page count (low SFMMU_PGCNT_MASK bits, stored as count - 1) with the
 * sfmmu pointer (remaining bits).  Demaps %g3 = pgcnt pages starting at
 * the page-aligned %g1, in the hat's context, stepping MMU_PAGESIZE per
 * iteration.  Clobbers %g1-%g6; ends with retry.
 */
ENTRY_NP(vtag_flush_pgcnt_tl1)
srln %g1, MMU_PAGESHIFT, %g1
slln %g1, MMU_PAGESHIFT, %g1 ! align vaddr to page boundary
or DEMAP_SECOND | DEMAP_PAGE_TYPE, %g1, %g1
set SFMMU_PGCNT_MASK, %g4
and %g4, %g2, %g3 ! %g3 = packed (pgcnt - 1)
add %g3, 1, %g3 ! %g3 = pgcnt
andn %g2, SFMMU_PGCNT_MASK, %g2 ! %g2 = sfmmup
SFMMU_CPU_CNUM(%g2, %g5, %g6) ! %g5 = sfmmu cnum on this CPU
set MMU_SCONTEXT, %g4
ldxa [%g4]ASI_DMMU, %g6 ! %g6 = old secondary context
stxa %g5, [%g4]ASI_DMMU
set MMU_PAGESIZE, %g2 ! %g2 = per-iteration stride
sethi %hi(FLUSH_ADDR), %g5
1:
stxa %g0, [%g1]ASI_DTLB_DEMAP
stxa %g0, [%g1]ASI_ITLB_DEMAP
flush %g5
deccc %g3 ! decrement page count
bnz,pt %icc,1b
add %g1, %g2, %g1 ! delay: advance to next page
stxa %g6, [%g4]ASI_DMMU ! restore old secondary context
membar #Sync
retry
SET_SIZE(vtag_flush_pgcnt_tl1)
! Not implemented on US1/US2
/*
 * vtag_flushall_tl1(void) — TL1 stub; whole-TLB flush is unsupported on
 * Spitfire-class CPUs, so the cross-call simply returns (retry).
 */
ENTRY_NP(vtag_flushall_tl1)
retry
SET_SIZE(vtag_flushall_tl1)
.seg ".data"
.align 8
/*
 * dflush_type selects the DCACHE_FLUSHPAGE strategy at run time
 * (FLUSHPAGE_TYPE / FLUSHMATCH_TYPE / flush-all); patched during
 * startup elsewhere — defaults to per-page flushing.
 */
.global dflush_type
dflush_type:
.word FLUSHPAGE_TYPE
.seg ".text"
/*
 * vac_flushpage(pfnum [%o0], vcolor [%o1])
 *
 * Flush one page from the virtually-addressed D$ (strategy chosen by
 * dflush_type).  Clobbers %o0-%o4.
 */
ENTRY(vac_flushpage)
DCACHE_FLUSHPAGE(%o0, %o1, %o2, %o3, %o4)
retl
nop
SET_SIZE(vac_flushpage)
/*
 * vac_flushpage_tl1(pfnum [%g1], vcolor [%g2]) — TL1 cross-call version
 * of vac_flushpage; globals only, ends with retry.  Clobbers %g1-%g5.
 */
ENTRY_NP(vac_flushpage_tl1)
DCACHE_FLUSHPAGE(%g1, %g2, %g3, %g4, %g5)
retry
SET_SIZE(vac_flushpage_tl1)
/*
 * vac_flushcolor(vcolor [%o0]) — unconditionally flush every D$ line of
 * one page color.  Clobbers %o0-%o2.
 */
ENTRY(vac_flushcolor)
DCACHE_FLUSHCOLOR(%o0, %o1, %o2)
retl
nop
SET_SIZE(vac_flushcolor)
/*
 * vac_flushcolor_tl1(vcolor [%g1]) — TL1 cross-call version of
 * vac_flushcolor; globals only, ends with retry.  Clobbers %g1-%g3.
 */
ENTRY(vac_flushcolor_tl1)
DCACHE_FLUSHCOLOR(%g1, %g2, %g3)
retry
SET_SIZE(vac_flushcolor_tl1)
.global _dispatch_status_busy
_dispatch_status_busy:
.asciz "ASI_INTR_DISPATCH_STATUS error: busy"
.align 4
/*
 * idsr_busy(void)
 *
 * Sample the interrupt dispatch status register.  As written, %o0 is
 * set to 1 only when the branch is taken, i.e. when IDSR_BUSY is CLEAR
 * (bz,a executes the annulled delay slot only on a taken branch), and
 * returns 0 when the busy bit is set.
 * NOTE(review): the name suggests the opposite polarity ("1 if busy") —
 * confirm the intended return convention against callers.
 * Clobbers %g1.
 */
ENTRY(idsr_busy)
ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
clr %o0
btst IDSR_BUSY, %g1
bz,a,pt %xcc, 1f
mov 1, %o0 ! executed only when branch taken (busy bit clear)
1:
retl
nop
SET_SIZE(idsr_busy)
! NOTE(review): elsewhere this file uses .seg ".text" (quoted with the
! leading dot); Sun as accepts both spellings — confirm before normalizing.
.seg "text"
/*
 * init_mondo(data0 [%o0], data1 [%o1], data2 [%o2])
 *
 * Load the three interrupt-vector dispatch data registers (IDDR_0..2)
 * for a subsequent cross-call dispatch via shipit().  In DEBUG kernels
 * the dispatch status register must be idle on entry or we panic.
 * init_mondo_nocheck skips the DEBUG busy check.  Clobbers %g1-%g3.
 */
ENTRY(init_mondo)
#ifdef DEBUG
!
! IDSR should not be busy at the moment
!
ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
btst IDSR_BUSY, %g1
bz,pt %xcc, 1f
nop
sethi %hi(_dispatch_status_busy), %o0
call panic
or %o0, %lo(_dispatch_status_busy), %o0
#endif
ALTENTRY(init_mondo_nocheck)
!
! interrupt vector dispatch data reg 0
!
1:
mov IDDR_0, %g1
mov IDDR_1, %g2
mov IDDR_2, %g3
stxa %o0, [%g1]ASI_INTR_DISPATCH
!
! interrupt vector dispatch data reg 1
!
stxa %o1, [%g2]ASI_INTR_DISPATCH
!
! interrupt vector dispatch data reg 2
!
stxa %o2, [%g3]ASI_INTR_DISPATCH
retl
membar #Sync ! allowed to be in the delay slot
SET_SIZE(init_mondo)
/*
 * shipit(upaid [%o0])
 *
 * Dispatch the mondo loaded by init_mondo() to the CPU with the given
 * UPA id by storing to the interrupt dispatch command register.  The
 * SF_ERRATA_54 workaround reads back a UDB control register to force
 * completion of the dispatch store.  Clobbers %g1 (and %g3 under
 * SF_ERRATA_54).
 */
ENTRY_NP(shipit)
sll %o0, IDCR_PID_SHIFT, %g1 ! IDCR<18:14> = upa id
or %g1, IDCR_OFFSET, %g1 ! IDCR<13:0> = 0x70
stxa %g0, [%g1]ASI_INTR_DISPATCH ! interrupt vector dispatch
#if defined(SF_ERRATA_54)
membar #Sync ! store must occur before load
mov 0x20, %g3 ! UDBH Control Register Read
ldxa [%g3]ASI_SDB_INTR_R, %g0
#endif
retl
membar #Sync
SET_SIZE(shipit)
/*
 * flush_instr_mem(vaddr [%o0], len [%o1])
 *
 * Make recently-stored instructions visible to instruction fetch by
 * issuing a flush for every ICACHE_FLUSHSZ-byte chunk of [vaddr,
 * vaddr+len).  The unsigned bgu means a len of 0 still performs one
 * flush.  Clobbers %o0, %o1.
 */
ENTRY(flush_instr_mem)
membar #StoreStore ! Ensure the stores
! are globally visible
1:
flush %o0
subcc %o1, ICACHE_FLUSHSZ, %o1 ! bytes = bytes-0x20
bgu,pt %ncc, 1b
add %o0, ICACHE_FLUSHSZ, %o0 ! vaddr = vaddr+0x20
retl
nop
SET_SIZE(flush_instr_mem)
/*
 * flush_ecache(physaddr [%o0], size [%o1], linesize [%o2])
 *
 * Displacement-flush the external cache by reading 'size' bytes of the
 * dedicated flush region starting at 'physaddr', one line at a time.
 * On Hummingbird (HUMMINGBIRD) the E$ must first be put in direct-map
 * mode (UPA config DMAP bit) with interrupts disabled, and the whole
 * cache is read HB_ECACHE_FLUSH_CNT times; the UPA config and %pstate
 * are restored afterwards.  Clobbers %o1 (+%g1,%g3-%g5 on Hummingbird).
 */
ENTRY(flush_ecache)
#ifndef HUMMINGBIRD
b 2f
nop
1:
ldxa [%o0 + %o1]ASI_MEM, %g0 ! start reading from physaddr + size
2:
subcc %o1, %o2, %o1
bcc,a,pt %ncc, 1b
nop
#else
rdpr %pstate, %g4 ! current pstate (restored later)
andn %g4, PSTATE_IE, %g5
wrpr %g0, %g5, %pstate ! disable interrupts
! Place E$ in direct map mode for data access
or %g0, 1, %g5
sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
or %g1, %g5, %g5
membar #Sync
stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
membar #Sync
! flush entire ecache HB_ECACHE_FLUSH_CNT times
mov HB_ECACHE_FLUSH_CNT-1, %g5
2:
sub %o1, %o2, %g3 ! start from last entry
1:
ldxa [%o0 + %g3]ASI_MEM, %g0 ! start reading from physaddr + size
subcc %g3, %o2, %g3
bgeu,a,pt %ncc, 1b
nop
brgz,a,pt %g5, 2b
dec %g5
membar #Sync
stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config reg
membar #Sync
wrpr %g0, %g4, %pstate ! restore earlier pstate
#endif
retl
nop
SET_SIZE(flush_ecache)
/*
 * kdi_flush_idcache(dsize [%o0], dlinesize [%o1],
 *                   isize [%o2], ilinesize [%o3])
 *
 * Debugger helper: invalidate the entire D$ then the entire I$ using
 * the caller-supplied sizes/linesizes (kmdb cannot read the kernel's
 * cache-size globals).  Clobbers %g1.
 */
ENTRY(kdi_flush_idcache)
DCACHE_FLUSHALL(%o0, %o1, %g1)
ICACHE_FLUSHALL(%o2, %o3, %g1)
membar #Sync
retl
nop
SET_SIZE(kdi_flush_idcache)
/*
 * get_ecache_dtag(ecache_addr [%i0], uint64_t *data [%i1],
 *                 uint64_t *tag [%i2], uint64_t *oafsr [%i3],
 *                 uint64_t *acc_afsr [%i4])
 *
 * Diagnostic read of one E$ line: captures the tag (via the staging
 * register ASI_EC_DIAG) and 64 bytes of line data (ASI_EC_R, 8 bytes
 * per iteration), interleaving each data read with an AFSR read so the
 * caller can see per-read error status.  E$ addresses are formed by
 * setting bit 39 (data) / bit 40 (tag) of the line address.  Error
 * reporting is disabled (ASI_ESTATE_ERR cleared) for the duration and
 * restored on exit; interrupts and 32-bit address masking are disabled.
 * The pre-existing AFSR is written to *oafsr; if acc_afsr != NULL any
 * AFSR value with the CP bit set is OR-accumulated into *acc_afsr.
 * Data layout at *data: pairs of (8 bytes E$ data, 8 bytes AFSR).
 * Clobbers globals %g1, %g4, %g6 and locals; runs in its own window.
 */
ENTRY(get_ecache_dtag)
save %sp, -SA(MINFRAME), %sp
or %g0, 1, %l4
sllx %l4, 39, %l4 ! set bit 39 for e$ data access
or %i0, %l4, %g6 ! %g6 = e$ addr for data read
sllx %l4, 1, %l4 ! set bit 40 for e$ tag access
or %i0, %l4, %l4 ! %l4 = e$ addr for tag read
rdpr %pstate, %i5
andn %i5, PSTATE_IE | PSTATE_AM, %i0
wrpr %i0, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
membar #Sync
ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before tag read
stx %i0, [%i3] ! write back the old-afsr
ldxa [%l4]ASI_EC_R, %g0 ! read tag into E$ tag reg
ldxa [%g0]ASI_EC_DIAG, %i0 ! read tag from E$ tag reg
stx %i0, [%i2] ! write back tag result
clr %i2 ! loop count (reuses %i2 now that tag is stored)
brz %i4, 1f ! acc_afsr == NULL?
ldxa [%g0]ASI_AFSR, %i0 ! grab the old-afsr before clearing
srlx %i0, P_AFSR_CP_SHIFT, %l0
btst 1, %l0 ! CP bit set?
bz 1f
nop
ldx [%i4], %g4
or %g4, %i0, %g4 ! aggregate AFSR in cpu private
stx %g4, [%i4]
1:
stxa %i0, [%g0]ASI_AFSR ! clear AFSR
membar #Sync
ldxa [%g6]ASI_EC_R, %i0 ! read the 8byte E$data
stx %i0, [%i1] ! save the E$data
add %g6, 8, %g6
add %i1, 8, %i1
ldxa [%g0]ASI_AFSR, %i0 ! read AFSR for this 16byte read
srlx %i0, P_AFSR_CP_SHIFT, %l0
btst 1, %l0 ! CP bit set?
bz 2f
stx %i0, [%i1] ! save the AFSR
brz %i4, 2f ! acc_afsr == NULL?
nop
ldx [%i4], %g4
or %g4, %i0, %g4 ! aggregate AFSR in cpu private
stx %g4, [%i4]
2:
add %i2, 8, %i2
cmp %i2, 64 ! 8 iterations = 64 bytes of line data
bl,a 1b
add %i1, 8, %i1
stxa %i0, [%g0]ASI_AFSR ! clear AFSR
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
membar #Sync
wrpr %g0, %i5, %pstate
ret
restore
SET_SIZE(get_ecache_dtag)
/*
 * ce_err — correctable-error (CE) trap handler entry.
 *
 * Captures AFSR/AFAR and both UDB (SDB) halves, merges the UDB error
 * bits into the saved AFSR value (upper half -> bits <42:33>, lower
 * half -> <52:43>), clears the per-UDB CE bits and the AFSR CE sticky
 * bit, then vectors through sys_trap to cpu_ce_error() at PIL 14 (or
 * PIL 15 if already there).  If the AFSR also shows a UE, branches to
 * async_err instead, passing ttype 0x63 in %g5.  CEEN is cleared here
 * to stop CE trap recursion; cpu_ce_error is responsible for re-arming.
 * Runs on trap globals %g1-%g6.
 */
ENTRY_NP(ce_err)
ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
!
! Check for a UE... From Kevin.Normoyle:
! We try to switch to the trap for the UE, but since that's
! a hardware pipeline, we might get to the CE trap before we
! can switch. The UDB and AFSR registers will have both the
! UE and CE bits set but the UDB syndrome and the AFAR will be
! for the UE.
!
or %g0, 1, %g1 ! put 1 in g1
sllx %g1, 21, %g1 ! shift left to <21> afsr UE
andcc %g1, %g3, %g0 ! check for UE in afsr
bnz async_err ! handle the UE, not the CE
or %g0, 0x63, %g5 ! pass along the CE ttype
!
! Disable further CE traps to avoid recursion (stack overflow)
! and staying above XCALL_PIL for extended periods.
!
ldxa [%g0]ASI_ESTATE_ERR, %g2
andn %g2, 0x1, %g2 ! clear bit 0 - CEEN
stxa %g2, [%g0]ASI_ESTATE_ERR
membar #Sync ! required
!
! handle the CE
ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
set P_DER_H, %g4 ! put P_DER_H in g4
ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
or %g0, 1, %g6 ! put 1 in g6
sllx %g6, 8, %g6 ! shift g6 to <8> sdb CE
andcc %g5, %g6, %g1 ! check for CE in upper half
sllx %g5, 33, %g5 ! shift upper bits to <42:33>
or %g3, %g5, %g3 ! or with afsr bits
bz,a 1f ! no error, goto 1f
nop
stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
membar #Sync ! membar sync required
1:
set P_DER_L, %g4 ! put P_DER_L in g4
ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
andcc %g5, %g6, %g1 ! check for CE in lower half
sllx %g5, 43, %g5 ! shift upper bits to <52:43>
or %g3, %g5, %g3 ! or with afsr bits
bz,a 2f ! no error, goto 2f
nop
stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
membar #Sync ! membar sync required
2:
or %g0, 1, %g4 ! put 1 in g4
sllx %g4, 20, %g4 ! shift left to <20> afsr CE
stxa %g4, [%g0]ASI_AFSR ! use g4 to clear afsr CE error
membar #Sync ! membar sync required
set cpu_ce_error, %g1 ! put *cpu_ce_error() in g1
rdpr %pil, %g6 ! read pil into %g6
subcc %g6, PIL_15, %g0
movneg %icc, PIL_14, %g4 ! run at pil 14 unless already at 15
sethi %hi(sys_trap), %g5
jmp %g5 + %lo(sys_trap) ! goto sys_trap
movge %icc, PIL_15, %g4 ! already at pil 15
SET_SIZE(ce_err)
/*
 * ce_err_tl1 — CE trap taken at TL>0.
 *
 * Without trap tracing: quietly clear the sticky AFSR bits (write back
 * the value just read) and retry; the error will be reported when it
 * recurs at TL=0.  With TRAPTRACE: treat it as fatal-path — disable
 * error traps and panic via dis_err_panic1 -> ce_trap_tl1.
 */
ENTRY_NP(ce_err_tl1)
#ifndef TRAPTRACE
ldxa [%g0]ASI_AFSR, %g7
stxa %g7, [%g0]ASI_AFSR ! write 1s back to clear sticky bits
membar #Sync
retry
#else
set ce_trap_tl1, %g1 ! destination for dis_err_panic1
sethi %hi(dis_err_panic1), %g4
jmp %g4 + %lo(dis_err_panic1)
nop
#endif
SET_SIZE(ce_err_tl1)
#ifdef TRAPTRACE
.celevel1msg:
.asciz "Softerror with trap tracing at tl1: AFAR 0x%08x.%08x AFSR 0x%08x.%08x";
/*
 * ce_trap_tl1 — panic path for a TL1 CE when trap tracing is built in.
 * On entry (per the sys_trap convention used by dis_err_panic1) %o1/%o2
 * hold the AFAR halves and %o3/%o4 the AFSR halves; shuffle them into
 * panic's argument order and panic with .celevel1msg.
 */
ENTRY_NP(ce_trap_tl1)
! upper 32 bits of AFSR already in o3
mov %o4, %o0 ! save AFAR upper 32 bits
mov %o2, %o4 ! lower 32 bits of AFSR
mov %o1, %o2 ! lower 32 bits of AFAR
mov %o0, %o1 ! upper 32 bits of AFAR
set .celevel1msg, %o0
call panic
nop
SET_SIZE(ce_trap_tl1)
#endif
/*
 * async_err — asynchronous (uncorrectable / fatal) error trap handler.
 *
 * Entered directly or from ce_err with the trap type in %g5.  Disables
 * all error traps, captures AFSR (+ttype packed into bits <63:53>) and
 * AFAR, merges UDB UE status from both halves into the saved AFSR
 * (upper -> <42:33>, lower -> <52:43>), clears the per-UDB UE bits and
 * all AFSR sticky bits, then vectors through sys_trap to
 * cpu_async_error() at PIL 15.  Runs on trap globals %g1-%g6.
 */
ENTRY_NP(async_err)
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable ecc and other cpu errors
membar #Sync ! membar sync required
ldxa [%g0]ASI_AFSR, %g3 ! save afsr in g3
ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
sllx %g5, 53, %g5 ! move ttype to <63:53>
or %g3, %g5, %g3 ! or to afsr in g3
or %g0, 1, %g1 ! put 1 in g1
sllx %g1, 21, %g1 ! shift left to <21> afsr UE
andcc %g1, %g3, %g0 ! check for UE in afsr
bz,a,pn %icc, 2f ! if !UE skip sdb read/clear
nop
set P_DER_H, %g4 ! put P_DER_H in g4
ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
or %g0, 1, %g6 ! put 1 in g6
sllx %g6, 9, %g6 ! shift g6 to <9> sdb UE
andcc %g5, %g6, %g1 ! check for UE in upper half
sllx %g5, 33, %g5 ! shift upper bits to <42:33>
or %g3, %g5, %g3 ! or with afsr bits
bz,a 1f ! no error, goto 1f
nop
stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit
membar #Sync ! membar sync required
1:
set P_DER_L, %g4 ! put P_DER_L in g4
ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
andcc %g5, %g6, %g1 ! check for UE in lower half
sllx %g5, 43, %g5 ! shift upper bits to <52:43>
or %g3, %g5, %g3 ! or with afsr bits
bz,a 2f ! no error, goto 2f
nop
stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg UE error bit
membar #Sync ! membar sync required
2:
stxa %g3, [%g0]ASI_AFSR ! clear all the sticky bits
membar #Sync ! membar sync required
RESET_USER_RTT_REGS(%g4, %g5, async_err_resetskip)
async_err_resetskip:
set cpu_async_error, %g1 ! put cpu_async_error in g1
sethi %hi(sys_trap), %g5
jmp %g5 + %lo(sys_trap) ! goto sys_trap
or %g0, PIL_15, %g4 ! run at pil 15
SET_SIZE(async_err)
/*
 * dis_err_panic1 — common "disable errors and go panic" path.
 *
 * Caller places the sys_trap destination routine in %g1.  Disables all
 * error traps, gathers AFAR (%g2) and AFSR merged with both UDB halves
 * (%g3), and enters sys_trap at PIL -1 (sub %g0,1 ⇒ "use current PIL"
 * convention).  Runs on trap globals.
 */
ENTRY_NP(dis_err_panic1)
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable all error traps
membar #Sync
! save destination routine is in g1
ldxa [%g0]ASI_AFAR, %g2 ! read afar
ldxa [%g0]ASI_AFSR, %g3 ! read afsr
set P_DER_H, %g4 ! put P_DER_H in g4
ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
sllx %g5, 33, %g5 ! shift upper bits to <42:33>
or %g3, %g5, %g3 ! or with afsr bits
set P_DER_L, %g4 ! put P_DER_L in g4
ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
sllx %g5, 43, %g5 ! shift upper bits to <52:43>
or %g3, %g5, %g3 ! or with afsr bits
RESET_USER_RTT_REGS(%g4, %g5, dis_err_panic1_resetskip)
dis_err_panic1_resetskip:
sethi %hi(sys_trap), %g5
jmp %g5 + %lo(sys_trap) ! goto sys_trap
sub %g0, 1, %g4
SET_SIZE(dis_err_panic1)
/*
 * clr_datapath(void)
 *
 * Clear any pending UE/CE error bits (bits <9:8>) in both halves of the
 * UDB (SDB) error registers by writing back exactly the bits found set.
 * Clobbers %o1, %o2, %o4, %o5.
 */
ENTRY(clr_datapath)
set P_DER_H, %o4 ! put P_DER_H in o4
ldxa [%o4]ASI_SDB_INTR_R, %o5 ! read sdb upper half into o5
or %g0, 0x3, %o2 ! put 0x3 in o2
sllx %o2, 8, %o2 ! shift o2 to <9:8> sdb
andcc %o5, %o2, %o1 ! check for UE,CE in upper half
bz,a 1f ! no error, goto 1f
nop
stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
membar #Sync ! membar sync required
1:
set P_DER_L, %o4 ! put P_DER_L in o4
ldxa [%o4]ASI_SDB_INTR_R, %o5 ! read sdb lower half into o5
andcc %o5, %o2, %o1 ! check for UE,CE in lower half
bz,a 2f ! no error, goto 2f
nop
stxa %o1, [%o4]ASI_SDB_INTR_W ! clear sdb reg UE,CE error bits
membar #Sync
2:
retl
nop
SET_SIZE(clr_datapath)
/*
 * get_udb_errors(uint64_t *udbh [%o0], uint64_t *udbl [%o1])
 *
 * Read both UDB (SDB) error registers and store the raw values to the
 * caller's buffers.  Clobbers %o2, %o3.
 */
ENTRY(get_udb_errors)
set P_DER_H, %o3
ldxa [%o3]ASI_SDB_INTR_R, %o2
stx %o2, [%o0] ! *udbh = upper half
set P_DER_L, %o3
ldxa [%o3]ASI_SDB_INTR_R, %o2
retl
stx %o2, [%o1] ! delay: *udbl = lower half
SET_SIZE(get_udb_errors)
/*
 * itlb_rd_entry(entry [%o0], tte_t *tte [%o1], uint64_t *va_tag [%o2])
 *
 * Diagnostic read of one ITLB entry.  Stores the TTE (with the
 * Spitfire-reserved PFN-high bits masked off) to *tte and the tag-read
 * value minus the context bits to *va_tag.  SF_ERRATA_32 builds first
 * force MMU_PCONTEXT to KCONTEXT.  Clobbers %o0, %o4, %o5, %g1, %g2.
 */
ENTRY_NP(itlb_rd_entry)
sllx %o0, 3, %o0 ! entry index -> diagnostic ASI offset
#if defined(SF_ERRATA_32)
sethi %hi(FLUSH_ADDR), %g2
set MMU_PCONTEXT, %g1
stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
flush %g2
#endif
ldxa [%o0]ASI_ITLB_ACCESS, %g1
set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
andn %g1, %g2, %g1 ! for details
stx %g1, [%o1]
ldxa [%o0]ASI_ITLB_TAGREAD, %g2
set TAGREAD_CTX_MASK, %o4
andn %g2, %o4, %o5 ! drop context bits from the tag
retl
stx %o5, [%o2]
SET_SIZE(itlb_rd_entry)
/*
 * dtlb_rd_entry(entry [%o0], tte_t *tte [%o1], uint64_t *va_tag [%o2])
 *
 * DTLB twin of itlb_rd_entry: diagnostic read of one DTLB entry via
 * ASI_DTLB_ACCESS / ASI_DTLB_TAGREAD, masking the Spitfire-reserved
 * PFN-high bits and the tag's context bits.  Clobbers %o0, %o4, %o5,
 * %g1, %g2.
 */
ENTRY_NP(dtlb_rd_entry)
sllx %o0, 3, %o0 ! entry index -> diagnostic ASI offset
#if defined(SF_ERRATA_32)
sethi %hi(FLUSH_ADDR), %g2
set MMU_PCONTEXT, %g1
stxa %g0, [%g1]ASI_DMMU ! KCONTEXT
flush %g2
#endif
ldxa [%o0]ASI_DTLB_ACCESS, %g1
set TTE_SPITFIRE_PFNHI_CLEAR, %g2 ! spitfire only
sllx %g2, TTE_SPITFIRE_PFNHI_SHIFT, %g2 ! see comment above
andn %g1, %g2, %g1 ! for details (same masking as itlb_rd_entry)
stx %g1, [%o1]
ldxa [%o0]ASI_DTLB_TAGREAD, %g2
set TAGREAD_CTX_MASK, %o4
andn %g2, %o4, %o5 ! drop context bits from the tag
retl
stx %o5, [%o2]
SET_SIZE(dtlb_rd_entry)
/*
 * set_lsu(uint64_t lsu [%o0]) — write the load/store unit control
 * register (enables/disables caches, watchpoints, parity, ...).
 */
ENTRY(set_lsu)
stxa %o0, [%g0]ASI_LSU ! store to LSU
retl
membar #Sync
SET_SIZE(set_lsu)
/*
 * get_lsu(void) — return the load/store unit control register in %o0.
 */
ENTRY(get_lsu)
retl
ldxa [%g0]ASI_LSU, %o0 ! load LSU
SET_SIZE(get_lsu)
/*
 * cpu_clearticknpt — clear the TICK register's NPT (non-privileged
 * trap) bit, bit 63, so user code may read %tick.
 *
 * Called via jmp with the return address in %g4 (returns with
 * "jmp %g4 + 4"), so it runs on globals only.  If %tick reads
 * non-negative the NPT bit is already clear and nothing is done.
 * Otherwise %tick is re-read and rewritten with bit 63 toggled off
 * (wrpr xors its two source operands); the .align 64 keeps the
 * read-modify-write sequence within one I$ line to minimize the skew
 * added to %tick — presumably why the re-read at "2:" exists (confirm
 * against the original sources).  BB_ERRATA_1 adds a %tick read-back
 * after the write.  Interrupts are disabled around the update.
 * Clobbers %g1, %g2, %g3.
 */
ENTRY_NP(cpu_clearticknpt)
rdpr %pstate, %g1
andn %g1, PSTATE_IE, %g3
wrpr %g0, %g3, %pstate ! disable interrupts
rdpr %tick, %g2
brgez,pn %g2, 1f ! NPT (bit 63) already clear: done
mov 1, %g3
sllx %g3, 63, %g3 ! %g3 = mask for bit 63
ba,a,pt %xcc, 2f
.align 64
2:
rdpr %tick, %g2
wrpr %g3, %g2, %tick ! %tick = %g2 ^ bit63 (wrpr xors operands)
#if defined(BB_ERRATA_1)
rdpr %tick, %g0
#endif
1:
jmp %g4 + 4 ! return to caller-supplied address
wrpr %g0, %g1, %pstate ! delay: restore interrupts
SET_SIZE(cpu_clearticknpt)
/*
 * get_ecache_tag(ecache_addr [%o0], uint64_t *nafsr [%o1],
 *                uint64_t *acc_afsr [%o2])
 *
 * Diagnostic read of one E$ tag (bit 40 set in the address selects tag
 * access via ASI_EC_R, value fetched from the staging register
 * ASI_EC_DIAG).  Error reporting is disabled for the duration;
 * interrupts and 32-bit address masking off.  The AFSR is sampled
 * before and after the tag read; any value with the CP bit set is
 * OR-accumulated into *acc_afsr, the post-read AFSR is stored to
 * *nafsr, and the tag is returned in %o0.  Clobbers %o3-%o5, %g1, %g4.
 */
ENTRY(get_ecache_tag)
or %g0, 1, %o4
sllx %o4, 40, %o4 ! set bit 40 for e$ tag access
or %o0, %o4, %o4 ! %o4 = e$ addr for tag read
rdpr %pstate, %o5
andn %o5, PSTATE_IE | PSTATE_AM, %o0
wrpr %o0, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
membar #Sync
ldxa [%g0]ASI_AFSR, %o0
srlx %o0, P_AFSR_CP_SHIFT, %o3
btst 1, %o3 ! CP bit set in pre-read AFSR?
bz 1f
nop
ldx [%o2], %g4
or %g4, %o0, %g4 ! aggregate AFSR in cpu private
stx %g4, [%o2]
1:
stxa %o0, [%g0]ASI_AFSR ! clear AFSR
membar #Sync
ldxa [%o4]ASI_EC_R, %g0 ! tag -> E$ staging register
ldxa [%g0]ASI_EC_DIAG, %o0 ! read tag from e$ tag reg
ldxa [%g0]ASI_AFSR, %o3
srlx %o3, P_AFSR_CP_SHIFT, %o4
btst 1, %o4 ! CP bit set in post-read AFSR?
bz 2f
stx %o3, [%o1] ! AFSR after sticky clear
ldx [%o2], %g4
or %g4, %o3, %g4 ! aggregate AFSR in cpu private
stx %g4, [%o2]
2:
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
membar #Sync
retl
wrpr %g0, %o5, %pstate
SET_SIZE(get_ecache_tag)
/*
 * check_ecache_line(ecache_addr [%o0], uint64_t *acc_afsr [%o1])
 *
 * Scrub-check one 64-byte E$ line: diagnostic-read the line's data
 * (8 reads of 8 bytes via ASI_EC_R with bit 39 set) with error traps
 * disabled, then sample the AFSR to see whether any read latched an
 * error.  AFSR values with the CP bit set (both the pre-existing value
 * and the accumulated one) are OR'd into *acc_afsr.  Returns the final
 * AFSR in %o0.  Clobbers %o2-%o5, %g1.
 */
ENTRY(check_ecache_line)
or %g0, 1, %o4
sllx %o4, 39, %o4 ! set bit 39 for e$ data access
or %o0, %o4, %o4 ! %o4 = e$ addr for data read
rdpr %pstate, %o5
andn %o5, PSTATE_IE | PSTATE_AM, %o0
wrpr %o0, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
membar #Sync
ldxa [%g0]ASI_AFSR, %o0
srlx %o0, P_AFSR_CP_SHIFT, %o2
btst 1, %o2 ! CP bit set in pre-existing AFSR?
bz 1f
clr %o2 ! loop count
ldx [%o1], %o3
or %o3, %o0, %o3 ! aggregate AFSR in cpu private
stx %o3, [%o1]
1:
stxa %o0, [%g0]ASI_AFSR ! clear AFSR
membar #Sync
2:
ldxa [%o4]ASI_EC_R, %g0 ! Read the E$ data 8bytes each
add %o2, 1, %o2
cmp %o2, 8 ! 8 * 8 bytes = one 64-byte line
bl,a 2b
add %o4, 8, %o4
membar #Sync
ldxa [%g0]ASI_AFSR, %o0 ! read accumulated AFSR
srlx %o0, P_AFSR_CP_SHIFT, %o2
btst 1, %o2 ! CP bit set after the reads?
bz 3f
nop
ldx [%o1], %o3
or %o3, %o0, %o3 ! aggregate AFSR in cpu private
stx %o3, [%o1]
3:
stxa %o0, [%g0]ASI_AFSR ! clear AFSR
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
membar #Sync
retl
wrpr %g0, %o5, %pstate
SET_SIZE(check_ecache_line)
/*
 * read_and_clear_afsr(void) — return the AFSR in %o0 and clear its
 * sticky bits by writing the same value back (delay slot of retl).
 */
ENTRY(read_and_clear_afsr)
ldxa [%g0]ASI_AFSR, %o0
retl
stxa %o0, [%g0]ASI_AFSR ! clear AFSR
SET_SIZE(read_and_clear_afsr)
/*
 * scrubphys(physaddr [%o0], ecache_size [%o1])
 *
 * Scrub one physical address out of the E$: displacement-load the
 * aliased flush address so the target line is clean, force a write-back
 * of the line with a casx to the address itself (casx with old==new==0
 * rewrites memory with its own value), then displace it again.  The
 * alias is physaddr XOR ecache_size, masked to 2*ecache_size so it
 * never collides with ecache_flushaddr itself.  Interrupts and 32-bit
 * address masking are disabled around the sequence.  On Hummingbird the
 * E$ is switched to direct-map mode first and each set is displaced at
 * set-size stride (HB_PHYS_FLUSH_CNT loads, skipping hits on physaddr
 * itself); UPA config is restored afterwards.
 * Clobbers %o1-%o5 (+%g1-%g5 on Hummingbird).
 */
ENTRY(scrubphys)
or %o1, %g0, %o2 ! put ecache size in %o2
#ifndef HUMMINGBIRD
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %o3 ! 2 * ecachesize in case
! addr == ecache_flushaddr
sub %o3, 1, %o3 ! -1 == mask
and %o1, %o3, %o1 ! and with xor'd address
set ecache_flushaddr, %o3
ldx [%o3], %o3
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
casxa [%o0]ASI_MEM, %g0, %g0 ! rewrite line in place, forcing write-back
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
#else
sethi %hi(ecache_associativity), %g5
ld [%g5 + %lo(ecache_associativity)], %g5
udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %g3 ! 2 * ecachesize in case
! addr == ecache_flushaddr
sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
and %o1, %g3, %o1 ! and with xor'd address
sethi %hi(ecache_flushaddr), %o3
ldx [%o3 + %lo(ecache_flushaddr)], %o3
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
! Place E$ in direct map mode for data access
or %g0, 1, %g5
sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
or %g1, %g5, %g5
membar #Sync
stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
membar #Sync
! Displace cache line from each set of E$ starting at the
! aliased address. at set-size stride, wrapping at 2*ecache_size
! and skipping load from physaddr. We need 10 loads to flush the
! physaddr from E$.
mov HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
or %o1, %g0, %g5 ! starting aliased offset
2:
ldxa [%g5 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
1:
add %g5, %g2, %g5 ! calculate offset in next set
and %g5, %g3, %g5 ! force offset within aliased range
cmp %g5, %o5 ! skip loads from physaddr
be,pn %ncc, 1b
nop
brgz,pt %g4, 2b
dec %g4
casxa [%o0]ASI_MEM, %g0, %g0 ! rewrite line in place, forcing write-back
! Flush %o0 from ecahe again.
! Need single displacement flush at offset %o1 this time as
! the E$ is already in direct map mode.
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
membar #Sync
stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
membar #Sync
#endif
wrpr %g0, %o4, %pstate ! restore earlier pstate register value
retl
membar #Sync ! move the data out of the load buffer
SET_SIZE(scrubphys)
/*
 * clearphys(physaddr [%o0], ecache_size [%o1], ecache_linesize [%o2])
 *
 * Zero one E$ line's worth of memory at physaddr and push it out of the
 * E$: store zeros over the line (last doubleword first), then use the
 * same displace / casx / displace sequence as scrubphys to force the
 * (now-zero) line back to memory — used to clear lines with bad ECC,
 * so error traps are disabled around the stores on the non-Hummingbird
 * path.  Hummingbird builds use direct-map mode and the multi-set
 * displacement walk, like scrubphys.  Interrupts and 32-bit address
 * masking are disabled throughout; %pstate is restored in the retl
 * delay slot.  Clobbers %o1-%o5, %g1 (+%g2-%g5 on Hummingbird).
 */
ENTRY(clearphys)
or %o2, %g0, %o3 ! ecache linesize
or %o1, %g0, %o2 ! ecache size
#ifndef HUMMINGBIRD
or %o3, %g0, %o4 ! save ecache linesize
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %o3 ! 2 * ecachesize
sub %o3, 1, %o3 ! -1 == mask
and %o1, %o3, %o1 ! and with xor'd address
set ecache_flushaddr, %o3
ldx [%o3], %o3
or %o4, %g0, %o2 ! saved ecache linesize
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
membar #Sync
! need to put zeros in the cache line before displacing it
sub %o2, 8, %o2 ! get offset of last double word in ecache line
1:
stxa %g0, [%o0 + %o2]ASI_MEM ! put zeros in the ecache line
sub %o2, 8, %o2
brgez,a,pt %o2, 1b
nop
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
casxa [%o0]ASI_MEM, %g0, %g0 ! rewrite line in place, forcing write-back
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
membar #Sync
#else
or %o3, %g0, %o4 ! save ecache linesize
sethi %hi(ecache_associativity), %g5
ld [%g5 + %lo(ecache_associativity)], %g5
udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %g3 ! 2 * ecachesize
sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
and %o1, %g3, %o1 ! and with xor'd address
sethi %hi(ecache_flushaddr), %o3
ldx [%o3 +%lo(ecache_flushaddr)], %o3
or %o4, %g0, %o2 ! saved ecache linesize
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
! Place E$ in direct map mode for data access
or %g0, 1, %g5
sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
ldxa [%g0]ASI_UPA_CONFIG, %g1 ! current UPA config (restored later)
or %g1, %g5, %g5
membar #Sync
stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
membar #Sync
! need to put zeros in the cache line before displacing it
sub %o2, 8, %o2 ! get offset of last double word in ecache line
1:
stxa %g0, [%o0 + %o2]ASI_MEM ! put zeros in the ecache line
sub %o2, 8, %o2
brgez,a,pt %o2, 1b
nop
! Displace cache line from each set of E$ starting at the
! aliased address. at set-size stride, wrapping at 2*ecache_size
! and skipping load from physaddr. We need 10 loads to flush the
! physaddr from E$.
mov HB_PHYS_FLUSH_CNT-1, %g4 ! #loads to flush phys addr
sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
or %o1, %g0, %g5 ! starting offset
2:
ldxa [%g5 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
3:
add %g5, %g2, %g5 ! calculate offset in next set
and %g5, %g3, %g5 ! force offset within aliased range
cmp %g5, %o5 ! skip loads from physaddr
be,pn %ncc, 3b
nop
brgz,pt %g4, 2b
dec %g4
casxa [%o0]ASI_MEM, %g0, %g0 ! rewrite line in place, forcing write-back
! Flush %o0 from ecahe again.
! Need single displacement flush at offset %o1 this time as
! the E$ is already in direct map mode.
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
membar #Sync
stxa %g1, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
membar #Sync
#endif
retl
wrpr %g0, %o4, %pstate ! restore earlier pstate register value
SET_SIZE(clearphys)
/*
 * flushecacheline(physaddr [%o0], ecache_size [%o1])
 *
 * Displacement-flush a single line from the E$ WITHOUT first scrubbing
 * it (unlike scrubphys there is no casx write-back) — used when the
 * line's contents must not be touched.  Error traps are disabled around
 * the displacement load since the displaced line may carry bad ECC.
 * Alias computed as physaddr XOR ecache_size masked to 2*ecache_size.
 * Hummingbird builds enable E$ direct-map mode and displace every set
 * at set-size stride (HB_PHYS_FLUSH_CNT loads, skipping physaddr
 * itself), then restore UPA config.  Interrupts and 32-bit address
 * masking are disabled; %pstate restored in the retl delay slot.
 * Clobbers %o1-%o5, %g1 (+%g2-%g5 on Hummingbird).
 */
ENTRY(flushecacheline)
or %o1, %g0, %o2 ! put ecache size in %o2
#ifndef HUMMINGBIRD
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %o3 ! 2 * ecachesize in case
! addr == ecache_flushaddr
sub %o3, 1, %o3 ! -1 == mask
and %o1, %o3, %o1 ! and with xor'd address
set ecache_flushaddr, %o3
ldx [%o3], %o3
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
membar #Sync
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
membar #Sync
#else
sethi %hi(ecache_associativity), %g5
ld [%g5 + %lo(ecache_associativity)], %g5
udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %g3 ! 2 * ecachesize in case
! addr == ecache_flushaddr
sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
and %o1, %g3, %o1 ! and with xor'd address
sethi %hi(ecache_flushaddr), %o3
ldx [%o3 + %lo(ecache_flushaddr)], %o3
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
! Place E$ in direct map mode for data access
or %g0, 1, %g5
sllx %g5, HB_UPA_DMAP_DATA_BIT, %g5
ldxa [%g0]ASI_UPA_CONFIG, %g4 ! current UPA config (restored later)
or %g4, %g5, %g5
membar #Sync
stxa %g5, [%g0]ASI_UPA_CONFIG ! enable direct map for data access
membar #Sync
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
membar #Sync
! Displace cache line from each set of E$ starting at the
! aliased address. at set-size stride, wrapping at 2*ecache_size
! and skipping load from physaddr. We need 10 loads to flush the
! physaddr from E$.
mov HB_PHYS_FLUSH_CNT-1, %g5 ! #loads to flush physaddr
sub %o0, %o3, %o5 ! physaddr - ecache_flushaddr
2:
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
3:
add %o1, %g2, %o1 ! calculate offset in next set
and %o1, %g3, %o1 ! force offset within aliased range
cmp %o1, %o5 ! skip loads from physaddr
be,pn %ncc, 3b
nop
brgz,pt %g5, 2b
dec %g5
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
membar #Sync
stxa %g4, [%g0]ASI_UPA_CONFIG ! restore UPA config (DM bits)
membar #Sync
#endif
retl
wrpr %g0, %o4, %pstate
SET_SIZE(flushecacheline)
! Register usage:
!
! Arguments:
! %g1 - inum
!
! Internal:
! %g2, %g3, %g5 - scratch
! %g4 - ptr. to spitfire_scrub_misc ec_scrub_outstanding.
! %g6 - setsoftint_tl1 address
/*
 * ecache_scrubreq_tl1 — TL1 handler that queues the E$ scrubber's soft
 * interrupt (inum in %g1) via setsoftint_tl1, but only when no request
 * is already outstanding: the per-CPU ec_scrub_outstanding counter is
 * incremented, and if it was already non-zero we just retry.  CPUs with
 * no private area set up simply retry (label 1f from
 * GET_CPU_PRIVATE_PTR).  Counter update needs no atomics at TL1.
 */
ENTRY_NP(ecache_scrubreq_tl1)
set SFPR_SCRUB_MISC + EC_SCRUB_OUTSTANDING, %g2
GET_CPU_PRIVATE_PTR(%g2, %g4, %g5, 1f);
ld [%g4], %g2 ! cpu's ec_scrub_outstanding.
set setsoftint_tl1, %g6
!
! no need to use atomic instructions for the following
! increment - we're at tl1
!
add %g2, 0x1, %g3
brnz,pn %g2, 1f ! no need to enqueue more intr_vec
st %g3, [%g4] ! delay - store incremented counter
jmp %g6 ! setsoftint_tl1(%g1) - queue intr_vec
nop
! not reached
1:
retry
SET_SIZE(ecache_scrubreq_tl1)
/*
 * write_ec_tag_parity(ecache_addr [%o0])
 *
 * Overwrite one E$ tag with an all-zero tag in the invalid state but
 * with good parity (S_EC_PARITY << S_ECPAR_SHIFT staged through
 * ASI_EC_DIAG, committed with ASI_EC_W) — used to clear a tag with a
 * parity error.  Error traps, interrupts and 32-bit address masking are
 * disabled around the write; the ba/.align 64 keeps the write sequence
 * inside one I$ line so no E$ activity intervenes.
 * NOTE(review): the shift is 39 but the original comment claimed
 * "bit 40"; get_ecache_tag uses bit 40 for tag READS — confirm which
 * address bit selects tag WRITES via ASI_EC_W.
 * Clobbers %o1, %o3, %o4, %g1.
 */
ENTRY(write_ec_tag_parity)
or %g0, 1, %o4
sllx %o4, 39, %o4 ! set bit 39 of e$ addr (see NOTE above re bit 40)
or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
rdpr %pstate, %o5
andn %o5, PSTATE_IE | PSTATE_AM, %o1
wrpr %o1, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
membar #Sync
ba 1f
nop
.align 64
1:
set S_EC_PARITY, %o3 ! clear tag, state invalid
sllx %o3, S_ECPAR_SHIFT, %o3 ! and with good tag parity
stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
stxa %g0, [%o4]ASI_EC_W
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
membar #Sync
retl
wrpr %g0, %o5, %pstate
SET_SIZE(write_ec_tag_parity)
/*
 * write_hb_ec_tag_parity(ecache_addr [%o0])
 *
 * Same operation as write_ec_tag_parity but with the parity constants
 * for Hummingbird (HB_EC_PARITY/HB_ECPAR_SHIFT) or Sabre
 * (SB_EC_PARITY/SB_ECPAR_SHIFT) E$ tag formats, selected at compile
 * time.  NOTE(review): as in write_ec_tag_parity, the shift is 39 while
 * the original comment said "bit 40" — confirm the tag-write address
 * bit.  Clobbers %o1, %o3, %o4, %g1.
 */
ENTRY(write_hb_ec_tag_parity)
or %g0, 1, %o4
sllx %o4, 39, %o4 ! set bit 39 of e$ addr (see NOTE above re bit 40)
or %o0, %o4, %o4 ! %o4 = ecache addr for tag write
rdpr %pstate, %o5
andn %o5, PSTATE_IE | PSTATE_AM, %o1
wrpr %o1, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! Turn off Error enable
membar #Sync
ba 1f
nop
.align 64
1:
#ifdef HUMMINGBIRD
set HB_EC_PARITY, %o3 ! clear tag, state invalid
sllx %o3, HB_ECPAR_SHIFT, %o3 ! and with good tag parity
#else
set SB_EC_PARITY, %o3 ! clear tag, state invalid
sllx %o3, SB_ECPAR_SHIFT, %o3 ! and with good tag parity
#endif
stxa %o3, [%g0]ASI_EC_DIAG ! update with the above info
stxa %g0, [%o4]ASI_EC_W
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! Turn error enable back on
membar #Sync
retl
wrpr %g0, %o5, %pstate
SET_SIZE(write_hb_ec_tag_parity)
/* VIS block size; not referenced in the visible code — presumably used
 * by related routines elsewhere in this file (confirm before removing). */
#define VIS_BLOCKSIZE 64
/*
 * dtrace_blksuword32(uintptr_t addr [%i0], uint32_t *data [%i1],
 *                    int ro [%i2])
 *
 * Store one 32-bit word to user space with a block-commit store
 * (ASI_BLK_COMMIT_S) so the I$ stays coherent — used by DTrace to patch
 * user text.  Loads *data into %f0 and block-stores %d0 to addr, with
 * interrupts disabled and a t_lofault handler (label 0:) armed so a
 * fault on the user address is caught rather than panicking.  The FPU
 * is enabled on demand; if it was already live, %f0 is saved to the
 * stack and restored (the bz tests FPRS_FEF from the earlier andcc —
 * preserved across the intervening stores, which don't touch %icc).
 * Returns 0 on success; on fault, returns -1 when %i2 (ro) is non-zero,
 * otherwise tail-calls dtrace_blksuword32_err in the fault path.
 */
ENTRY(dtrace_blksuword32)
save %sp, -SA(MINFRAME + 4), %sp
rdpr %pstate, %l1
andn %l1, PSTATE_IE, %l2 ! disable interrupts to
wrpr %g0, %l2, %pstate ! protect our FPU diddling
rd %fprs, %l0
andcc %l0, FPRS_FEF, %g0
bz,a,pt %xcc, 1f ! if the fpu is disabled
wr %g0, FPRS_FEF, %fprs ! ... enable the fpu
st %f0, [%fp + STACK_BIAS - 4] ! save %f0 to the stack
1:
set 0f, %l5
ld [%i1], %f0 ! modify the block
membar #Sync
stn %l5, [THREAD_REG + T_LOFAULT] ! set up the lofault handler
stda %d0, [%i0]ASI_BLK_COMMIT_S ! store the modified block
membar #Sync
stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
bz,a,pt %xcc, 1f ! FPRS_FEF was clear: fpu was ours
wr %g0, %l0, %fprs ! restore %fprs
ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
1:
wrpr %g0, %l1, %pstate ! restore interrupts
ret
restore %g0, %g0, %o0 ! return 0 (success)
0: ! lofault landing pad: the block store faulted
membar #Sync
stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
bz,a,pt %xcc, 1f
wr %g0, %l0, %fprs ! restore %fprs
ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
1:
wrpr %g0, %l1, %pstate ! restore interrupts
brnz,pt %i2, 1f ! ro set: report failure to caller
nop
ret
restore %g0, -1, %o0 ! return -1
1:
call dtrace_blksuword32_err
restore
SET_SIZE(dtrace_blksuword32)