[SPARC64]: Revamp Spitfire error trap handling.

Current uncorrectable error handling was poor enough
that the processor could just loop taking the same
trap over and over again.  Fix things up so that we
at least get a log message and perhaps even some register
state.

In the process, much consolidation became possible,
particularly with the correctable error handler.

Prefix assembler and C function names with "spitfire"
to indicate that these are for Ultra-I/II/IIi/IIe only.

More work is needed to make these routines robust and
featureful to the level of the Ultra-III error handlers.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2005-08-29 12:45:11 -07:00
parent bde4e4ee9f
commit 6c52a96e6c
6 changed files with 453 additions and 280 deletions

View file

@ -21,6 +21,7 @@
#include <asm/visasm.h> #include <asm/visasm.h>
#include <asm/estate.h> #include <asm/estate.h>
#include <asm/auxio.h> #include <asm/auxio.h>
#include <asm/sfafsr.h>
#define curptr g6 #define curptr g6
@ -690,88 +691,102 @@ netbsd_syscall:
retl retl
nop nop
.globl __do_data_access_exception /* We need to carefully read the error status, ACK
.globl __do_data_access_exception_tl1 * the errors, prevent recursive traps, and pass the
__do_data_access_exception_tl1: * information on to C code for logging.
rdpr %pstate, %g4 *
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate * We pass the AFAR in as-is, and we encode the status
mov TLB_SFSR, %g3 * information as described in asm-sparc64/sfafsr.h
mov DMMU_SFAR, %g5 */
ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR .globl __spitfire_access_error
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR __spitfire_access_error:
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit /* Disable ESTATE error reporting so that we do not
* take recursive traps and RED state the processor.
*/
stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync membar #Sync
mov UDBE_UE, %g1
ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
/* __spitfire_cee_trap branches here with AFSR in %g4 and
* UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the
* ESTATE Error Enable register.
*/
__spitfire_cee_trap_continue:
ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
rdpr %tt, %g3 rdpr %tt, %g3
cmp %g3, 0x80 ! first win spill/fill trap and %g3, 0x1ff, %g3 ! Paranoia
blu,pn %xcc, 1f sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
cmp %g3, 0xff ! last win spill/fill trap or %g4, %g3, %g4
bgu,pn %xcc, 1f rdpr %tl, %g3
cmp %g3, 1
mov 1, %g3
bleu %xcc, 1f
sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
or %g4, %g3, %g4
/* Read in the UDB error register state, clearing the
* sticky error bits as-needed. We only clear them if
* the UE bit is set. Likewise, __spitfire_cee_trap
* below will only do so if the CE bit is set.
*
* NOTE: UltraSparc-I/II have high and low UDB error
* registers, corresponding to the two UDB units
* present on those chips. UltraSparc-IIi only
* has a single UDB, called "SDB" in the manual.
* For IIi the upper UDB register always reads
* as zero so for our purposes things will just
* work with the checks below.
*/
1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBH_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop nop
ba,pt %xcc, winfix_dax stxa %g3, [%g0] ASI_UDB_ERROR_W
rdpr %tpc, %g3 membar #Sync
1: sethi %hi(109f), %g7
1: mov 0x18, %g3
ldxa [%g3] ASI_UDBL_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBL_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop
mov 0x18, %g7
stxa %g3, [%g7] ASI_UDB_ERROR_W
membar #Sync
1: /* Ok, now that we've latched the error state,
* clear the sticky bits in the AFSR.
*/
stxa %g4, [%g0] ASI_AFSR
membar #Sync
rdpr %tl, %g2
cmp %g2, 1
rdpr %pil, %g2
bleu,pt %xcc, 1f
wrpr %g0, 15, %pil
ba,pt %xcc, etraptl1 ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7 rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
__do_data_access_exception: ba,pt %xcc, 2f
rdpr %pstate, %g4 nop
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g5
ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
.globl __do_instruction_access_exception 1: ba,pt %xcc, etrap_irq
.globl __do_instruction_access_exception_tl1 rd %pc, %g7
__do_instruction_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call instruction_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
__do_instruction_access_exception: 2: mov %l4, %o1
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2 mov %l5, %o2
call instruction_access_exception call spitfire_access_error
add %sp, PTREGS_OFF, %o0 add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap ba,pt %xcc, rtrap
clr %l6 clr %l6
@ -793,79 +808,124 @@ __do_instruction_access_exception:
* as it is the only situation where we can safely record * as it is the only situation where we can safely record
* and log. For trap level >1 we just clear the CE bit * and log. For trap level >1 we just clear the CE bit
* in the AFSR and return. * in the AFSR and return.
*/
/* Our trap handling infrastructure allows us to preserve
* two 64-bit values during etrap for arguments to
* subsequent C code. Therefore we encode the information
* as follows:
* *
* value 1) Full 64-bits of AFAR * This is just like __spitfire_access_error above, but it
* value 2) Low 33-bits of AFSR, then bits 33-->42 * specifically handles correctable errors. If an
* are UDBL error status and bits 43-->52 * uncorrectable error is indicated in the AFSR we
* are UDBH error status * will branch directly above to __spitfire_access_error
* to handle it instead. Uncorrectable therefore takes
* priority over correctable, and the error logging
* C code will notice this case by inspecting the
* trap type.
*/ */
.align 64 .globl __spitfire_cee_trap
.globl cee_trap __spitfire_cee_trap:
cee_trap: ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR mov 1, %g3
ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR sllx %g3, SFAFSR_UE_SHIFT, %g3
sllx %g1, 31, %g1 ! Clear reserved bits andcc %g4, %g3, %g0 ! Check for UE
srlx %g1, 31, %g1 ! in AFSR bne,pn %xcc, __spitfire_access_error
nop
/* NOTE: UltraSparc-I/II have high and low UDB error /* Ok, in this case we only have a correctable error.
* registers, corresponding to the two UDB units * Indicate we only wish to capture that state in register
* present on those chips. UltraSparc-IIi only * %g1, and we only disable CE error reporting unlike UE
* has a single UDB, called "SDB" in the manual. * handling which disables all errors.
* For IIi the upper UDB register always reads
* as zero so for our purposes things will just
* work with the checks below.
*/ */
ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
andcc %g3, (1 << 8), %g4 ! Check CE bit andn %g3, ESTATE_ERR_CE, %g3
sllx %g3, (64 - 10), %g3 ! Clear reserved bits stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
srlx %g3, (64 - 10), %g3 ! in UDB-Low error status membar #Sync
sllx %g3, (33 + 0), %g3 ! Shift up to encoding area /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
or %g1, %g3, %g1 ! Or it in ba,pt %xcc, __spitfire_cee_trap_continue
be,pn %xcc, 1f ! Branch if CE bit was clear mov UDBE_CE, %g1
.globl __spitfire_data_access_exception
.globl __spitfire_data_access_exception_tl1
__spitfire_data_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g5
ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
rdpr %tt, %g3
cmp %g3, 0x80 ! first win spill/fill trap
blu,pn %xcc, 1f
cmp %g3, 0xff ! last win spill/fill trap
bgu,pn %xcc, 1f
nop nop
stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL ba,pt %xcc, winfix_dax
membar #Sync ! Synchronize ASI stores rdpr %tpc, %g3
1: mov 0x18, %g5 ! Addr of UDB-High error status 1: sethi %hi(109f), %g7
ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
andcc %g3, (1 << 8), %g4 ! Check CE bit __spitfire_data_access_exception:
sllx %g3, (64 - 10), %g3 ! Clear reserved bits rdpr %pstate, %g4
srlx %g3, (64 - 10), %g3 ! in UDB-High error status wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
sllx %g3, (33 + 10), %g3 ! Shift up to encoding area mov TLB_SFSR, %g3
or %g1, %g3, %g1 ! Or it in mov DMMU_SFAR, %g5
be,pn %xcc, 1f ! Branch if CE bit was clear ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
nop ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
nop stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH .globl __spitfire_insn_access_exception
membar #Sync ! Synchronize ASI stores .globl __spitfire_insn_access_exception_tl1
1: mov 1, %g5 ! AFSR CE bit is __spitfire_insn_access_exception_tl1:
sllx %g5, 20, %g5 ! bit 20 rdpr %pstate, %g4
stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
membar #Sync ! Synchronize ASI stores mov TLB_SFSR, %g3
sllx %g2, (64 - 41), %g2 ! Clear reserved bits ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
srlx %g2, (64 - 41), %g2 ! in latched AFAR rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
andn %g2, 0x0f, %g2 ! Finish resv bit clearing __spitfire_insn_access_exception:
mov %g1, %g4 ! Move AFSR+UDB* into save reg rdpr %pstate, %g4
mov %g2, %g5 ! Move AFAR into save reg wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
rdpr %pil, %g2 mov TLB_SFSR, %g3
wrpr %g0, 15, %pil ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
ba,pt %xcc, etrap_irq rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
rd %pc, %g7 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
mov %l4, %o0 membar #Sync
sethi %hi(109f), %g7
mov %l5, %o1 ba,pt %xcc, etrap
call cee_log 109: or %g7, %lo(109b), %g7
add %sp, PTREGS_OFF, %o2 mov %l4, %o1
ba,a,pt %xcc, rtrap_irq mov %l5, %o2
call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
/* Capture I/D/E-cache state into per-cpu error scoreboard. /* Capture I/D/E-cache state into per-cpu error scoreboard.
* *

View file

@ -33,6 +33,7 @@
#include <asm/dcu.h> #include <asm/dcu.h>
#include <asm/estate.h> #include <asm/estate.h>
#include <asm/chafsr.h> #include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h> #include <asm/psrcompat.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/timer.h> #include <asm/timer.h>
@ -143,8 +144,7 @@ void do_BUG(const char *file, int line)
} }
#endif #endif
void instruction_access_exception(struct pt_regs *regs, void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
unsigned long sfsr, unsigned long sfar)
{ {
siginfo_t info; siginfo_t info;
@ -153,8 +153,8 @@ void instruction_access_exception(struct pt_regs *regs,
return; return;
if (regs->tstate & TSTATE_PRIV) { if (regs->tstate & TSTATE_PRIV) {
printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", printk("spitfire_insn_access_exception: SFSR[%016lx] "
sfsr, sfar); "SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Iax", regs); die_if_kernel("Iax", regs);
} }
if (test_thread_flag(TIF_32BIT)) { if (test_thread_flag(TIF_32BIT)) {
@ -169,19 +169,17 @@ void instruction_access_exception(struct pt_regs *regs,
force_sig_info(SIGSEGV, &info, current); force_sig_info(SIGSEGV, &info, current);
} }
void instruction_access_exception_tl1(struct pt_regs *regs, void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
unsigned long sfsr, unsigned long sfar)
{ {
if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP) 0, 0x8, SIGTRAP) == NOTIFY_STOP)
return; return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
instruction_access_exception(regs, sfsr, sfar); spitfire_insn_access_exception(regs, sfsr, sfar);
} }
void data_access_exception(struct pt_regs *regs, void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
unsigned long sfsr, unsigned long sfar)
{ {
siginfo_t info; siginfo_t info;
@ -207,8 +205,8 @@ void data_access_exception(struct pt_regs *regs,
return; return;
} }
/* Shit... */ /* Shit... */
printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", printk("spitfire_data_access_exception: SFSR[%016lx] "
sfsr, sfar); "SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Dax", regs); die_if_kernel("Dax", regs);
} }
@ -220,15 +218,14 @@ void data_access_exception(struct pt_regs *regs,
force_sig_info(SIGSEGV, &info, current); force_sig_info(SIGSEGV, &info, current);
} }
void data_access_exception_tl1(struct pt_regs *regs, void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
unsigned long sfsr, unsigned long sfar)
{ {
if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
0, 0x30, SIGTRAP) == NOTIFY_STOP) 0, 0x30, SIGTRAP) == NOTIFY_STOP)
return; return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
data_access_exception(regs, sfsr, sfar); spitfire_data_access_exception(regs, sfsr, sfar);
} }
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
@ -264,54 +261,13 @@ static void spitfire_clean_and_reenable_l1_caches(void)
: "memory"); : "memory");
} }
void do_iae(struct pt_regs *regs) static void spitfire_enable_estate_errors(void)
{ {
siginfo_t info; __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync"
spitfire_clean_and_reenable_l1_caches(); : /* no outputs */
: "r" (ESTATE_ERR_ALL),
if (notify_die(DIE_TRAP, "instruction access exception", regs, "i" (ASI_ESTATE_ERROR_EN));
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_OBJERR;
info.si_addr = (void *)0;
info.si_trapno = 0;
force_sig_info(SIGBUS, &info, current);
}
void do_dae(struct pt_regs *regs)
{
siginfo_t info;
#ifdef CONFIG_PCI
if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
spitfire_clean_and_reenable_l1_caches();
pci_poke_faulted = 1;
/* Why the fuck did they have to change this? */
if (tlb_type == cheetah || tlb_type == cheetah_plus)
regs->tpc += 4;
regs->tnpc = regs->tpc + 4;
return;
}
#endif
spitfire_clean_and_reenable_l1_caches();
if (notify_die(DIE_TRAP, "data access exception", regs,
0, 0x30, SIGTRAP) == NOTIFY_STOP)
return;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_OBJERR;
info.si_addr = (void *)0;
info.si_trapno = 0;
force_sig_info(SIGBUS, &info, current);
} }
static char ecc_syndrome_table[] = { static char ecc_syndrome_table[] = {
@ -349,65 +305,15 @@ static char ecc_syndrome_table[] = {
0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
}; };
/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
* in the following format. The AFAR is left as is, with
* reserved bits cleared, and is a raw 40-bit physical
* address.
*/
#define CE_STATUS_UDBH_UE (1UL << (43 + 9))
#define CE_STATUS_UDBH_CE (1UL << (43 + 8))
#define CE_STATUS_UDBH_ESYNDR (0xffUL << 43)
#define CE_STATUS_UDBH_SHIFT 43
#define CE_STATUS_UDBL_UE (1UL << (33 + 9))
#define CE_STATUS_UDBL_CE (1UL << (33 + 8))
#define CE_STATUS_UDBL_ESYNDR (0xffUL << 33)
#define CE_STATUS_UDBL_SHIFT 33
#define CE_STATUS_AFSR_MASK (0x1ffffffffUL)
#define CE_STATUS_AFSR_ME (1UL << 32)
#define CE_STATUS_AFSR_PRIV (1UL << 31)
#define CE_STATUS_AFSR_ISAP (1UL << 30)
#define CE_STATUS_AFSR_ETP (1UL << 29)
#define CE_STATUS_AFSR_IVUE (1UL << 28)
#define CE_STATUS_AFSR_TO (1UL << 27)
#define CE_STATUS_AFSR_BERR (1UL << 26)
#define CE_STATUS_AFSR_LDP (1UL << 25)
#define CE_STATUS_AFSR_CP (1UL << 24)
#define CE_STATUS_AFSR_WP (1UL << 23)
#define CE_STATUS_AFSR_EDP (1UL << 22)
#define CE_STATUS_AFSR_UE (1UL << 21)
#define CE_STATUS_AFSR_CE (1UL << 20)
#define CE_STATUS_AFSR_ETS (0xfUL << 16)
#define CE_STATUS_AFSR_ETS_SHIFT 16
#define CE_STATUS_AFSR_PSYND (0xffffUL << 0)
#define CE_STATUS_AFSR_PSYND_SHIFT 0
/* Layout of Ecache TAG Parity Syndrome of AFSR */
#define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */
#define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */
#define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */
#define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */
static char *syndrome_unknown = "<Unknown>"; static char *syndrome_unknown = "<Unknown>";
asmlinkage void cee_log(unsigned long ce_status, static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
unsigned long afar,
struct pt_regs *regs)
{ {
char memmod_str[64]; unsigned short scode;
char *p; char memmod_str[64], *p;
unsigned short scode, udb_reg;
printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " if (udbl & bit) {
"AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n", scode = ecc_syndrome_table[udbl & 0xff];
smp_processor_id(),
(ce_status & CE_STATUS_AFSR_MASK),
afar,
((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
if (udb_reg & (1 << 8)) {
scode = ecc_syndrome_table[udb_reg & 0xff];
if (prom_getunumber(scode, afar, if (prom_getunumber(scode, afar,
memmod_str, sizeof(memmod_str)) == -1) memmod_str, sizeof(memmod_str)) == -1)
p = syndrome_unknown; p = syndrome_unknown;
@ -418,9 +324,8 @@ asmlinkage void cee_log(unsigned long ce_status,
smp_processor_id(), scode, p); smp_processor_id(), scode, p);
} }
udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL); if (udbh & bit) {
if (udb_reg & (1 << 8)) { scode = ecc_syndrome_table[udbh & 0xff];
scode = ecc_syndrome_table[udb_reg & 0xff];
if (prom_getunumber(scode, afar, if (prom_getunumber(scode, afar,
memmod_str, sizeof(memmod_str)) == -1) memmod_str, sizeof(memmod_str)) == -1)
p = syndrome_unknown; p = syndrome_unknown;
@ -430,6 +335,127 @@ asmlinkage void cee_log(unsigned long ce_status,
"Memory Module \"%s\"\n", "Memory Module \"%s\"\n",
smp_processor_id(), scode, p); smp_processor_id(), scode, p);
} }
}
/* Report a Correctable ECC Error (CEE) on a Spitfire-class CPU.
 *
 * @afsr: AFSR value latched by the trap handler (low bits of the
 *        encoded status word built in entry.S)
 * @afar: fault address register contents, passed through unchanged
 * @udbh: latched UDB-High error register state (10 bits)
 * @udbl: latched UDB-Low error register state (10 bits)
 * @tl1:  non-zero if the trap was taken at trap level > 1
 * @regs: trap-time register state
 *
 * NOTE(review): statement order is significant here — the error must be
 * logged and the die-chain notified before ESTATE error reporting is
 * re-enabled, otherwise a new error could re-trap mid-logging.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
"AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
smp_processor_id(), afsr, afar, udbl, udbh, tl1);
/* Decode the ECC syndrome in whichever UDB register(s) latched a
 * correctable error, and print the affected memory module.
 */
spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
/* We always log it, even if someone is listening for this
 * trap.
 */
notify_die(DIE_TRAP, "Correctable ECC Error", regs,
0, TRAP_TYPE_CEE, SIGTRAP);
/* The Correctable ECC Error trap does not disable I/D caches. So
 * we only have to restore the ESTATE Error Enable register.
 */
spitfire_enable_estate_errors();
}
/* Report an Uncorrectable Error (UE) on a Spitfire-class CPU.
 *
 * @afsr: AFSR value latched by the trap handler (low bits of the
 *        encoded status word built in entry.S)
 * @afar: fault address register contents, passed through unchanged
 * @udbh: latched UDB-High error register state (10 bits)
 * @udbl: latched UDB-Low error register state (10 bits)
 * @tt:   hardware trap type that reported the error
 * @tl1:  non-zero if the trap was taken at trap level > 1
 * @regs: trap-time register state
 *
 * If the error hit privileged (kernel) code we dump the TL>1 trap log
 * when applicable and die; otherwise the caches and ESTATE error
 * reporting are restored and the offending user task gets SIGBUS.
 */
static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	/* BUGFIX: UDBH was printed with %ld (decimal) while every other
	 * field here is hexadecimal; use %lx so the raw register state
	 * is logged consistently.
	 */
	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */
	/* Decode the ECC syndrome in whichever UDB register(s) latched
	 * an uncorrectable error, and print the affected memory module.
	 */
	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */
	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
unsigned long afsr, tt, udbh, udbl;
int tl1;
afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
#ifdef CONFIG_PCI
if (tt == TRAP_TYPE_DAE &&
pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
spitfire_clean_and_reenable_l1_caches();
spitfire_enable_estate_errors();
pci_poke_faulted = 1;
regs->tnpc = regs->tpc + 4;
return;
}
#endif
if (afsr & SFAFSR_UE)
spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
if (tt == TRAP_TYPE_CEE) {
/* Handle the case where we took a CEE trap, but ACK'd
* only the UE state in the UDB error registers.
*/
if (afsr & SFAFSR_UE) {
if (udbh & UDBE_CE) {
__asm__ __volatile__(
"stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (udbh & UDBE_CE),
"r" (0x0), "i" (ASI_UDB_ERROR_W));
}
if (udbl & UDBE_CE) {
__asm__ __volatile__(
"stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (udbl & UDBE_CE),
"r" (0x18), "i" (ASI_UDB_ERROR_W));
}
}
spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
}
} }
int cheetah_pcache_forced_on; int cheetah_pcache_forced_on;

View file

@ -18,9 +18,10 @@ sparc64_ttable_tl0:
tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_iax: membar #Sync tl0_iax: membar #Sync
TRAP_NOSAVE_7INSNS(__do_instruction_access_exception) TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
tl0_resv009: BTRAP(0x9) tl0_resv009: BTRAP(0x9)
tl0_iae: TRAP(do_iae) tl0_iae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_ill: membar #Sync tl0_ill: membar #Sync
TRAP_7INSNS(do_illegal_instruction) TRAP_7INSNS(do_illegal_instruction)
@ -36,9 +37,10 @@ tl0_cwin: CLEAN_WINDOW
tl0_div0: TRAP(do_div0) tl0_div0: TRAP(do_div0)
tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv02f: BTRAP(0x2f) tl0_resv02f: BTRAP(0x2f)
tl0_dax: TRAP_NOSAVE(__do_data_access_exception) tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception)
tl0_resv031: BTRAP(0x31) tl0_resv031: BTRAP(0x31)
tl0_dae: TRAP(do_dae) tl0_dae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv033: BTRAP(0x33) tl0_resv033: BTRAP(0x33)
tl0_mna: TRAP_NOSAVE(do_mna) tl0_mna: TRAP_NOSAVE(do_mna)
tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
@ -73,7 +75,8 @@ tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
tl0_ivec: TRAP_IVEC tl0_ivec: TRAP_IVEC
tl0_paw: TRAP(do_paw) tl0_paw: TRAP(do_paw)
tl0_vaw: TRAP(do_vaw) tl0_vaw: TRAP(do_vaw)
tl0_cee: TRAP_NOSAVE(cee_trap) tl0_cee: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
tl0_iamiss: tl0_iamiss:
#include "itlb_base.S" #include "itlb_base.S"
tl0_damiss: tl0_damiss:
@ -175,9 +178,10 @@ tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
sparc64_ttable_tl1: sparc64_ttable_tl1:
tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1) tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
tl1_resv009: BTRAPTL1(0x9) tl1_resv009: BTRAPTL1(0x9)
tl1_iae: TRAPTL1(do_iae_tl1) tl1_iae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_ill: TRAPTL1(do_ill_tl1) tl1_ill: TRAPTL1(do_ill_tl1)
tl1_privop: BTRAPTL1(0x11) tl1_privop: BTRAPTL1(0x11)
@ -193,9 +197,10 @@ tl1_cwin: CLEAN_WINDOW
tl1_div0: TRAPTL1(do_div0_tl1) tl1_div0: TRAPTL1(do_div0_tl1)
tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1) tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
tl1_resv031: BTRAPTL1(0x31) tl1_resv031: BTRAPTL1(0x31)
tl1_dae: TRAPTL1(do_dae_tl1) tl1_dae: membar #Sync
TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv033: BTRAPTL1(0x33) tl1_resv033: BTRAPTL1(0x33)
tl1_mna: TRAP_NOSAVE(do_mna) tl1_mna: TRAP_NOSAVE(do_mna)
tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
@ -219,8 +224,8 @@ tl1_paw: TRAPTL1(do_paw_tl1)
tl1_vaw: TRAPTL1(do_vaw_tl1) tl1_vaw: TRAPTL1(do_vaw_tl1)
/* The grotty trick to save %g1 into current->thread.cee_stuff /* The grotty trick to save %g1 into current->thread.cee_stuff
* is because when we take this trap we could be interrupting trap * is because when we take this trap we could be interrupting
* code already using the trap alternate global registers. * trap code already using the trap alternate global registers.
* *
* We cross our fingers and pray that this store/load does * We cross our fingers and pray that this store/load does
* not cause yet another CEE trap. * not cause yet another CEE trap.

View file

@ -349,9 +349,9 @@ int handle_popc(u32 insn, struct pt_regs *regs)
extern void do_fpother(struct pt_regs *regs); extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs); extern void do_privact(struct pt_regs *regs);
extern void data_access_exception(struct pt_regs *regs, extern void spitfire_data_access_exception(struct pt_regs *regs,
unsigned long sfsr, unsigned long sfsr,
unsigned long sfar); unsigned long sfar);
int handle_ldf_stq(u32 insn, struct pt_regs *regs) int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{ {
@ -394,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
break; break;
} }
default: default:
data_access_exception(regs, 0, addr); spitfire_data_access_exception(regs, 0, addr);
return 1; return 1;
} }
if (put_user (first >> 32, (u32 __user *)addr) || if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) || __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) || __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) { __put_user ((u32)second, (u32 __user *)(addr + 12))) {
data_access_exception(regs, 0, addr); spitfire_data_access_exception(regs, 0, addr);
return 1; return 1;
} }
} else { } else {
@ -414,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
do_privact(regs); do_privact(regs);
return 1; return 1;
} else if (asi > ASI_SNFL) { } else if (asi > ASI_SNFL) {
data_access_exception(regs, 0, addr); spitfire_data_access_exception(regs, 0, addr);
return 1; return 1;
} }
switch (insn & 0x180000) { switch (insn & 0x180000) {
@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
} }
if (err && !(asi & 0x2 /* NF */)) { if (err && !(asi & 0x2 /* NF */)) {
data_access_exception(regs, 0, addr); spitfire_data_access_exception(regs, 0, addr);
return 1; return 1;
} }
if (asi & 0x8) /* Little */ { if (asi & 0x8) /* Little */ {
@ -534,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
*(u64 *)(f->regs + freg) = value; *(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag; current_thread_info()->fpsaved[0] |= flag;
} else { } else {
daex: data_access_exception(regs, sfsr, sfar); daex: spitfire_data_access_exception(regs, sfsr, sfar);
return; return;
} }
advance(regs); advance(regs);
@ -578,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
__put_user ((u32)value, (u32 __user *)(sfar + 4))) __put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex; goto daex;
} else { } else {
daex: data_access_exception(regs, sfsr, sfar); daex: spitfire_data_access_exception(regs, sfsr, sfar);
return; return;
} }
advance(regs); advance(regs);

View file

@ -318,7 +318,7 @@ fill_fixup_dax:
nop nop
rdpr %pstate, %l1 ! Prepare to change globals. rdpr %pstate, %l1 ! Prepare to change globals.
mov %g4, %o1 ! Setup args for mov %g4, %o1 ! Setup args for
mov %g5, %o2 ! final call to data_access_exception. mov %g5, %o2 ! final call to spitfire_data_access_exception.
andn %l1, PSTATE_MM, %l1 ! We want to be in RMO andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
mov %g6, %o7 ! Stash away current. mov %g6, %o7 ! Stash away current.
@ -330,7 +330,7 @@ fill_fixup_dax:
mov TSB_REG, %g1 mov TSB_REG, %g1
ldxa [%g1] ASI_IMMU, %g5 ldxa [%g1] ASI_IMMU, %g5
#endif #endif
call data_access_exception call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0 add %sp, PTREGS_OFF, %o0
b,pt %xcc, rtrap b,pt %xcc, rtrap
@ -391,7 +391,7 @@ window_dax_from_user_common:
109: or %g7, %lo(109b), %g7 109: or %g7, %lo(109b), %g7
mov %l4, %o1 mov %l4, %o1
mov %l5, %o2 mov %l5, %o2
call data_access_exception call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0 add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap ba,pt %xcc, rtrap
clr %l6 clr %l6

View file

@ -0,0 +1,82 @@
#ifndef _SPARC64_SFAFSR_H
#define _SPARC64_SFAFSR_H

#include <asm/const.h>

/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */

#define SFAFSR_ME		(_AC(1,UL) << SFAFSR_ME_SHIFT)
#define SFAFSR_ME_SHIFT		32
#define SFAFSR_PRIV		(_AC(1,UL) << SFAFSR_PRIV_SHIFT)
#define SFAFSR_PRIV_SHIFT	31
#define SFAFSR_ISAP		(_AC(1,UL) << SFAFSR_ISAP_SHIFT)
#define SFAFSR_ISAP_SHIFT	30
#define SFAFSR_ETP		(_AC(1,UL) << SFAFSR_ETP_SHIFT)
#define SFAFSR_ETP_SHIFT	29
#define SFAFSR_IVUE		(_AC(1,UL) << SFAFSR_IVUE_SHIFT)
#define SFAFSR_IVUE_SHIFT	28
#define SFAFSR_TO		(_AC(1,UL) << SFAFSR_TO_SHIFT)
#define SFAFSR_TO_SHIFT		27
#define SFAFSR_BERR		(_AC(1,UL) << SFAFSR_BERR_SHIFT)
#define SFAFSR_BERR_SHIFT	26
#define SFAFSR_LDP		(_AC(1,UL) << SFAFSR_LDP_SHIFT)
#define SFAFSR_LDP_SHIFT	25
#define SFAFSR_CP		(_AC(1,UL) << SFAFSR_CP_SHIFT)
#define SFAFSR_CP_SHIFT		24
#define SFAFSR_WP		(_AC(1,UL) << SFAFSR_WP_SHIFT)
#define SFAFSR_WP_SHIFT		23
#define SFAFSR_EDP		(_AC(1,UL) << SFAFSR_EDP_SHIFT)
#define SFAFSR_EDP_SHIFT	22
#define SFAFSR_UE		(_AC(1,UL) << SFAFSR_UE_SHIFT)
#define SFAFSR_UE_SHIFT		21
#define SFAFSR_CE		(_AC(1,UL) << SFAFSR_CE_SHIFT)
#define SFAFSR_CE_SHIFT		20
#define SFAFSR_ETS		(_AC(0xf,UL) << SFAFSR_ETS_SHIFT)
#define SFAFSR_ETS_SHIFT	16
#define SFAFSR_PSYND		(_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT)
#define SFAFSR_PSYND_SHIFT	0

/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read
 *                     ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write
 */

#define UDBE_UE			(_AC(1,UL) << 9)
#define UDBE_CE			(_AC(1,UL) << 8)
#define UDBE_E_SYNDR		(_AC(0xff,UL) << 0)

/* The trap handlers for asynchronous errors encode the AFSR and
 * other pieces of information into a 64-bit argument for C code
 * encoded as follows:
 *
 * -----------------------------------------------
 * |  UDB_H  |  UDB_L  | TL>1  |  TT  |   AFSR   |
 * -----------------------------------------------
 *  63     54 53     44 42    42 41 33 32       0
 *
 * The AFAR is passed in unchanged.
 */

#define SFSTAT_UDBH_MASK	(_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
#define SFSTAT_UDBH_SHIFT	54
/* BUGFIX: this mask previously shifted by SFSTAT_UDBH_SHIFT, aliasing
 * the UDB-High field (bits 63-54) instead of covering the UDB-Low
 * field at bits 53-44 as documented in the layout above.
 */
#define SFSTAT_UDBL_MASK	(_AC(0x3ff,UL) << SFSTAT_UDBL_SHIFT)
#define SFSTAT_UDBL_SHIFT	44
#define SFSTAT_TL_GT_ONE	(_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT)
#define SFSTAT_TL_GT_ONE_SHIFT	42
#define SFSTAT_TRAP_TYPE	(_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT)
#define SFSTAT_TRAP_TYPE_SHIFT	33
#define SFSTAT_AFSR_MASK	(_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT)
#define SFSTAT_AFSR_SHIFT	0

/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */
#define ESTATE_ERR_CE		0x1 /* Correctable errors		*/
#define ESTATE_ERR_NCE		0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */
#define ESTATE_ERR_ISAP		0x4 /* System address parity error	*/
#define ESTATE_ERR_ALL		(ESTATE_ERR_CE | \
				 ESTATE_ERR_NCE | \
				 ESTATE_ERR_ISAP)

/* The various trap types that report using the above state. */
#define TRAP_TYPE_IAE		0x09 /* Instruction Access Error	*/
#define TRAP_TYPE_DAE		0x32 /* Data Access Error		*/
#define TRAP_TYPE_CEE		0x63 /* Correctable ECC Error		*/

#endif /* _SPARC64_SFAFSR_H */