us3_common.c revision 937435419de303fde6598a9eda8f579228a6ee3c
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/dditypes.h>
#include <sys/prom_debug.h>
#include <sys/prom_plat.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/platform_module.h>
#include <sys/machtrap.h>
#include <sys/bootconf.h>
#include <sys/fpras_impl.h>
#include <sys/watchpoint.h>
#include <sys/plat_ecc_unum.h>
#include <sys/errclassify.h>
#ifdef CHEETAHPLUS_ERRATUM_25
#endif /* CHEETAHPLUS_ERRATUM_25 */
/*
* Note that 'Cheetah PRM' refers to:
* SPARC V9 JPS1 Implementation Supplement: Sun UltraSPARC-III
*/
/*
* Per CPU pointers to physical address of TL>0 logout data areas.
* These pointers have to be in the kernel nucleus to avoid MMU
* misses.
*/
/*
* to prevent unnecessary panics.
*/
/*
* Per CPU pending error at TL>0, used by level15 softint handler
*/
/*
* For deferred CE re-enable after trap.
*/
/*
* Internal functions.
*/
#if defined(CPU_IMP_ECACHE_ASSOC)
#endif
static int cpu_ectag_pa_to_subblk_state(int cachesize,
int *, int *);
static int cpu_ce_delayed_ec_logout(uint64_t);
static int cpu_matching_ecache_line(uint64_t, void *, int, int *);
static int cpu_error_is_ecache_data(int, uint64_t);
static void cpu_fmri_cpu_set(nvlist_t *, int);
#ifdef CHEETAHPLUS_ERRATUM_25
static int mondo_recover_proc(uint16_t, int);
static void cheetah_nudge_init(void);
cyc_time_t *when);
static void cheetah_nudge_buddy(void);
#endif /* CHEETAHPLUS_ERRATUM_25 */
#if defined(CPU_IMP_L1_CACHE_PARITY)
#endif /* CPU_IMP_L1_CACHE_PARITY */
/*
* This table is used to determine which bit(s) is(are) bad when an ECC
* error occurs. The array is indexed by a 9-bit syndrome. The entries
* of this array have the following semantics:
*
* 00-127 The number of the bad bit, when only one bit is bad.
* 128 ECC bit C0 is bad.
* 129 ECC bit C1 is bad.
* 130 ECC bit C2 is bad.
* 131 ECC bit C3 is bad.
* 132 ECC bit C4 is bad.
* 133 ECC bit C5 is bad.
* 134 ECC bit C6 is bad.
* 135 ECC bit C7 is bad.
* 136 ECC bit C8 is bad.
* 137-143 reserved for Mtag Data and ECC.
* 144(M2) Two bits are bad within a nibble.
* 145(M3) Three bits are bad within a nibble.
* 146(M3) Four bits are bad within a nibble.
* 147(M) Multiple bits (5 or more) are bad.
* 148 NO bits are bad.
* Based on "Cheetah Programmer's Reference Manual" rev 1.1, Tables 11-4,11-5.
*/
#define C0 128
#define C1 129
#define C2 130
#define C3 131
#define C4 132
#define C5 133
#define C6 134
#define C7 135
#define C8 136
#define MT1 138
#define MT2 139
#define MTC1 141
#define MTC2 142
#define MTC3 143
#define M2 144
#define M3 145
#define M4 146
#define M 147
#define NA 148
#else /* JALAPENO || SERRANO */
#endif /* JALAPENO || SERRANO */
#define BPAR15 167
#endif /* JALAPENO || SERRANO */
static uint8_t ecc_syndrome_tab[] =
{
#else /* JALAPENO || SERRANO */
#endif /* JALAPENO || SERRANO */
#else /* JALAPENO || SERRANO */
#endif /* JALAPENO || SERRANO */
};
/*
* This table is used to determine which bit(s) is(are) bad when a Mtag
* error occurs. The array is indexed by a 4-bit ECC syndrome. The entries
* of this array have the following semantics:
*
* -1 Invalid mtag syndrome.
* 137 Mtag Data 0 is bad.
* 138 Mtag Data 1 is bad.
* 139 Mtag Data 2 is bad.
* 140 Mtag ECC 0 is bad.
* 141 Mtag ECC 1 is bad.
* 142 Mtag ECC 2 is bad.
* 143 Mtag ECC 3 is bad.
* Based on "Cheetah Programmer's Reference Manual" rev 1.1, Tables 11-6.
*/
short mtag_syndrome_tab[] =
{
};
#define MSYND_TBL_SIZE (sizeof (mtag_syndrome_tab) / sizeof (short))
#else /* !(JALAPENO || SERRANO) */
#define BSYND_TBL_SIZE 16
#endif /* !(JALAPENO || SERRANO) */
/*
* Types returned from cpu_error_to_resource_type()
*/
#define ERRTYPE_UNKNOWN 0
#define ERRTYPE_CPU 1
#define ERRTYPE_MEMORY 2
#define ERRTYPE_ECACHE_DATA 3
/*
* CE initial classification and subsequent action lookup table
*/
static int ce_disp_inited;
/*
* Set to disable leaky and partner check for memory correctables
*/
int ce_xdiag_off;
/*
* The following are not incremented atomically so are indicative only
*/
static int ce_xdiag_drops;
static int ce_xdiag_lkydrops;
static int ce_xdiag_ptnrdrops;
static int ce_xdiag_bad;
/*
* CE leaky check callback structure
*/
typedef struct {
struct async_flt *lkycb_aflt;
/*
* defines for various ecache_flush_flag's
*/
#define ECACHE_FLUSH_LINE 1
#define ECACHE_FLUSH_ALL 2
/*
* STICK sync
*/
#define STICK_ITERATION 10
#define MAX_TSKEW 1
#define EV_A_START 0
#define EV_A_END 1
#define EV_B_START 2
#define EV_B_END 3
#define EVENTS 4
typedef enum {
EVENT_NULL = 0,
} event_cmd_t;
static volatile int slave_done;
#ifdef DEBUG
#define DSYNC_ATTEMPTS 64
typedef struct {
} ss_t;
#endif /* DEBUG */
uint_t cpu_impl_dual_pgsz = 0;
#if defined(CPU_IMP_DUAL_PAGESIZE)
uint_t disable_dual_pgsz = 0;
#endif /* CPU_IMP_DUAL_PAGESIZE */
/*
* Save the cache bootup state for use when internal
* caches are to be re-enabled after an error occurs.
*/
/*
* PA[22:0] represent Displacement in Safari configuration space.
*/
#else /* JALAPENO || SERRANO */
#endif /* JALAPENO || SERRANO */
{0, 0}
};
/*
* Interval for deferred CEEN reenable
*/
/*
*/
int cpu_berr_to_verbose = 0;
/*
*/
/*
* Set of all offline cpus
*/
static void cpu_delayed_check_ce_errors(void *);
static void cpu_check_ce_errors(void *);
void cpu_error_ecache_flush(ch_async_flt_t *);
static int cpu_error_ecache_flush_required(ch_async_flt_t *);
static void cpu_log_and_clear_ce(ch_async_flt_t *);
void cpu_ce_detected(ch_cpu_errors_t *, int);
/*
* CE Leaky check timeout in microseconds. This is chosen to be twice the
* memory refresh interval of current DIMMs (64ms). After initial fix that
* gives at least one full refresh cycle in which the cell can leak
* (whereafter further refreshes simply reinforce any incorrect bit value).
*/
/*
* CE partner check partner caching period in seconds
*/
int cpu_ce_ptnr_cachetime_sec = 60;
/*
* Sets trap table entry ttentry by overwriting eight instructions from ttlabel
*/
static int min_ecache_size;
static uint_t priv_hcl_1;
static uint_t priv_hcl_2;
static uint_t priv_hcl_4;
static uint_t priv_hcl_8;
/*
 * cpu_setup: boot-time configuration for UltraSPARC-III (Cheetah
 * family) CPUs.  As visible here it establishes the page coloring
 * policy, the supported isa list, the (absent) VA hole, the kpm
 * mapping parameters, the traptrace clock source, the delayed TLB
 * flush policy, optional dual page size support, fpRAS capability,
 * the default user text page size, and marks the CE classification
 * table as initialized.
 *
 * NOTE(review): several comment headings below ("Setup chip-specific
 * trap handlers", "save the cache bootup state", the tlb tuning and
 * performance-counter notes) have no code under them in this
 * revision; statements appear to have been elided from this extract.
 * Confirm against the complete original source before relying on it.
 */
void
cpu_setup(void)
{
extern int at_flags;
extern int disable_delay_tlb_flush, delay_tlb_flush;
extern int cpc_has_overflow_intr;
extern int disable_text_largepages;
extern int use_text_pgsz4m;
/*
 * Setup chip-specific trap handlers.
 */
/*
 * save the cache bootup state.
 */
/*
 * Due to the number of entries in the fully-associative tlb
 * this may have to be tuned lower than in spitfire.
 */
/*
 * Block stores do not invalidate all pages of the d$, pagecopy
 * et. al. need virtual translations with virtual coloring taken
 * load side.
 */
if (use_page_coloring) {
do_pg_coloring = 1;
if (use_virtual_coloring)
do_virtual_coloring = 1;
}
/* Advertise the instruction-set variants supported by this CPU. */
isa_list =
"sparcv9+vis2 sparcv9+vis sparcv9 "
"sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
"sparcv8 sparcv8-fsmuld sparcv7 sparc";
/*
 * On Panther-based machines, this should
 * also include AV_SPARC_POPC too
 */
/*
 * On cheetah, there's no hole in the virtual address space
 */
hole_start = hole_end = 0;
/*
 * The kpm mapping window.
 * kpm_size:
 * The size of a single kpm range.
 * The overall size will be: kpm_size * vac_colors.
 * kpm_vbase:
 * The virtual start address of the kpm range within the kernel
 * virtual address space. kpm_vbase has to be kpm_size aligned.
 */
kpm_size_shift = 43;
kpm_smallpages = 1;
/*
 * The traptrace code uses either %tick or %stick for
 * timestamping. We have %stick so we can use it.
 */
traptrace_use_stick = 1;
/*
 * Cheetah has a performance counter overflow interrupt
 */
/*
 * Use cheetah flush-all support
 */
if (!disable_delay_tlb_flush)
delay_tlb_flush = 1;
#if defined(CPU_IMP_DUAL_PAGESIZE)
/*
 * Use Cheetah+ and later dual page size support.
 */
if (!disable_dual_pgsz) {
cpu_impl_dual_pgsz = 1;
}
#endif /* CPU_IMP_DUAL_PAGESIZE */
/*
 * Declare that this architecture/cpu combination does fpRAS.
 */
fpras_implemented = 1;
/*
 * Enable 4M pages to be used for mapping user text by default. Don't
 * use large pages for initialized data segments since we may not know
 * at exec() time what should be the preferred large page size for DTLB
 * programming.
 */
use_text_pgsz4m = 1;
/*
 * Setup CE lookup table
 */
ce_disp_inited = 1;
}
/*
* Called by setcpudelay
*/
void
cpu_init_tick_freq(void)
{
/*
 * For UltraSPARC III and beyond we want to use the
 * system clock rate as the basis for low level timing,
 * due to support of mixed speed CPUs and power management.
 */
if (system_clock_freq == 0)
/*
 * NOTE(review): the consequent of the "if" above (presumably a panic
 * when system_clock_freq is unset) and any subsequent tick-frequency
 * assignments are missing from this revision; the function does not
 * compile as shown.  Restore from the complete original source.
 */
}
#ifdef CHEETAHPLUS_ERRATUM_25
/*
* Tunables
*/
int cheetah_bpe_off = 0;
int cheetah_sendmondo_recover = 1;
int cheetah_sendmondo_fullscan = 0;
int cheetah_sendmondo_recover_delay = 5;
#define CHEETAH_LIVELOCK_MIN_DELAY 1
/*
* Recovery Statistics
*/
typedef struct cheetah_livelock_entry {
int cpuid; /* fallen cpu */
int buddy; /* cpu that ran recovery */
#define CHEETAH_LIVELOCK_NENTRY 32
#define CHEETAH_LIVELOCK_ENTRY_NEXT(statp) { \
if (++cheetah_livelock_entry_nxt >= CHEETAH_LIVELOCK_NENTRY) { \
cheetah_livelock_entry_nxt = 0; \
} \
}
struct {
int recovery; /* recovered */
int full_claimed; /* maximum pages claimed in full recovery */
int proc_entry; /* attempted to claim TSB */
int proc_tsb_scan; /* tsb scanned */
int proc_tsb_partscan; /* tsb partially scanned */
int proc_tsb_fullscan; /* whole tsb scanned */
int proc_claimed; /* maximum pages claimed in tsb scan */
int proc_user; /* user thread */
int proc_kernel; /* kernel thread */
int proc_onflt; /* bad stack */
int proc_cpu; /* null cpu */
int proc_thread; /* null thread */
int proc_proc; /* null proc */
int proc_as; /* null as */
int proc_hat; /* null hat */
int proc_hat_inval; /* hat contents don't make sense */
int proc_hat_busy; /* hat is changing TSBs */
int proc_tsb_reloc; /* TSB skipped because being relocated */
int proc_cnum_bad; /* cnum out of range */
int proc_cnum; /* last cnum processed */
}
/*
* Attempt to recover a cpu by claiming every cache line as saved
* in the TSB that the non-responsive cpu is using. Since we can't
* grab any adaptive lock, this is at best an attempt to do so. Because
* we don't grab any locks, we must operate under the protection of
* on_fault().
*
* Return 1 if cpuid could be recovered, 0 if failed.
*/
int
{
kthread_t *t;
proc_t *p;
int pages_claimed = 0;
int tried_kernel_tsb = 0;
goto badstruct;
}
goto badstruct;
}
goto badstruct;
}
goto badstruct;
}
goto badstruct;
}
goto badstruct;
}
goto badstruct;
}
goto badstruct;
}
} else {
}
/* Verify as */
goto badstruct;
}
goto badstruct;
}
do {
/*
* Skip TSBs being relocated. This is important because
* we want to avoid the following deadlock scenario:
*
* 1) when we came in we set ourselves to "in recover" state.
* 2) when we try to touch TSB being relocated the mapping
* will be in the suspended state so we'll spin waiting
* for it to be unlocked.
* 3) when the CPU that holds the TSB mapping locked tries to
* unlock it it will send a xtrap which will fail to xcall
* us or the CPU we're trying to recover, and will in turn
* enter the mondo code.
* 4) since we are still spinning on the locked mapping
* no further progress will be made and the system will
* inevitably hard hang.
*
* A TSB not being relocated can't begin being relocated
* while we're accessing it because we check
* sendmondo_in_recover before relocating TSBs.
*/
goto next_tsbinfo;
}
/*
* Invalid tte
*/
continue;
}
/*
* Don't want device registers
*/
continue;
}
/*
* Must be cached in E$
*/
continue;
}
IDSR_BUSY_BIT(bn))) == 0) {
goto done;
}
(palo << MMU_PAGESHIFT));
}
}
tried_kernel_tsb = 1;
} else if (!tried_kernel_tsb) {
}
no_fault();
IDSR_BUSY_BIT(bn))) == 0) {
return (1);
} else {
return (0);
}
done:
no_fault();
return (1);
no_fault();
return (0);
}
/*
* Attempt to claim ownership, temporarily, of every cache line that a
* non-responsive cpu might be using. This might kick that cpu out of
* this state.
*
* The return value indicates to the caller if we have exhausted all recovery
* techniques. If 1 is returned, it is useless to call this function again
* even for a different target CPU.
*/
int
{
int retval = 0;
int pages_claimed = 0;
/*
* Wait while recovery takes place
*/
while (sendmondo_in_recover) {
drv_usecwait(1);
}
/*
* Assume we didn't claim the whole memory. If
* the target of this caller is not recovered,
* it will come back.
*/
return (retval);
}
/*
* First try to claim the lines in the TSB the target
* may have been using.
*/
/*
* Didn't claim the whole memory
*/
goto done;
}
/*
* We tried using the TSB. The target is still
* not recovered. Check if complete memory scan is
* enabled.
*/
if (cheetah_sendmondo_fullscan == 0) {
/*
* Full memory scan is disabled.
*/
retval = 1;
goto done;
}
/*
* Try claiming the whole memory.
*/
cur_pa += MMU_PAGESIZE) {
IDSR_BUSY_BIT(bn))) == 0) {
/*
* Didn't claim all memory
*/
goto done;
}
}
}
}
/*
* We did all we could.
*/
retval = 1;
done:
/*
* Update statistics
*/
return (retval);
}
/*
* This is called by the cyclic framework when this CPU becomes online
*/
/*ARGSUSED*/
static void
{
/*
* Stagger the start time
*/
}
}
/*
* Create a low level cyclic to send a xtrap to the next cpu online.
* However, there's no need to have this running on a uniprocessor system.
*/
static void
cheetah_nudge_init(void)
{
/* Uniprocessor: there is no buddy CPU to nudge, so skip the cyclic. */
if (max_ncpus == 1) {
return;
}
/*
 * NOTE(review): "hdlr" (the omni-cyclic handler describing the
 * online/offline callbacks) is not declared anywhere in this
 * revision; its initialization appears to have been elided from
 * this extract -- confirm against the complete original source.
 */
(void) cyclic_add_omni(&hdlr);
}
/*
* Cyclic handler to wake up buddy
*/
/*
 * NOTE(review): this body is incomplete in this revision -- the
 * preemption-disable and cross-trap call referenced by the comment
 * below have been reduced to a dangling "0, 0);" argument fragment
 * and an extra closing brace, so this does not compile as shown.
 */
void
cheetah_nudge_buddy(void)
{
/*
 * Disable kernel preemption to protect the cpu list
 */
0, 0);
}
}
#endif /* CHEETAHPLUS_ERRATUM_25 */
#ifdef SEND_MONDO_STATS
#endif
/*
* Note: A version of this function is used by the debugger via the KDI,
* and must be kept in sync with this version. Any changes made to this
* function to support new chips or to accommodate errata must also be included
* in the KDI-specific version. See us3_kdi.c.
*/
/*
 * Send a mondo (cross-call dispatch) to a single CPU and busy-wait
 * on the interrupt dispatch status register, retrying on NACK/BUSY.
 * With CHEETAHPLUS_ERRATUM_25, an unresponsive target may first be
 * "recovered" via mondo_recover() before the dispatch is retried.
 *
 * NOTE(review): this revision is missing many statements (IDSR
 * setup, tick/endtick computation, the cmn_err/panic call whose
 * bare format-string fragment remains below); the control flow as
 * shown is not complete and does not compile -- compare against the
 * complete original source.
 */
void
send_one_mondo(int cpuid)
{
#ifdef CHEETAHPLUS_ERRATUM_25
int recovered = 0;
#endif
/*
 * will be used for dispatching interrupt. For now, assume
 * there are no more than IDSR_BN_SETS CPUs, hence no aliasing
 */
#else /* JALAPENO || SERRANO */
#endif /* JALAPENO || SERRANO */
/* Poll until the dispatch status register shows the mondo was taken. */
for (;;) {
if (idsr == 0)
break;
/*
 * If there is a big jump between the current tick
 * count and lasttick, we have probably hit a break
 * point. Adjust endtick accordingly to avoid panic.
 */
/* Dispatch timed out; don't escalate if a panic is already underway. */
if (panic_quiesce)
return;
#ifdef CHEETAHPLUS_ERRATUM_25
if (cheetah_sendmondo_recover && recovered == 0) {
if (mondo_recover(cpuid, 0)) {
/*
 * We claimed the whole memory or
 * full scan is disabled.
 */
recovered++;
}
/*
 * Recheck idsr
 */
continue;
} else
#endif /* CHEETAHPLUS_ERRATUM_25 */
{
"(target 0x%x) [%d NACK %d BUSY]",
}
}
busy++;
continue;
}
drv_usecwait(1);
nack++;
busy = 0;
}
#ifdef SEND_MONDO_STATS
{
if (n < 8192)
x_one_stimes[n >> 7]++;
else
}
#endif
}
/*
 * syncfpu: intentionally a no-op on this CPU module -- no explicit
 * FPU synchronization action is performed here.
 */
void
syncfpu(void)
{
}
/*
* Return processor specific async error structure
* size used.
*/
/*
 * Report the size, in bytes, of the processor-specific async fault
 * structure (ch_async_flt_t) so generic error-handling code can
 * allocate storage for it.
 */
int
cpu_aflt_size(void)
{
size_t chsz = sizeof (ch_async_flt_t);

return ((int)chsz);
}
/*
* Tunable to disable the checking of other cpu logout areas during panic for
* potential syndrome 71 generating errors.
*/
int enable_check_other_cpus_logout = 1;
/*
* Check other cpus logout area for potential synd 71 generating
* errors.
*/
static void
{
return;
}
#if defined(SERRANO)
#endif /* SERRANO */
/*
* In order to simplify code, we maintain this afsr_errs
* variable which holds the aggregate of AFSR and AFSR_EXT
* sticky bits.
*/
(t_afsr & C_AFSR_ALL_ERRS);
/* Setup the async fault structure */
/*
* Queue events on the async event queue, one event per error bit.
* If no events are queued, queue an event to complain.
*/
}
/*
* Zero out + invalidate CPU logout.
*/
}
/*
* Check the logout areas of all other cpus for unlogged errors.
*/
static void
{
int i, j;
for (i = 0; i < NCPU; i++) {
continue;
}
/*
* Check each of the tl>0 logout areas
*/
for (j = 0; j < CH_ERR_TL1_TLMAX; j++, cl1p++) {
if (cl1p->ch_err_tl1_flags == 0)
continue;
}
/*
* Check each of the remaining logout areas
*/
}
}
/*
* The fast_ecc_err handler transfers control here for UCU, UCC events.
* Note that we flush Ecache twice, once in the fast_ecc_err handler to
* flush the TL=1 trap handler code out of the Ecache, so we can minimize
* the probability of getting a TL>1 Fast ECC trap when we're fielding
* another Fast ECC trap.
*
* Cheetah+ also handles: TSCE: No additional processing required.
* Panther adds L3_UCU and L3_UCC which are reported in AFSR_EXT.
*
* Note that the p_clo_flags input is only valid in cases where the
* cpu_private struct is not yet initialized (since that is the only
* time that information cannot be obtained from the logout struct.)
*/
/*ARGSUSED*/
void
{
/*
* Get the CPU log out info. If we can't find our CPU private
* pointer, then we will have to make due without any detailed
* logout information.
*/
} else {
}
}
/*
* Log fast ecc error, called from either Fast ECC at TL=0 or Fast
* ECC at TL>0. Need to supply either a error register pointer or a
* cpu logout structure pointer.
*/
static void
{
char pr_reason[MAX_REASON_STRING];
/*
* If no cpu logout data, then we will have to make due without
* any detailed logout information.
*/
#if defined(SERRANO)
#endif /* SERRANO */
} else {
#if defined(SERRANO)
#endif /* SERRANO */
}
/*
* In order to simplify code, we maintain this afsr_errs
* variable which holds the aggregate of AFSR and AFSR_EXT
* sticky bits.
*/
(t_afsr & C_AFSR_ALL_ERRS);
pr_reason[0] = '\0';
/* Setup the async fault structure */
/*
* XXXX - Phenomenal hack to get around Solaris not getting all the
* cmn_err messages out to the console. The situation is a UCU (in
* priv mode) which causes a WDU which causes a UE (on the retry).
* The messages for the UCU and WDU are enqueued and then pulled off
* the async queue via softint and syslogd starts to process them
* but doesn't get them to the console. The UE causes a panic, but
* on the async queue. The hack is to check if we have a matching
* WDU event for the UCU, and if it matches, we're more than likely
* going to panic with a UE, unless we're under protection. So, we
* check to see if we got a matching WDU event and if we're under
* protection.
*
* looks like this:
* UCU->WDU->UE
* For Panther, it could look like either of these:
* UCU---->WDU->L3_WDU->UE
* L3_UCU->WDU->L3_WDU->UE
*/
(t_afsr_errs & C_AFSR_WDU));
}
/*
* Queue events on the async event queue, one event per error bit.
* If no events are queued or no Fast ECC events are on in the AFSR,
* queue an event to complain.
*/
}
/*
* Zero out + invalidate CPU logout.
*/
if (clop) {
}
/*
* We carefully re-enable NCEEN and CEEN and then check if any deferred
* or disrupting errors have happened. We do this because if a
* CEEN works differently on Cheetah than on Spitfire. Also, we enable
* deferred or disrupting error happening between checking the AFSR and
*
* Note: CEEN and NCEEN are only reenabled if they were on when trap
* taken.
*/
if (clear_errors(&ch_flt)) {
(C_AFSR_EXT_ASYNC_ERRS | C_AFSR_ASYNC_ERRS)) != 0);
NULL);
}
/*
* Panic here if aflt->flt_panic has been set. Enqueued errors will
* be logged as part of the panic flow.
*/
/*
* Flushing the Ecache here gets the part of the trap handler that
* is run at TL=1 out of the Ecache.
*/
}
/*
* This is called via sys_trap from pil15_interrupt code if the
* corresponding entry in ch_err_tl1_pending is set. Checks the
* various ch_err_tl1_data structures for valid entries based on the bit
* settings in the ch_err_tl1_flags entry of the structure.
*/
/*ARGSUSED*/
void
{
int i, ncl1ps;
cl1p = &ch_err_tl1_data;
ncl1ps = 1;
} else {
ncl1ps = 0;
}
if (cl1p->ch_err_tl1_flags == 0)
continue;
/*
* Grab a copy of the logout data and invalidate
* the logout area.
*/
/*
* Log "first error" in ch_err_tl1_data.
*/
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
}
#endif /* CPU_IMP_L1_CACHE_PARITY */
/*
* Log "multiple events" in ch_err_tl1_data. Note that
* if the structure is busy, we just do the cache flushing
* at this point *should* have some relevant info. If there
* are no valid errors in the AFSR, we'll assume they've
* already been picked up and logged. For I$/D$ parity,
* we just log an event with an "Unknown" (NULL) TPC.
*/
if (me_flags & CH_ERR_FECC) {
/*
* Get the error registers and see if there's
* a pending error. If not, don't bother
* generating an "Invalid AFSR" error event.
*/
if (t_afsr_errs != 0) {
}
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
}
#endif /* CPU_IMP_L1_CACHE_PARITY */
}
}
/*
* Called from Fast ECC TL>0 handler in case of fatal error.
* cpu_tl1_error should always find an associated ch_err_tl1_data structure,
* but if we don't, we'll panic with something reasonable.
*/
/*ARGSUSED*/
void
{
/*
* Should never return, but just in case.
*/
fm_panic("Unsurvivable ECC Error at TL>0");
}
/*
* The ce_err/ce_err_tl1 handlers transfer control here for CE, EMC, EDU:ST,
* EDC, WDU, WDC, CPU, CPC, IVU, IVC events.
* Disrupting errors controlled by NCEEN: EDU:ST, WDU, CPU, IVU
* Disrupting errors controlled by CEEN: CE, EMC, EDC, WDC, CPC, IVC
*
* Cheetah+ also handles (No additional processing required):
* DUE, DTO, DBERR (NCEEN controlled)
* THCE (CEEN and ET_ECC_en controlled)
* TUE (ET_ECC_en controlled)
*
* Panther further adds:
* IMU, L3_EDU, L3_WDU, L3_CPU (NCEEN controlled)
* IMC, L3_EDC, L3_WDC, L3_CPC, L3_THCE (CEEN controlled)
* TUE_SH, TUE (NCEEN and L2_tag_ECC_en controlled)
* L3_TUE, L3_TUE_SH (NCEEN and ET_ECC_en controlled)
* THCE (CEEN and L2_tag_ECC_en controlled)
* L3_THCE (CEEN and ET_ECC_en controlled)
*
* Note that the p_clo_flags input is only valid in cases where the
* cpu_private struct is not yet initialized (since that is the only
* time that information cannot be obtained from the logout struct.)
*/
/*ARGSUSED*/
void
{
char pr_reason[MAX_REASON_STRING];
/*
* Get the CPU log out info. If we can't find our CPU private
* pointer, then we will have to make due without any detailed
* logout information.
*/
#if defined(SERRANO)
#endif /* SERRANO */
} else {
#if defined(SERRANO)
#endif /* SERRANO */
}
/*
* In order to simplify code, we maintain this afsr_errs
* variable which holds the aggregate of AFSR and AFSR_EXT
* sticky bits.
*/
(t_afsr & C_AFSR_ALL_ERRS);
pr_reason[0] = '\0';
/* Setup the async fault structure */
/*
* If this trap is a result of one of the errors not masked
* by cpu_ce_not_deferred, we don't reenable CEEN. Instead
* indicate that a timeout is to be set later.
*/
else
/*
* log the CE and clean up
*/
/*
* We re-enable CEEN (if required) and check if any disrupting errors
* have happened. We do this because if a disrupting error had occurred
* with CEEN off, the trap will not be taken when CEEN is re-enabled.
* Note that CEEN works differently on Cheetah than on Spitfire. Also,
* we enable CEEN *before* checking the AFSR to avoid the small window
* of a error happening between checking the AFSR and enabling CEEN.
*/
if (clear_errors(&ch_flt)) {
NULL);
}
/*
* Panic here if aflt->flt_panic has been set. Enqueued errors will
* be logged as part of the panic flow.
*/
}
/*
* The async_err handler transfers control here for UE, EMU, EDU:BLD,
* L3_EDU:BLD, TO, and BERR events.
* Deferred errors controlled by NCEEN: UE, EMU, EDU:BLD, L3_EDU:BLD, TO, BERR
*
* Cheetah+: No additional errors handled.
*
* Note that the p_clo_flags input is only valid in cases where the
* cpu_private struct is not yet initialized (since that is the only
* time that information cannot be obtained from the logout struct.)
*/
/*ARGSUSED*/
void
{
int trampolined = 0;
char pr_reason[MAX_REASON_STRING];
int expected = DDI_FM_ERR_UNEXPECTED;
/*
* We need to look at p_flag to determine if the thread detected an
* error while dumping core. We can't grab p_lock here, but it's ok
* because we just need a consistent snapshot and we know that everyone
* else will store a consistent set of bits while holding p_lock. We
* don't have to worry about a race because SDOCORE is set once prior
* to doing i/o from the process's address space and is never cleared.
*/
/*
* Get the CPU log out info. If we can't find our CPU private
* pointer then we will have to make due without any detailed
* logout information.
*/
#if defined(SERRANO)
#endif /* SERRANO */
} else {
#if defined(SERRANO)
#endif /* SERRANO */
}
/*
* In order to simplify code, we maintain this afsr_errs
* variable which holds the aggregate of AFSR and AFSR_EXT
* sticky bits.
*/
(t_afsr & C_AFSR_ALL_ERRS);
pr_reason[0] = '\0';
/*
* Grab information encoded into our clo_flags field.
*/
/*
* handle the specific error
*/
/*
* If the trap occurred in privileged mode at TL=0, we need to check to
* see if we were executing in the kernel under on_trap() or t_lofault
* protection. If so, modify the saved registers so that we return
* from the trap to the appropriate trampoline routine.
*/
trampolined = 1;
}
trampolined = 1;
/*
* for peeks and caut_gets errors are expected
*/
if (!hp)
}
trampolined = 1;
}
}
/*
* If we're in user mode or we're doing a protected copy, we either
* want the ASTON code below to send a signal to the user process
* or we want to panic if aft_panic is set.
*
* If we're in privileged mode and we're not doing a copy, then we
* need to check if we've trampolined. If we haven't trampolined,
* we should panic.
*/
if (t_afsr_errs &
~(C_AFSR_BERR | C_AFSR_TO)))
} else if (!trampolined) {
}
/*
* If we've trampolined due to a privileged TO or BERR, or if an
* unprivileged TO or BERR occurred, we don't want to enqueue an
* event for that TO or BERR. Queue all other events (if any) besides
* ignore the number of events queued. If we haven't trampolined due
* to a TO or BERR, just enqueue events normally.
*/
if (trampolined) {
/*
* User mode, suppress messages if
* cpu_berr_to_verbose is not set.
*/
if (!cpu_berr_to_verbose)
}
/*
* Log any errors that occurred
*/
if (((log_afsr &
(t_afsr_errs &
(C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS)) == 0) {
}
/*
* Zero out + invalidate CPU logout.
*/
if (clop) {
}
/*
* IO errors that may have resulted in this trap.
*/
}
/*
 * UE: If the UE is in memory, we need to flush the bad
 * line from the Ecache. We also need to query the bus nexus for
 * fatal errors. Attempts to do diagnostic read on caches may
 * introduce more errors (especially when the module is bad).
 */
/*
* Ask our bus nexus friends if they have any fatal errors. If
* so, they will log appropriate error messages.
*/
/*
* We got a UE or RUE and are panicking, save the fault PA in
* a known location so that the platform specific panic code
* can check for copyback errors.
*/
panic_aflt = *aflt;
}
}
/*
* Flush Ecache line or entire Ecache
*/
#else /* JALAPENO || SERRANO */
/*
* IO errors that may have resulted in this trap.
*/
}
/*
* UE: If the UE is in memory, we need to flush the bad
* line from the Ecache. We also need to query the bus nexus for
* fatal errors. Attempts to do diagnostic read on caches may
* introduce more errors (especially when the module is bad).
*/
/*
* Ask our legacy bus nexus friends if they have any fatal
* errors. If so, they will log appropriate error messages.
*/
/*
* We got a UE and are panicking, save the fault PA in a known
* location so that the platform specific panic code can check
* for copyback errors.
*/
panic_aflt = *aflt;
}
}
/*
* Flush Ecache line or entire Ecache
*/
if (t_afsr_errs &
#endif /* JALAPENO || SERRANO */
/*
* We carefully re-enable NCEEN and CEEN and then check if any deferred
* or disrupting errors have happened. We do this because if a
* CEEN works differently on Cheetah than on Spitfire. Also, we enable
* deferred or disrupting error happening between checking the AFSR and
*
* Note: CEEN reenabled only if it was on when trap taken.
*/
if (clear_errors(&ch_flt)) {
/*
* Check for secondary errors, and avoid panicking if we
* have them
*/
t_afar) == 0) {
(C_AFSR_ASYNC_ERRS | C_AFSR_EXT_ASYNC_ERRS)) != 0);
}
NULL);
}
/*
* Panic here if aflt->flt_panic has been set. Enqueued errors will
* be logged as part of the panic flow.
*/
/*
* If we queued an error and we are going to return from the trap and
* the error was in user mode or inside of a copy routine, set AST flag
* so the queue will be drained before returning to user mode. The
* AST processing will also act on our failure policy.
*/
int pcb_flag = 0;
if (t_afsr_errs &
~(C_AFSR_BERR | C_AFSR_TO)))
pcb_flag |= ASYNC_HWERR;
if (t_afsr & C_AFSR_BERR)
pcb_flag |= ASYNC_BERR;
}
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
/*
* Handling of data and instruction parity errors (traps 0x71, 0x72).
*
* For Panther, P$ data parity errors during floating point load hits
* are also detected (reported as TT 0x71) and handled by this trap
* handler.
*
* is available.
*/
/*ARGSUSED*/
void
{
char *error_class;
/*
* Log the error.
* For icache parity errors the fault address is the trap PC.
* For dcache/pcache parity errors the instruction would have to
* be decoded to determine the address and that isn't possible
* at high PIL.
if (iparity) {
else
} else {
else
/*
* For panther we also need to check the P$ for parity errors.
*/
aflt->flt_payload =
}
}
}
if (iparity) {
/*
* Invalidate entire I$.
* This is required due to the use of diagnostic ASI
* accesses that may result in a loss of I$ coherency.
*/
if (cache_boot_state & DCU_IC) {
flush_icache();
}
/*
* According to section P.3.1 of the Panther PRM, we
* need to do a little more for recovery on those
* CPUs after encountering an I$ parity error.
*/
flush_ipb();
flush_pcache();
}
} else {
/*
* Since the valid bit is ignored when checking parity the
* D$ data and tag must also be corrected. Set D$ data bits
* to zero and set utag to 0, 1, 2, 3.
*/
/*
* According to section P.3.3 of the Panther PRM, we
* need to do a little more for recovery on those
* CPUs after encountering a D$ or P$ parity error.
*
* As far as clearing P$ parity errors, it is enough to
* simply invalidate all entries in the P$ since P$ parity
* error traps are only generated for floating point load
* hits.
*/
flush_icache();
flush_ipb();
flush_pcache();
}
}
/*
* Invalidate entire D$ if it was enabled.
* This is done to avoid stale data in the D$ which might
* occur with the D$ disabled and the trap handler doing
* stores affecting lines already in the D$.
*/
if (cache_boot_state & DCU_DC) {
flush_dcache();
}
/*
* Restore caches to their bootup state.
*/
/*
* Panic here if aflt->flt_panic has been set. Enqueued errors will
* be logged as part of the panic flow.
*/
/*
* If this error occurred at TL>0 then flush the E$ here to reduce
* the chance of getting an unrecoverable Fast ECC error. This
* flush will evict the part of the parity trap handler that is run
* at TL>1.
*/
if (tl) {
}
}
/*
* On an I$ parity error, mark the appropriate entries in the ch_async_flt_t
* to indicate which portions of the captured data should be in the ereport.
*/
void
{
int tag_index;
/*
* Parity error in I$ tag or data
*/
else
} else {
/*
* Parity error was not identified.
* Log tags and data for all ways.
*/
else
}
}
}
/*
* On an D$ parity error, mark the appropriate entries in the ch_async_flt_t
* to indicate which portions of the captured data should be in the ereport.
*/
void
{
int tag_index;
if (offset != -1) {
/*
* Parity error in D$ or P$ data array.
*
* First check to see whether the parity error is in D$ or P$
* since P$ data parity errors are reported in Panther using
* the same trap.
*/
} else {
}
} else if (way != -1) {
/*
* Parity error in D$ tag.
*/
}
}
#endif /* CPU_IMP_L1_CACHE_PARITY */
/*
* The cpu_async_log_err() function is called via the [uc]e_drain() function to
* post-process CPU events that are dequeued. As such, it can be invoked
* from softint context, from AST processing in the trap() flow, or from the
* panic flow. We decode the CPU-specific data, and take appropriate actions.
* Historically this entry point was used to log the actual cmn_err(9F) text;
* now with FMA it is used to prepare 'flt' to be converted into an ereport.
* With FMA this function now also returns a flag which indicates to the
* caller whether the ereport should be posted (1) or suppressed (0).
*/
static int
{
case CPU_INV_AFSR:
/*
* If it is a disrupting trap and the AFSR is zero, then
* the event has probably already been noted. Do not post
* an ereport.
*/
return (0);
else
return (1);
case CPU_TO:
case CPU_BERR:
case CPU_FATAL:
case CPU_FPUERR:
return (1);
case CPU_UE_ECACHE_RETIRE:
return (1);
/*
* Cases where we may want to suppress logging or perform
* extended diagnostics.
*/
case CPU_CE:
case CPU_EMC:
/*
* We want to skip logging and further classification
* only if ALL the following conditions are true:
*
* 1. There is only one error
* 2. That error is a correctable memory error
* 3. The error is caused by the memory scrubber (in
* which case the error will have occurred under
* on_trap protection)
* 4. The error is on a retired page
*
* Note: AFLT_PROT_EC is used places other than the memory
* scrubber. However, none of those errors should occur
* on a retired page.
*/
/*
* Since we're skipping logging, we'll need
* to schedule the re-enabling of CEEN
*/
(void) timeout(cpu_delayed_check_ce_errors,
* MICROSEC));
}
return (0);
}
}
/*
* only if the page is healthy (we don't want bad
* pages inducing too much diagnostic activity). If we could
* not find a page pointer then we also skip this. If
* ce_scrub_xdiag_recirc returns nonzero then it has chosen
* to copy and recirculate the event (for further diagnostics)
* and we should not proceed to log it here.
*
* This must be the last step here before the cpu_log_err()
* below - if an event recirculates cpu_ce_log_err() will
* not call the current function but just proceed directly
* to cpu_ereport_post after the cpu_log_err() avoided below.
*
* Note: Check cpu_impl_async_log_err if changing this
*/
} else {
return (0);
}
}
/*FALLTHRU*/
/*
* Cases where we just want to report the error and continue.
*/
case CPU_CE_ECACHE:
case CPU_UE_ECACHE:
case CPU_IV:
case CPU_ORPH:
return (1);
/*
* Cases where we want to fall through to handle panicking.
*/
case CPU_UE:
/*
* We want to skip logging in the same conditions as the
* CE case. In addition, we want to make sure we're not
* panicking.
*/
/* Zero the address to clear the error */
return (0);
}
}
break;
default:
/*
* If the us3_common.c code doesn't know the flt_type, it may
* be an implementation-specific code. Call into the impldep
* backend to find out what to do: if it tells us to continue,
* break and handle as if falling through from a UE; if not,
* the impldep backend has handled the error and we're done.
*/
case CH_ASYNC_LOG_DONE:
return (1);
case CH_ASYNC_LOG_RECIRC:
return (0);
case CH_ASYNC_LOG_CONTINUE:
break; /* continue on to handle UE-like error */
default:
"invalid fault type (0x%x)",
return (0);
}
}
/* ... fall through from the UE case */
if (!panicstr) {
} else {
/*
* Clear UEs on panic so that we don't
* get haunted by them during panic or
* after reboot
*/
(void) clear_errors(NULL);
}
}
return (1);
}
/*
* Retire the bad page that may contain the flushed error.
*/
void
{
}
/*
* Return true if the error specified in the AFSR indicates
*/
/* ARGSUSED */
static int
{
return (0);
#elif defined(CHEETAH_PLUS)
return ((t_afsr & C_AFSR_EXT_L3_DATA_ERRS) != 0);
return ((t_afsr & C_AFSR_EC_DATA_ERRS) != 0);
#else /* CHEETAH_PLUS */
return ((t_afsr & C_AFSR_EC_DATA_ERRS) != 0);
#endif
}
/*
* The cpu_log_err() function is called by cpu_async_log_err() to perform the
* generic event post-processing for correctable and uncorrectable memory,
* E$, and MTag errors. Historically this entry point was used to log bits of
* common cmn_err(9F) text; now with FMA it is used to prepare 'flt' to be
* converted into an ereport. In addition, it transmits the error to any
* platform-specific service-processor FRU logging routines, if available.
*/
void
{
char unum[UNUM_NAMLEN];
else
/*
* Determine syndrome status.
*/
/*
* Determine afar status.
*/
else
/*
* If afar status is not invalid do a unum lookup.
*/
if (afar_status != AFLT_STAT_INVALID) {
} else {
unum[0] = '\0';
}
/*
* Do not send the fruid message (plat_ecc_error_data_t)
* to the SC if it can handle the enhanced error information
* (plat_ecc_error2_data_t) or when the tunable
* ecc_log_fruid_enable is set to 0.
*/
if (&plat_ecc_capability_sc_get &&
if (&plat_log_fruid_error)
}
if (afar_status != AFLT_STAT_INVALID)
/*
* If we have a CEEN error, we do not reenable CEEN until after
* we exit the trap handler. Otherwise, another error may
* occur causing the handler to be entered recursively.
* We set a timeout to trigger in cpu_ceen_delay_secs seconds,
* to try and ensure that the CPU makes progress in the face
* of a CE storm.
*/
(void) timeout(cpu_delayed_check_ce_errors,
}
}
/*
* Invoked by error_init() early in startup and therefore before
* startup_errorq() is called to drain any error Q -
*
* startup()
* startup_end()
* error_init()
* cpu_error_init()
* errorq_init()
* errorq_drain()
* start_other_cpus()
*
* The purpose of this routine is to create error-related taskqs. Taskqs
* are used for this purpose because cpu_lock can't be grabbed from interrupt
* context.
*/
void
cpu_error_init(int items)
{
/*
* Create taskq(s) to reenable CE
*/
}
void
{
char unum[UNUM_NAMLEN];
int len;
case CPU_FAULT:
break;
case BUS_FAULT:
}
break;
case RECIRC_CPU_FAULT:
break;
case RECIRC_BUS_FAULT:
/*FALLTHRU*/
default:
return;
}
}
/*
* Scrub and classify a CE. This function must not modify the
* fault structure passed to it but instead should return the classification
* information.
*/
static uchar_t
{
/*
* Clear CEEN. CPU CE TL > 0 trap handling will already have done
* this, but our other callers have not. Disable preemption to
* avoid CPU migration so that we restore CEEN on the correct
* cpu later.
*
* CEEN is cleared so that further CEs that our instruction and
* data footprint induce do not cause us to either creep down
* kernel stack to the point of overflow, or do so much CE
* notification as to make little real forward progress.
*
* NCEEN must not be cleared. However it is possible that
* our accesses to the flt_addr may provoke a bus error or timeout
* if the offending address has just been unconfigured as part of
* a DR action. So we must operate under on_trap protection.
*/
orig_err = get_error_enable();
if (orig_err & EN_REG_CEEN)
/*
* Our classification algorithm includes the line state before
* the scrub; we'd like this captured after the detection and
* before the algorithm below - the earlier the better.
*
* If we've come from a cpu CE trap then this info already exists
* in the cpu logout area.
*
* For a CE detected by memscrub for which there was no trap
* (running with CEEN off) cpu_log_and_clear_ce has called
* cpu_ce_delayed_ec_logout to capture some cache data, and
* marked the fault structure as incomplete as a flag to later
* logging code.
*
* If called directly from an IO detected CE there has been
* no line data capture. In this case we logout to the cpu logout
* area - that's appropriate since it's the cpu cache data we need
* for classification. We thus borrow the cpu logout area for a
* short time, and cpu_ce_delayed_ec_logout will mark it as busy in
* this time (we will invalidate it again below).
*
* If called from the partner check xcall handler then this cpu
* (the partner) has not necessarily experienced a CE at this
* address. But we want to capture line state before its scrub
* attempt since we use that in our classification.
*/
if (logout_tried == B_FALSE) {
}
/*
* Scrub memory, then check AFSR for errors. The AFAR we scrub may
* no longer be valid (if DR'd since the initial event) so we
* perform this scrub under on_trap protection. If this access is
* ok then further accesses below will also be ok - DR cannot
* proceed while this thread is active (preemption is disabled);
* to be safe we'll nonetheless use on_trap again below.
*/
} else {
no_trap();
if (orig_err & EN_REG_CEEN)
return (disp);
}
no_trap();
/*
* Did the casx read of the scrub log a CE that matches the AFAR?
* Note that it's quite possible that the read sourced the data from
* another cpu.
*/
disp |= CE_XDIAG_CE1;
/*
* Read the data again. This time the read is very likely to
* come from memory since the scrub induced a writeback to memory.
*/
} else {
no_trap();
if (orig_err & EN_REG_CEEN)
return (disp);
}
no_trap();
/* Did that read induce a CE that matches the AFAR? */
disp |= CE_XDIAG_CE2;
/*
* Look at the logout information and record whether we found the
* line in l2/l3 cache.  For Panther we are interested in whether
* we found it in either cache (it won't reside in both but
* it is possible to read it that way given the moving target).
*/
int state;
int totalsize;
/*
* If hit is nonzero then a match was found and hit will
* be one greater than the index which hit. For Panther we
* also need to pay attention to level to see which of l2$ or
* l3$ it hit in.
*/
0, &level);
if (hit) {
--hit;
if (level == 2)
else
} else {
}
/*
* Cheetah variants use different state encodings -
* the CH_ECSTATE_* defines vary depending on the
* module we're compiled for. Translate into our
* one true version. Conflate Owner-Shared state
* of SSM mode with Owner as victimisation of such
* lines may cause a writeback.
*/
switch (state) {
case CH_ECSTATE_MOD:
disp |= EC_STATE_M;
break;
case CH_ECSTATE_OWN:
case CH_ECSTATE_OWS:
disp |= EC_STATE_O;
break;
case CH_ECSTATE_EXL:
disp |= EC_STATE_E;
break;
case CH_ECSTATE_SHR:
disp |= EC_STATE_S;
break;
default:
disp |= EC_STATE_I;
break;
}
}
/*
* If we initiated the delayed logout then we are responsible
* for invalidating the logout area.
*/
if (logout_tried == B_FALSE) {
}
}
/*
* Re-enable CEEN if we turned it off.
*/
if (orig_err & EN_REG_CEEN)
return (disp);
}
/*
* Scrub a correctable memory error and collect data for classification
* of CE type. This function is called in the detection path, ie tl0 handling
* of a correctable error trap (cpus) or interrupt (IO) at high PIL.
*/
void
{
/*
* Cheetah CE classification does not set any bits in flt_status.
* Instead we will record classification datapoints in flt_disp.
*/
/*
* To check if the error detected by IO is persistent, sticky or
* intermittent. This is noticed by clear_ecc().
*/
/*
* Record information from this first part of the algorithm in
* flt_disp.
*/
}
/*
* Select a partner to perform a further CE classification check from.
* Must be called with kernel preemption disabled (to stop the cpu list
* from changing). The detecting cpu we are partnering has cpuid
* aflt->flt_inst; we might not be running on the detecting cpu.
*
* Restrict choice to active cpus in the same cpu partition as ourselves in
* an effort to stop bad cpus in one partition causing other partitions to
* perform excessive diagnostic activity. Actually since the errorq drain
* is run from a softint most of the time and that is a global mechanism
* this isolation is only partial. Return NULL if we fail to find a
* suitable partner.
*
* We prefer a partner that is in a different latency group to ourselves as
* we will share fewer datapaths. If such a partner is unavailable then
* choose one in the same lgroup but prefer a different chip and only allow
* a sibling core if flags includes PTNR_SIBLINGOK. If all else fails and
* flags includes PTNR_SELFOK then permit selection of the original detector.
*
* We keep a cache of the last partner selected for a cpu, and we'll try to
* use that previous partner if no more than cpu_ce_ptnr_cachetime_sec seconds
* have passed since that selection was made. This provides the benefit
* of the point-of-view of different partners over time but without
* requiring frequent cpu list traversals.
*/
static cpu_t *
{
/*
* Short-circuit for the following cases:
* . the dtcr is not flagged active
* . there is just one cpu present
* . the detector has disappeared
* . we were given a bad flt_inst cpuid; this should not happen
* (eg PCI code now fills flt_inst) but if it does it is no
* reason to panic.
* . there is just one cpu left online in the cpu partition
*
* If we return NULL after this point then we do not update the
* chpr_ceptnr_seltime which will cause us to perform a full lookup
* again next time; this is the case where the only other cpu online
* in the detector's partition is on the same chip as the detector
* and since CEEN re-enable is throttled even that case should not
* hurt performance.
*/
return (NULL);
}
if (flags & PTNR_SELFOK) {
return (dtcr);
} else {
return (NULL);
}
}
/*
* Select a starting point.
*/
if (!lasttime) {
/*
* We've never selected a partner for this detector before.
* Start the scan at the next online cpu in the same cpu
* partition.
*/
/*
* Our last selection has not aged yet. If this partner:
* . is still a valid cpu,
* . is still in the same partition as the detector
* . is still marked active
* . satisfies the 'flags' argument criteria
* then select it again without updating the timestamp.
*/
!(flags & PTNR_SIBLINGOK))) {
} else {
} else {
}
return (sp);
}
} else {
/*
* Our last selection has aged. If it is nonetheless still a
* valid cpu then start the scan at the next cpu in the
* partition after our last partner. If the last selection
* is no longer a valid cpu then go with our default. In
* this way we slowly cycle through possible partners to
* obtain multiple viewpoints over time.
*/
} else {
}
}
/*
* We have a proposed starting point for our search, but if this
* cpu is offline then its cpu_next_part will point to itself
* so we can't use that to iterate over cpus in this partition in
* the loop below. We still want to avoid iterating over cpus not
* in our partition, so in the case that our starting point is offline
* we will repoint it to be the detector itself; and if the detector
* happens to be offline we'll return NULL from the following loop.
*/
}
do {
continue;
return (ptnr);
}
continue;
}
/*
* A foreign partner has already been returned if one was available.
*
* If locptnr is not NULL it is a cpu in the same lgroup as the
* detector, is active, and is not a sibling of the detector.
*
* If sibptnr is not NULL it is a sibling of the detector, and is
* active.
*
* If we have to resort to using the detector itself we have already
* checked that it is active.
*/
if (locptnr) {
return (locptnr);
return (sibptnr);
} else if (flags & PTNR_SELFOK) {
return (dtcr);
}
return (NULL);
}
/*
* Cross call handler that is requested to run on the designated partner of
* a cpu that experienced a possibly sticky or possibly persistent CE.
*/
static void
{
}
/*
* The associated errorqs are never destroyed so we do not need to deal with
* them disappearing before this timeout fires. If the affected memory
* has been DR'd out since the original event the scrub algorithm will catch
* any errors and return null disposition info. If the original detecting
* cpu has been DR'd out then ereport detector info will not be able to
* lookup CPU type; with a small timeout this is unlikely.
*/
static void
{
int ptnrtype;
&ptnrtype)) {
} else {
if (ncpus > 1)
}
}
/*
* Called from errorq drain code when processing a CE error, both from
* CPU and PCI drain functions. Decide what further classification actions,
* if any, we will perform. Perform immediate actions now, and schedule
* delayed actions as required. Note that we are no longer necessarily running
* on the detecting cpu, and that the async_flt structure will not persist on
* return from this function.
*
* Calls to this function should aim to be self-throttling in some way. With
* the delayed re-enable of CEEN the absolute rate of calls should not
* be excessive. Callers should also avoid performing in-depth classification
* for events in pages that are already known to be suspect.
*
* We return nonzero to indicate that the event has been copied and
* recirculated for further testing. The caller should not log the event
* in this case - it will be logged when further test results are available.
*
* Our possible contexts are that of errorq_drain: below lock level or from
* panic context. We can assume that the cpu we are running on is online.
*/
#ifdef DEBUG
static int ce_xdiag_forceaction;
#endif
int
{
int ptnrtype;
return (0);
} else if (!aflt->flt_in_memory) {
return (0);
}
/*
* Some correctable events are not scrubbed/classified, such as those
* noticed at the tail of cpu_deferred_error. So if there is no
* initial detector classification go no further.
*/
if (!CE_XDIAG_EXT_ALG_APPLIED(dtcrinfo)) {
return (0);
}
#ifdef DEBUG
if (ce_xdiag_forceaction != 0)
#endif
switch (action) {
case CE_ACT_LKYCHK: {
break;
}
(void) timeout((void (*)(void *))ce_lkychk_cb,
return (1);
}
case CE_ACT_PTNRCHK:
kpreempt_disable(); /* stop cpu list changing */
} else if (ncpus > 1) {
} else {
}
break;
case CE_ACT_DONE:
break;
case CE_ACT(CE_DISP_BAD):
default:
#ifdef DEBUG
#endif
ce_xdiag_bad++;
break;
}
return (0);
}
/*
* We route all errors through a single switch statement.
*/
void
{
case CPU_FAULT:
break;
case BUS_FAULT:
break;
default:
return;
}
}
/*
* Routine for panic hook callback from panic_idle().
*/
void
cpu_async_panic_callb(void)
{
if (afsr_errs) {
#if defined(SERRANO)
#endif /* SERRANO */
}
}
/*
* Routine to convert a syndrome into a syndrome code.
*/
static int
{
if (synd_status == AFLT_STAT_INVALID)
return (-1);
/*
* Use the syndrome to index the appropriate syndrome table,
* to get the code indicating which bit(s) is(are) bad.
*/
if (afsr_bit &
if (afsr_bit & C_AFSR_MSYND_ERRS) {
return (-1);
else
#else /* JALAPENO || SERRANO */
return (-1);
else
return (mtag_syndrome_tab[synd]);
#endif /* JALAPENO || SERRANO */
} else {
return (-1);
else
return (ecc_syndrome_tab[synd]);
}
} else {
return (-1);
}
}
int
{
if (&plat_get_mem_sid)
else
return (ENOTSUP);
}
int
{
if (&plat_get_mem_offset)
else
return (ENOTSUP);
}
int
{
if (&plat_get_mem_addr)
else
return (ENOTSUP);
}
/*
* Routine to return a string identifying the physical name
*/
int
{
int synd_code;
int ret;
/*
* An AFSR of -1 defaults to a memory syndrome.
*/
/*
* Syndrome code must be either a single-bit error code
* (0...143) or -1 for unum lookup.
*/
synd_code = -1;
if (&plat_get_mem_unum) {
buf[0] = '\0';
*lenp = 0;
}
return (ret);
}
return (ENOTSUP);
}
/*
* Wrapper for cpu_get_mem_unum() routine that takes an
* async_flt struct rather than explicit arguments.
*/
int
{
/*
* If we come thru here for an IO bus error aflt->flt_stat will
* not be the CPU AFSR, and we pass in a -1 to cpu_get_mem_unum()
* so it will interpret this as a memory error.
*/
}
/*
* Return unum string given synd_code and async_flt into
* the buf with size UNUM_NAMLEN
*/
static int
{
/*
* Syndrome code must be either a single-bit error code
* (0...143) or -1 for unum lookup.
*/
synd_code = -1;
if (&plat_get_mem_unum) {
buf[0] = '\0';
}
return (ret);
}
buf[0] = '\0';
return (ENOTSUP);
}
/*
* This routine is a more generic interface to cpu_get_mem_unum()
* that may be used by other modules (e.g. the 'mm' driver, through
* the 'MEM_NAME' ioctl, which is used by fmd to resolve unum's
*/
int
{
ushort_t flt_status = 0;
char unum[UNUM_NAMLEN];
/*
* Check for an invalid address.
*/
return (ENXIO);
else
/*
* Get aggregate AFSR for call to cpu_error_is_ecache_data.
*/
else {
#if defined(CHEETAH_PLUS)
#endif /* CHEETAH_PLUS */
}
/*
* Turn on ECC_ECACHE if error type is E$ Data.
*/
flt_status |= ECC_ECACHE;
if (ret != 0)
return (ret);
return (ENAMETOOLONG);
return (0);
}
/*
* Routine to return memory information associated
* with a physical address and syndrome.
*/
int
{
int synd_status, synd_code;
return (ENXIO);
else
if (p2get_mem_info != NULL)
else
return (ENOTSUP);
}
/*
* Routine to return a string identifying the physical
* name associated with a cpuid.
*/
int
{
int ret;
char unum[UNUM_NAMLEN];
if (&plat_get_cpu_unum) {
!= 0)
return (ret);
} else {
return (ENOTSUP);
}
return (ENAMETOOLONG);
return (0);
}
/*
* This routine exports the name buffer size.
*/
{
return (UNUM_NAMLEN);
}
/*
* Historical function, apparently not used.
*/
/* ARGSUSED */
void
{}
/*
* Historical function only called for SBus errors in debugging.
*/
/*ARGSUSED*/
void
{}
/*
* Clear the AFSR sticky bits. The routine returns a non-zero value if
* any of the AFSR's sticky errors are detected. If a non-null pointer to
* an async fault structure argument is passed in, the captured error state
* (AFSR, AFAR) info will be returned in the structure.
*/
int
{
#if defined(SERRANO)
#endif /* SERRANO */
}
}
/*
* Clear any AFSR error bits, and check for persistence.
*
* It would be desirable to also insist that syndrome match. PCI handling
* has already filled flt_synd. For errors trapped by CPU we only fill
* flt_synd when we queue the event, so we do not have a valid flt_synd
* during initial classification (it is valid if we're called as part of
* subsequent low-pil additional classification attempts). We could try
* to determine which syndrome to use: we know we're only called for
* implemented then what do we do in the case that we do experience an
* error on the same afar but with different syndrome? At the very least
* we should count such occurrences. Anyway, for now, we'll leave it as
* it has been for ages.
*/
static int
{
/*
* Snapshot the AFSR and AFAR and clear any errors
*/
/*
* If any of the same memory access error bits are still on and
* the AFAR matches, return that the error is persistent.
*/
}
/*
* Turn off all cpu error detection, normally only used for panics.
*/
void
cpu_disable_errors(void)
{
/*
* With error detection now turned off, check the other cpus
* logout areas for any unlogged errors.
*/
/*
* Make a second pass over the logout areas, in case
* there is a failing CPU in an error-trap loop which
* will write to the logout area once it is emptied.
*/
}
}
/*
* Enable errors.
*/
void
cpu_enable_errors(void)
{
}
/*
* Flush the entire ecache using displacement flush by reading through a
* physical address range twice as large as the Ecache.
*/
void
cpu_flush_ecache(void)
{
}
/*
* Return CPU E$ set size - E$ size divided by the associativity.
* We use this function in places where the CPU_PRIVATE ptr may not be
* initialized yet. Note that for send_mondo and in the Ecache scrubber,
* we're guaranteed that CPU_PRIVATE is initialized. Also, cpunodes is set
* up before the kernel switches from OBP's to the kernel's trap table, so
* we don't have to worry about cpunodes being uninitialized.
*/
int
{
if (CPU_PRIVATE(cp))
}
/*
* Flush Ecache line.
* Uses ASI_EC_DIAG for Cheetah+ and Jalapeno.
* Uses normal displacement flush for Cheetah.
*/
static void
{
}
/*
* Scrub physical address.
* Scrub code is different depending upon whether this a Cheetah+ with 2-way
* Ecache or direct-mapped Ecache.
*/
static void
{
}
/*
* Clear physical address.
* Scrub code is different depending upon whether this a Cheetah+ with 2-way
* Ecache or direct-mapped Ecache.
*/
void
{
}
#if defined(CPU_IMP_ECACHE_ASSOC)
/*
* Check for a matching valid line in all the sets.
* If found, return set# + 1. Otherwise return 0.
*/
static int
{
int nway = cpu_ecache_nway();
int i;
return (i+1);
}
return (0);
}
#endif /* CPU_IMP_ECACHE_ASSOC */
/*
* Check whether a line in the given logout info matches the specified
* fault address. If reqval is set then the line must not be Invalid.
* Returns 0 on failure; on success (way + 1) is returned and *level is
* set to 2 for l2$ or 3 for l3$.
*/
static int
{
int totalsize, ec_set_size;
int i, ways;
int match = 0;
int tagvalid;
/*
* Check the l2$ logout data
*/
if (ispanther) {
ways = PN_L2_NWAYS;
} else {
ways = cpu_ecache_nway();
}
/* remove low order PA bits from fault address not used in PA tag */
if (ispanther) {
} else {
}
match = i + 1;
*level = 2;
break;
}
}
return (match);
/* For Panther we also check the l3$ */
ways = PN_L3_NWAYS;
match = i + 1;
*level = 3;
break;
}
}
return (match);
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
/*
* Record information related to the source of an Dcache Parity Error.
*/
static void
{
int index;
/*
* Since instruction decode cannot be done at high PIL
* just examine the entire Dcache to locate the error.
*/
}
}
/*
* Check all ways of the Dcache at a specified index for good parity.
*/
static void
{
/*
* Perform diagnostic read.
*/
/*
* Check tag for even parity.
* Sum of 1 bits (including parity bit) should be even.
*/
/*
* If this is the first error log detailed information
* about it and check the snoop tag. Otherwise just
* record the fact that we found another error.
*/
CHP_DCSNTAG_PARMASK) & 1) {
}
}
}
/*
* Panther has more parity bits than the other
* processors for covering dcache data and so each
* byte of data in each word has its own parity bit.
*/
data_byte++) {
PN_DC_DATA_PARITY_MASK)) & 1) ^
(pbits & 1)) {
word);
}
pbits >>= 1;
data_word >>= 8;
}
parity_bits >>= 8;
}
} else {
/*
* Check data array for even parity.
* The 8 parity bits are grouped into 4 pairs each
* of which covers a 64-bit word. The endianness is
* reversed -- the low-order parity bits cover the
* high-order data words.
*/
}
}
}
}
}
static void
{
/*
* If this is the first error log detailed information about it.
* Otherwise just record the fact that we found another error.
*/
}
}
/*
* Record information related to the source of an Icache Parity Error.
*
* Called with the Icache disabled so any diagnostic accesses are safe.
*/
static void
{
int ic_set_size;
int ic_linesize;
int index;
if (CPU_PRIVATE(CPU)) {
} else {
}
}
/*
* Check all ways of the Icache at a specified index for good parity.
*/
static void
{
int ic_set_size;
int ic_linesize;
if (CPU_PRIVATE(CPU)) {
} else {
}
/*
* Panther has twice as many instructions per icache line and the
* instruction parity bit is in a different location.
*/
} else {
pn_inst_parity = 0;
}
/*
* Index at which we expect to find the parity error.
*/
/*
* Diagnostic reads expect address argument in ASI format.
*/
/*
* If this is the index in which we expect to find the
* error log detailed information about each of the ways.
* This information will be displayed later if we can't
* determine the exact way in which the error is located.
*/
/*
* Check tag for even parity.
* Sum of 1 bits (including parity bit) should be even.
*/
/*
* If this way is the one in which we expected
* to find the error record the way and check the
* snoop tag. Otherwise just record the fact we
* found another error.
*/
CHP_ICSNTAG_PARMASK) & 1) {
}
}
continue;
}
/*
* Check instruction data for even parity.
* Bits participating in parity differ for PC-relative
* versus non-PC-relative instructions.
*/
/*
* If this way is the one in which we expected
* to find the error record the way and offset.
* Otherwise just log the fact we found another
* error.
*/
instr * 4;
}
continue;
}
}
}
}
/*
* Record information related to the source of an Pcache Parity Error.
*/
static void
{
int index;
/*
* Since instruction decode cannot be done at high PIL just
* examine the entire Pcache to check for any parity errors.
*/
}
}
/*
* Check all ways of the Pcache at a specified index for good parity.
*/
static void
{
/*
* Perform diagnostic read.
*/
/*
* Check data array for odd parity. There are 8 parity
* bits (bits 57:50 of ASI_PCACHE_STATUS_DATA) and each
* of those bits covers exactly 8 bytes of the data
* array:
*
* parity bit P$ data bytes covered
* ---------- ---------------------
* 50 63:56
* 51 55:48
* 52 47:40
* 53 39:32
* 54 31:24
* 55 23:16
* 56 15:8
* 57 7:0
*/
/*
* If this is the first error log detailed
* information about it. Otherwise just record
* the fact that we found another error.
*/
sizeof (ch_pc_data_t));
}
}
}
}
}
/*
* Add L1 Data cache data to the ereport payload.
*/
static void
{
int i, ways_to_check, ways_logged = 0;
/*
* If this is an D$ fault then there may be multiple
* ways captured in the ch_parity_log_t structure.
* Otherwise, there will be at most one way captured
* in the ch_diag_data_t struct.
* Check each way to see if it should be encoded.
*/
else
ways_to_check = 1;
for (i = 0; i < ways_to_check; i++) {
else
sizeof (ch_dc_data_t));
ways_logged++;
}
}
/*
* Add the dcache data to the payload.
*/
if (ways_logged != 0) {
}
}
/*
* Add L1 Instruction cache data to the ereport payload.
*/
static void
{
int i, ways_to_check, ways_logged = 0;
/*
* If this is an I$ fault then there may be multiple
* ways captured in the ch_parity_log_t structure.
* Otherwise, there will be at most one way captured
* in the ch_diag_data_t struct.
* Check each way to see if it should be encoded.
*/
else
ways_to_check = 1;
for (i = 0; i < ways_to_check; i++) {
else
sizeof (ch_ic_data_t));
ways_logged++;
}
}
/*
* Add the icache data to the payload.
*/
if (ways_logged != 0) {
}
}
#endif /* CPU_IMP_L1_CACHE_PARITY */
/*
* Add ecache data to payload.
*/
static void
{
int i, ways_logged = 0;
/*
* Check each way to see if it should be encoded
* and concatenate it into a temporary buffer.
*/
for (i = 0; i < CHD_EC_DATA_SETS; i++) {
sizeof (ch_ec_data_t));
ways_logged++;
}
}
/*
* Panther CPUs have an additional level of cache and so
* what we just collected was the L3 (ecache) and not the
* L2 cache.
*/
/*
* Add the L3 (ecache) data to the payload.
*/
if (ways_logged != 0) {
nelem = sizeof (ch_ec_data_t) /
sizeof (uint64_t) * ways_logged;
}
/*
* Now collect the L2 cache.
*/
ways_logged = 0;
for (i = 0; i < PN_L2_NWAYS; i++) {
sizeof (ch_ec_data_t));
ways_logged++;
}
}
}
/*
* Add the L2 cache data to the payload.
*/
if (ways_logged != 0) {
nelem = sizeof (ch_ec_data_t) /
sizeof (uint64_t) * ways_logged;
}
}
/*
* Initialize cpu scheme for specified cpu.
*/
static void
{
}
/*
* Returns ereport resource type.
*/
static int
{
case CPU_CE_ECACHE:
case CPU_UE_ECACHE:
case CPU_UE_ECACHE_RETIRE:
case CPU_ORPH:
/*
* If AFSR error bit indicates L2$ Data for Cheetah,
* Cheetah+ or Jaguar, or L3$ Data for Panther, return
* E$ Data type, otherwise, return CPU type.
*/
return (ERRTYPE_ECACHE_DATA);
return (ERRTYPE_CPU);
case CPU_CE:
case CPU_UE:
case CPU_EMC:
case CPU_DUE:
case CPU_RCE:
case CPU_RUE:
case CPU_FRC:
case CPU_FRU:
return (ERRTYPE_MEMORY);
case CPU_IC_PARITY:
case CPU_DC_PARITY:
case CPU_FPUERR:
case CPU_PC_PARITY:
case CPU_ITLB_PARITY:
case CPU_DTLB_PARITY:
return (ERRTYPE_CPU);
}
return (ERRTYPE_UNKNOWN);
}
/*
* Encode the data saved in the ch_async_flt_t struct into
* the FM ereport payload.
*/
static void
{
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
#if defined(CPU_IMP_L1_CACHE_PARITY)
#endif /* CPU_IMP_L1_CACHE_PARITY */
#if defined(CHEETAH_PLUS)
#endif /* CHEETAH_PLUS */
/*
* Create the FMRI that goes into the payload
* and contains the unum info if necessary.
*/
switch (rtype) {
case ERRTYPE_MEMORY:
case ERRTYPE_ECACHE_DATA:
/*
* Memory errors, do unum lookup
*/
if (*afar_status == AFLT_STAT_INVALID)
break;
if (rtype == ERRTYPE_ECACHE_DATA)
else
break;
&len);
if (ret == 0) {
&offset);
}
break;
case ERRTYPE_CPU:
/*
* On-board processor array error, add cpu resource.
*/
break;
}
}
}
/*
* Initialize the way info if necessary.
*/
void
{
int i;
/*
* Initialize the info in the CPU logout structure.
* The I$/D$ way information is not initialized here
* since it is captured in the logout assembly code.
*/
for (i = 0; i < CHD_EC_DATA_SETS; i++)
for (i = 0; i < PN_L2_NWAYS; i++)
}
/*
* Returns whether fault address is valid for this error bit and
* whether the address is "in memory" (i.e. pf_is_memory returns 1).
*/
int
{
return ((t_afsr_bit & C_AFSR_MEMORY) &&
}
/*
* Returns whether fault address is valid based on the error bit for the
* one event being queued and whether the address is "in memory".
*/
static int
{
int afar_status;
if (!(t_afsr_bit & C_AFSR_MEMORY) ||
return (0);
switch (afar_status) {
case AFLT_STAT_VALID:
return (1);
case AFLT_STAT_AMBIGUOUS:
/*
* Status is ambiguous since another error bit (or bits)
* of equal priority to the specified bit on in the afsr,
* so check those bits. Return 1 only if the bits on in the
* same class as the t_afsr_bit are also C_AFSR_MEMORY bits.
* Otherwise not all the equal priority bits are for memory
* errors, so return 0.
*/
/*
* Get other bits that are on in t_afsr_bit's priority
* class to check for Memory Error bits only.
*/
if (afsr_ow & t_afsr_bit) {
return (0);
else
return (1);
}
}
/*FALLTHRU*/
default:
return (0);
}
}
static void
{
#if defined(CPU_IMP_ECACHE_ASSOC)
int i, nway;
#endif /* CPU_IMP_ECACHE_ASSOC */
/*
* Check if the CPU log out captured was valid.
*/
return;
#if defined(CPU_IMP_ECACHE_ASSOC)
nway = cpu_ecache_nway();
i = cpu_ecache_line_valid(ch_flt);
if (i == 0 || i > nway) {
for (i = 0; i < nway; i++)
} else
#else /* CPU_IMP_ECACHE_ASSOC */
#endif /* CPU_IMP_ECACHE_ASSOC */
#if defined(CHEETAH_PLUS)
#endif /* CHEETAH_PLUS */
}
else
}
}
/*
* Cheetah ECC calculation.
*
* We only need to do the calculation on the data bits and can ignore check
* bit and Mtag bit terms in the calculation.
*/
/*
* low order 64-bits high-order 64-bits
*/
{ 0x46bffffeccd1177f, 0x488800022100014c },
{ 0x42fccc81331ff77f, 0x14424f1010249184 },
{ 0x8898827c222f1ffe, 0x22c1222808184aaf },
{ 0xf7632203e131ccf1, 0xe1241121848292b8 },
{ 0x7f5511421b113809, 0x901c88d84288aafe },
{ 0x1d49412184882487, 0x8f338c87c044c6ef },
{ 0xf552181014448344, 0x7ff8f4443e411911 },
{ 0x2189240808f24228, 0xfeeff8cc81333f42 },
{ 0x3280008440001112, 0xfee88b337ffffd62 },
};
/*
* 64-bit population count, use well-known popcnt trick.
* We could use the UltraSPARC V9 POPC instruction, but some
* CPUs including Cheetahplus and Jaguar do not support that
* instruction.
*/
int
{
int cnt;
cnt++;
return (cnt);
}
/*
* Generate the 9 ECC bits for the 128-bit chunk based on the table above.
* Note that xor'ing an odd number of 1 bits == 1 and xor'ing an even number
* of 1 bits == 0, so we can just use the least significant bit of the popcnt
* instead of doing all the xor's.
*/
{
int bitno, s;
int synd = 0;
}
return (synd);
}
/*
* Queue one event based on ecc_type_to_info entry. If the event has an AFT1
* tag associated with it or is a fatal event (aflt_panic set), it is sent to
* the UE event queue. Otherwise it is dispatched to the CE event queue.
*/
static void
{
if (reason &&
}
else
else
else
}
/*
* Queue events on async event queue one event per error bit. First we
* queue the events that we "expect" for the given trap, then we queue events
* that we may not expect. Return number of events queued.
*/
int
{
int nevents = 0;
#if defined(CHEETAH_PLUS)
#endif
#if defined(CHEETAH_PLUS)
/*
*/
/*
* Set the AFSR and AFAR fields to the shadow registers. The
* flt_addr and flt_stat fields will be reset to the primaries
* below, but the sdw_addr and sdw_stat will stay as the
* secondaries.
*/
/*
* If the primary and shadow AFSR differ, tag the shadow as
* the first fault.
*/
}
/*
* Check AFSR bits as well as AFSR_EXT bits in order of
* the AFAR overwrite priority. Our stored AFSR_EXT value
* is expected to be zero for those CPUs which do not have
* an AFSR_EXT register.
*/
if ((eccp->ec_afsr_bit &
nevents++;
}
}
/*
* If the ME bit is on in the primary AFSR turn all the
* error bits on again that may set the ME bit to make
* sure we see the ME AFSR error logs.
*/
if ((primary_afsr & C_AFSR_ME) != 0)
}
#endif /* CHEETAH_PLUS */
/*
* Queue expected errors, error bit and fault type must match
* in the ecc_type_to_info table.
*/
eccp++) {
#if defined(SERRANO)
/*
* the address and the associated data is
* in the shadow logout area.
*/
} else {
}
#else /* SERRANO */
#endif /* SERRANO */
nevents++;
}
}
/*
* Queue unexpected errors, error bit only match.
*/
eccp++) {
#if defined(SERRANO)
/*
* the address and the associated data is
* in the shadow logout area.
*/
} else {
}
#else /* SERRANO */
#endif /* SERRANO */
nevents++;
}
}
return (nevents);
}
/*
* Return trap type number.
*/
{
return (TRAP_TYPE_ECC_I);
return (TRAP_TYPE_ECC_D);
return (TRAP_TYPE_ECC_F);
return (TRAP_TYPE_ECC_C);
return (TRAP_TYPE_ECC_DP);
return (TRAP_TYPE_ECC_IP);
return (TRAP_TYPE_ECC_ITLB);
return (TRAP_TYPE_ECC_DTLB);
return (TRAP_TYPE_UNKNOWN);
}
/*
* The following array is used for quick translation - it must
* stay in sync with ce_dispact_t.
*/
static char *cetypes[] = {
};
char *
{
/*
* The memory payload bundle is shared by some events that do
* not perform any classification. For those flt_disp will be
* 0 and we will return "unknown".
*/
return (cetypes[CE_DISP_UNKNOWN]);
/*
* It is also possible that no scrub/classification was performed
* by the detector, for instance where a disrupting error logged
* in the AFSR while CEEN was off in cpu_deferred_error.
*/
if (!CE_XDIAG_EXT_ALG_APPLIED(dtcrinfo))
return (cetypes[CE_DISP_UNKNOWN]);
/*
* Lookup type in initial classification/action table
*/
/*
* A bad lookup is not something to panic production systems for.
*/
if (dispact == CE_DISP_BAD)
return (cetypes[CE_DISP_UNKNOWN]);
switch (disp) {
case CE_DISP_UNKNOWN:
case CE_DISP_INTERMITTENT:
break;
case CE_DISP_POSS_PERS:
/*
* "Possible persistent" errors to which we have applied a valid
* leaky test can be separated into "persistent" or "leaky".
*/
if (CE_XDIAG_TESTVALID(lkyinfo)) {
if (CE_XDIAG_CE1SEEN(lkyinfo) ||
else
disp = CE_DISP_PERS;
}
break;
case CE_DISP_POSS_STICKY:
/*
* Promote "possible sticky" results that have been
* confirmed by a partner test to "sticky". Unconfirmed
* "possible sticky" events are left at that status - we do not
*/
if (CE_XDIAG_TESTVALID(ptnrinfo) &&
/*
* Promote "possible sticky" results on a uniprocessor
* to "sticky"
*/
if (disp == CE_DISP_POSS_STICKY &&
break;
default:
break;
}
}
/*
* Given the entire afsr, the specific bit to check and a prioritized list of
* error bits, determine the validity of the various overwrite priority
* different overwrite priorities.
*
* Given a specific afsr error bit and the entire afsr, there are three cases:
* INVALID: The specified bit is lower overwrite priority than some other
* VALID: The specified bit is higher priority than all other error bits
* which are on in the afsr.
* AMBIGUOUS: Another error bit (or bits) of equal priority to the specified
* bit is on in the afsr.
*/
int
{
/*
* If bit is in the priority class, check to see if another
* bit in the same class is on => ambiguous. Otherwise,
* the value is valid. If the bit is not on at this priority
* class, but a higher priority bit is on, then the value is
* invalid.
*/
/*
* If equal pri bit is on, ambiguous.
*/
return (AFLT_STAT_AMBIGUOUS);
return (AFLT_STAT_VALID);
break;
}
/*
* We didn't find a match or a higher priority bit was on. Not
* finding a match handles the case of invalid AFAR for IVC, IVU.
*/
return (AFLT_STAT_INVALID);
}
static int
{
#if defined(SERRANO)
else
#endif /* SERRANO */
}
static int
{
}
static int
{
}
static int
{
#ifdef lint
#endif
#if defined(CHEETAH_PLUS)
/*
* The M_SYND overwrite policy is combined with the E_SYND overwrite
* policy for Cheetah+ and separate for Panther CPUs.
*/
if (afsr_bit & C_AFSR_MSYND_ERRS) {
else
else
#else /* CHEETAH_PLUS */
if (afsr_bit & C_AFSR_MSYND_ERRS) {
#endif /* CHEETAH_PLUS */
} else {
return (AFLT_STAT_INVALID);
}
}
/*
* Slave CPU stick synchronization.
*/
void
sticksync_slave(void)
{
int i;
int tries = 0;
/* wait for the master side */
while (stick_sync_cmd != SLAVE_START)
;
/*
* Synchronization should only take a few tries at most. But in the
* odd case where the cpu isn't cooperating we'll keep trying. A cpu
* without it's stick synchronized wouldn't be a good citizen.
*/
while (slave_done == 0) {
/*
* Time skew calculation.
*/
for (i = 0; i < stick_iter; i++) {
/* make location hot */
timestamp[EV_A_START] = 0;
/* tell the master we're ready */
/* and wait */
while (stick_sync_cmd != SLAVE_CONT)
;
/* Event B end */
/* calculate time skew */
/* keep running count */
} /* for */
/*
* Adjust stick for time skew if not within the max allowed;
* otherwise we're all done.
*/
if (stick_iter != 0)
/*
* If the skew is 1 (the slave's STICK register
* is 1 STICK ahead of the master's), stick_adj
* could fail to adjust the slave's STICK register
* if the STICK read on the slave happens to
* align with the increment of the STICK.
* Therefore, we increment the skew to 2.
*/
if (av_tskew == 1)
av_tskew++;
} else
slave_done = 1;
#ifdef DEBUG
if (tries < DSYNC_ATTEMPTS)
++tries;
#endif /* DEBUG */
#ifdef lint
#endif
} /* while */
/* allow the master to finish */
}
/*
* Master CPU side of stick synchronization.
* - timestamp end of Event A
* - timestamp beginning of Event B
*/
void
sticksync_master(void)
{
int i;
/* tell the slave we've started */
slave_done = 0;
while (slave_done == 0) {
for (i = 0; i < stick_iter; i++) {
/* wait for the slave */
while (stick_sync_cmd != MASTER_START)
;
/* Event A end */
/* make location hot */
timestamp[EV_B_START] = 0;
/* tell the slave to continue */
} /* for */
/* wait while slave calculates time skew */
while (stick_sync_cmd == SLAVE_CONT)
;
} /* while */
}
/*
* do Spitfire hack of xcall'ing all the cpus to ask to check for them. Also,
* in cpu_async_panic_callb, each cpu checks for CPU events on its way to
* panic idle.
*/
/*ARGSUSED*/
void
{}
struct kmem_cache *ch_private_cache;
/*
* Cpu private unitialization. Uninitialize the Ecache scrubber and
* deallocate the scrubber data structures and cpu_private data structure.
*/
void
{
}
/*
* Cheetah Cache Scrubbing
*
* The primary purpose of Cheetah cache scrubbing is to reduce the exposure
* of E$ tags, D$ data, and I$ data to cosmic ray events since they are not
* protected by either parity or ECC.
*
* We currently default the E$ and D$ scan rate to 100 (scan 10% of the
* cache per second). Due to the the specifics of how the I$ control
* logic works with respect to the ASI used to scrub I$ lines, the entire
* I$ is scanned at once.
*/
/*
* Tuneables to enable and disable the scrubbing of the caches, and to tune
* on a running system.
*/
/*
*/
#if defined(JALAPENO)
/*
* Due to several errata (82, 85, 86), we don't enable the L2$ scrubber
* on Jalapeno.
*/
int ecache_scrub_enable = 0;
#else /* JALAPENO */
/*
* With all other cpu types, E$ scrubbing is on by default
*/
int ecache_scrub_enable = 1;
#endif /* JALAPENO */
/*
* The I$ scrubber tends to cause latency problems for real-time SW, so it
* is disabled by default on non-Cheetah systems
*/
int icache_scrub_enable = 0;
/*
* Tuneables specifying the scrub calls per second and the scan rate
* for each cache
*
* The cyclic times are set during boot based on the following values.
* Changing these values in mdb after this time will have no effect. If
* reboot.
*/
int ecache_calls_a_sec = 1;
int dcache_calls_a_sec = 2;
int icache_calls_a_sec = 2;
int ecache_scan_rate_idle = 1;
int ecache_scan_rate_busy = 1;
int dcache_scan_rate_idle = 1;
int dcache_scan_rate_busy = 1;
int icache_scan_rate_idle = 1;
int icache_scan_rate_busy = 1;
#else /* CHEETAH_PLUS || JALAPENO || SERRANO */
#endif /* CHEETAH_PLUS || JALAPENO || SERRANO */
/*
* In order to scrub on offline cpus, a cross trap is sent. The handler will
* increment the outstanding request counter and schedule a softint to run
* the scrubber.
*/
extern xcfunc_t cache_scrubreq_tl1;
/*
* These are the softint functions for each cache scrubber
*/
/*
* The cache scrub info table contains cache specific information
* and allows for some of the scrub code to be table driven, reducing
* duplication of cache similar code.
*
* This table keeps a copy of the value in the calls per second variable
* (?cache_calls_a_sec). This makes it much more difficult for someone
* to cause us problems (for example, by setting ecache_calls_a_sec to 0 in
* mdb in a misguided attempt to disable the scrubber).
*/
struct scrub_info {
int *csi_enable; /* scrubber enable flag */
int csi_freq; /* scrubber calls per second */
int csi_index; /* index to chsm_outstanding[] */
} cache_scrub_info[] = {
};
/*
* If scrubbing is enabled, increment the outstanding request counter. If it
* is 1 (meaning there were no previous requests outstanding), call
* setsoftint_tl1 through xt_one_unchecked, which eventually ends up doing
* a self trap.
*/
static void
{
}
}
}
/*
* Omni cyclics don't fire on offline cpus, so we use another cyclic to
* cross-trap the offline cpus.
*/
static void
{
if (CPUSET_ISNULL(cpu_offline_set)) {
/*
* No offline cpus - nothing to do
*/
return;
}
}
}
/*
* This is the initial setup for the scrubber cyclics - it sets the
* interrupt level, frequency, and function to call.
*/
/*ARGSUSED*/
static void
{
}
/*
* Initialization for cache scrubbing.
* This routine is called AFTER all cpus have had cpu_init_private called
* to initialize their private data areas.
*/
void
cpu_init_cache_scrub(void)
{
int i;
struct scrub_info *csi;
/*
* save away the maximum number of lines for the D$
*/
/*
* register the softints for the cache scrubbing
*/
/*
* start the scrubbing for all the caches
*/
for (i = 0; i < CACHE_SCRUBBER_COUNT; i++) {
csi = &cache_scrub_info[i];
if (!(*csi->csi_enable))
continue;
/*
* force the following to be true:
* 1 <= calls_a_sec <= hz
*/
}
}
}
/*
* Indicate that the specified cpu is idle.
*/
void
{
}
}
/*
* Indicate that the specified cpu is busy.
*/
void
{
}
}
/*
* Initialization for cache scrubbing for the specified cpu.
*/
void
{
/* initialize the number of lines in the caches */
/*
* do_scrub() and do_scrub_offline() check both the global
* ?cache_scrub_enable and this per-cpu enable variable. All scrubbers
* check this value before scrubbing. Currently, we use it to
* disable the E$ scrubber on multi-core cpus or while running at
* slowed speed. For now, just turn everything on and allow
* cpu_init_private() to change it if necessary.
*/
}
/*
* Un-initialization for cache scrubbing for the specified cpu.
*/
static void
{
/*
* un-initialize bookkeeping for cache scrubbing
*/
}
/*
* Called periodically on each CPU to scrub the D$.
*/
static void
scrub_dcache(int how_many)
{
int i;
/*
* scrub the desired number of lines
*/
for (i = 0; i < how_many; i++) {
/*
* scrub a D$ line
*/
/*
* calculate the next D$ line to scrub, assumes
* that dcache_nlines is a power of 2
*/
}
/*
* set the scrub index for the next visit
*/
}
/*
* Handler for D$ scrub inum softint. Call scrub_dcache until
* we decrement the outstanding request count to zero.
*/
/*ARGSUSED*/
static uint_t
{
int i;
int how_many;
int outstanding;
/*
* The scan rates are expressed in units of tenths of a
* percent. A scan rate of 1000 (100%) means the whole
* cache is scanned every second.
*/
do {
outstanding = *countp;
for (i = 0; i < outstanding; i++) {
}
return (DDI_INTR_CLAIMED);
}
/*
* Called periodically on each CPU to scrub the I$. The I$ is scrubbed
* by invalidating lines. Due to the characteristics of the ASI which
* is used to invalidate an I$ line, the entire I$ must be invalidated
* vs. an individual I$ line.
*/
static void
scrub_icache(int how_many)
{
int i;
/*
* scrub the desired number of lines
*/
for (i = 0; i < how_many; i++) {
/*
* since the entire I$ must be scrubbed at once,
* wait until the index wraps to zero to invalidate
* the entire I$
*/
if (index == 0) {
}
/*
* calculate the next I$ line to scrub, assumes
* that chsm_icache_nlines is a power of 2
*/
}
/*
* set the scrub index for the next visit
*/
}
/*
* Handler for I$ scrub inum softint. Call scrub_icache until
* we decrement the outstanding request count to zero.
*/
/*ARGSUSED*/
static uint_t
{
int i;
int how_many;
int outstanding;
/*
* The scan rates are expressed in units of tenths of a
* percent. A scan rate of 1000 (100%) means the whole
* cache is scanned every second.
*/
do {
outstanding = *countp;
for (i = 0; i < outstanding; i++) {
}
return (DDI_INTR_CLAIMED);
}
/*
* Called periodically on each CPU to scrub the E$.
*/
static void
scrub_ecache(int how_many)
{
int i;
/*
* scrub the desired number of lines
*/
for (i = 0; i < how_many; i++) {
/*
* scrub the E$ line
*/
/*
* calculate the next E$ line to scrub based on twice
* the number of E$ lines (to displace lines containing
* flush area data), assumes that the number of lines
* is a power of 2
*/
}
/*
* set the ecache scrub index for the next visit
*/
}
/*
* Handler for E$ scrub inum softint. Call the E$ scrubber until
* we decrement the outstanding request count to zero.
*
* Due to interactions with cpu_scrub_cpu_setup(), the outstanding count may
* become negative after the atomic_add_32_nv(). This is not a problem, as
* the next trip around the loop won't scrub anything, and the next add will
* reset the count back to zero.
*/
/*ARGSUSED*/
static uint_t
{
int i;
int how_many;
int outstanding;
/*
* The scan rates are expressed in units of tenths of a
* percent. A scan rate of 1000 (100%) means the whole
* cache is scanned every second.
*/
do {
outstanding = *countp;
for (i = 0; i < outstanding; i++) {
}
return (DDI_INTR_CLAIMED);
}
/*
* Timeout function to reenable CE
*/
static void
cpu_delayed_check_ce_errors(void *arg)
{
TQ_NOSLEEP)) {
}
}
/*
* CE Deferred Re-enable after trap.
*
* When the CPU gets a disrupting trap for any of the errors
* controlled by the CEEN bit, CEEN is disabled in the trap handler
* immediately. To eliminate the possibility of multiple CEs causing
* recursive stack overflow in the trap handler, we cannot
* reenable CEEN while still running in the trap handler. Instead,
* after a CE is logged on a CPU, we schedule a timeout function,
* cpu_check_ce_errors(), to trigger after cpu_ceen_delay_secs
* seconds. This function will check whether any further CEs
* have occurred on that CPU, and if none have, will reenable CEEN.
*
* If further CEs have occurred while CEEN is disabled, another
* timeout will be scheduled. This is to ensure that the CPU can
* make progress in the face of CE 'storms', and that it does not
* spend all its time logging CE errors.
*/
static void
cpu_check_ce_errors(void *arg)
{
/*
* We acquire cpu_lock.
*/
/*
* verify that the cpu is still around, DR
* could have got there first ...
*/
return;
}
/*
* make sure we don't migrate across CPUs
* while checking our CE status.
*/
/*
* If we are running on the CPU that got the
* CE, we can do the checks directly.
*/
cpu_check_ce(TIMEOUT_CEEN_CHECK, 0, 0, 0);
return;
}
/*
* send an x-call to get the CPU that originally
* got the CE to do the necessary checks. If we can't
* send the x-call, reschedule the timeout, otherwise we
* lose CEEN forever on that CPU.
*/
TIMEOUT_CEEN_CHECK, 0);
} else {
/*
* When the CPU is not accepting xcalls, or
* the processor is offlined, we don't want to
* incur the extra overhead of trying to schedule the
* CE timeout indefinitely. However, we don't want to lose
* CE checking forever.
*
* Keep rescheduling the timeout, accepting the additional
* overhead as the cost of correctness in the case where we get
* a CE, disable CEEN, offline the CPU during the
* the timeout interval, and then online it at some
* point in the future. This is unlikely given the short
* cpu_ceen_delay_secs.
*/
(void) timeout(cpu_delayed_check_ce_errors,
}
}
/*
* This routine will check whether CEs have occurred while
* CEEN is disabled. Any CEs detected will be logged and, if
* possible, scrubbed.
*
* The memscrubber will also use this routine to clear any errors
* caused by its scrubbing with CEEN disabled.
*
* flag == SCRUBBER_CEEN_CHECK
* paddr physical addr. for start of scrub pages
* vaddr virtual addr. for scrub area
* psz page size of area to be scrubbed
*
* flag == TIMEOUT_CEEN_CHECK
* timeout function has triggered, reset timeout or CEEN
*
* Note: We must not migrate cpus during this function. This can be
* achieved by one of:
* - invoking as target of an x-call in which case we're at XCALL_PIL
* The flag value must be first xcall argument.
* - disabling kernel preemption. This should be done for very short
* periods so is not suitable for SCRUBBER_CEEN_CHECK where we might
* scrub an extended area with cpu_check_block. The call for
* TIMEOUT_CEEN_CHECK uses this so cpu_check_ce must be kept
* brief for this case.
* - binding to a cpu, eg with thread_affinity_set(). This is used
* in the SCRUBBER_CEEN_CHECK case, but is not practical for
* the TIMEOUT_CEEN_CHECK because both need cpu_lock.
*/
void
{
/* Read AFSR */
/*
* If no CEEN errors have occurred during the timeout
* interval, it is safe to re-enable CEEN and exit.
*/
if (flag == TIMEOUT_CEEN_CHECK &&
return;
}
/*
* Ensure that CEEN was not reenabled (maybe by DR) before
*/
/*
* timeout will be rescheduled when the error is logged.
*/
else
/*
* If the memory scrubber runs while CEEN is
* disabled, (or if CEEN is disabled during the
* scrub as a result of a CE being triggered by
* it), the range being scrubbed will not be
* completely cleaned. If there are multiple CEs
* in the range at most two of these will be dealt
* with, (one by the trap handler and one by the
* timeout). It is also possible that none are dealt
* with, (CEEN disabled and another CE occurs before
* the timeout triggers). So to ensure that the
* memory is actually scrubbed, we have to access each
* memory location in the range and then check whether
* that access causes a CE.
*/
/*
* Force a load from physical memory for each
* 64-byte block, then check AFSR to determine
* whether this access caused an error.
*
* This is a slow way to do a scrub, but as it will
* only be invoked when the memory scrubber actually
* triggered a CE, it should not happen too
* frequently.
*
* cut down what we need to check as the scrubber
* has verified up to AFAR, so get it's offset
* into the page and start there.
*/
(psz - 1));
psz);
}
}
/*
* Reset error enable if this CE is not masked.
*/
if ((flag == TIMEOUT_CEEN_CHECK) &&
}
/*
* Attempt a cpu logout for an error that we did not trap for, such
* as a CE noticed with CEEN off. It is assumed that we are still running
* on the cpu that took the error and that we cannot migrate. Returns
* 0 on success, otherwise nonzero.
*/
static int
{
return (0);
return (0);
return (1);
}
/*
* We got an error while CEEN was disabled. We
* need to clean up after it and log whatever
* information we have on the CE.
*/
void
{
char pr_reason[MAX_REASON_STRING];
#if defined(SERRANO)
#endif /* SERRANO */
/*
* check if we caused any errors during cleanup
*/
if (clear_errors(&ch_flt)) {
pr_reason[0] = '\0';
NULL);
}
}
/*
*/
static void
{
char pr_reason[MAX_REASON_STRING];
pr_reason[0] = '\0';
/*
* Get the CPU log out info for Disrupting Trap.
*/
} else {
}
#if defined(SERRANO)
#endif /* SERRANO */
/*
* The trap handler does it for CEEN enabled errors
* so we need to do it here.
*/
}
/*
* FRC: Can't scrub memory as we don't have AFAR for Jalapeno.
* For Serrano, even thou we do have the AFAR, we still do the
* scrub on the RCE side since that's where the error type can
* be properly classified as intermittent, persistent, etc.
*
* Must scrub memory before cpu_queue_events, as scrubbing memory sets
* the flt_status bits.
*/
}
#else /* JALAPENO || SERRANO */
/*
* Must scrub memory before cpu_queue_events, as scrubbing memory sets
* the flt_status bits.
*/
}
}
#endif /* JALAPENO || SERRANO */
/*
* Update flt_prot if this error occurred under on_trap protection.
*/
/*
* Queue events on the async event queue, one event per error bit.
*/
}
/*
* Zero out + invalidate CPU logout.
*/
if (clop) {
}
/*
* If either a CPC, WDC or EDC error has occurred while CEEN
* was disabled, we need to flush either the entire
* E$ or an E$ line.
*/
#else /* JALAPENO || SERRANO */
#endif /* JALAPENO || SERRANO */
}
/*
* depending on the error type, we determine whether we
* need to flush the entire ecache or just a line.
*/
static int
{
/*
* If we got multiple errors, no point in trying
* the individual cases, just flush the whole cache
*/
return (ECACHE_FLUSH_ALL);
}
/*
* If either a CPC, WDC or EDC error has occurred while CEEN
* was disabled, we need to flush entire E$. We can't just
* flush the cache line affected as the ME bit
* is not set when multiple correctable errors of the same
* type occur, so we might have multiple CPC or EDC errors,
* with only the first recorded.
*/
#else /* JALAPENO || SERRANO */
C_AFSR_L3_EDC | C_AFSR_L3_WDC)) {
#endif /* JALAPENO || SERRANO */
return (ECACHE_FLUSH_ALL);
}
/*
* If only UE or RUE is set, flush the Ecache line, otherwise
* flush the entire Ecache.
*/
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
#else /* JALAPENO || SERRANO */
/*
* If UE only is set, flush the Ecache line, otherwise
* flush the entire Ecache.
*/
C_AFSR_UE) {
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
#endif /* JALAPENO || SERRANO */
/*
* EDU: If EDU only is set, flush the ecache line, otherwise
* flush the entire Ecache.
*/
if (((afsr_errs & ~C_AFSR_EDU) == 0) ||
((afsr_errs & ~C_AFSR_L3_EDU) == 0)) {
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
/*
* BERR: If BERR only is set, flush the Ecache line, otherwise
* flush the entire Ecache.
*/
if (afsr_errs & C_AFSR_BERR) {
if ((afsr_errs & ~C_AFSR_BERR) == 0) {
return (ECACHE_FLUSH_LINE);
} else {
return (ECACHE_FLUSH_ALL);
}
}
return (0);
}
void
{
int ecache_flush_flag =
/*
* Flush Ecache line or entire Ecache based on above checks.
*/
if (ecache_flush_flag == ECACHE_FLUSH_ALL)
else if (ecache_flush_flag == ECACHE_FLUSH_LINE) {
}
}
/*
* Extract the PA portion from the E$ tag.
*/
{
return (PN_L3TAG_TO_PA(tag));
else
}
/*
* Convert the E$ tag PA into an E$ subblock index.
*/
static int
{
/* Panther has only one subblock per line */
return (0);
else
}
/*
* All subblocks in an E$ line must be invalid for
* the line to be invalid.
*/
int
{
return (PN_L3_LINE_INVALID(tag));
else
}
/*
* Extract state bits for a subblock given the tag. Note that for Panther
* this works on both l2 and l3 tags.
*/
static int
{
return (tag & CH_ECSTATE_MASK);
else
}
/*
* Cpu specific initialization.
*/
void
cpu_mp_init(void)
{
#ifdef CHEETAHPLUS_ERRATUM_25
if (cheetah_sendmondo_recover) {
}
#endif
}
void
{
char unum[UNUM_NAMLEN];
int synd_code;
return;
} else {
}
/*
* Create the scheme "cpu" FMRI.
*/
case CHEETAH_IMPL:
break;
case CHEETAH_PLUS_IMPL:
break;
case JALAPENO_IMPL:
break;
case SERRANO_IMPL:
break;
case JAGUAR_IMPL:
break;
case PANTHER_IMPL:
break;
default:
break;
}
/*
* Encode all the common data into the ereport.
*/
/*
* Encode the error specific data that was saved in
* the async_flt structure into the ereport.
*/
} else {
}
/*
* Send the enhanced error information (plat_ecc_error2_data_t)
* to the SC olny if it can process it.
*/
if (&plat_ecc_capability_sc_get &&
if (msg_type != PLAT_ECC_ERROR2_NONE) {
/*
* If afar status is not invalid do a unum lookup.
*/
if (plat_ecc_ch_flt.ecaf_afar_status !=
(void) cpu_get_mem_unum_synd(synd_code,
} else {
unum[0] = '\0';
}
if (&plat_log_fruid_error2)
}
}
}
void
{
int status;
}
void
{
}
/*
* This routine may be called by the IO module, but does not do
* anything in this cpu module. The SERD algorithm is handled by
* cpumem-diagnosis engine instead.
*/
/*ARGSUSED*/
void
{}
void
{
/*
* Set hw copy limits.
*
* of these settings.
*
* At this time, ecache size seems only mildly relevant.
* We seem to run into issues with the d-cache and stalls
* we see on misses.
*
* Cycle measurement indicates that 2 byte aligned copies fare
* little better than doing things with VIS at around 512 bytes.
* 4 byte aligned shows promise until around 1024 bytes. 8 Byte
* aligned is faster whenever the source and destination data
* in cache and the total size is less than 2 Kbytes. The 2K
* limit seems to be driven by the 2K write cache.
* When more than 2K of copies are done in non-VIS mode, stores
* backup in the write cache. In VIS mode, the write cache is
* bypassed, allowing faster cache-line writes aligned on cache
* boundaries.
*
* In addition, in non-VIS mode, there is no prefetching, so
* for larger copies, the advantage of prefetching to avoid even
* occasional cache misses is enough to justify using the VIS code.
*
* During testing, it was discovered that netbench ran 3% slower
* when hw_copy_limit_8 was 2K or larger. Apparently for server
* applications, data is only used once (copied to the output
* buffer, then copied by the network device off the system). Using
* the VIS copy saves more L2 cache state. Network copies are
* around 1.3K to 1.5K in size for historical reasons.
*
* Therefore, a limit of 1K bytes will be used for the 8 byte
* aligned copy even for large caches and 8 MB ecache. The
* infrastructure to allow different limits for different sized
* caches is kept to allow further tuning in later releases.
*/
if (min_ecache_size == 0 && use_hw_bcopy) {
/*
* is read.
* Could skip the checks for zero but this lets us
* preserve any debugger rewrites.
*/
if (hw_copy_limit_1 == 0) {
}
if (hw_copy_limit_2 == 0) {
}
if (hw_copy_limit_4 == 0) {
}
if (hw_copy_limit_8 == 0) {
}
} else {
/*
* been parsed. One CPU has already been initialized.
* of our values.
*/
if (ecache_size == min_ecache_size) {
/*
* Same size ecache. We do nothing unless we
* have a pessimistic ecache setting. In that
* case we become more optimistic (if the cache is
* large enough).
*/
/*
* Need to adjust hw_copy_limit* from our
* pessimistic uniprocessor value to a more
* optimistic UP value *iff* it hasn't been
* reset.
*/
if ((ecache_size > 1048576) &&
(priv_hcl_8 == hw_copy_limit_8)) {
if (ecache_size <= 2097152)
hw_copy_limit_8 = 4 *
else if (ecache_size <= 4194304)
hw_copy_limit_8 = 4 *
else
hw_copy_limit_8 = 4 *
}
}
} else if (ecache_size < min_ecache_size) {
/*
* A different ecache size. Can this even happen?
*/
if (priv_hcl_8 == hw_copy_limit_8) {
/*
* The previous value that we set
* is unchanged (i.e., it hasn't been
*/
if (ecache_size <= 1048576)
hw_copy_limit_8 = 8 *
else if (ecache_size <= 2097152)
hw_copy_limit_8 = 8 *
else if (ecache_size <= 4194304)
hw_copy_limit_8 = 8 *
else
hw_copy_limit_8 = 10 *
}
}
}
}
/*
* Called from illegal instruction trap handler to see if we can attribute
* the trap to a fpras check.
*/
int
{
int op;
struct fpras_chkfngrp *cgp;
if (fpras_chkfngrps == NULL)
return (0);
break;
}
if (op == FPRAS_NCOPYOPS)
return (0);
/*
* This is an fpRAS failure caught through an illegal
* instruction - trampoline.
*/
return (1);
}
/*
* fpras_failure is called when a fpras check detects a bad calculation
* result or an illegal instruction trap is attributed to an fpras
* check. In all cases we are still bound to CPU.
*/
int
{
int i;
/*
* We're running on a sick CPU. Avoid further FPU use at least for
* the time in which we dispatch an ereport and (if applicable) panic.
*/
use_hw_bcopy = use_hw_bzero = 0;
hw_copy_limit_8 = 0;
/*
* We must panic if the copy operation had no lofault protection -
* ie, don't panic for copyin, copyout, kcopy and bcopy called
* under on_fault and do panic for unprotected bcopy and hwblkpagecopy.
*/
/*
* XOR the source instruction block with the copied instruction
* block - this will show us which bit(s) are corrupted.
*/
} else {
}
/*
* caller has used on_fault. We will flag the error so that
* the process may be killed The trap_async_hwerr mechanism will
* take appropriate further action (such as a reboot, contract
* notification etc). Since we may be continuing we will
* restore the global hardware copy acceleration switches.
*
* When we return from this function to the copy function we want to
* avoid potentially bad data being used, ie we want the affected
* copy function to return an error. The caller should therefore
* invoke its lofault handler (which always exists for these functions)
* which will return the appropriate error.
*/
return (1);
}
#define VIS_BLOCKSIZE 64
int
/*
 * NOTE(review): signature and most of the body are missing from this
 * view. Given the adjacent VIS_BLOCKSIZE define and the use of
 * 'watched'/'ret', this looks like a watchpoint-aware wrapper
 * (possibly dtrace_blksuword32_err) that disables a watched address
 * range, performs the operation, then re-enables the watchpoint --
 * confirm against the complete source.
 */
{
if (watched)
return (ret);
}
/*
 * Called when a cpu enters the CPU_FAULTED state (by the cpu placing the
 * faulted cpu into that state). Cross-trap to the faulted cpu to clear
 * CEEN from the EER to disable traps for further disrupting error types
 * on that cpu. We could cross-call instead, but that has a larger
 * instruction and data footprint than cross-trapping, and the cpu is known
 * to be faulted.
 */
void
/*
 * NOTE(review): the signature (presumably cpu_faulted_enter(struct cpu *))
 * and the cross-trap call described above are missing from this view --
 * confirm against the complete source.
 */
{
}
/*
 * Called when a cpu leaves the CPU_FAULTED state to return to one of
 * offline, spare, or online (by the cpu requesting this state change).
 * First we cross-call to clear the AFSR (and AFSR_EXT on Panther) of
 * disrupting error bits that have accumulated without trapping, then
 * we cross-trap to re-enable CEEN controlled traps.
 */
void
/*
 * NOTE(review): the signature (presumably cpu_faulted_exit(struct cpu *))
 * and the first part of the cross-call invocation are missing from this
 * view; the line below is a dangling argument-list continuation --
 * confirm against the complete source.
 */
{
(uint64_t)&cpu_error_regs, 0);
}
/*
 * Return 1 if the errors in ch_flt's AFSR are secondary errors caused by
 * the errors in the original AFSR, 0 otherwise.
 *
 * For all procs if the initial error was a BERR or TO, then it is possible
 * that we may have caused a secondary BERR or TO in the process of logging the
 * inital error via cpu_run_bus_error_handlers(). If this is the case then
 * if the request was protected then a panic is still not necessary, if not
 * protected then aft_panic is already set - so either way there's no need
 * to set aft_panic for the secondary error.
 *
 * For Cheetah and Jalapeno, if the original error was a UE which occurred on
 * a store merge, then the error handling code will call cpu_deferred_error().
 * When clear_errors() is called, it will determine that secondary errors have
 * occurred - in particular, the store merge also caused a EDU and WDU that
 * weren't discovered until this point.
 *
 * We do three checks to verify that we are in this case. If we pass all three
 * checks, we return 1 to indicate that we should not panic. If any unexpected
 * errors occur, we return 0.
 *
 * For Cheetah+ and derivative procs, the store merge causes a DUE, which is
 * handled in cpu_disrupting_errors(). Since this function is not even called
 * in the case we are interested in, we just return 0 for these processors.
 */
/*ARGSUSED*/
static int
/*
 * NOTE(review): the signature line is missing from this view (presumably
 * cpu_check_secondary_errors(ch_async_flt_t *, uint64_t t_afsr_errs,
 * uint64_t t_afar) or similar) -- confirm against the complete source.
 */
{
/* NOTE(review): local variable declarations appear to be missing from
 * both arms of this conditional block in this view. */
#if defined(CHEETAH_PLUS)
#else /* CHEETAH_PLUS */
#endif /* CHEETAH_PLUS */
/*
 * Was the original error a BERR or TO and only a BERR or TO
 * (multiple errors are also OK)
 */
/*
 * Is the new error a BERR or TO and only a BERR or TO
 * (multiple errors are also OK)
 */
/* NOTE(review): the two guarding conditions described above are missing
 * from this view; this return is presumably inside their combined
 * success branch -- verify. */
return (1);
}
#if defined(CHEETAH_PLUS)
/* Cheetah+ store-merge secondaries are handled elsewhere (see header). */
return (0);
#else /* CHEETAH_PLUS */
/*
 *
 * Check the original error was a UE, and only a UE. Note that
 * the ME bit will cause us to fail this check.
 */
if (t_afsr_errs != C_AFSR_UE)
return (0);
/*
 */
/* NOTE(review): a second check (per the three-check scheme described in
 * the header comment) is missing from this view -- likely validating
 * that the new errors are only the expected EDU|WDU secondaries. */
return (0);
/*
 * Check the AFAR of the original error and secondary errors
 * match to the 64-byte boundary
 */
/* NOTE(review): the AFAR-comparison condition is missing here. */
return (0);
/*
 * We've passed all the checks, so it's a secondary error!
 */
return (1);
#endif /* CHEETAH_PLUS */
}
/*
 * Translate the flt_bit or flt_type into an error type. First, flt_bit
 * is checked for any valid errors. If found, the error type is
 * returned. If not found, the flt_type is checked for L1$ parity errors.
 */
/*ARGSUSED*/
static uint8_t
/*
 * NOTE(review): the signature line is missing from this view (presumably
 * flt_to_error_type(struct async_flt *) or similar) -- confirm against
 * the complete source.
 */
{
#if defined(JALAPENO)
/*
 * Currently, logging errors to the SC is not supported on Jalapeno
 */
return (PLAT_ECC_ERROR2_NONE);
#else
/* NOTE(review): the switch header over the fault's AFSR flt_bit is
 * missing from this view; the cases below map individual AFSR error
 * bits to PLAT_ECC_ERROR2_* codes for SC logging. */
case C_AFSR_CE:
return (PLAT_ECC_ERROR2_CE);
/* Correctable L2$ data errors all map to one L2 CE code. */
case C_AFSR_UCC:
case C_AFSR_EDC:
case C_AFSR_WDC:
case C_AFSR_CPC:
return (PLAT_ECC_ERROR2_L2_CE);
case C_AFSR_EMC:
return (PLAT_ECC_ERROR2_EMC);
case C_AFSR_IVC:
return (PLAT_ECC_ERROR2_IVC);
case C_AFSR_UE:
return (PLAT_ECC_ERROR2_UE);
/* Uncorrectable L2$ data errors all map to one L2 UE code. */
case C_AFSR_UCU:
case C_AFSR_EDU:
case C_AFSR_WDU:
case C_AFSR_CPU:
return (PLAT_ECC_ERROR2_L2_UE);
case C_AFSR_IVU:
return (PLAT_ECC_ERROR2_IVU);
case C_AFSR_TO:
return (PLAT_ECC_ERROR2_TO);
case C_AFSR_BERR:
return (PLAT_ECC_ERROR2_BERR);
#if defined(CHEETAH_PLUS)
/* Cheetah+/Panther-only bits: L3$ and additional error classes. */
case C_AFSR_L3_EDC:
case C_AFSR_L3_UCC:
case C_AFSR_L3_CPC:
case C_AFSR_L3_WDC:
return (PLAT_ECC_ERROR2_L3_CE);
case C_AFSR_IMC:
return (PLAT_ECC_ERROR2_IMC);
case C_AFSR_TSCE:
return (PLAT_ECC_ERROR2_L2_TSCE);
case C_AFSR_THCE:
return (PLAT_ECC_ERROR2_L2_THCE);
case C_AFSR_L3_MECC:
return (PLAT_ECC_ERROR2_L3_MECC);
case C_AFSR_L3_THCE:
return (PLAT_ECC_ERROR2_L3_THCE);
case C_AFSR_L3_CPU:
case C_AFSR_L3_EDU:
case C_AFSR_L3_UCU:
case C_AFSR_L3_WDU:
return (PLAT_ECC_ERROR2_L3_UE);
case C_AFSR_DUE:
return (PLAT_ECC_ERROR2_DUE);
case C_AFSR_DTO:
return (PLAT_ECC_ERROR2_DTO);
case C_AFSR_DBERR:
return (PLAT_ECC_ERROR2_DBERR);
#endif /* CHEETAH_PLUS */
default:
/* NOTE(review): lines appear to be missing here -- likely the end of
 * the flt_bit switch followed by a second switch over flt_type for
 * L1$ parity errors (per the header comment); the cases below belong
 * to that second switch. Verify against the complete source. */
#if defined(CPU_IMP_L1_CACHE_PARITY)
case CPU_IC_PARITY:
return (PLAT_ECC_ERROR2_IPE);
case CPU_DC_PARITY:
/* NOTE(review): the D$-parity case presumably distinguishes P$
 * parity from D$ parity; the intervening condition is missing. */
return (PLAT_ECC_ERROR2_PCACHE);
}
}
return (PLAT_ECC_ERROR2_DPE);
#endif /* CPU_IMP_L1_CACHE_PARITY */
case CPU_ITLB_PARITY:
return (PLAT_ECC_ERROR2_ITLB);
case CPU_DTLB_PARITY:
return (PLAT_ECC_ERROR2_DTLB);
default:
return (PLAT_ECC_ERROR2_NONE);
}
}
#endif /* JALAPENO */
}