/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Support routines for managing per-CPU state.
*/
#include <cmd_cpu.h>
#ifdef sun4u
#include <cmd_ecache.h>
#endif /* sun4u */
#include <cmd_mem.h>
#include <cmd.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <errno.h>
#include <kstat.h>
#ifdef sun4u
#include <sys/cheetahregs.h>
#include <cmd_opl.h>
#include <cmd_Lxcache.h>
#else /* sun4u */
#include <sys/niagararegs.h>
#include <cmd_hc_sun4v.h>
#endif /* sun4u */
/* Must be in sync with cmd_cpu_type_t */
static const char *const cpu_names[] = {
NULL,
"ultraSPARC-III",
"ultraSPARC-IIIplus",
"ultraSPARC-IIIi",
"ultraSPARC-IV",
"ultraSPARC-IVplus",
"ultraSPARC-IIIiplus",
"ultraSPARC-T1",
"SPARC64-VI",
"SPARC64-VII",
"ultraSPARC-T2",
"ultraSPARC-T2plus"
};
/*
 * This needs to be in sync with cpu_family_t.
 */
static faminfo_t fam_info_tbl[] = {
	{ CMD_CPU_FAM_UNSUPPORTED,	B_FALSE },
	{ CMD_CPU_FAM_CHEETAH,		B_TRUE },
	{ CMD_CPU_FAM_NIAGARA,		B_FALSE },
	{ CMD_CPU_FAM_SPARC64,		B_FALSE }
};
const char *
cmd_cpu_type2name(fmd_hdl_t *hdl, cmd_cpu_type_t type)
{
	if (type < 1 || type >= sizeof (cpu_names) / sizeof (char *))
		fmd_hdl_abort(hdl, "illegal CPU type %d\n", type);

	return (cpu_names[type]);
}

static cmd_cpu_type_t
cpu_nname2type(fmd_hdl_t *hdl, const char *name, size_t n)
{
	int i;

	for (i = 1; i < sizeof (cpu_names) / sizeof (char *); i++) {
		if (strlen(cpu_names[i]) == n &&
		    strncmp(cpu_names[i], name, n) == 0)
			return (i);
	}

	fmd_hdl_abort(hdl, "illegal CPU name %.*s\n", n, name);
	/*NOTREACHED*/
	return (0);
}
extern const char *fmd_fmri_get_platform();

#define	is_starcat	(strcmp(fmd_fmri_get_platform(), \
	"SUNW,Sun-Fire-15000") == 0)
#define	is_serengeti	(strcmp(fmd_fmri_get_platform(), \
	"SUNW,Sun-Fire") == 0)
/*
 * Compute the cpuid stepping between siblings at the given level.  (The
 * function name and parameter list here are assumptions; the original
 * identifier was lost.)
 */
static void
cpu_id_step(cmd_cpu_type_t type, cmd_cpu_level_t level, uint32_t *cpustep)
{
	switch (type) {
#ifdef sun4u
	case CPU_ULTRASPARC_IVplus:
		switch (level) {
		case CMD_CPU_LEVEL_CORE:
			if (is_starcat)
				*cpustep = US4P_SCAT_CPU_CORE_STEP;
			else if (is_serengeti)
				*cpustep = US4P_SGTI_CPU_CORE_STEP;
			else
				*cpustep = US4P_DAKC_CPU_CORE_STEP;
			return;
		default:
			*cpustep = 1;
			return;
		}
#else /* i.e. sun4v */
#define UST1_CPUS_PER_CORE 4
#define UST1_CPU_CORE_STEP 1
#define UST1_CPUS_PER_CHIP 32
#define UST1_CPU_CHIP_STEP 1
#define UST2_CPUS_PER_CORE 8
#define UST2_CPU_CORE_STEP 1
#define UST2_CPUS_PER_CHIP 64
#define UST2_CPU_CHIP_STEP 1
	case CPU_ULTRASPARC_T1:
		switch (level) {
		case CMD_CPU_LEVEL_CORE:
			*cpustep = UST1_CPU_CORE_STEP;
			return;
		case CMD_CPU_LEVEL_CHIP:
			*cpustep = UST1_CPU_CHIP_STEP;
			return;
		default:
			*cpustep = 1;
			return;
		}
	case CPU_ULTRASPARC_T2:
	case CPU_ULTRASPARC_T2plus:
		switch (level) {
		case CMD_CPU_LEVEL_CORE:
			*cpustep = UST2_CPU_CORE_STEP;
			return;
		case CMD_CPU_LEVEL_CHIP:
			*cpustep = UST2_CPU_CHIP_STEP;
			return;
		default:
			*cpustep = 1;
			return;
		}
#endif /* sun4u */
default:
*cpustep = 1;
return;
}
}
uint32_t
cmd_cpu2core(uint32_t cpuid, cmd_cpu_type_t type, cmd_cpu_level_t level)
{
	switch (type) {
#ifdef sun4u
case CPU_ULTRASPARC_IVplus:
switch (level) {
case CMD_CPU_LEVEL_CORE:
if (is_starcat)
return ((cpuid /
(cpuid % US4P_SCAT_CPU_CORE_STEP));
else if (is_serengeti)
return (cpuid % US4P_SGTI_CPU_CORE_STEP);
else
return (cpuid % US4P_DAKC_CPU_CORE_STEP);
default:
return (cpuid);
}
#else /* i.e. sun4v */
case CPU_ULTRASPARC_T1:
switch (level) {
case CMD_CPU_LEVEL_CORE:
return (cpuid/UST1_CPUS_PER_CORE);
case CMD_CPU_LEVEL_CHIP:
return (cpuid/UST1_CPUS_PER_CHIP);
default:
return (cpuid);
}
case CPU_ULTRASPARC_T2:
case CPU_ULTRASPARC_T2plus:
switch (level) {
case CMD_CPU_LEVEL_CORE:
return (cpuid/UST2_CPUS_PER_CORE);
case CMD_CPU_LEVEL_CHIP:
return (cpuid/UST2_CPUS_PER_CHIP);
default:
return (cpuid);
}
#endif /* sun4u */
default:
return (cpuid);
}
}
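/*
 * Worked example (sun4v values from the defines above): with
 * UST1_CPUS_PER_CORE == 4 and UST1_CPUS_PER_CHIP == 32, cpuid 13 maps to
 * core 13 / 4 == 3 and to chip 13 / 32 == 0.
 */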
#ifdef sun4u
static void
{
/*
* The UE cache may change size. fmd expects statically-sized buffers,
* so we must delete and re-create it if the size has changed from the
* last time it was written.
*/
}
}
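/*
 * Minimal sketch of the delete-and-re-create pattern described in the
 * comment above (the helper name and parameters are assumptions): if a
 * persistent buffer exists with a stale size, it must be destroyed and
 * re-created before it can be rewritten.
 */
static void
uec_buf_rewrite_sketch(fmd_hdl_t *hdl, const char *bufname, void *data,
    size_t sz)
{
	size_t osz = fmd_buf_size(hdl, NULL, bufname);

	if (osz != sz) {
		if (osz != 0)
			fmd_buf_destroy(hdl, NULL, bufname);
		fmd_buf_create(hdl, NULL, bufname, sz);
	}

	fmd_buf_write(hdl, NULL, bufname, data, sz);
}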
static void
const char *fmt, ...)
{
}
static void
{
}
}
static void
{
return;
if (destroy)
}
static void
{
}
cpu->cpu_uec_flush = 0;
}
static void
{
return; /* nothing to flush */
return; /* don't flush the UE cache unless we can flush E$ */
}
/*
* If there's already an old UE cache, we're racing with another
* flush. For safety, we'll add the current contents of the
* cache to the existing old cache.
*/
} else {
}
if (cpu->cpu_uec_flush != 0)
}
void
{
int i;
return; /* already there */
}
}
/* no space - resize the cache */
}
}
}
void
{
}
int
{
int i;
/*
* The UE cache works as long as we are able to add an entry for every
* UE seen by a given CPU. If we see a UE with a non-valid AFAR, we
* can't guarantee our ability to filter a corresponding xxU, and must,
* for safety, assume that every subsequent xxU (until the E$ and UE
* cache are flushed) has a matching UE.
*/
return (1);
return (1);
}
return (1);
}
return (0);
}
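/*
 * Minimal sketch of the filtering policy described above (the structure
 * and names are assumptions, not the real cmd_cpu_uec_t layout): once a
 * UE with an invalid AFAR has been seen, every xxU must be presumed to
 * have a matching UE; otherwise the recorded AFARs are searched.
 */
typedef struct uec_sketch {
	int ues_allmatch;	/* a UE with an invalid AFAR was seen */
	uint64_t *ues_afars;	/* AFARs of UEs seen by this CPU */
	size_t ues_nent;	/* number of valid entries */
} uec_sketch_t;

static int
uec_match_sketch(const uec_sketch_t *ues, uint64_t afar)
{
	size_t i;

	if (ues->ues_allmatch)
		return (1);	/* cannot filter safely -- assume a match */

	for (i = 0; i < ues->ues_nent; i++) {
		if (ues->ues_afars[i] == afar)
			return (1);
	}

	return (0);
}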
#endif /* sun4u */
void
{
sizeof (cmd_xr_t));
}
static cmd_xr_hdlr_f *
cmd_xr_id2hdlr(fmd_hdl_t *hdl, uint_t id)
{
	switch (id) {
	case CMD_XR_HDLR_XXC:
		return (cmd_xxc_resolve);
	case CMD_XR_HDLR_XXU:
		return (cmd_xxu_resolve);
	case CMD_XR_HDLR_NOP:
		return (cmd_nop_resolve);
	default:
		fmd_hdl_abort(hdl, "cmd_xr_id2hdlr called with bad hdlrid %x\n",
		    id);
	}

	return (NULL);
}
cmd_xr_t *
{
const char *uuid;
int err = 0;
#ifdef sun4u
#endif
&rsrc);
if (err != 0) {
return (NULL);
}
}
return (xr);
}
{
return (CMD_EVD_OK);
}
static void
{
}
void
{
}
}
static void
{
NULL) {
}
/*
* fmd is still in the process of starting up. If we reschedule this
* event with the normal redelivery timeout, it'll get redelivered
* before initialization has completed, we'll potentially fail to
* match the train, deref() the waiter (causing any subsequent side-
* effects to miss the waiter), and use this ereport to blame the CPU.
* The other side-effects will blame the CPU too, since we'll have
* deref()'d the waiter out of existence. We can get up to three
* additions to the SERD engine this way, which is bad. To keep that
* from happening, we're going to schedule an arbitrarily long timeout,
* which *should* be long enough. It's pretty bad, but there's no
* real way to keep the other side-effects from taking out the CPU.
*/
}
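/*
 * Sketch of the workaround described above (the delay value and helper
 * name are assumptions): rather than the normal redelivery timeout,
 * install an arbitrarily long timer so fmd can finish initializing
 * before the ereport comes back.
 */
#define	XXCU_LONG_REDELIVERY	(30LL * NANOSEC)	/* assumed delay */

static void
xxcu_long_timer_sketch(fmd_hdl_t *hdl, void *arg, fmd_event_t *ep)
{
	(void) fmd_timer_install(hdl, arg, ep, XXCU_LONG_REDELIVERY);
}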
typedef struct cmd_xxcu_train {
	cmd_errcl_t tr_mask;	/* errors we must see to match this train */
	cmd_errcl_t tr_cause;	/* the error at the root of this train */
} cmd_xxcu_train_t;

#define	CMD_TRAIN(cause, effects)	{ (cause) | (effects), (cause) }

static const cmd_xxcu_train_t cmd_xxcu_trains[] = {
#ifdef sun4u
/* UCC: WDC */
/* UCU: WDU, WDU+L3_WDU */
/* EDC: WDC */
/* EDU: WDU, WDU+L3_WDU */
/* CPC: WDC, EDC+WDC, UCC+WDC, EDC+UCC+WDC */
/* CPU: WDU, WDU+L3_WDU, UCU+WDU, UCU+WDU+L3_WDU */
/* CPU: EDU+WDU, EDU+WDU+L3_WDU, EDU+UCU+WDU, EDU+UCU+WDU+L3_WDU */
/* WDU: L3_WDU */
/* L3_UCC: WDC+(zero or more of EDC, CPC, UCC) */
/* L3_UCU: WDU+(zero or more of EDU, CPU, UCU) */
/* L3_UCU: WDU+(zero or more of EDU, CPU, UCU)+L3_WDU */
/* L3_EDC: WDC+(zero or more of EDC, CPC, UCC) */
/* L3_EDU: WDU+(zero or more of EDU, CPU, UCU) */
/* L3_EDU: WDU+(zero or more of EDU, CPU, UCU)+L3_WDU */
/* L3_CPC: L3_WDC */
/* L3_CPC: L3_EDC+ WDC+(zero or more of EDC, CPC, UCC) */
/* L3_CPC: L3_UCC+WDC+(zero or more of EDC, CPC, UCC) */
/* L3_CPU: L3_WDU */
/* L3_CPU: L3_EDU+WDU+(zero or more of EDU, CPU, UCU) */
/* L3_CPU: L3_UCU+WDU+(zero or more of EDU, CPU, UCU) */
/* L3_CPU: L3_EDU+WDU+(zero or more of EDU, CPU, UCU)+L3_WDU */
/* L3_CPU: L3_UCU+WDU+(zero or more of EDU, CPU, UCU)+L3_WDU */
| CMD_ERRCL_L3_WDU),
#else /* sun4u */
	/*
	 * sun4v also has the following trains, but the train algorithm
	 * does an exhaustive search and compares all pairs in the train
	 * mask, so we don't need to define these trains:
	 *	dl2nd->ldwu (wbue), dcdp
	 *	il2nd->ldwu (wbue), icdp
	 *	dxl2u->ldwu (wbue), dcdp
	 *	ixl2u->ldwu (wbue), icdp
	 */
#endif /* sun4u */
CMD_TRAIN(0, 0)
};
cmd_errcl_t
cmd_xxcu_train_match(cmd_errcl_t mask)
{
	int i;

	for (i = 0; cmd_xxcu_trains[i].tr_mask != 0; i++) {
		if (cmd_xxcu_trains[i].tr_mask == mask)
			return (cmd_xxcu_trains[i].tr_cause);
	}

	return (0);
}
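/*
 * Usage sketch (error-class values are illustrative): a waiter that has
 * accumulated both a UCC and a WDC asks whether some train explains the
 * pair; a nonzero result names the root cause, and only that error is
 * diagnosed.
 *
 *	cmd_errcl_t mask = CMD_ERRCL_UCC | CMD_ERRCL_WDC;
 *
 *	if (cmd_xxcu_train_match(mask) != 0)
 *		... diagnose only the train's root cause ...
 */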
{
int i;
for (i = 0; i < cmd.cmd_xxcu_ntrw; i++) {
return (trw);
}
}
return (NULL);
}
void
{
}
/*ARGSUSED*/
void
{
}
void
{
}
void
{
		/*
		 * Previous size == current size.  In the absence of
		 * versioning, assume that the structure and the number
		 * of elements have not changed.
		 */
} else {
/*
* Previous size != current size. Something has changed;
* hence we cannot rely on the contents of this buffer.
* Delete the buffer and start fresh.
*/
}
}
char *
cmd_cpu_serdnm_create(fmd_hdl_t *hdl, cmd_cpu_t *cpu, const char *serdbase)
{
	char *nm;
	const char *fmt;
	size_t sz;

	if (cpu->cpu_level == CMD_CPU_LEVEL_THREAD) {
		fmt = "cpu_%d_%s_serd";
		sz = snprintf(NULL, 0, fmt, cpu->cpu_cpuid, serdbase) + 1;
		nm = fmd_hdl_alloc(hdl, sz, FMD_SLEEP);
		(void) snprintf(nm, sz, fmt, cpu->cpu_cpuid, serdbase);
	} else {
		fmt = "cpu_%d_%d_%s_serd";
		sz = snprintf(NULL, 0, fmt, cpu->cpu_cpuid, cpu->cpu_level,
		    serdbase) + 1;
		nm = fmd_hdl_alloc(hdl, sz, FMD_SLEEP);
		(void) snprintf(nm, sz, fmt, cpu->cpu_cpuid, cpu->cpu_level,
		    serdbase);
	}
	return (nm);
}
/*
 * cmd_cpu_create_faultlist is a combination of the former
 * cmd_cpu_create_fault and fmd_case_add_suspect.  If a 'cpu' structure
 * represents a set of threads (level > CMD_CPU_LEVEL_THREAD), then we must
 * add multiple faults to this case, under loop control.  A call to
 * cmd_cpu_create_faultlist replaces the former sequence
 *
 *	flt = cmd_cpu_create_fault(...);
 *	fmd_case_add_suspect(hdl, cc->cp, flt);
 */
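/*
 * Illustrative usage (the argument list here is an assumption based on
 * the call sites in this file, not a definitive signature):
 *
 *	cmd_cpu_create_faultlist(hdl, cc->cp, cpu, "fault.cpu.<type>.<unit>",
 *	    rsrc, cert);
 */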
void
{
#ifdef sun4v
char *loc;
#endif
#ifdef sun4v
	/*
	 * Add a motherboard fault to the T5440 lfu suspect list.
	 */
/* get mb fmri from libtopo */
cert, "MB");
}
}
#endif
&asru, 0) != 0) {
"ASRU for thread in core\n");
}
(void) nvlist_remove_all(asru,
if (nvlist_add_uint32(asru,
FM_FMRI_CPU_ID, i) != 0) {
"unable to create thread struct\n");
}
}
continue;
#ifdef sun4v
#endif /* sun4v */
}
} else {
#ifdef sun4v
#endif /* sun4v */
}
#ifdef sun4v
#endif
}
static void
{
int i;
#ifdef sun4u
#endif
for (i = 0; i < sizeof (cmd_cpu_cases_t) / sizeof (cmd_case_t); i++) {
}
}
}
#ifdef sun4u
	/*
	 * Free the associated Lxcache structures as well.
	 */
}
#endif /* sun4u */
if (destroy)
}
void
{
}
static cmd_cpu_t *
{
return (cpu);
}
return (NULL);
}
static nvlist_t *
{
return (NULL);
}
return (nvlp);
}
static void
cpu_buf_write(fmd_hdl_t *hdl, cmd_cpu_t *cpu)
{
	if (fmd_buf_size(hdl, NULL, cpu->cpu_bufname) !=
	    sizeof (cmd_cpu_pers_t))
		fmd_buf_destroy(hdl, NULL, cpu->cpu_bufname);

	fmd_buf_write(hdl, NULL, cpu->cpu_bufname, &cpu->cpu_pers,
	    sizeof (cmd_cpu_pers_t));
}
static void
{
/*
* We need to be tolerant of leaked CPU buffers, as their effects can
* be severe. Consider the following scenario: we create a version 0
* cmd_cpu_t in response to some error, commit it to a persistent
* buffer, and then leak it. We then upgrade, and restart the DE using
* version 1 cmd_cpu_t's. Another error comes along, for the same CPU
* whose struct was leaked. Not knowing about the leaked buffer, we
* create a new cmd_cpu_t for that CPU, and create a buffer for it. As
* the v1 cmd_cpu_t is smaller than the v0 cmd_cpu_t, fmd will use the
* pre-existing (leaked) buffer. We'll therefore have an x-byte, v1
* cmd_cpu_t in a y-byte buffer, where y > x. Upon the next DE restart,
* we'll attempt to restore the cmd_cpu_t, but will do version
* validation using the size of the buffer (y). This won't match what
* we're expecting (x), and the DE will abort.
*
* To protect against such a scenario, we're going to check for and
* remove the pre-existing cmd_cpu_t for this CPU, if one exists. While
* this won't fix the leak, it'll allow us to continue functioning
* properly in spite of it.
*/
	size_t sz;

	if ((sz = fmd_buf_size(hdl, NULL, cpu->cpu_bufname)) != 0 &&
	    sz != sizeof (cmd_cpu_pers_t)) {
		fmd_buf_destroy(hdl, NULL, cpu->cpu_bufname);
	}
}
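/*
 * Sketch of the restore-time validation that the leak above would defeat
 * (the helper name is an assumption): buffer size is the only version
 * discriminator, so a stale y-byte buffer holding an x-byte structure
 * aborts the DE at restore.
 */
static void
cpu_vers_check_sketch(fmd_hdl_t *hdl, const char *bufname)
{
	size_t sz = fmd_buf_size(hdl, NULL, bufname);

	if (sz != sizeof (cmd_cpu_pers_t) && sz != sizeof (cmd_cpu_0_t) &&
	    sz != sizeof (cmd_cpu_1_t) && sz != sizeof (cmd_cpu_2_t))
		fmd_hdl_abort(hdl, "unexpected cpu buffer size %u\n",
		    (uint_t)sz);
}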
static cmd_cpu_t *
{
/*
* No CPU state matches the CPU described in the ereport. Create a new
* one, add it to the list, and pass it back.
*/
} else {
}
#ifdef sun4u
#endif /* sun4u */
} else {
}
} else {
}
} else {
} else {
}
}
return (cpu);
}
/*
 * As its name implies, 'cpu_all_threads_invalid' determines whether all cpu
 * threads (level 0) contained within the cpu structure are invalid.
 * This is done by checking all the (level 0) threads which may be
 * contained within this chip, core, or thread; if all are invalid, return
 * FMD_B_TRUE; if any are valid, return FMD_B_FALSE.
 */
int
{
return (FMD_B_FALSE);
else return (FMD_B_TRUE);
} else {
"cannot add thread %d to asru\n", i);
}
return (FMD_B_FALSE);
}
}
}
return (FMD_B_TRUE);
}
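/*
 * Minimal sketch of the walk described above (the helper name and the
 * thread-count parameter are assumptions); the FM_FMRI_CPU_ID rewriting
 * mirrors the nvlist manipulation visible in cmd_cpu_create_faultlist.
 */
static int
cpu_threads_invalid_sketch(fmd_hdl_t *hdl, nvlist_t *asru, uint32_t base,
    uint32_t nthreads)
{
	uint32_t i;

	for (i = base; i < base + nthreads; i++) {
		(void) nvlist_remove_all(asru, FM_FMRI_CPU_ID);
		if (nvlist_add_uint32(asru, FM_FMRI_CPU_ID, i) != 0)
			return (FMD_B_FALSE);	/* be conservative */
		if (fmd_nvl_fmri_present(hdl, asru) > 0)
			return (FMD_B_FALSE);	/* a valid thread remains */
	}

	return (FMD_B_TRUE);
}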
/*
* Locate the state structure for this CPU, creating a new one if one doesn't
* already exist. Before passing it back, we also need to validate it against
* the current state of the world, checking to ensure that the CPU described by
* the ereport, the CPU indicated in the cmd_cpu_t, and the CPU currently
* residing at the indicated cpuid are the same. We do this by comparing the
* serial IDs from the three entities.
*/
{
return (NULL);
}
if (nvlist_lookup_pairs(asru, 0,
vers != CPU_SCHEME_VERSION1) ||
return (NULL);
}
/*
* 'cpuid' at this point refers to a thread, because it
* was extracted from a detector FMRI
*/
}
/*
* Check to see if the CPU described by the ereport has been removed
* from the system. If it has, return to the caller without a CPU.
*/
return (NULL);
}
}
return (cpu);
}
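/*
 * Sketch of the three-way serial-ID comparison described above (the
 * helper name and arguments are assumptions): the ereport's FMRI, the
 * saved cmd_cpu_t, and the FMRI of the CPU currently at that cpuid must
 * all carry the same serial before the saved state is trusted.
 */
static int
cpu_serials_agree(const char *ereport_serial, const char *saved_serial,
    const char *current_serial)
{
	return (strcmp(ereport_serial, saved_serial) == 0 &&
	    strcmp(saved_serial, current_serial) == 0);
}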
{
}
static cmd_cpu_t *
{
	if (oldsz != sizeof (cmd_cpu_0_t)) {
		fmd_hdl_abort(hdl, "size of state doesn't match size of "
		    "version 0 state (%u bytes).\n", sizeof (cmd_cpu_0_t));
	}
return (new);
}
static cmd_cpu_t *
{
	if (oldsz != sizeof (cmd_cpu_1_t)) {
		fmd_hdl_abort(hdl, "size of state doesn't match size of "
		    "version 1 state (%u bytes).\n", sizeof (cmd_cpu_1_t));
	}
return (new);
}
static cmd_cpu_t *
{
	if (oldsz != sizeof (cmd_cpu_2_t)) {
		fmd_hdl_abort(hdl, "size of state doesn't match size of "
		    "version 2 state (%u bytes).\n", sizeof (cmd_cpu_2_t));
	}
return (new);
}
static cmd_cpu_t *
{
	if (psz != sizeof (cmd_cpu_pers_t)) {
		fmd_hdl_abort(hdl, "size of state doesn't match size of "
		    "version 3 state (%u bytes).\n", sizeof (cmd_cpu_pers_t));
	}
return (cpu);
}
static void
cpu_case_restore(fmd_hdl_t *hdl, cmd_cpu_t *cpu, cmd_case_t *cc,
    fmd_case_t *cp, const char *serdbase)
{
	cmd_case_restore(hdl, cc, cp, cmd_cpu_serdnm_create(hdl, cpu,
	    serdbase));
}
{
break;
}
int migrated = 0;
"not found. Case is already solved or "
"closed\n",
return (NULL);
} else {
"does not exist in saved state\n",
}
"is out of bounds (is %u bytes)\n",
}
}
cpu->cpu_version);
if (CMD_CPU_VERSIONED(cpu)) {
switch (cpu->cpu_version) {
case CMD_CPU_VERSION_1:
cpusz);
migrated = 1;
break;
case CMD_CPU_VERSION_2:
cpusz);
migrated = 1;
break;
case CMD_CPU_VERSION_3:
cpusz);
break;
		default:
			fmd_hdl_abort(hdl, "unknown version (found %d) "
			    "for cpu state referenced by case %s.\n",
			    cpu->cpu_version, fmd_case_uuid(hdl, cp));
			break;
}
} else {
migrated = 1;
}
if (migrated) {
}
#ifdef sun4u
#endif /* sun4u */
}
return (cpu);
}
void *
{
return (NULL);
switch (ptr->ptr_subtype) {
case CMD_PTR_CPU_ICACHE:
break;
case CMD_PTR_CPU_DCACHE:
break;
case CMD_PTR_CPU_PCACHE:
break;
case CMD_PTR_CPU_ITLB:
break;
case CMD_PTR_CPU_DTLB:
break;
case CMD_PTR_CPU_L2DATA:
break;
	case CMD_PTR_CPU_L2DATA_UERETRY:
		/* No longer used -- discard */
		break;
case CMD_PTR_CPU_L2TAG:
break;
case CMD_PTR_CPU_L3DATA:
break;
	case CMD_PTR_CPU_L3DATA_UERETRY:
		/* No longer used -- discard */
		break;
case CMD_PTR_CPU_L3TAG:
break;
case CMD_PTR_CPU_FPU:
break;
case CMD_PTR_CPU_XR_RETRY:
break;
case CMD_PTR_CPU_IREG:
break;
case CMD_PTR_CPU_FREG:
break;
case CMD_PTR_CPU_MAU:
break;
case CMD_PTR_CPU_L2CTL:
break;
case CMD_PTR_CPU_MISC_REGS:
"misc_regs");
break;
case CMD_PTR_CPU_LFU:
break;
#ifdef sun4u
case CMD_PTR_CPU_INV_SFSR:
"opl_invsfsr");
break;
case CMD_PTR_CPU_UE_DET_CPU:
"oplue_detcpu");
break;
case CMD_PTR_CPU_UE_DET_IO:
"oplue_detio");
break;
case CMD_PTR_CPU_MTLB:
"opl_mtlb");
break;
case CMD_PTR_CPU_TLBP:
"opl_tlbp");
		break;
	case CMD_PTR_CPU_UGESR_INV_URG:
		    "opl_inv_urg");
		break;
case CMD_PTR_CPU_UGESR_CRE:
"opl_cre");
		break;
	case CMD_PTR_CPU_UGESR_TSB_CTX:
		    "opl_tsb_ctx");
		break;
case CMD_PTR_CPU_UGESR_TSBP:
"opl_tsbp");
break;
case CMD_PTR_CPU_UGESR_PSTATE:
"opl_pstate");
break;
case CMD_PTR_CPU_UGESR_TSTATE:
"opl_tstate");
break;
case CMD_PTR_CPU_UGESR_IUG_F:
"opl_iug_f");
break;
case CMD_PTR_CPU_UGESR_IUG_R:
"opl_iug_r");
break;
case CMD_PTR_CPU_UGESR_SDC:
"opl_sdc");
break;
case CMD_PTR_CPU_UGESR_WDT:
"opl_wdt");
break;
case CMD_PTR_CPU_UGESR_DTLB:
"opl_dtlb");
break;
case CMD_PTR_CPU_UGESR_ITLB:
"opl_itlb");
		break;
	case CMD_PTR_CPU_UGESR_CORE_ERR:
		    "opl_core_err");
		break;
case CMD_PTR_CPU_UGESR_DAE:
"opl_dae");
break;
case CMD_PTR_CPU_UGESR_IAE:
"opl_iae");
break;
case CMD_PTR_CPU_UGESR_UGE:
"opl_uge");
break;
#endif /* sun4u */
default:
}
return (cpu);
}
void
{
}
}
}
}
static void
{
return;
}
}
}
/*ARGSUSED*/
static void
{
#ifdef sun4u
return;
}
}
#else /* sun4u */
return;
#endif /* sun4u */
}
void
{
break;
break;
}
}
static int
{
int i;
return (0);
}
for (i = 0; i < sizeof (cmd_cpu_cases_t) / sizeof (cmd_case_t); i++) {
continue;
return (1);
}
return (1);
return (1);
return (0);
}
/*ARGSUSED*/
void
{
continue;
}
#ifdef sun4u
#endif /* sun4u */
cpu->cpu_uec_nflushes = 0;
}
}
void
{
}
typedef struct {
	const char *fam_name;
	cpu_family_t fam_value;
} famdata_t;

static famdata_t famdata_tbl[] = {
	{"UltraSPARC-III",	CMD_CPU_FAM_CHEETAH},
	{"UltraSPARC-IV",	CMD_CPU_FAM_CHEETAH},
	{"UltraSPARC-T",	CMD_CPU_FAM_NIAGARA},
	{"SPARC64-VI",		CMD_CPU_FAM_SPARC64},
	{"SPARC64-VII",		CMD_CPU_FAM_SPARC64}
};
cpu_family_t
cpu_family(char *knm)
{
	int j;

	for (j = 0; j < sizeof (famdata_tbl)/sizeof (famdata_t); j++) {
		if (strncmp(knm, famdata_tbl[j].fam_name,
		    strlen(famdata_tbl[j].fam_name)) == 0) {
			return (famdata_tbl[j].fam_value);
		}
	}
	return (CMD_CPU_FAM_UNSUPPORTED);
}
/*
* Determine which CPU family this diagnosis is being run on.
* This assumes that ereports are being generated by this system.
*/
cpu_family_t
cmd_cpu_check_support(void)
{
int i;
return (CMD_CPU_FAM_UNSUPPORTED);
continue;
(void) kstat_close(kc);
return (CMD_CPU_FAM_UNSUPPORTED);
}
continue;
(void) kstat_close(kc);
return (family);
}
}
(void) kstat_close(kc);
return (CMD_CPU_FAM_UNSUPPORTED);
}
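/*
 * Sketch of the kstat scan performed by cmd_cpu_check_support above
 * (reading the "implementation" statistic is an assumption; any cpu_info
 * string naming the CPU brand would do).  It classifies the first CPU
 * whose cpu_info kstat can be read.
 */
static cpu_family_t
cpu_family_from_kstats(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;
	cpu_family_t family = CMD_CPU_FAM_UNSUPPORTED;

	if ((kc = kstat_open()) == NULL)
		return (CMD_CPU_FAM_UNSUPPORTED);

	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
		if (strcmp(ksp->ks_module, "cpu_info") != 0)
			continue;
		if (kstat_read(kc, ksp, NULL) == -1)
			continue;
		if ((kn = kstat_data_lookup(ksp, "implementation")) != NULL) {
			family = cpu_family(KSTAT_NAMED_STR_PTR(kn));
			break;
		}
	}

	(void) kstat_close(kc);
	return (family);
}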
int
cmd_cpu_ecache_support(void)
{
	/* fam_ecache_flush is an assumed member name for faminfo_t */
	return (fam_info_tbl[cmd_cpu_check_support()].fam_ecache_flush);
}
/*
 * Build the cpu-scheme FMRI for the given cpuid.
 */
nvlist_t *
cmd_cpu_fmri_create(uint32_t cpuid, uint8_t cpumask)
{
	nvlist_t *fmri;

	if (nvlist_alloc(&fmri, NV_UNIQUE_NAME, 0) != 0)
		return (NULL);
	if (nvlist_add_uint8(fmri, FM_VERSION, FM_CPU_SCHEME_VERSION) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_CPU) != 0 ||
	    nvlist_add_uint32(fmri, FM_FMRI_CPU_ID, cpuid) != 0 ||
	    nvlist_add_uint8(fmri, FM_FMRI_CPU_MASK, cpumask) != 0) {
		nvlist_free(fmri);
		return (NULL);
	}
	return (fmri);
}
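/*
 * Usage sketch (assumes the constructor name above): callers typically
 * check the resulting FMRI against the live system, e.g.
 *
 *	nvlist_t *fmri = cmd_cpu_fmri_create(cpuid, cpumask);
 *
 *	if (fmri != NULL && fmd_nvl_fmri_present(hdl, fmri) > 0)
 *		... the cpu is still present in the system ...
 */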