/* cmi.c revision 20c794b39650d115e17a15983b6b82e46238cf45 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Public interface to routines implemented by CPU modules
*/
#include <sys/x86_archext.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpu_module_ms.h>
#include <sys/privregs.h>
/*
 * Set to force cmi_init to fail.
 */
int cmi_no_init = 0;
/*
 * Set to avoid MCA initialization.
 */
int cmi_no_mca_init = 0;
/*
 * If set for debugging we will not attempt to load a model-specific
 * cpu module but will load the generic cpu module instead.
 */
int cmi_force_generic = 0;
/*
 * If cleared for debugging, we will suppress panicking on fatal hardware
 * errors. This should *only* be used for debugging; its use can and will
 * cause data corruption if actual hardware errors are detected by the system.
 */
int cmi_panic_on_uncorrectable_error = 1;
/*
 * Subdirectory (relative to the module search path) in which we will
 * look for cpu modules.
 */
#define CPUMOD_SUBDIR "cpu"
/*
 * CPU modules have filenames such as "cpu.AuthenticAMD.15" and
 * "cpu.generic" - the "cpu" prefix is specified by the following.
 */
#define CPUMOD_PREFIX "cpu"
/*
 * Structure used to keep track of cpu modules we have loaded and their ops
 */
typedef struct cmi {
	/*
	 * NOTE(review): the struct body appears truncated in this revision --
	 * members referenced elsewhere in the file (e.g. cmi_refcnt in the
	 * hold/release routines) are used but never declared here.  Restore
	 * the full member list (list linkage, ops pointer, modctl pointer,
	 * reference count -- TODO confirm against upstream) before building.
	 */
} cmi_t;
static kmutex_t cmi_load_lock;	/* presumably serializes module load/unload; confirm */
/*
 * Functions we need from cmi_hw.c that are not part of the cpu_module.h
 * interface.
 */
extern void cmi_hdl_setcmi(cmi_hdl_t, void *, void *);
extern void *cmi_hdl_getcmi(cmi_hdl_t);
/*
 * NOTE(review): only the VENDOR match level survives in this revision.
 * CMI_MATCH_FAMILY, CMI_MATCH_MODEL and CMI_MATCH_STEPPING are referenced
 * by the match predicate and the model-specific load loop below, but their
 * definitions have been lost -- restore them from upstream.
 */
#define CMI_MATCH_VENDOR 0 /* Just match on vendor */
/*
 * NOTE(review): the next two definitions are truncated -- both signature
 * lines lack a function name and parameter list, and the bodies are empty.
 * Restore them from the upstream history; this file will not compile as-is.
 */
static void
{
}
static void
{
}
/*
 * Hold the module in memory. We call to CPU modules without using the
 * stubs mechanism, so these modules must be manually held in memory.
 * The mod_ref acts as if another loaded module has a dependency on us.
 */
/* NOTE(review): name and parameter list missing from this definition too. */
static void
{
/* Take a reference on the module (cmi_refcnt lives in the truncated struct cmi). */
cmi->cmi_refcnt++;
}
static void
{
/*
 * NOTE(review): drops a reference; whatever was done when the count
 * reached zero (presumably the module release/unload path -- confirm
 * upstream) has been lost, leaving an empty if-body.
 */
if (--cmi->cmi_refcnt == 0) {
}
}
/*
 * NOTE(review): the following two definitions are heavily truncated.  The
 * signature lines carry only a return type; most statements are missing,
 * leaving unbalanced braces and dangling string/argument fragments.  The
 * second appears to locate a module's _cmi_ops/_cmi_api_version symbols and
 * validate the API version -- TODO confirm and restore from upstream.
 */
static cmi_ops_t *
{
NULL) {
return (NULL);
}
return (NULL);
}
return (ops);
}
static cmi_t *
{
return (cmi);
}
/*
 * Apparently a cpu module before versioning was introduced -
 * we call this version 0.
 */
} else {
if (!CMI_API_VERSION_CHKMAGIC(apiver)) {
"_cmi_api_version 0x%x has bad magic",
return (NULL);
}
}
if (apiver != CMI_API_VERSION) {
return (NULL);
}
return (NULL);
return (cmi);
}
/*
 * NOTE(review): truncated definition -- name and parameters are missing.
 * From the surviving shape this appears to be the cpu-match predicate:
 * each `if (match >= LEVEL && ...)` lost the right-hand comparison that
 * would return 0 on a mismatch at that level; 1 means all requested
 * levels matched.  TODO: restore the missing conditions from upstream.
 */
static int
{
if (match >= CMI_MATCH_VENDOR &&
return (0);
if (match >= CMI_MATCH_FAMILY &&
return (0);
if (match >= CMI_MATCH_MODEL &&
return (0);
if (match >= CMI_MATCH_STEPPING &&
return (0);
return (1);
}
/*
 * NOTE(review): truncated handle-walk callback -- the signature and the
 * condition that selects WALK_DONE over WALK_NEXT are missing.
 */
static int
{
return (CMI_HDL_WALK_DONE);
} else {
return (CMI_HDL_WALK_NEXT);
}
}
/*
 * NOTE(review): two more truncated definitions follow -- names, parameters
 * and most statements are missing.  The second seems to reuse an already
 * loaded module for a matching cpu, else resolve and load one by module id
 * (family/model/stepping go into s[] -- only the family store survives).
 * Restore from upstream.
 */
static cmi_t *
{
if (dhdl) {
}
return (cmi);
}
static cmi_t *
{
int modid;
uint_t s[3];
/*
 * Have we already loaded a module for a cpu with the same
 * identity?  (NOTE(review): this comment is itself cut short in
 * this revision -- wording above is a best guess.)
 */
return (cmi);
}
s[0] = cmi_hdl_family(hdl);
if (modid == -1)
return (NULL);
if (cmi)
return (cmi);
}
/*
 * Try to load a cpu module with specific support for this chip type.
 */
/*
 * NOTE(review): truncated -- the signature line, the per-iteration load
 * call, the cmi_init invocation and its error reporting are missing.  The
 * loop walks match levels from most specific (STEPPING) down to VENDOR,
 * retrying with fewer filename suffices each time a module fails or
 * declines to init.
 */
static cmi_t *
{
int err;
int i;
for (i = CMI_MATCH_STEPPING; i >= CMI_MATCH_VENDOR; i--) {
int suffixlevel;
return (NULL);
/*
 * A module has loaded and has a _cmi_ops structure, and the
 * module has been held for this instance. Call its cmi_init
 * entry point - we expect success (0) or ENOTSUP.
 */
if (boothowto & RB_VERBOSE) {
printf("initialized cpu module '%s' on "
"chip %d core %d strand %d\n",
}
return (cmi);
"chip %d core %d strand %d: err=%d\n",
}
/*
 * The module failed or declined to init, so release
 * it and update i to be equal to the number
 * of suffices actually used in the last module path.
 */
i = suffixlevel;
}
return (NULL);
}
/*
 * Load the generic IA32 MCA cpu module, which may still supplement
 * itself with model-specific support through cpu model-specific modules.
 */
/*
 * NOTE(review): truncated -- signature, modload call and cmi_init call are
 * missing from this definition.
 */
static cmi_t *
{
int modid;
int err;
return (NULL);
if (cmi)
return (NULL);
"init: err=%d", err);
return (NULL);
}
return (cmi);
}
/*
 * NOTE(review): the signature line above this brace is missing entirely;
 * from the body (returns a handle, honors cmi_no_init/cmi_force_generic)
 * this is presumably cmi_init for a (chip, core, strand) -- confirm.
 */
{
void *data;
if (cmi_no_init) {
cmi_no_mca_init = 1;
return (NULL);
}
"core %d strand %d (cmi_hdl_create returned NULL)\n",
return (NULL);
}
if (!cmi_force_generic)
return (NULL);
}
return (hdl);
}
/*
 * cmi_fini is not called at the moment. It is intended to be called
 * on DR deconfigure of a cpu resource. It should not be called at
 * simple offline of a cpu.
 */
/*
 * NOTE(review): truncated -- name/parameters missing, and the statement
 * guarded by cms_present() has been lost.
 */
void
{
if (cms_present(hdl))
}
/*
 * cmi_post_startup is called from post_startup for the boot cpu only.
 */
void
cmi_post_startup(void)
{
/* NOTE(review): the second half of this condition and the call it guards
 * are missing from this revision. */
if (cmi_no_mca_init != 0 ||
return;
}
/*
 * Called just once from start_other_cpus when all processors are started.
 * This will not be called for each cpu, so the registered op must not
 * assume it is called as such.
 */
void
cmi_post_mpstartup(void)
{
/* NOTE(review): condition and guarded call truncated, as above. */
if (cmi_no_mca_init != 0 ||
return;
}
/*
 * NOTE(review): three nameless void hooks follow; each lost its signature
 * and the operation performed after the cmi_no_mca_init guard.
 */
void
{
if (cmi_no_mca_init != 0)
return;
}
void
{
if (cmi_no_mca_init != 0)
return;
}
void
{
if (cmi_no_mca_init != 0)
return;
}
#define CMI_RESPONSE_NONE 0x1
#define CMI_RESPONSE_CKILL 0x2
#define CMI_RESPONSE_ONTRAP_PROT 0x4
#define CMI_RESPONSE_LOFAULT_PROT 0x5
/*
 * NOTE(review): CMI_RESPONSE_PANIC is used by the #MC switch below but its
 * definition (and whatever held value 0x3) is missing from this revision.
 */
/*
 * Return 0 if we will panic in response to this machine check, otherwise
 * non-zero. If the caller is cmi_mca_trap in this file then the nonzero
 * return values are to be interpreted from CMI_RESPONSE_* above.
 *
 * This function must just return what will be done without actually
 * doing anything; this includes not changing the regs.
 */
/*
 * NOTE(review): truncated -- only the return type survives; name, the
 * (disposition, regs?) parameters, and the conditions guarding the
 * ONTRAP/LOFAULT returns and the corrupt-context/kill-contract branch
 * are all missing.
 */
int
{
/*
 * If no bits are set in the disposition then there is nothing to
 * worry about and we do not need to trampoline to ontrap or
 * lofault handlers.
 */
if (disp == 0)
return (CMI_RESPONSE_NONE);
/*
 * Unconstrained errors cannot be forgiven, even by ontrap or
 * lofault protection. The data is not poisoned and may not
 * even belong to the trapped context - eg a writeback of
 * data that is found to be bad.
 */
if (disp & CMI_ERRDISP_UC_UNCONSTRAINED)
return (panicrsp);
/*
 * ontrap OT_DATA_EC and lofault protection forgive any disposition
 * other than unconstrained, even those normally forced fatal.
 */
return (CMI_RESPONSE_ONTRAP_PROT);
return (CMI_RESPONSE_LOFAULT_PROT);
/*
 * Forced-fatal errors are terminal even in user mode.
 */
if (disp & CMI_ERRDISP_FORCEFATAL)
return (panicrsp);
/*
 * If the trapped context is corrupt or we have no instruction pointer
 * to resume at (and aren't trampolining to a fault handler)
 * then in the kernel case we must panic and in usermode we
 * kill the affected contract.
 */
/*
 * Anything else is harmless
 */
return (CMI_RESPONSE_NONE);
}
int cma_mca_trap_panic_suppressed = 0;
static void
cmi_mca_panic(void)
{
/*
 * NOTE(review): the guard that selects fm_panic() over the (now empty)
 * else branch -- presumably the cmi_panic_on_uncorrectable_error check,
 * with the else counting cma_mca_trap_panic_suppressed; confirm -- is
 * missing, leaving unbalanced braces.
 */
fm_panic("Unrecoverable Machine-Check Exception");
} else {
}
}
int cma_mca_trap_contract_kills = 0;
int cma_mca_trap_ontrap_forgiven = 0;
int cma_mca_trap_lofault_forgiven = 0;
/*
 * Native #MC handler - we branch to here from mcetrap
 */
/*ARGSUSED*/
/*
 * NOTE(review): truncated -- the function name/parameters, the handle
 * lookup, the call into the module's trap entry point, the switch head,
 * and the statements inside each switch arm are all missing, leaving a
 * bare "default:" label and unbalanced braces.  Restore from upstream.
 */
void
{
#ifndef __xpv
int s;
if (cmi_no_mca_init != 0)
return;
/*
 * This function can call cmn_err, and the cpu module cmi_mca_trap
 * entry point may also elect to call cmn_err (e.g., if it can't
 * log the error onto an errorq, say very early in boot).
 * We need to let cprintf know that we must not block.
 */
s = spl8();
hdl ? "handle lookup ok but no #MC handler found" :
"handle lookup failed");
splx(s);
return;
}
default:
/*FALLTHRU*/
case CMI_RESPONSE_PANIC:
break;
case CMI_RESPONSE_NONE:
break;
case CMI_RESPONSE_CKILL:
break;
case CMI_RESPONSE_ONTRAP_PROT: {
break;
}
break;
}
splx(s);
#endif /* __xpv */
}
/*
 * NOTE(review): everything from here to end-of-file is a run of truncated
 * memory-controller (mc) ops wrappers -- most signature lines are gone
 * (several functions are reduced to a bare "{"), and the statements that
 * fetch and invoke the registered cmi_mc_ops entries are missing.  The
 * surviving error returns (CMIERR_MC_ABSENT, CMIERR_MC_NOTSUP,
 * CMIERR_MC_PARTIALUNUMTOPA) suggest patounum/unumtopa translators --
 * confirm against upstream.
 */
void
{
return;
}
void
{
if (!cmi_no_mca_init)
}
{
const struct cmi_mc_ops *mcops;
if (cmi_no_mca_init ||
return (CMIERR_MC_ABSENT);
return (CMIERR_MC_NOTSUP);
}
return (rv);
}
{
const struct cmi_mc_ops *mcops;
return (CMIERR_API); /* convert from just one form */
if (cmi_no_mca_init ||
return (CMIERR_MC_ABSENT);
FM_FMRI_MEM_PHYSADDR, pap) == 0) {
return (CMIERR_MC_PARTIALUNUMTOPA);
} else {
}
}
return (rv);
}
void
{
const struct cmi_mc_ops *mcops;
return;
}
int force)
{
return (CMIERR_NOTSUP);
}
/*
 * NOTE(review): return-type line and body missing; presumably returns
 * cmi_panic_on_uncorrectable_error -- confirm.
 */
cmi_panic_on_ue(void)
{
}