gcpu_poll.c revision 1b31ef1ec652dc2f9c89b703b7e0b9ace0642b9d
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Generic x86 CPU MCA poller.
*/

#include <sys/types.h>
#include <sys/time.h>
#include <sys/ksynch.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/cyclic.h>
#include <sys/x86_archext.h>
#include <sys/mca_x86.h>
#include <sys/cpu_module.h>
#include <sys/fm/util.h>

#include "gcpu.h"

#ifdef DEBUG
int gcpu_mca_poll_trace_always = 1;
#else
int gcpu_mca_poll_trace_always = 0;
#endif
uint_t gcpu_mca_poll_trace_nent = 100;	/* trace buffer entries; default is an assumption */

hrtime_t gcpu_mca_poll_interval = NANOSEC * 10ULL;	/* tuneable; default is an assumption */

static kmutex_t mch_poll_lock;
static hrtime_t mch_poll_timestamp;
static cmi_hdl_t mch_poll_owner;

/*
 * Return nonzero if the given handle should poll the MCH. We stick with
 * the same handle as before unless the timestamp has not been updated
 * for a while. There is no need to keep a hold on the mch_poll_owner
 * handle.
 */
static int
gcpu_mch_pollowner(cmi_hdl_t hdl)
{
        hrtime_t now = gethrtime_waitfree();
        int dopoll = 0;

        mutex_enter(&mch_poll_lock);
        /* The two-interval staleness threshold used here is an assumption. */
        if (now - mch_poll_timestamp > 2 * gcpu_mca_poll_interval ||
            mch_poll_timestamp == 0) {
                mch_poll_owner = hdl;
                dopoll = 1;
        } else if (mch_poll_owner == hdl) {
                dopoll = 1;
        }

        if (dopoll)
                mch_poll_timestamp = now;

        mutex_exit(&mch_poll_lock);
        return (dopoll);
}

/*
 * Record a poll event in this cpu's poll trace buffer, if it has one.
 * The buffer and record member names used below are assumptions; see
 * gcpu.h for the real layout.
 */
static void
gcpu_mca_poll_trace(gcpu_mca_t *mca, uint8_t what, uint8_t nerr)
{
        gcpu_mca_poll_trace_t *pt;
        uint_t cur;

        if (mca->gcpu_mca_polltrace == NULL)
                return; /* poll trace buffer is disabled */

        cur = (mca->gcpu_mca_polltrace_cur + 1) % gcpu_mca_poll_trace_nent;
        pt = &mca->gcpu_mca_polltrace[cur];
        pt->mpt_when = gethrtime_waitfree();
        pt->mpt_what = what;
        pt->mpt_nerr = 0;
        if (what == GCPU_MPT_WHAT_CYC_ERR)
                pt->mpt_nerr = nerr;
        mca->gcpu_mca_polltrace_cur = cur;
}

#ifndef __xpv
/*
* Perform a native poll of MCA state.
*/
static void
gcpu_ntv_mca_poll(cmi_hdl_t hdl, int what)
{
        gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
        gcpu_mca_t *mca = &gcpu->gcpu_mca;
        gcpu_mce_status_t mce;
        int willpanic;
        int i;

        /*
         * If this cpu has just been unfaulted, clear that state first.
         * The flag name below is an assumption; see gcpu.h.
         */
        if (mca->gcpu_mca_flags & GCPU_MCA_F_UNFAULTING) {
                mca->gcpu_mca_flags &= ~GCPU_MCA_F_UNFAULTING;

                /*
                 * On the first cyclic poll after unfaulting a CPU we
                 * clear the status registers; see gcpu_faulted_exit
                 * for details. We don't do this if the poll was
                 * initiated manually (presumably from some injection
                 * activity).
                 */
                if (what == GCPU_MPT_WHAT_CYC_ERR) {
                        for (i = 0; i < mca->gcpu_mca_nbanks; i++) {
                                (void) cmi_hdl_wrmsr(hdl,
                                    IA32_MSR_MC(i, STATUS), 0ULL);
                        }
                        return;
                }
        }

        /*
         * Logout errors of the MCA banks of this cpu. The all-banks mask
         * and the exact gcpu_mca_logout arguments are assumptions.
         */
        gcpu_mca_logout(hdl, NULL, (uint64_t)-1, &mce, B_TRUE);
        gcpu_mca_poll_trace(mca, what, mce.mce_nerr);

        /*
         * Call to the memory-controller driver which may report some
         * errors not visible under the MCA (for off-chip NB).
         * Since there is typically a single MCH we arrange that
         * just one cpu perform this task at each cyclic fire.
         */
        if (gcpu_mch_pollowner(hdl))
                cmi_mc_logout(hdl, 0, 0);	/* flag arguments are assumptions */

        /*
         * In the common case any polled error is considered non-fatal,
         * even if it indicates PCC or UC etc. The only condition on which
         * we will panic for a polled error is if model-specific support
         * forces the error to be terminal regardless of how it is
         * encountered.
         */
        willpanic = (mce.mce_disp & CMI_ERRDISP_FORCEFATAL) != 0;

        if (willpanic) {
#ifdef DEBUG
                /* Status-structure member names below are assumptions. */
                cmn_err(CE_WARN, "MCA poll: %u errors, "
                    "%u PCC (%u ok), "
                    "%u UC (%u ok, %u poisoned), "
                    "%u forcefatal, %u ignored",
                    mce.mce_nerr,
                    mce.mce_npcc, mce.mce_npcc_ok,
                    mce.mce_nuc, mce.mce_nuc_ok, mce.mce_nuc_poisoned,
                    mce.mce_forcefatal, mce.mce_ignored);
#endif
                fm_panic("Unrecoverable Machine-Check Exception (Polled)");
        }
}

/*
* See gcpu_mca_trap for an explanation of why preemption is disabled here.
* Note that we disable preemption and then contend for an adaptive mutex -
* we could block during the mutex operation, but once we return with the
* mutex held we must perform no operation that can block and we cannot
* be preempted so we will stay on cpu for the duration. The disabling
* of preemption also means we cannot migrate cpus once we have returned
* with the mutex held - cyclic invocations can't migrate, anyway, but
* others could if they have failed to bind before this point.
*/
static void
gcpu_ntv_mca_poll_wrapper(cmi_hdl_t hdl, int what)
{
        gcpu_data_t *gcpu;

        if (hdl == NULL || (gcpu = cmi_hdl_getcmidata(hdl)) == NULL)
                return;

        /* The poll-trace mutex member name is an assumption; see gcpu.h. */
        kpreempt_disable();
        mutex_enter(&gcpu->gcpu_mca.gcpu_mca_polltrace_mutex);
        gcpu_ntv_mca_poll(hdl, what);
        mutex_exit(&gcpu->gcpu_mca.gcpu_mca_polltrace_mutex);
        kpreempt_enable();
}

static void
gcpu_ntv_mca_poll_cyclic(void *arg)
{
        gcpu_ntv_mca_poll_wrapper((cmi_hdl_t)arg, GCPU_MPT_WHAT_CYC_ERR);
}

/*ARGSUSED*/
static void
gcpu_ntv_mca_poll_online(void *arg, cpu_t *cp, cyc_handler_t *cyh,
    cyc_time_t *cyt)
{
        cmi_hdl_t hdl;

        /*
         * Lookup and hold a handle for this cpu (any hold released in
         * our offline function). If we chose not to initialize a handle
         * for this cpu back at cmi_init time then this lookup will return
         * NULL, so the cyh_func we appoint must be prepared for that.
         */
        hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
            cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));

        cyt->cyt_when = 0;
        cyt->cyt_interval = gcpu_mca_poll_interval;
        cyh->cyh_func = gcpu_ntv_mca_poll_cyclic;
        cyh->cyh_arg = (void *)hdl;
        cyh->cyh_level = CY_LOW_LEVEL;
}

/*ARGSUSED*/
static void
gcpu_ntv_mca_poll_offline(void *arg, cpu_t *cp, void *cyh_arg)
{
        cmi_hdl_t hdl = (cmi_hdl_t)cyh_arg;

        /* Release the hold obtained in our online function, if any. */
        if (hdl != NULL)
                cmi_hdl_rele(hdl);
}

#endif /* __xpv */
/*
* gcpu_mca_poll_init is called from gcpu_mca_init for each cpu handle
* that we initialize for. It should prepare for polling by allocating
* control structures and the like, but must not kick polling off yet.
*
* In the native case our polling technique (see gcpu_mca_poll_start) will
* be to install an omnipresent cyclic to fire on all online cpus (cpu_t),
* and they will poll the real hardware beneath them.
*
* In the xVM MCA case the hypervisor performs polling and makes telemetry
* available to dom0 - a cyclic on each virtual cpu is inappropriate.
* Instead we will create a single unbound cyclic which will consume the
* hypervisor-provided telemetry when it fires, and submit it into
* common logging code.
*/
static int gcpu_mca_poll_inits;

void
gcpu_mca_poll_init(cmi_hdl_t hdl)
{
        switch (cmi_hdl_class(hdl)) {
        case CMI_HDL_NATIVE: {
                gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
                gcpu_mca_t *mca = &gcpu->gcpu_mca;

                if (gcpu_mca_poll_trace_always) {
                        /* Buffer member names are assumptions; see gcpu.h. */
                        mca->gcpu_mca_polltrace =
                            kmem_zalloc(sizeof (gcpu_mca_poll_trace_t) *
                            gcpu_mca_poll_trace_nent, KM_SLEEP);
                        mca->gcpu_mca_polltrace_cur = 0;
                }
                gcpu_mca_poll_inits++;
                break;
        }

        case CMI_HDL_SOLARIS_xVM_MCA:
                /*
                 * Implementation should move the kmem_alloc above to before
                 * the switch, and stash the trace buffer and current record
                 * pointer in a static structure. This should be done
                 * just once, despite this init function potentially being
                 * called multiple times; see the sketch after this function.
                 */
                /*FALLTHRU*/

        default:
                break;
        }
}
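
/*
 * A minimal sketch of the arrangement the xVM note above suggests: hoist
 * the allocation out of the per-handle switch and keep the trace state in
 * one static control structure. This block is illustrative only (hence
 * not compiled) and the type, member, and function names are assumptions.
 */
#if 0
typedef struct gcpu_poll_trace_ctl {
        gcpu_mca_poll_trace_t *mptc_tbufs;	/* trace buffer */
        uint_t mptc_curtrace;			/* last record filled */
} gcpu_poll_trace_ctl_t;

static gcpu_poll_trace_ctl_t gcpu_poll_trace_ctl;

static void
gcpu_poll_trace_init(void)
{
        if (!gcpu_mca_poll_trace_always ||
            gcpu_poll_trace_ctl.mptc_tbufs != NULL)
                return;	/* disabled, or already set up by an earlier call */

        gcpu_poll_trace_ctl.mptc_tbufs =
            kmem_zalloc(sizeof (gcpu_mca_poll_trace_t) *
            gcpu_mca_poll_trace_nent, KM_SLEEP);
        gcpu_poll_trace_ctl.mptc_curtrace = 0;
}
#endif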

static void
gcpu_ntv_mca_poll_start(void)
{
#ifndef __xpv
        cyc_omni_handler_t cyo;

        if (gcpu_mca_poll_interval == 0 || gcpu_mca_poll_inits == 0)
                return;

        cyo.cyo_online = gcpu_ntv_mca_poll_online;
        cyo.cyo_offline = gcpu_ntv_mca_poll_offline;
        cyo.cyo_arg = NULL;

        mutex_enter(&cpu_lock);
        (void) cyclic_add_omni(&cyo);
        mutex_exit(&cpu_lock);
#endif /* __xpv */
}

void
gcpu_mca_poll_start(cmi_hdl_t hdl)
{
        switch (cmi_hdl_class(hdl)) {
        case CMI_HDL_NATIVE:
                gcpu_ntv_mca_poll_start();
                break;

        case CMI_HDL_SOLARIS_xVM_MCA:
                /*
                 * Implementation should call a new function to install
                 * an unbound cyclic that will process hypervisor-provided
                 * telemetry; see the sketch after this function.
                 */
                /*FALLTHRU*/

        default:
                break;
        }
}
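
/*
 * For the xVM class the comment above calls for a single unbound cyclic
 * rather than per-cpu omnipresent cyclics. One possible shape is sketched
 * below for illustration only (hence not compiled); the handler name and
 * the reuse of gcpu_mca_poll_interval as the period are assumptions.
 */
#if 0
static void
gcpu_xpv_mca_poll_start(void)
{
        cyc_handler_t cyh;
        cyc_time_t cyt;

        cyh.cyh_func = gcpu_xpv_mca_poll_cyclic;	/* telemetry consumer */
        cyh.cyh_arg = NULL;
        cyh.cyh_level = CY_LOW_LEVEL;
        cyt.cyt_when = 0;
        cyt.cyt_interval = gcpu_mca_poll_interval;

        mutex_enter(&cpu_lock);
        (void) cyclic_add(&cyh, &cyt);
        mutex_exit(&cpu_lock);
}
#endif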

/*
 * Poke the poller to run immediately on the given handle, for example
 * after error injection. The function name and the GCPU_MPT_WHAT_POKE_ERR
 * code used here are assumptions.
 */
void
gcpu_hdl_poke(cmi_hdl_t hdl)
{
        switch (cmi_hdl_class(hdl)) {
        case CMI_HDL_NATIVE:
#ifndef __xpv
                gcpu_ntv_mca_poll_wrapper(hdl, GCPU_MPT_WHAT_POKE_ERR);
#endif
                break;

        case CMI_HDL_SOLARIS_xVM_MCA:
                /*
                 * Implementation will call the xPV poll wrapper; a sketch
                 * follows this function.
                 */
                /*FALLTHRU*/

        default:
                break;
        }
}
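
/*
 * The xVM poke case above would go through an xPV poll wrapper. By analogy
 * with the native wrapper it might look like the sketch below, shown for
 * illustration only (hence not compiled); gcpu_xpv_mca_poll() is a
 * hypothetical stand-in for whatever routine consumes the
 * hypervisor-provided telemetry.
 */
#if 0
static void
gcpu_xpv_mca_poll_wrapper(cmi_hdl_t hdl, int what)
{
        if (hdl == NULL || cmi_hdl_getcmidata(hdl) == NULL)
                return;

        kpreempt_disable();
        gcpu_xpv_mca_poll(hdl, what);	/* hypothetical telemetry consumer */
        kpreempt_enable();
}
#endif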