/* gcpu_poll_xpv.c revision a31148363f598def767ac48c5d82e1572e44b935 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* "Polled" MCA events in an i86xpv dom0. A timeout runs in the hypervisor
* and checks MCA state. If it observes valid MCA state in a bank and if
* it sees that dom0 has registered a handler for the VIRQ_MCA then it
* raises that VIRQ to dom0. The interrupt handler performs a
* hypercall to retrieve the polled telemetry and then pushes that telemetry
* into the MSR interpose hash and calls the generic logout code which
* will then find the provided interposed MSR values when it performs
* cmi_hdl_rdmsr so logout code works unchanged for native or i86xpv dom0.
*/
#include <sys/x86_archext.h>
#include <sys/evtchn_impl.h>
#include <sys/hypervisor.h>
#include "../../i86pc/cpu/generic_cpu/gcpu.h"
/* ACKs fetched telemetry back to the hypervisor; implemented elsewhere. */
extern void gcpu_xpv_telem_ack(int, uint64_t);
/* Seconds between MCH poll timeouts; 0 disables the periodic MCH poll. */
int gcpu_xpv_mch_poll_interval_secs = 10;
/* Interrupt level for the VIRQ_MCA handler — presumably the IPL used at
 * registration time; the binding code is not visible in this copy. */
int gcpu_xpv_virq_level = 3;
/* Vector assigned when VIRQ_MCA is bound; -1 until registration occurs. */
static int gcpu_xpv_virq_vect = -1;
/* Buffer that receives polled MCA telemetry fetched from the hypervisor. */
static mc_info_t gcpu_xpv_polldata;
/* Serializes access to gcpu_xpv_polldata (see mutex_tryenter in the
 * VIRQ interrupt handler below). */
static kmutex_t gcpu_xpv_polldata_lock;
/* Register array used to push telemetry into the MSR interpose hash;
 * allocated/enlarged during per-cpu poll init (body truncated below). */
static cmi_mca_regs_t *gcpu_xpv_poll_bankregs;
static size_t gcpu_xpv_poll_bankregs_sz;
/* Diagnostic counters: interrupts we did not claim, and fetch attempts
 * that found the telemetry lock already held. */
static uint32_t gcpu_xpv_intr_unclaimed;
static uint32_t gcpu_xpv_mca_hcall_busy;
/* Number of architectural (non-bank) registers per event — presumably
 * MCG_CAP/MCG_STATUS/MCG_CTL; confirm against the logout code. */
#define GCPU_XPV_ARCH_NREGS 3
/* Timeout argument values selecting whether gcpu_xpv_mch_poll re-arms. */
#define GCPU_XPV_MCH_POLL_REARM ((void *)1)
#define GCPU_XPV_MCH_POLL_NO_REARM NULL
/*
 * Interrupt handler for VIRQ_MCA, raised by the hypervisor when it has
 * polled MCA telemetry for dom0 to fetch (see file header comment).
 *
 * NOTE(review): this function is incomplete in this copy of the file —
 * statements are missing between the local declarations and the early
 * DDI_INTR_UNCLAIMED return (its guard condition is gone, leaving an
 * unmatched close brace), and the fetch loop has been reduced to the
 * dangling argument tail "&fetch_id))".  Restore the full body from the
 * referenced revision before attempting to build.
 */
static uint_t
gcpu_xpv_virq_intr(void)
{
int count = 0;
int i;
/* NOTE(review): missing guard here — presumably the path that bumps
 * gcpu_xpv_intr_unclaimed when the interrupt is not for us. */
return (DDI_INTR_UNCLAIMED);
}
/* Another fetch is in progress; let it complete and claim the intr. */
if (!mutex_tryenter(&gcpu_xpv_polldata_lock)) {
return (DDI_INTR_CLAIMED);
}
/* NOTE(review): truncated fetch loop — the hypercall statement that
 * this "&fetch_id))" argument tail belongs to is missing. */
&fetch_id)) {
count++;
}
}
return (DDI_INTR_CLAIMED);
}
/*
 * Timeout handler that polls MCH state via cmi_mc_logout(); per the
 * comments in the poll-start routine below, the hypervisor polls MCA
 * state for us but cannot poll MCH state, hence this dom0-side timeout.
 *
 * NOTE(review): incomplete in this copy — the code that obtains `hdl'
 * before the cmi_mc_logout() call is missing (leaving an unmatched
 * close brace), as is the timeout(9F) re-arm call inside the
 * GCPU_XPV_MCH_POLL_REARM branch.
 */
static void
gcpu_xpv_mch_poll(void *arg)
{
cmi_mc_logout(hdl, 0, 0);
}
if (arg == GCPU_XPV_MCH_POLL_REARM &&
gcpu_xpv_mch_poll_interval_secs != 0) {
/* NOTE(review): missing re-arm timeout() call here. */
}
}
/*
* gcpu_mca_poll_init is called from gcpu_mca_init for each cpu handle
* that we initialize for. It should prepare for polling by allocating
* control structures and the like, but must not kick polling off yet.
*
* Since we initialize all cpus in a serialized loop there is no race
* on allocating the bankregs structure, nor in free'ing and enlarging
* it if we find the number of MCA banks is not uniform in the system
* (unlikely) since polling is only started post mp startup.
*/
void
/* NOTE(review): the function signature line is missing here.  Per the
 * block comment above, this is gcpu_mca_poll_init(), called serially for
 * each cpu handle; the statements that allocate — or free and enlarge —
 * gcpu_xpv_poll_bankregs have also been truncated from both branches. */
{
if (gcpu_xpv_poll_bankregs != NULL) {
/* NOTE(review): missing logic — presumably frees and reallocates the
 * bankregs array when a larger bank count is encountered. */
} else {
/* NOTE(review): missing initial allocation of the bankregs array. */
}
}
}
/* deconfigure gcpu_mca_poll_init() */
void
/* NOTE(review): signature line missing — presumably gcpu_mca_poll_fini();
 * its body (if any) has been truncated to an empty brace pair. */
{
}
void
/* NOTE(review): signature line missing — presumably the poll-start
 * routine invoked once on the boot cpu after multiprocessor startup.
 * The timeout(9F) call that schedules gcpu_xpv_mch_poll and the
 * VIRQ_MCA binding/registration statements have been truncated. */
{
/*
 * We are on the boot cpu (cpu 0), called at the end of its
 * multiprocessor startup.
 */
/*
 * The hypervisor will poll MCA state for us, but it cannot
 * poll MCH state so we do that via a timeout.
 */
if (gcpu_xpv_mch_poll_interval_secs != 0) {
/* NOTE(review): truncated timeout() call — the "MICROSEC));" below
 * is the tail of its interval-argument expression. */
MICROSEC));
}
/*
 * Register handler for VIRQ_MCA; once this is in place
 * the hypervisor will begin to forward polled MCA observations
 * to us.
 */
/* NOTE(review): missing VIRQ binding (e.g. ec_bind_virq_to_irq and
 * handler registration at gcpu_xpv_virq_level) — restore from the
 * referenced revision. */
}
}