cmi_hw.c revision 1b31ef1ec652dc2f9c89b703b7e0b9ace0642b9d
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* CPU Module Interface - hardware abstraction.
*/
#include <sys/types.h>
#include <sys/cpu_module.h>
#include <sys/kmem.h>
#include <sys/ksynch.h>
#include <sys/x86_archext.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/x_call.h>
#include <sys/atomic.h>
#include <sys/pci_cfgspace.h>
#include <sys/archsystm.h>
#include <sys/ontrap.h>
#include <sys/controlregs.h>
#include <sys/sunddi.h>
/*
* Outside of this file consumers use the opaque cmi_hdl_t. This
* definition is duplicated in the generic_cpu mdb module, so keep
* them in-sync when making changes.
*/
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature and class */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
} cmi_hdl_impl_t;

#define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)(ophdl))
#define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)
#define	HDLOPS(hdl)	((hdl)->cmih_ops)
/*
* Handles are looked up from contexts such as polling, injection etc
* where the context is reasonably well defined (although a poller could
* interrupt any old thread holding any old lock). They are also looked
* up by machine check handlers, which may strike at inconvenient times
* such as during handle initialization or destruction or during handle
* lookup (which the #MC handler itself will also have to perform).
*
 * So keeping handles in a linked list makes locking difficult when we
 * consider #MC handlers.  Our solution is to have an array indexed
 * by (chipid, coreid, strandid), with each array member a structure
 * including a pointer to a handle structure for the resource, and a
 * reference count for the handle.  Reference counts are modified
 * atomically.  The public cmi_hdl_hold always succeeds because it can
 * only be used after handle creation and before the call to destruct,
 * so the hold count is already at least one.
 * In other functions that look up a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already decremented to zero
 * before applying our hold.
*
 * This array is allocated when we first want to populate an entry.
 * When allocated it is maximal - ideally we should scale to the
 * actual number of chips, cores per chip and strands per core but
 * that info is not readily available if we are virtualized so
 * for now we stick with the dumb approach.
*/
#define	CMI_MAX_CHIPS			16
#define	CMI_MAX_CORES_PER_CHIP		8
#define	CMI_MAX_STRANDS_PER_CORE	2
#define	CMI_HDL_HASHSZ (CMI_MAX_CHIPS * CMI_MAX_CORES_PER_CHIP * \
    CMI_MAX_STRANDS_PER_CORE)

struct cmi_hdl_hashent {
	volatile uint32_t cmhe_refcnt;
	cmi_hdl_impl_t *cmhe_hdlp;
};

static struct cmi_hdl_hashent *cmi_hdl_hash;

#define	CMI_HDL_HASHIDX(chipid, coreid, strandid) \
	((chipid) * CMI_MAX_CORES_PER_CHIP * CMI_MAX_STRANDS_PER_CORE + \
	(coreid) * CMI_MAX_STRANDS_PER_CORE + \
	(strandid))
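/*
 * Worked example (illustrative, not from the original source): with the
 * limits above the array has CMI_HDL_HASHSZ == 16 * 8 * 2 == 256 entries,
 * and
 *	CMI_HDL_HASHIDX(3, 2, 1) == 3 * 8 * 2 + 2 * 2 + 1 == 53
 * so each (chipid, coreid, strandid) tuple maps to a distinct slot;
 * despite the name this is a perfect index, not a hash with collisions.
 */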
/*
* Controls where we will source PCI config space data.
*/
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008

static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
/*
 * The equivalent flags for MSR access are kept per-cpu, in the
 * cmih_msrsrc field of each cpu's handle.
 */
#define CMI_MSR_FLAG_RD_HWOK 0x0001
#define CMI_MSR_FLAG_RD_INTERPOSEOK 0x0002
#define CMI_MSR_FLAG_WR_HWOK 0x0004
#define CMI_MSR_FLAG_WR_INTERPOSEOK 0x0008
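/*
 * Sketch only (assumed shape, not original source): a handle-level MSR
 * read would consult these flags in order, preferring any interposed
 * value and honoring a hardware veto:
 *
 *	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
 *	    msri_lookup(hdl, msr, valp))
 *		return (CMI_SUCCESS);
 *
 *	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
 *		return (CMIERR_INTERPOSE);
 *
 * with the write path instead invalidating any interposed value first.
 */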
int cmi_call_func_ntv_tries = 3;

static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_trycall - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_trycall(arg1, arg2, (xc_arg_t)&rc, cpus, func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc);
}
/*
* =======================================================
* | MSR Interposition |
* | ----------------- |
* | |
* -------------------------------------------------------
*/
#define CMI_MSRI_HASHSZ 16
struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msr;
	uint64_t msrie_msrval;
};

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
static void
{
struct cmi_msri_hashent *hep;
break;
}
} else {
}
}
/*
 * Look for a match for the given handle and msr.  Return 1 with valp
* filled if a match is found, otherwise return 0 with valp untouched.
*/
static int
{
struct cmi_msri_hashent *hep;
/*
* This function is called during #MC trap handling, so we should
* consider the possibility that the hash mutex is held by the
* interrupted thread. This should not happen because interposition
* is an artificial injection mechanism and the #MC is requested
* after adding entries, but just in case of a real #MC at an
* unlucky moment we'll use mutex_tryenter here.
*/
return (0);
break;
}
}
}
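/*
 * Sketch of the tryenter pattern described above (assumed shape, not
 * original source; msrib_lock and msrib_head are the bucket fields
 * declared earlier):
 *
 *	if (!mutex_tryenter(&hbp->msrib_lock))
 *		return (0);		-- a busy lock reads as a miss
 *
 *	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
 *		if (hep->msrie_hdl == hdl && hep->msrie_msr == msr) {
 *			*valp = hep->msrie_msrval;
 *			break;
 *		}
 *	}
 *	mutex_exit(&hbp->msrib_lock);
 *	return (hep != NULL);
 */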
/*
* Remove any interposed value that matches.
*/
static void
{
struct cmi_msri_hashent *hep;
return;
break;
}
}
}
/*
* =======================================================
* | PCI Config Space Interposition |
* | ------------------------------ |
* | |
* -------------------------------------------------------
*/
/*
 * Interposed values are stored in a hash keyed on (bus, dev, func, offset);
 * we also record whether the value stashed was made with a byte, word or
 * doubleword access; we will only return a hit for an access of the
 * same size.  If you access say a 32-bit register using byte accesses
 * and then attempt to read the full 32-bit value back you will not obtain
 * any sort of merged result - you get a lookup miss.
 */
#define CMI_PCII_HASHSZ 16
#define CMI_PCII_HASHIDX(b, d, f, o) \
(((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))
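/*
 * Worked example (illustrative): for (bus, dev, func, offset) of
 * (0, 24, 3, 0x44) the index is (0 + 24 + 3 + 0x44) % 15 == 95 % 15 == 5.
 * Unlike the handle array above this is a true hash - distinct tuples
 * may collide, so each bucket carries a chain.
 */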
struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
/*
* Add a new entry to the PCI interpose hash, overwriting any existing
* entry that is found.
*/
static void
{
struct cmi_pcii_hashent *hep;
break;
}
} else {
}
}
/*
 * Look for a match for the given bus/dev/func/reg and access size; return
 * 1 with valp filled if a match is found, otherwise return 0 with valp
 * untouched.
 */
static int
{
struct cmi_pcii_hashent *hep;
return (0);
break;
}
}
}
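/*
 * Remove any interposed value that matches.
 */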
static void
{
struct cmi_pcii_hashent *hep;
break;
}
}
}
/*
* =======================================================
* | Native methods |
* | -------------- |
* | |
* | These are used when we are running native on bare- |
* | metal, or simply don't know any better. |
 * -------------------------------------------------------
*/
static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor(HDLPRIV(hdl)));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr(HDLPRIV(hdl)));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily(HDLPRIV(hdl)));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel(HDLPRIV(hdl)));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep(HDLPRIV(hdl)));
}
static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}
static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev(HDLPRIV(hdl)));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype(HDLPRIV(hdl)));
}
/*ARGSUSED*/
static int
ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t *dest = (ulong_t *)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	*dest = getcr4();
	*rcp = CMI_SUCCESS;
	return (0);
}

static ulong_t
ntv_getcr4(cmi_hdl_impl_t *hdl)
{
	cpu_t *cp = HDLPRIV(hdl);
	ulong_t val;

	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);

	return (val);
}

/*ARGSUSED*/
static int
ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t val = (ulong_t)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	setcr4(val);
	*rcp = CMI_SUCCESS;
	return (0);
}

static void
ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
}
volatile uint32_t cmi_trapped_rdmsr;
/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}
static cmi_errno_t
ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	cpu_t *cp = HDLPRIV(hdl);

	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)valp));
}
volatile uint32_t cmi_trapped_wrmsr;
/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);
}
static cmi_errno_t
ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)&val));
}
/*ARGSUSED*/
static int
ntv_mcheck_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	int18();
	*rcp = CMI_SUCCESS;
	return (0);
}

static void
ntv_mcheck(cmi_hdl_impl_t *hdl)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_mcheck_xc, NULL, NULL);
}
/*
* Ops structure for handle operations.
*/
struct cmi_hdl_ops {
const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
void (*cmio_mcheck)(cmi_hdl_impl_t *);
} cmi_hdl_ops[] = {
/*
* CMI_HDL_NATIVE - ops when apparently running on bare-metal
*/
	{
		ntv_vendorstr,
		ntv_chiprevstr,
		ntv_mcheck,
	},
};
#ifndef __xpv
static void *
{
switch (class) {
case CMI_HDL_NATIVE: {
do {
return ((void *)cp);
}
return (NULL);
}
default:
return (NULL);
}
}
#endif
{
int idx;
return (NULL);
#ifndef __xpv
return (NULL);
#endif
if (cmi_hdl_hash == NULL) {
}
/*
* Somehow this (chipid, coreid, strandid) id tuple has
 * already been assigned!  This indicates that the
 * caller's logic in determining these values is busted,
 * or perhaps undermined by bad BIOS setup.  Complain,
 * and refuse to initialize this tuple again as bad things
 * will happen.
*/
"strandid %d handle already allocated!",
return (NULL);
}
/*
* Once we store a nonzero reference count others can find this
* handle via cmi_hdl_lookup etc. This initial hold on the handle
* is to be dropped only if some other part of cmi initialization
 * fails or, if it succeeds, at later cpu deconfigure.  Note that the
 * module private data we hold in cmih_cmi and cmih_cmidata
* is still NULL at this point (the caller will fill it with
* cmi_hdl_setcmi if it initializes) so consumers of handles
* should always be ready for that possibility.
*/
}
void
cmi_hdl_hold(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */

	atomic_inc_32(hdl->cmih_refcntp);
}
static int
cmi_hdl_canref(int hashidx)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	if (cmi_hdl_hash == NULL)
		return (0);

	refcntp = &cmi_hdl_hash[hashidx].cmhe_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);

		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}
void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int idx;

	ASSERT(*hdl->cmih_refcntp > 0);

	if (atomic_dec_32_nv(hdl->cmih_refcntp) > 0)
		return;

	idx = CMI_HDL_HASHIDX(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	cmi_hdl_hash[idx].cmhe_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}
void
{
}
void *
{
}
void
{
}
const struct cmi_mc_ops *
{
}
void *
{
}
cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	int idx = CMI_HDL_HASHIDX(chipid, coreid, strandid);

	if (!cmi_hdl_canref(idx))
		return (NULL);

	if (cmi_hdl_hash[idx].cmhe_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)cmi_hdl_hash[idx].cmhe_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)cmi_hdl_hash[idx].cmhe_hdlp);
}
cmi_hdl_t
cmi_hdl_any(void)
{
	int i;

	for (i = 0; i < CMI_HDL_HASHSZ; i++) {
		if (cmi_hdl_canref(i))
			return ((cmi_hdl_t)cmi_hdl_hash[i].cmhe_hdlp);
	}

	return (NULL);
}
void
cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
    void *arg1, void *arg2, void *arg3)
{
	int i;

	for (i = 0; i < CMI_HDL_HASHSZ; i++) {
		if (cmi_hdl_canref(i)) {
			cmi_hdl_impl_t *hdl = cmi_hdl_hash[i].cmhe_hdlp;

			if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3) ==
			    CMI_HDL_WALK_DONE) {
				cmi_hdl_rele((cmi_hdl_t)hdl);
				break;
			}

			cmi_hdl_rele((cmi_hdl_t)hdl);
		}
	}
}
void
{
}
void *
{
}
void *
{
}
enum cmi_hdl_class
cmi_hdl_class(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_class);
}
#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(chiprevstr, const char *)
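/*
 * Illustrative expansion (not in the original source):
 * CMI_HDL_OPFUNC(vendorstr, const char *) produces
 *
 *	const char *
 *	cmi_hdl_vendorstr(cmi_hdl_t ophdl)
 *	{
 *		return (HDLOPS(IMPLHDL(ophdl))->
 *		    cmio_vendorstr(IMPLHDL(ophdl)));
 *	}
 *
 * i.e., each public accessor simply indirects through the per-class
 * operations vector.
 */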
void
{
}
#ifndef __xpv
/*
 * Return hardware chip instance; cpuid_get_chipid provides this directly.
 */
uint_t
cmi_ntv_hwchipid(cpu_t *cp)
{
	return (cpuid_get_chipid(cp));
}

/*
 * Return core instance within a single chip.
 */
uint_t
cmi_ntv_hwcoreid(cpu_t *cp)
{
	return (cpuid_get_pkgcoreid(cp));
}

/*
 * Return strand number within a single core.  cpuid_get_clogid numbers
 * all execution units (strands, or cores in unstranded models) sequentially
 * within a single chip.
 */
uint_t
cmi_ntv_hwstrandid(cpu_t *cp)
{
	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (cpuid_get_clogid(cp) % strands_per_core);
}
#endif	/* __xpv */
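/*
 * Worked example (illustrative): on a chip with 4 logical cpus in
 * 2 cores, strands_per_core is 4 / 2 == 2, so clogid values 0..3
 * yield strand ids 0, 1, 0, 1 across the two cores.
 */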
void
{
}
void
{
}
{
	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
return (CMI_SUCCESS);
return (CMIERR_INTERPOSE);
}
{
/* Invalidate any interposed value */
return (CMI_SUCCESS);
}
void
{
}
void
{
int i;
for (i = 0; i < nregs; i++)
}
void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}

void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}
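/*
 * Usage note (illustrative): a platform that must not touch config
 * space directly during bringup or error handling can call
 * cmi_pcird_nohw() and/or cmi_pciwr_nohw(); reads are then satisfied
 * only from interposed values (a miss reads as 0) and writes only
 * invalidate any interposed value.
 */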
static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}

	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = (*pci_getb_func)(bus, dev, func, reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = (*pci_getw_func)(bus, dev, func, reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = (*pci_getl_func)(bus, dev, func, reg);
		break;
	default:
		val = 0;
	}

	return (val);
}
uint8_t
cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
	    hdl));
}

uint16_t
cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
	    hdl));
}

uint32_t
cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
}
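/*
 * Example (illustrative, hypothetical values): reading a 32-bit config
 * register at offset 0x44 of bus 0, dev 24, func 3 with no mapped
 * access handle:
 *
 *	int interpose;
 *	uint32_t val = cmi_pci_getl(0, 24, 3, 0x44, &interpose, NULL);
 *
 * where 'interpose' reports whether a stashed value was returned
 * instead of a hardware read.
 */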
void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}

void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}

void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}
static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			(*pci_putb_func)(bus, dev, func, reg, (uint8_t)val);
		break;
	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			(*pci_putw_func)(bus, dev, func, reg, (uint16_t)val);
		break;
	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			(*pci_putl_func)(bus, dev, func, reg, val);
		break;
	default:
		break;
	}
}
extern void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}

extern void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}

extern void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}