/* mcamd_drv.c revision 0b9e3e769e160ab52d990398d55e6318f737a940 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/machsystm.h>
#include <sys/x86_archext.h>
#include <sys/cpu_module.h>
#include <mcamd.h>
#include <mcamd_api.h>
int mc_quadranksupport = 0; /* set to 1 for a MB with quad rank support */
/*
 * NOTE(review): undocumented tunable; the name suggests it controls whether
 * the driver stays attached (holds its nodes) — confirm intended semantics.
 */
int mc_hold_attached = 1;
/*
 * NOTE(review): this definition is truncated in this view — the function
 * name and parameter list between "static void" and "{" are missing, as
 * are interior lines.  As shown, the unconditional "return;" makes the
 * mc->mc_snapshotgen++ statement unreachable; presumably an early-exit
 * condition was lost in truncation — recover from the original file.
 */
static void
{
return;
mc->mc_snapshotgen++;
}
/*
 * NOTE(review): truncated definition — name, parameters and most of the
 * body are missing.  The surviving "NV_ENCODE_XDR, KM_SLEEP" fragment
 * suggests an nvlist pack/size call (snapshot serialization) whose failure
 * returns -1; success returns 0.  Confirm against the original file.
 */
static int
{
return (0);
NV_ENCODE_XDR, KM_SLEEP) != 0)
return (-1);
return (0);
}
/*
 * NOTE(review): truncated lookup routine — name/parameters missing and the
 * brace count is unbalanced (a nested loop header appears to have been
 * lost).  Shape suggests: iterate over MC_FUNC_NUM functions, return the
 * matching mc_t, else NULL.  Do not edit without the complete source.
 */
static mc_t *
{
int i;
for (i = 0; i < MC_FUNC_NUM; i++) {
return (mc);
}
}
}
return (NULL);
}
/*
 * NOTE(review): three truncated mc_t * lookup/accessor routines follow;
 * their names, parameters and bodies are missing from this view.  The
 * last fragment returns a matching mc on success and NULL otherwise.
 */
static mc_t *
{
}
static mc_t *
{
}
static mc_t *
{
return (mc);
}
return (NULL);
}
/*
 * Revision map entry: maps CPU family/model/stepping information to a
 * human-readable revision name.
 * NOTE(review): the table initializer below supplies 5 fields but only
 * rm_name survives in this view of the struct — the other members (and
 * any non-sentinel table rows) were lost in truncation.
 */
typedef struct mc_rev_map {
const char *rm_name;
} mc_rev_map_t;
static const mc_rev_map_t mc_rev_map[] = {
{ 0, 0, 0, MC_REV_UNKNOWN, NULL }	/* sentinel / unknown revision */
};
/*
 * NOTE(review): truncated — name/parameters and the scan loop over
 * mc_rev_map[] are missing.  Returns the matching revision-map entry
 * for (presumably) the chip's first CPU.
 */
static const mc_rev_map_t *
{
const mc_rev_map_t *rm;
/*
* For the moment, we assume that both cores in multi-core chips will
* be of the same revision, so we'll confine our revision check to
* the first CPU pointed to by this chip.
*/
break;
}
return (rm);
}
/*
 * NOTE(review): two truncated static void helpers — names, parameters and
 * bodies missing; the first fragment's braces are unbalanced as shown.
 */
static void
{
int i;
}
}
static void
{
int valfound;
}
/*
 * NOTE(review): truncated nvlist builder (returns mcnvl) — appears to
 * assemble per-chip-select (cslist) and per-dimm (dimmlist) nvlists and
 * free the temporaries after adding them.  Name, parameters and most of
 * the body are missing; braces are unbalanced as shown.
 */
static nvlist_t *
{
int nelem, i;
}
for (i = 0; i < nelem; i++)
nvlist_free(cslist[i]);
int ncs = 0;
for (i = 0; i < MC_CHIP_DIMMRANKMAX; i++) {
}
}
for (i = 0; i < nelem; i++)
nvlist_free(dimmlist[i]);
return (mcnvl);
}
/*
 * NOTE(review): truncated — scans up to MC_CHIP_DIMMRANKMAX entries for a
 * free/matching slot and asserts one was found.  The loop's match
 * condition and the function's name/parameters are missing from view.
 */
static void
{
int i;
for (i = 0; i < MC_CHIP_DIMMRANKMAX; i++) {
break;
}
}
ASSERT(i != MC_CHIP_DIMMRANKMAX);
}
/*
 * NOTE(review): truncated mc_dimm_t constructor/lookup — name, parameters
 * and body missing; only the return of mcd survives.
 */
static mc_dimm_t *
{
return (mcd);
}
/*
* A chip-select is associated with up to 2 dimms, and a single dimm may
* have up to 4 associated chip-selects (in the presence of quad-rank support
* on the motherboard). How we number our dimms is determined by the MC
* config. This function may be called by multiple chip-selects for the
* same dimm(s).
*/
/*
 * NOTE(review): truncated — associates a chip-select with its (up to
 * ndimm) dimms, first checking whether another chip-select already
 * created them (nfound), then creating/linking the remainder.  The match
 * condition, the early-return guard and the if/else bodies are missing.
 */
static void
{
int i;
int nfound = 0;
/*
* Has some other chip-select already created this dimm or dimms?
*/
for (i = 0; i < ndimm; i++) {
nfound++;
}
}
}
return;
for (i = 0; i < ndimm; i++) {
else
}
}
/*
 * A placeholder for a future implementation that works this out from
 * smbios or SPD information.  For now we simply report whether the
 * mc_quadranksupport tunable has been set nonzero.
 */
/*
 * NOTE(review): function name and parameter list are missing from this
 * view.  Returns nonzero iff the mc_quadranksupport tunable is set.
 */
/*ARGSUSED*/
static int
{
return (mc_quadranksupport != 0);
}
/*
* Create the DIMM structure for this MC. There are a number of unknowns,
* such as the number of DIMM slots for this MC, the number of chip-select
* ranks supported for each DIMM, how the slots are labelled etc.
*
* SMBIOS information can help with some of this (if the bios implementation is
* complete and accurate, which is often not the case):
*
* . A record is required for each SMB_TYPE_MEMDEVICE slot, whether populated
* or not. The record should reference the associated SMB_TYPE_MEMARRAY,
* so we can figure out the number of slots for each MC. In practice some
* smbios implementations attribute all slots (from multiple chips) to
* a single memory array.
*
* . SMB_TYPE_MEMDEVICEMAP records indicate how a particular SMB_TYPE_MEMDEVICE
* has been mapped. Some smbios implementations produce rubbish here, or get
* confused when cs bank interleaving is enabled or disabled, but we can
* perform some validation of the information before using it. The record
* information is not well suited to handling cs bank interleaving since
* it really only provides for a device to have a few contiguous mappings
* and with cs interleave we have lots of little chunks interleaved across
* the devices. If we assume that the bios has followed the BKDG algorithm
* for setting up cs interleaving (which involves assigning contiguous
* and adjacent ranges to the chip selects and then swapping some
* base and mask hi and lo bits) then we can attempt to interpret the
* bits to establish the interleave - that seems to cover at least some
* smbios implementations. Even if that assumption appears good it is
* also not clear which MEMDEVICE records correspond to LODIMMs and which
* to UPDIMMs in a DIMM pair (128 bit MC mode) - we have to interpret the
* Device Locator and Bank Locator labels.
*
* We also do not know how many chip-select banks reside on individual
* DIMMs. For instance we cannot distinguish a system that supports 8
* DIMM slots per chip (one CS line each, thereby supporting only single-rank
* DIMMs) vs a system that has just 4 slots per chip and which routes
* 2 CS lines to each pair (thereby supporting dual rank DIMMs). In each case
* we would discover 8 active chip-selects.
*
* So the task of establishing the real DIMM configuration is complex, likely
* requiring some combination of good SMBIOS data and perhaps our own access
* to SPD information. Instead we opt for a canonical numbering scheme,
* derived from the 'AMD Athlon (TM) 64 FX and AMD Opteron (TM) Processors
* Motherboard Design Guide' (AMD publication #25180).
*/
/*
 * NOTE(review): truncated — derives canonical DIMM numbering (ldimmno)
 * from the chip-select number, quad-rank routing and the 64/128-bit MC
 * mode.  The name/parameters, the cs-to-dimm arithmetic and the mcmode
 * assignment are all missing from this view.
 */
static void
{
int mcmode;
int ldimmno; /* logical DIMM pair number, 0 .. 3 */
if (quadrank) {
/*
* Quad-rank support. We assume that any of cs#
* 4/5/6/7 that we have discovered active are routed
* for quad rank support as described in the MB
* design guide:
* DIMM0: CS# 0, 1, 4 and 5
* DIMM1: CS# 2, 3, 6 and 7
*/
} else {
/*
* DIMM0: CS# 0 and 1
* DIMM1: CS# 2 and 3
* DIMM2: CS# 4 and 5
* DIMM3: CS# 6 and 7
*/
}
if (mcmode == 128) {
/* 128-bit data width mode - dimms present in pairs */
} else {
/* 64-bit data width mode - only even numbered dimms */
}
}
}
/*
 * NOTE(review): truncated mc_cs_t constructor — name, parameters and body
 * missing; only the return of mccs survives.
 */
static mc_cs_t *
{
return (mccs);
}
/*
* Function 1 Configuration - Address Map (see BKDG 3.4.4 DRAM Address Map)
*
* Read the Function 1 Address Map for each potential DRAM node. The Base
* Address for a node gives the starting system address mapped at that node,
* and the limit gives the last valid address mapped at that node. Regions for
* different nodes should not overlap, unless node-interleaving is enabled.
* The base register also indicates the node-interleaving settings (IntlvEn).
* The limit register includes IntlvSel which determines which 4K blocks will
* be routed to this node and the destination node ID for addresses that fall
* within the [base, limit] range - this must match the pair number.
*/
/*
 * NOTE(review): name/parameter line missing — per the mc_bind_map table
 * later in this file this is presumably mc_mkprops_addrmap.  Walks the
 * Function 1 DRAM base/limit register pairs for each node, skipping empty
 * nodes and ranges not local to this node.  The property-creation calls
 * and the DRAM Hole register handling were lost in truncation.
 */
static void
{
int i;
for (i = 0; i < MC_AM_REG_NODE_NUM; i++) {
/*
* Don't create properties for empty nodes.
*/
if ((lim[i] & MC_AM_DL_DRAMLIM_MASK) == 0)
continue;
/*
* Don't create properties for DIMM ranges that aren't local
* to this node.
*/
continue;
}
/*
* The Function 1 DRAM Hole Address Register tells us which node(s)
* own the DRAM space that is hoisted above 4GB, together with the
* hole base and offset for this node.
*/
}
/*
* Function 2 configuration - DRAM Controller
*/
/*
 * NOTE(review): name/parameter line missing — presumably the DRAM
 * controller (Function 2) property builder referenced by mc_bind_map.
 * Reads DRAM Config, Bank Address Mapping and CS Base/Mask registers,
 * creates a cs node per enabled chip-select, then divines the DIMM
 * configuration.  Several register reads and the cs-node creation calls
 * were lost in truncation; braces are unbalanced as shown.
 */
static void
{
int wide = 0; /* 128-bit access mode? */
int i;
/*
* Read Function 2 DRAM Configuration High and Low registers and
* weld them together into a 64-bit value. The High component
* is mostly concerned with memory clocks etc and we'll not have
* any use for that. The Low component tells us if ECC is enabled,
* if we're in 64- or 128-bit MC mode, how the upper chip-selects
* are mapped, which chip-select pairs are using x4 parts, etc.
*/
/*
* Read Function 2 DRAM Bank Address Mapping. This tells us
* whether bank swizzle mode is enabled, and also encodes
* the type of DIMM module in use for each chip-select pair.
*/
/*
* Read Function 2 Configuration Registers for DRAM CS Base 0 thru 7
* and DRAM CS Mask 0 thru 7. The Base registers give us the
* BaseAddrHi and BaseAddrLo from which the base can be constructed,
* and whether this chip-select bank is enabled (CSBE). The
* Mask registers give us AddrMaskHi and AddrMaskLo from which
* a full mask can be constructed.
*/
/*
* Create a cs node for each enabled chip-select
*/
for (i = 0; i < MC_CHIP_NCS; i++) {
if (!(base[i] & MC_DC_CSB_CSBE)) {
mcp->mcp_disabled_cs++;
continue;
}
continue;
sz);
else
/*
* Check for cs bank interleaving - some low-order mask bits
* are clear if cs interleaving is active.
* NOTE(review): the original comment was truncated mid-
* sentence; wording reconstructed from the bit-counting
* loop below — confirm against the full source.
*/
if (!mcp->mcp_csbank_intlv) {
for (bitno = MC_DC_CSM_MASKLO_LOBIT;
ibits++;
}
if (ibits > 0)
}
}
/*
* Now that we have discovered all active chip-selects we attempt
* to divine the associated DIMM configuration.
*/
}
/*
 * Map from devinfo binding name to the device-node "model" property and
 * (per the table entries) a property-construction callback.
 * NOTE(review): the struct's closing "} mc_bind_map_t;" line, at least one
 * member (the callback pointer), the table rows' leading fields and the
 * final row/sentinel are missing from this view.
 */
typedef struct mc_bind_map {
const char *bm_bindnm; /* attachment binding name */
const char *bm_model; /* value for device node model property */
static const mc_bind_map_t mc_bind_map[] = {
"AMD Memory Controller (HT Configuration)", NULL },
"AMD Memory Controller (Address Map)", mc_mkprops_addrmap },
"AMD Memory Controller (DRAM Controller & HT Trace)",
};
/*
 * NOTE(review): truncated — name/parameters and the guard conditions that
 * produce the EINVAL returns are missing.  Shape (EINVAL checks then
 * return 0) suggests a cb_ops open(9E) entry point — confirm.
 */
/*ARGSUSED*/
static int
{
return (EINVAL);
return (EINVAL);
}
return (0);
}
/*
 * NOTE(review): truncated — name/parameters missing; trivially returns 0.
 * Position suggests a cb_ops close(9E) entry point — confirm.
 */
/*ARGSUSED*/
static int
{
return (0);
}
/*
 * NOTE(review): truncated ioctl(9E) handler — name/parameters, the EINVAL
 * guard conditions, the snapshot-copyout calls inside each case and any
 * default: arm are missing.  Services MC_IOC_SNAPSHOT_INFO and
 * MC_IOC_SNAPSHOT after refreshing the snapshot via mc_snapshot_update();
 * a failed update returns EIO.
 */
/*ARGSUSED*/
static int
{
int rc = 0;
return (EINVAL);
return (EINVAL);
}
if (mc_snapshot_update(mc) < 0) {
return (EIO);
}
switch (cmd) {
case MC_IOC_SNAPSHOT_INFO: {
mode) < 0)
break;
}
case MC_IOC_SNAPSHOT:
mode) < 0)
break;
}
return (rc);
}
/*
 * NOTE(review): cb_ops(9S) initializer fragment — the declaration line
 * (presumably "static struct cb_ops mc_cb_ops = {"), the open/close/ioctl
 * entries and the trailing members were lost in truncation; only the
 * nodev/nochpoll placeholder entries survive.
 */
nodev, /* not a block driver */
nodev, /* no print routine */
nodev, /* no dump routine */
nodev, /* no read routine */
nodev, /* no write routine */
nodev, /* no devmap routine */
nodev, /* no mmap routine */
nodev, /* no segmap routine */
nochpoll, /* no chpoll routine */
0, /* not a STREAMS driver */
};
/*
 * NOTE(review): getinfo(9E) entry point (mc_getinfo per the dev_ops table
 * below); the name/parameter line and the bodies that fill in *resultp
 * for DDI_INFO_DEVT2DEVINFO / DDI_INFO_DEVT2INSTANCE are missing.
 */
/*ARGSUSED*/
static int
{
int rc = DDI_SUCCESS;
if (infocmd != DDI_INFO_DEVT2DEVINFO &&
return (DDI_FAILURE);
}
rc = DDI_FAILURE;
} else if (infocmd == DDI_INFO_DEVT2DEVINFO) {
} else {
}
return (rc);
}
/*
 * NOTE(review): four truncated definitions — an FM error handler that
 * reports DDI_FM_NONFATAL, two empty-bodied static void helpers, and an
 * mc_t constructor/lookup returning NULL on failure.  All names,
 * parameters and interior logic are missing from this view.
 */
/*ARGSUSED2*/
static int
{
return (DDI_FM_NONFATAL);
}
static void
{
}
static void
{
}
static mc_t *
{
return (NULL);
return (mc);
}
/*
 * NOTE(review): attach(9E) entry point (mc_attach per the dev_ops table
 * below), heavily truncated — the parameter line, the bind-name scan over
 * mc_bind_map[], the unit-address/node-number parsing, property values,
 * minor-node creation and the cpu registration loop body are all missing.
 * Only the control-flow skeleton and original comments survive; recover
 * the full body from the original file before editing.
 */
static int
{
const mc_bind_map_t *bm;
const char *bindnm;
long unitaddr;
if (cmd != DDI_ATTACH)
return (DDI_FAILURE);
break;
}
}
return (DDI_FAILURE);
/*
* We need the device number, which corresponds to the processor node
* number plus 24. The node number can then be used to associate this
* memory controller device with a given processor chip.
*/
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
break;
}
/* Integrate this memory controller device into existing set */
/*
* We don't complain here because this is a legitimate
* path for MP systems. On those machines, we'll attach
* before all CPUs have been initialized, and thus the
* chip verification in mc_create will fail. We'll be
* reattached later for those CPUs.
*/
return (DDI_FAILURE);
}
} else {
}
/* Beyond this point, we're committed to creating this node */
/*
* Add the common properties to this node, and then add any properties
* that are specific to this node based upon its configuration space.
*/
(void) ddi_prop_update_string(DDI_DEV_T_NONE,
(void) ddi_prop_update_int(DDI_DEV_T_NONE,
}
/*
* If this is the last node to be attached for this memory controller,
* create the minor node and set up the properties.
*/
if (func == MC_FUNC_DEVIMAP) {
}
/*
* Register the memory controller for every CPU of this chip.
* Then attempt to enable h/w memory scrubbers for this node.
* If we are successful, disable the software memory scrubber.
*/
do {
if (rc)
}
return (DDI_SUCCESS);
}
/*
 * NOTE(review): detach(9E) entry point (mc_detach per the dev_ops table
 * below); name/parameters missing.  As shown it always fails, which is
 * consistent with the mc_hold_attached tunable above — confirm against
 * the full source.
 */
/*ARGSUSED*/
static int
{
return (DDI_FAILURE);
}
/*
 * NOTE(review): dev_ops(9S) initializer fragment — the declaration line
 * (presumably "static struct dev_ops mc_ops = {") was lost in truncation;
 * the member entries themselves appear complete.
 */
DEVO_REV, /* devo_rev */
0, /* devo_refcnt */
mc_getinfo, /* devo_getinfo */
nulldev, /* devo_identify */
nulldev, /* devo_probe */
mc_attach, /* devo_attach */
mc_detach, /* devo_detach */
nodev, /* devo_reset */
&mc_cb_ops, /* devo_cb_ops */
NULL, /* devo_bus_ops */
NULL /* devo_power */
};
/*
 * NOTE(review): modldrv/modlinkage fragments — the modldrv declaration
 * line and its mod_driverops/&mc_ops members, and the modlinkage's
 * MODREV_1 and NULL-terminator entries, are missing from this view.
 */
"Memory Controller for AMD processors",
};
static struct modlinkage modlinkage = {
(void *)&modldrv,
};
/*
 * Loadable-module entry point: register this driver's linkage with the
 * kernel module framework.
 */
int
_init(void)
{
	int err;

	err = mod_install(&modlinkage);
	return (err);
}
/*
 * NOTE(review): truncated _info(9E) wrapper (its name line and mod_info
 * call are missing) followed by a truncated _fini(9E).  As shown, _fini
 * reads rc before any assignment and its second return is unreachable —
 * the mod_remove() call and failure check were lost in truncation.
 */
int
{
}
int
_fini(void)
{
int rc;
return (rc);
return (0);
}