drmach.c revision f500b19684bd0346ac05bec02a50af07f369da1a
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/dditypes.h>
#include <sys/ndi_impldefs.h>
#include <sys/processor.h>
#include <sys/cheetahregs.h>
#include <sys/mem_config.h>
#include <sys/ddi_impldefs.h>
#include <sys/machsystm.h>
#include <sys/autoconf.h>
#include <sys/sysmacros.h>
#include <sys/prom_plat.h>
#include <vm/seg_kmem.h>
#include <sys/mem_cage.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
#include <sys/plat_ecc_dimm.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/sc_gptwocfg.h>
#include <sys/iosramreg.h>
#include <sys/iosramio.h>
#include <sys/iosramvar.h>
#include <sys/sbd_ioctl.h>
#include <sys/sysevent.h>
/* defined in ../ml/drmach.il.cpp */
extern void flush_dcache_il(void);
extern void flush_icache_il(void);
extern void flush_pcache_il(void);
/* defined in ../ml/drmach_asm.s */
extern uint64_t lddsafconfig(void);
extern int man_dr_attach(dev_info_t *);
extern int man_dr_detach(dev_info_t *);
#define DRMACH_SLICE_MASK 0x1Full
/*
* DRMACH_MEM_SLICE_SIZE and DRMACH_MEM_USABLE_SLICE_SIZE define the
* available address space and the usable address space for every slice.
* There must be a distinction between the available and usable due to a
* restriction imposed by CDC memory size.
*/
#define DRMACH_MC_NBANKS 4
#define DRMACH_EMU_ACT_STATUS_OFFSET 0x50
#define DRMACH_EMU_ACT_STATUS_ADDR(mp) \
/*
* The Cheetah's Safari Configuration Register and the Schizo's
* equivalent register place the relevant fields in the
* same bit locations with in their register word. This source code takes
* advantage of that by defining a single set of macros here,
* which are shared by various Cheetah and Schizo drmach routines.
*/
#define DRMACH_L1_SET_LPA(b) \
(((b)->flags & DRMACH_NULL_PROC_LPA) == 0)
#define DRMACH_CPU_SRAM_ADDR 0x7fff0900000ull
#define DRMACH_CPU_SRAM_SIZE 0x20000ull
/*
* Name properties for frequently accessed device nodes.
*/
#define DRMACH_CPU_NAMEPROP "cpu"
#define DRMACH_CMP_NAMEPROP "cmp"
#define DRMACH_AXQ_NAMEPROP "address-extender-queue"
#define DRMACH_PCI_NAMEPROP "pci"
/*
* Maximum value of processor Safari Timeout Log (TOL) field of
* Safari Config reg (7 secs).
*/
/*
* drmach_board_t flag definitions
*/
#define DRMACH_NULL_PROC_LPA 0x1
typedef struct {
} drmach_reg_t;
typedef struct {
struct drmach_node *node;
void *data;
typedef struct drmach_node {
void *here;
int *len);
struct drmach_node *pnode);
typedef struct {
int min_index;
int max_index;
int arr_sz;
typedef struct {
void *isa;
void (*dispose)(drmachid_t);
char name[MAXNAMELEN];
struct drmach_board;
typedef struct drmach_board drmach_board_t;
typedef struct {
const char *type;
int portid;
int unum;
int busy;
int powered;
typedef struct drmach_cpu {
int coreid;
} drmach_cpu_t;
typedef struct drmach_mem {
struct drmach_mem *next;
} drmach_mem_t;
typedef struct drmach_io {
} drmach_io_t;
struct drmach_board {
int bnum;
int assigned;
int powered;
int connected;
int empty;
int cond;
char type[BD_TYPELEN];
};
typedef struct {
int flags;
typedef struct {
int ndevs;
void *a;
typedef struct drmach_casmslot {
int valid;
int slice;
typedef enum {
typedef struct {
void *isa;
void *earg;
/*
* The following global is read as a boolean value, non-zero is true.
* If zero, DR copy-rename and cpu poweron will not set the processor
* LPA settings (CBASE, CBND of Safari config register) to correspond
* to the current memory slice map. LPAs of processors present at boot
* will remain as programmed by POST. LPAs of processors on boards added
* by DR will remain NULL, as programmed by POST. This can be used to
* to override the per-board L1SSFLG_THIS_L1_NULL_PROC_LPA flag set by
* POST in the LDCD (and copied to the GDCD by SMS).
*
* drmach_reprogram_lpa and L1SSFLG_THIS_L1_NULL_PROC_LPA do not apply
* to Schizo device LPAs. These are always set by DR.
*/
static int drmach_reprogram_lpa = 1;
/*
* Jaguar CPUs on port zero (see drmach_iocage_exclude_jaguar_port_zero below)
* can fail to receive an XIR. To workaround this issue until a hardware
* fix is implemented, we will exclude the selection of these CPUs.
* Setting this to 0 will allow their selection again.
*/
static int drmach_iocage_exclude_jaguar_port_zero = 1;
static int drmach_initialized;
static drmach_array_t *drmach_boards;
static int drmach_cpu_delay = 1000;
static int drmach_cpu_ntries = 50000;
static kmutex_t drmach_slice_table_lock;
/*
* Setting to non-zero will enable delay before all disconnect ops.
*/
static int drmach_unclaim_delay_all;
/*
* Default delay is slightly greater than the max processor Safari timeout.
* This delay is intended to ensure the outstanding Safari activity has
* retired on this board prior to a board disconnect.
*/
/*
* By default, DR of non-Panther procs is not allowed into a Panther
* domain with large page sizes enabled. Setting this to 0 will remove
* the restriction.
*/
static int drmach_large_page_restriction = 1;
/*
* Used to pass updated LPA values to procs.
* Protocol is to clear the array before use.
*/
volatile uchar_t *drmach_xt_mb;
volatile uint64_t drmach_xt_ready;
static kmutex_t drmach_xt_mb_lock;
static int drmach_xt_mb_size;
static kmutex_t drmach_bus_sync_lock;
drmach_board_t *, int, drmachid_t *);
static void drmach_bus_sync_list_update(void);
static void drmach_slice_table_update(drmach_board_t *, int);
static int drmach_portid2bnum(int);
static int drmach_panther_boards(void);
static int drmach_name2type_idx(char *);
#ifdef DEBUG
int drmach_debug = 0; /* set to non-zero to enable debug messages */
#else
#endif /* DEBUG */
#define DRMACH_IS_BOARD_ID(id) \
((id != 0) && \
#define DRMACH_IS_CPU_ID(id) \
((id != 0) && \
#define DRMACH_IS_MEM_ID(id) \
((id != 0) && \
#define DRMACH_IS_IO_ID(id) \
((id != 0) && \
#define DRMACH_IS_DEVICE_ID(id) \
((id != 0) && \
#define DRMACH_IS_ID(id) \
((id != 0) && \
#define DRMACH_INTERNAL_ERROR() \
static char *drmach_ie_fmt = "drmach.c %d";
static struct {
const char *name;
const char *type;
} drmach_name2type[] = {
};
/*
* drmach autoconfiguration data structures and interfaces
*/
extern struct mod_ops mod_miscops;
"Sun Fire 15000 DR"
};
static struct modlinkage modlinkage = {
(void *)&modlmisc,
};
/*
* access to drmach_boards array between status and board lookup
* as READERS, and assign, and unassign threads as WRITERS.
*/
static krwlock_t drmach_boards_rwlock;
static kmutex_t drmach_i_lock;
static kmutex_t drmach_iocage_lock;
static kcondvar_t drmach_iocage_cv;
static int drmach_iocage_is_busy = 0;
static caddr_t drmach_iocage_vaddr;
static int drmach_iocage_size = 0;
static int drmach_is_cheetah = -1;
int
_init(void)
{
int err;
}
return (err);
}
int
_fini(void)
{
/*
 * Module unload entry point: tears down drmach state via
 * drmach_fini() and returns `err` to the module framework.
 *
 * NOTE(review): in this fragment `err` is never assigned before
 * being returned, and drmach_fini() is called unconditionally --
 * presumably an `err = mod_remove(&modlinkage)` call (guarding the
 * teardown) was lost from this view; confirm against the full
 * source before relying on this code.
 */
static void drmach_fini(void);
int err;
drmach_fini();
return (err);
}
int
{
}
/*
* drmach_node_* routines serve the purpose of separating the
* rest of the code from the device tree and OBP. This is necessary
* because of In-Kernel-Probing. Devices probed after stod, are probed
* by the in-kernel-prober, not OBP. These devices, therefore, do not
* have dnode ids.
*/
static int
{
static char *fn = "drmach_node_obp_get_parent";
if (nodeid == OBP_NONODE) {
return (-1);
}
return (-1);
}
return (0);
}
static pnode_t
{
}
typedef struct {
int err;
int
{
/*
* dip doesn't have to be held here as we are called
* from ddi_walk_devs() which holds the dip.
*/
/*
* Set "here" to NULL so that unheld dip is not accessible
* outside ddi_walk_devs()
*/
return (DDI_WALK_TERMINATE);
else
return (DDI_WALK_CONTINUE);
}
static int
{
/* initialized args structure for callback */
/*
* Root node doesn't have to be held in any way.
*/
(void *)&nargs);
}
static int
{
int rv;
/* initialized args structure for callback */
/* save our new position within the tree */
rv = 0;
while (nodeid != OBP_NONODE) {
if (rv)
break;
while (child != OBP_NONODE) {
if (rv)
break;
}
/* save our new position within the tree */
}
return (rv);
}
static int
{
static char *fn = "drmach_node_ddi_get_parent";
return (-1);
}
return (-1);
}
return (0);
}
/*ARGSUSED*/
static pnode_t
{
}
static drmach_node_t *
drmach_node_new(void)
{
if (drmach_initialized) {
} else {
}
return (np);
}
static void
{
}
/*
* Check if a CPU node is part of a CMP.
*/
static int
{
return (0);
}
return (1);
}
return (0);
}
static dev_info_t *
{
if (nodeid == OBP_NONODE)
return (NULL);
if (dip) {
/*
* The branch rooted at dip will have been previously
* held, or it will be the child of a CMP. In either
* case, the hold acquired in e_ddi_nodeid_to_dip()
* is not needed.
*/
}
return (dip);
}
static dev_info_t *
{
}
static int
{
}
static int
{
int rv = 0;
static char *fn = "drmach_node_ddi_get_prop";
rv = -1;
rv = -1;
}
return (rv);
}
/* ARGSUSED */
static int
{
int rv = 0;
static char *fn = "drmach_node_obp_get_prop";
if (nodeid == OBP_NONODE) {
rv = -1;
rv = -1;
} else {
}
return (rv);
}
static int
{
int rv = 0;
rv = -1;
rv = -1;
}
return (rv);
}
static int
{
int rv;
if (nodeid == OBP_NONODE)
rv = -1;
else {
}
return (rv);
}
static drmachid_t
{
dup = drmach_node_new();
return (dup);
}
/*
* drmach_array provides convenient array construction, access,
* bounds checking and array destruction logic.
*/
static drmach_array_t *
{
return (arr);
} else {
return (0);
}
}
static int
{
return (-1);
else {
return (0);
}
/*NOTREACHED*/
}
static int
{
return (-1);
else {
return (0);
}
/*NOTREACHED*/
}
static int
{
int rv;
*idx += 1;
return (rv);
}
static int
{
int rv;
*idx += 1;
*idx += 1;
return (rv);
}
static void
{
int idx;
int rv;
while (rv == 0) {
/* clear the array entry */
}
}
static gdcd_t *
{
/* read the gdcd, bail if magic or ver #s are not what is expected */
bail:
return (NULL);
goto bail;
goto bail;
}
return (gdcd);
}
static void
{
}
/*ARGSUSED*/
{
/*
* On Starcat, there is no CPU driver, so it is
* not necessary to configure any CPU nodes.
*/
if (DRMACH_IS_CPU_ID(id)) {
return (NULL);
}
for (; id; ) {
if (!DRMACH_IS_DEVICE_ID(id))
/*
* We held this branch earlier, so at a minimum its
* root should still be present in the device tree.
*/
DRMACH_PR("drmach_configure: configuring DDI branch");
/*
* Record first failure but don't stop
*/
}
/*
* If non-NULL, fdip is returned held and must be
* released.
*/
}
}
if (DRMACH_IS_MEM_ID(id)) {
} else {
}
}
return (err);
}
static sbd_error_t *
{
char name[OBP_MAXDRVNAME];
if (rv) {
/* every node is expected to have a name */
"dip: 0x%p: property %s",
return (err);
}
i = drmach_name2type_idx(name);
/*
* Not a node of interest to dr - including "cmp",
* but it is in drmach_name2type[], which lets gptwocfg
* driver to check if node is OBP created.
*/
*idp = (drmachid_t)0;
return (NULL);
}
/*
* Derive a best-guess unit number from the portid value.
* Some drmach_*_new constructors (drmach_pci_new, for example)
* will overwrite the prototype unum value with one that is more
* appropriate for the device.
*/
if (device_id < 4)
else if (device_id == 8) {
unum = 0;
} else if (device_id == 9) {
unum = 1;
} else if (device_id == 0x1c) {
unum = 0;
} else if (device_id == 0x1d) {
unum = 1;
} else {
return (DRMACH_INTERNAL_ERROR());
}
}
static void
{
}
static drmach_board_t *
drmach_board_new(int bnum)
{
return (bp);
}
static void
{
}
static sbd_error_t *
{
if (!DRMACH_IS_BOARD_ID(id))
/*
* we need to know if the board's connected before
* issuing a showboard message. If it's connected, we just
* reply with status composed of cached info
*/
sizeof (dr_showboard_t));
if (err)
return (err);
switch (shb.test_status) {
case DR_TEST_STATUS_UNKNOWN:
case DR_TEST_STATUS_IPOST:
case DR_TEST_STATUS_ABORTED:
break;
case DR_TEST_STATUS_PASSED:
break;
case DR_TEST_STATUS_FAILED:
break;
default:
DRMACH_PR("Unknown test status=0x%x from SC\n",
break;
}
} else {
}
int rv;
int d_idx;
while (rv == 0) {
if (err)
break;
}
}
return (err);
}
typedef struct drmach_msglist {
int o_nretry; /* number of sending retries */
int f_error; /* mailbox framework error */
unused :6;
volatile int drmach_getmsg_thread_run; /* run flag for getmsg thr */
volatile int drmach_sendmsg_thread_run; /* run flag for sendmsg */
int drmach_mbox_istate; /* mailbox init state */
int drmach_mbox_iflag; /* set if init'd with SC */
int drmach_mbox_ipending; /* set if reinit scheduled */
/*
* Timeout values (in seconds) used when waiting for replies (from the SC) to
* requests that we sent. Since we only receive boardevent messages, and they
* are events rather than replies, there is no boardevent timeout.
*/
/*
* Delay (in seconds) used after receiving a non-transient error indication from
* an mboxsc_getmsg call in the thread that loops waiting for incoming messages.
*/
/*
* Timeout values (in milliseconds) for mboxsc_putmsg and mboxsc_getmsg calls.
*/
/*
* Normally, drmach_to_putmsg is set dynamically during initialization in
* drmach_mbox_init. This has the potentially undesirable side effect of
* overriding any administrator-tuned value. Setting
* drmach_use_tuned_putmsg_to to a non-zero value suppresses the
* dynamic setting of drmach_to_putmsg (thereby allowing it to be tuned in
* /etc/system).
*/
int drmach_use_tuned_putmsg_to = 0;
/* maximum conceivable message size for future mailbox protocol versions */
#define DRMACH_MAX_MBOX_MSG_SIZE 4096
/*ARGSUSED*/
void
{
int i, j;
#ifdef DEBUG
case DRMSG_BOARDEVENT:
if (dir) {
DRMACH_PR("ERROR!! outgoing BOARDEVENT\n");
} else {
DRMACH_PR("BOARDEVENT received:\n");
DRMACH_PR("init=%d ins=%d rem=%d asgn=%d\n",
DRMACH_PR("unasgn=%d avail=%d unavail=%d\n",
}
break;
case DRMSG_MBOX_INIT:
if (dir) {
DRMACH_PR("MBOX_INIT Request:\n");
} else {
DRMACH_PR("MBOX_INIT Reply:\n");
}
break;
case DRMSG_ASSIGN:
if (dir) {
DRMACH_PR("ASSIGN Request:\n");
} else {
DRMACH_PR("ASSIGN Reply:\n");
}
break;
case DRMSG_UNASSIGN:
if (dir) {
DRMACH_PR("UNASSIGN Request:\n");
} else {
DRMACH_PR("UNASSIGN Reply:\n");
}
break;
case DRMSG_CLAIM:
if (!dir) {
DRMACH_PR("CLAIM Reply:\n");
break;
}
DRMACH_PR("CLAIM Request:\n");
for (i = 0; i < 18; ++i) {
DRMACH_PR("exp%d: val=%d slice=0x%x\n", i,
for (j = 0; j < S0_LPORT_COUNT; j++) {
DRMACH_PR(" MC %2d: "
"MADR[%d] = 0x%lx, "
"MADR[%d] = 0x%lx\n", j,
DRMACH_PR(" : "
"MADR[%d] = 0x%lx, "
"MADR[%d] = 0x%lx\n",
}
}
break;
case DRMSG_UNCLAIM:
if (!dir) {
DRMACH_PR("UNCLAIM Reply:\n");
break;
}
DRMACH_PR("UNCLAIM Request:\n");
for (i = 0; i < 18; ++i) {
DRMACH_PR("exp%d: val=%d slice=0x%x\n", i,
for (j = 0; j < S0_LPORT_COUNT; j++) {
DRMACH_PR(" MC %2d: "
"MADR[%d] = 0x%lx, "
"MADR[%d] = 0x%lx\n", j,
DRMACH_PR(" : "
"MADR[%d] = 0x%lx, "
"MADR[%d] = 0x%lx\n",
}
}
break;
case DRMSG_UNCONFIG:
if (!dir) {
DRMACH_PR("UNCONFIG Reply:\n");
break;
}
DRMACH_PR("UNCONFIG Request:\n");
for (i = 0; i < 18; ++i) {
DRMACH_PR("exp%d: val=%d slice=0x%x\n", i,
for (j = 0; j < S0_LPORT_COUNT; j++) {
DRMACH_PR(" MC %2d: "
"MADR[%d] = 0x%lx, "
"MADR[%d] = 0x%lx\n", j,
DRMACH_PR(" : "
"MADR[%d] = 0x%lx, "
"MADR[%d] = 0x%lx\n",
}
}
break;
case DRMSG_POWERON:
if (dir) {
DRMACH_PR("POWERON Request:\n");
} else {
DRMACH_PR("POWERON Reply:\n");
}
break;
case DRMSG_POWEROFF:
if (dir) {
DRMACH_PR("POWEROFF Request:\n");
} else {
DRMACH_PR("POWEROFF Reply:\n");
}
break;
case DRMSG_TESTBOARD:
if (dir) {
DRMACH_PR("TESTBOARD Request:\n");
DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ",
DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n",
DRMACH_PR("\tforce=0x%x imm=0x%x\n",
} else {
DRMACH_PR("TESTBOARD Reply:\n");
DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ",
DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n",
DRMACH_PR("\trecovered=0x%x test status=0x%x\n",
}
break;
case DRMSG_ABORT_TEST:
if (dir) {
DRMACH_PR("ABORT_TEST Request:\n");
} else {
DRMACH_PR("ABORT_TEST Reply:\n");
}
DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ",
DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n",
break;
case DRMSG_SHOWBOARD:
if (dir) {
DRMACH_PR("SHOWBOARD Request:\n");
} else {
DRMACH_PR("SHOWBOARD Reply:\n");
DRMACH_PR(": empty=%d power=%d assigned=%d",
DRMACH_PR(": active=%d t_status=%d t_level=%d ",
}
break;
default:
DRMACH_PR("Unknown message type\n");
break;
}
DRMACH_PR("dr hdr:\n\tid=0x%x vers=0x%x cmd=0x%x exp=0x%x slot=0x%x\n",
#endif
php->error_code);
}
/*
* Callback function passed to taskq_dispatch when a mailbox reinitialization
* handshake needs to be scheduled. The handshake can't be performed by the
* thread that determines it is needed, in most cases, so this function is
* dispatched on the system-wide taskq pool of threads. Failure is reported but
* otherwise ignored, since any situation that requires a mailbox initialization
* handshake will continue to request the handshake until it succeeds.
*/
static void
drmach_mbox_reinit(void *unused)
{
DRMACH_PR("scheduled mailbox reinit running\n");
if (drmach_mbox_iflag == 0) {
/* need to initialize the mailbox */
if (serr) {
"mbox_init: MBOX_INIT failed ecode=0x%x",
}
if (!serr) {
drmach_mbox_iflag = 1;
}
}
drmach_mbox_ipending = 0;
}
/*
* To ensure sufficient compatibility with future versions of the DR mailbox
* protocol, we use a buffer that is large enough to receive the largest message
* that could possibly be sent to us. However, since that ends up being fairly
* large, allocating it on the stack is a bad idea. Fortunately, this function
* does not need to be MT-safe since it is only invoked by the mailbox
* framework, which will never invoke it multiple times concurrently. Since
* that is the case, we can use a static buffer.
*/
void
drmach_mbox_event(void)
{
int err;
char *hint = "";
int logsys = 0;
do {
/* don't try to interpret anything with the wrong version number */
drmach_mbox_iflag = 0;
/* schedule a reinit handshake if one isn't pending */
if (!drmach_mbox_ipending) {
drmach_mbox_ipending = 1;
} else {
"failed to schedule mailbox reinit");
}
}
return;
}
"Unsolicited mboxsc_getmsg failed: err=0x%x code=0x%x",
} else {
/* check for initialization event */
if (be->initialized) {
drmach_mbox_iflag = 0;
/* schedule a reinit handshake if one isn't pending */
if (!drmach_mbox_ipending) {
!= NULL) {
drmach_mbox_ipending = 1;
} else {
"failed to schedule mailbox reinit");
}
}
}
/* anything else will be a log_sysevent call */
if (be->board_insertion) {
DRMACH_PR("Board Insertion event received");
logsys++;
}
if (be->board_removal) {
DRMACH_PR("Board Removal event received");
logsys++;
}
if (be->slot_assign) {
DRMACH_PR("Slot Assign event received");
logsys++;
}
if (be->slot_unassign) {
DRMACH_PR("Slot Unassign event received");
logsys++;
}
if (be->slot_avail) {
DRMACH_PR("Slot Available event received");
logsys++;
}
if (be->slot_unavail) {
DRMACH_PR("Slot Unavailable event received");
logsys++;
}
DRMACH_PR("Power ON event received");
logsys++;
}
DRMACH_PR("Power OFF event received");
logsys++;
}
if (logsys)
}
}
static uint32_t
{
if (!(++drmach_msgid))
++drmach_msgid;
rv = drmach_msgid;
return (rv);
}
/*
* unlink an entry from the message transaction list
*
* caller must hold drmach_msglist_mutex
*/
void
{
} else {
}
if (entry == drmach_msglist_last) {
}
}
void
{
if (drmach_msglist_last) {
} else {
}
}
void
{
int err;
register int msgid;
while (drmach_getmsg_thread_run != 0) {
/* get a reply message */
command = 0;
transid = 0;
if (err) {
/*
* If mboxsc_getmsg returns ETIMEDOUT or EAGAIN, then
* the "error" is really just a normal, transient
* condition and we can retry the operation right away.
* Any other error suggests a more serious problem,
* ranging from a message being too big for our buffer
* (EMSGSIZE) to total failure of the mailbox layer.
* This second class of errors is much less "transient",
* so rather than retrying over and over (and getting
* the same error over and over) as fast as we can,
* we'll sleep for a while before retrying.
*/
"mboxsc_getmsg failed, err=0x%x", err);
}
continue;
}
drmach_mbox_prmsg(msg, 0);
"mailbox version mismatch 0x%x vs 0x%x",
drmach_mbox_iflag = 0;
/* schedule a reinit handshake if one isn't pending */
if (!drmach_mbox_ipending) {
!= NULL) {
drmach_mbox_ipending = 1;
} else {
"failed to schedule mailbox reinit");
}
}
continue;
}
} else
}
if (found) {
} else {
msgid);
}
}
}
}
drmach_getmsg_thread_run = -1;
thread_exit();
}
void
{
while (drmach_sendmsg_thread_run != 0) {
/*
* Search through the list to find entries awaiting
* transmission to the SC
*/
retry = 0;
continue;
}
if (!retry)
if (err) {
switch (err) {
case EAGAIN:
case EBUSY:
++retry;
continue;
case ETIMEDOUT:
} else {
++retry;
continue;
}
break;
default:
break;
}
} else {
}
retry = 0;
}
(void) cv_timedwait(&drmach_sendmsg_cv,
}
}
}
thread_exit();
}
void
{
while (entry) {
} else
}
}
}
static drmach_msglist_t *
{
return (listp);
}
static drmach_msglist_t *
{
int crv;
/* setup transaction list entry */
/* send mailbox message, await reply */
}
} else {
if (nosig)
to_val);
else
switch (crv) {
case -1: /* timed out */
"!msgid=0x%x reply timed out",
hdrp->message_id);
break;
case 0: /* signal received */
"operation interrupted by signal");
break;
default:
break;
}
}
/*
* If link is set for this entry, check to see if
* the linked entry has been replied to. If not,
* wait for the response.
* Currently, this is only used for ABORT_TEST functionality,
* wherein a check is made for the TESTBOARD reply when
* the ABORT_TEST reply is received.
*/
if (link) {
/*
* If the reply to the linked entry hasn't been
* received, clear the existing link->f_error,
* and await the reply.
*/
}
to_val);
switch (crv) {
case -1: /* timed out */
"!link msgid=0x%x reply timed out",
break;
default:
break;
}
}
}
}
return (listp);
}
static sbd_error_t *
{
char a_pnt[MAXNAMELEN];
int bnum;
/*
* If framework failure is due to signal, return "no error"
* error.
*/
drmach_mbox_iflag = 0;
else
}
a_pnt[0] = '\0';
case 0:
return (NULL);
case DRERR_NOACL:
case DRERR_NOT_ASSIGNED:
case DRERR_NOT_ACTIVE:
case DRERR_EMPTY_SLOT:
case DRERR_POWER_OFF:
case DRERR_TEST_IN_PROGRESS:
return (drerr_new(0, ESTC_TEST_IN_PROGRESS,
"%s", a_pnt));
case DRERR_TESTING_BUSY:
case DRERR_TEST_REQUIRED:
case DRERR_UNAVAILABLE:
case DRERR_RECOVERABLE:
return (drerr_new(0, ESTC_SMS_ERR_RECOVERABLE,
"%s", a_pnt));
case DRERR_UNRECOVERABLE:
"%s", a_pnt));
default:
}
}
static sbd_error_t *
{
int timeout = 0;
int ntries = 0;
int nosignals = 0;
if (msgtype != DRMSG_MBOX_INIT) {
if (drmach_mbox_iflag == 0) {
/* need to initialize the mailbox */
"!reinitializing DR mailbox");
/*
* If framework failure incoming is encountered on
* the MBOX_INIT [timeout on SMS reply], the error
* type must be changed before returning to caller.
* This is to prevent drmach_board_connect() and
* drmach_board_disconnect() from marking boards
* UNUSABLE based on MBOX_INIT failures.
*/
"!Changed mbox incoming to outgoing"
" failure on reinit");
sbd_err_clear(&err);
}
if (err) {
return (err);
}
drmach_mbox_iflag = 1;
}
}
/* setup outgoing mailbox header */
switch (msgtype) {
case DRMSG_MBOX_INIT:
ntries = 1;
nosignals = 0;
break;
case DRMSG_ASSIGN:
ntries = 1;
nosignals = 0;
break;
case DRMSG_UNASSIGN:
ntries = 1;
nosignals = 0;
break;
case DRMSG_POWERON:
ntries = 1;
nosignals = 0;
break;
case DRMSG_POWEROFF:
ntries = 1;
nosignals = 0;
break;
case DRMSG_SHOWBOARD:
ntries = 1;
nosignals = 0;
break;
case DRMSG_CLAIM:
ntries = 1;
nosignals = 1;
break;
case DRMSG_UNCLAIM:
ntries = 1;
nosignals = 1;
break;
case DRMSG_UNCONFIG:
ntries = 1;
nosignals = 0;
break;
case DRMSG_TESTBOARD:
ntries = 1;
nosignals = 0;
break;
default:
"Unknown outgoing message type 0x%x", msgtype);
err = DRMACH_INTERNAL_ERROR();
break;
}
/*
* For DRMSG_TESTBOARD attempts which have timed out, or
* been aborted due to a signal received after mboxsc_putmsg()
* has succeeded in sending the message, a DRMSG_ABORT_TEST
* must be sent.
*/
}
}
return (err);
}
static int
{
int err;
drmach_mbox_istate = 0;
/* register the outgoing mailbox */
NULL)) != 0) {
return (-1);
}
drmach_mbox_istate = 1;
/* setup the mboxsc_putmsg timeout value */
if (drmach_use_tuned_putmsg_to) {
} else {
MBOXSC_CMD_PUTMSG_TIMEOUT_RANGE, &mbxtoz)) != 0) {
drmach_to_putmsg = 60000;
} else {
DRMACH_PR("putmsg range is 0x%lx - 0x%lx value"
}
}
/* register the incoming mailbox */
drmach_mbox_event)) != 0) {
return (-1);
}
drmach_mbox_istate = 2;
/* initialize mutex for mailbox globals */
/* initialize mutex for mailbox re-init */
/* initialize mailbox message list elements */
drmach_mbox_istate = 3;
/* start mailbox sendmsg thread */
if (drmach_sendmsg_thread == NULL)
/* start mailbox getmsg thread */
if (drmach_getmsg_thread == NULL)
if (serr) {
return (-1);
}
drmach_mbox_iflag = 1;
drmach_mbox_ipending = 0;
return (0);
}
static int
{
if (drmach_mbox_istate > 2) {
"drmach_mbox_fini: waiting for mbox threads...");
while ((drmach_getmsg_thread_run == 0) ||
(drmach_sendmsg_thread_run == 0)) {
continue;
}
"drmach_mbox_fini: mbox threads done.");
}
if (drmach_mbox_istate) {
/* de-register the outgoing mailbox */
err);
rv = -1;
}
}
if (drmach_mbox_istate > 1) {
/* de-register the incoming mailbox */
err);
rv = -1;
}
}
return (rv);
}
static int
drmach_portid2bnum(int portid)
{
/*
 * Derive a board number from a Safari port id. The low five bits
 * of the port id identify the device within an expander; this
 * visible fragment classifies the device into slot 0 or slot 1.
 *
 * NOTE(review): this fragment assigns `slot` but never returns a
 * value -- the expander/board-number computation (and the return
 * statement) appears to have been lost from this view; confirm
 * against the full source.
 */
int slot;
switch (portid & 0x1f) {
case 0x1e: /* slot 0 axq registers */
slot = 0;
break;
case 8: case 9: /* cpu devices */
case 0x1f: /* slot 1 axq registers */
slot = 1;
break;
default:
/* unrecognized device encoding */
ASSERT(0); /* catch in debug kernels */
}
}
extern int axq_suspend_iopause;
static int
{
int i;
/*
* For Starcat, we must be children of the root devinfo node
*/
i = drmach_name2type_idx(name);
/*
* Only children of the root devinfo node need to be
* held (or released) for the duration
* of tree operations. This corresponds to the node types
* listed in the drmach_name2type array.
*/
if (i < 0) {
/* Not of interest to us */
return (DDI_WALK_PRUNECHILD);
}
if (*holdp) {
} else {
}
return (DDI_WALK_PRUNECHILD);
}
static int
drmach_init(void)
{
int bnum;
if (drmach_initialized) {
return (0);
}
gdcd = drmach_gdcd_new();
return (-1);
}
do {
int len;
int portid;
continue;
portid = -1;
if (portid == -1)
continue;
/* portid translated to an invalid board number */
" invalid property value, %s=%u",
/* clean up */
return (-1);
bp->stardrb_offset =
bp->stardrb_offset);
}
}
DRMACH_PR("gdcd size=0x%x align=0x%x PA=0x%x\n",
DRMACH_PR("drmach size=0x%x PA=0x%lx VA=0x%p\n",
}
if (drmach_iocage_size == 0) {
return (-1);
}
ASSERT(drmach_iocage_is_busy == 0);
if (drmach_mbox_init() == -1) {
}
/*
* Walk immediate children of devinfo root node and hold
* all devinfo branches of interest.
*/
hold = 1;
rdip = ddi_root_node();
drmach_initialized = 1;
/*
* To avoid a circular patch dependency between DR and AXQ, the AXQ
* rev introducing the axq_iopause_*_all interfaces should not regress
* when installed without the DR rev using those interfaces. By default,
* with the following axq flag set to zero, axq will not enable iopause
* interfaces during drmach_copy_rename.
*/
axq_suspend_iopause = 0;
return (0);
}
static void
drmach_fini(void)
{
if (drmach_initialized) {
/*
* Walk immediate children of the root devinfo node
* releasing holds acquired on branches in drmach_init()
*/
hold = 0;
rdip = ddi_root_node();
drmach_initialized = 0;
}
if (drmach_xt_mb != NULL) {
}
}
static void
{
/* get register address, read madr value */
} else {
}
}
static uint64_t *
{
int bank;
/* fetch mc's bank madr register value */
if (madr & DRMACH_MC_VALID_MASK) {
/* encode new base pa into madr */
madr &= ~DRMACH_MC_UM_MASK;
madr &= ~DRMACH_MC_LM_MASK;
if (local)
else
*p++ = madr;
}
}
return (p);
}
static uint64_t *
{
int rv;
int idx;
uint64_t last_scsr_pa = 0;
/* memory is always in slot 0 */
/* look up slot 1 board on same expander */
/* look up should never be out of bounds */
/* nothing to do when board is not found or has no devices */
return (p);
while (rv == 0) {
if (DRMACH_IS_IO_ID(id)) {
/*
* Skip all non-Schizo IO devices (only IO nodes
* that are Schizo devices have non-zero scsr_pa).
* Filter out "other" leaf to avoid writing to the
*/
scsr &= ~(DRMACH_LPA_BASE_MASK |
*p++ = scsr;
}
}
}
return (p);
}
/*
* For Panther MCs, append the MC idle reg address and drmach_mem_t pointer.
* The latter is returned when drmach_rename fails to idle a Panther MC and
* is used to identify the MC for error reporting.
*/
static uint64_t *
{
/* only slot 0 has memory */
if (local) {
*p++ = ASI_EMU_ACT_STATUS_VA; /* local ASI */
}
} else if (!local) {
}
}
return (p);
}
static sbd_error_t *
{
int rv;
/* verify supplied buffer space is adequate */
/* addr for all possible MC banks */
/* list section terminator */
(sizeof (uint64_t) * 1) +
(sizeof (uint64_t) * 2) +
/* list section terminator */
(sizeof (uint64_t) * 1) +
/* list section terminator */
(sizeof (uint64_t) * 1) +
/* list section terminator */
(sizeof (uint64_t) * 1) +
/* list section terminator */
(sizeof (uint64_t) * 1) +
/* list section terminator */
(sizeof (uint64_t) * 1) +
/* list terminator */
(sizeof (uint64_t) * 1));
/* copy bank list to rename script */
for (q = drmach_bus_sync_list; *q; q++, p++)
*p = *q;
/* list section terminator */
*p++ = 0;
/*
* Write idle script for MC on this processor. A script will be
* produced only if this is a Panther processor on the source or
* target board.
*/
/* list section terminator */
*p++ = 0;
/*
* Write idle script for all other MCs on source and target
* Panther boards.
*/
p = drmach_prep_pn_mc_idle(p, s_mp, 0);
p = drmach_prep_pn_mc_idle(p, t_mp, 0);
/* list section terminator */
*p++ = 0;
/*
* Step 1: Write source base address to target MC
* with present bit off.
* Step 2: Now rewrite target reg with present bit on.
*/
/* exchange base pa. include slice offset in new target base pa */
DRMACH_PR("preparing MC MADR rename script (master is CPU%d):\n",
/*
* Write rename script for MC on this processor. A script will
* be produced only if this processor is on the source or target
* board.
*/
}
}
}
}
/* list section terminator */
*p++ = 0;
/*
* Write rename script for all other MCs on source and target
* boards.
*/
continue;
}
continue;
}
/* list section terminator */
*p++ = 0;
DRMACH_PR("preparing AXQ CASM rename script (EXP%d <> EXP%d):\n",
rv = axq_do_casm_rename_script(&p,
if (rv == DDI_FAILURE)
return (DRMACH_INTERNAL_ERROR());
/* list section & final terminator */
*p++ = 0;
*p++ = 0;
#ifdef DEBUG
{
/* paranoia */
DRMACH_PR("MC bank base pa list:\n");
while (*q) {
uint64_t a = *q++;
DRMACH_PR("0x%lx\n", a);
}
/* skip terminator */
q += 1;
DRMACH_PR("local Panther MC idle reg (via ASI 0x4a):\n");
while (*q) {
q += 2;
}
/* skip terminator */
q += 1;
DRMACH_PR("non-local Panther MC idle reg (via ASI 0x15):\n");
while (*q) {
q += 2;
}
/* skip terminator */
q += 1;
DRMACH_PR("MC reprogramming script (via ASI 0x72):\n");
while (*q) {
uint64_t r = *q++; /* register address */
uint64_t v = *q++; /* new register value */
DRMACH_PR("0x%lx = 0x%lx, basepa 0x%lx\n",
r,
v,
DRMACH_MC_UM_TO_PA(v)|DRMACH_MC_LM_TO_PA(v));
}
/* skip terminator */
q += 1;
while (*q) {
q += 2;
}
/* skip terminator */
q += 1;
DRMACH_PR("AXQ reprogramming script:\n");
while (*q) {
q += 2;
}
/* verify final terminator is present */
ASSERT(*(q + 1) == 0);
DRMACH_PR("copy-rename script 0x%p, len %d\n",
if (drmach_debug)
DELAY(10000000);
}
#endif
return (NULL);
}
static void
{
int rv;
int d_idx;
while (rv == 0) {
if (DRMACH_IS_CPU_ID(d_id)) {
}
}
}
(void *) &s1bp);
}
}
}
{
extern void drmach_rename_end(void);
int len;
uint_t *p, *q;
if (!DRMACH_IS_MEM_ID(s_id))
if (!DRMACH_IS_MEM_ID(t_id))
/* get starting physical address of target memory */
if (err)
return (err);
/* calculate slice offset mask from slice size */
/* calculate source and target base pa */
/* paranoia */
/* adjust copy memlist addresses to be relative to copy base pa */
}
#ifdef DEBUG
{
DRMACH_PR("source copy span: base pa 0x%lx, end pa 0x%lx\n",
DRMACH_PR("target copy span: base pa 0x%lx, end pa 0x%lx\n",
DRMACH_PR("copy memlist (relative to copy base pa):\n");
DRMACH_PR("current source base pa 0x%lx, size 0x%lx\n",
DRMACH_PR("current target base pa 0x%lx, size 0x%lx\n",
}
#endif /* DEBUG */
/* Map in appropriate cpu sram page */
/* Make sure the rename routine will fit */
/* copy text. standard bcopy not designed to work in nc space */
q = (uint_t *)drmach_rename;
while (q < (uint_t *)drmach_rename_end)
*p++ = *q++;
/* zero remainder. standard bzero not designed to work in nc space */
*p++ = 0;
if (err) {
return (err);
}
/* disable and flush CDC */
if (axq_cdc_disable_flush_all() != DDI_SUCCESS) {
axq_cdc_enable_all(); /* paranoia */
err = DRMACH_INTERNAL_ERROR();
goto cleanup;
}
/* mark both memory units busy */
VM_SLEEP);
}
}
return (NULL);
}
int drmach_rename_count;
int drmach_rename_ntries;
{
case DRMACH_CR_OK:
break;
case DRMACH_CR_MC_IDLE_ERR: {
break;
}
case DRMACH_CR_IOPAUSE_ERR:
break;
case DRMACH_CR_ONTRAP_ERR:
"memory error");
break;
default:
err = DRMACH_INTERNAL_ERROR();
break;
}
#ifdef DEBUG
int i;
for (i = 0; i < NCPU; i++) {
if (drmach_xt_mb[i])
DRMACH_PR("cpu%d ignored drmach_xt_mb", i);
}
}
#endif
if (err) {
goto done;
}
/* update casm shadow for target and source board */
/*
* Make a good-faith effort to notify the SC about the copy-rename, but
* do not treat a notification failure as fatal; a later board status
* will duplicate the update.
*/
done:
DRMACH_PR("waited %d out of %d tries for drmach_rename_wait on %d cpus",
return (err);
}
int drmach_slow_copy = 0;
void
{
extern xcfunc_t drmach_rename_wait;
extern xcfunc_t drmach_rename_done;
extern xcfunc_t drmach_rename_abort;
int i, count;
/*
* Prevent slot1 IO from accessing Safari memory bus.
*/
return;
}
drmach_xt_ready = 0;
for (i = 0; i < drmach_cpu_ntries; i++) {
if (drmach_xt_ready == count)
break;
}
drmach_rename_ntries = i; /* for debug */
drmach_xt_ready = 0; /* steal the line back */
for (i = 0; i < NCPU; i++) /* steal the line back, preserve data */
drmach_xt_mb[i] = drmach_xt_mb[i];
/* disable CE reporting */
neer = get_error_enable();
/* disable interrupts (paranoia) */
/*
* Execute copy-rename under on_trap to protect against a panic due
* to an uncorrectable error. Instead, DR will abort the copy-rename
* operation and rely on the OS to do the error reporting.
*
* In general, trap handling on any cpu once the copy begins
* can result in an inconsistent memory image on the target.
*/
goto copy_rename_end;
}
/*
* DO COPY.
*/
/* copy 32 bytes at src_pa to dst_pa */
/* increment by 32 bytes */
/* decrement by 32 bytes */
if (drmach_slow_copy) { /* for debug */
while (i--)
;
}
}
}
/*
* XXX CHEETAH SUPPORT
* For cheetah, we need to grab the iocage lock since iocage
* memory is used for e$ flush.
*
* NOTE: This code block is dangerous at this point in the
* copy-rename operation. It modifies memory after the copy
* has taken place which means that any persistent state will
* be abandoned after the rename operation. The code is also
* performing thread synchronization at a time when all but
* one processors are paused. This is a potential deadlock
* situation.
*
* This code block must be moved to drmach_copy_rename_init.
*/
if (drmach_is_cheetah) {
while (drmach_iocage_is_busy)
}
/*
* ASI_MEM instructions. Following the copy loop, the E$
* of the master (this) processor will have lines in state
* O that correspond to lines of home memory in state gI.
* An E$ flush is necessary to commit these lines before
* proceeding with the rename operation.
*
* Flushing the E$ will automatically flush the W$, but
* the D$ and I$ must be flushed separately and explicitly.
*/
/*
* Each line of home memory is now in state gM, except in
* the case of a cheetah processor when the E$ flush area
* is included within the copied region. In such a case,
* the lines of home memory for the upper half of the
* flush area are in state gS.
*
* Each line of target memory is in state gM.
*
* Each line of this processor's E$ is in state I, except
* those of a cheetah processor. All lines of a cheetah
* processor's E$ are in state S and correspond to the lines
* in upper half of the E$ flush area.
*
* It is vital at this point that none of the lines in the
* home or target memories are in state gI and that none
* of the lines in this processor's E$ are in state O or Os.
* A single instance of such a condition will cause loss of
* coherency following the rename operation.
*/
/*
* Rename
*/
/*
* Rename operation complete. The physical address space
* of the home and target memories have been swapped, the
* routing data in the respective CASM entries have been
* swapped, and LPA settings in the processor and schizo
* devices have been reprogrammed accordingly.
*
* In the case of a cheetah processor, the E$ remains
* populated with lines in state S that correspond to the
* lines in the former home memory. Now that the physical
* addresses have been swapped, these E$ lines correspond
* to lines in the new home memory which are in state gM.
* This combination is invalid. An additional E$ flush is
* necessary to restore coherency. The E$ flush will cause
* the lines of the new home memory for the flush region
* to transition from state gM to gS. The former home memory
* remains unmodified. This additional E$ flush has no effect
* on a cheetah+ processor.
*/
/*
* The D$ and I$ must be flushed to ensure that coherency is
* maintained. Any line in a cache that is in the valid
* state has its corresponding line of the new home memory
* in the gM state. This is an invalid condition. When the
* flushes are complete the cache line states will be
* resynchronized with those in the new home memory.
*/
flush_icache_il(); /* inline version */
flush_dcache_il(); /* inline version */
flush_pcache_il(); /* inline version */
no_trap();
/* enable interrupts */
/* enable CE reporting */
/*
* XXX CHEETAH SUPPORT
*/
if (drmach_is_cheetah) {
}
}
static void drmach_io_dispose(drmachid_t);
static sbd_error_t *
{
int rv;
int len = 0;
/* pci nodes are expected to have regs */
"Device Node 0x%x: property %s",
return (err);
}
if (rv) {
"Device Node 0x%x: property %s",
return (err);
}
/*
* Fix up unit number so that Leaf A has a lower unit number
* than Leaf B.
*/
else
} else {
else
}
/* reassemble 64-bit base address */
}
return (err);
}
static sbd_error_t *
{
return (NULL);
}
static void
{
}
/*ARGSUSED*/
{
switch (cmd) {
case SBD_CMD_TEST:
case SBD_CMD_STATUS:
case SBD_CMD_GETNCM:
break;
case SBD_CMD_CONNECT:
break;
case SBD_CMD_DISCONNECT:
break;
default:
break;
}
}
return (err);
}
/*ARGSUSED*/
{
return (NULL);
}
{
err = DRMACH_INTERNAL_ERROR();
}
if (!err) {
} else {
if (*id)
if (!err) {
if (!*id)
}
}
}
return (err);
}
static uint_t
{
uint_t non_panther_cpus = 0;
/*
* Determine PRD port indices based on slot location.
*/
switch (slot) {
case 0:
port_start = 0;
port_end = 3;
break;
case 1:
port_start = 4;
port_end = 5;
break;
default:
ASSERT(0);
/* check all */
port_start = 0;
port_end = 5;
break;
}
/*
* This Safari port passed POST and represents a
* cpu, so check the implementation.
*/
& 0xffff;
switch (impl) {
case CHEETAH_IMPL:
case CHEETAH_PLUS_IMPL:
case JAGUAR_IMPL:
break;
case PANTHER_IMPL:
break;
default:
ASSERT(0);
break;
}
}
}
DRMACH_PR("drmach_board_non_panther_cpus: exp=%d, slot=%d, "
return (non_panther_cpus);
}
{
if (!DRMACH_IS_BOARD_ID(id))
/*
* Build the casm info portion of the CLAIM message.
*/
if (err) {
/*
* if mailbox timeout or unrecoverable error from SC,
* board cannot be touched. Mark the status as
* unusable.
*/
return (err);
}
gdcd = drmach_gdcd_new();
return (DRMACH_INTERNAL_ERROR());
}
/*
* Read CPU SRAM DR buffer offset from GDCD.
*/
bp->stardrb_offset =
bp->stardrb_offset);
/*
* Read board LPA setting from GDCD.
*/
}
/*
* XXX Until the Solaris large pages support heterogeneous cpu
* domains, DR needs to prevent the addition of non-Panther cpus
* to an all-Panther domain with large pages enabled.
*/
"UltraSPARC-IV+ board into an all UltraSPARC-IV+ domain");
}
/* do saf configurator stuff */
}
if (err) {
/* flush CDC srams */
if (axq_cdc_flush_all() != DDI_SUCCESS) {
goto out;
}
/*
* Build the casm info portion of the UNCLAIM message.
*/
/*
* we clear the connected flag just in case it would have
* been set by a concurrent drmach_board_status() thread
* before the UNCLAIM completed.
*/
goto out;
}
/*
* Now that the board has been successfully attached, obtain
* platform-specific DIMM serial id information for the board.
*/
}
out:
return (err);
}
static void
{
static char *axq_name = "address-extender-queue";
static int axq_exp = -1;
static int axq_slot;
int e, s, slice;
if (invalidate) {
/* invalidate cached casm value */
drmach_slice_table[e] = 0;
/* invalidate cached axq info if for same exp */
}
}
int i, portid;
/* search for an attached slot0 axq instance */
for (i = 0; i < AXQ_MAX_EXP * AXQ_MAX_SLOT_PER_EXP; i++) {
if (axq_dip)
if (portid == -1) {
DRMACH_PR("cant get portid of axq "
"instance %d\n", i);
continue;
}
if (invalidate && axq_exp == e)
continue;
if (axq_slot == 0)
break; /* found */
}
}
if (i == AXQ_MAX_EXP * AXQ_MAX_SLOT_PER_EXP) {
if (axq_dip) {
}
DRMACH_PR("drmach_slice_table_update: failed to "
"update axq dip\n");
return;
}
}
if (invalidate)
return;
DRMACH_PR("using AXQ casm %d.%d for slot%d.%d\n",
/* invalidate entry */
drmach_slice_table[e] &= ~0x20;
/*
* find a slice that routes to expander e. If no match
* is found, drmach_slice_table[e] will remain invalid.
*
* The CASM is a routing table indexed by slice number.
* Each element in the table contains permission bits,
* a destination expander number and a valid bit. The
* valid bit must true for the element to be meaningful.
*
* CASM entry structure
* Bits 15..6 ignored
* Bit 5 valid
* Bits 0..4 expander number
*
* NOTE: the for loop is really enumerating the range of slices,
* which is ALWAYS equal to the range of expanders. Hence,
* AXQ_MAX_EXP is okay to use in this loop.
*/
}
}
/*
* Get base and bound PAs for slot 1 board lpa programming
* information corresponding to the CASM. Otherwise, set base and
* bound PAs to 0.
*/
static void
{
s0id != 0) {
if ((slice =
& 0x20) {
}
}
}
/*
* Reprogram slot 1 lpa's as required.
* The purpose of this routine is maintain the LPA settings of the devices
* in slot 1. To date we know Schizo and Cheetah are the only devices that
* require this attention. The LPA setting must match the slice field in the
* CASM element for the local expander. This field is guaranteed to be
* programmed in accordance with the cacheable address space on the slot 0
* board of the local expander. If no memory is present on the slot 0 board,
* there is no cacheable address space and, hence, the CASM slice field will
* be zero or its valid bit will be false (or both).
*/
static void
{
uint64_t last_scsr_pa = 0;
DRMACH_PR("drmach...lpa_set: slot1=%d not present",
return;
}
} else {
/* nothing to do when board is not found or has no devices */
DRMACH_PR("drmach...lpa_set: slot1=%d not present",
return;
}
}
DRMACH_PR("drmach_...lpa_set: bnum=%d base=0x%lx bound=0x%lx\n",
while (rv == 0) {
if (DRMACH_IS_IO_ID(id)) {
is_maxcat = 0;
/*
* Skip all non-Schizo IO devices (only IO nodes
* that are Schizo devices have non-zero scsr_pa).
* Filter out "other" leaf to avoid writing to the
*/
DRMACH_PR("drmach...lpa_set: old scsr=0x%lx\n",
scsr);
scsr &= ~(DRMACH_LPA_BASE_MASK |
DRMACH_PR("drmach...lpa_set: new scsr=0x%lx\n",
scsr);
}
}
}
extern xcfunc_t drmach_set_lpa;
DRMACH_PR("reprogramming maxcat lpa's");
if (DRMACH_IS_CPU_ID(id)) {
int ntries;
/*
* Check for unconfigured or powered-off
* MCPUs. If CPU_READY flag is clear, the
* MCPU cannot be xcalled.
*/
CPU_READY) == 0) {
continue;
}
/*
* XXX CHEETAH SUPPORT
* for cheetah, we need to clear iocage
* memory since it will be used for e$ flush
* in drmach_set_lpa.
*/
if (drmach_is_cheetah) {
while (drmach_iocage_is_busy)
ecache_size * 2);
}
/*
* drmach_slice_table[*]
* bit 5 valid
* bit 0:4 slice number
*
* drmach_xt_mb[*] format for drmach_set_lpa
* bit 7 valid
* bit 6 set null LPA
* (overrides bits 0:4)
* bit 0:4 slice number
*
* drmach_set_lpa derives processor CBASE and
* CBND from bits 6 and 0:4 of drmach_xt_mb.
* If bit 6 is set, then CBASE = CBND = 0.
* Otherwise, CBASE = slice number;
* CBND = slice number + 1.
* No action is taken if bit 7 is zero.
*/
bzero((void *)drmach_xt_mb,
if (new_basepa == 0 && new_boundpa == 0)
else
drmach_xt_ready = 0;
while (!drmach_xt_ready && ntries) {
ntries--;
}
drmach_xt_ready = 0;
/*
* XXX CHEETAH SUPPORT
* for cheetah, we need to clear iocage
* memory since it was used for e$ flush
* in performed drmach_set_lpa.
*/
if (drmach_is_cheetah) {
ecache_size * 2);
}
}
}
}
}
/*
* Return the number of connected Panther boards in the domain.
*/
/*
 * NOTE(review): this fragment appears incomplete -- 'rv' is read
 * uninitialized and is never modified inside the loop, and the
 * board-array iteration / Panther-implementation checks that would
 * normally drive it are not present in the visible text.  Confirm
 * against the complete source before relying on this routine.
 */
static int
drmach_panther_boards(void)
{
	int	rv;
	int	b_idx;
	int	npanther = 0;

	/* count boards while the (missing) iterator reports success */
	while (rv == 0) {
		npanther++;
	}
	return (npanther);
}
/*ARGSUSED*/
{
if (!DRMACH_IS_BOARD_ID(id))
/*
* Build the casm info portion of the UNCLAIM message.
* This must be done prior to calling for saf configurator
* deprobe, to ensure that the associated axq instance
* is not detached.
*/
/*
* If disconnecting slot 0 board, update the casm slice table
* info now, for use by drmach_slot1_lpa_set()
*/
/*
* Update LPA information for slot1 board
*/
/* disable and flush CDC */
if (axq_cdc_disable_flush_all() != DDI_SUCCESS) {
axq_cdc_enable_all(); /* paranoia */
err = DRMACH_INTERNAL_ERROR();
}
/*
* call saf configurator for deprobe
* It's done now before sending an UNCLAIM message because
* IKP will probe boards it doesn't know about <present at boot>
* prior to unprobing them. If this happens after sending the
* UNCLAIM, it will cause a dstop for domain transgression error.
*/
if (!err) {
}
}
/*
* If disconnecting a board from a Panther domain, wait a fixed-
* time delay for pending Safari transactions to complete on the
* disconnecting board's processors. The bus sync list read used
* in drmach_shutdown_asm to synchronize with outstanding Safari
* transactions assumes no read-bypass-write mode for all memory
* controllers. Since Panther supports read-bypass-write, a
* delay is used that is slightly larger than the maximum Safari
* transaction timeout.
*/
if (drmach_panther_boards() > 0 || drmach_unclaim_delay_all) {
DRMACH_PR("delayed %ld ticks (%ld secs) before disconnecting "
}
if (!err) {
if (err) {
/*
* if mailbox timeout or unrecoverable error from SC,
* board cannot be touched. Mark the status as
* unusable.
*/
else {
DRMACH_PR("UNCLAIM failed for bnum=%d\n",
DRMACH_PR("calling sc_probe_board: bnum=%d\n",
"sc_probe_board failed for bnum=%d",
} else {
0);
}
}
}
} else {
/*
* Now that the board has been successfully detached,
* discard platform-specific DIMM serial id information
* for the board.
*/
(void) plat_discard_mem_sids(
}
}
}
return (err);
}
static int
{
int portid;
char type[OBP_MAXPROPNAME];
return (portid);
/*
* Get the device_type property to see if we should
* continue processing this node.
*/
return (-1);
/*
* If the device is a CPU without a 'portid' property,
* it is a CMP core. For such cases, the parent node
* has the portid.
*/
return (-1);
return (portid);
}
return (-1);
}
/*
* This is a helper function to determine if a given
* node should be considered for a dr operation according
* to predefined dr type nodes and the node's name.
* Formal Parameter : The name of a device node.
* Return Value: -1, name does not map to a valid dr type.
* A value greater or equal to 0, name is a valid dr type.
*/
static int
drmach_name2type_idx(char *name)
{
	/*
	 * NOTE(review): fragment incomplete -- the guard condition that
	 * should precede this first return, and the loop/conditional
	 * that surround 'return (index)' below, are missing (braces do
	 * not balance and 'index' is not declared in the visible text).
	 * Confirm against the complete source.
	 */
	return (-1);
	/*
	 * Determine how many possible types are currently supported
	 * for dr.
	 */
	/* Determine if the node's name correspond to a predefined type. */
	/* The node is an allowed type for dr. */
	return (index);
}
/*
 * If the name of the node does not map to any of the
 * types in the array drmach_name2type then the node is not of
 * interest to dr.
 */
return (-1);
}
static int
{
char name[OBP_MAXDRVNAME];
if (portid == -1) {
/*
* if the node does not have a portid property, then
* by that information alone it is known that drmach
* is not interested in it.
*/
return (0);
}
/* The node must have a name */
if (rv)
return (0);
/*
* Ignore devices whose portid do not map to this board,
* or that their name property is not mapped to a valid
* dr device name.
*/
(drmach_name2type_idx(name) < 0))
return (0);
/*
* Create a device data structure from this node data.
* The call may yield nothing if the node is not of interest
* to drmach.
*/
return (-1);
else if (!id) {
/*
* drmach_device_new examined the node we passed in
* and determined that it was either one not of
* interest to drmach or the PIM dr layer.
* So, it is skipped.
*/
return (0);
}
if (rv) {
return (-1);
}
#ifdef DEBUG
if (DRMACH_IS_IO_ID(id))
#endif
}
{
int max_devices;
int rv;
if (!DRMACH_IS_BOARD_ID(id))
data.a = a;
if (rv == 0) {
} else {
else
err = DRMACH_INTERNAL_ERROR();
}
return (err);
}
int
{
int rv = 0;
*id = 0;
return (-1);
}
*id = 0;
rv = -1;
} else {
if (bp)
sizeof (dr_showboard_t));
if (err) {
*id = 0;
rv = -1;
}
sbd_err_clear(&err);
} else {
if (!bp)
switch (shb.test_status) {
case DR_TEST_STATUS_UNKNOWN:
case DR_TEST_STATUS_IPOST:
case DR_TEST_STATUS_ABORTED:
break;
case DR_TEST_STATUS_PASSED:
break;
case DR_TEST_STATUS_FAILED:
break;
default:
DRMACH_PR("Unknown test status=0x%x from SC\n",
break;
}
}
}
return (rv);
}
{
return (NULL);
}
{
if (!DRMACH_IS_BOARD_ID(id))
if (!err) {
else {
if (!err)
}
}
return (err);
}
{
if (!DRMACH_IS_BOARD_ID(id))
if (!err)
return (err);
}
static sbd_error_t *
{
if (!DRMACH_IS_BOARD_ID(id))
return (NULL);
}
{
int cpylen;
char *copts;
int is_io;
if (!DRMACH_IS_BOARD_ID(id))
/*
* If the board is an I/O or MAXCAT board, setup I/O cage for
* testing. Slot 1 indicates I/O or MAXCAT board.
*/
if (force)
}
if (is_io) {
if (err) {
return (err);
}
}
if (!err)
else
/* examine test status */
switch (tbr.test_status) {
case DR_TEST_STATUS_IPOST:
NULL);
break;
case DR_TEST_STATUS_UNKNOWN:
break;
case DR_TEST_STATUS_FAILED:
NULL);
break;
case DR_TEST_STATUS_ABORTED:
NULL);
break;
default:
NULL);
break;
}
}
/*
* If I/O cage test was performed, check for availability of the
* cpu used. If cpu has been returned, it's OK to proceed with
* reconfiguring it for use.
*/
if (is_io) {
DRMACH_PR("drmach_board_test: port id: %d",
/*
* Check the cpu_recovered flag in the testboard reply, or
* if the testboard request message was not sent to SMS due
* to an mboxsc_putmsg() failure, it's OK to recover the
* cpu since hpost hasn't touched it.
*/
int i;
for (i = 0; i < MAX_CORES_PER_CMP; i++) {
(void) drmach_iocage_cpu_return(dp[i],
oflags[i]);
}
}
} else {
"after I/O cage test: cpu_recovered=%d, "
"returned portid=%d",
}
}
return (err);
}
{
if (!DRMACH_IS_BOARD_ID(id)) {
}
if (err) {
return (err);
}
} else {
if (!err) {
err = DRMACH_INTERNAL_ERROR();
else
}
}
return (err);
}
static sbd_error_t *
{
int len;
/*
* If the node does not have a portid property,
* it represents a CMP device. For a CMP, the reg
* property of the parent holds the information of
* interest.
*/
return (DRMACH_INTERNAL_ERROR());
}
}
return (DRMACH_INTERNAL_ERROR());
return (DRMACH_INTERNAL_ERROR());
return (DRMACH_INTERNAL_ERROR());
/* reassemble 64-bit base address */
return (NULL);
}
static void
{
*saf_config_reg = lddsafconfig();
*reg_read = 0x1;
}
/*
* A return value of 1 indicates success and 0 indicates a failure
*/
static int
{
int rv = 0x0;
*scr = 0x0;
/*
* Confirm cpu was in ready set when xc was issued.
* This is done by verifying rv which is
* set to 0x1 when xc_one is successful.
*/
return (rv);
}
static sbd_error_t *
{
/*
* If a CPU does not have a portid property, it must
* be a CMP device with a cpuid property.
*/
return (DRMACH_INTERNAL_ERROR());
}
}
return (NULL);
}
/* Starcat CMP core id is bit 2 of the cpuid */
#define DRMACH_CPUID2SRAM_IDX(id) \
static sbd_error_t *
{
static void drmach_cpu_dispose(drmachid_t);
int idx;
int impl;
if (err) {
goto fail;
}
if (err) {
goto fail;
}
if (err) {
goto fail;
}
/*
* Init the board cpu type. Assumes all board cpus are the same type.
*/
}
/*
* XXX CHEETAH SUPPORT
* determine if the domain uses Cheetah procs
*/
if (drmach_is_cheetah < 0) {
}
/*
* Initialize TTE for mapping CPU SRAM STARDRB buffer.
* The STARDRB buffer (16KB on Cheetah+ boards, 32KB on
* pair. Each cpu uses 8KB according to the following layout:
*
*/
DRMACH_PR("drmach_cpu_new: cpuid=%d, coreid=%d, stardrb_offset=0x%lx, "
return (NULL);
fail:
if (cp) {
}
*idp = (drmachid_t)0;
return (err);
}
static void
{
}
static int
{
extern xcfunc_t drmach_set_lpa;
extern void restart_other_cpu(int);
/*
* NOTE: restart_other_cpu pauses cpus during the
* slave cpu start. This helps to quiesce the
* bus traffic a bit which makes the tick sync
* routine in the prom more robust.
*/
if (prom_hotaddcpu(cpuid) != 0) {
cpuid);
}
DRMACH_PR("drmach_cpu_start: cannot read board info for "
int exp;
int ntries;
/*
* drmach_slice_table[*]
* bit 5 valid
* bit 0:4 slice number
*
* drmach_xt_mb[*] format for drmach_set_lpa
* bit 7 valid
* bit 6 set null LPA (overrides bits 0:4)
* bit 0:4 slice number
*
* drmach_set_lpa derives processor CBASE and CBND
* from bits 6 and 0:4 of drmach_xt_mb. If bit 6 is
* set, then CBASE = CBND = 0. Otherwise, CBASE = slice
* number; CBND = slice number + 1.
* No action is taken if bit 7 is zero.
*/
} else {
}
drmach_xt_ready = 0;
while (!drmach_xt_ready && ntries) {
ntries--;
}
"waited %d out of %d tries for drmach_set_lpa on cpu%d",
}
return (0);
}
/*
* A detaching CPU is xcalled with an xtrap to drmach_cpu_stop_self() after
* it has been offlined. The function of this routine is to get the cpu
* spinning in a safe place. The requirement is that the system will not
* reference anything on the detaching board (memory and i/o is detached
* elsewhere) and that the CPU not reference anything on any other board
* in the system. This isolation is required during and after the writes
* to the domain masks to remove the board from the domain.
*
* To accomplish this isolation the following is done:
* 1) Create a locked mapping to the STARDRB data buffer located
* in this cpu's sram. There is one TTE per cpu, initialized in
* drmach_cpu_new(). The cpuid is used to select which TTE to use.
* The STARDRB buffer is 16KB on Cheetah+ boards, 32KB on Jaguar
* boards. Each STARDRB buffer is logically divided by DR into one
* 8KB page per cpu (or Jaguar core).
* 2) Copy the target function (drmach_shutdown_asm) into buffer.
* 3) Jump to function now in the cpu sram.
* Function will:
* 3.1) Flush its Ecache (displacement).
* 3.2) Flush its Dcache with HW mechanism.
* 3.3) Flush its Icache with HW mechanism.
* 3.4) Flush all valid and _unlocked_ D-TLB and I-TLB entries.
* 3.5) Set LPA to NULL
* 3.6) Clear xt_mb to signal completion. Note: cache line is
* recovered by drmach_cpu_poweroff().
* 4) Jump into an infinite loop.
*/
/*
 * Copy drmach_shutdown_asm into this cpu's SRAM page and jump to it,
 * isolating the detaching cpu from board memory (see the block comment
 * above for the full isolation protocol).
 *
 * NOTE(review): fragment incomplete -- the argument list of the
 * drmach_shutdown_asm extern and of the final indirect call are cut
 * off, 'stack_pointer' is used without a visible declaration, and the
 * line(s) that advance 'q' past the copied text before the zero-fill
 * loop are missing.  Confirm against the complete source.
 */
static void
drmach_cpu_stop_self(void)
{
	extern void drmach_shutdown_asm(
	extern void drmach_shutdown_asm_end(void);
	uint_t *p, *q;

	/* copy text. standard bcopy not designed to work in nc space */
	p = (uint_t *)drmach_cpu_sram_va;
	q = (uint_t *)drmach_shutdown_asm;
	while (q < (uint_t *)drmach_shutdown_asm_end)
		*p++ = *q++;
	/* zero to assist debug */
	while (p < q)
		*p++ = 0;
	/* a parking spot for the stack pointer */
	stack_pointer = (uint64_t)q;
	/* call copy of drmach_shutdown_asm */
	(*(void (*)())drmach_cpu_sram_va)(
}
/*
 * Final shutdown entry for a detaching cpu: spl8() raises the
 * processor interrupt level to block interrupts before the cpu is
 * parked.
 *
 * NOTE(review): flush_windows() is declared here but never called in
 * the visible body -- the remainder of the shutdown sequence (window
 * flush, hand-off to drmach_cpu_stop_self, etc.) appears to be
 * missing from this fragment.  Confirm against the complete source.
 */
static void
drmach_cpu_shutdown_self(void)
{
	extern void flush_windows(void);

	(void) spl8();
}
static sbd_error_t *
{
if (!DRMACH_IS_CPU_ID(id))
err = DRMACH_INTERNAL_ERROR();
else
return (err);
}
static sbd_error_t *
{
return (NULL);
}
{
if (!DRMACH_IS_CPU_ID(id))
return (NULL);
}
{
if (!DRMACH_IS_CPU_ID(id))
return (NULL);
}
{
int impl;
if (!DRMACH_IS_CPU_ID(id))
return (DRMACH_INTERNAL_ERROR());
}
return (NULL);
}
/*
* Flush this cpu's ecache, then ensure all outstanding safari
* transactions have retired.
*/
void
{
uint64_t *p;
for (p = drmach_bus_sync_list; *p; p++)
(void) ldphys(*p);
}
{
if (!DRMACH_IS_DEVICE_ID(id))
return (NULL);
}
{
int state;
if (!DRMACH_IS_IO_ID(id))
*yes = 0;
return (NULL);
}
return (NULL);
}
static int
{
char dtype[OBP_MAXPROPNAME];
int portid;
return (0);
/*
* safari IDs end in 0x1C.
*/
"portid", &len);
if ((rv != DDI_PROP_SUCCESS) ||
return (0);
if (rv != DDI_PROP_SUCCESS)
return (0);
return (0);
&len) == DDI_PROP_SUCCESS) {
/*
* All PCI B-Leafs are at configspace 0x70.0000.
*/
if (pci_csr_base == 0x700000)
return (1);
}
}
}
return (0);
}
#define SCHIZO_BINDING_NAME "pci108e,8001"
#define XMITS_BINDING_NAME "pci108e,8002"
/*
* Verify if the dip is an instance of MAN 'eri'.
*/
static int
{
char *name;
int len;
return (0);
/*
* Verify if the parent is schizo(xmits)0 and pci B leaf.
*/
return (0);
/*
* This RIO could be on XMITS, so get the dip to
* XMITS PCI Leaf.
*/
return (0);
return (0);
}
}
return (0);
/*
* Finally make sure it is the MAN eri.
*/
/*
* The network function of the RIO ASIC will always be
* device 3 and function 1 ("network@3,1").
*/
return (1);
}
return (0);
}
typedef struct {
int iosram_inst;
int bnum;
int
{
int rv;
int len;
int portid;
char name[OBP_MAXDRVNAME];
return (DDI_WALK_CONTINUE);
}
if (rv != DDI_PROP_SUCCESS)
return (DDI_WALK_CONTINUE);
/* ignore devices that are not on this board */
return (DDI_WALK_CONTINUE);
"name", &len);
if (rv == DDI_PROP_SUCCESS) {
0, "name",
if (rv != DDI_PROP_SUCCESS)
return (DDI_WALK_CONTINUE);
return (DDI_WALK_CONTINUE);
else
return (DDI_WALK_TERMINATE);
} else {
if (drmach_dip_is_man_eri(dip)) {
if (ios->iosram_inst < 0)
return (DDI_WALK_CONTINUE);
else
return (DDI_WALK_TERMINATE);
}
}
}
}
return (DDI_WALK_CONTINUE);
}
{
int rv = 0;
int circ;
if (!DRMACH_IS_IO_ID(id))
/* walk device tree to find iosram instance for the board */
(void *)&ios);
DRMACH_PR("drmach_io_pre_release: bnum=%d iosram=%d eri=0x%p\n",
/*
* Release hold acquired in drmach_board_find_io_insts()
*/
}
if (ios.iosram_inst >= 0) {
/* call for tunnel switch */
do {
DRMACH_PR("calling iosram_switchfrom(%d)\n",
if (rv)
DRMACH_PR("iosram_switchfrom returned %d\n",
rv);
if (rv)
}
return (err);
}
{
if (!DRMACH_IS_IO_ID(id))
err = DRMACH_INTERNAL_ERROR();
else {
0);
if (func) {
int circ;
/*
* Walk device tree to find rio dip for the board
* Since we are not interested in iosram instance here,
* initialize it to 0, so that the walk terminates as
* soon as eri dip is found.
*/
ios.iosram_inst = 0;
}
/*
* Root node doesn't have to be held in any way.
*/
drmach_board_find_io_insts, (void *)&ios);
if (pdip) {
}
DRMACH_PR("drmach_io_unrelease: bnum=%d eri=0x%p\n",
DRMACH_PR("calling man_dr_attach\n");
/*
* Release hold acquired in
* drmach_board_find_io_insts()
*/
}
} else
DRMACH_PR("man_dr_attach NOT present\n");
}
return (err);
}
static sbd_error_t *
{
if (!DRMACH_IS_IO_ID(id))
err = DRMACH_INTERNAL_ERROR();
else {
0);
if (func) {
int circ;
/*
* Walk device tree to find rio dip for the board
* Since we are not interested in iosram instance here,
* initialize it to 0, so that the walk terminates as
* soon as eri dip is found.
*/
ios.iosram_inst = 0;
}
/*
* Root node doesn't have to be held in any way.
*/
drmach_board_find_io_insts, (void *)&ios);
if (pdip) {
}
DRMACH_PR("drmach_io_release: bnum=%d eri=0x%p\n",
DRMACH_PR("calling man_dr_detach\n");
/*
* Release hold acquired in
* drmach_board_find_io_insts()
*/
}
} else
DRMACH_PR("man_dr_detach NOT present\n");
}
return (err);
}
{
char *path;
if (!DRMACH_IS_DEVICE_ID(id))
/*
* Always called after drmach_unconfigure() which on Starcat
* unconfigures the branch but doesn't remove it so the
* dip must always exist.
*/
#ifdef DEBUG
#endif
if (schpc_remove_pci(rdip)) {
DRMACH_PR("schpc_remove_pci failed\n");
} else {
DRMACH_PR("schpc_remove_pci succeeded\n");
}
}
return (NULL);
}
{
int circ;
if (!DRMACH_IS_DEVICE_ID(id))
/*
* We held the branch rooted at dip earlier, so at a minimum the
* root i.e. dip must be present in the device tree.
*/
if (schpc_add_pci(dip)) {
DRMACH_PR("schpc_add_pci failed\n");
} else {
DRMACH_PR("schpc_add_pci succeeded\n");
}
}
/*
* Walk device tree to find rio dip for the board
* Since we are not interested in iosram instance here,
* initialize it to 0, so that the walk terminates as
* soon as eri dip is found.
*/
ios.iosram_inst = 0;
}
/*
* Root node doesn't have to be held in any way.
*/
(void *)&ios);
if (pdip) {
}
DRMACH_PR("drmach_io_post_attach: bnum=%d eri=0x%p\n",
func =
if (func) {
DRMACH_PR("calling man_dr_attach\n");
} else {
DRMACH_PR("man_dr_attach NOT present\n");
}
/*
* Release hold acquired in drmach_board_find_io_insts()
*/
}
return (NULL);
}
static sbd_error_t *
{
int configured;
if (err)
return (err);
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
if (err)
return (err);
gdcd = drmach_gdcd_new();
return (DRMACH_INTERNAL_ERROR());
sz = 0;
while (chunks-- != 0) {
}
++chunk;
}
return (NULL);
}
/*
* Hardware registers are organized into consecutively
* addressed registers. The reg property's hi and lo fields
* together describe the base address of the register set for
* this memory-controller. Register descriptions and offsets
* (from the base address) are as follows:
*
* Description Offset Size (bytes)
* Memory Timing Control Register I 0x00 8
* Memory Timing Control Register II 0x08 8
* Memory Address Decoding Register I 0x10 8
* Memory Address Decoding Register II 0x18 8
* Memory Address Decoding Register III 0x20 8
* Memory Address Decoding Register IV 0x28 8
* Memory Address Control Register 0x30 8
* Memory Timing Control Register III 0x38 8
* Memory Timing Control Register IV 0x40 8
* Memory Timing Control Register V 0x48 8 (Jaguar, Panther only)
* EMU Activity Status Register 0x50 8 (Panther only)
*
* Only the Memory Address Decoding Register and EMU Activity Status
* Register addresses are needed for DRMACH.
*/
static sbd_error_t *
{
static void drmach_mem_dispose(drmachid_t);
if (err)
return (err);
if (madr & DRMACH_MC_VALID_MASK) {
count += 1;
break;
}
}
/*
* If none of the banks had their valid bit set, that means
* post did not configure this MC to participate in the
* domain. So, pretend this node does not exist by returning
* a drmachid of zero.
*/
if (count == 0) {
/* drmach_mem_dispose frees board mem list */
*idp = (drmachid_t)0;
return (NULL);
}
/*
* Only one mem unit per board is exposed to the
* PIM layer. The first mem unit encountered during
* tree walk is used to represent all mem units on
* the same board.
*/
/* start list of mem units on this board */
/*
* force unum to zero since this is the only mem unit
* that will be visible to the PIM layer.
*/
/*
* board memory size kept in this mem unit only
*/
if (err) {
/* drmach_mem_dispose frees board mem list */
*idp = (drmachid_t)0;
return (NULL);
}
/*
* allow this instance (the first encountered on this board)
* to be visible to the PIM layer.
*/
} else {
/* hide this mem instance behind the first. */
;
/*
* hide this instance from the caller.
* See drmach_board_find_devices_cb() for details.
*/
*idp = (drmachid_t)0;
}
return (NULL);
}
static void
{
do {
} while (mp);
}
{
int rv;
if (!DRMACH_IS_MEM_ID(id))
} else if (rv != 0) {
/* catch this in debug kernels */
ASSERT(0);
" return value %d", rv);
}
return (NULL);
}
{
int rv;
if (!DRMACH_IS_MEM_ID(id))
if (size > 0) {
if (rv != 0) {
"unexpected kcage_range_delete_post_mem_del"
" return value %d", rv);
return (DRMACH_INTERNAL_ERROR());
}
}
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
else
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
else
return (NULL);
}
{
static struct {
} uk2segsz[] = {
};
uint64_t largest_sz = 0;
if (!DRMACH_IS_MEM_ID(id))
/* prime the result with a default value */
int bank;
int i;
/* get register value, extract uk and normalize */
if (!(madr & DRMACH_MC_VALID_MASK))
continue;
/* match uk value */
for (i = 0; i < len; i++)
break;
if (i < len) {
/*
* remember largest segment size,
* update mask result
*/
if (sz > largest_sz) {
largest_sz = sz;
}
} else {
/*
* uk not in table, punt using
* entire slice size. no longer any
* reason to check other banks.
*/
return (NULL);
}
}
}
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
int bank;
if (madr & DRMACH_MC_VALID_MASK) {
}
}
}
/* should not happen, but ... */
return (DRMACH_INTERNAL_ERROR());
return (NULL);
}
void
{
while (rv == 0) {
while (mp) {
int bank;
if (madr & DRMACH_MC_VALID_MASK) {
/*
* The list is zero terminated.
* Offset the pa by a doubleword
* to avoid confusing a pa value of
* zero with the terminator.
*/
}
}
}
}
drmach_bus_sync_list[cnt] = 0;
}
{
if (err)
return (err);
gdcd = drmach_gdcd_new();
return (DRMACH_INTERNAL_ERROR());
while (chunks-- != 0) {
}
++chunk;
}
#ifdef DEBUG
DRMACH_PR("GDCD derived memlist:");
#endif
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
case 0: *bytes = DRMACH_MEM_USABLE_SLICE_SIZE;
break;
case 1: *bytes = 0;
break;
default:
err = DRMACH_INTERNAL_ERROR();
break;
}
return (err);
}
{
if (!DRMACH_IS_MEM_ID(id))
return (CPU_CURRENT);
if (drmach_mem_cpu_affinity_nail) {
return (CPU_CURRENT);
cpuid = CPU_CURRENT;
return (cpuid);
}
/* try to choose a proc on the target board */
int rv;
int d_idx;
while (rv == 0) {
if (DRMACH_IS_CPU_ID(d_id)) {
return (cpuid);
} else {
}
}
}
}
/* otherwise, this proc, wherever it is */
return (CPU_CURRENT);
}
static sbd_error_t *
{
if (!DRMACH_IS_MEM_ID(id))
return (NULL);
}
static sbd_error_t *
{
/* get starting physical address of target memory */
if (err)
return (err);
/* round down to slice boundary */
/* stop at first span that is in slice */
break;
return (NULL);
}
{
if (!DRMACH_IS_BOARD_ID(id))
}
}
return (err);
}
/*ARGSUSED1*/
static sbd_error_t *
{
int err = 1;
if (DRMACH_IS_CPU_ID(id)) {
err = 0;
err = 0;
}
if (err)
uprintf("showlpa %s::%s portid %d, base pa %lx, bound pa %lx\n",
return (NULL);
}
/*ARGSUSED*/
static sbd_error_t *
{
if (!DRMACH_IS_BOARD_ID(id))
/* do saf configurator stuff */
return (err);
}
return (err);
}
/*ARGSUSED*/
static sbd_error_t *
{
if (!DRMACH_IS_BOARD_ID(id))
}
return (err);
}
static sbd_error_t *
{
/* copy 32 bytes at src_pa to dst_pa */
/* increment by 32 bytes */
/* decrement by 32 bytes */
}
}
return (NULL);
}
static sbd_error_t *
{
if (!DRMACH_IS_CPU_ID(id))
return (NULL);
}
/*
* Starcat DR passthrus are for debugging purposes only.
*/
static struct {
const char *name;
} drmach_pt_arr[] = {
{ "showlpa", drmach_pt_showlpa },
{ "ikprobe", drmach_pt_ikprobe },
{ "ikdeprobe", drmach_pt_ikdeprobe },
{ "readmem", drmach_pt_readmem },
{ "recovercpu", drmach_pt_recovercpu },
/* the following line must always be last */
};
/*ARGSUSED*/
{
int i;
i = 0;
break;
i += 1;
}
else
return (err);
}
{
if (!DRMACH_IS_DEVICE_ID(id))
}
{
if (!DRMACH_IS_ID(id)) {
}
return (err);
}
static sbd_error_t *
{
if (!DRMACH_IS_ID(id))
}
/*ARGSUSED*/
{
char name[OBP_MAXDRVNAME];
int rv;
/*
* Since CPU nodes are not configured, it is
* necessary to skip the unconfigure step as
* well.
*/
if (DRMACH_IS_CPU_ID(id)) {
return (NULL);
}
for (; id; ) {
if (!DRMACH_IS_DEVICE_ID(id))
/*
* drmach_unconfigure() is always called on a configured branch.
* So the root of the branch was held earlier and must exist.
*/
DRMACH_PR("drmach_unconfigure: unconfiguring DDI branch");
/* The node must have a name */
if (rv)
return (0);
if (drmach_name2type_idx(name) < 0) {
if (DRMACH_IS_MEM_ID(id)) {
} else {
}
continue;
}
/*
* NOTE: FORCE flag is no longer needed under devfs
*/
KM_SLEEP);
/*
* If non-NULL, fdip is returned held and must be
* released.
*/
} else {
}
/*
* If we were unconfiguring an IO board, a call was
* made to man_dr_detach. We now need to call
* man_dr_attach to regain man use of the eri.
*/
if (DRMACH_IS_IO_ID(id)) {
("man_dr_attach", 0);
if (func) {
int circ;
/*
* Walk device tree to find rio dip for
* the board
* Since we are not interested in iosram
* instance here, initialize it to 0, so
* that the walk terminates as soon as
* eri dip is found.
*/
ios.iosram_inst = 0;
}
/*
* Root node doesn't have to be held in
* any way.
*/
(void *)&ios);
DRMACH_PR("drmach_unconfigure: bnum=%d"
" eri=0x%p\n",
if (pdip) {
}
DRMACH_PR("calling"
" man_dr_attach\n");
/*
* Release hold acquired in
* drmach_board_find_io_insts()
*/
}
}
}
return (err);
}
if (DRMACH_IS_MEM_ID(id)) {
} else {
}
}
return (NULL);
}
/*
* drmach interfaces to legacy Starfire platmod logic
* linkage via runtime symbol look up, called from plat_cpu_power*
*/
/*
* Start up a cpu. It is possible that we're attempting to restart
* the cpu after an UNCONFIGURE in which case the cpu will be
* spinning in its cache. So, all we have to do is wake it up.
* Under normal circumstances the cpu will be coming from a previous
* CONNECT and thus will be spinning in OBP. In both cases, the
* startup sequence is the same.
*/
int
{
if (drmach_cpu_start(cp) != 0)
return (EBUSY);
else
return (0);
}
int
{
int ntries;
void drmach_cpu_shutdown_self(void);
/*
* XXX CHEETAH SUPPORT
* for cheetah, we need to grab the iocage lock since iocage
* memory is used for e$ flush.
*/
if (drmach_is_cheetah) {
while (drmach_iocage_is_busy)
}
/*
* Set affinity to ensure consistent reading and writing of
* drmach_xt_mb[cpuid] by one "master" CPU directing
* the shutdown of the target CPU.
*/
/*
* Capture all CPUs (except for detaching proc) to prevent
* crosscalls to the detaching proc until it has cleared its
* bit in cpu_ready_set.
*
* The CPUs remain paused and the prom_mutex is known to be free.
* This prevents blocking when doing prom IEEE-1275 calls at a
* high PIL level.
*/
/*
* Quiesce interrupts on the target CPU. We do this by setting
* the CPU 'not ready' (i.e. removing the CPU from cpu_ready_set) to
* prevent it from receiving cross calls and cross traps.
* This prevents the processor from receiving any new soft interrupts.
*/
start_cpus();
/* setup xt_mb, will be cleared by drmach_shutdown_asm when ready */
ntries--;
}
membar_sync(); /* make sure copy-back retires */
/*
* XXX CHEETAH SUPPORT
*/
if (drmach_is_cheetah) {
}
DRMACH_PR("waited %d out of %d tries for "
"drmach_cpu_shutdown_self on cpu%d",
/*
* Do this here instead of drmach_cpu_shutdown_self() to
* avoid an assertion failure panic in turnstile.c.
*/
return (0);
}
void
{
extern int drmach_bc_bzero(void*, size_t);
int rv;
if (rv != 0) {
"iocage scrub failed, drmach_bc_bzero returned %d\n", rv);
if (rv != 0)
"iocage scrub failed, drmach_bc_bzero rv=%d\n",
rv);
}
}
static sbd_error_t *
{
/*
* HPOST wants the address of the cage to be 64 megabyte-aligned
* and in megabyte units.
* The size of the cage is also in megabyte units.
*/
return (NULL);
}
static sbd_error_t *
{
return (NULL);
}
static int
{
if (cpu_intr_disable(cp) != 0)
return (-1);
return (0);
}
static int
{
static char *fn = "drmach_iocage_cpu_acquire";
int impl;
return (-1);
return (-1);
}
if (!CPU_ACTIVE(cp)) {
return (-1);
}
/*
* can fail to receive an XIR. To workaround this issue until a hardware
* fix is implemented, we will exclude the selection of these CPUs.
*
* Once a fix is implemented in hardware, this code should be updated
* to allow Jaguar CPUs that have the fix to be used. However, support
* must be retained to skip revisions that do not have this fix.
*/
if (err) {
sbd_err_clear(&err);
return (-1);
}
DRMACH_PR("%s: excluding CPU id %d: port 0 on jaguar",
return (-1);
}
if (cpu_offline(cp, 0)) {
return (-1);
}
if (cpu_poweroff(cp)) {
if (cpu_online(cp)) {
"during I/O cage test selection", cpuid);
}
drmach_cpu_intr_disable(cp) != 0) {
"no-intr during I/O cage test selection", cpuid);
}
return (-1);
}
if (cpu_unconfigure(cpuid)) {
cpuid);
(void) cpu_configure(cpuid);
"during I/O cage test selection", cpuid);
return (-1);
}
"during I/O cage test selection",
}
drmach_cpu_intr_disable(cp) != 0) {
"no-intr during I/O cage test selection", cpuid);
}
return (-1);
}
return (0);
}
/*
* Attempt to acquire all the CPU devices passed in. It is
* assumed that all the devices in the list are the cores of
* a single CMP device. Non CMP devices can be handled as a
* single core CMP by passing in a one element list.
*
* Success is only returned if *all* the devices in the list
* can be acquired. In the failure case, none of the devices
* in the list will be held as acquired.
*/
static int
{
int curr;
int i;
int rv = 0;
/*
* Walk the list of CPU devices (cores of a CMP)
* and attempt to acquire them. Bail out if an
* error is encountered.
*/
/* check for the end of the list */
break;
}
if (rv != 0) {
break;
}
}
/*
* Check for an error.
*/
if (rv != 0) {
/*
* Make a best effort attempt to return any cores
* that were already acquired before the error was
* encountered.
*/
for (i = 0; i < curr; i++) {
}
}
return (rv);
}
static int
{
int rv = 0;
static char *fn = "drmach_iocage_cpu_return";
if (cpu_configure(cpuid)) {
"after I/O cage test", cpuid);
/*
* The component was never set to unconfigured during the IO
* cage test, so we need to leave marked as busy to prevent
* further DR operations involving this component.
*/
return (-1);
}
"I/O cage test", cpuid);
return (-1);
}
rv = -1;
}
/*
* drmach_iocage_cpu_acquire will accept cpus in state P_ONLINE or
* P_NOINTR. Need to return to previous user-visible state.
*/
drmach_cpu_intr_disable(cp) != 0) {
"no-intr after I/O cage test", cpuid);
rv = -1;
}
return (rv);
}
static sbd_error_t *
{
int b_rv;
int b_idx;
int found;
found = 0;
/*
* Walk the board list.
*/
while (b_rv == 0) {
int d_rv;
int d_idx;
continue;
}
/* An AXQ restriction disqualifies MCPU's as candidates. */
continue;
}
/*
* Walk the device list of this board.
*/
while (d_rv == 0) {
/* only interested in CPU devices */
if (!DRMACH_IS_CPU_ID(d_id)) {
&d_id);
continue;
}
/*
* The following code assumes two properties
* of a CMP device:
*
* 1. All cores of a CMP are grouped together
* in the device list.
*
* 2. There will only be a maximum of two cores
* present in the CMP.
*
* If either of these two properties change,
* this code will have to be revisited.
*/
/*
* Get the next device. It may or may not be used.
*/
/*
* The second device is only interesting for
* this pass if it has the same portid as the
* first device. This implies that both are
* cores of the same CMP.
*/
}
}
/*
* Attempt to acquire all cores of the CMP.
*/
found = 1;
break;
}
/*
* Check if the search for the second core was
* successful. If not, the next iteration should
* use that device.
*/
continue;
}
}
if (found)
break;
}
if (!found) {
}
return (NULL);
}
/*
* Setup an iocage by acquiring a cpu and memory.
*/
static sbd_error_t *
{
if (!err) {
while (drmach_iocage_is_busy)
if (err) {
}
}
return (err);
}
#define DRMACH_SCHIZO_PCI_LEAF_MAX 2
#define DRMACH_SCHIZO_PCI_SLOT_MAX 8
#define DRMACH_S1P_SAMPLE_MAX 2
typedef enum {
DRMACH_POST_SUSPEND = 0,
typedef struct {
typedef struct {
struct {
typedef struct {
struct {
typedef struct {
/*
* Table of saved state for paused slot1 devices.
*/
/*
 * When nonzero, the slot1 pause state table is (re)initialized on the
 * next suspend; cleared after init and re-armed on resume (see the
 * sflag handling in the suspend/resume verification path below).
 */
static int drmach_slot1_pause_init = 1;
/*
 * Enables the extra slot1-pause bookkeeping and DRMACH_PR diagnostics
 * in the suspend/resume paths; on by default only in DEBUG kernels.
 */
#ifdef DEBUG
int drmach_slot1_pause_debug = 1;
#else
int drmach_slot1_pause_debug = 0;
#endif /* DEBUG */
static int
{
return (0);
}
return (0);
}
for (i = 0; i < STARCAT_SLOT1_CPU_MAX; i++) {
/* maxcat cpu present */
return (0);
}
}
DRMACH_PR("drmach_is_slot1_pause_axq: no reg prop for "
"axq dip=%p\n", dip);
return (0);
}
return (1);
}
/*
* Allocate an entry in the slot1_paused state table.
*/
static void
{
int axq_exp;
strlen(DRMACH_AXQ_NAMEPROP)) == 0);
/*
* XXX This dip should really be held (via ndi_hold_devi())
* before saving it in the axq pause structure. However that
* would prevent DR as the pause data structures persist until
* the next suspend. drmach code should be modified to free
* the slot 1 pause data structures for a boardset when its
* slot 1 board is DRed out. The dip can then be released via
* ndi_rele_devi() when the pause data structure is freed
* allowing DR to proceed. Until this change is made, drmach
* code should be careful about dereferencing the saved dip
* as it may no longer exist.
*/
}
static void
{
int i;
for (i = 0; i < DRMACH_S1P_SAMPLE_MAX; i++) {
}
}
}
static void
{
int i, j, k;
for (i = 0; i < STARCAT_BDSET_MAX; i++) {
continue;
for (j = 0; j < STARCAT_SLOT1_IO_MAX; j++)
for (k = 0; k < DRMACH_SCHIZO_PCI_LEAF_MAX; k++)
slot1_paused[i] = NULL;
}
}
/*
* Tree walk callback routine. If dip represents a Schizo PCI leaf,
* fill in the appropriate info in the slot1_paused state table.
*/
static int
{
char buf[OBP_MAXDRVNAME];
return (DDI_WALK_CONTINUE);
}
return (DDI_WALK_CONTINUE);
}
DRMACH_PR("drmach_find_slot1_io: no reg prop for pci "
"dip=%p\n", dip);
return (DDI_WALK_CONTINUE);
}
/*
* XXX This dip should really be held (via ndi_hold_devi())
* before saving it in the pci pause structure. However that
* would prevent DR as the pause data structures persist until
* the next suspend. drmach code should be modified to free
* the slot 1 pause data structures for a boardset when its
* slot 1 board is DRed out. The dip can then be released via
* ndi_rele_devi() when the pause data structure is freed
* allowing DR to proceed. Until this change is made, drmach
* code should be careful about dereferencing the saved dip as
* it may no longer exist.
*/
DRMACH_PR("drmach_find_slot1_io: name=%s, portid=0x%x, dip=%p\n",
return (DDI_WALK_PRUNECHILD);
}
static void
{
/*
* Root node doesn't have to be held
*/
(void *)slot1_paused);
}
/*
* Save the interrupt mapping registers for each non-idle interrupt
* represented by the bit pairs in the saved interrupt state
* diagnostic registers for this PCI leaf.
*/
static void
{
char *dname;
/*
* 1st pass allocates, 2nd pass populates.
*/
for (i = 0; i < 2; i++) {
/*
* PCI slot interrupts
*/
while (reg) {
/*
* Xmits Interrupt Number Offset(ino) Assignments
* 00-17 PCI Slot Interrupts
* 18-1f Not Used
*/
break;
if ((reg & COMMON_CLEAR_INTR_REG_MASK) !=
if (i) {
}
++cnt;
}
++ino;
reg >>= 2;
}
/*
* Xmits Interrupt Number Offset(ino) Assignments
* 20-2f Not Used
* 30-37 Internal interrupts
* 38-3e Not Used
*/
/*
* OBIO and internal schizo interrupts
* Each PCI leaf has a set of mapping registers for all
* possible interrupt sources except the NewLink interrupts.
*/
if ((reg & COMMON_CLEAR_INTR_REG_MASK) !=
if (i) {
}
++cnt;
}
++ino;
reg >>= 2;
}
if (!i) {
}
}
}
static void
{
return;
if (iter == DRMACH_POST_SUSPEND) {
/*
* Select l2_io_queue counter by writing L2_IO_Q mux
* input to bits 0-6 of perf cntr select reg.
*/
reg &= ~AXQ_PIC_CLEAR_MASK;
}
if (iter == DRMACH_PRE_RESUME) {
axq->pcr_sel_save);
}
DRMACH_PR("drmach_s1p_axq_update: axq #%d pic_l2_io_q[%d]=%d\n",
}
static void
{
int i;
return;
for (i = 0; i < DRMACH_SCHIZO_PCI_LEAF_MAX; i++) {
}
}
}
/*
* Called post-suspend and pre-resume to snapshot the suspend state
* of slot1 AXQs and Schizos.
*/
static void
{
int i, j;
for (i = 0; i < STARCAT_BDSET_MAX; i++) {
continue;
for (j = 0; j < STARCAT_SLOT1_IO_MAX; j++)
}
}
/*
* Starcat hPCI Schizo devices.
*
* The name field is overloaded. NULL means the slot (interrupt concentrator
* bus) is not used. intr_mask is a bit mask representing the 4 possible
* interrupts per slot, on if valid (rio does not use interrupt lines 0, 1).
*/
static struct {
char *name;
/* Schizo 0 */ /* Schizo 1 */
{{"C3V0", 0xf}, {"C3V1", 0xf}}, /* slot 0 */
{{"C5V0", 0xf}, {"C5V1", 0xf}}, /* slot 1 */
};
/*
* See Schizo Specification, Revision 51 (May 23, 2001), Section 22.4.4
* "Interrupt Registers", Table 22-69, page 306.
*/
static char *
{
int intr;
switch (intr) {
case (0x0): return ("Uncorrectable ECC error");
case (0x1): return ("Correctable ECC error");
case (0x2): return ("PCI Bus A Error");
case (0x3): return ("PCI Bus B Error");
case (0x4): return ("Safari Bus Error");
default: return ("Reserved");
}
}
static void
{
char *slot_devname;
char namebuf[OBP_MAXDRVNAME];
if (!slot_valid) {
}
prom_printf("IO%d/P%d PCI slot interrupt: ino=0x%x, source device=%s, "
}
/*
* Log interrupt source device info for all valid, pending interrupts
* on each Schizo PCI leaf. Called if Schizo has logged a Safari bus
* error in the error ctrl reg.
*/
static void
{
int i, n, ino;
/*
* Check the saved interrupt mapping registers. If interrupt is valid,
* map the ino to the Schizo source device and check that the pci
* slot and interrupt line are valid.
*/
for (i = 0; i < DRMACH_SCHIZO_PCI_LEAF_MAX; i++) {
if (reg & COMMON_INTR_MAP_REG_VALID) {
if (ino <= 0x1f) {
/*
* PCI slot interrupt
*/
} else if (ino <= 0x2f) {
/*
* OBIO interrupt
*/
prom_printf("IO%d/P%d OBIO interrupt: "
} else if (ino <= 0x37) {
/*
* Internal interrupt
*/
prom_printf("IO%d/P%d Internal "
"interrupt: ino=0x%x (%s)\n",
} else {
/*
* NewLink interrupt
*/
prom_printf("IO%d/P%d NewLink "
"interrupt: ino=0x%x\n", exp,
}
DRMACH_PR("drmach_s1p_schizo_log_intr: "
"exp=%d, schizo=%d, pci_leaf=%c, "
"ino=0x%x, intr_map_reg=0x%lx\n",
}
}
}
}
/*
* See Schizo Specification, Revision 51 (May 23, 2001), Section 22.2.4
*/
/*
* Check for possible error indicators prior to resuming the
* AXQ driver, which will de-assert slot1 AXQ_DOMCTRL_PAUSE.
*/
static void
{
int i, j;
int errflag = 0;
/*
* Check for logged schizo bus error and pending interrupts.
*/
for (i = 0; i < STARCAT_BDSET_MAX; i++) {
continue;
for (j = 0; j < STARCAT_SLOT1_IO_MAX; j++) {
continue;
if (!errflag) {
prom_printf("DR WARNING: interrupt "
"attempt detected during "
"copy-rename (%s):\n",
(iter == DRMACH_POST_SUSPEND) ?
"post suspend" : "pre resume");
++errflag;
}
i, j, iter);
}
}
}
/*
* Check for changes in axq l2_io_q performance counters (2nd pass only)
*/
if (iter == DRMACH_PRE_RESUME) {
for (i = 0; i < STARCAT_BDSET_MAX; i++) {
continue;
prom_printf("DR WARNING: IO transactions "
"detected on IO%d during copy-rename: "
"AXQ l2_io_q performance counter "
"start=%d, end=%d\n", i,
}
}
}
}
/*
 * Node in a doubly-linked ring used to record device instances in the
 * order they are presented for suspend; walked forward to suspend and
 * backward to resume (see drmach_sr_ordered below).
 */
struct drmach_sr_list {
struct drmach_sr_list *next;
struct drmach_sr_list *prev;
};
/*
 * Ordered suspend table: drivers named here are suspended in array
 * order and resumed in reverse order. Each entry's ring, when
 * non-NULL, heads a doubly-linked ring (struct drmach_sr_list) of the
 * recorded instances of that driver.
 */
static struct drmach_sr_ordered {
char *name;
struct drmach_sr_list *ring;
} drmach_sr_ordered[] = {
{ "iosram", NULL },
{ "address-extender-queue", NULL },
};
static void
{
struct drmach_sr_list *np;
sizeof (struct drmach_sr_list), KM_SLEEP);
/* establish list */
} else {
/* place new node behind head node on ring list */
}
}
static void
{
if (*lp) {
struct drmach_sr_list *xp;
/* start search with the most likely node */
do {
DRMACH_PR("drmach_sr_delete:"
" disposed sr node for dip %p", dip);
return;
}
DRMACH_PR("drmach_sr_delete: still searching\n");
}
/* every dip should be found during resume */
}
int
{
int rv;
int len;
char name[OBP_MAXDRVNAME];
if (drmach_slot1_pause_debug) {
if (sflag && drmach_slot1_pause_init) {
} else if (!sflag && !drmach_slot1_pause_init) {
/* schedule init for next suspend */
}
}
"name", &len);
if (rv == DDI_PROP_SUCCESS) {
int portid;
struct drmach_sr_ordered *op;
if (rv != DDI_PROP_SUCCESS)
return (0);
if (drmach_slot1_pause_debug && sflag &&
}
if (sflag)
else
return (1);
}
}
}
return (0);
}
static void
{
int rv;
name = "<null name>";
else
aka = "<unknown>";
name_addr = "<null>";
prom_printf("\t%s %s@%s (aka %s)\n",
if (suspend) {
} else {
}
if (rv != DDI_SUCCESS) {
prom_printf("\tFAILED to %s %s@%s\n",
}
}
void
{
struct drmach_sr_ordered *op;
/*
* The ordering array declares the strict sequence in which
* the named drivers are to be suspended. Each element in
* the array may have a double-linked ring list of driver
* instances (dip) in the order in which they were presented
* to drmach_verify_sr. If present, walk the list in the
* forward direction to suspend each instance.
*/
struct drmach_sr_list *rp;
do {
}
}
if (drmach_slot1_pause_debug) {
}
}
void
{
(sizeof (drmach_sr_ordered) / sizeof (drmach_sr_ordered[0]));
if (drmach_slot1_pause_debug) {
}
/*
* walk ordering array and rings backwards to resume dips
* in reverse order in which they were suspended
*/
while (--op >= drmach_sr_ordered) {
struct drmach_sr_list *rp;
do {
}
}
}
/*
* Log a DR sysevent.
* Return value: 0 success, non-zero failure.
*/
int
{
sysevent_t *ev;
char attach_pnt[MAXNAMELEN];
attach_pnt[0] = '\0';
rv = -1;
goto logexit;
}
if (verbose)
DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
rv = -2;
goto logexit;
}
goto logexit;
goto logexit;
}
/*
* Log the event but do not sleep waiting for its
* delivery. This provides insulation from syseventd.
*/
if (ev)
"drmach_log_sysevent failed (rv %d) for %s %s\n",
return (rv);
}
/*
* Only the valid entries are modified, so the array should be zeroed out
* initially.
*/
static void
int i;
char c;
for (i = 0; i < AXQ_MAX_EXP; i++) {
c = drmach_slice_table[i];
if (c & 0x20) {
}
}
}
/*
* Only the valid entries are modified, so the array should be zeroed out
* initially.
*/
static void
/* CONSTCOND */
continue;
}
if (madr & DRMACH_MC_VALID_MASK) {
DRMACH_PR("%d.%d.%d.madr = 0x%lx\n",
}
}
}
}
}
/*
* Do not allow physical address range modification if either board on this
* expander has processors in NULL LPA mode (CBASE=CBND=NULL).
*
* A side effect of NULL proc LPA mode in Starcat SSM is that local reads will
* See section 5.2.3 of the Safari spec. All processors will read the bus sync
* list before the rename after flushing local caches. When copy-rename
* requires changing the physical address ranges (i.e. smaller memory target),
* the bus sync list contains physical addresses that will not exist after the
* rename. If these cache lines are owned due to a RTSR, a system error can
* occur following the rename when these cache lines are evicted and a writeback
* is attempted.
*
* Incoming parameter represents either the copy-rename source or a candidate
* target memory board. On Starcat, only slot0 boards may have memory.
*/
int
{
int rv;
/*
* This is reason enough to fail the request, no need
* to check the device list for cpus.
*/
return (0);
}
/*
* Check for MCPU board on the same expander.
*
* The board flag DRMACH_NULL_PROC_LPA can be set for all board
* types, as it is derived at from the POST gdcd board flag
* L1SSFLG_THIS_L1_NULL_PROC_LPA, which can be set (and should be
* ignored) for boards with no processors. Since NULL proc LPA
* applies only to processors, we walk the devices array to detect
* MCPUs.
*/
int d_idx;
while (rv == 0) {
if (DRMACH_IS_CPU_ID(d_id)) {
/*
* Fail MCPU in NULL LPA mode.
*/
return (0);
}
&d_id);
}
}
}
return (1);
}