/* drmach.c revision 6b990117eca1cdf7ad1f4424209791ad38be9791 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/dditypes.h>
#include <sys/ndi_impldefs.h>
#include <sys/opl_olympus_regs.h>
#include <sys/mem_config.h>
#include <sys/ddi_impldefs.h>
#include <sys/machsystm.h>
#include <sys/autoconf.h>
#include <sys/sysmacros.h>
#include <sys/prom_plat.h>
#include <vm/seg_kmem.h>
#include <sys/mem_cage.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/promimpl.h>
#include <sys/prom_plat.h>
#include <sys/sysevent.h>
extern void flush_cache_il(void);
extern void drmach_sleep_il(void);
typedef struct {
struct drmach_node *node;
void *data;
typedef struct drmach_node {
void *here;
int *len);
struct drmach_node *pnode);
typedef struct {
int min_index;
int max_index;
int arr_sz;
typedef struct {
void *isa;
void (*dispose)(drmachid_t);
char name[MAXNAMELEN];
typedef struct {
} drmach_cmp_t;
typedef struct {
int bnum;
int assigned;
int powered;
int connected;
int cond;
int boot_board; /* if board exists on bootup */
typedef struct {
int unum;
int portid;
int busy;
int powered;
const char *type;
typedef struct drmach_cpu {
int sb;
int chipid;
int coreid;
int strandid;
int status;
#define OPL_CPU_HOTADDED 1
} drmach_cpu_t;
typedef struct drmach_mem {
} drmach_mem_t;
typedef struct drmach_io {
int channel;
int leaf;
} drmach_io_t;
typedef struct drmach_domain_info {
int allow_dr;
typedef struct {
int flags;
typedef struct {
int ndevs;
void *a;
static drmach_array_t *drmach_boards;
drmach_board_t *, int, drmachid_t *);
static int drmach_get_portid(drmach_node_t *);
static int opl_check_dr_status();
static void drmach_io_dispose(drmachid_t);
static int drmach_init(void);
static void drmach_fini(void);
static drmach_board_t *drmach_get_board_by_bnum(int);
/* options for the second argument in drmach_add_remove_cpu() */
#define HOTADD_CPU 1
#define HOTREMOVE_CPU 2
(OPL_MAX_COREID_PER_BOARD - 1))
static int drmach_name2type_idx(char *);
static drmach_board_t *drmach_board_new(int, int);
#ifdef DEBUG
#else
#endif /* DEBUG */
#define DRMACH_IS_BOARD_ID(id) \
((id != 0) && \
#define DRMACH_IS_CPU_ID(id) \
((id != 0) && \
#define DRMACH_IS_MEM_ID(id) \
((id != 0) && \
#define DRMACH_IS_IO_ID(id) \
((id != 0) && \
#define DRMACH_IS_DEVICE_ID(id) \
((id != 0) && \
#define DRMACH_IS_ID(id) \
((id != 0) && \
#define DRMACH_INTERNAL_ERROR() \
static char *drmach_ie_fmt = "drmach.c %d";
static struct {
const char *name;
const char *type;
} drmach_name2type[] = {
};
/* utility */
#define MBYTE (1048576ull)
/*
* drmach autoconfiguration data structures and interfaces
*/
extern struct mod_ops mod_miscops;
"OPL DR 1.1"
};
static struct modlinkage modlinkage = {
(void *)&modlmisc,
};
static krwlock_t drmach_boards_rwlock;
typedef const char *fn_t;
int
_init(void)
{
int err;
if ((err = drmach_init()) != 0) {
return (err);
}
drmach_fini();
}
return (err);
}
/*
 * Module unload entry point (_fini(9E)).
 *
 * NOTE(review): in this extraction the mod_remove(&modlinkage) call
 * that normally sets 'err' appears to be missing, so 'err' was being
 * returned uninitialized -- undefined behavior.  Initialize it to 0
 * so the visible code is well-defined; confirm against the complete
 * source that the mod_remove() gate should be restored.
 */
int
_fini(void)
{
	int err = 0;

	drmach_fini();
	return (err);
}
int
{
}
struct drmach_mc_lookup {
int bnum;
};
static int
{
int len;
int rv;
len = sizeof (memory_ranges);
DDI_PROP_DONTPASS, "sb-mem-ranges",
mp->slice_base = 0;
mp->slice_size = 0;
return (-1);
}
int i;
if (rv != 0) {
return (-1);
}
for (i = 0; i < HWD_MAX_MEM_CHUNKS; i++) {
}
}
} else {
/*
* we intersect phys_install to get base_pa.
* This only works at bootup time.
*/
if (ml) {
}
}
if (ml) {
struct memlist *p;
}
else
} else {
}
return (0);
}
struct drmach_hotcpu {
int bnum;
int core_id;
int rv;
int option;
};
static int
{
char name[OBP_MAXDRVNAME];
int len = OBP_MAXDRVNAME;
if (dip == ddi_root_node()) {
return (DDI_WALK_CONTINUE);
}
DDI_PROP_DONTPASS, "name",
return (DDI_WALK_PRUNECHILD);
}
/* only cmp has board number */
bnum = -1;
bnum = -1;
}
return (DDI_WALK_PRUNECHILD);
return (DDI_WALK_CONTINUE);
}
/* we have already pruned all unwanted cores and cpu's above */
return (DDI_WALK_CONTINUE);
}
DDI_PROP_DONTPASS, "cpuid",
p->rv = -1;
return (DDI_WALK_TERMINATE);
}
return (DDI_WALK_CONTINUE);
if (p->option == HOTADD_CPU) {
if (prom_hotaddcpu(cpuid) != 0) {
p->rv = -1;
return (DDI_WALK_TERMINATE);
}
} else if (p->option == HOTREMOVE_CPU) {
if (prom_hotremovecpu(cpuid) != 0) {
p->rv = -1;
return (DDI_WALK_TERMINATE);
}
}
return (DDI_WALK_CONTINUE);
}
return (DDI_WALK_PRUNECHILD);
}
static int
{
struct drmach_hotcpu arg;
}
struct drmach_setup_core_arg {
};
static int
{
char name[OBP_MAXDRVNAME];
int len = OBP_MAXDRVNAME;
int bnum;
if (dip == ddi_root_node()) {
return (DDI_WALK_CONTINUE);
}
DDI_PROP_DONTPASS, "name",
return (DDI_WALK_PRUNECHILD);
}
/* only cmp has board number */
bnum = -1;
bnum = -1;
}
return (DDI_WALK_PRUNECHILD);
return (DDI_WALK_CONTINUE);
}
/* we have already pruned all unwanted cores and cpu's above */
return (DDI_WALK_CONTINUE);
}
DDI_PROP_DONTPASS, "cpuid",
return (DDI_WALK_TERMINATE);
}
return (DDI_WALK_CONTINUE);
}
return (DDI_WALK_PRUNECHILD);
}
static void
{
struct drmach_setup_core_arg arg;
int i;
for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
}
for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
if (obj->boot_board) {
}
}
}
/*
* drmach_node_* routines serve the purpose of separating the
* rest of the code from the device tree and OBP. This is necessary
* because of In-Kernel-Probing. Devices probed after stod, are probed
* by the in-kernel-prober, not OBP. These devices, therefore, do not
* have dnode ids.
*/
typedef struct {
int err;
static int
{
/*
* dip doesn't have to be held here as we are called
* from ddi_walk_devs() which holds the dip.
*/
/*
* Set "here" to NULL so that unheld dip is not accessible
* outside ddi_walk_devs()
*/
return (DDI_WALK_TERMINATE);
else
return (DDI_WALK_CONTINUE);
}
static int
{
/* initialized args structure for callback */
/*
* Root node doesn't have to be held in any way.
*/
(void *)&nargs);
}
static int
{
static char *fn = "drmach_node_ddi_get_parent";
return (-1);
}
return (-1);
}
return (0);
}
/*ARGSUSED*/
static pnode_t
{
}
/*
 * Allocate and initialize a new drmach_node_t and return it.
 *
 * NOTE(review): the allocation/initialization of 'np' is not visible
 * in this extraction -- 'np' is returned without a visible definition.
 * Confirm against the complete source file before relying on this body.
 */
static drmach_node_t *
drmach_node_new(void)
{
return (np);
}
static void
{
}
static dev_info_t *
{
}
static int
{
}
static int
{
int rv = 0;
static char *fn = "drmach_node_ddi_get_prop";
rv = -1;
rv = -1;
}
return (rv);
}
static int
{
int rv = 0;
rv = -1;
rv = -1;
}
return (rv);
}
static drmachid_t
{
dup = drmach_node_new();
return (dup);
}
/*
* drmach_array provides convenient array construction, access,
* bounds checking and array destruction logic.
*/
static drmach_array_t *
{
return (arr);
} else {
return (0);
}
}
static int
{
return (-1);
else {
return (0);
}
/*NOTREACHED*/
}
static int
{
return (-1);
else {
return (0);
}
/*NOTREACHED*/
}
static int
{
int rv;
*idx += 1;
return (rv);
}
static int
{
int rv;
*idx += 1;
*idx += 1;
return (rv);
}
static void
{
int idx;
int rv;
while (rv == 0) {
}
}
static drmach_board_t *
{
return ((drmach_board_t *)id);
else
return (NULL);
}
static pnode_t
{
}
/*ARGSUSED*/
{
if (DRMACH_IS_CPU_ID(id)) {
return (NULL);
}
if (!DRMACH_IS_DEVICE_ID(id))
/* If non-NULL, fdip is returned held and must be released */
}
return (err);
}
static sbd_error_t *
{
int i;
int rv;
char name[OBP_MAXDRVNAME];
if (rv) {
/* every node is expected to have a name */
"device node %s: property %s",
return (err);
}
/*
* The node currently being examined is not listed in the name2type[]
* array. In this case, the node is no interest to drmach. Both
* dp and err are initialized here to yield nothing (no device or
* error structure) for this case.
*/
i = drmach_name2type_idx(name);
if (i < 0) {
*idp = (drmachid_t)0;
return (NULL);
}
/* device specific new function will set unum */
}
static void
{
}
static drmach_board_t *
{
/*
* If this is not bootup initialization, we have to wait till
* IKP sets up the device nodes in drmach_board_connect().
*/
if (boot_board)
return (bp);
}
static void
{
}
static sbd_error_t *
{
if (!DRMACH_IS_BOARD_ID(id))
int rv;
int d_idx;
while (rv == 0) {
if (err)
break;
}
}
return (err);
}
int
{
if (!DRMACH_IS_BOARD_ID(id))
return (0);
}
static int
drmach_init(void)
{
int *floating;
rdip = ddi_root_node();
} else {
if (rv != DDI_PROP_SUCCESS) {
} else {
drmach_domain.floating = 0;
for (i = 0; i < len / sizeof (int); i++) {
}
}
}
do {
int bnum;
bnum = -1;
if (bnum == -1)
continue;
" invalid property value, %s=%d",
goto error;
}
/*
* Initialize the IKP feature.
*
* This can be done only after DR has acquired a hold on all the
* device nodes that are interesting to IKP.
*/
if (opl_init_cfg() != 0) {
goto error;
}
return (0);
return (ENXIO);
}
/*
 * Tear down the drmach layer; inverse of drmach_init().
 *
 * NOTE(review): the body appears elided in this extraction -- only the
 * explanatory comment remains.  Per that comment, the full version
 * releases the devinfo branch holds acquired in drmach_init().
 */
static void
drmach_fini(void)
{
/*
 * Walk immediate children of the root devinfo node
 * releasing holds acquired on branches in drmach_init()
 */
}
/*
* Each system board contains 2 Oberon PCI bridge and
* 1 CMUCH.
* Each oberon has 2 channels.
* Each channel has 2 pci-ex leaf.
* Each CMUCH has 1 pci bus.
*
*
* Device Path:
* /pci@<portid>,reg
*
* where
* portid[10] = 0
* portid[9:0] = LLEAF_ID[9:0] of the Oberon Channel
*
* LLEAF_ID[9:8] = 0
* LLEAF_ID[8:4] = LSB_ID[4:0]
* LLEAF_ID[3:1] = IO Channel#[2:0] (0,1,2,3 for Oberon)
* channel 4 is pcicmu
* LLEAF_ID[0] = PCI Leaf Number (0 for leaf-A, 1 for leaf-B)
*
* Properties:
* name = pci
* device_type = "pciex"
* board# = LSBID
* reg = int32 * 2, Oberon CSR space of the leaf and the UBC space
* portid = Jupiter Bus Device ID ((LSB_ID << 3)|pciport#)
*/
static sbd_error_t *
{
int portid;
return (NULL);
}
static void
{
}
/*ARGSUSED*/
{
/* allow status and ncm operations to always succeed */
return (NULL);
}
/* check all other commands for the required option string */
}
} else {
}
switch (cmd) {
case SBD_CMD_TEST:
case SBD_CMD_STATUS:
case SBD_CMD_GETNCM:
break;
case SBD_CMD_CONNECT:
else if (!drmach_domain.allow_dr)
NULL);
break;
case SBD_CMD_DISCONNECT:
else if (!drmach_domain.allow_dr)
NULL);
break;
default:
if (!drmach_domain.allow_dr)
NULL);
break;
}
}
return (err);
}
/*ARGSUSED*/
{
return (NULL);
}
{
} else {
if (*id)
if (!(*id))
}
return (err);
}
/*ARGSUSED*/
{
if (!DRMACH_IS_BOARD_ID(id))
return (NULL);
}
static int drmach_cache_flush_flag[NCPU];
/*ARGSUSED*/
static void
{
extern void cpu_flush_ecache(void);
drmach_cache_flush_flag[id] = 0;
}
static void
{
int i;
for (i = 0; i < NCPU; i++) {
if (CPU_IN_SET(xc_cpuset, i)) {
drmach_cache_flush_flag[i] = 1;
xc_one(i, drmach_flush_cache, i, 0);
while (drmach_cache_flush_flag[i]) {
DELAY(1000);
}
}
}
}
static int
{
int i, bnum;
for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
return (-1);
"Failed to remove CMP %d on board %d\n",
i, bnum);
return (-1);
}
}
}
}
return (0);
}
/*ARGSUSED*/
{
int rv = 0;
if (DRMACH_NULL_ID(id))
return (NULL);
if (!DRMACH_IS_BOARD_ID(id))
if (drmach_disconnect_cpus(obj)) {
return (err);
}
if (rv == 0) {
} else
return (err);
}
static int
{
int portid;
char type[OBP_MAXPROPNAME];
return (portid);
/*
* Get the device_type property to see if we should
* continue processing this node.
*/
return (-1);
/*
* We return cpuid because it has no portid
*/
return (portid);
}
return (-1);
}
/*
* This is a helper function to determine if a given
* node should be considered for a dr operation according
* to predefined dr type nodes and the node's name.
* Formal Parameter : The name of a device node.
* Return Value: -1, name does not map to a valid dr type.
* A value greater or equal to 0, name is a valid dr type.
*/
static int
drmach_name2type_idx(char *name)
{
return (-1);
/*
* Determine how many possible types are currently supported
* for dr.
*/
/* Determine if the node's name correspond to a predefined type. */
/* The node is an allowed type for dr. */
return (index);
}
/*
* If the name of the node does not map to any of the
* types in the array drmach_name2type then the node is not of
* interest to dr.
*/
return (-1);
}
/*
* there is some complication on OPL:
* - pseudo-mc nodes do not have portid property
* - portid[9:5] of cmp node is LSB #, portid[7:3] of pci is LSB#
* - cmp has board#
* - core and cpu nodes do not have portid and board# properties
* starcat uses portid to derive the board# but that does not work
* for us. starfire reads board# property to filter the devices.
* That does not work either. So for these specific device,
* we use specific hard coded methods to get the board# -
* cpu: LSB# = CPUID[9:5]
*/
static int
{
int bnum;
char name[OBP_MAXDRVNAME];
/*
* core, cpu and pseudo-mc do not have portid
* we use cpuid as the portid of the cpu node
* for pseudo-mc, we do not use portid info.
*/
if (rv)
return (0);
if (rv) {
/*
* cpu does not have board# property. We use
* CPUID[9:5]
*/
} else
return (0);
}
return (0);
if (drmach_name2type_idx(name) < 0) {
return (0);
}
/*
* Create a device data structure from this node data.
* The call may yield nothing if the node is not of interest
* to drmach.
*/
return (-1);
else if (!id) {
/*
* drmach_device_new examined the node we passed in
* and determined that it was one not of interest to
* drmach. So, it is skipped.
*/
return (0);
}
if (rv) {
return (-1);
}
}
{
int max_devices;
int rv;
if (!DRMACH_IS_BOARD_ID(id))
data.a = a;
if (rv == 0)
else {
else
err = DRMACH_INTERNAL_ERROR();
}
return (err);
}
int
{
int rv = 0;
*id = 0;
rv = -1;
}
return (rv);
}
{
return (NULL);
}
{
if (DRMACH_NULL_ID(id))
return (NULL);
if (!DRMACH_IS_BOARD_ID(id))
if (!err) {
else {
}
}
return (err);
}
{
if (!DRMACH_IS_BOARD_ID(id))
return (NULL);
}
static sbd_error_t *
{
if (!DRMACH_IS_BOARD_ID(id))
return (NULL);
}
/*ARGSUSED*/
{
return (NULL);
}
{
if (DRMACH_NULL_ID(id))
return (NULL);
if (!DRMACH_IS_BOARD_ID(id)) {
}
if (err) {
return (err);
}
} else {
err = DRMACH_INTERNAL_ERROR();
else
}
return (err);
}
/*
* We have to do more on OPL - e.g. set up sram tte, read cpuid, strand id,
* implementation #, etc
*/
static sbd_error_t *
{
static void drmach_cpu_dispose(drmachid_t);
int portid;
/* portid is CPUID of the node */
(OPL_MAX_CMPID_PER_BOARD - 1)) +
/*
* CPU ID representation
* CPUID[9:5] = SB#
* CPUID[4:3] = Chip#
* CPUID[2:1] = Core# (Only 2 core for OPL)
* CPUID[0:0] = Strand#
*/
/*
* reg property of the strand contains strand ID
* reg property of the parent node contains core ID
* We should use them.
*/
return (NULL);
}
static void
{
}
static int
{
extern int restart_other_cpu(int);
/*
* NOTE: restart_other_cpu pauses cpus during the
* slave cpu start. This helps to quiesce the
* bus traffic a bit which makes the tick sync
* routine in the prom more robust.
*/
return (0);
}
static sbd_error_t *
{
if (!DRMACH_IS_CPU_ID(id))
return (NULL);
}
static sbd_error_t *
{
return (NULL);
}
{
if (!DRMACH_IS_CPU_ID(id))
return (NULL);
}
{
if (!DRMACH_IS_CPU_ID(id))
/* get from cpu directly on OPL */
return (NULL);
}
{
int impl;
char type[OBP_MAXPROPNAME];
if (!DRMACH_IS_CPU_ID(id))
return (DRMACH_INTERNAL_ERROR());
}
/* the parent should be core */
}
}
} else {
return (DRMACH_INTERNAL_ERROR());
}
return (NULL);
}
{
if (!DRMACH_IS_DEVICE_ID(id))
return (NULL);
}
{
int state;
if (!DRMACH_IS_IO_ID(id))
*yes = 0;
return (NULL);
}
(state == DDI_DEVSTATE_UP));
return (NULL);
}
struct drmach_io_cb {
char *name; /* name of the node */
int (*func)(dev_info_t *);
int rv;
};
#define DRMACH_IO_POST_ATTACH 0
#define DRMACH_IO_PRE_RELEASE 1
static int
{
char name[OBP_MAXDRVNAME];
int len = OBP_MAXDRVNAME;
DDI_PROP_DONTPASS, "name",
return (DDI_WALK_PRUNECHILD);
}
return (DDI_WALK_TERMINATE);
}
return (DDI_WALK_CONTINUE);
}
static int
{
struct drmach_io_cb arg;
int (*msudetp)(dev_info_t *);
int (*msuattp)(dev_info_t *);
int circ;
/* 4 is pcicmu channel */
return (0);
if (state == DRMACH_IO_PRE_RELEASE) {
msudetp = (int (*)(dev_info_t *))
modgetsymvalue("oplmsu_dr_detach", 0);
} else if (state == DRMACH_IO_POST_ATTACH) {
msuattp = (int (*)(dev_info_t *))
modgetsymvalue("oplmsu_dr_attach", 0);
} else {
return (0);
}
return (0);
}
} else {
/* this cannot happen unless something bad happens */
return (-1);
}
} else {
}
}
{
int rv;
if (!DRMACH_IS_IO_ID(id))
if (rv != 0)
return (NULL);
}
static sbd_error_t *
{
if (!DRMACH_IS_IO_ID(id))
return (NULL);
}
{
if (!DRMACH_IS_IO_ID(id))
return (NULL);
}
/*ARGSUSED*/
{
return (NULL);
}
/*ARGSUSED*/
{
int rv;
if (!DRMACH_IS_IO_ID(id))
if (rv != 0)
return (0);
}
static sbd_error_t *
{
int configured;
if (err)
return (err);
return (NULL);
}
static sbd_error_t *
{
static void drmach_mem_dispose(drmachid_t);
int rv;
rv = 0;
(rv <= 0)) {
*idp = (drmachid_t)0;
return (NULL);
}
}
/* make sure we do not create memoryless nodes */
} else
return (NULL);
}
static void
{
}
}
{
int rv;
if (!DRMACH_IS_MEM_ID(id))
} else if (rv != 0) {
/* catch this in debug kernels */
ASSERT(0);
" return value %d", rv);
}
if (rv) {
return (DRMACH_INTERNAL_ERROR());
}
else
return (NULL);
}
{
int rv;
if (!DRMACH_IS_MEM_ID(id))
if (size > 0) {
if (rv != 0) {
"unexpected kcage_range_delete_post_mem_del"
" return value %d", rv);
return (DRMACH_INTERNAL_ERROR());
}
}
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
else {
return (NULL);
}
}
{
if (!DRMACH_IS_MEM_ID(id))
else
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
/*
* for copying. Our unit of memory isolation is 64 MB.
*/
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
return (NULL);
}
{
#ifdef DEBUG
int rv;
#endif
if (!DRMACH_IS_MEM_ID(id))
#ifdef DEBUG
/*
* Make sure the incoming memlist doesn't already
* intersect with what's present in the system (phys_install).
*/
if (rv) {
DRMACH_PR("Derived memlist intersects"
" with phys_install\n");
DRMACH_PR("phys_install memlist:\n");
return (DRMACH_INTERNAL_ERROR());
}
DRMACH_PR("Derived memlist:");
#endif
return (NULL);
}
{
if (!DRMACH_IS_MEM_ID(id))
return (NULL);
}
/* ARGSUSED */
{
return (CPU_CURRENT);
}
static sbd_error_t *
{
if (!DRMACH_IS_MEM_ID(id))
return (NULL);
}
static sbd_error_t *
{
/* get starting physical address of target memory */
/* round down to slice boundary */
/* stop at first span that is in slice */
break;
return (NULL);
}
{
if (!DRMACH_IS_BOARD_ID(id))
}
}
bp->boot_board = 0;
return (NULL);
}
/*ARGSUSED*/
static sbd_error_t *
{
int rv;
if (!DRMACH_IS_BOARD_ID(id))
if (rv != 0) {
return (err);
}
return (err);
}
/*ARGSUSED*/
static sbd_error_t *
{
int rv;
if (!DRMACH_IS_BOARD_ID(id))
if (rv != 0) {
}
return (err);
}
/*ARGSUSED*/
{
/* copy 32 bytes at arc_pa to dst_pa */
/* increment by 32 bytes */
/* decrement by 32 bytes */
}
}
return (NULL);
}
static struct {
const char *name;
} drmach_pt_arr[] = {
{ "readmem", drmach_pt_readmem },
{ "ikprobe", drmach_pt_ikprobe },
{ "ikdeprobe", drmach_pt_ikdeprobe },
/* the following line must always be last */
};
/*ARGSUSED*/
{
int i;
i = 0;
break;
i += 1;
}
else
return (err);
}
{
if (!DRMACH_IS_DEVICE_ID(id))
}
{
if (!DRMACH_IS_ID(id)) {
}
return (err);
}
static sbd_error_t *
{
if (!DRMACH_IS_ID(id))
}
/*ARGSUSED*/
{
char name[OBP_MAXDRVNAME];
int rv;
if (DRMACH_IS_CPU_ID(id))
return (NULL);
if (!DRMACH_IS_DEVICE_ID(id))
if (rv)
return (NULL);
/*
* Note: FORCE flag is no longer necessary under devfs
*/
/*
* If non-NULL, fdip is returned held and must be released.
*/
} else {
}
return (err);
}
return (NULL);
}
int
{
HOTADD_CPU) != 0) {
onb_core_num, bnum);
return (EIO);
}
}
if (drmach_cpu_start(cp) != 0) {
/*
* we must undo the hotadd or no one will do that
* If this fails, we will do this again in
* drmach_board_disconnect.
*/
HOTREMOVE_CPU) != 0) {
"on board %d\n",
onb_core_num, bnum);
}
}
return (EBUSY);
} else {
return (0);
}
}
int
{
int rv = 0;
/*
* Capture all CPUs (except for detaching proc) to prevent
* crosscalls to the detaching proc until it has cleared its
* bit in cpu_ready_set.
*
* The CPU's remain paused and the prom_mutex is known to be free.
* This prevents the x-trap victim from blocking when doing prom
* IEEE-1275 calls at a high PIL level.
*/
/*
* Quiesce interrupts on the target CPU. We do this by setting
* the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to
* prevent it from receiving cross calls and cross traps.
* This prevents the processor from receiving any new soft interrupts.
*/
if (rv == 0)
start_cpus();
if (rv == 0) {
HOTREMOVE_CPU) != 0) {
"Failed to remove CMP %d LSB %d\n",
onb_core_num, bnum);
return (EIO);
}
}
}
return (rv);
}
/*ARGSUSED*/
int
{
return (0);
}
/*
 * DR hook called just before the last CPU is suspended during a
 * suspend sequence.  No-op in the code shown here; kept as an
 * interface point for platforms that need last-CPU suspend work.
 */
void
drmach_suspend_last(void)
{
}
/*
 * DR hook called just after the first CPU is resumed during a
 * resume sequence.  No-op in the code shown here; kept as an
 * interface point for platforms that need first-CPU resume work.
 */
void
drmach_resume_first(void)
{
}
/*
* Log a DR sysevent.
* Return value: 0 success, non-zero failure.
*/
int
{
sysevent_t *ev;
char attach_pnt[MAXNAMELEN];
attach_pnt[0] = '\0';
rv = -1;
goto logexit;
}
if (verbose) {
DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
}
rv = -2;
goto logexit;
}
goto logexit;
goto logexit;
}
/*
* Log the event but do not sleep waiting for its
* delivery. This provides insulation from syseventd.
*/
if (ev)
"drmach_log_sysevent failed (rv %d) for %s %s\n",
return (rv);
}
#define OPL_DR_STATUS_PROP "dr-status"
static int
{
char *str;
node = prom_rootnode();
if (node == OBP_BADNODE) {
return (1);
}
if (len == -1) {
/*
* dr-status doesn't exist when DR is activated and
* any warning messages aren't needed.
*/
return (1);
}
if (rtn == -1) {
return (1);
} else {
return (0);
}
}
/* we are allocating memlist from TLB locked pages to avoid tlbmisses */
static struct memlist *
{
return (NULL);
mlist = p->free_mlist;
return (NULL);
return (mlist);
}
nl = p->free_mlist;
return (NULL);
} else {
base;
}
break;
break;
}
}
nl = p->free_mlist;
return (NULL);
}
return (mlist);
}
/*
* The routine performs the necessary memory COPY and MC adr SWITCH.
* Both operations MUST be at the same "level" so that the stack is
* maintained correctly between the copy and switch. The switch
* portion implements a caching mechanism to guarantee the code text
* is cached prior to execution. This is to guard against possible
* memory access while the MC adr's are being modified.
*
* IMPORTANT: The _drmach_copy_rename_end() function must immediately
* follow drmach_copy_rename_prog__relocatable() so that the correct
* "length" of the drmach_copy_rename_prog__relocatable can be
* calculated. This routine MUST be a LEAF function, i.e. it can
* make NO function calls, primarily for two reasons:
*
* 1. We must keep the stack consistent across the "switch".
* 2. Function calls are compiled to relative offsets, and
* we execute this function we'll be executing it from
* a copied version in a different area of memory, thus
* the relative offsets will be bogus.
*
* Moreover, it must have the "__relocatable" suffix to inform DTrace
* providers (and anything else, for that matter) that this
* function's text is manually relocated elsewhere before it is
* executed. That is, it cannot be safely instrumented with any
* methodology that is PC-relative.
*/
/*
* We multiply this to system_clock_frequency so we
* are setting a delay of fmem_timeout second for
* the rename command.
*
* FMEM command itself should complete within 15 sec.
* We add 2 more sec to be conservative.
*
* Note that there is also a SCF BUSY bit checking
* in drmach_asm.s right before FMEM command is
* issued. XSCF sets the SCF BUSY bit when the
* other domain on the same PSB reboots and it
* will not be able to service the FMEM command
* within 15 sec. After setting the SCF BUSY
* bit, XSCF will wait a while before servicing
* other reboot command so there is no race
* condition.
*/
static int fmem_timeout = 17;
/*
* The empirical data on some OPL system shows that
* we can copy 250 MB per second. We set it to
* 80 MB to be conservative. In normal case,
* this timeout does not affect anything.
*/
/*
* This is the timeout value for the xcall synchronization
* to get all the CPU ready to do the parallel copying.
* Even on a fully loaded system, 10 sec. should be long
* enough.
*/
static int cpu_xcall_delay = 10;
int drmach_disable_mcopy = 0;
/*
* The following delay loop executes sleep instruction to yield the
* CPU to other strands. If this is not done, some strand will tie
* up the CPU in busy loops while the other strand cannot do useful
* work. The copy procedure will take a much longer time without this.
*/
{ \
start = drmach_get_stick_il(); \
drmach_sleep_il(); \
now = drmach_get_stick_il(); \
} \
}
static int
int cpuid)
{
register int rtn;
int i;
extern uint64_t drmach_get_stick_il();
extern void membar_sync_il();
extern void flush_instr_mem_il(void*);
extern void flush_windows_il(void);
/*
* flush_windows is moved here to make sure all
* registers used in the callers are flushed to
* memory before the copy.
*
* If flush_windows() is called too early in the
* calling function, the compiler might put some
* data in the local registers after flush_windows().
* After FMA, if there is any fill trap, the registers
* will contain stale data.
*/
limit = drmach_get_stick_il();
for (i = 0; i < NCPU; i++) {
/* wait for all CPU's to be ready */
for (;;) {
break;
}
}
curr = drmach_get_stick_il();
return (EOPL_FMEM_XC_TIMEOUT);
}
}
}
} else {
for (;;) {
break;
}
return (EOPL_FMEM_TERMINATE);
}
}
}
/*
* DO COPY.
*/
/* If the master has detected error, we just bail out */
return (EOPL_FMEM_TERMINATE);
}
/*
* This copy does NOT use an ASI
* that avoids the Ecache, therefore
* the dst_pa addresses may remain
* in our Ecache after the dst_pa
* has been removed from the system.
* A subsequent write-back to memory
* will cause an ARB-stop because the
* physical address no longer exists
* in the system. Therefore we must
* flush out local Ecache after we
* finish the copy.
*/
/* copy 32 bytes at src_pa to dst_pa */
/* increment the counter to signal that we are alive */
/* increment by 32 bytes */
/* decrement by 32 bytes */
}
}
}
/*
* Since bcopy32_il() does NOT use an ASI to bypass
* the Ecache, we need to flush our Ecache after
* the copy is complete.
*/
/*
* drmach_fmem_exec_script()
*/
for (i = 0; i < NCPU; i++) {
for (;;) {
/* we get FMEM_LOOP_FMEM_READY in normal case */
break;
}
/* got error traps */
return (EOPL_FMEM_COPY_ERROR);
}
/* if we have not reached limit, wait more */
curr = drmach_get_stick_il();
continue;
curr - copy_start;
/* now check if slave is alive */
/* no progress, perhaps just finished */
break;
/* copy error */
return (EOPL_FMEM_COPY_ERROR);
}
return (EOPL_FMEM_COPY_TIMEOUT);
}
}
}
}
/*
* drmach_fmem_exec_script()
*/
return (rtn);
} else {
/*
* drmach_fmem_loop_script()
*/
/* slave thread does not care the rv */
return (0);
}
}
/*
 * End-of-text marker: its address is used to compute the size of
 * drmach_copy_rename_prog__relocatable, the routine whose text is
 * copied elsewhere before execution.  This function is never called.
 */
static void
drmach_copy_rename_end(void)
{
/*
 * IMPORTANT: This function's location MUST be located immediately
 * following drmach_copy_rename_prog__relocatable to
 * accurately estimate its size. Note that this assumes
 * the compiler keeps these functions in the order in
 * which they appear :-o
 */
}
static void
{
int nbytes, s;
p->free_mlist = NULL;
buf = p->memlist_buffer;
p->free_mlist = ml;
buf += s;
nbytes -= s;
}
}
static void
{
int i;
for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
}
}
static void
{
int i;
for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
}
}
{
extern void drmach_fmem_loop_script();
extern void drmach_fmem_loop_script_rtn();
extern int drmach_fmem_exec_script();
extern void drmach_fmem_exec_script_end();
void (*mc_suspend)(void);
void (*mc_resume)(void);
int (*scf_fmem_start)(int, int);
int (*scf_fmem_end)(void);
int (*scf_fmem_cancel)(void);
uint64_t (*scf_get_base_addr)(void);
if (!DRMACH_IS_MEM_ID(s_id))
if (!DRMACH_IS_MEM_ID(t_id))
for (i = 0; i < NCPU; i++) {
/*
* this kind of CPU will spin in cache
*/
if (CPU_IN_SET(cpu_ready_set, i))
continue;
/*
* Now check for any inactive CPU's that
* have been hotadded. This can only occur in
* error condition in drmach_cpu_poweron().
*/
continue;
(1 << strand_id)) {
(1 << strand_id))) {
}
}
}
mc_suspend = (void (*)(void))
modgetsymvalue("opl_mc_suspend", 0);
mc_resume = (void (*)(void))
modgetsymvalue("opl_mc_resume", 0);
}
scf_fmem_start = (int (*)(int, int))
modgetsymvalue("scf_fmem_start", 0);
if (scf_fmem_start == NULL) {
}
scf_fmem_end = (int (*)(void))
modgetsymvalue("scf_fmem_end", 0);
if (scf_fmem_end == NULL) {
}
scf_fmem_cancel = (int (*)(void))
modgetsymvalue("scf_fmem_cancel", 0);
if (scf_fmem_cancel == NULL) {
}
scf_get_base_addr = (uint64_t (*)(void))
modgetsymvalue("scf_get_base_addr", 0);
if (scf_get_base_addr == NULL) {
}
/* calculate source and target base pa */
/* adjust copy memlist addresses to be relative to copy base pa */
}
/*
* bp will be page aligned, since we're calling
* kmem_zalloc() with an exact multiple of PAGESIZE.
*/
/*
* To avoid MTLB hit, we allocate a new VM space and remap
* the kmem_alloc buffer to that address. This solves
* 2 problems we found:
* - the kmem_alloc buffer can be just a chunk inside
* a much larger, e.g. 4MB buffer and MTLB will occur
* if there are both a 4MB and a 8K TLB mapping to
* the same VA range.
* - the kmem mapping got dropped into the TLB by other
* strands, unintentionally.
* Note that the pointers like data, critical, memlist_buffer,
* and stat inside the copy rename structure are mapped to this
* alternate VM space so we must make sure we lock the TLB mapping
* whenever we access data pointed to by these pointers.
*/
/* Now remap prog_kmem to prog */
/* All pointers in prog are based on the alternate mapping */
sizeof (drmach_copy_rename_program_t)), sizeof (void *));
/* LINTED */
ASSERT(sizeof (drmach_cr_stat_t)
* PAGESIZE));
for (i = 2; i < 15; i++) {
}
len = sizeof (drmach_copy_rename_critical_t);
/*
* We always leave 1K nop's to prevent the processor from
* speculative execution that causes memory access
*/
/* this is the entry point of the loop script */
/* now we make sure there is 1K extra */
goto out;
}
len = sizeof (drmach_copy_rename_critical_t);
/* now we are committed, call SCF, soft suspend mac patrol */
goto out;
}
/* soft suspend mac patrol */
(*mc_suspend)();
/*
* 0x30800000 is op code "ba,a +0"
*/
/*
* set the value of SCF FMEM TIMEOUT
*/
for (i = 0; i < NCPU; i++) {
}
active_cpus = 0;
if (drmach_disable_mcopy) {
active_cpus = 1;
} else {
for (i = 0; i < NCPU; i++) {
if (CPU_IN_SET(cpu_ready_set, i) &&
CPU_ACTIVE(cpu[i])) {
active_cpus++;
}
}
}
sz = 0;
}
}
for (i = 0; i < NCPU; i++) {
continue;
}
while (sz) {
break;
} else {
} else {
goto end;
}
}
}
}
end:
/* Unmap the alternate space. It will have to be remapped again */
return (NULL);
out:
}
}
return (err);
}
{
int rv;
/*
* Note that we have to delay calling SCF to find out the
* status of the FMEM operation here because SCF cannot
* respond while it is suspended.
* This create a small window when we are sure about the
* base address of the system board.
* If there is any call to mc-opl to get memory unum,
* mc-opl will return UNKNOWN as the unum.
*/
/*
* we have to remap again because all the pointer like data,
* critical in prog are based on the alternate vmem space.
*/
"invalid op code %x\n",
}
if (fmem_error != ESBD_NOERROR) {
}
/* possible ops are SCF_START, MC_SUSPEND */
if (fmem_error != ESBD_NOERROR) {
}
if (rv) {
}
/*
* If we get here, rename is successful.
* Do all the copy rename post processing.
*/
} else {
if (rv) {
if (!err)
"scf_fmem_cancel() failed. rv = 0x%x", rv);
}
}
/* soft resume mac patrol */
return (err);
}
/*ARGSUSED*/
static void
{
register int cpuid;
extern void drmach_flush();
extern void membar_sync_il();
extern void drmach_flush_icache();
no_trap();
return;
}
/*
* jmp drmach_copy_rename_prog().
*/
no_trap();
}
static void
{
return;
}
}
}
/*
* IKP has to update the sb-mem-ranges for mac patrol driver
* when it resumes, it will re-read the sb-mem-range property
* to get the new base address
*/
}
void
{
int cpuid;
register int rtn;
extern int in_sync;
int old_in_sync;
extern void drmach_sys_trap();
extern void drmach_flush();
extern void drmach_flush_icache();
/*
* We must immediately drop in the TLB because all pointers
* are based on the alternate vmem space.
*/
/*
* we call scf to get the base address here becuase if scf
* has not been suspended yet, the active path can be changing and
* sometimes it is not even mapped. We call the interface when
* the OS has been quiesced.
*/
return;
}
}
}
in_sync = 1;
}
}
goto done;
}
/*
* jmp drmach_copy_rename_prog().
*/
done:
no_trap();
if (rtn == EOPL_FMEM_HW_ERROR) {
prom_panic("URGENT_ERROR_TRAP is "
"detected during FMEM.\n");
}
/*
* In normal case, all slave CPU's are still spinning in
* the assembly code. The master has to patch the instruction
* to get them out.
* In error case, e.g. COPY_ERROR, some slave CPU's might
* have aborted and already returned and sset LOOP_EXIT status.
* Some CPU might still be copying.
* In any case, some delay is necessary to give them
* enough time to set the LOOP_EXIT status.
*/
for (;;) {
break;
}
}
continue;
}
/*
* Wait for all CPU to exit.
* However we do not want an infinite loop
* so we detect hangup situation here.
* If the slave CPU is still copying data,
* we will continue to wait.
* In error cases, the master has already set
* fmem_status.error to abort the copying.
* 1 m.s delay for them to abort copying and
* return to drmach_copy_rename_slave to set
* FMEM_LOOP_EXIT status should be enough.
*/
for (;;) {
break;
drv_usecwait(1000);
drv_usecwait(1000);
break;
"CPU %d hang during Copy Rename", cpuid);
}
}
prom_panic("URGENT_ERROR_TRAP is "
"detected during FMEM.\n");
}
}
/*
* This must be done after all strands have exit.
* Removing the TLB entry will affect both strands
* in the same core.
*/
}
}
/*
* we should unlock before the following lock to keep the kpreempt
* count correct.
*/
/*
* we must remap again. TLB might have been removed in above xcall.
*/
DRMACH_PR("Unexpected long wait time %ld seconds "
"during copy rename on CPU %d\n",
}
}