/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* driver for accessing kernel devinfo tree.
*/
#include <sys/pathname.h>
#include <sys/autoconf.h>
#include <sys/sunldi_impl.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/mdi_impldefs.h>
#include <sys/devinfo_impl.h>
#include <sys/ddi_hp_impl.h>
#include <sys/sysmacros.h>
/*
 * NOTE(review): this region appears truncated in this view; the #else
 * branch is empty and the usual debug macro definitions are missing.
 */
#ifdef DEBUG
/* debug level for the driver; only present in DEBUG kernels */
static int di_debug;
#else
#endif
/*
 * We partition the space of devinfo minor nodes equally between the full and
 * unprivileged versions of the driver. The even-numbered minor nodes are the
 * full version, while the odd-numbered ones are the read-only version.
 */
#define DI_FULL_PARENT 0
/*
* Keep max alignment so we can move snapshot to different platforms.
*
* NOTE: Most callers should rely on the di_checkmem return value
* being aligned, and reestablish *off_p with aligned value, instead
* of trying to align size of their allocations: this approach will
* minimize memory use.
*/
/*
* To avoid wasting memory, make a linked list of memory chunks.
* Size of each chunk is buf_size.
*/
/*
 * NOTE(review): both structures below appear truncated in this view —
 * their members are missing, and the two stray closing braces at the
 * end suggest inline helper definitions were removed. Do not infer
 * layout from what remains.
 */
struct di_mem {
};
/*
 * This is a stack for walking the tree without using recursion.
 * When the devinfo tree height is above some small size, one
 * gets watchdog resets on sun4m.
 */
struct di_stack {
};
}
}
/*
* With devfs, the device tree has no global locks. The device tree is
* dynamic and dips may come and go if they are not locked locally. Under
* these conditions, pointers are no longer reliable as unique IDs.
* Specifically, these pointers cannot be used as keys for hash tables
* as the same devinfo structure may be freed in one part of the tree only
* to be allocated as the structure for a different device in another
* part of the tree. This can happen if DR and the snapshot are
* happening concurrently.
* The following data structures act as keys for devinfo nodes and
* pathinfo nodes.
*/
/*
 * NOTE(review): the enumerators of di_ktype and most members of the key
 * structures are missing from this view. The surviving members suggest
 * di_dkey keys devinfo nodes (by instance, among other fields) and
 * di_pkey keys pathinfo nodes (by path address, among other fields) —
 * TODO confirm against the full source.
 */
enum di_ktype {
};
struct di_dkey {
int dk_inst;
};
struct di_pkey {
char *pk_path_addr;
};
/* discriminated union of the two key kinds above */
struct di_key {
union {
} k_u;
};
/*
 * NOTE(review): the member declarations of i_link_t, i_lnode_t and
 * i_hp_t are missing in this view; only the explanatory comments and
 * the modid member survive. The comments below describe fields that
 * are not visible here.
 */
struct i_lnode;
typedef struct i_link {
/*
 * If a di_link struct representing this i_link struct makes it
 * into the snapshot, then self will point to the offset of
 * the di_link struct in the snapshot
 */
} i_link_t;
typedef struct i_lnode {
/*
 * If a di_lnode struct representing this i_lnode struct makes it
 * into the snapshot, then self will point to the offset of
 * the di_lnode struct in the snapshot
 */
/*
 * used for hashing and comparing i_lnodes
 */
int modid;
/*
 * public information describing a link endpoint
 */
/*
 * i_link ptr to links coming into this i_lnode node
 * (this i_lnode is the target of these i_links)
 */
/*
 * i_link ptr to links going out of this i_lnode node
 * (this i_lnode is the source of these i_links)
 */
} i_lnode_t;
typedef struct i_hp {
} i_hp_t;
/*
* Soft state associated with each instance of driver open.
*/
/*
 * NOTE(review): di_state's members, the anonymous enum, the
 * phci_walk_arg structure, and several function prototypes are
 * truncated here (orphaned "struct di_state *);" parameter lines
 * remain). Only the counts, the complete prototypes, and the externs
 * are visible.
 */
static struct di_state {
int lnode_count;
int link_count;
} **di_states;
typedef enum {
/*
 * Check that whole device tree is being configured as a pre-condition for
 */
typedef struct phci_walk_arg {
struct di_state *);
struct di_state *);
struct di_state *);
static void di_freemem(struct di_state *);
static int di_setstate(struct di_state *, int);
struct di_state *, int);
/* set once the root filesystem's modules are loadable */
extern int modrootloaded;
/* MPxIO walkers used to snapshot vhci/phci linkage */
extern void mdi_walk_vhcis(int (*)(dev_info_t *, void *), void *);
extern void mdi_vhci_walk_phcis(dev_info_t *,
int (*)(dev_info_t *, void *), void *);
/*
 * NOTE(review): the declarations opening these initializers
 * (presumably "static struct cb_ops di_cb_ops = {", "static struct
 * dev_ops ... = {", modldrv/modlinkage) are missing from this view;
 * only initializer members remain.
 */
di_open, /* open */
di_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
di_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* prop_op */
NULL, /* streamtab */
};
DEVO_REV, /* devo_rev, */
0, /* refcnt */
di_info, /* info */
nulldev, /* identify */
nulldev, /* probe */
di_attach, /* attach */
di_detach, /* detach */
nodev, /* reset */
&di_cb_ops, /* driver operations */
NULL /* bus operations */
};
/*
 * Module linkage information for the kernel.
 */
"DEVINFO Driver",
};
&modldrv,
};
/*
 * Loadable-module entry points.
 *
 * NOTE(review): in the visible lines 'error' is tested but never
 * assigned — the initializing calls (likely mod_install()/mod_remove()
 * of the module linkage) appear to have been removed from this view,
 * along with _info()'s signature and body. Do not treat these bodies
 * as complete.
 */
int
_init(void)
{
int error;
if (error != 0) {
return (error);
}
return (0);
}
int
{
}
int
_fini(void)
{
int error;
if (error != 0) {
return (error);
}
return (0);
}
/*
 * getinfo(9E), attach(9E) and detach(9E) entry points.
 *
 * NOTE(review): all three function signatures and many statements are
 * missing from this view (e.g. the allocation whose size expression
 * "di_max_opens * sizeof (struct di_state *)" survives in di_attach).
 * Only the switch skeletons are visible.
 */
/*ARGSUSED*/
static int
{
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
error = DDI_SUCCESS;
break;
case DDI_INFO_DEVT2INSTANCE:
/*
 * All dev_t's map to the same, single instance.
 */
*result = (void *)0;
error = DDI_SUCCESS;
break;
default:
break;
}
return (error);
}
static int
{
switch (cmd) {
case DDI_ATTACH:
di_max_opens * sizeof (struct di_state *));
error = DDI_FAILURE;
} else {
error = DDI_SUCCESS;
}
break;
default:
error = DDI_FAILURE;
break;
}
return (error);
}
static int
{
switch (cmd) {
case DDI_DETACH:
error = DDI_SUCCESS;
break;
default:
error = DDI_FAILURE;
break;
}
return (error);
}
/*
* Allow multiple opens by tweaking the dev_t such that it looks like each
* open is getting a different minor device. Each minor gets a separate
* entry in the di_states[] table. Based on the original minor number, we
* discriminate opens of the full and read-only nodes. If all of the instances
* of the selected minor node are currently open, we return EAGAIN.
*/
/*
 * open(9E)/close(9E) entry points.
 *
 * NOTE(review): signatures and the di_states[] scan/allocation logic
 * are missing from this view; what remains shows the minor-number
 * slot search (EAGAIN when all di_max_opens slots are busy) and the
 * close path freeing the snapshot memory via di_freemem().
 */
/*ARGSUSED*/
static int
{
int m;
if (minor_parent != DI_FULL_PARENT &&
return (ENXIO);
continue;
break; /* It's ours. */
}
if (m >= di_max_opens) {
/*
 * maximum open instance for device reached
 */
return (EAGAIN);
}
ASSERT(m < di_max_opens);
(void *)curthread, m + DI_NODE_SPECIES));
return (0);
}
/*ARGSUSED*/
static int
{
if (m < 0) {
m + DI_NODE_SPECIES);
return (ENXIO);
}
di_freemem(st);
/*
 * empty slot in state table
 */
(void *)curthread, m + DI_NODE_SPECIES));
return (0);
}
/*
 * ioctl(9E) entry point: dispatches DINFOIDENT, DINFOLODRV, DINFOUSRLD
 * and (via the fallthrough) the snapshot-taking commands.
 *
 * NOTE(review): the function signature, most locals, copyin/copyout
 * calls and many conditionals are missing from this view. The visible
 * skeleton shows error paths only; do not infer the full control flow
 * from what remains.
 */
/*ARGSUSED*/
static int
{
major_t i;
char *drv_name;
int ndi_flags;
if (m < 0 || m >= di_max_opens) {
return (ENXIO);
}
switch (cmd) {
case DINFOIDENT:
/*
 * This is called from di_init to verify that the driver
 * opened is indeed devinfo. The purpose is to guard against
 * sending ioctl to an unknown driver in case of an
 * unresolved major number conflict during bfu.
 */
return (0);
case DINFOLODRV:
/*
 * Hold an installed driver and return the result
 */
if (DI_UNPRIVILEGED_NODE(m)) {
/*
 * Only the fully enabled instances may issue
 * DINFOLDDRV.
 */
return (EACCES);
}
return (EFAULT);
}
/*
 * Some 3rd party driver's _init() walks the device tree,
 * so we load the driver module before configuring driver.
 */
i = ddi_name_to_major(drv_name);
if (ddi_hold_driver(i) == NULL) {
return (ENXIO);
}
/*
 * i_ddi_load_drvconf() below will trigger a reprobe
 * via reset_nexus_flags(). NDI_DRV_CONF_REPROBE isn't
 * needed here.
 */
(void) i_ddi_load_drvconf(i);
ddi_rele_driver(i);
rv = i_ddi_devs_attached(i);
case DINFOUSRLD:
/*
 * The case for copying snapshot to userland
 */
return (EBUSY);
if (map_size == 0) {
return (EFAULT);
}
/*
 * copyout the snapshot
 */
/*
 * Return the map size, so caller may do a sanity
 * check against the return value of snapshot ioctl()
 */
/*
 * Copy one chunk at a time
 */
off = 0;
while (map_size) {
}
return (EFAULT);
}
}
di_freemem(st);
return (0);
default:
/*
 * Invalid ioctl command
 */
return (ENOTTY);
}
/*
 * take a snapshot
 */
/*FALLTHROUGH*/
}
/*
 * Obtain enough memory to hold header + rootpath. We prevent kernel
 * memory exhaustion by freeing any previously allocated snapshot and
 * refusing the operation; otherwise we would be allowing ioctl(),
 * ioctl(), ioctl(), ..., panic.
 */
return (EBUSY);
/*
 * Initial memlist always holds di_all and the root_path - and
 * is at least a page and size.
 */
/*
 * Note the endianness in case we need to transport snapshot
 * over the network.
 */
#if defined(_LITTLE_ENDIAN)
#else
#endif
/* Copyin ioctl args, store in the snapshot. */
di_freemem(st);
return (EFAULT);
}
di_freemem(st);
return (EINVAL);
}
error = 0;
di_freemem(st);
return (error);
}
/*
 * Only the fully enabled version may force load drivers or read
 * the parent private data from a driver.
 */
DI_UNPRIVILEGED_NODE(m)) {
di_freemem(st);
return (EACCES);
}
/* Do we need private data? */
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32: {
/*
 * Cannot copy private data from 64-bit kernel
 * to 32-bit app
 */
di_freemem(st);
return (EINVAL);
}
case DDI_MODEL_NONE:
di_freemem(st);
return (EFAULT);
}
break;
}
#else /* !_MULTI_DATAMODEL */
di_freemem(st);
return (EFAULT);
}
#endif /* _MULTI_DATAMODEL */
}
/*
 * For cache lookups we reallocate memory from scratch,
 * so the value of "all" is no longer valid.
 */
} else if (snapshot_is_cacheable(st)) {
} else
if (*rvalp) {
} else {
di_freemem(st);
}
return (0);
}
/*
* Get a chunk of memory >= size, for the snapshot
*/
/*
 * Snapshot memory-list management: allocate a chunk (di_allocmem),
 * copy the memlist out (di_copymem), free it (di_freemem).
 *
 * NOTE(review): signatures, locals, and the actual allocation/copy
 * statements are missing from this view; only control-flow fragments
 * remain.
 */
static void
{
/*
 * Round up size to nearest power of 2. If it is less
 * than st->mem_size, set it to st->mem_size (i.e.,
 * the mem_size is doubled every time) to reduce the
 * number of memory allocations.
 */
tmp <<= 1;
}
} else {
/*
 * locate end of linked list and add a chunk at the end
 */
}
}
}
/*
 * Copy upto bufsiz bytes of the memlist to buf
 */
static void
{
return;
}
copysz = 0;
else
if (bufsiz == 0)
break;
}
}
/*
 * Free all memory for the snapshot
 */
static void
{
while (dcp) { /* traverse the linked list */
}
}
}
/*
* Copies cached data to the di_state structure.
* Returns:
* - size of data copied, on SUCCESS
* - 0 on failure
*/
/*
 * Snapshot cache transfer helpers plus di_checkmem.
 *
 * NOTE(review): the signatures and the bcopy/allocation statements are
 * missing from this view; only the guard conditions and returns remain.
 */
static int
{
/* nothing cached: caller must build a fresh snapshot */
if (cache->cache_size == 0) {
return (0);
}
/*
 * Verify that di_allocmem() allocates contiguous memory,
 * so that it is safe to do straight bcopy()
 */
return (cache->cache_size);
}
/*
 * Copies a snapshot from di_state to the cache
 * Returns:
 *	- 0 on failure
 *	- size of copied data on success
 */
static size_t
{
return (0);
}
/*
 * The size of the memory list may be much larger than the
 * size of valid data (map_size). Cache only the valid data
 */
return (0);
}
return (map_size);
}
/*
 * Make sure there is at least "size" bytes memory left before
 * going on. Otherwise, start on a new chunk.
 */
static di_off_t
{
/*
 * di_checkmem() shouldn't be called with a size of zero.
 * But in case it is, we want to make sure we return a valid
 * offset within the memlist and not an offset that points us
 * at the end of the memlist.
 */
if (size == 0) {
size = 1;
}
}
/* verify that return value is aligned */
return (off);
}
/*
* Copy the private data format from ioctl arg.
* On success, the ending offset is returned. On error 0 is returned.
*/
/*
 * di_copyformat: copy the private-data format description in from the
 * ioctl argument; di_mem_addr: translate a snapshot offset to a kernel
 * address.
 *
 * NOTE(review): signatures, copyin() calls and the offset arithmetic
 * are missing from this view; the surviving "mode) != 0)" fragments
 * are the tails of copyin() error checks.
 */
static di_off_t
{
/*
 * Copyin data and check version.
 * We only handle private data version 0.
 */
return (0);
}
/*
 * Save di_priv_data copied from userland in snapshot.
 */
/*
 * copyin private data format, modify offset accordingly
 */
/*
 * check memory
 */
mode) != 0) {
return (0);
}
}
/*
 * check memory
 */
mode) != 0) {
return (0);
}
}
return (off);
}
/*
 * Return the real address based on the offset (off) within snapshot
 */
static void *
{
}
}
/*
* Ideally we would use the whole key to derive the hash
* value. However, the probability that two keys will
* have the same dip (or pip) is very low, so
* hashing by dip (or pip) pointer should suffice.
*/
/*
 * mod_hash support for di_key: hash function, key destructor, and
 * comparators (dkey, pkey, and the dispatching di_key compare),
 * followed by a value/free helper.
 *
 * NOTE(review): signatures, switch headers and most comparisons are
 * missing from this view; only case labels, panics and returns remain.
 */
static uint_t
{
void *ptr;
case DI_DKEY:
break;
case DI_PKEY:
break;
default:
panic("devinfo: unknown key type");
/*NOTREACHED*/
}
}
static void
{
char *path_addr;
case DI_DKEY:
break;
case DI_PKEY:
if (path_addr)
break;
default:
panic("devinfo: unknown key type");
/*NOTREACHED*/
}
}
static int
{
}
return (0);
}
static int
{
int rv;
if (rv)
return (0);
}
static int
{
/* keys of different kinds can never be equal */
panic("devinfo: mismatched keys");
/*NOTREACHED*/
}
case DI_DKEY:
case DI_PKEY:
default:
panic("devinfo: unknown key type");
/*NOTREACHED*/
}
}
static void
{
return;
}
return;
}
}
/*
* This is the main function that takes a snapshot
*/
/*
 * di_snapshot: the main snapshot routine — resolves/holds the root
 * devinfo node, creates the dip/pip hash tables, copies the tree and
 * devnames array, records alias pairs, and timestamps the result.
 * Followed by di_snapshot_and_clean and the MPxIO vhci/phci walk
 * callbacks.
 *
 * NOTE(review): signatures and the bulk of the statements (pathname
 * lookup, hash creation, tree copy calls) are missing from this view;
 * the orphaned string fragments at "devinfo registered dips/pips" are
 * the tails of mod_hash_create calls. Do not infer behavior beyond the
 * comments.
 */
static di_off_t
{
int plen;
char *path;
int i;
/*
 * Translate requested root path if an alias and snap-root != "/"
 */
/* If there is no redirected alias, use root_path as is */
if (rootnode) {
goto got_root;
}
}
/*
 * Verify path before entrusting it to e_ddi_hold_devi_by_path because
 * some platforms have OBP bugs where executing the NDI_PROMNAME code
 * path against an invalid path results in panic. The lookupnameat
 * is done relative to rootdir without a leading '/' on "devices/"
 * to force the lookup to occur in the global zone.
 */
return (0);
}
/*
 * Hold the devinfo node referred by the path.
 */
return (0);
}
"devinfo registered dips (statep=%p)", (void *)st);
"devinfo registered pips (statep=%p)", (void *)st);
}
/*
 * copy the device tree
 */
}
}
/*
 * copy the devnames array
 */
/* initialize the hash tables */
st->lnode_count = 0;
st->link_count = 0;
}
if (ddi_aliases_present == B_FALSE)
goto done;
for (i = 0; i < ddi_aliases.dali_num_pairs; i++) {
}
done:
/*
 * Free up hash tables
 */
/*
 * Record the timestamp now that we are done with snapshot.
 *
 * We compute the checksum later and then only if we cache
 * the snapshot, since checksumming adds some overhead.
 * The checksum is checked later if we read the cache file.
 * from disk.
 *
 * Set checksum field to 0 as CRC is calculated with that
 * field set to 0.
 */
all->cache_checksum = 0;
return (off);
}
/*
 */
static di_off_t
{
/*
 * In order to accurately account for the system configuration
 * fully configured before the cleanup starts.
 * So enable modunload only after the cleanup.
 */
/*
 * Remove backing store nodes for unused devices,
 * which retain past permissions customizations
 * and may be undesired for newly configured devices.
 */
}
return (off);
}
/*
 * construct vhci linkage in the snapshot.
 */
static int
{
return (DDI_WALK_TERMINATE);
}
if (all->top_vhci_devinfo == 0) {
} else {
}
}
return (DDI_WALK_CONTINUE);
}
/*
 * construct phci linkage for the given vhci in the snapshot.
 */
static int
{
return (DDI_WALK_TERMINATE);
}
if (vh_di_node->top_phci == 0) {
return (DDI_WALK_CONTINUE);
}
}
return (DDI_WALK_CONTINUE);
}
/*
* Assumes all devinfo nodes in device tree have been snapshotted
*/
/*
 * snap_driver_list (resolve per-driver node lists), di_copydevnm
 * (snapshot the devnames array), and di_copytree (iterative,
 * stack-based copy of the devinfo tree).
 *
 * NOTE(review): signatures and most statements are missing from this
 * view; the fragment "devnamesp[i].dn_global_prop_ptr) {" is the tail
 * of a condition on the global property list. Only the loop skeletons
 * survive.
 */
static void
{
continue;
/*
 * Only nodes which were BOUND when they were
 * snapshotted will be added to per-driver list.
 */
continue;
}
/* terminate the per-driver list */
*off_p = 0;
}
/*
 * Copy the devnames array, so we have a list of drivers in the snapshot.
 * Also makes it possible to locate the per-driver devinfo nodes.
 */
static di_off_t
{
int i;
/*
 * make sure there is some allocated memory
 */
for (i = 0; i < devcnt; i++) {
continue;
}
/*
 * dn_name is not freed during driver unload or removal.
 *
 * There is a race condition when make_devname() changes
 * dn_name during our strcpy. This should be rare since
 * only add_drv does this. At any rate, we never had a
 * problem with ddi_name_to_major(), which should have
 * the same problem.
 */
/*
 * Snapshot per-driver node list
 */
/*
 * This is not used by libdevinfo, leave it for now
 */
/*
 * get global properties
 */
devnamesp[i].dn_global_prop_ptr) {
}
/*
 * Bit encode driver ops: & bus_ops, cb_ops, & cb_ops->cb_str
 */
if (CB_DRV_INSTALLED(devopsp[i])) {
if (devopsp[i]->devo_cb_ops) {
}
}
}
}
return (off);
}
/*
 * Copy the kernel devinfo tree. The tree and the devnames array forms
 * the entire snapshot (see also di_copydevnm).
 */
static di_off_t
{
/* force attach drivers */
}
/*
 * Push top_devinfo onto a stack
 *
 * The stack is necessary to avoid recursion, which can overrun
 * the kernel stack.
 */
/*
 * As long as there is a node on the stack, copy the node.
 * di_copynode() is responsible for pushing and popping
 * child and sibling nodes on the stack.
 */
while (!EMPTY_STACK(dsp)) {
}
/*
 * Free the stack structure
 */
return (off);
}
/*
* This is the core function, which copies all data associated with a single
* node into the snapshot. The amount of information is determined by the
* ioctl command.
*/
/*
 * di_copynode: copy a single devinfo node into the snapshot (fields,
 * devid, names, minor/path/property/hotplug/private data as requested
 * by the ioctl command), then push the next visible child or sibling
 * onto the walk stack.
 *
 * NOTE(review): the signature and the majority of statements are
 * missing from this view; orphaned string fragments ("component type =
 * %d. off=%d") are tails of debug messages, and many bare closing
 * braces have lost their opening counterparts. Treat the labels
 * (path, property via goto, priv_data, pm_info) as the only reliable
 * structure here.
 */
static di_off_t
{
struct dev_info *n;
/*
 * check memory usage, and fix offsets accordingly.
 */
/*
 * Node parameters:
 * self -- offset of current node within snapshot
 * nodeid -- pointer to PROM node (tri-valued)
 * state -- hot plugging device state
 * node_state -- devinfo node state
 */
/*
 * Get parent's offset in snapshot from the stack
 * and store it in the current node
 */
}
/*
 * Save the offset of this di_node in a hash table.
 * This is used later to resolve references to this
 * dip from other parts of the tree (per-driver list,
 * multipathing linkages, layered usage linkages).
 * The key used for the hash table is derived from
 * information in the dip.
 */
#ifdef DEVID_COMPATIBILITY
/* check for devid as property marker */
if (node->devi_devid_str) {
/*
 * The devid is now represented as a property. For
 * compatibility with di_devid() interface in libdevinfo we
 * must return it as a binary structure in the snapshot. When
 * (if) di_devid() is removed from libdevinfo then the code
 * related to DEVID_COMPATIBILITY can be removed.
 */
DDI_SUCCESS) {
}
}
#endif /* DEVID_COMPATIBILITY */
if (node->devi_node_name) {
}
}
}
if (node->devi_binding_name) {
}
/*
 * If the dip is BOUND, set the next pointer of the
 * per-instance list to -1, indicating that it is yet to be resolved.
 * This will be resolved later in snap_driver_list().
 */
} else {
}
/*
 * An optimization to skip mutex_enter when not needed.
 */
goto priv_data;
}
/*
 * LOCKING: We already have an active ndi_devi_enter to gather the
 * minor data, and we will take devi_lock to gather properties as
 * needed off di_getprop.
 */
goto path;
}
}
path:
goto property;
}
}
if (MDI_CLIENT(node)) {
"component type = %d. off=%d",
}
"component type = %d. off=%d",
}
goto hotplug_data;
}
}
}
}
} else {
/*
 * Make copy of global property list if this devinfo refers
 * global properties different from what's on the devnames
 * array. It can happen if there has been a forced
 * driver.conf update. See mod_drv(1M).
 */
if (node->devi_global_prop_list !=
}
}
goto priv_data;
}
}
goto pm_info;
}
}
}
pm_info: /* NOT implemented */
/* keep the stack aligned */
return (off);
}
/*
 * If there is a visible child--push child onto stack.
 * Hold the parent (me) busy while doing so.
 */
/* skip hidden nodes */
while (n && ndi_dev_is_hidden_node((dev_info_t *)n))
n = n->devi_sibling;
if (n) {
}
}
/*
 * Done with any child nodes, unroll the stack till a visible
 * sibling of a parent node is found or root node is reached.
 */
while (!EMPTY_STACK(dsp)) {
/* skip hidden nodes */
while (n && ndi_dev_is_hidden_node((dev_info_t *)n))
n = n->devi_sibling;
if (n) {
}
}
}
/*
 * DONE with all nodes
 */
return (off);
}
/*
 * Allocation/free/assert helpers for i_lnode_t and i_link_t, plus the
 * lnode hash function and comparator used by the LDI-usage hashes.
 *
 * NOTE(review): the signatures and the kmem/assert statements are
 * missing from this view; only the returns and surviving comments
 * remain.
 */
static i_lnode_t *
{
return (i_lnode);
}
static void
{
}
static void
{
/* This lnode and its dip must have been snapshotted */
/* at least 1 link (in or out) must exist for this lnode */
}
static i_link_t *
{
return (i_link);
}
static void
{
/* This link must have been snapshotted */
/* Both endpoint lnodes must exist for this link */
}
/*ARGSUSED*/
static uint_t
{
if (dev != DDI_DEV_T_NONE)
if (ptr) {
return ((uint_t)k);
}
}
static int
{
}
return (0);
}
/*
* An lnode represents a {dip, dev_t} tuple. A link represents a
* {src_lnode, tgt_lnode, spec_type} tuple.
* The following callback assumes that LDI framework ref-counts the
* src_dip and tgt_dip while invoking this callback.
*/
/*
 * LDI-usage snapshot machinery: the per-usage callback that builds
 * src/tgt i_lnodes and i_links in hashes, the i_layer_data bookkeeping
 * struct, the two hash walkers that emit di_link/di_lnode snapshot
 * records, and di_getlink_data which drives the whole pass.
 *
 * NOTE(review): signatures, hash find/insert calls, and most
 * assignments are missing from this view; only guards, counters and
 * list-append fragments survive.
 */
static int
{
int res;
/*
 * if the source or target of this device usage information doesn't
 * correspond to a device node then we don't report it via
 * libdevinfo so return.
 */
return (LDI_USAGE_CONTINUE);
/*
 * Skip the ldi_usage if either src or tgt dip is not in the
 */
return (LDI_USAGE_CONTINUE);
return (LDI_USAGE_CONTINUE);
/*
 * allocate an i_lnode and add it to the lnode hash
 * if it is not already present. For this particular
 * link the lnode is a source, but it may
 * participate as tgt or src in any number of layered
 * operations - so it may already be in the hash.
 */
if (res == MH_ERR_NOTFOUND) {
/*
 * new i_lnode
 * add it to the hash and increment the lnode count
 */
st->lnode_count++;
} else {
/* this i_lnode already exists in the lnode_hash */
}
/*
 * allocate a tgt i_lnode and add it to the lnode hash
 */
if (res == MH_ERR_NOTFOUND) {
/*
 * new i_lnode
 * add it to the hash and increment the lnode count
 */
st->lnode_count++;
} else {
/* this i_lnode already exists in the lnode_hash */
}
/*
 * allocate a i_link
 */
/*
 * add this link onto the src i_lnodes outbound i_link list
 */
while (*i_link_next != NULL) {
/* this link already exists */
return (LDI_USAGE_CONTINUE);
}
}
*i_link_next = i_link;
/*
 * add this link onto the tgt i_lnodes inbound i_link list
 */
while (*i_link_next != NULL) {
}
*i_link_next = i_link;
/*
 * add this i_link to the link hash
 */
st->link_count++;
return (LDI_USAGE_CONTINUE);
}
/* walk-state shared by the link/lnode hash walkers below */
struct i_layer_data {
int lnode_count;
int link_count;
};
/*ARGSUSED*/
static uint_t
{
data->link_count++;
/* fill in fields for the di_link snapshot */
/*
 * The src_lnode and tgt_lnode i_lnode_t for this i_link_t
 * are created during the LDI table walk. Since we are
 * walking the link hash, the lnode hash has already been
 * walked and the lnodes have been snapshotted. Save lnode
 * offsets.
 */
/*
 * Save this link's offset in the src_lnode snapshot's link_out
 * field
 */
/*
 * Put this link on the tgt_lnode's link_in field
 */
/*
 * An i_lnode_t is only created if the corresponding dip exists
 * in the snapshot. A pointer to the di_node is saved in the
 * i_lnode_t when it is allocated. For this link, get the di_node
 * for the source lnode. Then put the link on the di_node's list
 * of src links
 */
/*
 * Put this link on the tgt_links list of the target
 * dip.
 */
return (MH_WALK_CONTINUE);
}
/*ARGSUSED*/
static uint_t
{
data->lnode_count++;
/* fill in fields for the di_lnode snapshot */
} else {
}
/*
 * The dip corresponding to this lnode must exist in
 * the snapshot or we wouldn't have created the i_lnode_t
 * during LDI walk. Save the offset of the dip.
 */
/*
 * There must be at least one link in or out of this lnode
 * or we wouldn't have created it. These fields will be set
 * during the link hash walk.
 */
/*
 * set the offset of the devinfo node associated with this
 * lnode. Also update the node_next next pointer. this pointer
 * is set if there are multiple lnodes associated with the same
 * devinfo node. (could occure when multiple minor nodes
 * are open for one device, etc.)
 */
return (MH_WALK_CONTINUE);
}
static di_off_t
{
/* get driver layering information */
/* check if there is any link data to include in the snapshot */
if (st->lnode_count == 0) {
goto out;
}
/* get a pointer to snapshot memory for all the di_lnodes */
/* get a pointer to snapshot memory for all the di_links */
/*
 * We have lnodes and links that will go into the
 * snapshot, so let's walk the respective hashes
 * and snapshot them. The various linkages are
 * also set up during the walk.
 */
out:
/* free up the i_lnodes and i_links used to create the snapshot */
st->lnode_count = 0;
st->link_count = 0;
return (off);
}
/*
* Copy all minor data nodes attached to a devinfo node into the snapshot.
* It is called from di_copynode with active ndi_devi_enter to protect
* the list of minor nodes.
*/
/*
 * di_getmdata (copy a node's minor-node list), the dip/pip hash
 * register/find helpers, and the pathinfo state/flags converters.
 *
 * NOTE(review): signatures, hash insert/find calls and loop setup are
 * missing from this view; the orphaned string fragments ("duplicate
 * devinfo/pathinfo ... registered during device tree walk") are the
 * tails of panic/warning messages. The state-convert switch has lost
 * most of its case labels.
 */
static di_off_t
{
/*
 * check memory first
 */
do {
/*
 * both ILP32 and LP64 model
 */
}
if (mnode->ddm_node_type) {
}
} while (mnode);
return (off);
}
/*
 * di_register_dip(), di_find_dip(): The dip must be protected
 * from deallocation when using these routines - this can either
 * be a reference count, a busy hold or a per-driver lock.
 */
static void
{
"duplicate devinfo (%p) registered during device "
"tree walk", (void *)dip);
}
}
static int
{
/*
 * uintptr_t must be used because it matches the size of void *;
 * mod_hash expects clients to place results into pointer-size
 * containers; since di_off_t is always a 32-bit offset, alignment
 * would otherwise be broken on 64-bit kernels.
 */
(mod_hash_val_t *)&offset) == 0) {
return (0);
} else {
return (-1);
}
}
/*
 * di_register_pip(), di_find_pip(): The pip must be protected from deallocation
 * when using these routines. The caller must do this by protecting the
 * client(or phci)<->pip linkage while traversing the list and then holding the
 * pip when it is found in the list.
 */
static void
{
char *path_addr;
if (path_addr)
"duplicate pathinfo (%p) registered during device "
"tree walk", (void *)pip);
}
}
/*
 * As with di_register_pip, the caller must hold or lock the pip
 */
static int
{
/*
 * uintptr_t must be used because it matches the size of void *;
 * mod_hash expects clients to place results into pointer-size
 * containers; since di_off_t is always a 32-bit offset, alignment
 * would otherwise be broken on 64-bit kernels.
 */
*off_p = 0;
return (0);
}
(mod_hash_val_t *)&offset) == 0) {
return (0);
} else {
return (-1);
}
}
/* map an mdi pathinfo state onto the libdevinfo di_path_state_t */
static di_path_state_t
{
switch (st) {
return (DI_PATH_STATE_ONLINE);
return (DI_PATH_STATE_STANDBY);
return (DI_PATH_STATE_OFFLINE);
case MDI_PATHINFO_STATE_FAULT:
return (DI_PATH_STATE_FAULT);
default:
return (DI_PATH_STATE_UNKNOWN);
}
}
/* map mdi pathinfo flags onto the libdevinfo path flags */
static uint_t
{
/* MDI_PATHINFO_FLAGS_HIDDEN nodes not in snapshot */
return (di_path_flags);
}
/*
 * Pathinfo snapshot: di_path_getprop copies a pip's nvlist properties
 * (scalar, string and array nvpair types) into the snapshot;
 * di_path_one_endpoint updates snap-state bookkeeping for one
 * endpoint; the last function walks a client's (or phci's) pip list
 * and snapshots each visible path.
 *
 * NOTE(review): signatures, the nvlist iteration loop, and the copy
 * statements are missing from this view; only the nvpair-type switch
 * skeleton and the endpoint-linkage comments survive.
 */
static di_off_t
{
int off;
char *str;
*off_p = 0;
return (off);
}
off += sizeof (struct di_path_prop);
/*
 * property name
 */
switch (nvpair_type(prop)) {
case DATA_TYPE_BYTE:
case DATA_TYPE_INT16:
case DATA_TYPE_UINT16:
case DATA_TYPE_INT32:
case DATA_TYPE_UINT32:
(void) nvpair_value_int32(prop,
break;
case DATA_TYPE_INT64:
case DATA_TYPE_UINT64:
(void) nvpair_value_int64(prop,
break;
case DATA_TYPE_STRING:
break;
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_INT16_ARRAY:
case DATA_TYPE_UINT16_ARRAY:
case DATA_TYPE_INT32_ARRAY:
case DATA_TYPE_UINT32_ARRAY:
case DATA_TYPE_INT64_ARRAY:
case DATA_TYPE_UINT64_ARRAY:
if (nelems != 0) {
}
break;
default: /* Unknown or unhandled type; skip it */
size = 0;
break;
}
if (size > 0) {
}
}
return (off);
}
static void
int get_client)
{
if (get_client) {
me->path_snap_state &=
} else {
me->path_snap_state &=
}
}
/*
 * off_p: pointer to the linkage field. This links pips along the client|phci
 * linkage list.
 * noff : Offset for the endpoint dip snapshot.
 */
static di_off_t
{
/*
 * The naming of the following mdi_xyz() is unfortunately
 * non-intuitive. mdi_get_next_phci_path() follows the
 * client_link i.e. the list of pip's belonging to the
 * given client dip.
 */
if (get_client)
else
/* We don't represent hidden paths in the snapshot */
if (mdi_pi_ishidden(pip)) {
continue;
}
/*
 * We've already seen this pathinfo node so we need to
 * take care not to snap it again; However, one endpoint
 * and linkage will be set here. The other endpoint
 * and linkage has already been set when the pip was
 * first snapshotted i.e. when the other endpoint dip
 * was snapshotted.
 */
*off_p = stored_offset;
/*
 * The other endpoint and linkage were set when this
 * pip was snapshotted. So we are done with both
 * endpoints and linkages.
 */
continue;
}
/*
 * Now that we need to snapshot this pip, check memory
 */
me->path_snap_state |=
/*
 * Zero out fields as di_checkmem() doesn't guarantee
 * zero-filled memory
 */
/*
 * Note the existence of this pathinfo
 */
/*
 * Get intermediate addressing info.
 */
/*
 * Get path properties if props are to be included in the
 * snapshot
 */
} else {
}
}
*off_p = 0;
return (off);
}
/*
* Return driver prop_op entry point for the specified devinfo node.
*
* To return a non-NULL value:
* - driver must be attached and held:
* If driver is not attached we ignore the driver property list.
* No one should rely on such properties.
* - driver "cb_prop_op != ddi_prop_op":
* If "cb_prop_op == ddi_prop_op", framework does not need to call driver.
* XXX or parent's bus_prop_op != ddi_bus_prop_op
*/
/*
 * Property snapshot: the first function resolves a driver's
 * prop_op(9E) entry (NULL when not attached or when the default
 * ddi_prop_op would be used); di_getprop_add snapshots one property,
 * optionally refreshing its value through prop_op(9E); di_getprop
 * copies a whole property list (driver/system/global/hw) plus dynamic
 * driver and parent properties.
 *
 * NOTE(review): signatures, the two-pass list building, and the
 * prop_op(9E) invocation are missing from this view; only guards,
 * labels and the dynamic-property loop skeletons survive. The
 * "return (NULL)" statements in an int-returning frame reflect the
 * truncation, not the original code shape — confirm against the full
 * source.
 */
static int
{
/* If driver is not attached we ignore the driver property list. */
return (NULL);
/*
 * Some nexus drivers incorrectly set cb_prop_op to nodev, nulldev,
 * or even NULL.
 */
return (NULL);
}
static di_off_t
int (*prop_op)(),
{
int need_free = 0;
int pflags;
int rv;
int len;
/* If we have prop_op function, ask driver for latest value */
if (prop_op) {
/* Must search DDI_DEV_T_NONE with DDI_DEV_T_ANY */
/*
 * We have type information in flags, but are invoking an
 * old non-typed prop_op(9E) interface. Since not all types are
 * part of DDI_PROP_TYPE_ANY (example is DDI_PROP_TYPE_INT64),
 * we set DDI_PROP_CONSUMER_TYPED - causing the framework to
 * expand type bits beyond DDI_PROP_TYPE_ANY. This allows us
 * to use the legacy prop_op(9E) interface to obtain updates
 * non-DDI_PROP_TYPE_ANY dynamic properties.
 */
/*
 * Hold and exit across prop_op(9E) to avoid lock order
 * issues between
 * [ndi_devi_enter() ..prop_op(9E).. driver-lock]
 * .vs.
 * [..ioctl(9E).. driver-lock ..ddi_remove_minor_node(9F)..
 * ndi_devi_enter()]
 * ordering.
 */
if (rv == DDI_PROP_SUCCESS) {
} else if (dyn) {
/*
 * A dynamic property must succeed prop_op(9E) to show
 * up in the snapshot - that is the only source of its
 * value.
 */
return (off); /* dynamic prop not supported */
} else {
/*
 * In case calling the driver caused an update off
 * prop_op(9E) of a non-dynamic property (code leading
 * to ddi_prop_change), we defer picking up val and
 * len informatiojn until after prop_op(9E) to ensure
 * that we snapshot the latest value.
 */
}
} else {
}
/* property name */
if (name) {
} else {
}
} else if (len != 0) {
}
if (need_free) /* free PROP_LEN_AND_VAL_ALLOC alloc */
return (off);
}
/*
 * Copy a list of properties attached to a devinfo node. Called from
 * di_copynode with active ndi_devi_enter. The major number is passed in case
 * we need to call driver's prop_op entry. The value of list indicates
 * which list we are copying. Possible values are:
 * DI_PROP_DRV_LIST, DI_PROP_SYS_LIST, DI_PROP_GLB_LIST, DI_PROP_HW_LIST
 */
static di_off_t
{
int (*prop_op)();
int off;
struct plist {
char *pl_name;
int pl_flags;
int pl_len;
*off_p = 0;
/* get pointer to driver's prop_op(9E) implementation if DRV_LIST */
/*
 * Form private list of properties, holding devi_lock for properties
 * that hang off the dip.
 */
if (dip)
else
} else {
}
}
if (dip)
/*
 * Now that we have dropped devi_lock, perform a second-pass to
 * add properties to the snapshot. We do this as a second pass
 * because we may need to call prop_op(9E) and we can't hold
 * devi_lock across that call.
 */
}
/*
 * If there is no prop_op or dynamic property support has been
 * disabled, we are done.
 */
*off_p = 0;
return (off);
}
/* Add dynamic driver properties to snapshot */
if (dp->dp_spec_type) {
/* if spec_type, property of matching minor */
continue;
}
} else {
/* property of devinfo node */
}
}
/* Add dynamic parent properties to snapshot */
if (dp->dp_spec_type) {
/* if spec_type, property of matching minor */
continue;
}
} else {
/* property of devinfo node */
}
}
*off_p = 0;
return (off);
}
/*
* find private data format attached to a dip
* parent = 1 to match driver name of parent dip (for parent private data)
* 0 to match driver name of current dip (for driver private data)
*/
#define DI_MATCH_DRIVER 0
struct di_priv_format *
{
char *drv_name;
if (match == DI_MATCH_PARENT) {
}
return (NULL);
}
return (NULL);
}
/*
* Match the driver name.
*/
return (NULL);
}
/* Now get the di_priv_format array */
if (match == DI_MATCH_PARENT) {
} else {
}
for (i = 0; i < count; i++) {
char *tmp;
return (&form[i]);
}
/*
* Move to next driver name, skipping a white space
*/
tmp++;
}
}
}
return (NULL);
}
/*
* The following functions copy data as specified by the format passed in.
* To prevent invalid format from panicing the system, we call on_fault().
* A return value of 0 indicates an error. Otherwise, the total offset
* is returned.
*/
/*
 * Copy driver/parent private data into the snapshot using the format
 * passed in. Runs under on_fault() protection (see comment above) so a
 * bad format cannot panic the system; returns 0 on error, otherwise
 * the total snapshot offset.
 * NOTE(review): signature and many statements elided in this excerpt;
 * appears to be di_getprvdata() — confirm against full source.
 */
static di_off_t
{
void *ptr;
char *path;
/*
 * check memory availability. Private data size is
 * limited to DI_MAX_PRIVDATA.
 */
goto failure;
}
/* copy the struct */
/* dereferencing pointers */
for (i = 0; i < MAX_PTR_IN_PRV; i++) {
goto success; /* no more ptrs */
}
/*
 * first, get the pointer content
 */
goto failure; /* wrong offset */
/* save a tmp ptr to store off_t later */
/* get pointer value, if NULL continue */
continue;
}
/*
 * next, find the repeat count (array dimension)
 */
/*
 * Positive value indicates a fixed sized array.
 * 0 or negative value indicates variable sized array.
 *
 * For variable sized array, the variable must be
 * an int member of the structure, with an offset
 * equal to the absolute value of struct member.
 */
goto failure; /* wrong offset */
}
if (repeat >= 0) {
repeat = *((int *)
} else {
}
/*
 * next, get the size of the object to be copied
 */
/*
 * Arbitrarily limit the total size of object to be
 * copied (1 byte to 1/4 page).
 */
goto failure; /* wrong size or too big */
}
/*
 * Now copy the data
 */
}
} else {
goto failure;
}
/*
 * success if reached here
 */
no_fault();	/* leave the on_fault() protected region */
/*NOTREACHED*/
/*
 * fault occurred
 */
no_fault();
return (off);
}
/*
* get parent private data; on error, returns original offset
*/
/*
 * Copy parent private data into the snapshot; on error the original
 * offset is returned with *off_p cleared (no parent data recorded).
 * NOTE(review): signature and most statements elided in this excerpt;
 * appears to be di_getppdata() — confirm.
 */
static di_off_t
{
int off;
/* find the parent data format */
*off_p = 0; /* set parent data to none */
return (off);
}
}
/*
 * get driver private data; returns original offset
 */
/*
 * Copy driver private data into the snapshot; on error the original
 * offset is returned with *off_p cleared (no driver data recorded) —
 * see the "set driver data to none" assignment below.
 * NOTE(review): signature and most statements elided in this excerpt;
 * appears to be di_getdpdata() — confirm.
 */
static di_off_t
{
int off;
/* find the parent data format */
*off_p = 0; /* set driver data to none */
return (off);
}
}
/*
* Copy hotplug data associated with a devinfo node into the snapshot.
*/
/*
 * Copy the chain of hotplug (di_hp_t) records for a devinfo node into
 * the snapshot, walking the hotplug-handle list and linking each record
 * to the next; returns the updated snapshot offset.
 * NOTE(review): signature and many statements elided in this excerpt;
 * appears to be di_gethpdata() — confirm.
 */
static di_off_t
{
/*
 * check memory first
 */
do {
(void) ddihp_cn_getstate(hp_hdl);	/* refresh connection state before copy */
me->hp_type_str = 0;
/*
 * Child links are resolved later by di_hotplug_children().
 * Store a reference to this di_hp_t in the list used later
 * by di_hotplug_children().
 */
/* Add name of this di_hp_t to the snapshot */
}
/* Add type description of this di_hp_t to the snapshot */
}
/*
 * Set link to next in the chain of di_hp_t nodes,
 * or terminate the chain when processing the last node.
 */
} else {
}
/* Update pointer to next in the chain */
} while (hp_hdl);	/* loop until the hotplug handle list is exhausted */
return (off);
}
/*
* The driver is stateful across DINFOCPYALL and DINFOUSRLD.
* This function encapsulates the state machine:
*
* -> IOC_IDLE -> IOC_SNAP -> IOC_DONE -> IOC_COPY ->
* | SNAPSHOT USRLD |
* --------------------------------------------------
*
* Returns 0 on success and -1 on failure
*/
/*
 * Transition the per-open ioctl state machine (diagram above) to
 * new_state. Returns 0 when the transition is legal, -1 otherwise.
 * NOTE(review): the guard conditions before each "ret = -1" and the
 * bodies of the trailing if/else were elided from this excerpt.
 */
static int
{
int ret = 0;
switch (new_state) {
case IOC_IDLE:
case IOC_DONE:
break;		/* IDLE and DONE are always reachable */
case IOC_SNAP:
ret = -1;	/* illegal transition into SNAP from current state */
break;
case IOC_COPY:
ret = -1;	/* illegal transition into COPY from current state */
break;
default:
ret = -1;	/* unknown state requested */
}
if (ret == 0)
else
return (ret);
}
/*
* We cannot assume the presence of the entire
* snapshot in this routine. All we are guaranteed
* is the di_all struct + 1 byte (for root_path)
*/
/*
 * Sanity-check a snapshot header plus its first byte (root_path), per
 * the comment above: only the di_all struct + 1 byte is guaranteed to
 * be present here. Returns 1 when the header looks valid, 0 otherwise.
 * The checksum cannot be verified at this point (see comment below).
 * NOTE(review): the individual field checks preceding each "return (0)"
 * were elided from this excerpt.
 */
static int
{
/*
 * Refuse to read old versions
 */
return (0);
}
return (0);
}
if (all->snapshot_time == 0) {
return (0);	/* a real snapshot always records a timestamp */
}
if (all->top_devinfo == 0) {
return (0);	/* snapshot must reference a root devinfo node */
}
return (0);
}
return (0);
}
/*
 * We can't check checksum here as we just have the header
 */
return (1);	/* header passed all available checks */
}
/*
 * Write a buffer out in a loop until all of `len` is consumed or an
 * error/lack-of-progress occurs; returns the last error (0 on success).
 * NOTE(review): the actual write call and the progress test were elided
 * from this excerpt — presumably a VOP/vn_rdwr-style chunked writer
 * used by the cache writer below; confirm against full source.
 */
static int
{
int error = 0;
while (len) {
resid = 0;
break;		/* stop on write error */
}
/*
 * Check if we are making progress
 */
break;		/* no bytes consumed this pass; avoid spinning forever */
}
}
return (error);
}
/*
 * Persist the in-core devinfo cache to disk: validate the cache header,
 * write map_size bytes to a temp file in chunks, sync/close it, then
 * rename it into place. Any failure bails out, leaving the old file.
 * NOTE(review): signature, locals, and many statements were elided from
 * this excerpt; appears to be di_cache_write() — confirm.
 */
static void
{
int oflags;
int error;
char *buf;
if (cache->cache_size == 0) {
return;		/* nothing cached; nothing to write */
}
return;
}
if (!header_plus_one_ok(all)) {
return;		/* refuse to persist a malformed snapshot header */
}
/*
 * The cache_size is the total allocated memory for the cache.
 * The map_size is the actual size of valid data in the cache.
 * map_size may be smaller than cache_size but cannot exceed
 * cache_size.
 */
return;
}
/*
 * First unlink the temp file
 */
DI_CACHE_TEMP, error));
}
return;
}
DI_CACHE_TEMP, error));
return;
}
/*
 * Paranoid: Check if the file is on a read-only FS
 */
if (vn_is_readonly(vp)) {
goto fail;
}
/*
 * Note that we only write map_size bytes to disk - this saves
 * space as the actual cache size may be larger than size of
 * valid data in the cache.
 * Another advantage is that it makes verification of size
 * easier when the file is read later.
 */
off = 0;
while (map_size) {
/*
 * Write in chunks so that VM system
 * is not overwhelmed
 */
else
if (error) {
goto fail;
}
/* If low on memory, give pageout a chance to run */
delay(1);
}
/*
 * Now sync the file and close it
 */
}
return;
}
/*
 * Now do the rename
 */
return;
}
return;
fail:	/* common failure path — elided cleanup presumably unlinks/closes here */
}
/*
* Since we could be called early in boot,
* use kobj_read_file()
*/
/*
 * Load the on-disk devinfo cache into core using kobj_read_file()
 * (usable early in boot — see comment above): read header+root_path,
 * read the body in chunks, verify size by reading past expected EOF,
 * re-validate the header, and verify the CRC with the stored checksum
 * field zeroed. Any failure resets cache_size to 0.
 * NOTE(review): signature and many statements elided from this excerpt;
 * appears to be di_cache_read() — confirm.
 */
static void
{
int n;
DI_CACHE_FILE, ENOENT));
return;		/* cache file absent; run without a cache */
}
/*
 * Read in the header+root_path first. The root_path must be "/"
 */
return;
}
off = 0;
while (sz) {
/* Don't overload VM with large reads */
if (n != chunk) {
DI_CACHE_FILE, off));
goto fail;	/* short read: file smaller than header claims */
}
}
/*
 * Read past expected EOF to verify size.
 */
goto fail;
}
if (!header_plus_one_ok(all)) {
goto fail;
}
/*
 * Compute CRC with checksum field in the cache data set to 0
 */
all->cache_checksum = 0;
"%s: checksum error: expected=0x%x actual=0x%x",
goto fail;
}
goto fail;
}
return;
fail:
di_cache.cache_size = 0;	/* discard partially/incorrectly loaded data */
}
/*
* Checks if arguments are valid for using the cache.
*/
/*
 * Validate a DINFOCACHE lookup request: the I/O subsystem must be up,
 * no other snapshot flags may be combined with DINFOCACHE, and the
 * requested root must be acceptable. Returns 1 (and *error = 0) when
 * the cache may be consulted, 0 with *error set otherwise.
 * NOTE(review): signature and the flag/root condition lines were elided
 * from this excerpt.
 */
static int
{
if (!modrootloaded || !i_ddi_io_initialized()) {
"cache lookup failure: I/O subsystem not inited"));
*error = ENOTACTIVE;
return (0);
}
/*
 * No other flags allowed with DINFOCACHE
 */
"cache lookup failure: bad flags: 0x%x",
return (0);
}
"cache lookup failure: bad root: %s",
return (0);
}
*error = 0;
return (1);
}
/*
 * Decide whether a snapshot request can be served from / stored in the
 * cache: both the command flags and the root path must be compatible.
 * Returns 1 if cacheable, 0 otherwise.
 * NOTE(review): signature and the two guard conditions were elided from
 * this excerpt.
 */
static int
{
"not cacheable: incompatible flags: 0x%x",
return (0);
}
"not cacheable: incompatible root path: %s",
return (0);
}
return (1);
}
/*
 * Serve a snapshot request from the in-core cache when it is valid
 * (copying the cached data, safe because the cache lock is held);
 * otherwise adjust the command flags so a fresh snapshot is taken.
 * Returns the snapshot size (cast to int).
 * NOTE(review): signature and many statements elided from this excerpt;
 * appears to be di_cache_lookup() — confirm.
 */
static int
{
int cache_valid;
/*
 * The following assignment determines the validity
 * of the cache as far as this snapshot is concerned.
 */
/* check for read or file error */
cache_valid = 0;
}
if (cache_valid) {
/*
 * Ok, the cache was valid as of this particular
 * snapshot. Copy the cached snapshot. This is safe
 * to do as the cache cannot be freed (we hold the
 * cache lock). Free the memory allocated in di_state
 * up until this point - we will simply copy everything
 * in the cache.
 */
di_freemem(st);	/* drop partial snapshot state; the cache copy replaces it */
rval = 0;
/*
 * map_size is size of valid data in the
 * cached snapshot and may be less than
 * size of the cache.
 */
}
} else {
/*
 * The cache isn't valid, we need to take a snapshot.
 * Set the command flags appropriately
 */
}
/*
 * For cached snapshots, the devinfo driver always returns
 * a snapshot rooted at "/".
 */
return ((int)rval);
}
/*
* This is a forced update of the cache - the previous state of the cache
* may be:
* - unpopulated
* - populated and invalid
* - populated and valid
*/
/*
 * Force-rebuild the cache (see comment above for the possible prior
 * states): free the old in-core/on-disk cache, mark the cache valid
 * BEFORE snapshotting so concurrent invalidations are not lost, take
 * the snapshot, then fill in all header fields and compute the
 * checksum over the valid data only. Returns the snapshot size, or 0
 * on failure.
 * NOTE(review): signature and many statements elided from this excerpt;
 * appears to be di_cache_update() — confirm.
 */
static int
{
int rval;
/*
 * Free the in-core cache and the on-disk file (if they exist)
 */
/*
 * Set valid flag before taking the snapshot,
 * so that any invalidations that arrive
 * during or after the snapshot are not
 * removed by us.
 */
if (rval == 0) {
return (0);	/* snapshot failed; leave cache empty */
}
return (0);
}
/*
 * Now that we have cached the snapshot, compute its checksum.
 * The checksum is only computed over the valid data in the
 * cache, not the entire cache.
 * Also, set all the fields (except checksum) before computing
 * checksum.
 */
return (rval);
}
/*
 * Emit a cache debug message, filtered by the global di_cache_debug
 * level: nothing below DI_QUIET+1, and nothing more verbose than the
 * configured level. The switch routes the message by severity.
 * NOTE(review): signature and the per-case output statements were
 * elided from this excerpt; appears to be di_cache_print() — confirm.
 */
static void
{
if (di_cache_debug <= DI_QUIET)
return;		/* debug output disabled entirely */
if (di_cache_debug < msglevel)
return;		/* message more verbose than the configured level */
switch (msglevel) {
case DI_ERR:
break;
case DI_INFO:
case DI_TRACE:	/* INFO and TRACE share the same output path */
default:
break;
}
}
/*
 * NOTE(review): the signature and entire body of this function were
 * elided from this excerpt — only the closing braces remain. Its
 * identity and purpose cannot be determined from here; consult the
 * full source before modifying.
 */
static void
{
}
}
}