/* rcm_subr.c revision 4bc0a2ef2b7ba50a7a717e7ddbf31472ad28e358 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include "rcm_impl.h"
#include "rcm_module.h"
/*
 * Short-circuits unloading of modules with no registrations, so that
 * they are present during the next db_sync cycle.
 */
#define MOD_REFCNT_INIT 2
int need_cleanup; /* flag indicating if clean up is needed */
/*
 * Misc help routines
 *
 * NOTE(review): the empty-parentheses prototypes below are K&R style;
 * consider `(void)` parameter lists when this file is next modernized.
 */
static void rcmd_db_print();
static void rcm_handle_free(rcm_handle_t *);
static void rsrc_clients_free(client_t *);
static struct rcm_mod_ops *modops_from_v1(void *);
static int node_action(rsrc_node_t *, void *);
extern void start_polling_thread();
/*
 * translate /dev name to a /devices path
 *
 * N.B. This routine can be enhanced to understand network names
 * and friendly names in the future.
 *
 * NOTE(review): the body of resolve_name() is incomplete in this copy --
 * the symlink-resolution statements the comments below refer to are
 * missing and the braces do not balance.  Do not build from this copy;
 * restore the full text from the revision noted at the top of the file.
 */
char *
resolve_name(char *alias)
{
char *tmp;
const char *dev = "/dev/";
return (NULL);
/*
 * Treat /dev/... as a symbolic link
 */
return (tmp);
} else {
}
/* Fail to resolve /dev/ name, use the name as is */
}
}
/*
 * Figure out resource type based on "resolved" name.
 *
 * Returns RSRC_TYPE_ABSTRACT for names that are not pathnames,
 * RSRC_TYPE_DEVICE for /dev names, and RSRC_TYPE_NORMAL otherwise.
 *
 * N.B. This routine does not figure out file system mount points.
 *	This is determined at runtime when the filesys module registers
 *	with the RCM_FILESYS flag.
 */
int
rsrc_get_type(const char *resolved_name)
{
	/* anything that is not a pathname is an abstract resource name */
	if (resolved_name[0] != '/')
		return (RSRC_TYPE_ABSTRACT);

	/*
	 * NOTE(review): the copy under review returned RSRC_TYPE_DEVICE
	 * unconditionally here, which made the RSRC_TYPE_NORMAL return
	 * below unreachable.  Restore the /dev prefix test so that only
	 * /dev names classify as devices -- confirm against the upstream
	 * revision noted at the top of the file.
	 */
	if (strncmp("/dev", resolved_name, 4) == 0)
		return (RSRC_TYPE_DEVICE);

	return (RSRC_TYPE_NORMAL);
}
/*
 * Module operations:
 * module_load, module_unload, module_info, module_attach, module_detach,
 * cli_module_hold, cli_module_rele
 *
 * NOTE(review): every definition in this section is incomplete in this
 * copy -- the function names, parameter lists and most body statements
 * are missing, and the braces do not balance.  Only the script_*() tail
 * calls and the module_fini()/version-switch skeleton survive.
 */
#ifdef ENABLE_MODULE_DETACH
/*
 * call unregister() entry point to allow module to unregister for
 * resources without getting confused.
 */
static void
{
}
#endif /* ENABLE_MODULE_DETACH */
/*
 * call register() entry point to allow module to register for resources
 */
static void
{
}
}
/* dispatches to the module's init entry or, for scripts, script_init() */
struct rcm_mod_ops *
{
/* rcm module */
else
/* rcm script */
return (script_init(module));
}
/*
 * call rcm_mod_info() entry of module
 */
static const char *
{
/* rcm module */
else
/* rcm script */
return (script_info(module));
}
/* dispatches to the module's fini entry or, for scripts, script_fini() */
int
{
/* rcm module */
else
/* rcm script */
return (script_fini(module));
}
/*
 * call rcm_mod_fini() entry of module, dlclose module, and free memory
 */
static void
{
(void) module_fini(module);
switch (version) {
case RCM_MOD_OPS_V1:
/*
 * Free memory associated with converted ops vector
 */
break;
case RCM_MOD_OPS_VERSION:
default:
break;
}
}
/*
 * Locate the module, execute rcm_mod_init() and check ops vector version
 *
 * NOTE(review): module_load() is incomplete in this copy -- the dlopen,
 * dlsym and validation statements are missing; only the error-path
 * skeleton (gettext messages, goto fail) remains.  Restore from the
 * revision noted at the top of the file before editing.
 */
static module_t *
module_load(char *modname)
{
/*
 * dlopen the module
 */
if (rcm_is_script(modname) == 0) {
/* rcm module */
goto fail;
}
/*
 * dlsym rcm_mod_init/fini/info() entry points
 */
"rcm_mod_info");
goto fail;
}
} else {
/* rcm script */
}
goto fail;
}
/*
 * Check ops vector version
 */
case RCM_MOD_OPS_V1:
break;
case RCM_MOD_OPS_VERSION:
break;
default:
gettext("module %s rejected: version %d not supported\n"),
(void) module_fini(module);
goto fail;
}
/*
 * Make sure all fields are set
 */
gettext("module %s rejected: has NULL ops fields\n"),
modname);
(void) module_fini(module);
goto fail;
}
return (module);
fail:
return (NULL);
}
/*
 * add one to module hold count. load the module if not loaded
 *
 * NOTE(review): cli_module_hold() and cli_module_rele() are incomplete
 * in this copy -- the list-walk bodies and refcount manipulation are
 * missing.  The surviving mutex_lock/unlock pairs show both routines
 * serialize on mod_lock.
 */
static module_t *
cli_module_hold(char *modname)
{
(void) mutex_lock(&mod_lock);
while (module) {
break;
}
}
if (module) {
(void) mutex_unlock(&mod_lock);
return (module);
}
/*
 * Module not found, attempt to load it
 */
(void) mutex_unlock(&mod_lock);
return (NULL);
}
/*
 * Hold module and link module into module list
 */
(void) mutex_unlock(&mod_lock);
return (module);
}
/*
 * decrement module hold count. Unload it if no reference
 */
static void
{
(void) mutex_lock(&mod_lock);
(void) mutex_unlock(&mod_lock);
return;
}
/*
 * Unlink the module from list
 */
}
gettext("Unexpected error: module %s not found.\n"),
} else {
}
(void) mutex_unlock(&mod_lock);
}
/*
 * Gather usage info to be passed back to requester. Discard info if user
 * does not care (list == NULL).
 *
 * NOTE(review): this routine is incomplete in this copy -- the signature
 * and the nvlist_add_* calls that the surviving argument fragments
 * (errstr, infostr, modname, client_props) belong to are missing.
 */
void
{
return;
}
}
/*LINTED*/
}
/*
 * Daemon calls to add_busy_rsrc_to_list may pass in
 */
if (errstr) {
errstr);
(char *)errstr)) {
}
}
if (infostr) {
(char *)infostr)) {
}
}
if (modname) {
modname)) {
}
}
if (client_props) {
NV_ENCODE_NATIVE, 0)) {
}
}
}
/* link info at end of list */
if (*list) {
} else {
}
}
/*
 * Resource client related operations:
 * rsrc_client_alloc, rsrc_client_find, rsrc_client_add,
 * rsrc_client_remove, rsrc_client_action, rsrc_client_action_list
 *
 * NOTE(review): the four helpers in this section are incomplete in this
 * copy -- signatures and most body statements are missing.
 */
/* Allocate rsrc_client_t structure. Load module if necessary. */
/*ARGSUSED*/
static client_t *
{
return (NULL);
}
/* This queue is protected by rcm_req_lock */
return (client);
}
/* Find client in list matching modname and pid */
client_t *
{
while (client) {
break;
}
}
return (client);
}
/* Add a client to client list */
static void
{
}
/* Remove client from list and destroy it */
static void
{
while (tmp) {
continue;
}
if (prev) {
} else {
}
return;
}
}
/* Free a list of clients. Called from cleanup thread only */
static void
{
while (client) {
/*
 * Note that the rcm daemon is single threaded while
 * executing this routine. So there is no need to acquire
 * rcm_req_lock here while dequeuing.
 */
}
}
}
}
/*
 * Invoke a callback into a single client
 * This is the core of rcm_mod_ops interface
 *
 * NOTE(review): rsrc_client_action() is incomplete in this copy -- the
 * signature, the handle/nvlist construction, the per-command callback
 * invocations and the state-update assignments are all missing; only the
 * switch skeleton over the CMD_* values survives.  Restore from the
 * revision noted at the top of the file before editing.
 */
static int
{
int rval = RCM_SUCCESS;
char *dummy_error = NULL;
/*
 * Create a per-operation handle, increment seq_num by 1 so we will
 * know if a module uses this handle to callback into rcm_daemon.
 */
/*
 * Filter out operations for which the client didn't register.
 */
switch (cmd) {
case CMD_SUSPEND:
case CMD_RESUME:
case CMD_OFFLINE:
case CMD_ONLINE:
case CMD_REMOVE:
return (RCM_SUCCESS);
}
break;
case CMD_REQUEST_CHANGE:
case CMD_NOTIFY_CHANGE:
return (RCM_SUCCESS);
}
break;
case CMD_EVENT:
return (RCM_SUCCESS);
}
break;
}
/*
 * Create nvlist_t for any client-specific properties.
 */
"client action failed (nvlist_alloc=%s)\n",
}
/*
 * Process the operation via a callback to the client module.
 */
switch (cmd) {
case CMD_GETINFO:
break;
case CMD_SUSPEND:
break;
}
} else {
}
/*
 * Update the client's state before the operation.
 * If this is a cancelled query, then updating the state is
 * the only thing that needs to be done, so break afterwards.
 */
} else {
break;
}
&depend_info);
/* Update the client's state after the operation. */
if (rval == RCM_SUCCESS) {
} else {
}
} else {
if (rval == RCM_SUCCESS) {
} else {
}
}
break;
case CMD_RESUME:
break;
}
/* online state is unconditional */
break;
case CMD_OFFLINE:
break;
}
} else {
}
/*
 * Update the client's state before the operation.
 * If this is a cancelled query, then updating the state is
 * the only thing that needs to be done, so break afterwards.
 */
} else {
break;
}
/* Update the client's state after the operation. */
if (rval == RCM_SUCCESS) {
} else {
}
} else {
if (rval == RCM_SUCCESS) {
} else {
}
}
break;
case CMD_ONLINE:
break;
case CMD_REMOVE:
break;
case CMD_REQUEST_CHANGE:
&error, &depend_info);
break;
case CMD_NOTIFY_CHANGE:
&error, &depend_info);
break;
case CMD_EVENT:
if (ops->rcmop_notify_event)
&depend_info);
break;
default:
cmd);
rval = RCM_FAILURE;
break;
}
/* reset error code to the most significant error */
if (rval != RCM_SUCCESS)
/*
 * XXX - The code below may produce duplicate rcm_info_t's on error?
 */
if ((cmd != CMD_GETINFO) &&
((rval != RCM_SUCCESS) ||
if (dummy_error)
(void) free(dummy_error);
} else if (cmd != CMD_GETINFO) {
client_props = NULL;
}
if (client_props) {
}
if (info)
if (error)
if (depend_info) {
} else {
}
}
return (rval);
}
/*
 * invoke a callback into a list of clients, return 0 if all success
 *
 * NOTE(review): also incomplete -- the call into rsrc_client_action()
 * and the rval accumulation are missing from the loop body.
 */
int
{
while (list) {
continue;
if (error != RCM_SUCCESS) {
}
}
return (rval);
}
/*
 * Node related operations:
 *
 * rn_alloc, rn_free, rn_find_child,
 * rn_get_child, rn_get_sibling,
 * rsrc_node_find, rsrc_node_add_user, rsrc_node_remove_user,
 *
 * NOTE(review): every helper in this section is incomplete in this copy --
 * signatures and most body statements are missing and several brace pairs
 * do not balance.
 */
/* Allocate node based on a logical or physical name */
static rsrc_node_t *
{
return (node);
}
/*
 * Free node along with its siblings and children
 */
static void
{
return;
}
}
}
}
/*
 * Find next sibling
 */
static rsrc_node_t *
{
}
/*
 * Find first child
 */
static rsrc_node_t *
{
}
/*
 * Find child named childname. Create it if flag is RSRC_NODE_CREATE
 */
static rsrc_node_t *
{
"rn_find_child(parent=%s, child=%s, 0x%x, %d)\n",
/*
 * Children are ordered based on strcmp.
 */
}
return (child);
}
if (flag != RSRC_NODE_CREATE)
return (NULL);
/*
 * Set this linkage last so we don't break ongoing operations.
 *
 * N.B. Assume setting a pointer is an atomic operation.
 */
} else {
}
return (new);
}
/*
 * Pathname related help functions
 */
static void
{
char *tmp;
if (type != RSRC_TYPE_DEVICE)
return;
/*
 * For devices, convert ':' to '/' (treat minor nodes and children)
 */
return;
*tmp = '/';
}
/* returns the next '/'-separated component of pathname, or NULL when done */
static char *
{
char *slash;
return (NULL);
/* skip slashes */
while (*pathname == '/')
++pathname;
if (*pathname == '\0')
return (NULL);
*slash = '\0';
} else {
}
return (pathname);
}
/*
 * Find a node in tree based on device, which is the physical pathname
 * of the form /sbus@.../esp@.../sd@...
 *
 * NOTE(review): rsrc_node_find(), rsrc_node_add_user() and
 * rsrc_node_remove_user() are incomplete in this copy -- signatures,
 * tree-walk bodies and registration bookkeeping are missing; only the
 * error/early-return skeleton survives.
 */
int
{
int type;
/*
 * For RSRC_TYPE_ABSTRACT, look under /ABSTRACT. For other types,
 * look under /SYSTEM.
 */
return (EINVAL);
switch (type) {
case RSRC_TYPE_DEVICE:
case RSRC_TYPE_NORMAL:
break;
case RSRC_TYPE_ABSTRACT:
break;
default:
/* just to make sure */
return (EINVAL);
}
/*
 * Find position of device within tree. Upon exiting the loop, device
 * should be placed between prev and curr.
 */
return (RCM_SUCCESS);
}
}
return (RCM_SUCCESS);
}
/*
 * add a usage client to a node
 */
/*ARGSUSED*/
int
{
"rsrc_node_add_user(%s, %s, %s, %ld, 0x%x)\n",
/*
 * If a client_t already exists, add the registration and return
 * success if it's a valid registration request.
 *
 * Return EALREADY if the resource is already registered.
 * This means either the client_t already has the requested
 * registration flagged, or that a DR registration was attempted
 * on a resource already in use in the DR operations state model.
 */
return (EALREADY);
}
if ((flag & RCM_REGISTER_DR) &&
return (EALREADY);
}
if ((flag & RCM_REGISTER_DR) ||
}
return (RCM_SUCCESS);
}
/*
 * Otherwise create a new client_t and create a new registration.
 */
}
if (flag & RCM_FILESYS)
return (RCM_SUCCESS);
}
/*
 * remove a usage client of a node
 */
int
{
"client not registered: module=%s, pid=%d, dev=%s\n"),
return (ENOENT);
}
/* Strip off the registration being removed (DR, event, capacity) */
/*
 * Mark the client as removed if all registrations have been removed
 */
return (RCM_SUCCESS);
}
/*
 * Tree walking function - rsrc_walk
 *
 * NOTE(review): struct rn_stack appears to be missing its node-pointer
 * stack member in this copy, and walk_one_node()/rsrc_walk() below are
 * missing their signatures, the callback invocation and the push/pop
 * statements -- confirm against the upstream revision.
 */
#define MAX_TREE_DEPTH 32
#define RN_WALK_CONTINUE 0
#define RN_WALK_PRUNESIB 1
#define RN_WALK_PRUNECHILD 2
#define RN_WALK_TERMINATE 3
struct rn_stack {
char prunesib[MAX_TREE_DEPTH];
char prunechild[MAX_TREE_DEPTH];
int depth;
};
/* walking one node and update node stack */
/*ARGSUSED*/
static void
int (*node_callback)(rsrc_node_t *, void *))
{
int prunesib;
case RN_WALK_TERMINATE:
while (!EMPTY_STACK(sp)) {
}
return;
case RN_WALK_PRUNESIB:
break;
case RN_WALK_PRUNECHILD:
break;
case RN_WALK_CONTINUE:
default:
break;
}
/*
 * Push child on the stack
 */
return;
}
/*
 * Pop the stack till a node's sibling can be pushed
 */
while (!EMPTY_STACK(sp) &&
}
if (EMPTY_STACK(sp)) {
return;
}
/*
 * push sibling onto the stack
 */
}
/*
 * walk tree rooted at root in child-first order
 */
static void
int (*node_callback)(rsrc_node_t *, void *))
{
/*
 * Push root on stack and walk in child-first order
 */
while (!EMPTY_STACK(&stack)) {
}
}
/*
 * Callback for a command action on a node
 *
 * NOTE(review): node_action(), rsrc_tree_action() and the registration
 * query routine below are incomplete in this copy -- signatures, the
 * rsrc_client_action_list() calls and the loop bodies are missing.
 */
static int
{
/*
 * If flag indicates operation on a filesystem, we don't callback on
 * the filesystem root to avoid infinite recursion on filesystem module.
 *
 * N.B. Such request should only come from filesystem RCM module.
 */
if (flag & RCM_FILESYS) {
return (RN_WALK_CONTINUE);
}
/*
 * Execute state change callback
 */
/*
 * Upon hitting a filesys root, prune children.
 * The filesys module should have taken care of
 * children by now.
 */
return (RN_WALK_PRUNECHILD);
return (RN_WALK_CONTINUE);
}
/*
 * Execute a command on a subtree under root.
 */
int
{
}
/*
 * Get info on current registrations
 */
int
{
int initial_req;
int rv;
int i;
if (flag & RCM_INCLUDE_DEPENDENT) {
/*
 * if redundant request, skip the operation
 */
continue;
}
}
continue;
}
/*
 * Based on RCM_INCLUDE_SUBTREE flag, query either the subtree
 * or just the node.
 */
if (flag & RCM_INCLUDE_SUBTREE) {
} else {
}
}
out:
return (rv);
}
/*
 * Get the list of currently loaded module
 *
 * NOTE(review): the routines in this section (module-info list builder,
 * rcmd_db_init-style directory scan, db sync, and the /proc liveness
 * check) are incomplete in this copy -- signatures and most body
 * statements are missing.
 */
{
(void) mutex_lock(&mod_lock);
mod = module_head;
while (mod) {
}
(void) mutex_unlock(&mod_lock);
return (info);
}
/*
 * Initialize resource map - load all modules
 */
void
{
char *tmp;
int i;
char *dir_name;
int rcm_script;
if (script_main_init() == -1)
continue; /* try next directory */
}
continue;
if (rcm_script == 0) {
/* rcm module */
RCM_MODULE_SUFFIX)) == NULL) ||
continue;
}
}
if (rcm_script == 0)
gettext("%s: failed to load\n"),
continue;
}
/*
 * ask module to register for resource 1st time
 */
}
}
}
}
/*
 * sync resource map - ask all modules to register again
 */
void
{
return;
(void) mutex_lock(&mod_lock);
mod = module_head;
while (mod) {
/*
 * Hold module by incrementing ref count and release
 * mod_lock to avoid deadlock, since rcmop_register()
 * may callback into the daemon and request mod_lock.
 */
(void) mutex_unlock(&mod_lock);
(void) mutex_lock(&mod_lock);
}
(void) mutex_unlock(&mod_lock);
}
/*
 * Determine if a process is alive
 */
int
{
char path[64];
const char *procfs = "/proc";
return (1);
}
}
/*
 * Cleanup client list
 *
 * N.B. This routine runs in a single-threaded environment only. It is only
 * called by the cleanup thread, which never runs in parallel with other
 * threads.
 *
 * NOTE(review): the cleanup routines in this section are incomplete in
 * this copy -- signatures, the proc_exist() checks and the thr_create()
 * call the surviving messages refer to are missing.
 */
static void
{
/*
 * Cleanup notification clients for which pid no longer exists
 */
while (client) {
continue;
}
/*
 * Destroy this client_t. rsrc_client_remove updates
 * listp to point to the next client.
 */
}
}
/*ARGSUSED*/
static int
{
return (RN_WALK_CONTINUE);
}
static void
{
"clean_rsrc_tree(): delete stale dr clients\n");
}
static void
db_clean()
{
extern void clean_dr_list();
for (;;) {
(void) mutex_lock(&rcm_req_lock);
(void) mutex_unlock(&rcm_req_lock);
while (need_cleanup == 0)
/*
 * Make sure all other threads are either blocked or exited.
 */
need_cleanup = 0;
/*
 * clean dr_req_list
 */
/*
 * clean resource tree
 */
}
}
void
{
"rcm_db_clean(): launch thread to clean database\n");
gettext("failed to create cleanup thread %s\n"),
}
}
/*ARGSUSED*/
/*
 * NOTE(review): the debug-print walker, rcmd_db_print(), the
 * rcm_handle_t alloc/free pair and the s_malloc/s_calloc/s_realloc/
 * s_strdup wrappers below are all incomplete in this copy -- signatures
 * and the allocation/exit-on-failure statements are missing.
 */
static int
{
return (RN_WALK_CONTINUE);
}
while (user) {
}
return (RN_WALK_CONTINUE);
}
static void
{
(void) mutex_lock(&mod_lock);
mod = module_head;
while (mod) {
}
(void) mutex_unlock(&mod_lock);
}
/*
 * Allocate handle from calling into each RCM module
 */
static rcm_handle_t *
{
return (hdl);
}
/*
 * Free rcm_handle_t
 */
static void
{
}
/*
 * help function that exit on memory outage
 */
void *
{
}
return (buf);
}
void *
{
}
return (buf);
}
void *
{
}
return (new);
}
char *
{
}
return (buf);
}
/*
 * Convert a version 1 ops vector to current ops vector
 * Fields missing in version 1 are set to NULL.
 *
 * NOTE(review): modops_from_v1() is incomplete in this copy -- the
 * allocation and field-copy statements are missing, so `ops` is returned
 * uninitialized here.  The remaining routines in this section
 * (call_getinfo and the queue/list helpers) are likewise missing their
 * signatures and bodies.
 */
static struct rcm_mod_ops *
modops_from_v1(void *ops_v1)
{
struct rcm_mod_ops *ops;
return (ops);
}
/* call a module's getinfo routine; detects v1 ops and adjusts the call */
static int
rcm_info_t **infop)
{
int rval;
struct rcm_mod_ops_v1 *v1_ops;
infop);
return (rval);
} else {
client_props, infop));
}
}
void
{
}
void
{
}
void
{
}
void
{
}
{
return (element);
}
{
return (element);
}
void
{
}