/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <synch.h>
#include <meta.h>
#include <sdssc.h>
#include "rcm_module.h"
/*
* This module is the RCM Module for SVM. The policy adopted by this module
* is to block offline requests for any SVM resource that is in use. A
* resource is considered to be in use if it contains a metadb or if it is
* a non-errored component of a metadevice that is open.
*
* The module uses the library libmeta to access the current state of the
* metadevices. On entry, and when svm_register() is called, the module
* builds a cache of all of the SVM resources and their dependencies. Each
* metadevice has an entry of type deventry_t which is accessed by a hash
* function. When the cache is built each SVM resource is registered with
* the RCM framework. The check_device code path uses meta_invalidate_name to
* ensure that the caching in libmeta will not conflict with the cache
* we build within this code.
*
* When an RCM operation occurs that affects a registered SVM resource, the RCM
* framework will call the appropriate routine in this module. The cache
 * entry will be found and if the resource has dependents, a callback will
 * be made into the RCM framework to pass the request on to the dependents,
 * which may themselves be SVM resources.
*
* Locking:
* The cache is protected by a mutex
*/
/*
* Private constants
*/
/*
* Generic Messages
*/
/*
* Macros to produce a quoted string containing the value of a preprocessor
* macro. For example, if SIZE is defined to be 256, VAL2STR(SIZE) is "256".
* This is used to construct format strings for scanf-family functions below.
*/
#define QUOTE(x) #x
#define VAL2STR(x) QUOTE(x)
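/*
 * Illustrative use of the macros above (example only, not part of the
 * original code; DISKNAME_LEN is a hypothetical constant): with
 *
 *	#define DISKNAME_LEN 256
 *
 * the adjacent string literals in
 *
 *	(void) sscanf(input, "%" VAL2STR(DISKNAME_LEN) "s", name);
 *
 * concatenate to the format "%256s", which bounds how many characters
 * sscanf() may store in the name buffer.
 */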
typedef enum {
	SVM_SLICE = 0,
	SVM_STRIPE, SVM_CONCAT, SVM_MIRROR, SVM_RAID,
	SVM_TRANS, SVM_SOFTPART
} svm_type_t;
/* Hash table parameters */
/* Hot spare pool users */
typedef struct hspuser {
} hspuser_t;
/* Hot spare pool entry */
typedef struct hspentry {
} hspentry_t;
/* Hash table entry */
typedef struct deventry {
} deventry_t;
/* flag values */
/*
* Device redundancy flags. If the device can be removed from the
* metadevice configuration then it is considered a redundant device,
* otherwise not.
*/
#define NOTREDUNDANT 0
#define REDUNDANT 1	/* value assumed; only needs to differ from NOTREDUNDANT */
#define NOTINDEVICE -1	/* value assumed; device is not part of the component */
/* Cache */
typedef struct cache {
} cache_t;
/*
* Forward declarations of private functions
*/
static cache_t *create_cache();
rcm_info_t **infop);
/*
* Module-Private data
*/
{
NULL,
NULL,
};
/*
* Module Interface Routines
*/
/*
* rcm_mod_init()
*
* Create a cache, and return the ops structure.
* Input: None
* Return: rcm_mod_ops structure
*/
struct rcm_mod_ops *
rcm_mod_init()
{
	/* initialize the lock mutex */
	if (mutex_init(&svm_cache_lock, USYNC_THREAD, NULL) != 0) {
		rcm_log_message(RCM_ERROR,
		    gettext("SVM: can't init mutex"));
		return (NULL);
	}
/* need to initialize the cluster library to avoid seg faults */
	if (sdssc_bind_library() == SDSSC_ERROR) {
		rcm_log_message(RCM_ERROR,
		    gettext("SVM: Interface error with libsds_sc.so,"
		    " aborting."));
return (NULL);
}
	/* Create a cache */
	if ((svm_cache = create_cache()) == NULL) {
		rcm_log_message(RCM_ERROR,
		    gettext("SVM: module can't function, aborting."));
return (NULL);
}
/* Return the ops vectors */
return (&svm_ops);
}
/*
* rcm_mod_info()
*
* Return a string describing this module.
* Input: None
* Return: String
* Locking: None
*/
const char *
rcm_mod_info()
{
return (gettext("Solaris Volume Manager module 1.9"));
}
/*
* rcm_mod_fini()
*
* Destroy the cache and mutex
* Input: None
* Return: RCM_SUCCESS
* Locking: None
*/
int
rcm_mod_fini()
{
	(void) mutex_lock(&svm_cache_lock);
	if (svm_cache) {
		free_cache(&svm_cache);
	}
(void) mutex_unlock(&svm_cache_lock);
(void) mutex_destroy(&svm_cache_lock);
return (RCM_SUCCESS);
}
/*
* svm_register()
*
* Make sure the cache is properly sync'ed, and its registrations are in
* order.
*
* Input:
* rcm_handle_t *hd
* Return:
* RCM_SUCCESS
* Locking: the cache is locked throughout the execution of this routine
* because it reads and possibly modifies cache links continuously.
*/
static int
svm_register(rcm_handle_t *hd)
{
uint32_t i = 0;
deventry_t *l = NULL;
char *devicename;
/* Guard against bad arguments */
/* Lock the cache */
(void) mutex_lock(&svm_cache_lock);
/* If the cache has already been registered, then just sync it. */
(void) mutex_unlock(&svm_cache_lock);
return (RCM_SUCCESS);
}
/* If not, register the whole cache and mark it as registered. */
}
/* Unlock the cache */
(void) mutex_unlock(&svm_cache_lock);
return (RCM_SUCCESS);
}
/*
* svm_unregister()
*
 * Manually walk through the cache, unregistering all of the device
 * entries.
*
* Input:
* rcm_handle_t *hd
* Return:
* RCM_SUCCESS
* Locking: the cache is locked throughout the execution of this routine
* because it reads and modifies cache links continuously.
*/
static int
svm_unregister(rcm_handle_t *hd)
{
deventry_t *l = NULL;
uint32_t i = 0;
/* Guard against bad arguments */
/* Walk the cache, unregistering everything */
(void) mutex_lock(&svm_cache_lock);
(void) svm_unregister_device(hd, l);
}
svm_cache->registered = 0;
}
(void) mutex_unlock(&svm_cache_lock);
return (RCM_SUCCESS);
}
/*
* svm_offline()
*
* Determine dependents of the resource being offlined, and offline
* them all.
*
* Input:
* rcm_handle_t *hd handle
 * char *rsrc resource name
* id_t id 0
* char **errorp ptr to error message
* rcm_info_t **infop ptr to info string
* Output:
* char **errorp pass back error message
* Return:
* int RCM_SUCCESS or RCM_FAILURE
* Locking: the cache is locked for most of this routine, except while
* processing dependents.
*/
/*ARGSUSED*/
static int
{
int ret;
char **dependents;
/* Guard against bad arguments */
/* Trace */
/* Lock the cache */
(void) mutex_lock(&svm_cache_lock);
/* Lookup the resource in the cache. */
(void) mutex_unlock(&svm_cache_lock);
rv = RCM_FAILURE;
return (rv);
}
/* If it is a TRANS device, do not allow the offline */
rv = RCM_FAILURE;
(void) mutex_unlock(&svm_cache_lock);
goto exit;
}
/*
* If this is in a hot spare pool, check to see
* if any of the hot spare pool users are open
*/
while (hspentry) {
while (hspuser) {
/* Check if open */
rv = RCM_FAILURE;
(void) mutex_unlock(&svm_cache_lock);
goto exit;
}
}
}
}
/* Fail if the device contains a metadb replica */
/*
* The user should delete the replica before continuing,
* so force the error.
*/
rv = RCM_FAILURE;
(void) mutex_unlock(&svm_cache_lock);
goto exit;
}
/* Get dependents */
rv = RCM_FAILURE;
(void) mutex_unlock(&svm_cache_lock);
goto exit;
}
if (dependents) {
	/* Check if the device is broken (needs maintenance). */
/*
* The device is broken, the offline request should
* succeed, so ignore any of the dependents.
*/
"SVM: ignoring dependents\n");
(void) mutex_unlock(&svm_cache_lock);
goto exit;
}
(void) mutex_unlock(&svm_cache_lock);
if (ret != RCM_SUCCESS) {
}
} else {
/* If no dependents, check if the metadevice is open */
rv = RCM_FAILURE;
(void) mutex_unlock(&svm_cache_lock);
goto exit;
}
(void) mutex_unlock(&svm_cache_lock);
}
exit:
if (rv != RCM_SUCCESS)
return (rv);
}
/*
* svm_online()
*
* Just pass the online notification on to the dependents of this resource
*
* Input:
* rcm_handle_t *hd handle
 * char *rsrc resource name
* id_t id 0
* char **errorp ptr to error message
* rcm_info_t **infop ptr to info string
* Output:
* char **errorp pass back error message
* Return:
* int RCM_SUCCESS or RCM_FAILURE
* Locking: the cache is locked for most of this routine, except while
* processing dependents.
*/
/*ARGSUSED*/
static int
rcm_info_t **infop)
{
char **dependents;
/* Guard against bad arguments */
/* Trace */
/* Lookup this resource in the cache (cache gets locked) */
(void) mutex_lock(&svm_cache_lock);
(void) mutex_unlock(&svm_cache_lock);
return (RCM_FAILURE);
}
/* Get dependents */
(void) mutex_unlock(&svm_cache_lock);
return (RCM_FAILURE);
}
(void) mutex_unlock(&svm_cache_lock);
if (dependents) {
if (rv != RCM_SUCCESS)
}
return (rv);
}
/*
* svm_get_info()
*
* Gather usage information for this resource.
*
* Input:
* rcm_handle_t *hd handle
 * char *rsrc resource name
* id_t id 0
* char **errorp ptr to error message
* nvlist_t *props Not used
* rcm_info_t **infop ptr to info string
* Output:
* char **infop pass back info string
* Return:
* int RCM_SUCCESS or RCM_FAILURE
* Locking: the cache is locked throughout the whole function
*/
/*ARGSUSED*/
static int
{
char **dependents;
/* Guard against bad arguments */
/* Trace */
/* Lookup this resource in the cache (cache gets locked) */
(void) mutex_lock(&svm_cache_lock);
(void) mutex_unlock(&svm_cache_lock);
return (RCM_FAILURE);
}
}
gettext("contains soft partition(s)"));
}
gettext("soft partition based on \"%s\""),
}
int hspflag = 0;
while (hspentry) {
if (hspflag == 0) {
gettext("member of hot spare pool"));
hspflag = 1;
}
}
} else {
while (dependent) {
/* Resource has dependents */
case SVM_STRIPE:
gettext("component of stripe \"%s\""),
break;
case SVM_CONCAT:
gettext("component of concat \"%s\""),
break;
case SVM_MIRROR:
gettext("submirror of \"%s\""),
break;
case SVM_RAID:
gettext("component of RAID \"%s\""),
break;
case SVM_TRANS:
gettext("trans log for \"%s\""),
} else {
gettext("trans master for \"%s\""),
}
break;
case SVM_SOFTPART:
/* Contains soft parts, already processed */
break;
default:
gettext("Unknown type %d\n"),
}
}
}
/* Get dependents and recurse if necessary */
(void) mutex_unlock(&svm_cache_lock);
return (RCM_FAILURE);
}
(void) mutex_unlock(&svm_cache_lock);
if (rv != RCM_SUCCESS)
}
return (rv);
}
/*
* svm_suspend()
*
* Notify all dependents that the resource is being suspended.
* Since no real operation is involved, QUERY or not doesn't matter.
*
* Input:
* rcm_handle_t *hd handle
 * char *rsrc resource name
* id_t id 0
* char **errorp ptr to error message
* rcm_info_t **infop ptr to info string
* Output:
* char **errorp pass back error message
* Return:
* int RCM_SUCCESS or RCM_FAILURE
* Locking: the cache is locked for most of this routine, except while
* processing dependents.
*/
static int
{
char **dependents;
/* Guard against bad arguments */
/* Trace */
/* Lock the cache and extract information about this resource. */
(void) mutex_lock(&svm_cache_lock);
(void) mutex_unlock(&svm_cache_lock);
return (RCM_SUCCESS);
}
/* Get dependents */
(void) mutex_unlock(&svm_cache_lock);
return (RCM_FAILURE);
}
(void) mutex_unlock(&svm_cache_lock);
if (dependents) {
if (rv != RCM_SUCCESS)
}
return (rv);
}
/*
* svm_resume()
*
* Notify all dependents that the resource is being resumed.
*
* Input:
* rcm_handle_t *hd handle
 * char *rsrc resource name
* id_t id 0
* char **errorp ptr to error message
* rcm_info_t **infop ptr to info string
* Output:
* char **errorp pass back error message
* Return:
* int RCM_SUCCESS or RCM_FAILURE
* Locking: the cache is locked for most of this routine, except while
* processing dependents.
*
*/
static int
rcm_info_t **infop)
{
char **dependents;
/* Guard against bad arguments */
/* Trace */
/*
* Lock the cache just long enough to extract information about this
* resource.
*/
(void) mutex_lock(&svm_cache_lock);
(void) mutex_unlock(&svm_cache_lock);
return (RCM_SUCCESS);
}
/* Get dependents */
(void) mutex_unlock(&svm_cache_lock);
return (RCM_FAILURE);
}
(void) mutex_unlock(&svm_cache_lock);
if (dependents) {
if (rv != RCM_SUCCESS)
}
return (rv);
}
/*
* svm_remove()
*
* Remove the resource from the cache and notify all dependents that
* the resource has been removed.
*
* Input:
* rcm_handle_t *hd handle
 * char *rsrc resource name
* id_t id 0
* char **errorp ptr to error message
* rcm_info_t **infop ptr to info string
* Output:
* char **errorp pass back error message
* Return:
* int RCM_SUCCESS or RCM_FAILURE
* Locking: the cache is locked for most of this routine, except while
* processing dependents.
*/
static int
rcm_info_t **infop)
{
char **dependents;
/* Guard against bad arguments */
/* Trace */
/* Lock the cache while removing resource */
(void) mutex_lock(&svm_cache_lock);
(void) mutex_unlock(&svm_cache_lock);
return (RCM_SUCCESS);
}
/* Get dependents */
(void) mutex_unlock(&svm_cache_lock);
return (RCM_FAILURE);
}
if (dependents) {
(void) mutex_unlock(&svm_cache_lock);
(void) mutex_lock(&svm_cache_lock);
if (rv != RCM_SUCCESS)
}
/* Mark entry as removed */
(void) mutex_unlock(&svm_cache_lock);
/* Clean up and return success */
return (RCM_SUCCESS);
}
/*
* Definitions of private functions
*
*/
/*
* find_hsp()
*
* Find the hot spare entry from the linked list of all hotspare pools
*
* Input:
* char *hspname name of hot spare pool
* Return:
* hspentry_t hot spare entry
*/
static hspentry_t *
find_hsp(char *hspname)
{
while (hspentry) {
return (hspentry);
}
return (NULL);
}
/*
* add_hsp_user()
*
 * Add a hot spare pool user to the list for the hsp specified by
* hspname. The memory allocated here will be freed by free_cache()
*
* Input:
* char *hspname hot spare pool name
* deventry_t *deventry specified hsp user
* Return:
* hspuser_t entry in hsp user list
*/
static hspuser_t *
add_hsp_user(char *hspname, deventry_t *deventry)
{
char *newhspusername;
return (NULL);
if (newhspuser == NULL) {
gettext("SVM: can't malloc hspuser"));
return (NULL);
}
if (newhspusername == NULL) {
gettext("SVM: can't malloc hspusername"));
return (NULL);
}
} else {
}
return (newhspuser);
}
/*
* add_hsp()
*
* Add a hot spare pool entry to the list for the slice, deventry.
* Also add to the linked list of all hsp pools
 * The memory allocated here will be freed by free_cache()
*
* Input:
* char *hspname name of hsp pool entry
* deventry_t *deventry device entry for the slice
* Return:
* hspentry_t end of hsp list
* Locking: None
*/
static hspentry_t *
add_hsp(char *hspname, deventry_t *deventry)
{
char *newhspname;
hspname);
if (newhspentry == NULL) {
gettext("SVM: can't malloc hspentry"));
return (NULL);
}
if (newhspname == NULL) {
gettext("SVM: can't malloc hspname"));
return (NULL);
}
/* Add to linked list of all hotspare pools */
/* Add to list of hotspare pools containing this slice */
} else {
}
hspname);
return (newhspentry);
}
/*
* cache_dependent()
*
 * Add a dependent for a deventry to the cache and return the cache entry.
 * If the name is not in the cache, we assume that it is a SLICE. If it
* turns out to be any other type of metadevice, when it is processed
* in cache_all_devices_in_set(), cache_device() will be called to
* set the type to the actual value.
*
* Input:
* cache_t *cache cache
* char *devname metadevice name
* int devflags metadevice flags
* deventry_t *dependent dependent of this metadevice
* Return:
* deventry_t metadevice entry added to cache
* Locking: None
*/
static deventry_t *
cache_dependent(cache_t *cache, char *devname, int devflags,
    deventry_t *dependent)
{
int comp;
gettext("SVM: can't hash device."));
return (NULL);
}
/* if the hash table slot is empty, then this is easy */
} else {
/* if the hash table slot isn't empty, find the immediate successor */
}
if (comp == 0) {
/* if already in cache, just update the flags */
} else {
/* insert the entry if it's not already there */
gettext("SVM: can't create hash line."));
return (NULL);
}
if (comp > 0) {
if (hashprev)
else
} else if (comp < 0) {
}
}
}
/* complete deventry by linking the dependent to it */
return (deventry);
}
/*
* cache_device()
*
* Add an entry to the cache for devname
*
* Input:
* cache_t *cache cache
 * char *devname metadevice name
* svm_type_t devtype metadevice type
* md_dev64_t devkey dev_t of device
* int devflags device flags
* Return:
* deventry_t metadevice added to cache
* Locking: None
*/
static deventry_t *
cache_device(cache_t *cache, char *devname, svm_type_t devtype,
    md_dev64_t devkey, int devflags)
{
int comp;
gettext("SVM: can't hash device."));
return (NULL);
}
/* if the hash table slot is empty, then this is easy */
devflags);
} else {
/* if the hash table slot isn't empty, find the immediate successor */
}
if (comp == 0) {
/*
* If entry already exists, just set the type, key
* and flags
*/
} else {
/* insert the entry if it's not already there */
gettext("SVM: can't create hash line."));
}
if (comp > 0) {
if (previous)
else
} else if (comp < 0) {
}
}
}
return (deventry);
}
/*
* free_names()
*
* Free all name list entries
*
* Input:
* mdnamelist_t *np namelist pointer
* Return: None
*/
static void
free_names(mdnamelist_t *np)
{
mdnamelist_t *p;
}
}
/*
* cache_hsp()
*
* Add an entry to the cache for each slice in the hot spare
* pool. Call add_hsp() to add the hot spare pool to the list
* of all hot spare pools.
*
* Input:
* cache_t *cache cache
* mdnamelist_t *nlp pointer to hsp name
* md_hsp_t *hsp
* Return:
* 0 if successful or error code
*/
static int
cache_hsp(cache_t *cache, mdnamelist_t *nlp, md_hsp_t *hsp)
{
int i;
return (ENOMEM);
}
return (ENOMEM);
}
}
return (0);
}
/*
* cache_trans()
*
 * Add an entry to the cache for the trans metadevice, the master
 * and the log. Call cache_dependent() to link the master and
 * the log to the trans metadevice.
*
* Input:
* cache_t *cache cache
* mdnamelist_t *nlp pointer to trans name
* md_trans_t *trans
* Return:
* 0 if successful or error code
*
*/
static int
cache_trans(cache_t *cache, mdnamelist_t *nlp, md_trans_t *trans)
{
return (ENOMEM);
}
return (ENOMEM);
}
antecedent) == NULL) {
return (ENOMEM);
}
return (ENOMEM);
}
antecedent) == NULL) {
return (ENOMEM);
}
}
return (0);
}
/*
* cache_mirror()
*
* Add an entry to the cache for the mirror. For each
* submirror, call cache_dependent() to add an entry to the
* cache and to link it to mirror entry.
*
* Input:
* cache_t *cache cache
* mdnamelist_t *nlp pointer to mirror name
* md_mirror_t *mirror
* Return:
* 0 if successful or error code
*
*/
static int
cache_mirror(cache_t *cache, mdnamelist_t *nlp, md_mirror_t *mirror)
{
int i;
return (ENOMEM);
}
for (i = 0; i < NMIRROR; i++) {
continue;
if (!submirror->submirnamep)
continue;
0, antecedent) == NULL) {
return (ENOMEM);
}
}
return (0);
}
/*
* cache_raid()
*
* Add an entry to the cache for the RAID metadevice. For
 * each component of the RAID, call cache_dependent() to
 * add it to the cache and to link it to the RAID metadevice.
*
* Input:
* cache_t *cache cache
* mdnamelist_t *nlp pointer to raid name
* md_raid_t *raid mirror
* Return:
* 0 if successful or error code
*/
static int
cache_raid(cache_t *cache, mdnamelist_t *nlp, md_raid_t *raid)
{
int i;
return (ENOMEM);
}
antecedent) == NULL) {
return (ENOMEM);
}
}
if (cache_dependent(cache,
antecedent) == NULL) {
return (ENOMEM);
}
}
return (0);
}
/*
* cache_stripe()
*
 * Add a CONCAT or a STRIPE entry to the cache for the
* metadevice and call cache_dependent() to add each
* component to the cache.
*
* Input:
* cache_t *cache cache
* mdnamelist_t *nlp pointer to stripe name
* md_stripe_t *stripe
* Return:
* 0 if successful or error code
*
*/
static int
cache_stripe(cache_t *cache, mdnamelist_t *nlp, md_stripe_t *stripe)
{
int i;
return (ENOMEM);
}
antecedent) == NULL) {
return (ENOMEM);
}
}
int j;
return (ENOMEM);
}
if (cache_dependent(cache,
antecedent) == NULL) {
return (ENOMEM);
}
}
}
return (0);
}
/*
* cache_sp()
*
* Add an entry to the cache for the softpart and also call
* cache_dependent() to set the CONT_SOFTPART flag in the
* cache entry for the metadevice that contains the softpart.
*
* Input:
* cache_t *cache cache
* mdnamelist_t *nlp pointer to soft part name
* md_sp_t *soft_part
* Return:
* 0 if successful or error code
*
*/
static int
cache_sp(cache_t *cache, mdnamelist_t *nlp, md_sp_t *soft_part)
{
return (ENOMEM);
}
return (ENOMEM);
}
return (0);
}
/*
* cache_all_devices_in_set()
*
* Add all of the metadevices and mddb replicas in the set to the
* cache
*
* Input:
* cache_t *cache cache
* mdsetname_t *sp setname
* Return:
* 0 if successful or error code
*/
static int
cache_all_devices_in_set(cache_t *cache, mdsetname_t *sp)
{
/* Add each mddb replica to the cache */
/* there are no metadb's; that is ok, no need to check the rest */
mdclrerror(&error);
return (0);
}
CONT_METADB) == NULL) {
return (ENOMEM);
}
}
/* Process Hot Spare pools */
return (ENOMEM);
}
}
}
}
/* Process Trans devices */
&error);
continue;
}
return (ENOMEM);
}
}
}
}
/* Process Mirrors */
&error);
continue;
}
return (ENOMEM);
}
}
}
}
/* Process Raid devices */
&error);
continue;
}
return (ENOMEM);
}
}
}
}
/* Process Slices */
&error);
continue;
}
return (ENOMEM);
}
}
}
}
/* Process Soft partitions */
&error);
continue;
}
return (ENOMEM);
}
}
}
}
mdclrerror(&error);
return (0);
}
/*
* create_all_devices()
*
* Cache all devices in all sets
*
* Input:
* cache_t cache
* Return:
* 0 if successful, error code if not
* Locking: None
*/
static int
{
int max_sets;
int i;
return (0);
}
mdclrerror(&error);
return (0);
}
"SVM: cache_all_devices,max sets = %d\n", max_sets);
/* for each possible set number, see if we really have a diskset */
for (i = 0; i < max_sets; i++) {
"SVM: cache_all_devices no set: setno %d\n", i);
/*
 * The metad rpc program is not available, so there
 * are no metasets. This is indicated either by an
 * RPC error or by the fact that the service is not
 * enabled.
*/
break;
}
continue;
}
return (ENOMEM);
}
}
mdclrerror(&error);
return (0);
}
/*
* create_cache()
*
* Create an empty cache
* If the function fails free_cache() will be called to free any
* allocated memory.
*
* Input: None
* Return:
* cache_t cache created
* Locking: None
*/
static cache_t *
create_cache()
{
int ret;
size = HASH_DEFAULT;
/* try allocating storage for a new, empty cache */
return (NULL);
}
return (NULL);
}
/* Initialise linked list of hsp entries */
/* add entries to cache */
if (ret != 0) {
free_cache(&cache);
return (NULL);
}
/* Mark the cache as new */
cache->registered = 0;
/* Finished - return the new cache */
return (cache);
}
/*
* create_deventry()
*
* Create a new deventry entry for device with name devname
 * The memory allocated here will be freed by free_cache()
*
* Input:
* char *devname device name
* svm_type_t devtype metadevice type
* md_dev64_t devkey device key
* int devflags device flags
* Return:
* deventry_t New deventry
* Locking: None
*/
static deventry_t *
create_deventry(char *devname, svm_type_t devtype, md_dev64_t devkey,
    int devflags)
{
if (newdeventry == NULL) {
gettext("SVM: can't malloc deventrys"));
goto errout;
}
if (newdevname == NULL) {
gettext("SVM: can't malloc devname"));
goto errout;
}
/*
* When we register interest in a name starting with /dev/, RCM
* will use realpath to convert the name to a /devices name before
* storing it. metaclear removes both the /dev and the /devices
* form of the name of a metadevice from the file system. Thus,
* when we later call rcm_unregister_interest to get rid of a
* metacleared device, RCM will not be able to derive the /devices
* name for the /dev name. Thus, to unregister we will need to use
* the /devices name. We will save it now, so that we have it when
* it comes time to unregister.
*/
if (devicesname == NULL) {
gettext("SVM: can't malloc PATH_MAX bytes"));
goto errout;
}
devicesname = NULL;
}
}
} else {
"SVM created deventry for %s (%s)\n",
}
return (newdeventry);
if (devicesname != NULL)
if (newdevname != NULL)
if (newdeventry != NULL)
return (NULL);
}
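/*
 * A minimal sketch of the idea described in the comment above
 * (illustrative only, not the original implementation): resolve and save
 * the /devices form of the name at creation time so that it is still
 * available for unregistration after a metaclear.
 *
 *	devicesname = malloc(PATH_MAX);
 *	if (devicesname != NULL &&
 *	    realpath(devname, devicesname) == NULL) {
 *		free(devicesname);
 *		devicesname = NULL;
 *	}
 */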
/*
* cache_remove()
*
 * Given a cache and a deventry, the deventry is removed from the
 * cache's tables and the memory for the deventry is freed.
*
* Input:
* cache_t *cache cache
* deventry_t *deventry deventry to be removed
* Return: None
* Locking: The cache must be locked by the caller prior to calling
* this routine.
*/
static void
cache_remove(cache_t *cache, deventry_t *deventry)
{
/* sanity check */
return;
/* If this is in the hash table, remove it from there */
gettext("SVM: can't hash device."));
return;
}
while (olddeventry) {
if (olddeventry->devname &&
break;
}
}
if (olddeventry) {
if (previous)
else
/*
* If this is in a hot spare pool, remove the list
* of hot spare pools that it is in along with
* all of the volumes that are users of the pool
*/
while (hspentry) {
while (hspuser) {
}
}
}
}
}
/*
* cache_lookup()
*
* Return the deventry corresponding to devname from the cache
* Input:
* cache_t cache cache
* char *devname name to lookup in cache
* Return:
* deventry_t deventry of name, NULL if not found
* Locking: cache lock held on entry and on exit
*/
static deventry_t *
cache_lookup(cache_t *cache, char *devname)
{
int comp;
gettext("SVM: can't hash resource."));
return (NULL);
}
while (deventry) {
if (comp == 0)
return (deventry);
if (comp > 0)
return (NULL);
}
return (NULL);
}
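/*
 * Sketch of the search loop above (illustrative only; the hashline, size
 * and next field names are assumptions): entries in a hash bucket are
 * kept sorted by name, so the walk can stop as soon as a larger name is
 * seen.
 *
 *	deventry = cache->hashline[hash(cache->size, devname)];
 *	for (; deventry != NULL; deventry = deventry->next) {
 *		comp = strcmp(deventry->devname, devname);
 *		if (comp == 0)
 *			return (deventry);
 *		if (comp > 0)
 *			return (NULL);
 *	}
 *	return (NULL);
 */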
/*
* cache_sync()
*
* Resync cache with the svm database. First a new cache is created
* that represents the current state of the SVM database. The
* function walks the new cache to look for new entries that must be
* registered. The new entries are kept in a list, because we cannot
* register them at this point. Entries that appear in both caches
* are removed from the old cache. Because of this at the end of the
* walk, the old cache will only contain devices that have been
* removed and need to be unregistered.
*
* Next the old cache is walked, so that we can unregister the devices
* that are no longer present.
*
* Finally, we process the list of new devices that must be
* registered. There is a reason why we must unregister the removed
* (metacleared) devices before registering the new ones. It has to
* do with the fact that rcm_register_interest calls realpath(3C) to
* convert a /dev name to a /devices name. It uses the /devices name
* for storing the device information.
*
 * It can happen between cache_syncs that the administrator
* metaclears one metadevice and metacreates a new one. For example,
*
* metaclear acct
* metainit engr 1 1 c1t12d0s0
*
* The metaclear operation frees up the minor number that was being
* used by acct. The metainit operation can then reuse the minor
* number. This means that both metadevices would have the same
* /devices name even though they had different /dev names. Since
* rcm_register_interest uses /devices names for storing records, we
* need to unregister acct before registering engr. Otherwise we
* would get an EALREADY errno and a failed registration. This is why
* cache_sync creates a list of devices to be registered after all the
* removed devices have been unregistered.
*
* Input:
* rcm_handle_t *hd rcm handle
* cache_t **cachep pointer to cache
 * Output:
 * cache_t **cachep pointer to new cache
 * Return: None
* Locking: The cache must be locked prior to entry
*/
static void
cache_sync(rcm_handle_t *hd, cache_t **cachep)
{
char *devicename;
/* register_list */
uint32_t i = 0;
/* Get a new cache */
return;
}
/* For every entry in the new cache... */
/* Look for this entry in the old cache */
/*
* If no entry in old cache, register the resource. If there
* is an entry, but it is marked as removed, register it
* again and remove it from the old cache
*/
} else {
}
/* Save this entry if we need to register it later. */
if (register_this) {
if (register_count >= allocated) {
/* Need to extend our array */
allocated * sizeof (*register_list));
if (register_list == NULL) {
/* Out of memory. Give up. */
return;
}
}
}
}
/*
* For every device left in the old cache, just unregister if
* it has not already been removed
*/
i = 0;
}
}
/* Register the new devices. */
for (i = 0; i < register_count; i++) {
deventry = *(register_list + i);
}
if (register_list)
/* Swap pointers */
/* Destroy old cache */
/* Mark the new cache as registered */
}
/*
* cache_walk()
*
 * Perform one step of a walk through the cache. The i and line
* parameters are updated to store progress of the walk for future steps.
* They must all be initialized for the beginning of the walk
* (i = 0, line = NULL). Initialize variables to these values for these
* parameters, and then pass in the address of each of the variables
* along with the cache. A NULL return value will be given to indicate
* when there are no more cached items to be returned.
*
* Input:
* cache_t *cache cache
* uint32_t *i hash table index of prev entry
* deventry_t **line ptr to previous device entry
* Output:
* uint32_t *i updated hash table index
* deventry_t **line ptr to device entry
* Return:
* char* device name (NULL for end of cache)
* Locking: The cache must be locked prior to calling this routine.
*/
static char *
cache_walk(cache_t *cache, uint32_t *i, deventry_t **line)
{
uint32_t j;
/* sanity check */
return (NULL);
/* if initial values were given, look for the first entry */
*i = j;
}
}
} else {
/* otherwise, look for the next entry for this hash value */
} else {
/* next look further down in the hash table */
*i = j;
}
}
}
}
/*
* We would have returned somewhere above if there were any more
* entries. So set the sentinel values and return a NULL.
*/
return (NULL);
}
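/*
 * Illustrative caller of cache_walk() (sketch only; it mirrors the walk
 * described for svm_unregister() and assumes the cache lock is already
 * held by the caller):
 *
 *	uint32_t i = 0;
 *	deventry_t *l = NULL;
 *	char *devicename;
 *
 *	while ((devicename = cache_walk(svm_cache, &i, &l)) != NULL) {
 *		(void) svm_unregister_device(hd, l);
 *	}
 */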
/*
* free_cache()
*
* Given a pointer to a cache structure, this routine will free all
* of the memory allocated within the cache.
*
* Input:
* cache_t **cache ptr to cache
* Return: None
* Locking: cache lock held on entry
*/
static void
free_cache(cache_t **cache)
{
/* sanity check */
return;
/* de-reference the cache pointer */
/* free the hash table */
}
}
/*
* free_deventry()
*
* This routine frees all of the memory allocated within a node of a
* deventry.
*
* Input:
* deventry_t **deventry ptr to deventry
* Return: None
* Locking: cache lock held on entry
*/
static void
free_deventry(deventry_t **deventry)
{
/*
* If this is in a hot spare pool, remove the
* memory allocated to hot spare pools and
* the users of the pool
*/
while (hspentry) {
while (hspuser) {
}
}
}
if ((*deventry)->devicesname)
*deventry = olddeventry;
}
}
}
/*
* hash()
*
* A rotating hashing function that converts a string 's' to an index
* in a hash table of size 'h'.
*
* Input:
* uint32_t h hash table size
* char *s string to be hashed
* Return:
* uint32_t hash value
* Locking: None
*/
static uint32_t
hash(uint32_t h, char *s)
{
int len;
int hash, i;
}
return (hash % h);
}
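/*
 * A minimal sketch of a rotating string hash consistent with the
 * description above (illustrative only, not necessarily the original
 * implementation):
 *
 *	static uint32_t
 *	hash_sketch(uint32_t h, char *s)
 *	{
 *		uint32_t val = 0;
 *		int i, len = strlen(s);
 *
 *		for (i = 0; i < len; i++)
 *			val = (val << 4) ^ (val >> 28) ^ (unsigned char)s[i];
 *		return (val % h);
 *	}
 */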
/*
* svm_register_device()
*
* Register a device
*
* Input:
* rcm_handle_t *hd rcm handle
* char *devname device name
* Return: None
* Locking: None
*/
static void
svm_register_device(rcm_handle_t *hd, char *devname)
{
/* Sanity check */
return;
devname);
}
}
/*
* add_dep()
*
* Add an entry to an array of dependent names for a device. Used to
* build an array to call the rcm framework with when passing on a
* DR request.
*
* Input:
* int *ndeps ptr to current number of deps
* char ***depsp ptr to current dependent array
* deventry_t *deventry deventry of device to be added
* Output:
* int *ndeps ptr to updated no of deps
 * char ***depsp ptr to new dependent array
* Return:
 * int 0 if ok, -1 if failed to allocate memory
* Locking: None
*/
static int
add_dep(int *ndeps, char ***depsp, deventry_t *deventry)
{
char **deps_new;
*ndeps += 1;
gettext("SVM: cannot allocate dependent array (%s).\n"),
return (-1);
}
return (0);
}
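/*
 * A minimal sketch of the growth step described above (illustrative
 * only; it assumes the array is kept NULL-terminated so that it can be
 * handed straight to the RCM framework):
 *
 *	*ndeps += 1;
 *	deps_new = realloc(*depsp, ((*ndeps) + 1) * sizeof (char *));
 *	if (deps_new == NULL)
 *		return (-1);
 *	deps_new[*ndeps - 1] = strdup(deventry->devname);
 *	deps_new[*ndeps] = NULL;
 *	*depsp = deps_new;
 *	return (0);
 */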
/*
* get_dependent()
*
* Create a list of all dependents of a device
* Do not add dependent if it is marked as removed
*
* Input:
* deventry_t *deventry device entry
* Output:
 * char ***dependentsp ptr to dependent list
* Return:
* int 0, if ok, -1 if failed
* Locking: None
*/
static int
{
int ndeps = 0;
*dependentsp = NULL;
return (0);
}
/*
* do not add dependent if we have
 * already received a remove notification
*/
return (-1);
}
if (ndeps == 0) {
*dependentsp = NULL;
} else {
*dependentsp = deps;
}
return (0);
}
/*
* add_to_usage()
* Add string to the usage string pointed at by usagep. Allocate memory
* for the new usage string and free the memory used by the original
* usage string
*
* Input:
* char **usagep ptr to usage string
* char *string string to be added to usage
* Return:
* char ptr to new usage string
* Locking: None
*/
char *
add_to_usage(char **usagep, char *string)
{
int len;
len = 0;
} else {
}
if (*usagep) {
}
}
return (new_usage);
}
/*
* add_to_usage_fmt()
*
 * Add a formatted string, of the form "blah %s", to the usage string
* pointed at by usagep. Allocate memory for the new usage string and free
* the memory used by the original usage string.
*
* Input:
* char **usagep ptr to current usage string
* char *fmt format string
* char *string string to be added
* Return:
* char* new usage string
* Locking: None
*/
/*PRINTFLIKE2*/
char *
add_to_usage_fmt(char **usagep, char *fmt, char *string)
{
int len;
char *usage;
}
return (new_usage);
}
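/*
 * Illustrative use of the two helpers above (sketch only): the
 * svm_get_info() path builds the usage string one clause at a time,
 * for example
 *
 *	usage = add_to_usage(&usage, gettext("contains soft partition(s)"));
 *	usage = add_to_usage_fmt(&usage, gettext("submirror of \"%s\""),
 *	    mirrorname);
 *
 * where mirrorname is a hypothetical variable used only for this example.
 */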
/*
* is_open()
*
* Make ioctl call to find if a device is open
*
* Input:
* dev_t devkey dev_t for device
* Return:
* int 0 if not open, !=0 if open
* Locking: None
*/
static int
is_open(dev_t devkey)
{
int fd;
/* Open admin device */
return (0);
}
return (0);
}
return (isopen_ioc.isopen);
}
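/*
 * A sketch of the pattern described above (illustrative only; the admin
 * device path and the md_isopen_t/MD_IOCISOPEN names are assumptions and
 * are not taken from this file):
 *
 *	md_isopen_t isopen_ioc;
 *
 *	if ((fd = open("/dev/md/admin", O_RDONLY, 0)) < 0)
 *		return (0);
 *	(void) memset(&isopen_ioc, 0, sizeof (isopen_ioc));
 *	isopen_ioc.dev = devkey;
 *	if (ioctl(fd, MD_IOCISOPEN, &isopen_ioc) < 0) {
 *		(void) close(fd);
 *		return (0);
 *	}
 *	(void) close(fd);
 *	return (isopen_ioc.isopen);
 */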
/*
* check_softpart()
*
* Check the status of the passed in device within the softpartition.
*
* Input:
* mdsetname_t * the name of the set
* mdname_t * the softpartition device that is being examined
* char * the device which needs to be checked
* md_error_t * error pointer (not used)
* Return:
* int REDUNDANT - device is redundant and can be
* removed
* NOTREDUNDANT - device cannot be removed
* NOTINDEVICE - device is not part of this
* component
*/
static int
check_softpart(mdsetname_t *sp, mdname_t *np, char *uname, md_error_t *ep)
{
/* softp cannot be NULL, if it is then the RCM cache is corrupt */
/*
 * If the softpartition is not a parent then nothing can be done; the
 * user must close the device and then fix the underlying devices.
*/
"SVM: softpart is a top level device\n");
return (NOTREDUNDANT);
}
/*
* This can occur if this function has been called by the
* check_raid5 code as it is cycling through each column
* in turn.
*/
"SVM: %s is not in softpart (%s)\n",
return (NOTINDEVICE);
}
/*
 * Check the status of the soft partition. This only moves from
 * an okay state if the underlying device fails while the soft
 * partition is open.
*/
"SVM: softpart is broken (state: 0x%x)\n",
return (REDUNDANT);
}
return (NOTREDUNDANT);
}
/*
* check_raid5()
*
* Check the status of the passed in device within the raid5 in question.
*
* Input:
* mdsetname_t * the name of the set
* mdname_t * the raid5 device that is being examined
* char * the device which needs to be checked
* md_error_t * error pointer (not used)
* Return:
* int REDUNDANT - device is redundant and can be
* removed
* NOTREDUNDANT - device cannot be removed
*/
static int
check_raid5(mdsetname_t *sp, mdname_t *np, char *uname, md_error_t *ep)
{
int i;
int rval = 0;
/* raidp cannot be NULL, if it is then the RCM cache is corrupt */
/*
* Now check each column in the device. We cannot rely upon the state
* of the device because if a hotspare is in use all the states are
* set to Okay, both at the metadevice layer and the column layer.
*/
"SVM: raid5 checking %s state %s 0x%x\n",
/*
 * It is possible for the column to be a soft partition,
 * so the soft partition needs to be checked if this is
 * the case. It is *not* valid for any other type of
 * metadevice to be used as a column.
*/
if (metaismeta(np)) {
	/* this is a metadevice, i.e. a soft partition */
"SVM: raid5 %s is broken\n", uname);
return (REDUNDANT);
} else if (rval == NOTREDUNDANT &&
"SVM: raid5 device is broken, hotspared\n");
return (REDUNDANT);
}
continue;
}
continue;
/*
* Found the device. Check if it is broken or hotspared.
*/
"SVM: raid5 column device is broken\n");
return (REDUNDANT);
}
"SVM: raid5 column device is broken, hotspared\n");
return (REDUNDANT);
}
}
return (NOTREDUNDANT);
}
/*
* check_stripe()
*
* Check the status of the passed in device within the stripe in question.
*
* Input:
* mdsetname_t * the name of the set
* mdname_t * the stripe that is being examined
* char * the device which needs to be checked
* md_error_t * error pointer (not used)
* Return:
* int REDUNDANT - device is redundant and can be
* removed
* NOTREDUNDANT - device cannot be removed
* NOTINDEVICE - device is not part of this
* component
*/
static int
check_stripe(mdsetname_t *sp, mdname_t *np, char *uname, md_error_t *ep)
{
char *miscname;
int row;
int col;
/* stripep cannot be NULL, if it is then the RCM cache is corrupt */
/*
* If the stripe is not a parent then nothing can be done, user
* must close the device and then fix the devices.
*/
"SVM: stripe is a top level device\n");
return (NOTREDUNDANT);
}
/*
 * Only NULL when the replicas are in an inconsistent state,
 * i.e. the device says it is the parent of X but X does not
* exist.
*/
return (NOTREDUNDANT);
}
/*
* Get the type of the parent and make sure that it is a mirror,
* if it is then need to find out the number of submirrors, and
* if it is not a mirror then this is not a REDUNDANT device.
*/
/*
* Again something is wrong with the configuration.
*/
return (NOTREDUNDANT);
}
"SVM: %s is a %s and not redundant\n",
return (NOTREDUNDANT);
}
/* now the components in the row */
"SVM: stripe comp %s check\n",
continue;
"SVM: component state: %s\n",
/* device is broken and hotspared */
"SVM: stripe %s broken, hotspare active\n",
uname);
return (REDUNDANT);
}
/*
* LAST_ERRED is a special case. If the state of a
* component is CS_LAST_ERRED then this is the last
* copy of the data and we need to keep using it, even
* though we had errors. Thus, we must block the DR
* request. If you follow the documented procedure for
* fixing each component (fix devs in maintenance
* before last erred) then the mirror will
* automatically transition Last Erred components to
* the Erred state after which they can be DRed out.
*/
/* device is broken */
"SVM: stripe %s is broken\n", uname);
return (REDUNDANT);
}
/*
* Short circuit - if here the component has been
* found in the column so no further processing is
* required here.
*/
return (NOTREDUNDANT);
}
}
/*
* Only get to this point if the device (uname) has not been
* found in the stripe. This means that there is something
* wrong with the device dependency list.
*/
"SVM: component %s is not part of %s\n",
return (NOTINDEVICE);
}
/*
* check_mirror()
*
 * Make sure that the mirror has more than one submirror.
*
* Input:
* mdsetname_t * the name of the set
* mdname_t * the stripe that is being examined
* Return:
 * int REDUNDANT - mirror has more than 1 submirror
* NOTREDUNDANT - mirror has 1 submirror
*/
static int
check_mirror(mdsetname_t *sp, mdname_t *np)
{
/* mirrorp cannot be NULL, if it is then the RCM cache is corrupt */
/*
* Need to check how many submirrors that the mirror has.
*/
	/* Is this submirror being used? If not, continue */
if (submirnamep == NULL)
continue;
nsm++;
}
/*
* If there is only one submirror then there is no redundancy
* in the configuration and the user needs to take some other
 * action before using cfgadm on the device, i.e. close the metadevice.
*/
if (nsm == 1) {
"SVM: only one submirror unable to allow action\n");
return (NOTREDUNDANT);
}
return (REDUNDANT);
}
/*
* check_device()
*
* Check the current status of the underlying device.
*
* Input:
* deventry_t * the device that is being checked
* Return:
* int REDUNDANT - device is redundant and can be
* removed
* NOTREDUNDANT - device cannot be removed
* Locking:
* None
*
 * The check_device code path (the functions called by check_device) uses
 * libmeta calls directly to determine if the specified device is
 * redundant or not. This can lead to conflicts between data cached in
* libmeta and data that is being cached by this rcm module. Since the
* rcm cache is our primary source of information here, we need to make
* sure that we are not getting stale data from the libmeta caches.
* We use meta_invalidate_name throughout this code path to clear the
* cached data in libmeta in order to ensure that we are not using stale data.
*/
static int
check_device(deventry_t *deventry)
{
int ret;
/*
* should not be null because the caller has already figured out
* there are dependent devices.
*/
do {
continue;
}
/*
 * The device *should* be a metadevice and so we need to see if
* it contains a setname.
*/
sname);
if (ret != 1)
"SVM: unable to get setname for \"%s\", error %s\n",
break;
}
case SVM_TRANS:
/*
 * No code to check trans devices because UFS logging
 * should be used instead.
*/
"SVM: Use UFS logging instead of trans devices\n");
break;
case SVM_SLICE:
case SVM_STRIPE:
case SVM_CONCAT:
break;
case SVM_MIRROR:
/*
* No check here as this is performed by the one
* above when the submirror is checked.
*/
"SVM: Mirror check is done by the stripe check\n");
break;
case SVM_RAID:
/*
 * Raid5 devices can be built on soft partitions or
 * slices, so the check here is for a raid5 device
 * built on top of slices.
*/
break;
case SVM_SOFTPART:
/*
* Raid5 devices can be built on top of soft partitions
* and so they have to be checked.
*/
&error);
break;
default:
break;
}
break;
return (rval);
}
/*
* svm_unregister_device
*
* Unregister the device specified by the deventry
*
* Input:
* rcm_handle_t * information for RCM
* deventry_t * description of the device to be
* unregistered
*
* Return:
* int 0 - successfully unregistered
* != 0 - failed to unregister
*
* Locking:
* None
*
* If the deventry_t has a devicesname, we will first attempt to unregister
* using that name. If that fails then we'll attempt to unregister using
* devname. The reason for this strategy has to do with the way that
* rcm_register_interest works. If passed a /dev/ name,
* rcm_register_interest uses realpath() to convert it to a /devices name.
* Thus, we are more likely to succeed if we use devicesname first.
*/
static int
svm_unregister_device(rcm_handle_t *hd, deventry_t *d)
{
int deleted;
if (d->devicesname) {
d->devname, d->devicesname);
} else {
d->devname);
}
deleted = -1;
if (d->devicesname != NULL) {
/*
* Try to unregister via the /devices entry first. RCM
* converts /dev/ entries to /devices entries before
* storing them. Thus, if this item has a /devices name
* available, we should use it for unregistering.
*/
d->devicesname, 0);
}
if (deleted != 0) {
/*
* Either we did not have a /devices name or the attempt to
* unregister using the /devices name failed. Either way
* we'll now try to unregister using the conventional name.
*/
}
if (deleted != 0) {
"for %s\n", d->devname);
}
return (deleted);
}