md_subr.c revision 323a81d93e2f58a7d62f6e523f9fddbc029d3d0b
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Driver for Virtual Disk.
*/
#include <sys/sysmacros.h>
#include <sys/sysevent.h>
/*
* Machine specific Hertz is kept here
*/
/*
* Externs.
*/
extern int (*mdv_strategy_tstpnt)(buf_t *, int, void*);
extern md_set_io_t md_set_io[];
extern md_ops_t *md_opslist;
extern ddi_modhandle_t *md_mods;
extern md_krwlock_t md_unit_array_rw;
extern kcondvar_t md_cv;
extern md_krwlock_t hsp_rwlp;
extern md_krwlock_t ni_rwlp;
extern int md_num_daemons;
extern int md_status;
extern int md_ioctl_cnt;
extern int md_mtioctl_cnt;
extern struct metatransops metatransops;
extern md_event_queue_t *md_event_queue;
extern md_resync_t md_cpr_resync;
extern int md_done_daemon_threads;
extern int md_ff_daemon_threads;
extern void mddb_setexit(mddb_set_t *s);
#ifdef DEBUG
/* Flag to switch on debug messages */
int md_release_reacquire_debug = 0; /* debug flag */
#endif
/*
*
* The md_request_queues is table of pointers to request queues and the number
* of threads associated with the request queues.
* When the number of threads is set to 1, then the order of execution is
* sequential.
* The number of threads for all the queues have been defined as global
* variables to enable kernel tuning.
*
*/
#define MD_DAEMON_QUEUES 10
{0, 0}
};
/*
* Number of times a message is retried before issuing a warning to the operator
*/
#define MD_MN_WARN_INTVL 10
/*
* Setting retry cnt to one (pre decremented) so that we actually do no
* retries when committing/deleting a mddb rec. The underlying disk driver
* does several retries to check if the disk is really dead or not so there
* is no reason for us to retry on top of the drivers retries.
*/
/*
* Bug # 1212146
* Before this change the user had to pass in a short aligned buffer because of
* problems in some underlying device drivers. This problem seems to have been
* corrected in the underlying drivers so we will default to not requiring any
* alignment. If the user needs to check for a specific alignment,
* the behavior before this fix, the md_uio_alignment_mask would be set to 1,
* to check for word alignment, it can be set to 3, for double word alignment,
* it can be set to 7, etc.
*
* [Other part of fix is in function md_chk_uio()]
*/
static int md_uio_alignment_mask = 0;
/*
* for md_dev64_t translation
*/
struct md_xlate_table *md_tuple_table;
struct md_xlate_major_table *md_major_tuple_table;
int md_tuple_length;
/* Function declarations */
/*
* manipulate global status
*/
/*
 * md_set_status:
 * Set the given flag bits in the global md_status word, serialized
 * by the global md_mx mutex.
 * NOTE(review): the statement that actually ORs 'bits' into md_status
 * appears to be missing between the mutex enter/exit in this copy of
 * the file -- confirm against the full source; as shown the function
 * only cycles the mutex.
 */
void
md_set_status(int bits)
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
/*
 * md_clr_status:
 * Clear the given flag bits in the global md_status word, serialized
 * by the global md_mx mutex.
 * NOTE(review): the statement that clears 'bits' from md_status appears
 * to be missing between the mutex enter/exit in this copy of the file --
 * confirm against the full source.
 */
void
md_clr_status(int bits)
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
int
{
int result;
mutex_enter(&md_mx);
mutex_exit(&md_mx);
return (result);
}
void
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
void
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
return (result);
}
/*
* md_unit_readerlock_common:
* -------------------------
* Mark the given unit as having a reader reference. Spin waiting for any
* writer references to be released.
*
* Input:
* ui unit reference
* lock_held 0 => ui_mx needs to be grabbed
* 1 => ui_mx already held
* Output:
* mm_unit_t corresponding to unit structure
* ui->ui_readercnt incremented
*/
static void *
{
if (!lock_held)
if (panicstr) {
panic("md: writer lock is held");
break;
}
}
ui->ui_readercnt++;
if (!lock_held)
}
void *
{
return (md_unit_readerlock_common(ui, 0));
}
/*
* md_unit_writerlock_common:
* -------------------------
* Acquire a unique writer reference. Causes previous readers to drain.
* The lock may be dropped temporarily so that a ksend_message can be dispatched.
*
* Input:
* ui unit reference
* lock_held 0 => grab ui_mx
* 1 => ui_mx already held on entry
* Output:
* mm_unit_t reference
*/
static void *
{
if (panicstr)
panic("md: writer lock not allowed");
if (!lock_held)
ui->ui_wanabecnt++;
if (--ui->ui_wanabecnt == 0)
}
if (!lock_held)
}
void *
{
return (md_unit_writerlock_common(ui, 0));
}
/*
* md_unit_readerexit_common:
* -------------------------
* Release the readerlock for the specified unit. If the reader count reaches
* zero and there are waiting writers (MD_UL_WANABEWRITER set) wake them up.
*
* Input:
* ui unit reference
* lock_held 0 => ui_mx needs to be acquired
* 1 => ui_mx already held
*/
static void
{
if (!lock_held)
ui->ui_readercnt--;
if (!lock_held)
}
void
{
}
/*
* md_unit_writerexit_common:
* -------------------------
* Release the writerlock currently held on the unit. Wake any threads waiting
* on becoming reader or writer (MD_UL_WANABEWRITER set).
*
* Input:
* ui unit reference
* lock_held 0 => ui_mx to be acquired
* 1 => ui_mx already held
*/
static void
{
if (!lock_held)
if (!lock_held)
}
void
{
}
void *
{
if (panicstr) {
panic("md: writer lock is held");
break;
}
}
io->io_readercnt++;
}
void *
{
if (panicstr)
panic("md: writer lock not allowed");
io->io_wanabecnt++;
if (--io->io_wanabecnt == 0)
}
}
void
{
io->io_readercnt--;
}
}
void
{
}
/*
* Attempt to grab that set of locks defined as global.
* A mask containing the set of global locks that are owned upon
* entry is input. Any additional global locks are then grabbed.
* This keeps the caller from having to know the set of global
* locks.
*/
static int
{
/*
* The current implementation has been verified by inspection
* and test to be deadlock free. If another global lock is
* added, changing the algorithm used by this function should
* be considered. With more than 2 locks it is difficult to
* guarantee that locks are being acquired in the correct order.
* The safe approach would be to drop all of the locks that are
* owned at function entry and then reacquire all of the locks
* in the order defined by the lock hierarchy.
*/
mutex_enter(&md_mx);
if (!(global_locks_owned_mask & MD_GBL_IOCTL_LOCK)) {
while ((md_mtioctl_cnt != 0) ||
(md_status & MD_GBL_IOCTL_LOCK)) {
mutex_exit(&md_mx);
return (EINTR);
}
}
md_ioctl_cnt++;
}
if (!(global_locks_owned_mask & MD_GBL_HS_LOCK)) {
while (md_status & MD_GBL_HS_LOCK) {
mutex_exit(&md_mx);
return (EINTR);
}
}
}
mutex_exit(&md_mx);
return (0);
}
/*
* Release the set of global locks that were grabbed in md_global_lock_enter
* that were not already owned by the calling thread. The set of previously
* owned global locks is passed in as a mask parameter.
*/
static int
{
mutex_enter(&md_mx);
/* If MT ioctl decrement mt_ioctl_cnt */
if ((flags & MD_MT_IOCTL)) {
} else {
if (!(global_locks_owned_mask & MD_GBL_IOCTL_LOCK)) {
/* clear the lock and decrement count */
md_ioctl_cnt--;
}
if (!(global_locks_owned_mask & MD_GBL_HS_LOCK))
md_status &= ~MD_GBL_HS_LOCK;
}
if (flags & MD_READER_HELD)
if (flags & MD_WRITER_HELD)
if (flags & MD_IO_HELD)
}
mutex_exit(&md_mx);
return (code);
}
/*
* The two functions, md_ioctl_lock_enter, and md_ioctl_lock_exit make
* use of the md_global_lock_{enter|exit} functions to avoid duplication
* of code. They rely upon the fact that the locks that are specified in
* the input mask are not acquired or freed. If this algorithm changes
* as described in the block comment at the beginning of md_global_lock_enter
* then it will be necessary to change these 2 functions. Otherwise these
* functions will be grabbing and holding global locks unnecessarily.
*/
/*
 * md_ioctl_lock_enter:
 * Acquire only the global ioctl lock.  The mask handed to
 * md_global_lock_enter() reports every other global lock as already
 * owned, so MD_GBL_IOCTL_LOCK is the single lock actually grabbed.
 * Returns 0 on success or EINTR if the wait was interrupted.
 */
int
md_ioctl_lock_enter(void)
{
	int err;

	err = md_global_lock_enter(~MD_GBL_IOCTL_LOCK);
	return (err);
}
/*
* If md_ioctl_lock_exit is being called at the end of an ioctl before
* returning to user space, then ioctl_end is set to 1.
* Otherwise, the ioctl lock is being dropped in the middle of handling
* an ioctl and will be reacquired before the end of the ioctl.
* Do not attempt to process the MN diskset mddb parse flags unless
* ioctl_end is true - otherwise a deadlock situation could arise.
*/
int
{
int ret_val;
mddb_set_t *s;
int i;
int err;
int rval = 1;
int flag;
/* release only the ioctl lock */
/*
* If md_ioctl_lock_exit is being called with a possible lock held
* (ioctl_end is 0), then don't check the MN disksets since the
* call to mddb_setenter may cause a lock ordering deadlock.
*/
if (!ioctl_end)
return (ret_val);
/*
* Walk through disksets to see if there is a MN diskset that
* has messages that need to be sent. Set must be snarfed and
* be a MN diskset in order to be checked.
*
* In a MN diskset, this routine may send messages to the
* rpc.mdcommd in order to have the slave nodes re-parse parts
* of the mddb. Messages can only be sent with no locks held,
* so if mddb change occurred while the ioctl lock is held, this
* routine must send the messages.
*/
for (i = 1; i < md_nsets; i++) {
status = md_get_setstatus(i);
/* Set must be snarfed and be a MN diskset */
continue;
/* Grab set lock so that set can't change */
continue;
/* Re-get set status now that lock is held */
status = md_get_setstatus(i);
/*
* If MN parsing block flag is set - continue to next set.
*
* If s_mn_parseflags_sending is non-zero, then another thread
* is already currently sending a parse message, so just
* release the set mutex. If this ioctl had caused an mddb
* change that results in a parse message to be generated,
* the thread that is currently sending a parse message would
* generate the additional parse message.
*
* If s_mn_parseflags_sending is zero then loop until
* s_mn_parseflags is 0 (until there are no more
* messages to send).
* While s_mn_parseflags is non-zero,
* put snapshot of parse_flags in s_mn_parseflags_sending
* set s_mn_parseflags to zero
* release set mutex
* send message
* re-grab set mutex
* set s_mn_parseflags_sending to zero
*
* If set is STALE, send message with NO_LOG flag so that
* rpc.mdcommd won't attempt to log message to non-writeable
* replica.
*/
KM_SLEEP);
while (((s->s_mn_parseflags_sending & MDDB_PARSE_MASK) == 0) &&
(s->s_mn_parseflags & MDDB_PARSE_MASK) &&
(!(status & MD_SET_MNPARSE_BLK))) {
/* Grab snapshot of parse flags */
s->s_mn_parseflags_sending = s->s_mn_parseflags;
s->s_mn_parseflags = 0;
/*
* Send the message to the slaves to re-parse
* the indicated portions of the mddb. Send the status
* of the 50 mddbs in this set so that slaves know
* which mddbs that the master node thinks are 'good'.
* Otherwise, slave may reparse, but from wrong
* replica.
*/
for (i = 0; i < MDDB_NLB; i++) {
mddb_parse_msg->msg_lb_flags[i] =
}
KM_SLEEP);
while (rval != 0) {
flag = 0;
if (status & MD_SET_STALE)
flag |= MD_MSGF_NO_LOG;
(char *)mddb_parse_msg,
sizeof (mddb_parse_msg), kresult);
/* if the node hasn't yet joined, it's Ok. */
(kresult->kmmr_comm_state !=
MDMNE_NOT_JOINED)) {
"MD_MN_MSG_MDDB_PARSE");
"Unable to send mddb update "
"message to other nodes in "
"diskset %s\n", s->s_setname);
rval = 1;
}
}
/*
* Re-grab mutex to clear sending field and to
* see if another parse message needs to be generated.
*/
s->s_mn_parseflags_sending = 0;
}
}
return (ret_val);
}
/*
* Called when in an ioctl and need readerlock.
*/
void *
{
return (md_unit_readerlock_common(ui, 0));
}
/*
* Called when in an ioctl and need writerlock.
*/
void *
{
return (md_unit_writerlock_common(ui, 0));
}
void *
{
return (md_io_writerlock(ui));
}
void
{
}
void
{
}
void
{
}
/*
* md_ioctl_releaselocks:
* --------------------
* Release the unit locks that are held and stop subsequent
* md_unit_reader/writerlock calls from progressing. This allows the caller
* to send messages across the cluster when running in a multinode
* environment.
* ioctl originated locks (via md_ioctl_readerlock/md_ioctl_writerlock) are
* allowed to progress as normal. This is required as these typically are
* invoked by the message handler that may be called while a unit lock is
* marked as released.
*
* On entry:
* variety of unit locks may be held including ioctl lock
*
* On exit:
* locks released and unit structure updated to prevent subsequent reader/
* writer locks being acquired until md_ioctl_reacquirelocks is called
*/
void
{
/* This actually releases the locks. */
}
/*
* md_ioctl_reacquirelocks:
* ----------------------
* Reacquire the locks that were held when md_ioctl_releaselocks
* was called.
*
* On entry:
* No unit locks held
* On exit:
* locks held that were held at md_ioctl_releaselocks time including
* the ioctl lock.
*/
void
{
if (flags & MD_MT_IOCTL) {
mutex_enter(&md_mx);
mutex_exit(&md_mx);
} else {
while (md_ioctl_lock_enter() == EINTR);
}
if (flags & MD_ARRAY_WRITER) {
} else if (flags & MD_ARRAY_READER) {
}
if (flags & MD_IO_HELD) {
(void) md_io_writerlock(ui);
}
if (flags & MD_READER_HELD) {
} else if (flags & MD_WRITER_HELD) {
}
/* Wake up any blocked readerlock() calls */
}
}
void
{
mdi_unit_t *ui;
int flags;
if (flags & MD_READER_HELD) {
}
if (flags & MD_WRITER_HELD) {
}
if (flags & MD_IO_HELD) {
}
}
}
void
{
}
void
{
}
/*
* Called when in an ioctl and need opencloselock.
* Sets flags in lockp for READER_HELD.
*/
void *
{
void *un;
/* Maintain mutex across the readerlock call */
return (un);
}
/*
* Clears reader lock using md_ioctl instead of md_unit
* and updates lockp.
*/
void
{
mdi_unit_t *ui;
}
/*
* Clears reader lock using md_ioctl instead of md_unit
* and updates lockp.
* Does not acquire or release the ui_mx lock since the calling
* routine has already acquired this lock.
*/
void
{
mdi_unit_t *ui;
}
void *
{
void *un;
/* Maintain mutex across the readerlock call */
return (un);
}
void
{
}
/*
* Drop the openclose and readerlocks without acquiring or
* releasing the ui_mx lock since the calling routine has
* already acquired this lock.
*/
void
{
}
int
)
{
int isopen;
/* check status */
return (isopen);
}
int
int flag,
int otyp
)
{
int err = 0;
/* check type and flags */
goto out;
}
goto out;
}
/* count and flag open */
/* setup kstat, return success */
return (0);
/* return error */
out:
return (err);
}
int
int otyp
)
{
int err = 0;
unsigned i;
/* check type and flags */
goto out;
goto out;
}
/* count and flag closed */
else
for (i = 0; (i < OTYPCNT); ++i)
/* teardown kstat, return success */
return (0);
}
/* return success */
out:
return (err);
}
{
int i;
/*
* check to see if we're in an upgrade situation
* if we are not in upgrade just return the input device
*/
if (!MD_UPGRADE)
return (targ_devt);
i = 0;
while (i != md_tuple_length) {
}
i++;
}
return (NODEV64);
}
{
int i;
if (!MD_UPGRADE)
return (mini_devt);
i = 0;
while (i != md_tuple_length) {
}
i++;
}
return (NODEV64);
}
/*
 * md_xlate_free:
 * Release the md_dev64_t translation tuple table built for the upgrade
 * path ('size' is the allocation size in bytes).
 * NOTE(review): the body is empty in this copy of the file; presumably
 * it should kmem_free() md_tuple_table -- confirm the free call wasn't
 * dropped from this revision.
 */
void
md_xlate_free(int size)
{
}
char *
{
int i;
if (!MD_UPGRADE)
return (ddi_major_to_name(maj));
for (i = 0; i < md_majortab_len; i++) {
break;
}
}
return (drv_name);
}
md_targ_name_to_major(char *drv_name)
{
int i;
if (!MD_UPGRADE)
return (ddi_name_to_major(drv_name));
for (i = 0; i < md_majortab_len; i++) {
drv_name)) == 0) {
break;
}
}
return (maj);
}
void
{
int i;
for (i = 0; i < md_majortab_len; i++) {
}
}
/* functions return a pointer to a function which returns an int */
intptr_t (*
{
mdi_unit_t *ui;
int i;
/*
* Return the first named service found.
* Use this path when it is known that there is only
* one named service possible (e.g., hotspare interface)
*/
for (i = 0; i < MD_NOPS; i++) {
continue;
}
continue;
return (sp->md_service);
sp++;
}
}
return (Default);
}
/*
* Return the named service for the given modindex.
* This is used if there are multiple possible named services
* and each one needs to be called (e.g., poke hotspares)
*/
return (Default);
return (Default);
return (Default);
return (sp->md_service);
sp++;
}
return (Default);
}
/*
* Return the named service for this md_dev64_t
*/
return (Default);
return (NULL);
return (NULL);
return (Default);
return (sp->md_service);
sp++;
}
return (Default);
}
/*
* md_daemon callback routine
*/
{
int ret = 0; /* assume success */
switch (code) {
case CB_CODE_CPR_CHKPT:
/*
* Check for active resync threads
*/
if ((md_cpr_resync.md_mirror_resync > 0) ||
(md_cpr_resync.md_raid_resync > 0)) {
"synchronization threads running.");
"a later time.");
ret = -1;
break;
}
/* cv_timedwait() returns -1 if it times out. */
break;
break;
case CB_CODE_CPR_RESUME:
break;
}
return (ret != -1);
}
void
{
return;
/*
* Register cpr callback
*/
/*CONSTCOND*/
while (1) {
if (pass_thru) {
/*
* CALLB_CPR_EXIT Will do
* mutex_exit(&anchor->a_mx)
*/
return;
}
if (md_get_status() & MD_GBL_DAEMONS_DIE) {
mutex_enter(&md_mx);
mutex_exit(&md_mx);
/*
* CALLB_CPR_EXIT will do
* mutex_exit(&anchor->a_mx)
*/
thread_exit();
}
}
}
/*NOTREACHED*/
}
/*
* daemon_request:
*
* Adds requests to appropriate requestq which is
* anchored by *anchor.
* The request is the first element of a doubly linked circular list.
* When the request is a single element, the forward and backward
* pointers MUST point to the element itself.
*/
void
{
int i = 0;
/* set it to the new style */
}
/* scan the list and add the function to each element */
do {
i++;
/* save pointer to tail of the request list */
/* stats */
/* now add the list to request queue */
}
void
{
int sent_log = 0;
while (mddb_commitrec(recid)) {
if (! sent_log) {
"md: state database commit failed");
sent_log = 1;
}
/*
* Setting retry cnt to one (pre decremented) so that we
* actually do no retries when committing/deleting a mddb rec.
* The underlying disk driver does several retries to check
* if the disk is really dead or not so there
* is no reason for us to retry on top of the drivers retries.
*/
if (--retry == 0) {
"md: Panic due to lack of DiskSuite state\n"
" database replicas. Fewer than 50%% of "
"the total were available,\n so panic to "
"ensure data integrity.");
} else {
panic("md: state database problem");
}
/*NOTREACHED*/
}
}
}
void
{
int sent_log = 0;
while (mddb_commitrecs(recids)) {
if (! sent_log) {
"md: state database commit failed");
sent_log = 1;
}
/*
* Setting retry cnt to one (pre decremented) so that we
* actually do no retries when committing/deleting a mddb rec.
* The underlying disk driver does several retries to check
* if the disk is really dead or not so there
* is no reason for us to retry on top of the drivers retries.
*/
if (--retry == 0) {
/*
* since all the records are part of the same set
* use the first one to get setno
*/
"md: Panic due to lack of DiskSuite state\n"
" database replicas. Fewer than 50%% of "
"the total were available,\n so panic to "
"ensure data integrity.");
} else {
panic("md: state database problem");
}
/*NOTREACHED*/
}
}
}
void
{
int sent_log = 0;
while (mddb_deleterec(recid)) {
if (! sent_log) {
"md: state database delete failed");
sent_log = 1;
}
/*
* Setting retry cnt to one (pre decremented) so that we
* actually do no retries when committing/deleting a mddb rec.
* The underlying disk driver does several retries to check
* if the disk is really dead or not so there
* is no reason for us to retry on top of the drivers retries.
*/
if (--retry == 0) {
"md: Panic due to lack of DiskSuite state\n"
" database replicas. Fewer than 50%% of "
"the total were available,\n so panic to "
"ensure data integrity.");
} else {
panic("md: state database problem");
}
/*NOTREACHED*/
}
}
}
/*
* md_holdset_enter is called in order to hold the set in its
* current state (loaded, unloaded, snarfed, unsnarfed, etc)
* until md_holdset_exit is called. This is used by the mirror
* code to mark the set as HOLD so that the set won't be
* unloaded while hotspares are being allocated in check_4_hotspares.
* The original fix to the mirror code to hold the set was to call
* md_haltsnarf_enter, but this will block all ioctls and ioctls
* must work for a MN diskset while hotspares are allocated.
*/
void
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
void
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
/*
* Returns a 0 if this thread marked the set as HOLD (success),
* returns a -1 if set was already marked HOLD (failure).
* Used by the release_set code to see if set is marked HOLD.
* HOLD is set by a daemon when hotspares are being allocated
* to mirror units.
*/
int
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
return (-1);
}
mutex_exit(&md_mx);
return (0);
}
void
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
void
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
void
{
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
/*
* ASSUMED that the md_unit_array_rw WRITER lock is held.
*/
int
{
int i, err;
return (0);
}
for (i = 0; i < MD_NOPS; i++) {
continue;
for (--i; i > 0; --i) {
continue;
(MD_HALT_OPEN, setno);
}
return (EBUSY);
}
}
for (i = 0; i < MD_NOPS; i++) {
continue;
for (i = 0; i < MD_NOPS; i++) {
continue;
(MD_HALT_OPEN, setno);
}
return (EBUSY);
}
}
}
for (i = 0; i < MD_NOPS; i++) {
continue;
if (err != 0)
"md: halt failed for %s, error %d",
}
/*
* Unload the devid namespace if it is loaded
*/
md_unload_namespace(setno, 0L);
}
return (0);
}
int
{
set_t i, j;
int err;
int init_queues;
char *name;
/*
* Grab the all of the global locks that are not
* already owned to ensure that there isn't another
* thread trying to access a global resource
* while the halt is in progress
*/
return (EINTR);
for (i = 0; i < md_nsets; i++)
/*
* Kill the daemon threads.
*/
rqp = &md_daemon_queues[0];
i = 0;
while (!NULL_REQUESTQ_ENTRY(rqp)) {
rqp = &md_daemon_queues[++i];
}
mutex_enter(&md_mx);
while (md_num_daemons != 0) {
mutex_exit(&md_mx);
mutex_enter(&md_mx);
}
mutex_exit(&md_mx);
for (i = 0; i < md_nsets; i++)
/*
* Only call into md_halt_set if s_un / s_ui are both set.
* If they are NULL this set hasn't been accessed, so it's
* pointless performing the call.
*/
if (md_halt_set(i, MD_HALT_CHECK)) {
if (md_start_daemons(init_queues))
"md: restart of daemon threads "
"failed");
for (j = 0; j < md_nsets; j++)
return (md_global_lock_exit(
MD_ARRAY_WRITER, NULL));
}
}
/*
* if we get here we are going to do it
*/
for (i = 0; i < md_nsets; i++) {
/*
* Only call into md_halt_set if s_un / s_ui are both set.
* If they are NULL this set hasn't been accessed, so it's
* pointless performing the call.
*/
if (err != 0)
"md: halt failed set %u, error %d",
(unsigned)i, err);
}
}
/*
* issue a halt unload to each module to indicate that it
* is about to be unloaded. Each module is called once, set
* has no meaning at this point in time.
*/
for (i = 0; i < MD_NOPS; i++) {
continue;
if (err != 0)
"md: halt failed for %s, error %d",
}
/* ddi_modclose the submodules */
for (i = 0; i < MD_NOPS; i++) {
/* skip if not open */
continue;
/* find and unlink from md_opslist */
pops = &md_opslist;
break;
}
}
/* uninitialize */
ops->md_selfindex = 0;
/* close */
if (err != 0)
"md: halt close failed for %s, error %d",
}
/* Unload the database */
mddb_unload();
for (i = 0; i < md_nsets; i++)
return (md_global_lock_exit(global_locks_owned_mask, 0,
MD_ARRAY_WRITER, NULL));
}
/*
* md_layered_open() is an internal routine only for SVM modules.
* So the input device will be a md_dev64_t, because all SVM modules internally
* work with that device type.
* ddi routines on the other hand work with dev_t. So, if we call any ddi
* routines from here we first have to convert that device into a dev_t.
*/
int
int md_oflags
)
{
int err;
return (ENODEV);
/* metadevice */
mdi_unit_t *ui;
/* open underlying driver */
/*
* As open() may change the device,
* send this info back to the caller.
*/
return (ret);
}
/* or do it ourselves */
(void) md_unit_openclose_enter(ui);
/* convert our ddi_dev back to the dev we were given */
return (err);
}
/*
* Open regular device, since open() may change dev_t give new dev_t
* back to the caller.
*/
return (err);
}
/*
* md_layered_close() is an internal routine only for SVM modules.
* So the input device will be a md_dev64_t, because all SVM modules internally
* work with that device type.
* ddi routines on the other hand work with dev_t. So, if we call any ddi
* routines from here we first have to convert that device into a dev_t.
*/
void
int md_cflags
)
{
/* metadevice */
/* close underlying driver */
return;
}
/* or do it ourselves */
(void) md_unit_openclose_enter(ui);
return;
}
/* close regular device */
}
/*
* saves a little code in mdstrategy
*/
int
{
else
md_biodone(bp);
return (1);
}
static int md_write_label = 0;
int
{
/*
* Check early for unreasonable block number.
*
* b_blkno is defined as adaddr_t which is typedef'd to a long.
* A problem occurs if b_blkno has bit 31 set and un_total_blocks
* doesn't, b_blkno is then compared as a negative number which is
* always less than a positive.
*/
/*
* make sure we don't clobber any labels
*/
}
}
return (0);
}
/*
* init_request_queue: initializes the request queues and creates the threads.
* return value = 0 :invalid num_threads
* = n : n is the number of threads created.
*/
int
void (*threadfn)(), /* function to start the thread */
int pri, /* thread priority */
int init_queue) /* flag to init queues */
{
struct mdq_anchor *rqhead;
int i;
int num_threads;
return (0);
if (init_queue) {
}
for (i = 0; i < num_threads; i++) {
}
return (i);
}
/*
 * start_daemon:
 * Thread entry point for a daemon request-queue service thread.
 * md_daemon() services requests anchored at 'q' and is not expected
 * to return; the ASSERT(0) documents (and, in DEBUG kernels, enforces)
 * that expectation.
 */
static void
start_daemon(struct mdq_anchor *q)
{
md_daemon(0, q);
ASSERT(0);
}
/*
* Creates all the md daemons.
* Global:
* md_num_daemons is set to number of daemons.
* MD_GBL_DAEMONS_LIVE flag set to indicate the daemons are active.
*
* Return value: 0 success
* 1 failure
*/
int
{
int cnt;
int i;
int retval = 0;
if (md_get_status() & MD_GBL_DAEMONS_LIVE) {
return (retval);
}
rqp = &md_daemon_queues[0];
i = 0;
while (!NULL_REQUESTQ_ENTRY(rqp)) {
retval = 1;
break;
}
/*
* initialize variables
*/
md_num_daemons += cnt;
rqp = &md_daemon_queues[++i];
}
return (retval);
}
int
{
int i, err;
/*
* See if the submodule is mdopened. If not, i is the index of the
* next empty slot.
*/
MD_DRIVERNAMELEN) == 0)
return (i);
if (i == (MD_NOPS - 1))
return (-1);
}
if (drvrid < 0) {
/* Do not try to add any records to the DB when stale. */
return (-1);
}
if (drvrid < 0)
return (-1);
/* open and import the md_ops of the submodules */
return (-1);
}
"unable to import md_interface_ops from %s, error %d\n",
(void) ddi_modclose(mod);
return (-1);
}
/* ddi_modsym returns pointer to md_interface_ops in submod */
/* initialize */
ops->md_selfindex = i;
/* plumb */
md_opslist = ops;
/* return index */
return (i);
}
int
{
int i;
int modindex;
int drvid;
int local_dont_load;
return (-1);
for (i = 0; name[i] != 0; i++)
if (i == (MD_DRIVERNAMELEN -1))
return (-1);
/*
* If set is STALE, set local_dont_load to 1 since no records
* should be added to DB when stale.
*/
local_dont_load = 1;
} else {
}
/*
* Single thread ioctl module binding with respect to
* similar code executed in md_loadsubmod that is called
* from md_snarf_db_set (which is where that path does
* its md_haltsnarf_enter call).
*/
/* See if the submodule is already ddi_modopened. */
MD_DRIVERNAMELEN) == 0) {
if (! local_dont_load &&
== MD_KEYBAD) {
if (!db_notrequired)
goto err;
}
}
return (i);
}
if (i == (MD_NOPS -1))
break;
}
if (local_dont_load)
goto err;
/* ddi_modopen the submodule */
if (modindex < 0)
goto err;
return (modindex);
return (-1);
}
void
{
mdi_unit_t *ui;
if (mdv_strategy_tstpnt)
return;
(void) bdev_strategy(bp);
return;
}
}
/*
* md_call_ioctl:
* -------------
* Issue the specified ioctl to the device associated with the given md_dev64_t
*
* Arguments:
* dev - underlying device [md_dev64_t]
* cmd - ioctl to perform
* data - arguments / result location
* lockp - lock reference
*
* Returns:
* 0 success
* !=0 Failure (error code)
*/
int
{
int rval;
mdi_unit_t *ui;
/*
* See if device is a metadevice. If not call cdev_ioctl(), otherwise
* call the ioctl entry-point in the metadevice.
*/
int rv;
ddi_get_cred(), &rv);
} else {
}
return (rval);
}
void
{
while (next) {
return;
}
}
}
int
{
return (0);
return (1);
return (0);
return (1);
return (0);
}
{
mdi_unit_t *ui;
return (MD_NO_PARENT);
return (parent);
}
void
{
mdi_unit_t *ui;
return;
}
void
{
mdi_unit_t *ui;
return;
}
int
int labeled,
{
int err;
/*
* RW lock on hot_spare_interface. We don't want it to change from
* underneath us. If hot_spare_interface is NULL we're going to
* need to set it. So we need to upgrade to a WRITER lock. If that
* doesn't work, we drop the lock and reenter as WRITER. This leaves
* a small hole during which hot_spare_interface could be modified
* so we check it for NULL again. What a pain. Then if still null
* load from md_get_named_service.
*/
if (hot_spare_interface == NULL) {
if (hot_spare_interface != NULL) {
err = ((*hot_spare_interface)
sblock));
return (err);
}
}
"hot spare interface", 0);
}
if (hot_spare_interface == NULL) {
return (0);
}
err = ((*hot_spare_interface)
return (err);
}
void
{
}
int
)
{
int err;
if (md_event_queue == NULL)
return (0);
if (notify_interface == NULL) {
if (notify_interface != NULL) {
err = ((*notify_interface)
return (err);
}
}
"notify interface", 0);
}
if (notify_interface == NULL) {
return (0);
}
return (err);
}
char *
{
char *setname;
char name[MD_MAX_CTDLEN];
int rtn = 0;
/*
* Verify that the passed dev_t refers to a valid metadevice.
* If it doesn't we can make no assumptions as to what the device
* name is. Return NULL in these cases.
*/
return (NULL);
}
name[0] = '\0';
switch (tag) {
case SVM_TAG_HSP:
if (setno == 0) {
(unsigned)MD_MIN2UNIT(mnum));
} else {
}
}
break;
case SVM_TAG_DRIVE:
break;
case SVM_TAG_HOST:
break;
case SVM_TAG_SET:
rtn = 0;
}
break;
default:
break;
}
/* Check if we got any rubbish for any of the snprintf's */
return (NULL);
}
}
/* Sysevent subclass and mdnotify event type pairs */
struct node {
char *se_ev;
};
/*
* Table must be sorted in case sensitive ascending order of
* the sysevents values
*/
{ ESC_SVM_ADD, EQ_ADD },
{ ESC_SVM_ATTACH, EQ_ATTACH },
{ ESC_SVM_CHANGE, EQ_CHANGE },
{ ESC_SVM_CREATE, EQ_CREATE },
{ ESC_SVM_DELETE, EQ_DELETE },
{ ESC_SVM_DETACH, EQ_DETACH },
{ ESC_SVM_ENABLE, EQ_ENABLE },
{ ESC_SVM_ERRED, EQ_ERRED },
{ ESC_SVM_EXCHANGE, EQ_EXCHANGE },
{ ESC_SVM_GROW, EQ_GROW },
{ ESC_SVM_HS_FREED, EQ_HS_FREED },
{ ESC_SVM_HOST_ADD, EQ_HOST_ADD },
{ ESC_SVM_IOERR, EQ_IOERR },
{ ESC_SVM_OFFLINE, EQ_OFFLINE },
{ ESC_SVM_OK, EQ_OK },
{ ESC_SVM_ONLINE, EQ_ONLINE },
{ ESC_SVM_RELEASE, EQ_RELEASE },
{ ESC_SVM_REMOVE, EQ_REMOVE },
{ ESC_SVM_REPLACE, EQ_REPLACE },
};
};
{
low = 0;
if (p == 0) {
} else if (p < 0) {
} else {
}
}
return (EQ_EMPTY);
}
/*
* Log mdnotify event
*/
void
{
/* Translate sysevent into mdnotify event */
} else {
}
}
/*
* Log SVM sys events
*/
void
char *se_class,
char *se_subclass,
)
{
int err = DDI_SUCCESS;
char *devname;
extern dev_info_t *md_devinfo;
/* Raise the mdnotify event before anything else */
if (md_devinfo == NULL) {
return;
}
if (err == DDI_SUCCESS) {
/* Add the version number */
if (err != DDI_SUCCESS) {
goto fail;
}
/* Add the tag attribute */
if (err != DDI_SUCCESS) {
goto fail;
}
/* Add the set number attribute */
if (err != DDI_SUCCESS) {
goto fail;
}
/* Add the device id attribute */
if (err != DDI_SUCCESS) {
goto fail;
}
/* Add the device name attribute */
devname);
} else {
"unspecified");
}
if (err != DDI_SUCCESS) {
goto fail;
}
/* Attempt to post event */
if (err != DDI_SUCCESS) {
}
}
return;
fail:
}
void
{
}
/*
 * Build the in-core state for a unit: condition variables, an optional
 * per-unit I/O lock (when alloc_lock is set), and — on a 32-bit kernel
 * only — the "unavailable" marker for metadevices that cannot be
 * accessed there.
 */
void
{
mdi_unit_t *ui;
/* initialize all the incore conditional variables */
} else
if (alloc_lock) {
}
/* setup the unavailable field */
#if defined(_ILP32)
"metadevices are not accessible on a 32 bit kernel",
mnum);
}
#endif
}
/*
 * Tear down the in-core state created by the routine above.
 */
void
{
mdi_unit_t *ui;
/*
 * ASSUMPTION: md_unit_array_rw WRITER lock is held.
 */
return;
/* destroy the io lock if one is being used */
if (ui->ui_io_lock) {
}
/* teardown kstat */
/* destroy all the incore conditional variables */
}
/*
 * Remove name entries for nsv devices.  All removed entries belong to
 * the same diskset; each entry is removed from every side of the set.
 */
void
{
int i, s;
int max_sides;
if (nsv == 0)
return;
/* All entries removed are in the same diskset */
else
for (i = 0; i < nsv; i++)
for (s = 0; s < max_sides; s++)
}
/*
 * Checking user args before we get into physio - returns 0 for ok, else errno
 * We do a lot of checking against illegal arguments here because some of the
 * real disk drivers don't like certain kinds of arguments. (e.g xy doesn't
 * like odd address user buffer.) Those drivers capture bad arguments in
 * xxread and xxwrite. But since meta-driver calls their strategy routines
 * directly, two bad scenario might happen:
 * 1. the real strategy doesn't like it and panic.
 * 2. the real strategy doesn't like it and set B_ERROR.
 *
 * The second case is no better than the first one, since the meta-driver
 * will treat it as a media-error and off line the mirror metapartition.
 * (Too bad there is no way to tell what error it is.)
 *
 */
int
{
int i;
/*
 * Check for negative or not block-aligned offset
 */
if ((uio->uio_loffset < 0) ||
return (EINVAL);
}
/* Validate every iovec in the request */
i = uio->uio_iovcnt;
while (i--) {
return (EINVAL);
/*
 * Bug # 1212146
 * The default is to not check alignment, but we can now check
 * for a larger number of alignments if desired.
 */
return (EINVAL);
iov++;
}
return (0);
}
/*
 * Return a printable name for the given unit in a shared static
 * buffer.  Traditional metadevices are named from the minor number;
 * friendly-name units have their stored name looked up and, for a
 * non-local set, prefixed with the set name.
 * NOTE(review): the static buffer makes this non-reentrant; callers
 * presumably serialize access externally — confirm.
 */
char *
)
{
static char buf[MAXPATHLEN];
char *devname;
char *invalid = " (Invalid minor number %u) ";
char *metaname;
mdc_unit_t *un;
return (buf);
}
/*
 * If unit is not a friendly name unit, derive the name from the
 * minor number.
 */
/* This is a traditional metadevice */
if (setno == MD_LOCAL_SET) {
(unsigned)unit);
} else {
}
return (buf);
}
/*
 * It is a friendly name metadevice, so we need to get its name.
 */
/*
 * or not we are in the local set. Thus, we'll pull the
 * metaname from this string.
 */
goto out;
}
metaname++; /* move past slash */
if (setno == MD_LOCAL_SET) {
/* No set name. */
} else {
/* Include setname */
}
} else {
/* We couldn't find the name. */
}
out:
return (buf);
}
/*
 * Format the device name for the given dev into the caller's buffer
 * (falling back to a static buffer when none is supplied, presumably)
 * and return the buffer used.
 */
char *
char *buf,
)
{
static char mybuf[MD_MAX_CTDLEN];
int err;
} else {
}
if (err) {
} else {
}
}
return (buf);
}
/*
 * NOTE(review): references md_maxbcount — presumably the driver's
 * minphys-style routine bounding the transfer size; confirm.
 */
void
{
extern unsigned md_maxbcount;
}
void
{
}
void
{
}
/*
 * md_bioclone is needed as long as the real bioclone only takes a daddr_t
 * as block number.
 * We simply call bioclone with all input parameters but blkno, and set the
 * correct blkno afterwards.
 * Caveat Emptor: bp_mem must not be NULL!
 */
buf_t *
{
return (bp_mem);
}
/*
 * kstat stuff
 */
/*
 * Build the kstat module name: "<setname>/md" for a shared diskset,
 * plain "md" for the local set.
 */
void
)
{
char module[KSTAT_STRLEN];
char *p = module;
if (setno != MD_LOCAL_SET) {
char buf[64];
char *s = buf;
/* Copy the set name, bounded by the end of module[] */
while ((p < e) && (*s != '\0'))
*p++ = *s++;
*p++ = '/';
}
*p++ = 'm';
*p++ = 'd';
*p = '\0';
}
}
}
void
)
{
}
void
)
{
/*
 * kstat_delete() interface has it's own locking mechanism and
 * does not allow holding of kstat lock (ks_lock).
 * Note: ks_lock == ui_mx from the md_kstat_init_ui().
 */
}
}
void
)
{
}
/*
 * In the following subsequent routines, locks are held before checking the
 * validity of ui_kstat. This is done to make sure that we don't trip over
 * a NULL ui_kstat anymore.
 */
void
)
{
}
void
)
{
}
void
)
{
}
void
)
{
}
void
)
{
}
/*
 * I/O completion accounting for a unit's kstats.  `war` presumably
 * flags a write-after-read operation — TODO confirm against callers.
 */
void
mdi_unit_t *ui,
int war
)
{
/* check for end of device */
n_done = 0;
} else {
}
/* do accounting */
} else {
}
}
/*
 * Return the recorded pid; on a failed lookup, ASSERT in DEBUG builds
 * and return pid 0.
 */
{
ASSERT(0);
return ((pid_t)0);
} else {
return (valuep);
}
}
proc_t *
{
ASSERT(0);
} else {
return (valuep);
}
}
/*
 * this checks to see if a process/pid pair is still running. For the
 * currently held.
 */
int
{
int retval = 1;
return (0);
retval = 0;
return (retval);
}
/*
 * NAME: md_init_probereq
 *
 * DESCRIPTION: initializes a probe request. Parcels out the mnums such that
 * they can be dispatched to multiple daemon threads.
 *
 * PARAMETERS: struct md_probedev *p pointer ioctl input
 *
 * RETURN VALUE: Returns errno
 *
 */
int
{
int err = 0;
int modindx;
intptr_t (*probe_test)();
/*
 * Initialize the semaphores and mutex
 * for the request
 */
if (probe_test == NULL) {
/* Module provides no probe entry point; fail the request */
goto err_out;
}
return (err);
}
/*
 * NAME: md_probe_one
 *
 * DESCRIPTION: Generic routine for probing disks. This is called from the
 * daemon.
 *
 * PARAMETERS: probe_req_t *reqp pointer to the probe request structure.
 *
 */
void
{
mdi_unit_t *ui;
int err = 0;
/*
 * Validate the unit while holding the global ioctl lock, then
 * obtain the unit_writerlock. Once the writerlock has been obtained
 * we can release the global lock. As long as we hold one of these
 * locks this will prevent a metaclear operation being performed
 * on the metadevice because metaclear takes the readerlock (via
 * openclose lock).
 */
while (md_ioctl_lock_enter() == EINTR);
(void) md_unit_writerlock_common(ui, 0);
(void) md_ioctl_lock_exit(0, 0, 0, FALSE);
} else {
(void) md_ioctl_lock_exit(0, 0, 0, FALSE);
}
/* update the info in the probe structure */
mutex_enter(PROBE_MX(p));
if (err != 0) {
}
mutex_exit(PROBE_MX(p));
/* Wake the requester waiting on the probe semaphore */
sema_v(PROBE_SEMA(p));
}
char *
{
}
void
{
}
/*
 * Validate the list and skip invalid devices. Then create
 * a doubly linked circular list of devices to probe.
 * The hdr points to the head and tail of this list.
 */
static int
intptr_t (*probe_test)())
{
nodevcnt = 0;
/* Count (and skip) entries that fail validation */
nodevcnt++;
continue;
}
} else {
}
}
if (nodevcnt > 0)
/*
 * If there are no devices to be probed because they were
 * incorrect, then return an error.
 */
return (err);
}
/*
 * This routine increments the I/O count for set I/O operations. This
 * value is used to determine if an I/O can done. If a release is in
 * process this will return an error and cause the I/O to be errored.
 */
int
{
int rc = 0;
/* The local set (setno 0) is never blocked */
if (setno == 0)
return (0);
goto out;
}
return (rc);
}
void
{
if (setno == 0)
return;
}
void
{
if (setno == 0)
return;
}
int
{
int rc = 0;
if (setno == 0)
return (0);
rc = 1;
return (rc);
}
int
{
int rc = 0;
/* Local set is always considered available */
if (setno == 0)
return (1);
}
rc = 1;
return (rc);
}
void
{
if (setno == 0)
return;
}
void
{
if (setno == 0)
return;
#ifdef DEBUG
}
#endif /* DEBUG */
}
/*
 * Test and set version of the md_block_setio.
 * Set the io_state to keep new I/O from being issued.
 * If there is I/O currently in progress, then set io_state to active
 * and return failure. Otherwise, return a 1 for success.
 *
 * Used in a MN diskset since the commd must be suspended before
 * this node can attempt to withdraw from a diskset. But, with commd
 * suspended, I/O may have been issued that can never finish until
 * commd is resumed (allocation of hotspare, etc). So, if I/O is
 * outstanding after diskset io_state is marked RELEASE, then set diskset
 * io_state back to ACTIVE and return failure.
 */
int
{
int rc;
if (setno == 0)
return (1);
/* Outstanding I/O: revert io_state to ACTIVE and report failure */
rc = 0;
} else {
rc = 1;
}
return (rc);
}
/*
 * Presumably invoked on the I/O path: in DEBUG builds, warns (once per
 * metadevice) when I/O is attempted while the set is in RELEASE state.
 */
void
{
mdi_unit_t *ui;
if (setno == 0) {
return;
}
#ifdef DEBUG
if (!md_unit_isopen(ui))
#endif /* DEBUG */
/*
 * Handle the local diskset
 */
#ifdef DEBUG
/*
 * this is being done after the lock is dropped so there
 * are cases it may be invalid. It is advisory.
 */
/* Only display this error once for this metadevice */
"I/O to %s attempted during set RELEASE\n",
md_shortname(mnum));
}
}
#endif /* DEBUG */
}
/*
 * Driver special private devt handling routine
 * INPUT: md_dev64_t
 * OUTPUT: dev_t, 32 bit on a 32 bit kernel, 64 bit on a 64 bit kernel.
 */
{
}
/*
 * Driver private makedevice routine
 * INPUT: major_t major, minor_t minor
 * OUTPUT: md_dev64_t, no matter if on 32 bit or 64 bit kernel.
 */
{
}
/*
 * Driver private devt md_getmajor routine
 * INPUT: dev a 64 bit container holding either a 32 bit or a 64 bit device
 * OUTPUT: the appropriate major number
 */
{
/* A zero major presumably indicates the container holds a 32-bit dev */
if (major == 0) {
/* Here we were given a 32bit dev */
}
return (major);
}
/*
 * Driver private devt md_getminor routine
 * INPUT: dev a 64 bit container holding either a 32 bit or a 64 bit device
 * OUTPUT: the appropriate minor number
 */
{
/* Same 32-bit/64-bit discrimination as md_getmajor above */
if (major == 0) {
/* Here we were given a 32bit dev */
} else {
}
return (minor);
}
/*
 * Gate disk-label ioctls on the unit's label type: EFI-labelled units
 * reject VTOC/geometry read ioctls, VTOC-labelled units reject EFI
 * read ioctls.  Returns 0 when the ioctl is permitted, ENOTSUP when
 * it conflicts with the label type.
 */
int
{
/*
 * If the metadevice is an old style device, it has a vtoc,
 * in that case all reading EFI ioctls are not applicable.
 * If the metadevice has an EFI label, reading vtoc and geom ioctls
 * are not supposed to work.
 */
switch (cmd) {
case DKIOCGGEOM:
case DKIOCGVTOC:
case DKIOCGAPART:
if ((flags & MD_EFILABEL) != 0) {
/* EFI-labelled: VTOC/geometry reads are meaningless */
return (ENOTSUP);
}
break;
case DKIOCGETEFI:
case DKIOCPARTITION:
if ((flags & MD_EFILABEL) == 0) {
/* VTOC-labelled: EFI reads are meaningless */
return (ENOTSUP);
}
break;
case DKIOCSETEFI:
/* setting an EFI label should always be ok */
return (0);
case DKIOCSVTOC:
/*
 * This one is ok for small devices, even if they have an EFI
 * label. The appropriate check is in md_set_vtoc
 */
return (0);
}
/* Any other ioctl is not label-dependent */
return (0);
}
/*
 * md_vtoc_to_efi_record()
 * Input: record id of the vtoc record
 * Output: record id of the efi record
 * Function:
 * - reads the volume name from the vtoc record
 * - converts the volume name to a format, libefi understands
 * - creates a new record of size MD_EFI_PARTNAME_BYTES
 * - stores the volname in that record,
 * - commits that record
 * - returns the recid of the efi record.
 * Caveat Emptor:
 * The calling routine must do something like
 * - un->c.un_vtoc_id = md_vtoc_to_efi_record(vtoc_recid)
 * - commit(un)
 * - delete(vtoc_recid)
 * in order to keep the mddb consistent in case of a panic in the middle.
 * Errors:
 * - returns 0 on any error
 */
{
ushort_t *v;
int i;
return (0);
}
/* Record creation failed; per the contract above, return 0 */
if (efi_recid < 0) {
return (0);
}
/* This for loop read, converts and writes */
for (i = 0; i < LEN_DKL_VVOL; i++) {
}
/* commit the new record */
return (efi_recid);
}
/*
 * Send a kernel message.
 * user has to provide for an allocated result structure
 * If the door handler disappears we retry forever emitting warnings every so
 * often.
 * TODO: make this a flaggable attribute so that the caller can decide if the
 * message is to be a 'one-shot' message or not.
 */
int
char *data,
int size,
{
int rval;
/*
 * NOTE(review): ENOMEM for an oversized payload is unusual — EINVAL
 * would normally describe a too-large argument; confirm callers
 * before changing.
 */
if (size > MDMN_MAX_KMSG_DATA)
return (ENOMEM);
#ifdef DEBUG_COMM
printf("send msg: set=%d, flags=%d, type=%d, txid = 0x%llx,"
" size=%d, data=%d, data2=%d\n",
*(int *)data,
#endif /* DEBUG_COMM */
/*
 * Wait for the door handle to be established.
 */
while (mdmn_door_did == -1) {
if ((++retry_cnt % MD_MN_WARN_INTVL) == 0) {
}
}
retry_cnt = 0;
/* Door upcall retry loop; warn every MD_MN_WARN_INTVL failures */
SIZE_MAX, 0)) != 0) {
if ((++retry_cnt % MD_MN_WARN_INTVL) == 0) {
}
} else {
"md door call failed. Returned %d", rval);
}
}
/*
 * Attempt to determine if the message failed (with an RPC_FAILURE)
 * because we are in the middle of shutting the system down.
 *
 * If message failed with an RPC_FAILURE when rpc.mdcommd had
 * been gracefully shutdown (md_mn_is_commd_present returns FALSE)
 * then don't retry the message anymore. If message
 * failed due to any other reason, then retry up to MD_MN_WARN_INTVL
 * times which should allow a shutting down system time to
 * notify the kernel of a graceful shutdown of rpc.mdcommd.
 *
 * Caller of this routine will need to check the md_mn_commd_present
 * flag and the failure error in order to determine whether to panic
 * or not. If md_mn_commd_present is set to 0 and failure error
 * is RPC_FAILURE, the calling routine should not panic since the
 * system is in the process of being shutdown.
 *
 */
retry_cnt = 0;
while (md_mn_is_commd_present() == 1) {
if ((++retry_cnt % MD_MN_WARN_INTVL) == 0)
break;
}
}
return (0);
}
/*
 * Called to propagate the capability of a metadevice to all nodes in the set.
 *
 * On entry, lockp is set if the function has been called from within an ioctl.
 *
 * IOLOCK_RETURN_RELEASE, which drops the md_ioctl_lock is called in this
 * routine to enable other mdioctls to enter the kernel while this
 * thread of execution waits on the completion of mdmn_ksend_message. When
 * the message is completed the thread continues and md_ioctl_lock must be
 * reacquired. Even though md_ioctl_lock is interruptable, we choose to
 * ignore EINTR as we must not return without acquiring md_ioctl_lock.
 */
int
{
int ret;
if (lockp)
/*
 * Mask signals for the mdmd_ksend_message call. This keeps the door
 * interface from failing if the user process receives a signal while
 * in mdmn_ksend_message.
 */
kres));
}
if (lockp) {
/* Reacquire md_ioctl_lock before returning (EINTR ignored, see above) */
}
return (ret);
}
/*
 * Called to clear all of the transient capabilities for a metadevice when it is
 * not open on any node in the cluster
 * Called from close for mirror and sp.
 */
void
{
int ret;
/*
 * The check open message doesn't have to be logged, nor should the
 * result be stored in the MCT. We want an up-to-date state.
 */
/*
 * Mask signals for the mdmd_ksend_message call. This keeps the door
 * interface from failing if the user process receives a signal while
 * in mdmn_ksend_message.
 */
/*
 * Not open on any node, clear all capabilities, eg ABR and
 * DMR
 */
}
}
/*
 * mdmn_ksend_show_error:
 * ---------------------
 * Called to display the error contents of a failing mdmn_ksend_message() result
 *
 * Input:
 * rv - return value from mdmn_ksend_message()
 * kres - pointer to result structure filled in by mdmn_ksend_message
 * s - Informative message to identify failing condition (e.g.
 * "Ownership change") This string will be displayed with
 * cmn_err(CE_WARN, "%s *FAILED*",...) to alert the system
 * administrator
 */
void
{
if (rv == 0) {
} else {
}
}
/*
 * Callback routine for resync thread. If requested to suspend we mark the
 * commd as not being present.
 */
{
int ret = 0; /* assume success */
switch (code) {
case CB_CODE_CPR_CHKPT:
/*
 * Mark the rpc.mdcommd as no longer present. We are trying to
 * suspend the system and so we should expect RPC failures to
 * occur.
 */
/* cv_timedwait() returns -1 if it times out. */
break;
break;
case CB_CODE_CPR_RESUME:
break;
}
/* A cv_timedwait() timeout (ret == -1) maps to failure (B_FALSE) */
return (ret != -1);
}
/*
 * Remove a single name entry from every side of its diskset.
 */
void
{
int s;
int max_sides;
/* All entries removed are in the same diskset */
else
for (s = 0; s < max_sides; s++)
}
/*
 * Remove this node's own name record from every side of the diskset;
 * returns ENOENT when the key or record cannot be found, 0 on success.
 */
int
{
int s;
int max_sides;
struct nm_next_hdr *nh;
struct nm_name *n;
/*
 * Get the key since remove routine expects it
 */
return (ENOENT);
}
return (ENOENT);
}
/* All entries removed are in the same diskset */
else
for (s = 0; s < max_sides; s++)
return (0);
}
void
{
}
}
/*
 * Walk the hot spare pool list and return the matching pool, or a
 * NULL pool pointer when no match exists.
 */
struct hot_spare_pool *
{
return (hsp);
}
return ((hot_spare_pool_t *)0);
}