/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Just in case we're not in a build environment, make sure that
* TEXT_DOMAIN gets set to something.
*/
#if !defined(TEXT_DOMAIN)
#define	TEXT_DOMAIN "SYS_TEST"
#endif
/*
* Metadevice diskset interfaces
*/
#include "meta_set_prv.h"
#include <meta.h>
#include <sdssc.h>
static int
)
{
int rval = 0;
return (-1);
/*
* This is not the first replica being added to the
* diskset so call with ADDSIDENMS_BCAST. If this
* is a traditional diskset, the bcast flag is ignored
* since traditional disksets don't use the rpc.mdcommd.
*/
DB_ADDSIDENMS_BCAST, ep)) {
rval = -1;
goto out;
}
}
out:
return (rval);
}
static int
int node_c,
char **node_v,
)
{
int i;
return (-1);
return (-1);
return (0);
}
for (i = 0; i < node_c; i++) {
return (-1);
}
return (0);
}
static int
{
int done, i;
int rval = 0;
if (!metaislocalset(sp)) {
return (-1);
}
/* Use rpc.mdcommd to add md side info from all nodes */
int send_rval;
/*
* If a reconfig cycle has been started, this node is stuck
* in the return step until this command has completed. If
* mdcommd is suspended, ask send_message to fail (instead of
* retrying) so that metaset can finish allowing the
* reconfig cycle to proceed.
*/
0, (char *)&md_as, sizeof (md_mn_msg_meta_md_addside_t),
if (send_rval != 0) {
if (resultp)
return (-1);
}
if (resultp)
return (0);
} else {
/*CONSTCOND*/
while (1) {
return (0);
/*
* Okay we have a valid key
* Let's see if it is hsp or not
*/
if (drvnm)
return (-1);
}
/*
* If it is hsp add here
*/
return (-1);
} else {
continue;
}
}
if (MD_MNSET_DESC(sd)) {
tmp_sideno = sideno;
} else {
}
return (-1);
}
/*
* The device reference count can be greater than 1 if
* more than one softpart is configured on top of the
* same device. If this is the case then we want to
* increment the count to sync up with the other sides.
*/
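/*
 * Illustrative example only (not original code): if soft partitions d10 and
 * d11 were both built on c1t1d0s0, the name record for c1t1d0s0 would carry
 * a reference count of 2, and that count must match on every side.
 */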
rval = -1;
}
if (rval != 0)
return (rval);
}
}
/*NOTREACHED*/
}
static int
{
return (-1);
return (-1);
}
return (0);
}
int
int node_c, /* Number of new nodes */
char **node_v, /* Nodes which are being added */
int new_set,
)
{
int i;
int rval = 0;
int nodecnt;
if (!new_set) {
return (-1);
genid--;
} else {
rval = -1;
goto out;
}
/* Put the new entries into the set */
/*
* Get membershiplist from API routine. If there's
* an error, fail to create set and pass back error.
*/
rval = -1;
goto out;
}
/*
* meta_set_addhosts has already verified that
* this node list is in the membership list
* so set ALIVE flag.
* Since this is a new set, all hosts being
* added are new to the set, so also set ADD flag.
*/
for (i = 0; i < node_c; i++) {
while (nl2) {
node_v[i]) == 0) {
nl2->msl_node_addr);
break;
}
}
/*
* Nodelist must be kept in ascending
* nodeid order.
*/
/* Nothing in list, just add it */
/* Add to head of list */
} else {
/* Search for place to add it */
while (nd_curr) {
/* Add before nd_curr */
break;
}
}
/* Add to end of list */
}
}
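/*
 * Illustrative sketch only (not original code, compiled out): the general
 * pattern for keeping a singly-linked node list in ascending nodeid order,
 * as the comments above describe.  The type and field names below are
 * simplified stand-ins, not the real md_mnnode_desc layout.
 */
#if 0
struct ex_node {
	int		nodeid;
	struct ex_node	*next;
};

static void
ex_insert_sorted(struct ex_node **head, struct ex_node *new)
{
	struct ex_node	*cur = *head;
	struct ex_node	*prev = NULL;

	/* walk to the first node whose nodeid is larger than the new one */
	while (cur != NULL && cur->nodeid < new->nodeid) {
		prev = cur;
		cur = cur->next;
	}
	new->next = cur;		/* add before cur, or at the end */
	if (prev == NULL)
		*head = new;		/* nothing in list, or add to head */
	else
		prev->next = new;
}
#endif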
/* Set master to be first node added */
}
/*
* Creating mnset for first time.
* Set master to be invalid until first drive is
* in set.
*/
}
/* Create the set where needed */
for (i = 0; i < node_c; i++) {
/*
* Create the set on each new node. If the set already
* exists, then the node list being created on each new node
* is the current node list from before the new nodes
* were added. If the set doesn't exist, then the node
* list being created on each new node is the entire
* new node list.
*/
rval = -1;
break;
}
}
out:
if (new_set) {
while (nd) {
}
}
return (rval);
/*
* Add the drive records to the new sets
* and names for the new sides.
*/
}
static int
int node_c, /* Number of new nodes */
char **node_v, /* Nodes which are being added */
int new_set,
)
{
int i;
int rval = 0;
if (!new_set) {
return (-1);
genid--;
} else {
if (node_c > MD_MAXSIDES)
/* Put the new entries into the set */
for (i = 0; i < node_c; i++) {
}
rval = -1;
goto out;
}
}
/* Create the set where needed */
for (i = 0; i < node_c; i++) {
/*
* Create the set on each new host
*/
ep) == -1) {
rval = -1;
break;
}
}
out:
if (new_set)
return (rval);
/*
* Add the drive records to the new sets
* and names for the new sides.
*/
}
static int
int multi_node, /* Multi_node diskset or not? */
int node_c, /* Number of new nodes */
char **node_v, /* Nodes which are being added */
int new_set,
)
{
if (multi_node)
else
}
static int
int multi_node, /* Multi-node diskset or not? */
int node_c,
char **node_v,
int auto_take,
)
{
int i;
int rval = 0;
int bool;
int rb_level = 0;
int lock_flag = 0;
int sig_flag = 0;
return (-1);
/* We must be a member of the set we are creating */
/*
* If auto_take then we must be the only member of the set
* that we are creating.
*/
/*
* If we're part of SC3.0 we'll already have allocated the
* set number so we can skip the allocation algorithm used.
* Set number is unique across traditional and MN disksets.
*/
== SDSSC_NOT_BOUND) {
for (i = 0; i < node_c; i++) {
int has_set;
/* Skip my node */
continue;
/*
* Make sure this set name is not used on the
* other hosts
*/
if (has_set < 0) {
rval = -1;
goto out;
}
mdclrerror(ep);
continue;
}
if (has_set) {
rval = -1;
goto out;
}
}
for (i = 0; i < node_c; i++) {
&bool, ep) == -1) {
rval = -1;
goto out;
}
if (bool == TRUE)
break;
}
if (i == node_c)
break;
}
} else if (sdssc_rval != SDSSC_OKAY) {
rval = -1;
goto out;
}
rval = -1;
goto out;
}
/*
* Lock the set on current set members.
* Set locking done much earlier for MN diskset than for traditional
* diskset since lock_set is used to protect against
* other meta* commands running on the other nodes.
* Don't issue mdcommd SUSPEND command since there is nothing
* to suspend since there currently is no set.
*/
if (multi_node) {
/* Make sure we are blocking all signals */
mdclrerror(&xep);
sig_flag = 1;
/* Lock the set on new set members */
for (i = 0; i < node_c; i++) {
rval = -1;
goto out;
}
lock_flag = 1;
}
/* Now have the diskset locked, verify set number is still ok */
for (i = 0; i < node_c; i++) {
&bool, ep) == -1) {
rval = -1;
goto out;
}
}
}
rval = -1;
goto out;
}
for (i = 0; i < node_c; i++) {
rval = -1;
goto out;
}
if (bool == FALSE) {
rval = -1;
goto out;
}
}
/* END CHECK CODE */
/* Lock the set on new set members */
if (!multi_node) {
sig_flag = 1;
for (i = 0; i < node_c; i++) {
rval = -1;
goto out;
}
lock_flag = 1;
}
}
goto rollback;
if (auto_take)
else
/*
* Mark the set record MD_SR_OK
*/
for (i = 0; i < node_c; i++)
goto rollback;
/*
* For MN diskset:
* On each added node, set the node record for that node
* to OK. Then set all node records for the newly added
* nodes on all nodes to ok.
*
* By setting a node's own node record to ok first, even if
* the node adding the hosts panics, the rest of the nodes can
* determine the same node list during the choosing of the master
* during reconfig. So, only nodes considered for mastership
* are nodes that have both MD_MN_NODE_OK and MD_SR_OK set
* on that node's rpc.metad. If all nodes have MD_SR_OK set,
* but no node has its own MD_MN_NODE_OK set, then the set will
* be removed during reconfig since a panic occurred during the
* creation of the initial diskset.
*/
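/*
 * Illustrative sketch only (not original code, compiled out): the
 * mastership eligibility test described above, written as a predicate.
 * sr_flags and node_flags stand for the set record and node record flag
 * words held by that node's rpc.metad.
 */
#if 0
	eligible_for_master = ((sr_flags & MD_SR_OK) &&
	    (node_flags & MD_MN_NODE_OK));
#endif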
if (multi_node) {
goto rollback;
}
for (i = 0; i < node_c; i++) {
/* All nodes are guaranteed to be ALIVE */
while (nd) {
break;
}
/* Something wrong, will pick this up in next loop */
continue;
/* Only changing my local cache of node list */
/* Set node record for added host to ok on that host */
goto rollback;
}
}
/* Now set all node records on all nodes to be ok */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
goto rollback;
}
}
}
out:
if ((rval == 0) && multi_node) {
/*
* Set successfully created.
* Notify rpc.mdcommd on all nodes of a nodelist change.
* Send reinit command to mdcommd which forces it to get
* fresh set description. Then send resume.
* Resume on class 0 will resume all classes.
*/
for (i = 0; i < node_c; i++) {
/* Class is ignored for REINIT */
if (rval == 0)
rval = -1;
"Unable to reinit rpc.mdcommd.\n"));
}
}
for (i = 0; i < node_c; i++) {
if (rval == 0)
rval = -1;
"Unable to resume rpc.mdcommd.\n"));
}
}
}
if (lock_flag) {
for (i = 0; i < node_c; i++) {
if (rval == 0)
rval = -1;
}
}
}
if (sig_flag) {
if (multi_node) {
/* release signals back to what they were on entry */
mdclrerror(&xep);
} else {
}
}
return (rval);
/* all signals already blocked for MN diskset */
if (!multi_node) {
/* Make sure we are blocking all signals */
mdclrerror(&xep);
}
rval = -1;
/*
* For MN diskset:
* On each added node (which is now each node to be deleted),
* set the node record for that node to DEL. Then set all
* node records for the newly added (soon to be deleted) nodes
* on all nodes to ok.
*
* By setting a node's own node record to DEL first, even if
* the node doing the rollback panics, the rest of the nodes can
* determine the same node list during the choosing of the master
* during reconfig.
*/
/* level 3 */
mdclrerror(&xep);
}
for (i = 0; i < node_c; i++) {
/* All nodes are guaranteed to be ALIVE */
while (nd) {
break;
}
/* Something wrong, will pick this up in next loop */
continue;
/* Only changing my local cache of node list */
/* Set node record for added host to DEL on that host */
mdclrerror(&xep);
}
}
/* Now set all node records on all nodes to be DEL */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
mdclrerror(&xep);
}
}
/* Mark set record on all hosts to be DELETED */
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
}
}
/* level 1 */
if (rb_level > 0) {
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
}
/* level 0 */
/* Don't test lock flag since guaranteed to be set if in rollback */
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
/* release signals back to what they were on entry */
mdclrerror(&xep);
if ((sig_flag) && (!multi_node))
return (rval);
}
static int
)
{
int rval = 0;
return (-1);
rval = -1;
goto out;
}
}
out:
return (rval);
}
static int
int node_c,
char **node_v,
int oha,
)
{
int i;
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
break;
}
}
continue;
}
return (-1);
}
/*
* All nodes should be alive in non-oha mode.
*/
return (-1);
}
} else {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(ep);
continue;
}
return (-1);
}
}
}
return (0);
}
static int
char **anode,
)
{
int rval = 0;
return (-1);
/* Make sure we own the set */
return (-1);
/* Lock the set on our side */
rval = -1;
goto out;
}
rval = -1;
goto out;
}
if (!MD_MNSET_DESC(sd)) {
rval = -1;
goto out;
}
}
/* If we have drives */
rval = -1;
goto out;
}
}
}
out:
if (rval == 0)
rval = -1;
}
return (rval);
}
static int
{
int i;
if (!metaislocalset(sp)) {
return (-1);
}
/* Use rpc.mdcommd to add md side info from all nodes */
int send_rval;
/*
* If a reconfig cycle has been started, this node is stuck
* in the return step until this command has completed. If
* mdcommd is suspended, ask send_message to fail (instead of
* retrying) so that metaset can finish allowing the
* reconfig cycle to proceed.
*/
0, (char *)&md_ds, sizeof (md_mn_msg_meta_md_delside_t),
if (send_rval != 0) {
if (resultp)
return (-1);
}
if (resultp)
} else {
/*CONSTCOND*/
while (1) {
return (0);
/*
* The device reference count can be greater than 1 if
* more than one softpart is configured on top of the
* same device. If this is the case then we want to
* decrement the count to zero so the entry can be
* actually removed.
*/
return (-1);
}
}
}
return (0);
}
static void
)
{
int i;
int has_set;
if (MD_MNSET_DESC(sd)) {
while (nd) {
continue;
}
NHS_NST_EQ, &xep);
if (has_set >= 0) {
continue;
}
mdclrerror(&xep);
mdclrerror(&xep);
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
NHS_NST_EQ, &xep);
if (has_set >= 0)
continue;
mdclrerror(&xep);
mdclrerror(&xep);
}
}
}
/*
* If a MN diskset, set is already locked on all nodes via clnt_lock_set.
*/
static int
int node_c,
char **node_v,
int oha,
)
{
int i;
int rb_level = 0;
int rval = 0;
return (-1);
if (MD_MNSET_DESC(sd)) {
/* Make sure we are blocking all signals */
mdclrerror(&xep);
} else {
}
/*
* Lock the set on current set members for traditional disksets.
*/
if (!(MD_MNSET_DESC(sd))) {
for (i = 0; i < node_c; i++) {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(ep);
continue;
}
rval = -1;
goto out;
}
}
}
/*
* Mark the set record MD_SR_DEL
*/
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
break;
}
goto rollback;
}
continue;
}
goto rollback;
}
/*
* All nodes should be alive in non-oha mode.
*/
goto rollback;
}
} else {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(ep);
continue;
}
goto rollback;
}
}
}
delete_end = 0;
else
goto rollback;
/* The set is OK to delete, make it so. */
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
break;
}
goto rollback;
}
continue;
}
goto rollback;
}
/*
* All nodes should be alive in non-oha mode.
*/
goto rollback;
}
} else {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(ep);
continue;
}
goto rollback;
}
}
}
out:
/*
* Unlock the set on current set members
* for traditional disksets.
*/
if (!(MD_MNSET_DESC(sd))) {
for (i = 0; i < node_c; i++) {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(&xep);
continue;
}
if (rval == 0)
rval = -1;
}
}
}
/*
* A MN diskset has the clnt_locks held by meta_set_deletehosts so
* don't flush that data until meta_set_deletehosts has finished
* with it. meta_set_deletehosts will handle the flush of the
* setname.
*/
if (!(MD_MNSET_DESC(sd))) {
}
if (delete_end &&
rval = -1;
if (MD_MNSET_DESC(sd)) {
/* release signals back to what they were on entry */
mdclrerror(&xep);
} else {
}
return (rval);
/* all signals already blocked for MN diskset */
if (!(MD_MNSET_DESC(sd))) {
/* Make sure we are blocking all signals */
mdclrerror(&xep);
}
rval = -1;
/* level 2 */
if (rb_level > 1) {
max_genid++;
if (delete_end)
}
/* level 1 */
if (rb_level > 0) {
max_genid++;
}
/* level 0 */
/*
* Unlock the set on current set members
* for traditional disksets.
*/
if (!(MD_MNSET_DESC(sd))) {
for (i = 0; i < node_c; i++) {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(&xep);
}
}
/* release signals back to what they were on entry */
mdclrerror(&xep);
/*
* A MN diskset has the clnt_locks held by meta_set_deletehosts so
* don't flush that data until meta_set_deletehosts has finished
* with it. meta_set_deletehosts will handle the flush of the
* setname.
*/
if (!(MD_MNSET_DESC(sd))) {
}
return (rval);
}
/*
* On entry:
* procsigs already called for MN diskset.
* md_rb_sig_handling already called for traditional diskset.
*/
static int
int node_c, /* Number of nodes */
char **node_v, /* Nodes being deleted */
int oha,
)
{
int i;
int j;
/*
* May need this to re-add sidenames on roll back.
*/
ep) < 0)
goto rollback;
goto rollback;
/*
* Delete the db replica sides
* This is done before the next loop, so that
* the db does not get unloaded before we are finished
* deleting the sides.
*/
if (MD_MNSET_DESC(sd)) {
while (nd) {
/* Skip hosts not being deleted */
node_v)) {
continue;
}
goto rollback;
}
} else {
/* Skip empty slots */
continue;
/* Skip hosts not being deleted */
node_v))
continue;
goto rollback;
}
}
/* Delete the names from the namespace */
if (MD_MNSET_DESC(sd)) {
while (nd) {
/* Skip hosts not being deleted */
node_v)) {
continue;
}
goto rollback;
}
} else {
/* Skip empty slots */
continue;
/* Skip hosts not being deleted */
node_v))
continue;
goto rollback;
}
}
}
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
break;
}
goto rollback;
}
continue;
}
goto rollback;
}
/*
* All nodes should be alive in non-oha mode.
*/
goto rollback;
}
} else {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(ep);
continue;
}
goto rollback;
}
}
}
if (MD_MNSET_DESC(sd)) {
/* release signals back to what they were on entry */
mdclrerror(&xep);
} else {
}
return (0);
/* all signals already blocked for MN diskset */
if (!(MD_MNSET_DESC(sd))) {
/* Make sure we are blocking all signals */
mdclrerror(&xep);
}
/* level 5 */
if (rb_level > 4) {
max_genid++;
}
/* level 2 */
/*
* See if we have to re-add the drives specified.
*/
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
== 0)
break;
}
continue;
continue;
}
/* Don't care if set record is MN or not */
mdclrerror(&xep);
continue;
}
/* Drive already added, skip to next node */
/*
* Set record structure was allocated from RPC
* routine getset so this structure is only of
* size md_set_record even if the MN flag is
* set. So, clear the flag so that the free
* code doesn't attempt to free a structure
* the size of md_mnset_record.
*/
continue;
}
mdclrerror(&xep);
mdclrerror(&xep);
/*
* Set record structure was allocated from RPC routine
* getset so this structure is only of size
* md_set_record even if the MN flag is set. So,
* clear the flag so that the free code doesn't
* attempt to free a structure the size of
* md_mnset_record.
*/
}
max_genid += 3;
}
/* level 3 */
/*
* This is not the first replica being added to the
* diskset so call with ADDSIDENMS_BCAST. If this
* is a traditional diskset, the bcast flag is ignored
* since traditional disksets don't use the rpc.mdcommd.
*/
mdclrerror(&xep);
}
}
/* level 4 */
int nodeid_addsides = 0;
/*
* Add the device names for the new sides into the namespace,
* on all hosts not being deleted.
*/
if (MD_MNSET_DESC(sd)) {
while (nd) {
/* Find a node that is not being deleted */
node_v)) {
break;
}
}
} else {
for (j = 0; j < MD_MAXSIDES; j++) {
/* Skip empty slots */
continue;
/* Find a node that is not being deleted */
node_v))
break;
}
nodeid_addsides = j;
}
if (MD_MNSET_DESC(sd)) {
while (nd) {
/* Skip nodes not being deleted */
node_v)) {
continue;
}
/* this side was just created, add the names */
nodeid_addsides, &xep))
mdclrerror(&xep);
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes not being deleted */
continue;
/* this side was just created, add the names */
&xep))
mdclrerror(&xep);
}
}
}
/* level 1 */
if (rb_level > 0) {
max_genid++;
}
/* level 0 */
if (MD_MNSET_DESC(sd)) {
while (nd) {
continue;
mdclrerror(&xep);
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
mdclrerror(&xep);
}
}
/* release signals back to what they were on entry */
mdclrerror(&xep);
if (!(MD_MNSET_DESC(sd))) {
}
return (-1);
}
static int
)
{
int err = 0;
return (-1);
/* find the end of the link list */
;
return (-1);
return (-1);
if (MD_MNSET_DESC(sd)) {
/*
* For MN diskset the sideno is not an index into
* the array of nodes. Hence getside_devinfo is
* used instead of meta_getnextside_devinfo.
*/
err = -1;
} else {
/* decrement sideno, to look like the previous sideno */
sideno--;
err = -1;
}
if (err) {
return (err);
}
/* Add to the end of the linked list */
return (0);
}
static int
int node_c,
char **node_v,
)
{
char *hostname;
int i;
for (i = 0; i < node_c; i++) {
return (-1);
}
}
return (0);
}
/*
* Exported Entry Points
*/
/*
* Check the given disk set name for syntactic correctness.
*/
int
{
char *cp;
return (0);
}
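/*
 * Illustrative sketch only (not original code, compiled out): one plausible
 * shape of the syntactic check named above.  The exact rules enforced by the
 * original routine (length limit, allowed characters) and the MD_MAX_SETNAME
 * limit used here are assumptions, not confirmed by the elided body.
 */
#if 0
static int
ex_checkname(const char *setname)
{
	const char	*cp;

	if (setname == NULL || *setname == '\0' ||
	    strlen(setname) > MD_MAX_SETNAME)
		return (-1);
	for (cp = setname; *cp != '\0'; cp++) {
		/* reject path separators and non-printable characters */
		if (*cp == '/' || !isprint((unsigned char)*cp))
			return (-1);
	}
	return (0);
}
#endif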
/*
* Add host(s) to the multi-node diskset provided in sp.
* - create set if non-existent.
*/
static int
int multi_node,
int node_c,
char **node_v,
int auto_take,
)
{
int rval = 0;
int bool;
int nodeindex;
int i;
int has_set;
int rb_level = 0;
int nodecnt;
int suspendall_flag = 0;
int suspend1_flag = 0;
int lock_flag = 0;
int stale_flag = 0;
int remote_sets_created = 0;
/*
* Check membershiplist first. If there's
* an error, fail to create set and pass back error.
*/
return (-1);
}
/* Verify that all nodes are in member list */
for (i = 0; i < node_c; i++) {
/*
* If node in list isn't a member of the membership,
* just return error.
*/
}
}
/*
* Node list is needed later, but there is a lot of error
* checking and possible failures between here and there, so
* just re-get the list later if there are no errors.
*/
/*
* Verify that list of nodes being added contains no
* duplicates.
*/
return (-1);
/*
* Verify that each node being added thinks that its nodename
* is the same as the nodename given.
*/
return (-1);
return (-1);
mdclrerror(ep);
ep));
} else {
/*
* If this node and another node were both attempting to
* create the same setname at the same time, and the other
* node has just created the set on this node then sd would
* be non-NULL, but sp->setno would be null (setno is filled
* in by the create_set). If this is true, then fail since
* the other node has already won this race.
*/
}
}
/* The auto_take behavior is inconsistent with multiple hosts. */
return (-1);
}
/*
* We already have the set.
*/
/* Make sure we own the set */
return (-1);
/*
* The drive and node records are stored in the local mddbs of each
* node in the diskset. Each node's rpc.metad daemon reads in the set,
* drive and node records from that node's local mddb and caches them
* internally. Any process needing diskset information contacts its
* local rpc.metad to get this information. Since each node in the
* diskset is independently reading the set information from its local
* mddb, the set, drive and node records in the local mddbs must stay
* in-sync, so that all nodes have a consistent view of the diskset.
*
* For a multinode diskset, explicitly verify that all nodes in the
* diskset are ALIVE (i.e. are in the API membership list). Otherwise,
* fail this operation since all nodes must be ALIVE in order to add
* the new node record to their local mddb. If a panic of this node
* leaves the local mddbs set, node and drive records out-of-sync, the
* reconfig cycle will fix the local mddbs and force them back into
* synchronization.
*/
while (nd) {
}
}
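/*
 * Illustrative sketch only (not original code, compiled out): the shape of
 * the ALIVE check described in the comment above - walk the cached node
 * list and fail if any node is missing from the membership list.  Exact
 * error handling in the elided code is not shown.
 */
#if 0
	nd = sd->sd_nodelist;
	while (nd) {
		if (!(nd->nd_flags & MD_MN_NODE_ALIVE))
			return (-1);	/* every node must be ALIVE */
		nd = nd->nd_next;
	}
#endif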
/*
* Check if node is already in set.
*/
for (i = 0; i < node_c; i++) {
/* Is node already in set? */
while (nd) {
break;
}
if (nd) {
}
}
/*
* Lock the set on current set members.
* Set locking done much earlier for MN diskset than for traditional
* diskset since lock_set and SUSPEND are used to protect against
* other meta* commands running on the other nodes.
*/
/* Make sure we are blocking all signals */
mdclrerror(&xep);
/* All nodes are guaranteed to be ALIVE */
while (nd) {
rval = -1;
goto out;
}
lock_flag = 1;
}
/*
* Lock out other meta* commands by suspending
* class 1 messages across the diskset.
*/
/* Send suspend to nodes in nodelist before addhosts call */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
MD_MSCF_NO_FLAGS, ep)) {
rval = -1;
goto out;
}
suspend1_flag = 1;
}
/* Lock the set on new set members */
for (i = 0; i < node_c; i++) {
/* Already verified to be alive */
rval = -1;
goto out;
}
lock_flag = 1;
}
/*
* Perform the required checks for new hosts
*/
for (i = 0; i < node_c; i++) {
/* Make sure this set name is not used on the other hosts */
if (has_set < 0) {
rval = -1;
goto out;
}
/* Keep on truck'n */
mdclrerror(ep);
} else if (has_set) {
rval = -1;
goto out;
}
rval = -1;
goto out;
}
if (bool == TRUE) {
rval = -1;
goto out;
}
rval = -1;
goto out;
}
if (bool == FALSE) {
rval = -1;
goto out;
}
rval = -1;
goto out;
}
}
/* Get drive descriptors for the set */
rval = -1;
goto out;
}
}
/* END CHECK CODE */
/*
* Create the set where needed
*/
goto rollback;
}
/*
* Send suspend to rpc.mdcommd on nodes where a set has been
* created since rpc.mdcommd must now be running on the remote nodes.
*/
remote_sets_created = 1;
for (i = 0; i < node_c; i++) {
/*
* Lock out other meta* commands by suspending
* class 1 messages across the diskset.
*/
if (clnt_mdcommdctl(node_v[i],
MD_MSCF_NO_FLAGS, ep)) {
rval = -1;
goto rollback;
}
}
/*
* Merge the new entries into the set with the existing sides.
* Get membershiplist from API routine. If there's
* an error, fail to create set and pass back error.
*/
goto rollback;
}
goto rollback;
}
while (nl2) {
nl2->msl_node_addr);
break;
}
}
/*
* Nodelist must be kept in ascending nodeid order.
*/
/* Nothing in list, just add it */
/* Add to head of list */
} else {
/* Search for place to add it */
while (nd_curr) {
/* Add before nd_curr */
break;
}
}
/* Add to end of list */
}
}
/* Node already verified to be in membership */
}
/* If we have drives */
/*
* For all the hosts being added, create a sidename structure
*/
while (nd) {
/* Skip nodes not being added */
continue;
}
goto rollback;
}
}
/*
* Add the new sidename for each drive to all the hosts
*
* If a multi-node diskset, each host only stores
* the side information for itself. So, only send
* side information to the new hosts where each host
* will add the appropriate side information to its
* local mddb.
*/
while (nd) {
/* Skip nodes not being added */
node_v)) {
continue;
}
/* Add side info to new hosts */
goto rollback;
}
/*
* Add the device names for the new sides into the namespace
* for all hosts being added. This is adding the side
* names to the diskset's mddb so add sidenames for all
* of the new hosts.
*/
while (nd) {
/* Skip nodes not being added */
continue;
}
/* this side was just created, add the names */
MD_SIDEWILD, ep))
goto rollback;
}
goto rollback;
} else {
rb_level = 4;
}
/*
* Notify rpc.mdcommd on all nodes of a nodelist change.
* Start by suspending rpc.mdcommd (which drains it of all
* messages), then change the nodelist followed by a reinit
* and resume.
*/
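/*
 * Illustrative sketch only (not original code, compiled out): the ordering
 * of the nodelist-change protocol described above.  The wrapper names
 * (ex_suspend_all, ex_update_nodelist, ex_reinit, ex_resume_all) are
 * hypothetical stand-ins for the clnt_mdcommdctl()/clnt_addhosts() calls
 * issued below.
 */
#if 0
	ex_suspend_all(nodelist);	/* drain rpc.mdcommd of all messages */
	ex_update_nodelist(nodelist);	/* add the new hosts to the set */
	ex_reinit(nodelist);		/* force re-read of set description */
	ex_resume_all(nodelist);	/* allow message traffic again */
#endif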
/* Send suspend_all to nodes in nodelist (existing + new) */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
rval = -1;
goto rollback;
}
suspendall_flag = 1;
}
}
/* Add the node(s) to each host that is currently in the set */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
goto rollback;
}
}
/*
* Mark the drives MD_DR_OK.
*/
/* All nodes are guaranteed to be ALIVE */
while (nd) {
goto rollback;
}
}
/* Add the mediator information to all hosts in the set. */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
goto rollback;
}
/*
* If a MN diskset and there are drives in the set,
* set the master on the new nodes and
* automatically join the new nodes into the set.
*/
/*
* Is current set STALE?
*/
(void) memset(&c, 0, sizeof (c));
c.c_id = 0;
rval = -1;
goto out;
}
if (c.c_flags & MDDB_C_STALE) {
}
/* Set master on newly added nodes */
for (i = 0; i < node_c; i++) {
goto rollback;
}
}
/* Join newly added nodes to diskset and set OWN flag */
for (i = 0; i < node_c; i++) {
goto rollback;
while (nd) {
/*
* Also set ADD flag since this flag
* is already set in rpc.metad - it's
* just not in the local copy.
* Could flush local cache and call
* metaget_setdesc, but this just
* adds time. Since this node knows
* the state of the node flags in
* rpc.metad, just set the ADD
* flag and save time.
*/
break;
}
}
}
/* Send new node flag list to all Owner nodes */
while (nd) {
continue;
}
/*
* Will effectively set OWN flag in records kept
* cached in rpc.metad. The ADD flag would have
* already been set by the call to clnt_addhosts.
*/
goto rollback;
}
}
}
/*
* Mark the set record MD_SR_OK
*/
/* All nodes are guaranteed to be ALIVE */
while (nd) {
ep)) {
goto rollback;
}
}
/*
* For MN diskset:
* On each newly added node, set the node record for that node
* to OK. Then set all node records for the newly added
* nodes on all nodes to ok.
*
* By setting a node's own node record to ok first, even if
* the node adding the hosts panics, the rest of the nodes can
* determine the same node list during the choosing of the master
* during reconfig. So, only nodes considered for mastership
* are nodes that have both MD_MN_NODE_OK and MD_SR_OK set
* on that node's rpc.metad. If all nodes have MD_SR_OK set,
* but no node has its own MD_MN_NODE_OK set, then the set will
* be removed during reconfig since a panic occurred during the
* creation of the initial diskset.
*/
for (i = 0; i < node_c; i++) {
/* All nodes are guaranteed to be ALIVE */
while (nd) {
break;
}
/* Something wrong, will pick this up in next loop */
continue;
/* Only changing my local cache of node list */
/* Set node record for added host to ok on that host */
goto rollback;
}
}
/* Now set all node records on all nodes to be ok */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
goto rollback;
}
}
out:
/*
* Notify rpc.mdcommd on all nodes of a nodelist change.
* Send reinit command to mdcommd which forces it to get
* fresh set description. Then send resume.
* Resume on class 0 will resume all classes, so can skip
* doing an explicit resume of class1 (ignore suspend1_flag).
*/
if (suspendall_flag) {
/*
* Don't know if nodelist contains the nodes being added
* or not, so do reinit to nodes not being added (by skipping
* any nodes in the nodelist being added) and then do
* reinit to nodes being added if remote_sets_created is 1.
*/
/* All nodes are guaranteed to be ALIVE */
while (nd) {
/* Skip nodes being added - handled later */
continue;
}
/* Class is ignored for REINIT */
if (rval == 0)
rval = -1;
"Unable to reinit rpc.mdcommd.\n"));
}
}
/*
* Send reinit to added nodes that had a set created since
* rpc.mdcommd is running on the nodes with a set.
*/
if (remote_sets_created == 1) {
for (i = 0; i < node_c; i++) {
if (rval == 0)
rval = -1;
"Unable to reinit rpc.mdcommd.\n"));
}
}
}
}
if ((suspend1_flag) || (suspendall_flag)) {
/*
* Unlock diskset by resuming messages across the diskset.
* Just resume all classes so that resume is the same whether
* just one class was locked or all classes were locked.
*
* Don't know if nodelist contains the nodes being added
* or not, so do resume_all to nodes not being added (by
* skipping any nodes in the nodelist being added) and then do
* resume_all to nodes being added if remote_sets_created is 1.
*/
/* All nodes are guaranteed to be ALIVE */
while (nd) {
/* Skip nodes being added - handled later */
continue;
}
if (rval == 0)
rval = -1;
"Unable to resume rpc.mdcommd.\n"));
}
}
/*
* Send resume to added nodes that had a set created since
* rpc.mdcommd is running on the nodes with a set.
*/
if (remote_sets_created == 1) {
for (i = 0; i < node_c; i++) {
/* Already verified to be alive */
&xep)) {
if (rval == 0)
rval = -1;
"Unable to resume rpc.mdcommd.\n"));
}
}
}
/*
* Start a resync thread on the newly added nodes
* if set is not stale. Also start a thread to update the
* abr state of all soft partitions
*/
if (stale_flag != MNSET_IS_STALE) {
for (i = 0; i < node_c; i++) {
if (clnt_mn_mirror_resync_all(node_v[i],
if (rval == 0)
rval = -1;
"Unable to start resync "
"thread.\n"));
}
if (clnt_mn_sp_update_abr(node_v[i],
if (rval == 0)
rval = -1;
"Unable to start sp update "
"thread.\n"));
}
}
}
}
/*
* Don't know if nodelist contains the nodes being added
* or not, so do clnt_unlock_set to nodes not being added (by
* skipping any nodes in the nodelist being added) and then do
* clnt_unlock_set to nodes being added.
*/
if (lock_flag) {
/* All nodes are guaranteed to be ALIVE */
while (nd) {
/* Skip hosts we get in the next loop */
continue;
}
if (rval == 0)
rval = -1;
}
}
for (i = 0; i < node_c; i++) {
/* Already verified to be alive */
if (rval == 0)
rval = -1;
}
}
}
/* release signals back to what they were on entry */
mdclrerror(&xep);
return (rval);
rval = -1;
/* level 6 */
if (rb_level > 5) {
/*
* For each node being deleted, set DEL flag and
* reset OK flag on that node first.
* Until a node has turned off its own
* rpc.metad's NODE_OK flag, that node could be
* considered for master during a reconfig.
*/
for (i = 0; i < node_c; i++) {
/* All nodes are guaranteed to be ALIVE */
while (nd) {
break;
}
/* Something wrong, handle this in next loop */
continue;
/* Only changing my local cache of node list */
/* Set flags for del host to DEL on that host */
mdclrerror(&xep);
}
}
for (i = 0; i < node_c; i++) {
/* Reset master on newly added node */
MD_MN_INVALID_NID, &xep))
mdclrerror(&xep);
/* Withdraw set on newly added node */
mdclrerror(&xep);
}
/*
* Turn off owner flag in nodes to be deleted
* if there are drives in the set.
* Also, turn off NODE_OK and turn on NODE_DEL
* for nodes to be deleted.
* These flags are used to set the node
* record flags in all nodes in the set.
*/
while (nd) {
}
break;
}
}
}
/*
* Now, reset owner and set delete flags for the deleted
* nodes on all nodes.
*/
while (nd) {
mdclrerror(&xep);
}
}
/*
* On each node being deleted, set the set record
* to be in DEL state.
*/
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
}
}
/* level 5 */
if (rb_level > 4) {
/* All nodes are guaranteed to be ALIVE */
while (nd) {
mdclrerror(&xep);
}
}
/*
* Notify rpc.mdcommd on all nodes of a nodelist change.
* Send reinit command to mdcommd which forces it to get
* fresh set description. Then send resume.
* Nodelist contains all nodes (existing + added).
*/
if (suspendall_flag) {
/* Send reinit */
/* All nodes are guaranteed to be ALIVE */
/* Send reinit to nodes in nodelist before addhosts call */
while (nd) {
/*
* Skip nodes being added if remote sets were not
* created since rpc.mdcommd may not be running
* on the remote nodes.
*/
if ((remote_sets_created == 0) &&
continue;
}
/* Class is ignored for REINIT */
"Unable to reinit rpc.mdcommd.\n"));
mdclrerror(&xep);
}
}
/* Send resume */
/* All nodes are guaranteed to be ALIVE */
while (nd) {
/*
* Skip nodes being added if remote sets were not
* created since rpc.mdcommd may not be running
* on the remote nodes.
*/
if ((remote_sets_created == 0) &&
continue;
}
/*
* Resume all classes but class 1 so that lock is held
* against meta* commands.
* Send resume_all_but_1 to nodes in nodelist
* before addhosts call.
*/
&xep)) {
"Unable to resume rpc.mdcommd.\n"));
mdclrerror(&xep);
}
}
}
/* level 4 */
/* Nodelist may or may not contain nodes being added. */
while (nd) {
/* Skip nodes not being added */
continue;
}
mdclrerror(&xep);
}
}
/* level 3 */
/* Nodelist may or may not contain nodes being added. */
while (nd) {
/* Skip nodes not being added */
continue;
}
mdclrerror(&xep);
}
}
/* level 1 */
if (rb_level > 0) {
/* delete the drive records */
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
}
/* delete the set record */
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
}
/* level 0 */
/* Don't test lock flag since guaranteed to be set if in rollback */
/* Nodelist may or may not contain nodes being added. */
/*
* Unlock diskset by resuming messages across the diskset.
* Just resume all classes so that resume is the same whether
* just one class was locked or all classes were locked.
*/
if ((suspend1_flag) || (suspendall_flag)) {
/* All nodes are guaranteed to be ALIVE */
while (nd) {
/*
* Skip nodes being added since remote sets
* were either created and then deleted or
* were never created. Either way - rpc.mdcommd
* may not be running on the remote node.
*/
continue;
}
MD_MSCF_NO_FLAGS, &xep)) {
"Unable to resume rpc.mdcommd.\n"));
mdclrerror(&xep);
}
}
}
/* All nodes are guaranteed to be ALIVE */
while (nd) {
/* Skip hosts we get in the next loop */
continue;
}
mdclrerror(&xep);
}
for (i = 0; i < node_c; i++)
mdclrerror(&xep);
/* release signals back to what they were on entry */
mdclrerror(&xep);
return (rval);
}
/*
* Add host(s) to the traditional diskset provided in sp.
* - create set if non-existent.
*/
static int
int multi_node,
int node_c,
char **node_v,
int auto_take,
)
{
int rval = 0;
int bool;
int nodeindex;
int i;
int has_set;
int numsides;
int rb_level = 0;
int max_meds;
return (-1);
return (-1);
return (-1);
mdclrerror(ep);
ep));
}
/* The auto_take behavior is inconsistent with multiple hosts. */
return (-1);
}
/*
* We already have the set.
*/
/* Make sure we own the set */
return (-1);
/*
* Perform the required checks for new hosts
*/
for (i = 0; i < node_c; i++) {
/* Make sure this set name is not used on the other hosts */
if (has_set < 0) {
return (-1);
/* Keep on truck'n */
mdclrerror(ep);
} else if (has_set)
return (-1);
if (bool == TRUE)
return (-1);
if (bool == FALSE)
return (-1);
}
/* Count the number of occupied slots */
numsides = 0;
for (i = 0; i < MD_MAXSIDES; i++) {
/* Count occupied slots */
numsides++;
}
/* Make sure the we have space to add the new sides */
return (-1);
}
/* Get drive descriptors for the set */
return (-1);
/* Setup the mediator record roll-back structure */
rb_medr.med_rec_fl = 0;
for (i = 0; i < MD_MAXSIDES; i++)
rb_medr.med_rec_foff = 0;
return (-1);
/* END CHECK CODE */
/* Lock the set on current set members */
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
rval = -1;
goto out;
}
}
/* Lock the set on new set members */
for (i = 0; i < node_c; i++) {
rval = -1;
goto out;
}
}
/*
* Add the new hosts to the existing set record on the existing hosts
*/
for (i = 0; i < MD_MAXSIDES; i++) {
/* skip empty slots */
continue;
goto rollback;
}
/* Merge the new entries into the set with the existing sides */
nodeindex = 0;
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip full slots */
continue;
break;
}
/* If we have drives */
/*
* For all the hosts being added, create a sidename structure
*/
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes not being added */
continue;
ep) != 0)
goto rollback;
}
}
/*
* Add the new sidename for each drive to the existing hosts
*/
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes being added */
continue;
goto rollback;
}
}
goto rollback;
}
} else {
rb_level = 3;
}
/* create the set on the new nodes, this adds the drives as well */
goto rollback;
}
/*
* Add the device entries for the new sides into the namespace.
*/
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes not being added */
continue;
goto rollback;
}
}
/*
* Mark the drives MD_DR_OK.
*/
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
goto rollback;
}
}
}
/* Bring the mediator record up to date with the set record */
for (i = 0; i < MD_MAXSIDES; i++)
/* Inform the mediator hosts of the new node list */
for (i = 0; i < max_meds; i++) {
continue;
goto rollback;
}
/* Add the mediator information to all hosts in the set */
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
goto rollback;
}
/*
* Mark the set record MD_SR_OK
*/
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
goto rollback;
}
out:
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip hosts we get in the next loop */
continue;
if (rval == 0)
rval = -1;
}
}
if (rval == 0) {
for (i = 0; i < node_c; i++)
if (rval == 0)
rval = -1;
}
}
return (rval);
/* Make sure we are blocking all signals */
mdclrerror(&xep);
rval = -1;
/* level 6 */
if (rb_level > 5) {
for (i = 0; i < max_meds; i++) {
continue;
mdclrerror(&xep);
}
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes not being added */
continue;
mdclrerror(&xep);
}
}
}
/* level 5 */
if (rb_level > 4) {
/* delete the drive records */
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
}
/* delete the set record on the 'new' hosts */
for (i = 0; i < node_c; i++) {
mdclrerror(&xep);
}
}
/* level 4 */
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes not being added */
continue;
mdclrerror(&xep);
}
}
/* level 3 */
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes not being added */
continue;
&xep) == -1)
mdclrerror(&xep);
}
}
/* level 2 */
if (rb_level > 1) {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
&xep) == -1)
mdclrerror(&xep);
}
}
/* level 1 */
if (rb_level > 0) {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip hosts we get in the next loop */
continue;
mdclrerror(&xep);
}
for (i = 0; i < node_c; i++)
mdclrerror(&xep);
}
/* release signals back to what they were on entry */
mdclrerror(&xep);
return (rval);
}
/*
* Add host(s) to the diskset provided in sp.
* - create set if non-existent.
*/
int
int multi_node,
int node_c,
char **node_v,
int auto_take,
)
{
if (multi_node)
else
}
/*
* Delete host(s) from the diskset provided in sp.
* - destroy set if last host in set is removed.
*/
int
int node_c,
char **node_v,
int forceflg,
)
{
int i, j;
int has_set;
int numsides = 0;
int rval = 0;
int rb_level = 0;
int max_meds = 0;
int delete_master = 0;
int suspend1_flag = 0;
int lock_flag = 0;
int stale_flag = 0;
int remote_sets_deleted = 0;
return (-1);
/*
* Verify that list of nodes being deleted contains no
* duplicates.
*/
return (-1);
/* Make sure we own the set */
return (-1);
/*
* The drive and node records are stored in the local mddbs of each
* node in the diskset. Each node's rpc.metad daemon reads in the set,
* drive and node records from that node's local mddb and caches them
* internally. Any process needing diskset information contacts its
* local rpc.metad to get this information. Since each node in the
* diskset is independently reading the set information from its local
* mddb, the set, drive and node records in the local mddbs must stay
* in-sync, so that all nodes have a consistent view of the diskset.
*
* For a multinode diskset, explicitly verify that all nodes in the
* diskset are ALIVE (i.e. are in the API membership list) if the
* forceflag is FALSE. (The case of forceflag being TRUE is handled
* in OHA check above.)
*
* If forceflag is FALSE and a node in the diskset is not in
* the membership list, then fail this operation since all nodes must
* be ALIVE in order to delete the node record from their local mddb.
* If a panic of this node leaves the local mddbs set, node and drive
* records out-of-sync, the reconfig cycle will fix the local mddbs
* and force them back into synchronization.
*/
while (nd) {
}
}
}
/*
* Lock the set on current set members.
* Set locking done much earlier for MN diskset than for traditional
* diskset since lock_set and SUSPEND are used to protect against
* other meta* commands running on the other nodes.
*/
if (MD_MNSET_DESC(sd)) {
/* Make sure we are blocking all signals */
mdclrerror(&xep);
while (nd) {
continue;
}
rval = -1;
goto out2;
}
lock_flag = 1;
}
/*
* Lock out other meta* commands by suspending
* class 1 messages across the diskset.
*/
while (nd) {
continue;
}
MD_MSCF_NO_FLAGS, ep)) {
rval = -1;
goto out2;
}
suspend1_flag = 1;
}
}
for (i = 0; i < node_c; i++)
rval = -1;
goto out2;
}
/*
* Count the number of nodes currently in the set.
*/
if (MD_MNSET_DESC(sd)) {
while (nd) {
numsides++;
}
} else {
for (i = 0; i < MD_MAXSIDES; i++)
/* Count full slots */
numsides++;
}
/*
* OHA mode == -f -h <hostname>
* OHA is One Host Administration that occurs when the forceflag (-f)
* is set and at least one host in the diskset isn't responding
* to RPC requests.
*
* When in OHA mode, a node cannot delete itself from a diskset.
* When in OHA mode, a node can delete a list of nodes from a diskset
* even if some of the nodes in the diskset are unresponsive.
*
* For multinode diskset, only allow OHA mode when the nodes that
* aren't responding in the diskset are not in the membership list
* (i.e. nodes that aren't responding are not marked ALIVE).
* Nodes that aren't in the membership list will be rejoining
* the diskset through a reconfig cycle and the local mddb set
* and node records can be reconciled during the reconfig cycle.
*
* If a node isn't responding, but is still in the membership list,
* fail the request since the node may not be responding because
* rpc.metad died and is restarting. In this case, no reconfig
* cycle will be started, so there's no way to recover if
* the host delete operation was allowed.
*
* NOTE: if nodes that weren't in the membership when the OHA host
* delete occurred are now the only nodes in membership list,
* those nodes will see the old view of the diskset. As soon as
* a node re-enters the cluster that was present in the cluster
* during the host deletion, the diskset will reflect the host
* deletion on all nodes presently in the cluster.
*/
if (MD_MNSET_DESC(sd)) {
while (nd) {
/*
* If a node isn't ALIVE (in member list),
* then allow a force-able delete in OHA mode.
*/
break;
}
/*
* Don't test for clnt_nullproc since already
* tested the RPC connections by clnt_lock_set.
*/
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/*
* If we timeout to at least one
* client, then we can allow OHA mode,
* otherwise, we are in normal mode.
*/
if (mdanyrpcerror(ep)) {
mdclrerror(ep);
break;
}
}
}
}
}
}
/*
* Don't allow this for MN diskset since meta_set_destroy of 1 node
* does NOT remove this node's node record from the other node's set
* records in their local mddb. This leaves a MN diskset in a very
* messed up state.
*/
if (!(MD_MNSET_DESC(sd))) {
/* Destroy set */
/* Can return since !MN diskset so nothing to unlock */
}
}
/*
* In multinode diskset, can only delete self if this
* is the last node in the set or if all nodes in
* the set are being deleted. The traditional diskset code
* allows a node to delete itself (when there are other nodes
* in the diskset) when using the force flag, but that code
* path doesn't have the node remove itself from
* the set node list on the other nodes. Since this isn't
* satisfactory for the multinode diskset, just don't
* allow this operation.
*/
rval = -1;
goto out2;
}
/*
* In multinode diskset, don't allow deletion of master node unless
* this is the only node left or unless all nodes are being
* deleted since there is no way to switch
* master ownership (unless via a cluster reconfig cycle).
*/
rval = -1;
goto out2;
}
/* Deleting self w/o forceflg */
rval = -1;
goto out2;
}
/*
* Setup the mediator record roll-back structure for a trad diskset.
*
* For a MN diskset, the deletion of a host in the diskset
* does not cause an update of the mediator record. If the
* host deletion will cause the diskset to be removed (this is
* the last host being removed or all hosts are being removed)
* then the mediator record must have already been removed by the
* user or this delete host operation will fail (a check for
* this is done later in this routine).
*/
if (!(MD_MNSET_DESC(sd))) {
rb_medr.med_rec_fl = 0;
for (i = 0; i < MD_MAXSIDES; i++)
rb_medr.med_rec_foff = 0;
/* Bring the mediator record up to date with the set record */
rval = -1;
goto out2;
}
}
/*
* For traditional diskset:
* Check to see if all the hosts we are trying to delete the set from
* have a set "setname" that is the same as ours, i.e. - same name,
* same time stamp, same genid. We only do this if forceflg is not
* specified or we are in OHA mode.
*/
int j;
for (i = 0; i < node_c; i++) {
/* We skip this side */
continue;
if (has_set < 0) {
/*
* Can't talk to the host only allowed in OHA
* mode.
*/
mdclrerror(ep);
continue;
}
/*
* We got an error we do not, or are not,
* prepared to handle.
*/
rval = -1;
goto out2;
}
mdclrerror(ep);
/*
* If we got here: both hosts are up; a host in
* our set record does not have the set. So we
* delete the host from our set and invalidate
* the node.
*/
/*
* If we delete a host, make sure the mediator
* hosts are made aware of this.
*/
for (j = 0; j < MD_MAXSIDES; j++) {
node_v[i]) != 0)
continue;
'\0', sizeof (md_node_nm_t));
}
if (rval == -1)
goto out2;
node_v[i][0] = '\0';
fix_node_v = TRUE;
continue;
}
/*
* If we can talk to the host, and they do not have the
* exact set, then we disallow the operation.
*/
rval = -1;
goto out2;
}
}
/*
* Here we prune the node_v's that were invalidated above.
*/
if (fix_node_v == TRUE) {
i = 0;
while (i < node_c) {
if (node_v[i][0] == '\0') {
for (j = i; (j + 1) < node_c; j++)
node_c--;
}
i++;
}
/*
* If we are left with no nodes, then we have
* completed the operation.
*/
if (node_c == 0) {
/*
* Inform the mediator hosts of the new node
* list
*/
for (i = 0; i < max_meds; i++) {
continue;
if (clnt_med_upd_rec(
ep))
mdclrerror(ep);
}
rval = 0;
goto out2;
}
}
}
/*
* For multinode diskset:
* If forceflag is FALSE then check to see if all the hosts we
* are trying to delete the set from have a set "setname" that
* is the same as ours, i.e. - same name, same time stamp, same genid.
* If forceflag is TRUE, then we don't care if the hosts being
* deleted have the same set information or not since user is forcing
* those hosts to be deleted.
*/
for (i = 0; i < node_c; i++) {
/* We skip this node since comparing against it */
continue;
if (has_set < 0) {
rval = -1;
goto out2;
}
/*
* If we can talk to the host, and they do not have the
* exact set, then we disallow the operation.
*/
rval = -1;
goto out2;
}
}
}
/*
* For traditional diskset:
* Can't allow user to delete their node (without deleting all nodes)
* out of a set in OHA mode, would leave a real mess.
* This action was already failed above for a MN diskset.
*/
/* Can directly return since !MN diskset; nothing to unlock */
}
/* Get the drive descriptors for this set */
rval = -1;
goto out2;
}
}
/*
* We have been asked to delete all the hosts in the set, i.e. - delete
* the whole set.
*/
/*
* This is only a valid operation if all drives have been
* removed first.
*/
rval = -1;
goto out2;
}
/*
* If a mediator is currently associated with this set,
* fail the deletion of the last host(s).
*/
rval = -1;
goto out2;
}
rval = -1;
goto out2;
}
remote_sets_deleted = 1;
goto out2;
}
/*
* Get timeout values in case we need to roll back
*/
rval = -1;
goto out2;
}
/*
* We need this around for re-adding DB side names later.
*/
rval = -1;
goto out2;
}
/*
* Alloc nodeid list if drives are present in diskset.
* nodeid list is used to reset mirror owners if the
* owner is a deleted node.
*/
if (MD_MNSET_DESC(sd)) {
}
}
/* Lock the set on current set members */
if (!(MD_MNSET_DESC(sd))) {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
mdclrerror(ep);
continue;
}
rval = -1;
goto out2;
}
lock_flag = 1;
}
}
if (MD_MNSET_DESC(sd)) {
/*
* Notify rpc.mdcommd on all nodes of a nodelist change.
* Start by suspending rpc.mdcommd (which drains it of
* all messages), then change the nodelist followed
* by a reinit and resume.
*/
while (nd) {
continue;
}
MD_MSCF_NO_FLAGS, ep)) {
rval = -1;
goto out2;
}
suspendall_flag = 1;
}
/*
* Is current set STALE?
* Need to know this if delete host fails and node
* is re-joined to diskset.
*/
(void) memset(&c, 0, sizeof (c));
c.c_id = 0;
rval = -1;
goto out2;
}
if (c.c_flags & MDDB_C_STALE) {
}
}
/*
* For each node being deleted, set DEL flag and
* reset OK flag on that node first.
* Until a node has turned off its own
* rpc.metad's NODE_OK flag, that node could be
* considered for master during a reconfig.
*/
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
break;
}
/* Something wrong, handle this in next loop */
continue;
/* If node_id_list is alloc'd, fill in for later use */
if (node_id_list)
/* All nodes are guaranteed to be ALIVE unless OHA */
continue;
}
/* Only changing my local cache of node list */
/* Set flags for del host to DEL on that host */
goto rollback;
}
}
for (i = 0; i < node_c; i++) {
/*
* Turn off owner flag in nodes to be deleted
* if this node has been joined.
* Also, turn off NODE_OK and turn on NODE_DEL
* for nodes to be deleted.
* These flags are used to set the node
* record flags in all nodes in the set.
* Only withdraw nodes that are joined.
*/
while (nd) {
/*
* Don't communicate with non-ALIVE node if
* in OHA - but set flags in master list so
* alive nodes are updated correctly.
*/
MD_MN_NODE_ALIVE))) {
continue;
}
/*
* Going to set locally cached
* node flags to rollback join
* so in case of error, the
* rollback code knows which
* nodes to re-join. rpc.metad
* ignores the RB_JOIN flag.
*/
/*
* Be careful in ordering of
* following steps so that
* recovery from a panic
* between the steps is viable.
* Only reset master info in
* rpc.metad - don't reset
* local cached info which will
* be used to set master info
* back if failure (rollback).
*/
if (clnt_withdrawset(
goto rollback;
/*
* Reset master on deleted node
*/
if (clnt_mnsetmaster(node_v[i],
ep))
goto rollback;
}
}
}
}
/*
* Now, reset owner and set delete flags for the
* deleted nodes on all nodes.
*/
while (nd) {
/* Skip non-ALIVE node if in OHA */
continue;
}
goto rollback;
}
}
/*
* Notify rpc.mdcommd on all nodes of a nodelist change.
* Send reinit command to mdcommd which forces it to get
* fresh set description.
*/
if (suspendall_flag) {
/* Send reinit */
while (nd) {
continue;
}
/* Class is ignored for REINIT */
MD_MSCF_NO_FLAGS, ep)) {
"Unable to reinit rpc.mdcommd.\n"));
goto rollback;
}
}
/* Send resume */
while (nd) {
continue;
}
"Unable to resume rpc.mdcommd.\n"));
goto rollback;
}
}
}
}
/*
* Mark the set record MD_SR_DEL on the hosts we are deleting
* If a MN diskset and OHA mode, don't issue RPC to nodes that
* are not ALIVE.
* If a MN diskset and not in OHA mode, then all nodes must respond
* to RPC (be alive) or this routine will return failure.
* If a traditional diskset, ignore all RPC failures if in OHA mode.
*/
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
break;
}
}
goto rollback;
/* Skip non-ALIVE node if in OHA mode */
continue;
} else {
goto rollback;
}
}
/*
* All nodes should be alive in non-oha mode.
*/
goto rollback;
}
} else {
/*
* For traditional diskset, issue the RPC and
* ignore RPC failure if in OHA mode.
*/
mdclrerror(ep);
continue;
}
goto rollback;
}
}
}
/* Delete the set on the hosts we are deleting */
if (node_id_list)
/*
* Failure during del_set_on_hosts would have recreated
* the diskset on the remote hosts, but for multi-owner
* disksets need to set node flags properly and REINIT and
* RESUME rpc.mdcommd, so just let the rollback code
* do this.
*/
if (MD_MNSET_DESC(sd))
goto rollback;
return (-1);
}
remote_sets_deleted = 1;
/* Delete the host from sets on hosts not being deleted */
if (MD_MNSET_DESC(sd)) {
/* All nodes are guaranteed to be ALIVE unless in oha mode */
while (nd) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
continue;
}
/* Skip nodes being deleted */
continue;
}
ep) == -1) {
goto rollback;
}
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes being deleted */
continue;
ep) == -1) {
mdclrerror(ep);
continue;
}
goto rollback;
}
}
}
/* We have drives */
/*
* Delete the old sidename for each drive on all the hosts.
* If a multi-node diskset, each host only stores
* the side information for itself. So, a multi-node
* diskset doesn't delete the old sidename for
* an old host.
*
* If a MN diskset, reset owners of mirrors that are
* owned by the deleted nodes.
*/
if (!(MD_MNSET_DESC(sd))) {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes being deleted */
continue;
ep)) {
mdclrerror(ep);
continue;
}
goto rollback;
}
}
} else {
/* All nodes guaranteed ALIVE unless in oha mode */
while (nd) {
/*
* If mirror owner was set to a deleted node,
* then each existing node resets mirror owner
* to NULL.
*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
continue;
}
/* Skip nodes being deleted */
continue;
}
/*
* If mirror owner is a deleted node, reset
* mirror owners to NULL. If an error occurs,
* print a warning and continue. Don't fail
* metaset because of mirror owner reset
* problem since next node to grab mirror
* will resolve this issue. Before next node
* grabs mirrors, metaset will show the deleted
* node as owner which is why an attempt to
* reset the mirror owner is made.
*/
"Unable to reset mirror owner on"
mdclrerror(&xep);
}
}
}
}
/*
* Bring the mediator record up to date with the set record for
* traditional diskset.
*/
if (!(MD_MNSET_DESC(sd))) {
for (i = 0; i < MD_MAXSIDES; i++) {
'\0', sizeof (md_node_nm_t));
else
}
/* Inform the mediator hosts of the new node list */
for (i = 0; i < max_meds; i++) {
continue;
mdclrerror(ep);
continue;
}
goto rollback;
}
}
}
/*
* For traditional diskset:
* We are deleting ourselves out of the set and we have drives to
* consider; so we need to halt the set, release the drives and
* reset the timeout. **** THIS IS A ONE WAY TICKET, NO ROLL BACK
* IS POSSIBLE AS SOON AS THE HALT SET COMPLETES, SO THIS IS DONE
* WITH ALL SIGNALS BLOCKED AND LAST ****
*
* This situation cannot occur in a MN diskset since a node can't
* delete itself unless all nodes are being deleted and a diskset
* cannot contain any drives if all nodes are being deleted.
* So, don't even test for this if a MN diskset.
*/
/* Make sure we are blocking all signals */
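/*
 * Illustrative sketch only (not the original code): blocking all
 * signals with the standard POSIX interfaces (requires <signal.h>)
 * around the irreversible halt/release sequence described above.
 */
#if 0
	sigset_t allsigs, oldsigs;

	(void) sigfillset(&allsigs);
	(void) sigprocmask(SIG_BLOCK, &allsigs, &oldsigs);
	/* ... halt the set, release the drives, reset the timeout ... */
	(void) sigprocmask(SIG_SETMASK, &oldsigs, NULL);
#endif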
rval = -1;
goto out1;
}
rval = -1;
goto out1;
}
rval = -1;
out1:
/* release signals back to what they were on entry */
if (rval == 0)
rval = -1;
}
}
out2:
/*
* Unlock diskset by resuming messages across the diskset.
* Just resume all classes so that resume is the same whether
* just one class was locked or all classes were locked.
*/
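/*
 * Illustrative sketch only (not the original code): resuming
 * rpc.mdcommd message traffic on one node.  It assumes the
 * clnt_mdcommdctl()/COMMDCTL_RESUME interface used by libmeta;
 * the class argument shown is an assumption.
 */
#if 0
	if (clnt_mdcommdctl(nd->nd_nodename, COMMDCTL_RESUME, sp,
	    MD_MSG_CLASS0, MD_MSCF_NO_FLAGS, ep)) {
		mde_perror(ep, dgettext(TEXT_DOMAIN,
		    "Unable to resume rpc.mdcommd.\n"));
		if (rval == 0)
			rval = -1;
	}
#endif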
if ((suspend1_flag) || (suspendall_flag)) {
/* Send resume */
while (nd) {
continue;
}
/*
* Skip nodes being deleted if remote set
* was deleted since rpc.mdcommd may no longer
* be running on remote node.
*/
if ((remote_sets_deleted == 1) &&
continue;
}
if (rval == 0)
rval = -1;
"Unable to resume rpc.mdcommd.\n"));
}
}
}
if (lock_flag) {
if (MD_MNSET_DESC(sd)) {
while (nd) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
continue;
}
if (rval == 0)
rval = -1;
}
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
mdanyrpcerror(&xep)) {
mdclrerror(&xep);
continue;
}
if (rval == 0)
rval = -1;
}
}
}
}
out3:
if (node_id_list)
if (MD_MNSET_DESC(sd)) {
/* release signals back to what they were on entry */
mdclrerror(&xep);
} else {
}
return (rval);
/* all signals already blocked for MN diskset */
if (!(MD_MNSET_DESC(sd))) {
mdclrerror(&xep);
}
rval = -1;
/*
* Send reinit command to rpc.mdcommd which forces it to get
* fresh set description and resume all classes but class 0.
* Don't send any commands to rpc.mdcommd if set on that node
* has been removed.
*/
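/*
 * Illustrative sketch only (not the original code): the REINIT
 * request described above, assuming the clnt_mdcommdctl()/
 * COMMDCTL_REINIT interface used by libmeta; the class argument
 * is an assumption (it is ignored for REINIT).
 */
#if 0
	if (clnt_mdcommdctl(nd->nd_nodename, COMMDCTL_REINIT, sp,
	    MD_MSG_CLASS0, MD_MSCF_NO_FLAGS, &xep)) {
		mde_perror(&xep, dgettext(TEXT_DOMAIN,
		    "Unable to reinit rpc.mdcommd.\n"));
		mdclrerror(&xep);
	}
#endif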
if (suspendall_flag) {
/* Send reinit */
while (nd) {
continue;
}
/*
* If the remote set was deleted, rpc.mdcommd
* may no longer be running so send nothing to it.
*/
if ((remote_sets_deleted == 1) &&
continue;
}
/* Class is ignored for REINIT */
"Unable to reinit rpc.mdcommd.\n"));
mdclrerror(&xep);
}
}
/* Send resume */
while (nd) {
continue;
}
/*
* If the remote set was deleted, rpc.mdcommd
* may no longer be running so send nothing to it.
*/
if ((remote_sets_deleted == 1) &&
continue;
}
&xep)) {
"Unable to resume rpc.mdcommd.\n"));
mdclrerror(&xep);
}
}
}
/* level 2 */
if (rb_level > 1) {
/*
* Lock out other meta* commands on nodes with the newly
* re-created sets by suspending class 1 messages
* across the diskset.
*/
while (nd) {
/* Skip nodes not being deleted */
continue;
}
/* Suspend commd on nodes with re-created sets */
MD_MSCF_NO_FLAGS, &xep)) {
"Unable to suspend rpc.mdcommd.\n"));
mdclrerror(&xep);
}
}
max_genid++;
/*
* See if we have to re-add the drives specified.
*/
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
== 0) {
break;
}
}
if (nd == 0)
continue;
continue;
}
/* Don't care if set record is MN or not */
&xep) == -1) {
mdclrerror(&xep);
continue;
}
/* Drive already added, skip to next node */
/*
* Set record structure was allocated from RPC
* routine getset so this structure is only of
* size md_set_record even if the MN flag is
* set. So, clear the flag so that the free
* code doesn't attempt to free a structure
* the size of md_mnset_record.
*/
continue;
}
mdclrerror(&xep);
&xep) == -1)
mdclrerror(&xep);
/*
* Set record structure was allocated from RPC routine
* getset so this structure is only of size
* md_set_record even if the MN flag is set. So,
* clear the flag so that the free code doesn't
* attempt to free a structure the size of
* md_mnset_record.
*/
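/*
 * Illustrative sketch only (not the original code) of the guard
 * described above; the sr_flags field, MD_SR_MN flag bit and
 * free_sr() routine are assumptions about the libmeta interfaces.
 */
#if 0
	if (MD_MNSET_REC(sr))
		sr->sr_flags &= ~MD_SR_MN;	/* free as md_set_record */
	free_sr(sr);
#endif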
}
max_genid += 3;
/*
* This is not the first replica being added to the
* diskset so call with ADDSIDENMS_BCAST. If this
* is a traditional diskset, the bcast flag is ignored
* since traditional disksets don't use the rpc.mdcommd.
*/
mdclrerror(&xep);
}
/*
* Add the device names for the new sides into the namespace,
* on all hosts not being deleted.
*/
if (MD_MNSET_DESC(sd)) {
while (nd) {
/* Find a node that is not being deleted */
node_v)) {
break;
}
}
} else {
for (j = 0; j < MD_MAXSIDES; j++) {
/* Skip empty slots */
continue;
/* Find a node that is not being deleted */
break;
}
}
if (MD_MNSET_DESC(sd)) {
while (nd) {
/* Skip nodes not being deleted */
node_v)) {
continue;
}
/* this side was just created, add the names */
mdclrerror(&xep);
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Skip nodes not being deleted */
continue;
/* this side was just created, add the names */
mdclrerror(&xep);
}
}
}
/* level 4 */
/*
* Add the new sidename for each drive to all the hosts
* Multi-node disksets only store the sidename for
* that host, so there is nothing to re-add.
*/
if (!(MD_MNSET_DESC(sd))) {
for (j = 0; j < MD_MAXSIDES; j++) {
/* Skip empty slots */
continue;
/* Skip nodes not being deleted */
break;
}
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
&xep))
mdclrerror(&xep);
}
}
}
/* level 5 */
/* rollback the mediator record */
for (i = 0; i < max_meds; i++) {
continue;
mdclrerror(&xep);
}
}
/* level 3 */
if (rb_level > 2) {
if (MD_MNSET_DESC(sd)) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
continue;
}
/* Record should be for a multi-node diskset */
mdclrerror(&xep);
continue;
}
has_set = 1;
while (nr) {
break;
}
}
has_set = 0;
if (has_set) {
continue;
}
mdclrerror(&xep);
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
/* Record should be for a non-multi-node set */
mdclrerror(&xep);
continue;
}
/*
* Set record structure was allocated from RPC
* routine getset so this structure is only of
* size md_set_record even if the MN flag is
* set. So, clear the flag so that the free
* code doesn't attempt to free a structure
* the size of md_mnset_record.
*/
if (MD_MNSET_REC(sr)) {
continue;
}
has_set = 1;
for (j = 0; j < MD_MAXSIDES; j++) {
/* Skip empty slots */
continue;
has_set = 0;
break;
}
}
if (has_set)
continue;
mdclrerror(&xep);
}
}
max_genid++;
}
/* level 1 */
if (rb_level > 0) {
max_genid++;
/* Sets MD_SR_OK on given nodes. */
/*
* For MN diskset:
* On each newly re-added node, set the node record for that
* node to OK. Then set all node records for the newly added
* nodes on all nodes to ok.
*
* By setting a node's own node record to ok first, even if
* the node re-adding the hosts panics, the rest of the nodes
* can determine the same node list during the choosing of the
* master during reconfig. So, only nodes considered for
* mastership are nodes that have both MD_MN_NODE_OK and
* MD_SR_OK set on that node's rpc.metad. If all nodes have
* MD_SR_OK set, but no node has its own MD_MN_NODE_OK set,
* then the set will be removed during reconfig since a panic
* occurred while the diskset was being re-created during the
* rollback of the host deletion.
*/
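/*
 * Illustrative ordering sketch only (not the original code): each
 * re-added node first marks its own node record OK, then the
 * re-added records are marked OK on every node.  The helper name
 * mark_node_record_ok() is hypothetical; sd_nodelist, nd_nodename
 * and nd_next follow the nodelist fields used elsewhere in this
 * file.
 */
#if 0
	for (i = 0; i < node_c; i++)
		(void) mark_node_record_ok(node_v[i], node_v[i]);
	for (nd = sd->sd_nodelist; nd != NULL; nd = nd->nd_next)
		for (i = 0; i < node_c; i++)
			(void) mark_node_record_ok(nd->nd_nodename,
			    node_v[i]);
#endif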
if (MD_MNSET_DESC(sd)) {
/*
* Notify rpc.mdcommd on all nodes of a
* nodelist change. Start by suspending
* rpc.mdcommd (which drains it of all
* messages), then change the nodelist
* followed by a reinit and resume.
*/
while (nd) {
MD_MN_NODE_ALIVE)) {
continue;
}
MD_MSCF_NO_FLAGS, &xep)) {
"Unable to suspend "
"rpc.mdcommd.\n"));
mdclrerror(&xep);
}
suspendall_flag_rb = 1;
}
}
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
== 0)
break;
}
/* Something wrong, finish this in next loop */
continue;
continue;
}
/* Set master on re-joining node. */
mdclrerror(&xep);
}
/*
* Re-join set to same state as
* before - stale or non-stale.
*/
stale_flag, &xep)) {
mdclrerror(&xep);
}
}
/* Only changing my local cache of node list */
/* Set record for host to ok on that host */
mdclrerror(&xep);
}
}
/* Now set all node records on all nodes to be ok */
while (nd) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
continue;
}
mdclrerror(&xep);
}
}
}
}
/*
* Notify rpc.mdcommd on all nodes of a nodelist change.
* Send reinit command to mdcommd which forces it to get
* fresh set description.
*/
if (suspendall_flag_rb) {
/* Send reinit */
while (nd) {
continue;
}
/* Class is ignored for REINIT */
"Unable to reinit rpc.mdcommd.\n"));
mdclrerror(&xep);
}
}
}
/*
* Unlock diskset by resuming messages across the diskset.
* Just resume all classes so that resume is the same whether
* just one class was locked or all classes were locked.
*/
/* Send resume */
while (nd) {
continue;
}
"Unable to resume rpc.mdcommd.\n"));
}
}
}
/*
* Start a resync thread on the re-added nodes
* if set is not stale. Also start a thread to update the
* abr state of all soft partitions
*/
if (stale_flag != MNSET_IS_STALE) {
for (i = 0; i < node_c; i++) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
while (nd) {
== 0)
break;
}
continue;
continue;
}
if (dd != 0) {
	if (clnt_mn_mirror_resync_all(node_v[i],
	    sp->setno, &xep)) {
		mde_perror(&xep, dgettext(TEXT_DOMAIN,
		    "Unable to start resync "
		    "thread.\n"));
		mdclrerror(&xep);
	}
	if (clnt_mn_sp_update_abr(node_v[i],
	    sp->setno, &xep)) {
		mde_perror(&xep, dgettext(TEXT_DOMAIN,
		    "Unable to start sp update "
		    "thread.\n"));
		mdclrerror(&xep);
	}
}
}
}
/* level 0 */
/* Don't test lock flag since guaranteed to be set if in rollback */
if (MD_MNSET_DESC(sd)) {
while (nd) {
/*
* During OHA mode, don't issue RPCs to
* non-alive nodes since there is no reason to
* wait for RPC timeouts.
*/
continue;
}
mdclrerror(&xep);
}
} else {
for (i = 0; i < MD_MAXSIDES; i++) {
/* Skip empty slots */
continue;
mdclrerror(&xep);
}
}
/* release signals back to what they were on entry */
mdclrerror(&xep);
if (node_id_list)
if (!(MD_MNSET_DESC(sd))) {
}
return (rval);
}
int
int take_val,
)
{
int i;
int rval = 0;
char *hostname;
return (-1);
/* Make sure we own the set */
return (-1);
/* Lock the set on our side */
rval = -1;
goto out;
}
if (take_val) {
/* enable auto_take but only if it is not already set */
/* verify that we're the only host in the set */
for (i = 0; i < MD_MAXSIDES; i++) {
continue;
rval = -1;
goto out;
}
}
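/*
 * Illustrative sketch only (not the original code) of the
 * single-host check above; sd (the set descriptor), sd_nodes and
 * mynode() are assumptions about the libmeta interfaces.
 */
#if 0
	for (i = 0; i < MD_MAXSIDES; i++) {
		if (sd->sd_nodes[i][0] == '\0')
			continue;	/* empty slot */
		if (strcmp(sd->sd_nodes[i], mynode()) != 0) {
			rval = -1;	/* another host is in the set */
			goto out;
		}
	}
#endif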
rval = -1;
/* Disable SCSI reservations */
PRINT_FAST, &xep);
else
&xep);
mdclrerror(&xep);
mdclrerror(&xep);
}
}
} else {
/* disable auto_take, if set, or error */
rval = -1;
/* Enable SCSI reservations */
PRINT_FAST, &xep);
else
&xep);
mdclrerror(&xep);
mdclrerror(&xep);
}
} else {
rval = -1;
}
}
out:
if (rval == 0)
rval = -1;
}
return (rval);
}