md.c revision 44dc7d114800c45c22c623ec0ee6a468f2a5b11b
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
 * Md is the meta-disk driver. It sits below the UFS file system
 * but above the 'real' disk drivers, xy, id, sd etc.
*
* To the UFS software, md looks like a normal driver, since it has
* the normal kinds of entries in the bdevsw and cdevsw arrays. So
* UFS accesses md in the usual ways. In particular, the strategy
* routine, mdstrategy(), gets called by fbiwrite(), ufs_getapage(),
* and ufs_writelbn().
*
* Md maintains an array of minor devices (meta-partitions). Each
* meta partition stands for a matrix of real partitions, in rows
* which are not necessarily of equal length. Md maintains a table,
* with one entry for each meta-partition, which lists the rows and
* columns of actual partitions, and the job of the strategy routine
* is to translate from the meta-partition device and block numbers
* known to UFS into the actual partitions' device and block numbers.
*
* See below, in mdstrategy(), mdreal(), and mddone() for details of
* this translation.
*/
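/*
 * Illustrative sketch only (not part of this driver): the flavor of
 * arithmetic mdstrategy() performs when it translates a meta-partition
 * block number into a (component, block) pair. This example assumes a
 * simple concatenation of 'nrows' components whose sizes are given in
 * row_blocks[]; all names here are hypothetical.
 */
#if 0
static int
md_example_translate(long meta_blkno, int nrows, const long *row_blocks,
    int *rowp, long *offsetp)
{
	int row;

	for (row = 0; row < nrows; row++) {
		if (meta_blkno < row_blocks[row]) {
			*rowp = row;		/* component receiving the I/O */
			*offsetp = meta_blkno;	/* block within that component */
			return (0);
		}
		meta_blkno -= row_blocks[row];	/* skip this component */
	}
	return (-1);				/* past the end of the metadevice */
}
#endif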
/*
* Driver for Virtual Disk.
*/
#include <sys/sysmacros.h>
#include <sys/priv_names.h>
#ifndef lint
char _depends_on[] = "strmod/rpcmod";
#endif /* lint */
int md_init_debug = 0; /* module binding debug */
/*
* Tunable to turn off the failfast behavior.
*/
int md_ff_disable = 0;
/*
 * Dynamically allocated list of non-failfast (FF) driver names; needs
 * to be freed when md is detached.
*/
char **non_ff_drivers = NULL;
extern char svm_bootpath[];
#define SVM_PSEUDO_STR "/pseudo/md@0:"
#define VERSION_LENGTH 6
#define VERSION "1.0"
/*
* Keep track of possible 'orphan' entries in the name space
*/
int *md_nm_snarfed = NULL;
/*
* Global tunable giving the percentage of free space left in replica during
* conversion of non-devid style replica to devid style replica.
*/
int md_conv_perc = MDDB_DEVID_CONV_PERC;
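/*
 * Illustrative sketch only: the snarf path (see the devid conversion
 * logic below) pads the number of blocks required for conversion by
 * this percentage before comparing it with the replica's free block
 * count; roughly the following, assuming a simple percentage pad.
 */
#if 0
	if ((md_conv_perc > 0) && (md_conv_perc <= 100))
		cvt_blks += (cvt_blks * md_conv_perc) / 100;
#endif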
#ifdef DEBUG
/* debug code to verify framework exclusion guarantees */
int md_in;
#define IN_INIT 0x01
#define IN_FINI 0x02
#define IN_ATTACH 0x04
#define IN_DETACH 0x08
#define IN_OPEN 0x10
#define MD_SET_IN(x) { \
mutex_enter(&md_in_mx); \
if (md_in) \
debug_enter("MD_SET_IN exclusion lost"); \
if (md_in & x) \
debug_enter("MD_SET_IN already set"); \
md_in |= x; \
mutex_exit(&md_in_mx); \
}
#define MD_CLR_IN(x) { \
mutex_enter(&md_in_mx); \
if (md_in & ~(x)) \
debug_enter("MD_CLR_IN exclusion lost"); \
if (!(md_in & x)) \
debug_enter("MD_CLR_IN already clr"); \
md_in &= ~x; \
mutex_exit(&md_in_mx); \
}
#else /* DEBUG */
#define MD_SET_IN(x)
#define MD_CLR_IN(x)
#endif /* DEBUG */
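/*
 * Illustrative usage only: a marked entry point brackets its body with
 * the markers defined above, so that overlapping entry into _init,
 * _fini, attach, detach or open trips debug_enter() on DEBUG kernels.
 */
#if 0
	MD_SET_IN(IN_ATTACH);
	/* ... attach-time work that must not overlap other entry points ... */
	MD_CLR_IN(IN_ATTACH);
#endif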
/*
* list things protected by md_mx even if they aren't
* used in this file.
*/
int md_status = 0; /* global status for the meta-driver */
int md_num_daemons = 0;
int md_ioctl_cnt = 0;
int md_mtioctl_cnt = 0; /* multithreaded ioctl cnt */
int (*mdv_strategy_tstpnt)(buf_t *, int, void*);
int md_nmedh = 0;
char *md_med_trans_lst = NULL;
int md_in_upgrade;
int md_keep_repl_state;
int md_devid_destroy;
/* for sending messages through a door to userland */
int mdmn_door_did = -1;
static int mdprop_op(dev_t, dev_info_t *, ddi_prop_op_t, int, char *,
caddr_t, int *);
static struct cb_ops md_cb_ops = {
mdopen, /* open */
mdclose, /* close */
mdstrategy, /* strategy */
nodev, /* print routine -- none yet */
mddump, /* dump */
mdread, /* read */
mdwrite, /* write */
mdioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
mdprop_op, /* prop_op */
0, /* streamtab */
(D_64BIT|D_MP|D_NEW), /* driver compatibility flag */
CB_REV, /* cb_ops version */
mdaread, /* aread */
mdawrite, /* awrite */
};
static struct dev_ops md_devops = {
DEVO_REV, /* dev_ops version */
0, /* device reference count */
mdinfo, /* info routine */
nulldev, /* identify routine */
nulldev, /* probe - not defined */
mdattach, /* attach routine */
mddetach, /* detach routine */
nodev, /* reset - not defined */
&md_cb_ops, /* driver operations */
NULL, /* bus operations */
nodev /* power management */
};
/*
* loadable module wrapper
*/
static struct modldrv modldrv = {
&mod_driverops, /* type of module -- a pseudodriver */
"Solaris Volume Manager base module %I%", /* name of the module */
&md_devops, /* driver ops */
};
static struct modlinkage modlinkage = {
MODREV_1,
(void *)&modldrv,
NULL
};
/* md_medd.c */
extern void med_init(void);
extern void med_fini(void);
/* md_names.c */
extern int remove_entry(struct nm_next_hdr *,
int md_maxphys = 0; /* maximum io size in bytes */
unsigned md_maxbcount = 0; /* maximum physio size in bytes */
void
md_global_alloc_free(int alloc)
{
set_t s;
if (alloc) {
/* initialize driver global locks */
/* initialize per set driver global locks */
for (s = 0; s < MD_MAXSETS; s++) {
/* initialize per set driver globals locks */
}
} else {
/* destroy per set driver global locks */
for (s = 0; s < MD_MAXSETS; s++) {
}
/* destroy driver global locks */
cv_destroy(&md_cv);
}
}
int
_init(void)
{
set_t s;
int err;
/* allocate dynamic space associated with driver globals */
/* initialize driver globals */
/* initialize tunable globals */
if (md_maxphys == 0) /* maximum io size in bytes */
if (md_maxbcount == 0) /* maximum physio size in bytes */
/* initialize per set driver globals */
for (s = 0; s < MD_MAXSETS; s++)
/*
* NOTE: the framework does not currently guarantee exclusion
* between _init and attach after calling mod_install.
*/
md_global_alloc_free(0); /* free dynamic space */
}
return (err);
}
int
_fini(void)
{
int err;
/*
* NOTE: the framework currently does not guarantee exclusion
* with attach until after mod_remove returns 0.
*/
if ((err = mod_remove(&modlinkage)) != 0)
return (err);
md_global_alloc_free(0); /* free dynamic space */
return (err);
}
int
_info(struct modinfo *modinfop)
{
return (mod_info(&modlinkage, modinfop));
}
/* ARGSUSED */
static int
mdattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
int len;
unit_t i;
char ver[VERSION_LENGTH];
char **maj_str_array;
md_in_upgrade = 0;
md_keep_repl_state = 0;
md_devid_destroy = 0;
if (cmd != DDI_ATTACH) {
return (DDI_FAILURE);
}
if (md_devinfo != NULL) {
return (DDI_FAILURE);
}
mddb_init();
if (md_start_daemons(TRUE)) {
mddb_unload(); /* undo mddb_init() allocations */
return (DDI_FAILURE);
}
/* clear the halted state */
/* see if the diagnostic switch is on */
DDI_PROP_DONTPASS, "md_init_debug", 0))
/* see if the failfast disable switch is on */
DDI_PROP_DONTPASS, "md_ff_disable", 0))
/* try and get the md_nmedh property */
/* try and get the md_med_trans_lst property */
len = 0;
len == 0) {
} else {
}
}
/*
 * Must initialize the internal data structures before any possible
 * call to 'goto attach_failure', as the _fini routine references
 * them.
 */
med_init();
/* try and get the md_xlate property */
/* Should we only do this if upgrade? */
len = sizeof (char) * 5;
len = 0;
PROP_LEN_AND_VAL_ALLOC, 0, "md_xlate",
if (md_init_debug)
"md_xlate ddi_prop_op failed");
goto attach_failure;
} else {
md_in_upgrade = 1;
}
/* Get target's name to major table */
"md_targ_nm_table", &maj_str_array,
&md_majortab_len) != DDI_PROP_SUCCESS) {
md_majortab_len = 0;
if (md_init_debug)
"ddi_prop_lookup_string_array failed");
goto attach_failure;
}
(struct md_xlate_major_table *)
sizeof (struct md_xlate_major_table), KM_SLEEP);
for (i = 0; i < md_majortab_len; i++) {
/* Getting major name */
continue;
*str = '\0';
md_strdup(maj_str_array[i]);
/* Simplified atoi to get major number */
md_major_tuple_table[i].targ_maj = 0;
md_major_tuple_table[i].targ_maj +=
*str2++ - '0';
}
*str = ' ';
}
ddi_prop_free((void *)maj_str_array);
} else {
if (md_init_debug)
goto attach_failure;
}
}
/*
* Check for properties:
* md_keep_repl_state and md_devid_destroy
* and set globals if these exist.
*/
0, "md_keep_repl_state", 0);
0, "md_devid_destroy", 0);
if (MD_UPGRADE)
else
md_major_targ = 0;
/* allocate admin device node */
goto attach_failure;
goto attach_failure;
goto attach_failure;
/* these could have been cleared by a detach */
sz = sizeof (void *) * MD_MAXUNITS;
md_devinfo = dip;
/*
 * Only allocate a device node for the root mirror metadevice.
 * Don't pre-allocate unnecessary device nodes (which would slow
 * down boot when we attach).
* We can't read the mddbs in attach. The mddbs will be read
* by metainit during the boot process when it is doing the
* auto-take processing and any other minor nodes will be
* allocated at that point.
*
* There are two scenarios to be aware of here:
* 1) when we are booting from a mirrored root we need the root
* metadevice to exist very early (during vfs_mountroot processing)
* 2) we need all of the nodes to be created so that any mnttab entries
* will succeed (handled by metainit reading the mddb during boot).
*/
== 0) {
char *p;
int mnum = 0;
/*
 * The svm_bootpath string contains the minor number of the root
 * metadevice in ascii, so we need to set the pointer p onto its
 * first digit and convert it from ascii.
 */
*p >= '0' && *p <= '9'; p++) {
mnum *= 10;
mnum += *p - '0';
}
if (md_create_minor_node(0, mnum)) {
goto attach_failure;
}
}
return (DDI_SUCCESS);
attach_failure:
/*
* Use our own detach routine to toss any stuff we allocated above.
* NOTE: detach will call md_halt to free the mddb_init allocations.
*/
return (DDI_FAILURE);
}
/* ARGSUSED */
static int
mddetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
extern int check_active_locators();
set_t s;
int len;
/* check command */
if (cmd != DDI_DETACH) {
return (DDI_FAILURE);
}
/*
 * If we have not already halted and we have no active config,
 * then automatically initiate a halt so we can detach.
 */
if (!(md_get_status() & MD_GBL_HALTED)) {
if (check_active_locators() == 0) {
/*
* NOTE: a successful md_halt will have done the
* mddb_unload to free allocations done in mddb_init
*/
if (md_halt(MD_NO_GBL_LOCKS_HELD)) {
"Could not halt Solaris Volume Manager");
return (DDI_FAILURE);
}
}
/* fail detach if we have not halted */
if (!(md_get_status() & MD_GBL_HALTED)) {
return (DDI_FAILURE);
}
}
/* must be in halted state, this will be cleared on next attach */
/* cleanup attach allocations and initializations */
md_major_targ = 0;
for (s = 0; s < md_nsets; s++) {
}
}
}
md_nunits = 0;
md_nsets = 0;
md_nmedh = 0;
if (non_ff_drivers != NULL) {
int i;
for (i = 0; non_ff_drivers[i] != NULL; i++)
/* free i+1 entries because there is a null entry at list end */
}
if (md_med_trans_lst != NULL) {
}
}
}
if (MD_UPGRADE) {
md_in_upgrade = 0;
}
/*
* Undo what we did in mdattach, freeing resources
* and removing things we installed. The system
* framework guarantees we are not active with this devinfo
* node in any other entry points at this time.
*/
med_fini();
md_devinfo = NULL;
return (DDI_SUCCESS);
}
/*
* Given the device number return the devinfo pointer
* given to md via md_attach
*/
/*ARGSUSED*/
static int
mdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
int error = DDI_FAILURE;
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
if (md_devinfo) {
*result = (void *)md_devinfo;
error = DDI_SUCCESS;
}
break;
case DDI_INFO_DEVT2INSTANCE:
*result = (void *)0;
error = DDI_SUCCESS;
break;
}
return (error);
}
/*
 * Property operation routine.  Return the number of blocks for the
 * partition in question, or forward the request to the property
 * facilities.
 */
static int
mdprop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
int mod_flags, /* property flags */
char *name, /* name of property */
caddr_t valuep, /* property value */
int *lengthp) /* put length of property here */
{
mdi_unit_t *ui;
/*
* Our dynamic properties are all device specific and size oriented.
* Requests issued under conditions where size is valid are passed
* to ddi_prop_op_nblocks with the size information, otherwise the
* request is passed to ddi_prop_op. Make sure that the minor device
* is a valid part of the Virtual Disk subsystem.
*/
} else {
goto pass;
}
/* get nblocks value */
}
}
static void
{
continue;
if (status == MDDB_STALE)
continue;
if (status == MDDB_NODATA) {
continue;
}
}
}
static void
{
int li;
int drv_index;
char *name;
char *suffix;
int alloc_sz;
" Additional Blocks Needed: %d\n\n"
" Increase size of following replicas for\n"
" device relocatability by deleting listed\n"
" replica and re-adding replica with\n"
" increased size (see metadb(1M)):\n"
" Replica Increase By",
continue;
ib = 0;
}
if (ib == 0)
continue;
if (ib < max_blk_needed) {
&sn);
" %s (%s:%d:%d) %d blocks",
(max_blk_needed - ib));
}
}
}
/*
* md_create_minor_node:
* Create the minor device for the given set and un_self_id.
*
* Input:
* setno - set number
* mnum - selfID of unit
*
* Output:
* None.
*
* Returns 0 for success, 1 for failure.
*
* Side-effects:
* None.
*/
int
md_create_minor_node(set_t setno, minor_t mnum)
{
char name[20];
/* Check for valid arguments */
return (1);
return (1);
return (1);
return (0);
}
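/*
 * Illustrative sketch only: creating a unit's nodes conventionally
 * means one block and one character minor node via
 * ddi_create_minor_node(9F).  The node name and minor-number encoding
 * shown here are hypothetical, not necessarily the driver's scheme.
 */
#if 0
	(void) snprintf(name, sizeof (name), "%u,%u,blk", setno, mnum);
	if (ddi_create_minor_node(md_devinfo, name, S_IFBLK,
	    mnum, DDI_PSEUDO, 0) != DDI_SUCCESS)
		return (1);

	(void) snprintf(name, sizeof (name), "%u,%u,raw", setno, mnum);
	if (ddi_create_minor_node(md_devinfo, name, S_IFCHR,
	    mnum, DDI_PSEUDO, 0) != DDI_SUCCESS)
		return (1);
#endif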
/*
* For a given key check if it is an orphaned record.
* The following conditions are used to determine an orphan.
* 1. The device associated with that key is not a metadevice.
* 2. If DEVID_STYLE then the physical device does not have a device Id
* associated with it.
*
* If a key does not have an entry in the devid namespace it could be
* a device that does not support device ids. Hence the record is not
* deleted.
*/
static int
{
mddb_set_t *s;
== NULL)
return (0);
/*
* If devid style is set then get the dev_t using MD_NOTRUST_DEVT
*/
return (0);
NULL)
return (1);
}
return (0);
}
int
{
int err = 0;
int i;
mddb_set_t *s;
struct nm_next_hdr *nh;
int size;
int devid_flag;
int retval;
int un_next_set = 0;
mutex_enter(&md_mx);
mutex_exit(&md_mx);
return (0);
}
mutex_exit(&md_mx);
if (! (md_get_status() & MD_GBL_DAEMONS_LIVE)) {
if (md_start_daemons(TRUE)) {
err = -1;
goto out;
}
}
/*
* Load the devid name space if it exists
*/
/*
* Unload the devid namespace
*/
err = -1;
goto out;
}
/*
* If replica is in non-devid state, convert if:
* - not in probe during upgrade (md_keep_repl_state = 0)
* - enough space available in replica
* - local set
* - not a multi-node diskset
* - clustering is not present (for non-local set)
*/
devid_flag = 0;
devid_flag = 1;
if (setno != MD_LOCAL_SET)
devid_flag = 0;
if (MD_MNSET_SETNO(setno))
devid_flag = 0;
devid_flag = 0;
/*
* if we weren't devid style before and md_keep_repl_state=1
* we need to stay non-devid
*/
if ((md_keep_repl_state == 1) &&
devid_flag = 0;
if (devid_flag) {
/*
* Determine number of free blocks needed to convert
* entire replica to device id format - locator blocks
* and namespace.
*/
cvt_blks = 0;
if (mddb_lb_did_convert(s, 0, &cvt_blks) != 0) {
err = -1;
goto out;
}
/* add MDDB_DEVID_CONV_PERC% */
}
if (cvt_blks <= s->s_freeblkcnt) {
err = -1;
goto out;
}
} else {
/*
* Print message that replica can't be converted for
* lack of space. No failure - just continue to
* run without device ids.
*/
"Unable to add Solaris Volume Manager device "
"relocation data.\n"
" To use device relocation feature:\n"
" - Increase size of listed replicas\n"
" - Reboot");
"Loading set without device relocation data.\n"
" Solaris Volume Manager disk movement "
"not tracked in local set.");
}
}
/*
 * go through and load any modules referenced in
 * the database
 */
if (status == MDDB_STALE) {
"md: state database is stale");
}
} else if (status == MDDB_NODATA) {
continue;
}
if (drvrid < MDDB_FIRST_MODID)
continue;
drvrid) < 0) {
}
}
if (recid < 0)
goto out;
/*
 * Initialize the md_nm_snarfed array.  This array is indexed by
 * the key and is set by md_getdevnum during snarf time.
 */
r_next_key) * (sizeof (int)));
}
/*
* go through and snarf until nothing gets added
*/
do {
i = 0;
if (retval == -1) {
err = -1;
/* Don't know the failed unit */
0);
(void) mddb_unload_set(setno);
return (err);
} else {
i += retval;
}
}
}
} while (i);
/*
* Set the first available slot and availability
*/
continue;
} else {
if (!un_next_set) {
un_next_set = 1;
}
}
}
if (privat & MD_PRV_COMMIT) {
if (mddb_commitrec(recid)) {
"md: state database is stale");
}
}
}
}
/* Deletes must happen after all the commits */
if (privat & MD_PRV_DELETE) {
if (mddb_deleterec(recid)) {
"md: state database is stale");
}
}
}
}
/*
* go through and clean up records until nothing gets cleaned up.
*/
do {
i = 0;
} while (i);
if (md_nm_snarfed != NULL &&
/*
 * go through and clean up the namespace and the
 * device id namespace
 */
for (key = 1;
key++) {
/*
* Is the entry an 'orphan'?
*/
NULL) {
/*
 * If the value is not set then apparently it is not part
 * of the current configuration; remove it.  This can
 * happen when the system panics between the primary name
 * space update and the device id name space update.
 */
if (md_nm_snarfed[key] == 0) {
key) == 1)
(void) remove_entry(nh,
}
}
}
}
if (md_nm_snarfed != NULL) {
/*
* Done and free the memory
*/
}
/*
* if the destroy flag has been set and
* the MD_SET_DIDCLUP bit is not set in
* the set's status field, cleanup the
* entire device id namespace
*/
if (md_devid_destroy &&
} else
(void) md_devid_cleanup(setno, 0);
}
/*
* clear single threading on snarf, return success or error
*/
out:
return (err);
}
void
{
mdi_unit_t *ui;
info->dki_capacity = 0;
info->dki_lbsize = 0;
info->dki_media_type = 0;
return;
}
}
void
{
/*
* Controller Information
*/
/*
* Unit Information
*/
info->dki_partition = 0;
/*
* We can't get from here to there yet
*/
}
/*
* open admin device
*/
static int
mdadminopen(
int flag,
int otyp)
{
int err = 0;
/* single thread */
mutex_enter(&md_mx);
/* check type and flags */
goto out;
}
(md_status & MD_GBL_EXCL)) {
goto out;
}
/* count and flag open */
md_status |= MD_GBL_OPEN;
md_status |= MD_GBL_EXCL;
/* unlock return success */
out:
mutex_exit(&md_mx);
return (err);
}
/*
* open entry point
*/
static int
mdopen(
dev_t *dev,
int flag,
int otyp,
cred_t *cred_p)
{
int err = 0;
/* dispatch admin device opens */
if (mnum == MD_ADM_MINOR)
/* lock, check status */
if (md_get_status() & MD_GBL_HALTED) {
goto out;
}
/* check minor */
goto out;
}
/* make sure we're snarfed */
goto out;
}
}
goto out;
}
/* check unit */
goto out;
}
/*
* The softpart open routine may do an I/O during the open, in
* which case the open routine will set the OPENINPROGRESS flag
* and drop all locks during the I/O. If this thread sees
 * the OPENINPROGRESS flag set, it should wait until the flag
* is reset before calling the driver's open routine. It must
* also revalidate the world after it grabs the unit_array lock
* since the set may have been released or the metadevice cleared
* during the sleep.
*/
if (MD_MNSET_SETNO(setno)) {
goto tryagain;
}
}
/* Test if device is openable */
goto out;
}
goto out;
}
/* don't allow writes to subdevices */
goto out;
}
/* open underlying driver */
goto out;
}
/* or do it ourselves */
else {
/* single thread */
(void) md_unit_openclose_enter(ui);
if (err != 0)
goto out;
}
/* unlock, return status */
out:
return (err);
}
/*
* close admin device
*/
static int
mdadminclose(
int otyp)
{
int i;
int err = 0;
/* single thread */
mutex_enter(&md_mx);
/* check type and flags */
goto out;
goto out;
}
/* count and flag closed */
else
md_status &= ~MD_GBL_OPEN;
for (i = 0; (i < OTYPCNT); ++i)
if (md_ocnt[i] != 0)
md_status |= MD_GBL_OPEN;
if (! (md_status & MD_GBL_OPEN))
md_status &= ~MD_GBL_EXCL;
/* unlock return success */
out:
mutex_exit(&md_mx);
return (err);
}
/*
* close entry point
*/
static int
mdclose(
dev_t dev,
int flag,
int otyp,
cred_t *cred_p)
{
int err = 0;
/* dispatch admin device closes */
if (mnum == MD_ADM_MINOR)
return (mdadminclose(otyp));
/* check minor */
goto out;
}
/* close underlying driver */
goto out;
}
/* or do it ourselves */
else {
/* single thread */
(void) md_unit_openclose_enter(ui);
if (err != 0)
goto out;
}
/* return success */
out:
return (err);
}
/*
* This routine performs raw read operations. It is called from the
* device switch at normal priority.
*
* The main catch is that the *uio struct which is passed to us may
* specify a read which spans two buffers, which would be contiguous
* on a single partition, but not on a striped partition. This will
* be handled by mdstrategy.
*/
/*ARGSUSED*/
static int
mdread(dev_t dev, struct uio *uio, cred_t *credp)
{
mdi_unit_t *ui;
int error;
return (ENXIO);
return (error);
}
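/*
 * Illustrative sketch only: a raw read entry point of this kind
 * conventionally hands the uio off to physio(9F) with the driver's
 * strategy routine, which performs the translation described above.
 * The minphys routine named here is the generic one; the driver may
 * use its own.
 */
#if 0
	return (physio(mdstrategy, NULL, dev, B_READ, minphys, uio));
#endif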
/*
* This routine performs async raw read operations. It is called from the
* device switch at normal priority.
*
* The main catch is that the *aio struct which is passed to us may
* specify a read which spans two buffers, which would be contiguous
* on a single partition, but not on a striped partition. This will
* be handled by mdstrategy.
*/
/*ARGSUSED*/
static int
mdaread(dev_t dev, struct aio_req *aio, cred_t *credp)
{
mdi_unit_t *ui;
int error;
return (ENXIO);
return (error);
}
/*
* This routine performs raw write operations. It is called from the
* device switch at normal priority.
*
* The main catch is that the *uio struct which is passed to us may
* specify a write which spans two buffers, which would be contiguous
* on a single partition, but not on a striped partition. This is
* handled by mdstrategy.
*
*/
/*ARGSUSED*/
static int
mdwrite(dev_t dev, struct uio *uio, cred_t *credp)
{
mdi_unit_t *ui;
int error;
return (ENXIO);
return (error);
}
/*
* This routine performs async raw write operations. It is called from the
* device switch at normal priority.
*
* The main catch is that the *aio struct which is passed to us may
* specify a write which spans two buffers, which would be contiguous
* on a single partition, but not on a striped partition. This is
* handled by mdstrategy.
*
*/
/*ARGSUSED*/
static int
mdawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
{
mdi_unit_t *ui;
int error;
return (ENXIO);
return (error);
}
int
{
mdi_unit_t *ui;
if (panicstr)
return (0);
}
} else {
}
return (0);
}
/*
 * Return true if the ioctl is allowed to be multithreaded.
 * All the MN ioctls are sent only from the message handlers through
 * rpc.mdcommd, which (via its own locking mechanism) takes care that no
 * two ioctls for the same metadevice are issued at the same time.
 * So we are safe here.
 * The other ioctls do not touch any metadevice structures and are
 * therefore harmless too, even if called multiple times at the same time.
 */
static boolean_t
is_mt_ioctl(int cmd)
{
switch (cmd) {
case MD_IOCGUNIQMSGID:
case MD_IOCGVERSION:
case MD_IOCISOPEN:
case MD_MN_SET_MM_OWNER:
case MD_MN_SET_STATE:
case MD_MN_SUSPEND_WRITES:
case MD_MN_ALLOCATE_HOTSPARE:
case MD_MN_SET_SETFLAGS:
case MD_MN_GET_SETFLAGS:
case MD_MN_MDDB_OPTRECFIX:
case MD_MN_MDDB_PARSE:
case MD_MN_MDDB_BLOCK:
case MD_MN_DB_USERREQ:
case MD_IOC_SPSTATUS:
case MD_MN_COMMD_ERR:
case MD_MN_SET_COMMD_RUNNING:
case MD_MN_RESYNC:
case MD_MN_SETSYNC:
case MD_MN_POKE_HOTSPARES:
return (1);
default:
return (0);
}
}
/*
* This routine implements the ioctl calls for the Virtual Disk System.
* It is called from the device switch at normal priority.
*/
/* ARGSUSED */
static int
mdioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *cred_p,
int *rval_p)
{
mdi_unit_t *ui;
int err;
/*
 * For multinode disksets, a number of ioctls are allowed to be
 * multithreaded.
 * A fundamental assumption made in this implementation is that
 * ioctls either do not interact with other md structures or the
 * ioctl to the admin device can only occur if the metadevice
 * device is open, i.e. avoid a race between metaclear and the
 * progress of a multithreaded ioctl.
 */
return (EINTR);
}
/*
* initialize lock tracker
*/
IOLOCK_INIT(&lock);
/* Flag to indicate that MD_GBL_IOCTL_LOCK is not acquired */
if (is_mt_ioctl(cmd)) {
/* increment the md_mtioctl_cnt */
mutex_enter(&md_mx);
mutex_exit(&md_mx);
}
/*
 * This has been added to prevent notification from re-snarfing
 * so metaunload will work.  It may interfere with other modules'
 * halt process.
 */
/*
* admin device ioctls
*/
if (mnum == MD_ADM_MINOR) {
}
/*
* metadevice ioctls
*/
} else {
}
/*
* drop any locks we grabbed
*/
}
static int
{
mdi_unit_t *ui;
return (ENXIO);
return (ENXIO);
return (ENXIO);
return (ENXIO);
}
/*
 * Metadevice unit number dispatcher
 * When this routine is called it will scan the
 * incore unit array and return the first available slot,
 * hence the unit number, to the caller.
 *
 * Return -1 if there is nothing available
 */
{
/*
* If nothing available
*/
return (MD_UNITBAD);
}
mutex_enter(&md_mx);
/* LINTED: E_CONSTANT_CONDITION */
while (1) {
/*
* Advance the starting index for the next
* md_get_nextunit call
*/
} else {
}
break;
}
un = MD_UNITBAD;
break;
}
}
mutex_exit(&md_mx);
return (un);
}
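/*
 * Illustrative sketch only: the essence of the scan above is a linear
 * walk of the in-core unit array starting at a rotating index,
 * returning the first empty slot.  All names here are hypothetical.
 */
#if 0
static int
md_example_next_slot(void **units, int nunits, int start)
{
	int i;

	for (i = 0; i < nunits; i++) {
		int slot = (start + i) % nunits;

		if (units[slot] == NULL)
			return (slot);	/* first free unit number */
	}
	return (-1);			/* nothing available */
}
#endif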