/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
*/
/*
* Storage Volume Character and Block Driver (SV)
*
* This driver implements a simplistic /dev/{r}dsk/ interface to a
* specified disk volume that is otherwise managed by the Prism
* software. The SV driver layers itself onto the underlying disk
* device driver by changing function pointers in the cb_ops
* structure.
*
* CONFIGURATION:
*
* 1. Configure the driver using the svadm utility.
*
* LIMITATIONS:
*
* This driver should NOT be used to share a device between another
* DataServices user interface module (e.g., STE) and a user accessing
* the device through the block device in O_WRITE mode. This is because
* writes through the block device are asynchronous (due to the page
* cache) and so consistency between the block device user and the
* STE user cannot be guaranteed.
*
* Data is copied between system struct buf(9S) and nsc_vec_t. This is
* wasteful and slow.
*/
#ifndef DS_DDICT
#include <sys/pathname.h>
#endif
#include <sys/sysmacros.h>
#include <sys/nsc_thread.h>
#ifdef DS_DDICT
#include "../contract.h"
#endif
#include "../nsctl.h"
#include "sv.h"
#include "sv_impl.h"
#include "sv_efi.h"
/*
* sv_mod_status
*/
#ifdef DKIOCPARTITION
/*
* CRC32 polynomial table needed for computing the checksums
* in an EFI vtoc.
*/
#endif
/*
* Per device and per major state.
*/
#ifndef _SunOS_5_6
#define UNSAFE_ENTER()
#define UNSAFE_EXIT()
#else
#endif
/* hash table of major dev structures */
/*
* Threading.
*/
/*
* nsctl fd callbacks.
*/
static int svattach_fd(blind_t);
static int svdetach_fd(blind_t);
{ 0, 0, }
};
/*
* cb_ops functions.
*/
/*
* These next functions are layered into the underlying driver's devops.
*/
static int sv_lyr_strategy(struct buf *);
svopen, /* open */
svclose, /* close */
nulldev, /* strategy */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
NULL, /* NOT a stream */
nodev, /* aread */
nodev, /* awrite */
};
/*
* dev_ops functions.
*/
0,
nulldev, /* identify */
nulldev, /* probe */
nodev, /* reset */
(struct bus_ops *)0
};
/*
* Module linkage.
*/
extern struct mod_ops mod_driverops;
"nws:Storage Volume:" ISS_VERSION_STR,
};
&modldrv,
0
};
/*
 * _init(9E): kernel module load entry point.
 *
 * NOTE(review): this chunk is truncated - the code that assigns
 * `error` (presumably a mod_install(9F) call - TODO confirm against
 * the full source) and the version banner printed on the DEBUG and
 * non-DEBUG paths are not visible here, so `error` is returned
 * without a visible assignment and the braces below do not balance
 * as shown.
 */
int
_init(void)
{
int error;
return (error);
}
#ifdef DEBUG
#else
/* non-DEBUG path: behaviour presumably varies with sv_micro_rev - body elided */
if (sv_micro_rev) {
} else {
}
#endif
return (error);
}
/*
 * _fini(9E): kernel module unload entry point.
 *
 * NOTE(review): truncated in this chunk - `error` has no visible
 * assignment (presumably the result of mod_remove(9F) - TODO confirm)
 * and the second return statement is unreachable as shown.
 */
int
_fini(void)
{
int error;
return (error);
return (error);
}
int
{
}
/*
* Locking & State.
*
* sv_mutex protects config information - sv_maj_t and sv_dev_t lists;
* threadset creation and sizing; sv_ndevices.
*
* If we need to hold both sv_mutex and sv_lock, then the sv_mutex
* must be acquired first.
*
* sv_lock protects the sv_dev_t structure for an individual device.
*
* If we need to hold both sv_lock and sv_olock, then the sv_lock must be acquired
* first.
*
* nsc_reserve/nsc_release are used in NSC_MULTI mode to allow multiple
* I/O operations to a device simultaneously, as above.
*
* All nsc_open/nsc_close/nsc_reserve/nsc_release operations that occur
* with sv_lock write-locked must be done with (sv_state == SV_PENDING)
* and (sv_pending == curthread) so that any recursion through
* sv_lyr_open/sv_lyr_close can be detected.
*/
/*
 * sv_init_devs: initialise the per-device state for up to
 * sv_max_devices devices.
 *
 * As shown: returns 0 immediately when sv_max_devices > 0
 * (presumably already initialised - confirm against full source),
 * EAGAIN when nsctl is not attached (nskernd not running), and
 * ENOMEM on an allocation failure.
 *
 * NOTE(review): interior lines are missing from this chunk (e.g.
 * the allocation call that the stray "KM_NOSLEEP, sv_mem);"
 * continuation belongs to), so the control flow shown here is
 * incomplete and the braces do not balance.
 */
static int
sv_init_devs(void)
{
int i;
if (sv_max_devices > 0)
return (0);
if (sv_max_devices <= 0) {
/* nsctl is not attached (nskernd not running) */
if (sv_debug > 0)
return (EAGAIN);
}
KM_NOSLEEP, sv_mem);
return (ENOMEM);
}
for (i = 0; i < sv_max_devices; i++) {
}
if (sv_debug > 0)
return (0);
}
static int
{
int rc;
switch (cmd) {
case DDI_ATTACH:
0, DDI_PSEUDO, 0) != DDI_SUCCESS)
goto failed;
goto failed;
}
rc = sv_init_devs();
goto failed;
}
"sv_threads", sv_threads);
if (sv_debug > 0)
if (sv_threads > sv_threads_max)
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
static int
{
int i;
switch (cmd) {
case DDI_DETACH:
/*
* Check that everything is disabled.
*/
if (sv_mod_status == SV_PREVENT_UNLOAD) {
return (DDI_FAILURE);
}
for (i = 0; sv_devs && i < sv_max_devices; i++) {
return (DDI_FAILURE);
}
}
for (i = 0; sv_devs && i < sv_max_devices; i++) {
}
if (sv_devs) {
(sv_max_devices * sizeof (*sv_devs)));
}
sv_max_devices = 0;
if (sv_mem) {
}
/*
* Remove all minor nodes.
*/
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
static sv_maj_t *
{
/*
* See if the hash table entry, or one of the hash chains
* is already allocated for this major number
*/
do {
return (maj);
}
/*
* If the sv_mutex is held, there is a design flaw, as the only non-mutex
* held callers can be sv_enable() or sv_dev_to_sv().
* Return an error, instead of panicking the system.
*/
if (MUTEX_HELD(&sv_mutex)) {
return (NULL);
}
/*
* Determine where to allocate a new element in the hash table
*/
/* Did another thread beat us to it? */
return (maj);
/* Find a NULL insert point? */
}
/*
* Located the new insert point
*/
else
return (maj);
}
/* ARGSUSED */
static int
{
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
rc = DDI_SUCCESS;
break;
case DDI_INFO_DEVT2INSTANCE:
/*
* We only have a single instance.
*/
*result = 0;
rc = DDI_SUCCESS;
break;
default:
break;
}
return (rc);
}
/*
* Hashing of devices onto major device structures.
*
* Individual device structures are hashed onto one of the sm_hash[]
* buckets in the relevant major device structure.
*
* Hash insertion and deletion -must- be done with sv_mutex held. Hash
* searching does not require the mutex because of the sm_seq member.
* sm_seq is incremented on each insertion (-after- hash chain pointer
* manipulation) and each deletion (-before- hash chain pointer
* manipulation). When searching the hash chain, the seq number is
* checked before accessing each device structure, if the seq number has
* changed, then we restart the search from the top of the hash chain.
* If we restart more than SV_HASH_RETRY times, we take sv_mutex and search
* the hash chain (we are guaranteed that this search cannot be
* interrupted).
*/
static sv_dev_t *
{
int seq;
int try;
/* Get major hash table */
if (majpp)
return (NULL);
return (NULL);
}
try = 0;
if (try > SV_HASH_RETRY)
nsc_membar_stld(); /* preserve register load order */
try++;
goto retry;
}
break;
}
if (try > SV_HASH_RETRY)
return (svp);
}
/*
* Must be called with sv_mutex held.
*/
static int
{
int i;
/* Get major hash table */
return (NULL);
/* Determine which minor hash table */
/* look for clash */
break;
}
if (svp) {
return (SV_EENABLED);
}
/* look for spare sv_devs slot */
for (i = 0; i < sv_max_devices; i++) {
break;
}
if (i >= sv_max_devices) {
return (SV_ENOSLOTS);
}
/*
* We do not know the size of the underlying device at
* this stage, so initialise "nblocks" property to
* zero, and update it whenever we succeed in
* nsc_reserve'ing the underlying nsc_fd_t.
*/
svp->sv_nblocks = 0;
return (0);
}
/*
* Remove a device structure from its hash chain.
* Must be called with sv_mutex held.
*/
static void
{
/* Get major hash table */
return;
/* remove svp from hash chain */
while (*svpp) {
/*
* increment of sm_seq must be before the
* removal from the hash chain
*/
break;
}
}
}
/*
* Free (disable) a device structure.
* Must be called with sv_lock(RW_WRITER) and sv_mutex held, and will
* perform the exits during its processing.
*/
static int
{
/* Get major hash table */
return (NULL);
/*
* Close the fd's before removing from the hash or swapping
* back the cb_ops pointers so that the cache flushes before new
* io can come in.
*/
}
if (error != SV_ESDOPEN &&
if (maj->sm_dev_ops)
else
/*
* corbin XXX
* Leave backing device ops in maj->sm_*
* to handle any requests that might come
* in during the disable. This could be
* a problem however if the backing device
* driver is changed while we process these
* requests.
*
* maj->sm_strategy = 0;
* maj->sm_awrite = 0;
* maj->sm_write = 0;
* maj->sm_ioctl = 0;
* maj->sm_close = 0;
* maj->sm_aread = 0;
* maj->sm_read = 0;
* maj->sm_open = 0;
* maj->sm_flag = 0;
*
*/
}
if (maj->sm_dev_ops) {
maj->sm_dev_ops = 0;
}
}
/*
* Close the protective layered driver open using the
* Sun Private layered driver i/f.
*/
}
return (error);
}
/*
* Reserve the device, taking into account the possibility that
* the reserve might have to be retried.
*/
static int
{
int eintr_count;
int rc;
eintr_count = 0;
do {
++eintr_count;
delay(2);
}
return (rc);
}
static int
{
int rc;
return (SV_EBADDEV);
}
return (SV_EAMODE);
}
/* Get major hash table */
return (SV_EBADDEV);
if (rc) {
return (rc);
}
/*
* Get real fd used for io
*/
/*
* OR in NSC_DEVICE to ensure that nskern grabs the real strategy
* function pointer before sv swaps them out.
*/
if (kstatus)
}
/*
* Perform a layered driver open using the Sun Private layered
* driver i/f to ensure that the cb_ops structure for the driver
* is not detached out from under us whilst sv is enabled.
*
*/
crp = ddi_get_cred();
}
if (rc != 0) {
if (kstatus)
}
/*
* Do layering if required - must happen after nsc_open().
*/
}
}
}
/*
* Check that the driver has async I/O entry points
* before changing them.
*/
} else {
}
/*
* Bug 4645743
*
* Prevent sv from ever unloading after it has interposed
* on a major device because there is a race between
* sv removing its layered entry points from the target
* dev_ops, a client coming in and accessing the driver,
* and the kernel modunloading the sv text.
*
* To allow unload, do svboot -u, which only happens in
* pkgrm time.
*/
}
sv_ndevices++;
nblocks = 0;
}
return (0);
}
static int
{
int rc = 0;
if (sv_mod_status == SV_PREVENT_UNLOAD) {
} else {
}
}
return (rc);
}
static int
{
int rc;
if (sv_debug > 0)
return (0);
}
"!svattach_fd: nsc_partsize() failed, rc %d", rc);
svp->sv_nblocks = 0;
}
"!svattach_fd: nsc_maxfbas() failed, rc %d", rc);
svp->sv_maxfbas = 0;
}
if (sv_debug > 0) {
}
return (0);
}
static int
{
if (sv_debug > 0)
/* svp can be NULL during disable of an sv */
return (0);
svp->sv_maxfbas = 0;
svp->sv_nblocks = 0;
return (0);
}
/*
* Side effect: if called with (guard != 0), then expects both sv_mutex
* and sv_lock(RW_WRITER) to be held, and will release them before returning.
*/
/* ARGSUSED */
static int
{
return (SV_ENODEV);
}
return (SV_EDISABLED);
}
sv_ndevices--;
}
static int
{
int (*fn)();
int ret;
int rc;
if (svp) {
/*
* This is a recursive open from a call to
* ddi_lyr_open_by_devt and so we just want
* to pass it straight through to the
* underlying driver.
*/
} else
}
UNSAFE_ENTER();
UNSAFE_EXIT();
} else {
}
if (ret == 0) {
/*
* Re-acquire svp if the driver changed *devp.
*/
if (svp) {
}
}
}
} else {
}
/*
* Underlying DDI open failed, but we have this
* device SV enabled. If we can read some data
* from the device, fake a successful open (this
* probably means that this device is RDC'd and we
* are getting the data from the secondary node).
*
* The reserve must be done with NSC_TRY|NSC_NOWAIT to
* ensure that it does not deadlock if this open is
* coming from nskernd:get_bsize().
*/
if (rc == 0) {
if (rc <= 0) {
/* success */
ret = 0;
}
if (tmph) {
(void) nsc_free_buf(tmph);
}
/*
* Count the number of layered opens that we
* fake, since we have to fake a matching number
* of layered closes later (open/close calls must be
* paired).
*/
svp->sv_openlcnt++;
}
}
}
if (svp) {
}
return (ret);
}
static int
{
int (*fn)();
int ret;
if (svp &&
/*
* This is a recursive open from a call to
* ddi_lyr_close and so we just want
* to pass it straight through to the
* underlying driver.
*/
}
if (svp) {
if (svp->sv_openlcnt) {
/*
* Consume sufficient layered closes to
* account for the opens that we faked
* whilst the device was failed.
*/
svp->sv_openlcnt--;
return (0);
}
}
}
UNSAFE_ENTER();
UNSAFE_EXIT();
} else {
}
} else {
}
if (svp) {
}
return (ret);
}
/*
* Convert the specified dev_t into a locked and enabled sv_dev_t, or
* return NULL.
*/
static sv_dev_t *
{
/* locked and enabled */
break;
}
/*
* State was changed while waiting on the lock.
* Wait for a stable state.
*/
delay(2);
}
return (svp);
}
static int
{
int (*fn)();
int rc;
if (maj) {
else
if (fn != 0) {
UNSAFE_ENTER();
UNSAFE_EXIT();
} else {
}
}
return (rc);
} else {
return (ENODEV);
}
}
/*
* guard access mode
* - prevent user level access to the device
*/
goto out;
}
goto out;
}
else
out:
return (rc);
}
static int
{
}
static int
{
}
/* ARGSUSED */
static int
{
return (aphysio(sv_lyr_strategy,
}
/* ARGSUSED */
static int
{
return (aphysio(sv_lyr_strategy,
}
/*
* Set up an array containing the list of raw path names
* The array for the paths is svl and the size of the array is
* in size.
*
* If there are more layered devices than will fit in the array,
* the number of extra layered devices is returned. Otherwise
* zero is return.
*
* Input:
* svn : array for paths
* size : size of the array
*
* Output (extra):
* zero : All paths fit in array
* >0 : Number of defined layered devices don't fit in array
*/
static int
{
int i, index;
char *path;
*extra = 0;
index = 0;
if (ilp32)
else
for (i = 0; i < sv_max_devices; i++) {
continue;
}
/* Another overflow entry */
(*extra)++;
continue;
}
if (ilp32) {
svn32++;
} else {
svn++;
}
if (*nblocks == 0) {
if (sv_debug > 3)
}
}
/* Out of space */
(*extra)++;
}
}
/* NULL terminated list */
if (ilp32)
else
}
return (0);
}
static void
{
int change = 0;
int nthreads;
if (sv_threads_extra) {
/* keep track of any additional threads requested */
if (threads > 0) {
return;
}
if (threads >= sv_threads_extra) {
sv_threads_extra = 0;
/* fall through to while loop */
} else {
return;
}
} else if (threads > 0) {
/*
* do not increase the number of threads beyond
* sv_threads_max when doing dynamic thread tuning
*/
if (threads <= 0)
return;
}
}
if (threads < 0)
while (threads--) {
if (sv_threads_needed >= nthreads)
else if ((sv_threads_needed <
}
#ifdef DEBUG
if (change) {
"!sv_thread_tune: threads needed %d, nthreads %d, "
"nthreads change %d",
}
#endif
}
/* ARGSUSED */
static int
{
int rc;
rc = sv_init_devs();
return (rc);
}
/* ARGSUSED */
static int
{
break;
}
/* threads still active - wait for them to exit */
loops--;
}
if (loops <= 0) {
#ifndef DEBUG
/* do not write to console when non-DEBUG */
"!"
#endif
"sv:svclose: threads still active "
"after %d sec - leaking thread set", secs);
}
return (0);
}
static int
{
*rvalp = 0;
/*
* If sv_mod_status is 0 or SV_PREVENT_UNLOAD, then it will continue.
* else it means it previously was SV_PREVENT_UNLOAD, and now it's
* SV_ALLOW_UNLOAD, expecting the driver to eventually unload.
*
* SV_ALLOW_UNLOAD is final state, so no need to grab sv_mutex.
*/
if (sv_mod_status == SV_ALLOW_UNLOAD) {
return (EBUSY);
}
return (rc);
kstatus = spcs_s_kcreate();
if (!kstatus) {
return (ENOMEM);
}
switch (cmd) {
case SVIOC_ENABLE:
if (ilp32) {
return (EFAULT);
}
} else {
return (EFAULT);
}
}
/* force to raw access */
}
"!sv: could not allocate %d threads",
}
}
if (rc == 0) {
sv_config_time = nsc_lbolt();
}
/* NOTREACHED */
case SVIOC_DISABLE:
if (ilp32) {
return (EFAULT);
}
} else {
return (EFAULT);
}
}
int i;
/*
* User level could not find the minor device
* node, so do this the slow way by searching
* the entire sv config for a matching pathname.
*/
for (i = 0; i < sv_max_devices; i++) {
continue;
break;
}
}
return (spcs_s_ocopyoutf(&kstatus,
}
kstatus);
if (rc == 0) {
sv_config_time = nsc_lbolt();
}
/* NOTREACHED */
case SVIOC_LIST:
if (ilp32) {
return (EFAULT);
}
} else {
return (EFAULT);
}
}
/* Do some boundary checking */
/* Array size is out of range */
SV_EARRBOUNDS, "0",
sizeof (itmp1), 0),
sizeof (itmp2), 0)));
}
if (ilp32)
else
/* Allocate memory for the array of structures */
if (bytes != 0) {
if (!svn) {
return (spcs_s_ocopyoutf(&kstatus,
}
}
if (rc) {
}
if (ilp32) {
/* Return the list structure */
return (EFAULT);
}
} else {
/* Return the list structure */
return (EFAULT);
}
}
/* Return the array */
return (EFAULT);
}
}
/* NOTREACHED */
case SVIOC_VERSION:
if (ilp32) {
return (EFAULT);
}
return (EFAULT);
}
} else {
return (EFAULT);
}
return (EFAULT);
}
}
/* NOTREACHED */
case SVIOC_UNLOAD:
rc = sv_prepare_unload();
}
return (rc);
default:
return (EINVAL);
/* NOTREACHED */
}
/* NOTREACHED */
}
/* ARGSUSED */
static int
{
return (0);
}
static void
{
nsc_vec_t *v;
int (*fn)();
rc = 0;
if (sv_debug > 5)
UNSAFE_ENTER();
UNSAFE_EXIT();
} else {
}
return;
} else {
return;
}
}
/*
* guard access mode
* - prevent user level access to the device
*/
goto out;
}
goto out;
}
/* return EOF, not an error */
} else
goto done;
}
/*
* Preallocate a handle once per call to strategy.
* If this fails, then the nsc_alloc_buf() will allocate
* a temporary handle per allocation/free pair.
*/
"!sv: allocated active handle (bufh %p, flags %x)",
goto done;
}
fba_off = 0;
/*
* fba_req - requested size of transfer in FBAs after
* truncation to device extent, and allowing for
* possible non-FBA bounded final chunk.
* fba_off - offset of start of chunk from start of bp in FBAs.
* fba_len - size of this chunk in FBAs.
*/
loop:
int, rw);
if (rc > 0) {
(void) nsc_free_buf(hndl);
goto done;
}
/*
* Not overwriting all of the last FBA, so read in the
* old contents now before we overwrite it with the new
* data.
*/
if (rc > 0) {
goto done;
}
}
while (tocopy > 0) {
else
v++;
}
if (rc > 0) {
goto done;
}
}
/*
* Adjust FBA offset and requested (ie. remaining) length,
* loop if more data to transfer.
*/
if (fba_req > 0) {
if (rc > 0) {
}
if (rc <= 0)
goto loop;
}
done:
if (rc > 0) {
}
}
if (bufh)
(void) nsc_free_handle(bufh);
out:
if (sv_debug > 5) {
"!_sv_lyr_strategy: bp %p, bufh %p, bp->b_error %d\n",
}
}
static void
{
}
static int
{
int nlive;
/*
* If B_ASYNC was part of the DDI we could use it as a hint to
* not create a thread for synchronous i/o.
*/
/* not sv enabled - just pass through */
return (0);
}
if (sv_debug > 4) {
}
/*
* If there are only guard devices enabled there
* won't be a threadset, so don't try and use it.
*/
}
/*
* out of threads, so fall back to synchronous io.
*/
if (sv_debug > 0) {
"!sv_lyr_strategy: thread alloc failed\n");
}
} else {
if (nlive > sv_max_nlive) {
if (sv_debug > 0) {
"!sv_lyr_strategy: "
"new max nlive %d (nthread %d)\n",
}
}
}
return (0);
}
/*
* re-write the size of the current partition
*/
static int
{
int ilp32;
int pnum;
int rc;
if (rc != 0) {
return (rc);
}
"!sv_gvtoc: unable to determine partition number "
return (EINVAL);
}
if (ilp32) {
#ifdef _SunOS_5_6
#else
#endif
if (p_size == 0) {
NSC_MULTI|NSC_PCATCH) == 0) {
} else {
}
}
}
} else {
long p_size;
if (p_size == 0) {
NSC_MULTI|NSC_PCATCH) == 0) {
} else {
}
}
}
}
return (rc);
}
#ifdef DKIOCPARTITION
/*
* re-write the size of the current partition
*
* arg is dk_efi_t.
*
* dk_efi_t->dki_data = (void *)(uintptr_t)efi.dki_data_64;
*
* dk_efi_t->dki_data --> efi_gpt_t (label header)
* dk_efi_t->dki_data + 1 --> efi_gpe_t[] (array of partitions)
*
* efi_gpt_t->efi_gpt_PartitionEntryArrayCRC32 --> CRC32 of array of parts
* efi_gpt_t->efi_gpt_HeaderCRC32 --> CRC32 of header itself
*
* This assumes that sizeof (efi_gpt_t) is the same as the size of a
* logical block on the disk.
*
* Everything is little endian (i.e. disk format).
*/
static int
{
int pnum;
int rc;
if (rc != 0) {
return (rc);
}
if (pnum < 0) {
"!sv_efi: unable to determine partition number for dev %lx",
return (EINVAL);
}
return (EFAULT);
}
return (EINVAL);
}
goto out;
}
unparts = 1;
"!sv_efi: partition# beyond end of user array (%d >= %d)",
return (EINVAL);
}
goto out;
}
if (p_size == 0) {
} else {
}
}
gpt.efi_gpt_HeaderCRC32 = 0;
goto out;
}
goto out;
}
out:
if (gpe) {
}
return (rc);
}
/*
* Re-write the size of the partition specified by p_partno
*
* Note that if a DKIOCPARTITION is issued to an fd opened against a
* non-sv'd device, but p_partno requests the size for a different
* device that is sv'd, this function will *not* be called as sv is
* not interposed on the original device (the fd).
*
* It would not be easy to change this as we cannot get the partition
* number for the non-sv'd device, so cannot compute the dev_t of the
* (sv'd) p_partno device, and so cannot find out if it is sv'd or get
* its size from nsctl.
*
* See also the "Bug 4755783" comment in sv_lyr_ioctl().
*/
static int
{
if (rc != 0) {
return (rc);
}
return (EFAULT);
}
/* switch to requested partition, not the current one */
/* not sv device - just return */
return (0);
}
}
if (p_size == 0) {
} else {
}
}
}
return (EFAULT);
}
return (rc);
}
#endif /* DKIOCPARTITION */
static int
{
int (*fn)();
int rc = 0;
maj = 0;
fn = 0;
/*
* If sv_mod_status is 0 or SV_PREVENT_UNLOAD, then it will continue.
* else it means it previously was SV_PREVENT_UNLOAD, and now it's
* SV_ALLOW_UNLOAD, expecting the driver to eventually unload.
*
* SV_ALLOW_UNLOAD is final state, so no need to grab sv_mutex.
*/
if (sv_mod_status == SV_ALLOW_UNLOAD) {
return (EBUSY);
}
if (nskernd_isdaemon()) {
/*
* This is nskernd which always needs to see
* the underlying disk device accurately.
*
* So just pass the ioctl straight through
* to the underlying driver as though the device
* was not sv enabled.
*/
} else {
}
}
/*
* We now have a locked and enabled SV device, or a non-SV device.
*/
switch (cmd) {
/*
* DKIOCGVTOC, DKIOCSVTOC, DKIOCPARTITION, DKIOCGETEFI
* and DKIOCSETEFI are intercepted and faked up as some
* i/o providers emulate volumes of a different size to
* the underlying volume.
*
* Setting the size by rewriting the vtoc is not permitted.
*/
case DKIOCSVTOC:
#ifdef DKIOCPARTITION
case DKIOCSETEFI:
#endif
/* not intercepted -- allow ioctl through */
break;
}
return (EPERM);
default:
break;
}
/*
* Pass through the real ioctl command.
*/
UNSAFE_ENTER();
UNSAFE_EXIT();
} else {
}
} else {
}
/*
* Bug 4755783
* Fix up the size of the current partition to allow
* for the virtual volume to be a different size to the
* physical volume (e.g. for II compact dependent shadows).
*
* Note that this only attempts to fix up the current partition
* - the one that the ioctl was issued against. There could be
* other sv'd partitions in the same vtoc, but we cannot tell
* so we don't attempt to fix them up.
*/
switch (cmd) {
case DKIOCGVTOC:
break;
#ifdef DKIOCPARTITION
case DKIOCGETEFI:
break;
case DKIOCPARTITION:
break;
#endif /* DKIOCPARTITION */
}
}
}
return (rc);
}