/* bofi.c revision cecfa35819d13ab26f41d532f904681b316e6c0d */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <sys/ddi_impldefs.h>
#include <sys/bofi_impl.h>
/*
* Testing the resilience of a hardened device driver requires a suitably wide
* range of different types of "typical" hardware faults to be injected,
* preferably in a controlled and repeatable fashion. This is not in general
* possible via hardware, so the "fault injection test harness" is provided.
* This works by intercepting calls from the driver to various DDI routines,
* and then corrupting the result of those DDI routine calls as if the
* hardware had caused the corruption.
*
* Conceptually, the bofi driver consists of two parts:
*
* A driver interface that supports a number of ioctls which allow error
* definitions ("errdefs") to be defined and subsequently managed. The
* driver is a clone driver, so each open will create a separate
* invocation. Any errdefs created by using ioctls to that invocation
* will automatically be deleted when that invocation is closed.
*
* Intercept routines: When the bofi driver is attached, it edits the
* bus_ops structure of the bus nexus specified by the "bofi-nexus"
* field in the "bofi.conf" file, thus allowing the
* bofi driver to intercept various ddi functions. These intercept
* routines primarily carry out fault injections based on the errdefs
* created for that device.
*
* Faults can be injected into:
*
* DMA accesses (via ddi_dma_setup(), ddi_dma_bind_handle(), etc.),
*
* PIO accesses (via ddi_get8(), ddi_put8(), etc.),
*
* Interrupts (generating spurious interrupts, losing interrupts,
* delaying interrupts).
*
* By default, ddi routines called from all drivers will be intercepted
* and faults potentially injected. However, the "bofi-to-test" field in
* the "bofi.conf" file can be set to a space-separated list of drivers to
* test (or by preceding each driver name in the list with an "!", a list
* of drivers not to test).
*
* In addition to fault injection, the bofi driver does a number of static
* checks which are controlled by properties in the "bofi.conf" file.
*
* "bofi-ddi-check" - if set will validate that there are no PIO access
* other than those using the DDI routines (ddi_get8(), ddi_put8(), etc).
*
* "bofi-range-check" - if set to values 1 (warning) or 2 (panic), will
* validate that calls to ddi_get8(), ddi_put8(), etc are not made
* specifying addresses outside the range of the access_handle.
*
* "bofi-sync-check" - if set will validate that calls to ddi_dma_sync()
* are being made correctly.
*/
extern void *bp_mapin_common(struct buf *, int);
static int bofi_ddi_check;
static int bofi_sync_check;
static int bofi_range_check;
#define HDL_HASH_TBL_SIZE 64
#define HDL_DHASH(x) \
#define HDL_HHASH(x) \
static struct bofi_shadow shadow_list;
static struct bofi_errent *errent_listp;
static char driver_list[NAMESIZE];
static int driver_list_size;
static int driver_list_neg;
static char nexus_name[NAMESIZE];
static int initialized = 0;
#define NCLONES 256
static dev_info_t *our_dip;
static kmutex_t bofi_mutex;
static kmutex_t clone_tab_mutex;
static kmutex_t bofi_low_mutex;
static ddi_iblock_cookie_t bofi_low_cookie;
static int bofi_errdef_alloc(struct bofi_errdef *, char *,
struct bofi_errent *);
static int bofi_errdef_free(struct bofi_errent *);
static void bofi_start(struct bofi_errctl *, char *);
static void bofi_stop(struct bofi_errctl *, char *);
static void bofi_broadcast(struct bofi_errctl *, char *);
static void bofi_clear_acc_chk(struct bofi_errctl *, char *);
static void bofi_clear_errors(struct bofi_errctl *, char *);
static void bofi_clear_errdefs(struct bofi_errctl *, char *);
static int bofi_errdef_check(struct bofi_errstate *,
struct acc_log_elem **);
static int bofi_errdef_check_w(struct bofi_errstate *,
struct acc_log_elem **);
struct ddi_dma_req *, ddi_dma_handle_t *);
ddi_dma_handle_t *);
uint_t *);
void *result);
#define FM_SIMULATED_DMA "simulated.dma"
#define FM_SIMULATED_PIO "simulated.pio"
#if defined(__sparc)
uint_t, ddi_dma_cookie_t *);
#endif
static int driver_under_test(dev_info_t *);
static int bofi_check_acc_hdl(ddi_acc_impl_t *);
static int bofi_check_dma_hdl(ddi_dma_impl_t *);
/*
 * Replacement bus_ops vector that is overlaid onto the target nexus
 * driver (see the "overlay bus_ops structure" step in attach), so that
 * bofi can intercept DDI calls made through the nexus.
 *
 * NOTE(review): every entry in this copy is NULL/0; the intercept
 * routine pointers that normally populate this table appear to have
 * been elided from this version of the file - confirm against the
 * original source before use.
 */
static struct bus_ops bofi_bus_ops = {
NULL,
NULL,
NULL,
NULL,
NULL,
0,
0,
0,
0,
0,
0,
0,
};
/*
 * Character/block entry points for the bofi control device itself.
 * The device is purely ioctl-driven (errdefs are created and managed
 * via ioctl), so only open, close and ioctl are implemented; all
 * data-path entry points are nodev.
 */
static struct cb_ops bofi_cb_ops = {
bofi_open, /* open */
bofi_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
bofi_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* chpoll */
ddi_prop_op, /* prop_op */
NULL, /* for STREAMS drivers */
D_MP, /* driver compatibility flag */
CB_REV, /* cb_ops revision */
nodev, /* aread */
nodev /* awrite */
};
DEVO_REV, /* driver build version */
0, /* device reference count */
nulldev, /* probe */
nulldev, /* reset */
nulldev /* power */
};
/* module configuration stuff */
static void *statep;
"bofi driver %I%",
};
/*
 * Module linkage: a single driver linkage element, NULL-terminated.
 *
 * NOTE(review): struct modlinkage normally begins with an ml_rev
 * member (MODREV_1) before the linkage pointers; that initializer is
 * not visible in this copy - confirm against the original source.
 */
static struct modlinkage modlinkage = {
&modldrv,
0
};
static struct bus_ops save_bus_ops;
#if defined(__sparc)
/*
 * Replacement dvma_ops vector (SPARC only), used to intercept the
 * fast DVMA interfaces.
 *
 * NOTE(review): the initializer is empty in this copy; the dvma
 * intercept entries appear to have been elided - confirm against the
 * original source.
 */
static struct dvma_ops bofi_dvma_ops = {
};
#endif
/*
* support routine - map user page into kernel virtual
*/
static caddr_t
{
/*
* mock up a buf structure so we can call bp_mapin_common()
*/
}
/*
* support routine - map page chain into kernel virtual
*/
static caddr_t
{
/*
* mock up a buf structure so we can call bp_mapin_common()
*/
}
/*
* support routine - map page array into kernel virtual
*/
static caddr_t
int flag)
{
/*
* mock up a buf structure so we can call bp_mapin_common()
*/
}
/*
* support routine - map dmareq into kernel virtual if not already
* fills in *lenp with length
* *mapaddr will be new kernel virtual address - or null if no mapping needed
*/
static caddr_t
{
return (*mapaddrp);
return (*mapaddrp);
} else {
return (*mapaddrp);
}
}
/*
* support routine - free off kernel virtual mapping as allocated by
* ddi_dmareq_mapin()
*/
static void
{
return;
/*
* mock up a buf structure
*/
}
static time_t
{
gethrestime(&ts);
}
/*
* reset the bus_ops structure of the specified nexus to point to
* the original values in the save_bus_ops structure.
*
* Note that both this routine and modify_bus_ops() rely on the current
* behavior of the framework in that nexus drivers are not unloadable
*
*/
static int
{
/*
* find specified module
*/
do {
if (!modp->mod_linkage) {
return (0);
}
return (0);
}
if (!bp) {
return (0);
}
if (ops->devo_refcnt > 0) {
/*
* As long as devices are active with modified
* bus ops bofi must not go away. There may be
* drivers with modified access or dma handles.
*/
return (0);
}
mp->drv_linkinfo);
return (1);
}
return (0);
}
/*
* modify the bus_ops structure of the specified nexus to point to bofi
* routines, saving the original values in the save_bus_ops structure
*/
static int
{
return (0);
/*
* find specified module
*/
do {
if (!modp->mod_linkage) {
return (0);
}
return (0);
}
if (!bp) {
return (0);
}
if (ops->devo_refcnt == 0) {
/*
* If there is no device active for this
* module then there is nothing to do for bofi.
*/
return (0);
}
mp->drv_linkinfo);
save_bus_ops = *bp;
return (1);
}
return (0);
}
/*
 * Loadable-module entry point: install the bofi driver.
 *
 * Returns 0 on success, otherwise the errno-style value from the
 * failing step.
 *
 * NOTE(review): the initialisation call (normally soft-state setup)
 * that assigns `e' before the first test appears to have been elided
 * from this copy of the file.  As written, `e' was read while
 * uninitialised (undefined behaviour) and control could fall off the
 * end of this non-void function.  `e' is now defensively initialised
 * to 0 and a final return added so the function is well-defined; the
 * elided initialisation step should be restored from the original
 * source.
 */
int
_init(void)
{
	int e = 0;

	if (e != 0)
		return (e);
	if ((e = mod_install(&modlinkage)) != 0)
		return (e);
	return (e);
}
/*
 * Loadable-module entry point: remove the bofi driver.
 *
 * Returns 0 on success, otherwise the error value from mod_remove()
 * (in which case the module stays loaded).
 */
int
_fini(void)
{
	int e = mod_remove(&modlinkage);

	if (e != 0)
		return (e);
	return (e);
}
int
{
}
static int
{
char *name;
char buf[80];
int i;
int s, ss;
int new_string;
char *ptr;
if (cmd != DDI_ATTACH)
return (DDI_FAILURE);
/*
* only one instance - but we clone using the open routine
*/
if (ddi_get_instance(dip) > 0)
return (DDI_FAILURE);
if (!initialized) {
return (DDI_FAILURE);
return (DDI_FAILURE);
&bofi_low_cookie) != DDI_SUCCESS) {
return (DDI_FAILURE); /* fail attach */
}
/*
* get nexus name (from conf file)
*/
return (DDI_FAILURE);
}
/*
* get whether to do dma map kmem private checking
*/
bofi_range_check = 0;
bofi_range_check = 2;
bofi_range_check = 1;
else
bofi_range_check = 0;
/*
* get whether to prevent direct access to register
*/
bofi_ddi_check = 0;
bofi_ddi_check = 1;
else
bofi_ddi_check = 0;
/*
* get whether to do copy on ddi_dma_sync
*/
bofi_sync_check = 0;
bofi_sync_check = 1;
else
bofi_sync_check = 0;
/*
* get driver-under-test names (from conf file)
*/
driver_list[0] = 0;
/*
* and convert into a sequence of strings
*/
driver_list_neg = 1;
new_string = 1;
for (i = 0; i < driver_list_size; i++) {
if (driver_list[i] == ' ') {
driver_list[i] = '\0';
new_string = 1;
} else if (new_string) {
if (driver_list[i] != '!')
driver_list_neg = 0;
new_string = 0;
}
}
/*
* initialize mutex, lists
*/
NULL);
/*
* fake up iblock cookie - need to protect outselves
* against drivers that use hilevel interrupts
*/
s = spl8();
(void *)bofi_low_cookie);
for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
}
for (i = 1; i < BOFI_NLINKS; i++)
/*
* overlay bus_ops structure
*/
return (DDI_FAILURE);
}
/*
* save dip for getinfo
*/
initialized = 1;
}
return (DDI_SUCCESS);
}
static int
{
char *name;
char buf[80];
if (cmd != DDI_DETACH)
return (DDI_FAILURE);
if (ddi_get_instance(dip) > 0)
return (DDI_FAILURE);
return (DDI_FAILURE);
/*
* make sure test bofi is no longer in use
*/
return (DDI_FAILURE);
}
/*
* restore bus_ops structure
*/
return (DDI_FAILURE);
initialized = 0;
return (DDI_SUCCESS);
}
/* ARGSUSED */
static int
{
int retval;
switch (cmd) {
case DDI_INFO_DEVT2DEVINFO:
} else {
}
break;
case DDI_INFO_DEVT2INSTANCE:
*result = (void *)0;
break;
default:
}
return (retval);
}
/* ARGSUSED */
static int
{
struct bofi_errent *softc;
/*
* only allow open on minor=0 - the clone device
*/
if (minor != 0)
return (ENXIO);
/*
* fail if not attached
*/
if (!initialized)
return (ENXIO);
/*
* find a free slot and grab it
*/
break;
}
}
return (EAGAIN);
/*
* soft state structure for this clone is used to maintain a list
* of allocated errdefs so they can be freed on close
*/
return (EAGAIN);
}
return (0);
}
/* ARGSUSED */
static int
{
struct bofi_errent *softc;
return (ENXIO);
/*
* find list of errdefs and free them off
*/
(void) bofi_errdef_free(ep);
}
/*
* free clone tab slot
*/
return (0);
}
/* ARGSUSED */
static int
int *rvalp)
{
struct bofi_errent *softc;
struct bofi_errdef errdef;
struct bofi_errctl errctl;
struct bofi_errstate errstate;
void *ed_handle;
struct bofi_get_handles get_handles;
struct bofi_get_hdl_info hdl_info;
struct handle_info *hdlip;
struct handle_info *hib;
char *buffer;
char *bufptr;
char *endbuf;
char *namep;
struct bofi_shadow *hp;
int retval;
struct bofi_shadow *hhashp;
int i;
switch (cmd) {
case BOFI_ADD_DEF:
/*
* add a new error definition
*/
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_errdef32 errdef_32;
sizeof (struct bofi_errdef32), mode)) {
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
sizeof (struct bofi_errdef), mode))
return (EFAULT);
break;
}
#else /* ! _MULTI_DATAMODEL */
sizeof (struct bofi_errdef), mode) != 0)
return (EFAULT);
#endif /* _MULTI_DATAMODEL */
/*
* do some validation
*/
if (errdef.fail_count == 0)
return (EINVAL);
return (EINVAL);
return (EINVAL);
}
/*
* find softstate for this clone, so we can tag
* new errdef on to it
*/
return (ENXIO);
/*
* read in name
*/
return (EINVAL);
(void) bofi_errdef_free((struct bofi_errent *)
return (EINVAL);
}
/*
* copy out errdef again, including filled in errdef_handle
*/
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_errdef32 errdef_32;
sizeof (struct bofi_errdef32), mode) != 0) {
(void) bofi_errdef_free((struct bofi_errent *)
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
sizeof (struct bofi_errdef), mode) != 0) {
(void) bofi_errdef_free((struct bofi_errent *)
return (EFAULT);
}
break;
}
#else /* ! _MULTI_DATAMODEL */
sizeof (struct bofi_errdef), mode) != 0) {
(void) bofi_errdef_free((struct bofi_errent *)
return (EFAULT);
}
#endif /* _MULTI_DATAMODEL */
return (0);
case BOFI_DEL_DEF:
/*
* delete existing errdef
*/
sizeof (void *), mode) != 0)
return (EFAULT);
case BOFI_START:
/*
* start all errdefs corresponding to
* this name and instance
*/
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
/*
* copy in name
*/
return (EINVAL);
return (0);
case BOFI_STOP:
/*
* stop all errdefs corresponding to
* this name and instance
*/
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
/*
* copy in name
*/
return (EINVAL);
return (0);
case BOFI_BROADCAST:
/*
* wakeup all errdefs corresponding to
* this name and instance
*/
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
/*
* copy in name
*/
return (EINVAL);
return (0);
case BOFI_CLEAR_ACC_CHK:
/*
* clear "acc_chk" for all errdefs corresponding to
* this name and instance
*/
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
/*
* copy in name
*/
return (EINVAL);
return (0);
case BOFI_CLEAR_ERRORS:
/*
* set "fail_count" to 0 for all errdefs corresponding to
* this name and instance whose "access_count"
* has expired.
*/
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
/*
* copy in name
*/
return (EINVAL);
return (0);
case BOFI_CLEAR_ERRDEFS:
/*
* set "access_count" and "fail_count" to 0 for all errdefs
* corresponding to this name and instance
*/
sizeof (struct bofi_errctl), mode) != 0)
return (EFAULT);
/*
* copy in name
*/
return (EINVAL);
return (0);
case BOFI_CHK_STATE:
{
struct acc_log_elem *klg;
/*
* get state for this errdef - read in dummy errstate
* with just the errdef_handle filled in
*/
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_errstate32 errstate_32;
sizeof (struct bofi_errstate32), mode) != 0) {
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else /* ! _MULTI_DATAMODEL */
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif /* _MULTI_DATAMODEL */
return (EINVAL);
/*
* copy out real errstate structure
*/
/* insufficient user memory */
/* always pass back a time */
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_errstate32 errstate_32;
sizeof (struct bofi_errstate32), mode) != 0)
return (EFAULT);
break;
}
case DDI_MODEL_NONE:
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else /* ! _MULTI_DATAMODEL */
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif /* _MULTI_DATAMODEL */
mode) != 0) {
return (EFAULT);
}
return (retval);
}
case BOFI_CHK_STATE_W:
{
struct acc_log_elem *klg;
/*
* get state for this errdef - read in dummy errstate
* with just the errdef_handle filled in. Then wait for
* a ddi_report_fault message to come back
*/
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_errstate32 errstate_32;
sizeof (struct bofi_errstate32), mode) != 0) {
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else /* ! _MULTI_DATAMODEL */
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif /* _MULTI_DATAMODEL */
return (EINVAL);
/*
* copy out real errstate structure
*/
/* insufficient user memory */
/* always pass back a time */
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_errstate32 errstate_32;
sizeof (struct bofi_errstate32), mode) != 0)
return (EFAULT);
break;
}
case DDI_MODEL_NONE:
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
break;
}
#else /* ! _MULTI_DATAMODEL */
sizeof (struct bofi_errstate), mode) != 0)
return (EFAULT);
#endif /* _MULTI_DATAMODEL */
mode) != 0) {
return (EFAULT);
}
return (retval);
}
case BOFI_GET_HANDLES:
/*
* display existing handles
*/
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_get_handles32 get_handles_32;
sizeof (get_handles_32), mode) != 0) {
return (EFAULT);
}
NAMESIZE);
break;
}
case DDI_MODEL_NONE:
sizeof (get_handles), mode) != 0)
return (EFAULT);
break;
}
#else /* ! _MULTI_DATAMODEL */
sizeof (get_handles), mode) != 0)
return (EFAULT);
#endif /* _MULTI_DATAMODEL */
/*
* read in name
*/
return (EINVAL);
/*
* display existing handles
*/
for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
hhashp = &hhash_table[i];
continue;
continue;
continue;
/*
* print information per handle - note that
* DMA* means an unbound DMA handle
*/
"reg set %d off 0x%llx\n",
else
"reg set %d off 0x%llx"
" len 0x%llx\n",
"handle no %d len 0x%llx"
else
}
}
if (err != 0)
return (EFAULT);
else
return (0);
case BOFI_GET_HANDLE_INFO:
/*
* display existing handles
*/
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_get_hdl_info32 hdl_info_32;
sizeof (hdl_info_32), mode)) {
return (EFAULT);
}
NAMESIZE);
break;
}
case DDI_MODEL_NONE:
return (EFAULT);
break;
}
#else /* ! _MULTI_DATAMODEL */
return (EFAULT);
#endif /* _MULTI_DATAMODEL */
return (EINVAL);
if (req_count > 0) {
KM_SLEEP);
} else {
}
/*
* display existing handles
*/
for (i = 0; i < HDL_HASH_TBL_SIZE; i++) {
hhashp = &hhash_table[i];
continue;
case BOFI_ACC_HDL:
break;
case BOFI_DMA_HDL:
hdlip->access_type = 0;
hdlip->access_type |=
hdlip->access_type |=
hdlip->addr_cookie =
break;
case BOFI_INT_HDL:
break;
default:
hdlip->access_type = 0;
break;
}
hdlip++;
count++;
}
}
err = 0;
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
/*
* For use when a 32 bit app makes a call into a
* 64 bit ioctl
*/
struct bofi_get_hdl_info32 hdl_info_32;
NAMESIZE);
sizeof (hdl_info_32), mode) != 0) {
if (req_count > 0)
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
if (req_count > 0)
return (EFAULT);
}
break;
}
#else /* ! _MULTI_DATAMODEL */
if (req_count > 0)
return (EFAULT);
}
#endif /* ! _MULTI_DATAMODEL */
if (count > 0) {
if (req_count > 0)
return (EFAULT);
}
}
if (req_count > 0)
return (err);
default:
return (ENOTTY);
}
}
/*
* add a new error definition
*/
static int
struct bofi_errent *softc)
{
struct bofi_errent *ep;
struct bofi_shadow *hp;
/*
* allocate errdef structure and put on in-use list
*/
/*
* allocate space for logging
*/
else
/*
* put on in-use list
*/
errent_listp = ep;
/*
* and add it to the per-clone list
*/
/*
* look for corresponding shadow handle structures and if we find any
* tag this errdef structure on to their link lists.
*/
}
}
}
}
/*
* delete existing errdef
*/
static int
{
struct bofi_shadow *hp;
/*
* don't just assume its a valid ep - check that its on the
* in-use list
*/
break;
}
return (EINVAL);
}
/*
* found it - delete from in-use list
*/
if (prev_hep)
else
/*
* and take it off the per-clone list
*/
/*
* see if we are on any shadow handle link lists - and if we
* are then take us off
*/
if (prev_lp)
else
} else {
}
}
}
if (ep->softintr_id)
return (0);
}
/*
* start all errdefs corresponding to this name and instance
*/
static void
{
struct bofi_errent *ep;
/*
* look for any errdefs with matching name and instance
*/
}
}
/*
* stop all errdefs corresponding to this name and instance
*/
static void
{
struct bofi_errent *ep;
/*
* look for any errdefs with matching name and instance
*/
(void) drv_getparm(TIME,
}
}
/*
* wake up any thread waiting on this errdefs
*/
static uint_t
{
struct bofi_errent *hep;
struct bofi_errent *ep =
break;
}
return (DDI_INTR_UNCLAIMED);
}
}
}
return (DDI_INTR_CLAIMED);
}
/*
* wake up all errdefs corresponding to this name and instance
*/
static void
{
struct bofi_errent *ep;
/*
* look for any errdefs with matching name and instance
*/
/*
* wake up sleepers
*/
}
}
/*
* clear "acc_chk" for all errdefs corresponding to this name and instance
* and wake them up.
*/
static void
{
struct bofi_errent *ep;
/*
* look for any errdefs with matching name and instance
*/
/*
* wake up sleepers
*/
}
}
/*
* set "fail_count" to 0 for all errdefs corresponding to this name and instance
* whose "access_count" has expired, set "acc_chk" to 0 and wake them up.
*/
static void
{
struct bofi_errent *ep;
/*
* look for any errdefs with matching name and instance
*/
(void) drv_getparm(TIME,
} else
/*
* wake up sleepers
*/
}
}
/*
* set "access_count" and "fail_count" to 0 for all errdefs corresponding to
* this name and instance, set "acc_chk" to 0, and wake them up.
*/
static void
{
struct bofi_errent *ep;
/*
* look for any errdefs with matching name and instance
*/
(void) drv_getparm(TIME,
/*
* wake up sleepers
*/
}
}
/*
* get state for this errdef
*/
static int
{
struct bofi_errent *hep;
struct bofi_errent *ep;
/*
* don't just assume its a valid ep - check that its on the
* in-use list
*/
break;
return (EINVAL);
}
return (0);
}
/*
* Wait for a ddi_report_fault message to come back for this errdef
* Then return state for this errdef.
* fault report is intercepted by bofi_post_event, which triggers
* bofi_signal via a softint, which will wake up this routine if
* we are waiting
*/
static int
struct acc_log_elem **logpp)
{
struct bofi_errent *hep;
struct bofi_errent *ep;
int rval = 0;
/*
* don't just assume its a valid ep - check that its on the
* in-use list
*/
break;
return (EINVAL);
}
/*
* wait for ddi_report_fault for the devinfo corresponding
* to this errdef
*/
}
goto retry;
}
/*
* we either didn't need to sleep, we've been woken up or we've been
* signaled - either way return state now
*/
return (rval);
}
/*
* support routine - check if requested driver is defined as under test in the
* conf file.
*/
static int
{
int i;
char *rname;
/*
* Enforce the user to specifically request the following drivers.
*/
if (driver_list_neg == 0) {
return (1);
} else {
return (0);
}
}
if (driver_list_neg == 0)
return (0);
else
return (1);
}
static void
{
repcount = 1;
ddi_trigger_softintr(((struct bofi_errent *)
}
}
}
}
/*
* data if necessary
*
* bofi_mutex always held when this is called.
*/
static void
{
int i;
if (synctype == DDI_DMA_SYNC_FORDEV)
atype = BOFI_DMA_W;
else if (synctype == DDI_DMA_SYNC_FORCPU ||
atype = BOFI_DMA_R;
else
atype = 0;
length, 1, 0);
}
}
/*
* OK do the corruption
*/
/*
* work out how much to corrupt
*
* Make sure endaddr isn't greater than hp->addr + hp->len.
* If endaddr becomes less than addr len becomes negative
* and the following loop isn't entered.
*/
char buf[FM_MAX_CLASS];
}
case BOFI_EQUAL :
for (i = 0; i < len; i++)
break;
case BOFI_AND :
for (i = 0; i < len; i++)
break;
case BOFI_OR :
for (i = 0; i < len; i++)
break;
case BOFI_XOR :
for (i = 0; i < len; i++)
break;
default:
/* do nothing */
break;
}
}
}
/*
* check all errdefs linked to this shadow handle. If we've got a condition
* match check counts and corrupt data if necessary
*
* bofi_mutex always held when this is called.
*
* because of possibility of BOFI_NO_TRANSFER, we couldn't get data
* from io-space before calling this, so we pass in the func to do the
* transfer as a parameter.
*/
static uint64_t
{
struct bofi_errent *ep;
int done_get = 0;
/*
* check through all errdefs associated with this shadow handle
*/
else
/*
* condition match for pio read
*/
if (done_get == 0) {
done_get = 1;
}
}
/*
* OK do corruption
*/
if (done_get == 0) {
/*
* no transfer - bomb out
*/
return (operand);
done_get = 1;
}
}
char buf[FM_MAX_CLASS];
NULL);
}
case BOFI_EQUAL :
break;
case BOFI_AND :
break;
case BOFI_OR :
break;
case BOFI_XOR :
break;
default:
/* do nothing */
break;
}
}
}
}
if (done_get == 0)
else
return (get_val);
}
/*
* check all errdefs linked to this shadow handle. If we've got a condition
* match check counts and corrupt data if necessary
*
* bofi_mutex always held when this is called.
*
* because of possibility of BOFI_NO_TRANSFER, we return 0 if no data
* is to be written out to io-space, 1 otherwise
*/
static int
{
struct bofi_errent *ep;
/*
* check through all errdefs associated with this shadow handle
*/
else
/*
* condition match for pio write
*/
repcount, &v);
repcount, &v);
/*
* OK do corruption
*/
char buf[FM_MAX_CLASS];
NULL);
}
case BOFI_EQUAL :
break;
case BOFI_AND :
break;
case BOFI_OR :
break;
case BOFI_XOR :
break;
case BOFI_NO_TRANSFER :
/*
* no transfer - bomb out
*/
return (0);
default:
/* do nothing */
break;
}
}
}
}
return (1);
}
static uint64_t
{
}
#define BOFI_READ_CHECKS(type) \
if (bofi_ddi_check) \
"ddi_get() out of range addr %p not in %p/%llx", \
return (0); \
}
/*
* our getb() routine - use tryenter
*/
static uint8_t
{
struct bofi_shadow *hp;
1);
return (retval);
}
static uint64_t
{
}
/*
* our getw() routine - use tryenter
*/
static uint16_t
{
struct bofi_shadow *hp;
2);
return (retval);
}
static uint64_t
{
}
/*
* our getl() routine - use tryenter
*/
static uint32_t
{
struct bofi_shadow *hp;
4);
return (retval);
}
static uint64_t
{
}
/*
* our getll() routine - use tryenter
*/
static uint64_t
{
struct bofi_shadow *hp;
8);
return (retval);
}
#define BOFI_WRITE_TESTS(type) \
if (bofi_ddi_check) \
"ddi_put() out of range addr %p not in %p/%llx\n", \
return; \
}
/*
* our putb() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
return;
}
}
/*
* our putw() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
return;
}
}
/*
* our putl() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
return;
}
}
/*
* our putll() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
return;
}
}
#define BOFI_REP_READ_TESTS(type) \
if (bofi_ddi_check) \
"ddi_rep_get() out of range addr %p not in %p/%llx\n", \
return; \
}
/*
* our rep_getb() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
}
}
/*
* our rep_getw() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
}
}
/*
* our rep_getl() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
}
}
/*
* our rep_getll() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
}
}
#define BOFI_REP_WRITE_TESTS(type) \
if (bofi_ddi_check) \
"ddi_rep_put() out of range addr %p not in %p/%llx\n", \
return; \
}
/*
* our rep_putb() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
repcount))
}
}
/*
* our rep_putw() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
repcount))
}
}
/*
* our rep_putl() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
repcount))
}
}
/*
* our rep_putll() routine - use tryenter
*/
static void
{
struct bofi_shadow *hp;
int i;
return;
}
for (i = 0; i < repcount; i++) {
repcount))
}
}
/*
* our ddi_map routine
*/
static int
{
struct bofi_shadow *hp;
struct bofi_errent *ep;
int retval;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
case DDI_MO_MAP_LOCKED:
/*
* for this case get nexus to do real work first
*/
vaddrp);
if (retval != DDI_SUCCESS)
return (retval);
return (DDI_SUCCESS);
/*
* if driver_list is set, only intercept those drivers
*/
return (DDI_SUCCESS);
/*
* support for ddi_regs_map_setup()
* - allocate shadow handle structure and fill it in
*/
NAMESIZE);
/*
* return spurious value to catch direct access to registers
*/
if (bofi_ddi_check)
if (len == 0)
else
/*
* save existing function pointers and plug in our own
*/
#if defined(__sparc)
#else
#endif
/*
* stick in a pointer to our shadow handle
*/
/*
* add to dhash, hhash and inuse lists
*/
/*
* chain on any pre-existing errdefs that apply to this
* acc_handle
*/
}
}
}
return (DDI_SUCCESS);
case DDI_MO_UNMAP:
break;
/*
* support for ddi_regs_map_free()
* - check we really have a shadow handle for this one
*/
break;
break;
}
/*
* got a shadow handle - restore original pointers
*/
/*
* remove from dhash, hhash and inuse lists
*/
/*
* free any errdef link structures tagged onto the shadow handle
*/
}
/*
* finally delete shadow handle
*/
break;
default:
break;
}
}
/*
* chain any pre-existing errdefs on to newly created dma handle
* if required call do_dma_corrupt() to corrupt data
*/
static void
{
struct bofi_errent *ep;
/*
* chain on any pre-existing errdefs that apply to this dma_handle
*/
/*
* got a match - link it on
*/
}
}
}
}
}
/*
* need to do copy byte-by-byte in case one of pages is little-endian
*/
static void
{
while (len--)
*t++ = *f++;
}
/*
* our ddi_dma_map routine
*/
static int
{
int maxrnumber = 0;
int retval = DDI_DMA_NORESOURCES;
auto struct ddi_dma_req dmareq;
int sleep;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
/*
* if driver_list is set, only intercept those drivers
*/
/*
* allocate shadow handle structure and fill it in
*/
goto error;
} else {
}
/*
* get a kernel virtual mapping
*/
goto error;
if (bofi_sync_check) {
/*
* Take a copy and pass pointers to this up to nexus instead.
* Data will be copied from the original on explicit
* and implicit ddi_dma_sync()
*
* - maintain page alignment because some devices assume it.
*/
&hp->umem_cookie);
goto error;
}
/*
* call nexus to do the real work
*/
if (retval != DDI_SUCCESS)
goto error2;
/*
* now set dma_handle to point to real handle
*/
/*
* unset DMP_NOSYNC
*/
/*
* bind and unbind are cached in devinfo - must overwrite them
* - note that our bind and unbind are quite happy dealing with
* any handles for this devinfo that were previously allocated
*/
if (save_bus_ops.bus_dma_unbindhdl ==
/*
* get an "rnumber" for this handle - really just seeking to
* get a unique number - generally only care for early allocated
* handles - so we get as far as INT_MAX, just stay there
*/
else
}
/*
* add to dhash, hhash and inuse lists
*/
/*
* chain on any pre-existing errdefs that apply to this
* acc_handle and corrupt if required (as there is an implicit
* ddi_dma_sync() in this call)
*/
return (retval);
/*
* what to do here? Wait a bit and try again
*/
}
if (hp) {
}
return (retval);
}
/*
* our ddi_dma_allochdl routine
*/
static int
{
int retval = DDI_DMA_NORESOURCES;
int maxrnumber = 0;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
/*
* if driver_list is set, only intercept those drivers
*/
if (!driver_under_test(rdip))
/*
* allocate shadow handle structure and fill it in
*/
/*
* what to do here? Wait a bit and try again
*/
if (waitfp != DDI_DMA_DONTWAIT)
return (retval);
}
/*
* call nexus to do the real work
*/
handlep);
if (retval != DDI_SUCCESS) {
return (retval);
}
/*
* now point set dma_handle to point to real handle
*/
/*
* bind and unbind are cached in devinfo - must overwrite them
* - note that our bind and unbind are quite happy dealing with
* any handles for this devinfo that were previously allocated
*/
if (save_bus_ops.bus_dma_unbindhdl ==
/*
* get an "rnumber" for this handle - really just seeking to
* get a unique number - generally only care for early allocated
* handles - so we get as far as INT_MAX, just stay there
*/
else
}
/*
* add to dhash, hhash and inuse lists
*/
return (retval);
}
/*
 * our ddi_dma_freehdl routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static int
{
int retval;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
/*
 * find shadow for this handle
 */
break;
/*
 * call nexus to do the real work
 */
if (retval != DDI_SUCCESS) {
return (retval);
}
/*
 * did we really have a shadow for this handle
 */
return (retval);
/*
 * yes we have - see if it's still bound
 */
panic("driver freeing bound dma_handle");
/*
 * remove from dhash, hhash and inuse lists
 */
return (retval);
}
/*
 * our ddi_dma_bindhdl routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static int
{
int retval = DDI_DMA_NORESOURCES;
auto struct ddi_dma_req dmareq;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
/*
 * check we really have a shadow for this handle
 */
break;
/*
 * no we don't - just call nexus to do the real work
 */
}
/*
 * yes we have - see if it's already bound
 */
return (DDI_DMA_INUSE);
} else {
}
/*
 * get a kernel virtual mapping
 */
goto error;
if (bofi_sync_check) {
/*
 * Take a copy and pass pointers to this up to nexus instead.
 * Data will be copied from the original on explicit
 * and implicit ddi_dma_sync()
 *
 * - maintain page alignment because some devices assume it.
 */
&hp->umem_cookie);
goto error;
}
/*
 * call nexus to do the real work
 */
if (retval != DDI_SUCCESS)
goto error2;
/*
 * unset DMP_NOSYNC
 */
/*
 * chain on any pre-existing errdefs that apply to this
 * dma_handle and corrupt if required (as there is an implicit
 * ddi_dma_sync() in this call)
 */
return (retval);
/*
 * what to do here? Wait a bit and try again
 */
}
if (hp) {
}
return (retval);
}
/*
 * our ddi_dma_unbindhdl routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static int
{
struct bofi_errent *ep;
int retval;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
/*
 * call nexus to do the real work
 */
if (retval != DDI_SUCCESS)
return (retval);
/*
 * check we really have a shadow for this handle
 */
break;
return (retval);
}
/*
 * yes we have - see if it's already unbound
 */
panic("driver unbinding unbound dma_handle");
/*
 * free any errdef link structures tagged on to this
 * shadow handle
 */
/*
 * there is an implicit sync_for_cpu on free -
 * may need to corrupt
 */
}
}
/*
 * implicit sync_for_cpu - copy data back
 */
return (retval);
}
/*
 * our ddi_dma_sync routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static int
{
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
int retval;
/*
 * in this case get nexus driver to do sync first
 */
if (retval != DDI_SUCCESS)
return (retval);
}
/*
 * check we really have a shadow for this handle
 */
break;
/*
 * yes - do we need to copy data from original
 */
/*
 * yes - check if we need to corrupt the data
 */
(flags == DDI_DMA_SYNC_FORCPU ||
flags == DDI_DMA_SYNC_FORKERNEL)) ||
(flags == DDI_DMA_SYNC_FORDEV))) &&
}
}
/*
 * do we need to copy data to original
 */
}
if (flags == DDI_DMA_SYNC_FORDEV)
/*
 * in this case get nexus driver to do sync last
 */
return (retval);
}
/*
 * our dma_win routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static int
{
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
int retval;
/*
 * call nexus to do the real work
 */
if (retval != DDI_SUCCESS)
return (retval);
/*
 * check we really have a shadow for this handle
 */
break;
/*
 * yes - make sure DMP_NOSYNC is unset
 */
}
return (retval);
}
/*
 * our dma_ctl routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static int
{
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
int retval;
int i;
struct bofi_shadow *dummyhp;
/*
 * get nexus to do real work
 */
if (retval != DDI_SUCCESS)
return (retval);
/*
 * if driver_list is set, only intercept those drivers
 */
if (!driver_under_test(rdip))
return (DDI_SUCCESS);
#if defined(__sparc)
/*
 * check if this is a dvma_reserve - that one's like a
 * dma_allochdl and needs to be handled separately
 */
if (request == DDI_DMA_RESERVE) {
return (DDI_SUCCESS);
}
#endif
/*
 * check we really have a shadow for this handle
 */
break;
return (retval);
}
/*
 * yes we have - see what kind of command this is
 */
switch (request) {
case DDI_DMA_RELEASE:
/*
 * dvma release - release dummy handle and all the index handles
 */
/*
 * check none of the index handles were still loaded
 */
panic("driver releasing loaded dvma");
/*
 * remove from dhash and inuse lists
 */
}
sizeof (struct bofi_shadow *));
return (retval);
case DDI_DMA_FREE:
/*
 * ddi_dma_free case - remove from dhash, hhash and inuse lists
 */
/*
 * free any errdef link structures tagged on to this
 * shadow handle
 */
/*
 * there is an implicit sync_for_cpu on free -
 * may need to corrupt
 */
}
}
return (retval);
case DDI_DMA_MOVWIN:
break;
case DDI_DMA_NEXTWIN:
break;
default:
break;
}
return (retval);
}
#if defined(__sparc)
/*
 * dvma reserve case from bofi_dma_ctl()
 * (sparc only - sits inside the #if defined(__sparc) above)
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static void
{
struct bofi_shadow *hp;
struct bofi_shadow *dummyhp;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
struct fast_dvma *nexus_private;
int i, count;
/*
 * allocate dummy shadow handle structure
 */
/*
 * overlay our routines over the nexus's dvma routines
 */
}
/*
 * now fill in the dummy handle. This just gets put on hhash queue
 * so our dvma routines can find and index off to the handle they
 * really want.
 */
/*
 * allocate space for real handles
 */
sizeof (struct bofi_shadow *), KM_SLEEP);
for (i = 0; i < count; i++) {
/*
 * allocate shadow handle structures and fill them in
 */
if (bofi_sync_check) {
/*
 * Take a copy and set this to be hp->addr
 * Data will be copied to and from the original on
 * explicit and implicit ddi_dma_sync()
 *
 * - maintain page alignment because some devices
 * assume it.
 */
+ pagemask + 1,
}
/*
 * add to dhash and inuse lists.
 * these don't go on hhash queue.
 */
}
/*
 * add dummy handle to hhash list only
 */
}
/*
 * our dvma_kaddr_load()
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static void
{
struct bofi_shadow *dummyhp;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
struct bofi_errent *ep;
/*
 * check we really have a dummy shadow for this handle
 */
break;
/*
 * no dummy shadow - panic
 */
panic("driver dvma_kaddr_load with no reserve");
}
/*
 * find real hp
 */
/*
 * check it's not already loaded
 */
panic("driver loading loaded dvma");
/*
 * if we're doing copying, just need to change origaddr and get
 * nexus to map hp->addr again
 * if not, set hp->addr to new address.
 * - note these are always kernel virtual addresses - no need to map
 */
} else
/*
 * get nexus to do the real work
 */
/*
 * chain on any pre-existing errdefs that apply to this dma_handle
 * no need to corrupt - there's no implicit dma_sync on this one
 */
}
}
}
}
/*
 * our dvma_unload()
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static void
{
struct bofi_errent *ep;
struct bofi_shadow *dummyhp;
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
/*
 * check we really have a dummy shadow for this handle
 */
break;
/*
 * no dummy shadow - panic
 */
panic("driver dvma_unload with no reserve");
}
/*
 * find real hp
 */
/*
 * check it's not already unloaded
 */
panic("driver unloading unloaded dvma");
/*
 * free any errdef link structures tagged on to this
 * shadow handle - do corruption if necessary
 */
(view == DDI_DMA_SYNC_FORCPU ||
view == DDI_DMA_SYNC_FORKERNEL) &&
}
}
/*
 * if there is an explicit sync_for_cpu, then do copy to original
 */
if (bofi_sync_check &&
}
/*
 * our dvma_sync()
 * (original header comment said "dvma_unload()", but the panic string
 * below - "driver dvma_sync with no reserve" - and the FORCPU/FORDEV
 * handling show this is the dvma sync intercept)
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static void
{
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_shadow *dummyhp;
struct bofi_shadow *hhashp;
/*
 * check we really have a dummy shadow for this handle
 */
break;
/*
 * no dummy shadow - panic
 */
panic("driver dvma_sync with no reserve");
}
/*
 * find real hp
 */
/*
 * check it's already loaded
 */
panic("driver syncing unloaded dvma");
/*
 * in this case do sync first
 */
/*
 * if there is an explicit sync_for_dev, then do copy from original
 */
}
/*
 * do corruption if necessary
 */
(view == DDI_DMA_SYNC_FORCPU ||
view == DDI_DMA_SYNC_FORKERNEL)) ||
(view == DDI_DMA_SYNC_FORDEV))) &&
}
}
/*
 * if there is an explicit sync_for_cpu, then do copy to original
 */
if (bofi_sync_check &&
}
if (view == DDI_DMA_SYNC_FORDEV)
/*
 * in this case do sync last
 */
}
#endif
/*
 * bofi intercept routine - gets called instead of user's interrupt routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static uint_t
{
struct bofi_errent *ep;
struct bofi_shadow *hp;
int intr_count = 1;
int i;
int unclaimed_counter = 0;
int jabber_detected = 0;
/*
 * check if nothing to do
 */
/*
 * look for any errdefs
 */
/*
 * got one
 */
/*
 * OK do "corruption"
 */
case BOFI_DELAY_INTR:
}
break;
case BOFI_LOSE_INTR:
intr_count = 0;
break;
case BOFI_EXTRA_INTR:
break;
default:
break;
}
}
}
}
/*
 * send extra or fewer interrupts as requested
 */
for (i = 0; i < intr_count; i++) {
if (result == DDI_INTR_CLAIMED)
unclaimed_counter >>= 1;
else if (++unclaimed_counter >= 20)
jabber_detected = 1;
if (i == 0)
}
/*
 * if more than 1000 spurious interrupts requested and
 * jabber not detected - give warning
 */
panic("undetected interrupt jabber: %s%d",
/*
 * return first response - or "unclaimed" if none
 */
return (retval);
}
/*
 * our ddi_check_acc_hdl
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
/* ARGSUSED */
static int
{
struct bofi_shadow *hp;
return (0);
}
/*
 * OR in error state from all associated
 * errdef structures
 */
}
}
return (result);
}
/*
 * our ddi_check_dma_hdl
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
/* ARGSUSED */
static int
{
struct bofi_shadow *hp;
struct bofi_shadow *hhashp;
/*
 * need bofi_mutex to walk the shadow-handle lists, but this is
 * a check routine - if the lock is contended just report no error
 * rather than blocking
 */
if (!mutex_tryenter(&bofi_mutex)) {
return (0);
}
break;
return (0);
}
return (0);
}
/*
 * OR in error state from all associated
 * errdef structures
 */
}
}
return (result);
}
/*
 * DDI fault-event callback (exact name lost in this extract - the
 * ddi_fault_event_data argument suggests it handles posted fault
 * events; presumably bofi_post_event - TODO confirm against upstream).
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
/* ARGSUSED */
static int
{
struct ddi_fault_event_data *arg;
struct bofi_errent *ep;
struct bofi_shadow *hp;
struct bofi_shadow *dhashp;
return (DDI_FAILURE);
impl_data));
/*
 * find shadow handles with appropriate dev_infos
 * and set error reported on all associated errdef structures
 */
}
}
}
}
}
/*
 * FM ereport callback (exact name lost in this extract - the
 * FM_MAX_CLASS / DDI_FM_SERVICE_* usage marks this as the fault
 * management ereport handler - TODO confirm against upstream).
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
/*ARGSUSED*/
static int
{
char *class = "";
char *path = "";
char *ptr;
struct bofi_errent *ep;
struct bofi_shadow *hp;
char service_class[FM_MAX_CLASS];
char hppath[MAXPATHLEN];
int service_ereport = 0;
service_ereport = 1;
/*
 * find shadow handles with appropriate dev_infos
 * and set error reported on all associated errdef structures
 */
continue;
continue;
continue;
if (service_ereport) {
DDI_FM_SERVICE_DEGRADED) == 0)
DDI_FM_SERVICE_RESTORED) == 0)
else
}
}
}
}
return (0);
}
/*
 * our intr_ops routine
 *
 * NOTE(review): incomplete extract - the signature and most statements
 * are missing; only fragments and comments survive.  Recover the full
 * body from upstream bofi.c before building.
 */
static int
{
int retval;
struct bofi_shadow *hp;
struct bofi_shadow *dhashp;
struct bofi_shadow *hhashp;
struct bofi_errent *ep;
switch (intr_op) {
case DDI_INTROP_ADDISR:
/*
 * if driver_list is set, only intercept those drivers
 */
if (!driver_under_test(rdip))
/*
 * allocate shadow handle structure and fill in
 */
/*
 * save whether hilevel or not
 */
else
/*
 * call nexus to do real work, but specifying our handler, and
 * our shadow handle as argument
 */
if (retval != DDI_SUCCESS) {
return (retval);
}
/*
 * add to dhash, hhash and inuse lists
 */
/*
 * chain on any pre-existing errdefs that apply to this
 * handle
 */
}
}
}
return (retval);
case DDI_INTROP_REMISR:
/*
 * call nexus routine first
 */
/*
 * find shadow handle
 */
break;
}
}
return (retval);
}
/*
 * found one - remove from dhash, hhash and inuse lists
 */
/*
 * free any errdef link structures
 * tagged on to this shadow handle
 */
}
return (retval);
default:
}
}