/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
*/
/*
* Multiplexed I/O SCSI vHCI implementation
*/
#include <sys/mdi_impldefs.h>
#include <sys/byteorder.h>
extern uintptr_t scsi_callback_id;
extern ddi_dma_attr_t scsi_alloc_attr;
#ifdef DEBUG
#endif
/* retry for the vhci_do_prout command when a not ready is returned */
/*
* These values are defined to support the internal retry of
* SCSI packets for better sense code handling.
*/
#define VHCI_CMD_CMPLT 0
/* changed to 132 to accommodate HDS */
/*
* Version Macros
*/
int vhci_first_time = 0;
/*
* Flag to delay the retry of the reserve command
*/
/* uscsi delay for a TRAN_BUSY */
/* uscsi_restart_sense timeout id in case it needs to get canceled */
static int vhci_bus_config_debug = 0;
/*
* Bidirectional map of 'target-port' to port id <pid> for support of
* iostat(1M) '-Xx' and '-Yx' output.
*/
/*
* functions exported by scsi_vhci struct cb_ops
*/
/*
* functions exported by scsi_vhci struct dev_ops
*/
/*
* functions exported by scsi_vhci scsi_hba_tran_t transport table
*/
scsi_hba_tran_t *, struct scsi_device *);
struct scsi_device *);
static int vhci_scsi_reset(struct scsi_address *, int);
static int vhci_scsi_reset_bus(struct scsi_address *);
static int vhci_scsi_getcap(struct scsi_address *, char *, int);
static int vhci_scsi_setcap(struct scsi_address *, char *, int, int);
static int vhci_commoncap(struct scsi_address *, char *, int, int, int);
caddr_t);
static int vhci_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int vhci_scsi_get_name(struct scsi_device *, char *, int);
void *, void *);
void *, dev_info_t **);
void *);
void **, char **);
/*
* functions registered with the mpxio framework via mdi_vhci_ops_t
*/
mdi_pathinfo_state_t, uint32_t, int);
static void vhci_client_attached(dev_info_t *);
static int vhci_get_iocswitchdata(const void *, sv_switch_to_cntlr_iocdata_t *,
int, caddr_t);
sv_iocdata_t *, int, caddr_t);
static int vhci_handle_ext_fo(struct scsi_pkt *, int);
static int vhci_quiesce_lun(struct scsi_vhci_lun *);
static int vhci_pgr_validate_and_register(scsi_vhci_priv_t *);
static void vhci_dispatch_scsi_start(void *);
static void vhci_efo_done(void *);
static void vhci_initiate_auto_failback(void *);
static void vhci_kstat_create_pathinfo(mdi_pathinfo_t *);
scsi_vhci_lun_t *, char *, char *);
static char *vhci_devnm_to_guid(char *);
static int vhci_do_prout(scsi_vhci_priv_t *);
static void vhci_run_cmd(void *);
static int vhci_do_prin(struct vhci_pkt **);
void vhci_update_pathstates(void *);
#ifdef DEBUG
static void vhci_print_prin_keys(vhci_prin_readkeys_t *, int);
#endif
static void vhci_print_prout_keys(scsi_vhci_lun_t *, char *);
/*
* MP-API related functions
*/
extern int vhci_mpapi_init(struct scsi_vhci *);
extern void vhci_mpapi_add_dev_prod(struct scsi_vhci *, char *);
extern void vhci_update_mpapi_data(struct scsi_vhci *,
scsi_vhci_lun_t *, mdi_pathinfo_t *);
uint8_t, void*);
extern int vhci_mpapi_update_tpg_acc_state_for_lu(struct scsi_vhci *,
scsi_vhci_lun_t *);
/*
* Macros for the device-type mpxio options
*/
vhci_open, /* open */
vhci_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
vhci_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* chpoll */
ddi_prop_op, /* cb_prop_op */
0, /* streamtab */
CB_REV, /* rev */
nodev, /* aread */
nodev /* awrite */
};
0,
nulldev, /* identify */
nulldev, /* probe */
vhci_attach, /* attach and detach are mandatory */
nodev, /* reset */
&vhci_cb_ops, /* cb_ops */
NULL, /* bus_ops */
NULL, /* power */
ddi_quiesce_not_needed, /* quiesce */
};
extern struct mod_ops mod_driverops;
vhci_version_name, /* module name */
};
&modldrv,
};
vhci_pathinfo_init, /* Pathinfo node init callback */
vhci_pathinfo_uninit, /* Pathinfo uninit callback */
vhci_pathinfo_state_change, /* Pathinfo node state change */
vhci_failover, /* failover callback */
vhci_client_attached, /* client attached callback */
vhci_is_dev_supported /* is device supported by mdi */
};
/*
* The scsi_failover table defines an ordered set of 'fops' modules supported
* by scsi_vhci. Currently, initialize this table from the 'ddi-forceload'
* property specified in scsi_vhci.conf.
*/
static struct scsi_failover {
int
_init(void)
{
int rval;
/*
* Allocate soft state and prepare to do ddi_soft_state_zalloc()
* before registering with the transport first.
*/
sizeof (struct scsi_vhci), 1)) != 0) {
"!_init:soft state init failed\n"));
return (rval);
}
"!_init: scsi hba init failed\n"));
return (rval);
}
if (vhci_targetmap_bypid)
}
return (rval);
}
/*
* the system is done with us as a driver, so clean up
*/
int
_fini(void)
{
int rval;
/*
* don't start cleaning up until we know that the module remove
* has worked -- if this works, then we know that each instance
* has successfully been DDI_DETACHed
*/
return (rval);
}
if (vhci_targetmap_bypid)
return (rval);
}
int
{
}
/*
* Lookup scsi_failover by "short name" of failover module.
*/
struct scsi_failover_ops *
{
continue;
}
return (NULL);
}
/*
* Load all scsi_failover_ops 'fops' modules.
*/
static void
{
char **module;
int i;
char **dt;
int e;
if (scsi_failover_table)
return;
/* Get the list of modules from scsi_vhci.conf */
"scsi_vhci.conf is missing 'ddi-forceload'");
return;
}
if (scsi_nfailover == 0) {
"scsi_vhci.conf has empty 'ddi-forceload'");
return;
}
/* allocate failover table based on number of modules */
scsi_failover_table = (struct scsi_failover *)
KM_SLEEP);
/* loop over modules specified in scsi_vhci.conf and open each module */
continue;
/*
* A module returns EEXIST if other software is
* supporting the intended function: for example
* the scsi_vhci_f_sum_emc module returns EEXIST
* from _init if EMC powerpath software is installed.
*/
if (e != EEXIST)
"module '%s', error %d", module[i], e);
continue;
}
"scsi_vhci_failover_ops", &e);
"unable to import 'scsi_failover_ops' from '%s', "
"error %d", module[i], e);
continue;
}
sf++;
}
/* verify that at least the "well-known" modules were there */
SFO_NAME_SYM "\" not defined in scsi_vhci.conf's "
"'ddi-forceload'");
SFO_NAME_TPGS "\" not defined in scsi_vhci.conf's "
"'ddi-forceload'");
/* call sfo_init for modules that need it */
}
}
/*
* unload all loaded scsi_failover_ops modules
*/
static void
{
continue;
}
if (scsi_failover_table && scsi_nfailover)
scsi_nfailover = 0;
}
/* ARGSUSED */
static int
{
return (EINVAL);
}
return (ENXIO);
}
return (EBUSY);
}
return (0);
}
/* ARGSUSED */
static int
{
return (EINVAL);
}
return (ENXIO);
}
return (0);
}
/* ARGSUSED */
static int
{
} else {
}
}
/*
* attach the module
*/
static int
{
int scsi_hba_attached = 0;
int vhci_attached = 0;
int mutex_initted = 0;
int instance;
char *data;
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
case DDI_PM_RESUME:
"implemented\n"));
return (rval);
default:
"!vhci_attach: unknown ddi command\n"));
return (rval);
}
/*
* Allocate vhci data structure.
*/
"soft state alloc failed\n"));
return (DDI_FAILURE);
}
"bad soft state\n"));
return (DDI_FAILURE);
}
/* Allocate packet cache */
"vhci%d_cache", instance);
/*
* Allocate a transport structure
*/
/*
* register event notification routines with scsa
*/
/*
* Attach this instance with the mpxio framework
*/
!= MDI_SUCCESS) {
"mdi_vhci_register failed\n"));
goto attach_fail;
}
/*
* Attach this instance of the hba.
*
* Regarding dma attributes: Since scsi_vhci is a virtual scsi HBA
* driver, it has nothing to do with DMA. However, when calling
* scsi_hba_attach_setup() we need to pass something valid in the
* dma attributes parameter. So we just use scsi_alloc_attr.
* SCSA itself seems to care only for dma_attr_minxfer and
* dma_attr_burstsizes fields of dma attributes structure.
 * It expects those fields to be non-zero.
*/
SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS) {
"hba attach failed\n"));
goto attach_fail;
}
" ddi_create_minor_node failed\n"));
goto attach_fail;
}
/*
* Set pm-want-child-notification property for
* power management of the phci and client
*/
"%s%d fail to create pm-want-child-notification? prop",
goto attach_fail;
}
/*
* Set appropriate configuration flags based on options set in
* conf file.
*/
vhci->vhci_conf_flags = 0;
}
"disabled through scsi_vhci.conf file.");
/*
* Allocate an mpapi private structure
*/
if (vhci_mpapi_init(vhci) != 0) {
"vhci_mpapi_init() failed"));
}
return (DDI_SUCCESS);
if (vhci_attached)
(void) mdi_vhci_unregister(dip, 0);
if (scsi_hba_attached)
(void) scsi_hba_detach(dip);
if (mutex_initted) {
}
return (DDI_FAILURE);
}
/*ARGSUSED*/
static int
{
return (DDI_FAILURE);
if (!vhci) {
return (DDI_FAILURE);
}
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
case DDI_PM_SUSPEND:
"implemented\n"));
return (DDI_FAILURE);
default:
"!vhci_detach: unknown ddi command\n"));
return (DDI_FAILURE);
}
(void) mdi_vhci_unregister(dip, 0);
(void) scsi_hba_detach(dip);
"pm-want-child-notification?") != DDI_PROP_SUCCESS) {
"%s%d unable to remove prop pm-want_child_notification?",
}
if (vhci_restart_timeid != 0) {
(void) untimeout(vhci_restart_timeid);
}
vhci_restart_timeid = 0;
vhci_failover_modclose(); /* unload failover modules */
return (DDI_SUCCESS);
}
/*
* vhci_getinfo()
* Given the device number, return the devinfo pointer or the
* instance number.
* Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
*/
/*ARGSUSED*/
static int
{
switch (cmd) {
case DDI_INFO_DEVT2DEVINFO:
else {
return (DDI_FAILURE);
}
break;
case DDI_INFO_DEVT2INSTANCE:
break;
default:
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
{
char *guid;
int rval;
/*
* This must be the .conf node without GUID property.
* The node under fp already inserts a delay, so we
* just return from here. We rely on this delay to have
* all dips be posted to the ndi hotplug thread's newdev
* list. This is necessary for the deferred attach
* mechanism to work and opens() done soon after boot to
* succeed.
*/
"property failed"));
return (DDI_NOT_WELL_FORMED);
}
if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
/*
* This must be .conf node with the GUID property. We don't
* merge property by ndi_merge_node() here because the
		 * devi_addr_buf of .conf node is "" always, according to the
* implementation of vhci_scsi_get_name_bus_addr().
*/
return (DDI_FAILURE);
}
"!tgt_init: called for %s (instance %d)\n",
from_ticks = ddi_get_lbolt();
if (vhci_to_ticks == 0) {
}
#if DEBUG
if (vlun) {
"vhci_scsi_tgt_init: guid %s : found vlun 0x%p "
"from_ticks %lx to_ticks %lx",
} else {
"vhci_scsi_tgt_init: guid %s : vlun not found "
}
#endif
if (rval == MDI_SUCCESS) {
}
/*
* Wait for the following conditions :
* 1. no vlun available yet
* 2. no path established
* 3. timer did not expire
*/
(rval != MDI_SUCCESS)) {
"vlun 0x%p lun guid %s not supported!",
return (DDI_NOT_WELL_FORMED);
}
vhci_first_time = 1;
}
if (vhci_first_time == 1) {
"no wait for %s. from_tick %lx, to_tick %lx",
return (DDI_NOT_WELL_FORMED);
}
if (cv_timedwait(&vhci_cv,
/* Timed out */
#ifdef DEBUG
"tgt_init: no vlun for %s!", guid));
} else if (mdi_client_get_path_count(tgt_dip) == 0) {
"tgt_init: client path count is "
"zero for %s!", guid));
} else {
"tgt_init: client path not "
"available yet for %s!", guid));
}
#endif /* DEBUG */
return (DDI_NOT_WELL_FORMED);
}
if (rval == MDI_SUCCESS) {
}
from_ticks = ddi_get_lbolt();
}
return (DDI_SUCCESS);
}
/*ARGSUSED*/
static void
{
}
/*
* a PGR register command has started; copy the info we need
*/
int
{
void *addr;
if (!vpkt->vpkt_tgt_init_bp)
return (TRAN_BADPKT);
return (TRAN_BUSY);
(2 * MHIOC_RESV_KEY_SIZE*sizeof (char)));
return (0);
}
/*
* Function name : vhci_scsi_start()
*
* Return Values : TRAN_FATAL_ERROR - vhci has been shutdown
* or other fatal failure
* preventing packet transportation
* TRAN_BUSY - request queue is full
* TRAN_ACCEPT - pkt has been submitted to phci
* (or is held in the waitQ)
* Description : Implements SCSA's tran_start() entry point for
* packet transport
*
*/
static int
{
int flags = 0;
int restore_lbp = 0;
/* set if pkt is SCSI-II RESERVE cmd */
int pkt_reserve_cmd = 0;
int reserve_failed = 0;
int resrv_instance = 0;
/*
* Block IOs if LUN is held or QUIESCED for IOs.
*/
if ((VHCI_LUN_IS_HELD(vlun)) ||
return (TRAN_BUSY);
}
/*
* vhci_lun needs to be quiesced before SCSI-II RESERVE command
* can be issued. This may require a cv_timedwait, which is
* dangerous to perform in an interrupt context. So if this
* is a RESERVE command a taskq is dispatched to service it.
* This taskq shall again call vhci_scsi_start, but we shall be
* sure its not in an interrupt context.
*/
vhci_dispatch_scsi_start, (void *) vpkt,
KM_NOSLEEP)) {
return (TRAN_ACCEPT);
} else {
return (TRAN_BUSY);
}
}
/*
* Here we ensure that simultaneous SCSI-II RESERVE cmds don't
* get serviced for a lun.
*/
if (!held) {
return (TRAN_BUSY);
return (TRAN_BUSY);
}
/*
* To ensure that no IOs occur for this LUN for the duration
* of this pkt set the VLUN_QUIESCED_FLG.
* In case this routine needs to exit on error make sure that
* this flag is cleared.
*/
pkt_reserve_cmd = 1;
/*
* if this is a SCSI-II RESERVE command, set load balancing
* policy to be ALTERNATE PATH to ensure that all subsequent
* IOs are routed on the same path. This is because if commands
* are routed across multiple paths then IOs on paths other than
* the one on which the RESERVE was executed will get a
* RESERVATION CONFLICT
*/
if (lbp != LOAD_BALANCE_NONE) {
return (TRAN_FATAL_ERROR);
}
MDI_SUCCESS) {
return (TRAN_FATAL_ERROR);
}
restore_lbp = 1;
}
"!vhci_scsi_start: sending SCSI-2 RESERVE, vlun 0x%p, "
"svl_resrv_pip 0x%p, svl_flags: %x, lb_policy %x",
/*
* See comments for VLUN_RESERVE_ACTIVE_FLG in scsi_vhci.h
* To narrow this window where a reserve command may be sent
* down an inactive path the path states first need to be
* updated. Before calling vhci_update_pathstates reset
* VLUN_RESERVE_ACTIVE_FLG, just in case it was already set
* for this lun. This shall prevent an unnecessary reset
* from being sent out. Also remember currently reserved path
* just for a case the new reservation will go to another path.
*/
}
vhci_update_pathstates((void *)vlun);
}
/*
* If the command is PRIN with action of zero, then the cmd
* is reading PR keys which requires filtering on completion.
* Data cache sync must be guaranteed.
*/
}
/*
* Do not defer bind for PKT_DMA_PARTIAL
*/
/* This is a non pkt_dma_partial case */
if ((rval = vhci_bind_transport(
!= TRAN_ACCEPT) {
"!vhci%d %x: failed to bind transport: "
"vlun 0x%p pkt_reserved %x restore_lbp %x,"
if (restore_lbp)
if (pkt_reserve_cmd)
return (rval);
}
"vhci_scsi_start: v_b_t called 0x%p\n", (void *)vpkt));
}
/*
* This is the chance to adjust the pHCI's pkt and other information
* from target driver's pkt.
*/
(void *)vpkt));
"!vhci_bind: reserve flag set for vlun 0x%p, but, "
"pktpath 0x%p resrv path 0x%p differ. lb_policy %x",
(void *)vlun->svl_resrv_pip,
reserve_failed = 1;
}
}
if (pkt_reserve_cmd) {
"!vhci_bind returned null svp vlun 0x%p",
(void *)vlun));
if (restore_lbp)
}
}
}
}
return (TRAN_BUSY);
}
if ((resrv_instance != 0) && (resrv_instance !=
/*
* This is an attempt to reserve vpkt->vpkt_path. But the
* previously reserved path referred by resrv_instance might
* still be reserved. Hence we will send a release command
* there in order to avoid a reservation conflict.
*/
"conflicting reservation on another path, vlun 0x%p, "
"reserved instance %d, new instance: %d, pip: 0x%p",
(void *)vlun, resrv_instance,
/*
* In rare cases, the path referred by resrv_instance could
* disappear in the meantime. Calling mdi_select_path() below
* is an attempt to find out if the path still exists. It also
* ensures that the path will be held when the release is sent.
*/
svp_resrv = (scsi_vhci_priv_t *)
sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC,
NULL);
char *p_path;
/*
* This is very unlikely.
* scsi_init_pkt(SLEEP_FUNC) does not fail
* because of resources. But in theory it could
* fail for some other reason. There is not an
* easy way how to recover though. Log a warning
* and return.
*/
"RELEASE(6) to %s failed, a potential "
"reservation conflict ahead.",
if (restore_lbp)
/* no need to check pkt_reserve_cmd here */
return (TRAN_FATAL_ERROR);
}
/*
* Ignore the return value. If it will fail
* then most likely it is no longer reserved
* anyway.
*/
(void) vhci_do_scsi_cmd(rel_pkt);
"!vhci_scsi_start: path 0x%p, issued SCSI-2"
" RELEASE\n", (void *)pip));
}
}
/*
* Ensure that no other IOs raced ahead, while a RESERVE cmd was
* QUIESCING the same lun.
*/
if ((!pkt_reserve_cmd) &&
goto pkt_cleanup;
}
/*
* currently this thread only handles running PGR
* commands, so don't bother creating it unless
* something interesting is going to happen (like
* either a PGR out, or a PGR in with enough space
* to hold the keys that are getting returned)
*/
}
/* an error */
return (rval);
}
}
}
/*
* SCSI-II RESERVE cmd is not expected in polled mode.
* If this changes it needs to be handled for the polled scenario.
*/
/*
* Set the path_instance *before* sending the scsi_pkt down the path
* to mpxio's pHCI so that additional path abstractions at a pHCI
* level (like maybe iSCSI at some point in the future) can update
* the path_instance.
*/
if (rval == TRAN_ACCEPT) {
if (flags & FLAG_NOINTR) {
}
}
}
/*
* This path will not automatically retry pkts
* internally, therefore, vpkt_org_vpkt should
* never be set.
*/
}
return (rval);
/* the command exited with bad status */
/* the command exited with bad status */
} else if (pkt_reserve_cmd) {
"!vhci_scsi_start: reserve failed vlun 0x%p",
(void *)vlun));
if (restore_lbp)
}
/* Do not destroy phci packet information for PKT_DMA_PARTIAL */
}
}
return (TRAN_BUSY);
}
/*
* Function name : vhci_scsi_reset()
*
* Return Values : 0 - reset failed
* 1 - reset succeeded
*/
/* ARGSUSED */
static int
{
int rval = 0;
return (vhci_scsi_reset_bus(ap));
}
return (rval);
}
/*
* vhci_recovery_reset:
* Issues reset to the device
* Input:
* vlun - vhci lun pointer of the device
* ap - address of the device
* select_path:
* If select_path is FALSE, then the address specified in ap is
* the path on which reset will be issued.
* If select_path is TRUE, then path is obtained by calling
* mdi_select_path.
*
* recovery_depth:
* Caller can specify the level of reset.
* VHCI_DEPTH_LUN -
* Issues LUN RESET if device supports lun reset.
* VHCI_DEPTH_TARGET -
* If Lun Reset fails or the device does not support
* Lun Reset, issues TARGET RESET
* VHCI_DEPTH_ALL -
* If Lun Reset fails or the device does not support
* Lun Reset, issues TARGET RESET.
* If TARGET RESET does not succeed, issues Bus Reset.
*/
static int
{
int ret = 0;
}
if ((ret == 0) && recovery_depth) {
}
if ((ret == 0) && recovery_depth) {
}
return (ret);
}
/*
* Note: The scsi_address passed to this routine could be the scsi_address
* for the virtual device or the physical device. No assumptions should be
* made in this routine about the contents of the ap structure.
* Further, note that the child dip would be the dip of the ssd node regardless
* of the scsi_address passed in.
*/
static int
{
int sps;
if (select_path != TRUE) {
}
}
"Unable to get a path, dip 0x%p", (void *)cdip));
return (0);
}
"priv is NULL, pip 0x%p", (void *)pip));
return (0);
}
"psd is NULL, pip 0x%p, svp 0x%p",
return (0);
}
"path %s, reset %d failed",
/*
* Select next path and issue the reset, repeat
* until all paths are exhausted
*/
return (0);
}
goto again;
}
"reset %d sent down pip:%p for cdip:%p\n", level,
return (1);
}
return (0);
}
/* ARGSUSED */
static int
{
return (1);
}
/*
* called by vhci_getcap and vhci_setcap to get and set (respectively)
* SCSI capabilities
*/
/* ARGSUSED */
static int
{
int cidx;
int rval = 0;
if (cap == (char *)0) {
"!vhci_commoncap: invalid arg"));
return (rval);
}
"!vhci_commoncap: vlun is null"));
return (rval);
}
return (UNDEFINED);
}
/*
* Process setcap request.
*/
if (doset) {
/*
* At present, we can only set binary (0/1) values
*/
switch (cidx) {
case SCSI_CAP_ARQ:
if (val == 0) {
rval = 0;
} else {
rval = 1;
}
break;
case SCSI_CAP_LUN_RESET:
if (tgtonly == 0) {
"scsi_vhci_setcap: "
"Returning error since whom = 0"));
rval = -1;
break;
}
/*
* Set the capability accordingly.
*/
break;
case SCSI_CAP_SECTOR_SIZE:
/* Always return success */
rval = 1;
break;
default:
"!vhci_setcap: unsupported %d", cidx));
break;
}
"0x%x/0x%x/0x%x/%d\n",
} else {
/*
* Process getcap request.
*/
switch (cidx) {
case SCSI_CAP_DMA_MAX:
/*
* For X86 this capability is caught in scsi_ifgetcap().
* XXX Should this be getting the value from the pHCI?
*/
rval = (int)VHCI_DMA_MAX_XFER_CAP;
break;
case SCSI_CAP_INITIATOR_ID:
rval = 0x00;
break;
case SCSI_CAP_ARQ:
case SCSI_CAP_TAGGED_QING:
rval = 1;
break;
case SCSI_CAP_SCSI_VERSION:
rval = 3;
break;
break;
case SCSI_CAP_LUN_RESET:
/*
* scsi_vhci will always return success for LUN reset.
* When request for doing LUN reset comes
* through scsi_reset entry point, at that time attempt
* will be made to do reset through all the possible
* paths.
*/
"scsi_vhci_getcap:"
"Getting the Lun reset capability %d", rval));
break;
case SCSI_CAP_SECTOR_SIZE:
break;
case SCSI_CAP_CDB_LEN:
break;
case SCSI_CAP_DMA_MAX_ARCH:
/*
* For X86 this capability is caught in scsi_ifgetcap().
* XXX Should this be getting the value from the pHCI?
*/
rval = 0;
break;
default:
"!vhci_getcap: unsupported %d", cidx));
break;
}
"0x%x/0x%x/0x%x/%d\n",
}
return (rval);
}
/*
* Function name : vhci_scsi_getcap()
*
*/
static int
{
}
static int
{
}
/*
* Function name : vhci_scsi_abort()
*/
/* ARGSUSED */
static int
{
return (0);
}
/*
* Function name : vhci_scsi_init_pkt
*
* Return Values : pointer to scsi_pkt, or NULL
*/
/* ARGSUSED */
static struct scsi_pkt *
{
int rval;
int newpkt = 0;
if (cmdlen > VHCI_SCSI_CDB_SIZE) {
if ((cmdlen != VHCI_SCSI_OSD_CDB_SIZE) ||
((flags & VHCI_SCSI_OSD_PKT_FLAGS) !=
"!init pkt: cdb size not supported\n"));
return (NULL);
}
}
arg);
return (NULL);
}
/* Get the vhci's private structure */
/* Save the target driver's packet */
/*
* Save pkt_tgt_init_pkt fields if deferred binding
* is needed or for other purposes.
*/
newpkt = 1;
} else { /* pkt not NULL */
}
/* Clear any stale error flags */
if (bp) {
}
if (flags & PKT_DMA_PARTIAL) {
/*
* Immediate binding is needed.
* Target driver may not set this flag in next invocation.
* vhci has to remember this flag was set during first
* invocation of vhci_scsi_init_pkt.
*/
}
/*
* Re-initialize some of the target driver packet state
* information.
*/
/*
* Binding a vpkt->vpkt_path for this IO at init_time.
* If an IO error happens later, target driver will clear
* this vpkt->vpkt_path binding before re-init IO again.
*/
"vhci_scsi_init_pkt: calling v_b_t %p, newpkt %d\n",
"v_s_i_p calling update_pHCI_pkt resid %ld\n",
}
if (callback == SLEEP_FUNC) {
} else {
}
"vhci_scsi_init_pkt: v_b_t called 0x%p rval 0x%x\n",
if (bp) {
if (rval == TRAN_FATAL_ERROR) {
/*
* No paths available. Could not bind
* any pHCI. Setting EFAULT as a way
* to indicate no DMA is mapped.
*/
} else {
/*
* Do not indicate any pHCI errors to
* target driver otherwise.
*/
}
}
if (rval != TRAN_ACCEPT) {
"vhci_scsi_init_pkt: "
"v_b_t failed 0x%p newpkt %x\n",
if (newpkt) {
vpkt->vpkt_tgt_pkt);
}
return (NULL);
}
/* Update the resid for the target driver */
}
return (vpkt->vpkt_tgt_pkt);
}
/*
* Function name : vhci_scsi_destroy_pkt
*
* Return Values : none
*/
static void
{
"vhci_scsi_destroy_pkt: vpkt 0x%p\n", (void *)vpkt));
vpkt->vpkt_tgt_init_pkt_flags = 0;
if (vpkt->vpkt_hba_pkt) {
}
}
}
/*
* Function name : vhci_scsi_dmafree()
*
* Return Values : none
*/
/*ARGSUSED*/
static void
{
"vhci_scsi_dmafree: vpkt 0x%p\n", (void *)vpkt));
if (vpkt->vpkt_hba_pkt) {
}
}
}
/*
* Function name : vhci_scsi_sync_pkt()
*
* Return Values : none
*/
/*ARGSUSED*/
static void
{
if (vpkt->vpkt_hba_pkt) {
}
}
/*
* routine for reset notification setup, to register or cancel.
*/
static int
{
}
static int
{
char *guid;
*name = 0;
return (1);
return (1);
/*
* Message is "sd# at scsi_vhci0: unit-address <guid>: <bus_addr>".
* <guid> bus_addr argument == 0
* <bus_addr> bus_addr argument != 0
* Since the <guid> is already provided with unit-address, we just
* provide failover module in <bus_addr> to keep output shorter.
*/
if (bus_addr == 0) {
/* report the guid: */
/* report the name of the failover module */
}
return (1);
}
static int
{
}
static int
{
}
/*
* Return a pointer to the guid part of the devnm.
* devnm format is "nodename@busaddr", busaddr format is "gGUID".
*/
static char *
{
return (NULL);
cp++;
return (cp + 2);
return (NULL);
}
static int
{
int pgr_sema_held = 0;
int held;
int path_instance = 0;
return (TRAN_BUSY);
pgr_sema_held = 1;
"vhci_bind_transport: path select fail\n"));
} else {
do {
"vhci_bind_transport: "
"valid first path 0x%p\n",
(void *)
vlun->svl_first_path));
goto bind_path;
}
} while ((rval == MDI_SUCCESS) &&
}
}
if (vlun->svl_first_path) {
"vhci_bind_transport: invalid first path 0x%p\n",
(void *)vlun->svl_first_path));
}
return (TRAN_BUSY);
}
pgr_sema_held = 1;
}
/*
* If the path is already bound for PKT_PARTIAL_DMA case,
* try to use the same path.
*/
"vhci_bind_transport: PKT_PARTIAL_DMA "
"vpkt 0x%p, path 0x%p\n",
goto bind_path;
}
/*
* Get path_instance. Non-zero with FLAG_PKT_PATH_INSTANCE set
* indicates that mdi_select_path should be called to select a
* specific instance.
*
* NB: Condition pkt_path_instance reference on proper allocation.
*/
}
/*
* If reservation is active bind the transport directly to the pip
* with the reservation.
*/
rval = MDI_SUCCESS;
goto bind_path;
} else {
if (pgr_sema_held) {
}
return (TRAN_BUSY);
}
}
if (pgr_sema_held) {
}
return (TRAN_BUSY);
} else if (rval == MDI_DEVI_ONLINING) {
/*
* if we are here then we are in the midst of
* We attempt to bind to ONLINE path if available,
* else it is OK to bind to a STANDBY path (instead
* of triggering a failover) because IO associated
* are completed by targets even on passive paths
* If no ONLINE paths available, it is important
* to set svl_waiting_for_activepath for two
* reasons: (1) avoid sense analysis in the
* "external failure detection" codepath in
* vhci_intr(). Failure to do so will result in
* infinite loop (unless an ONLINE path becomes
* available at some point) (2) avoid
* unnecessary failover (see "---Waiting For Active
* Path---" comment below).
*/
"state\n", (void *)cdip));
if (vlun->svl_waiting_for_activepath == 0) {
}
if (pgr_sema_held) {
}
return (TRAN_FATAL_ERROR);
}
goto bind_path;
}
} else if ((rval == MDI_FAILURE) ||
if (pgr_sema_held) {
}
return (TRAN_FATAL_ERROR);
}
while (vlun->svl_waiting_for_activepath) {
/*
* ---Waiting For Active Path---
* This device was discovered across a
* passive path; lets wait for a little
* bit, hopefully an active path will
* show up obviating the need for a
* failover
*/
(60 * NANOSEC)) {
} else {
drv_usecwait(1000);
== 0) {
/*
* an active path has come
* online!
*/
goto try_again;
}
}
}
if (!held) {
"!Lun not held\n"));
if (pgr_sema_held) {
}
return (TRAN_BUSY);
}
/*
* now that the LUN is stable, one last check
* to make sure no other changes sneaked in
* (like a path coming online or a
* failover initiated by another thread)
*/
goto bind_path;
}
/*
* Check if there is an ONLINE path OR a STANDBY path
* available. If none is available, do not attempt
* to do a failover, just return a fatal error at this
* point.
*/
/*
			 * No paths available, just return FATAL error.
*/
if (pgr_sema_held) {
}
return (TRAN_FATAL_ERROR);
}
"mdi_failover\n"));
} else {
}
if (rval == MDI_FAILURE) {
if (pgr_sema_held) {
}
return (TRAN_FATAL_ERROR);
if (pgr_sema_held) {
}
return (TRAN_BUSY);
} else {
if (pgr_sema_held) {
}
return (TRAN_BUSY);
}
}
} else {
}
/* Verify match of specified path_instance and selected path_instance */
ASSERT((path_instance == 0) ||
/*
* For PKT_PARTIAL_DMA case, call pHCI's scsi_init_pkt whenever
* target driver calls vhci_scsi_init_pkt.
*/
"vhci_bind_transport: PKT_PARTIAL_DMA "
"vpkt 0x%p, path 0x%p hba_pkt 0x%p\n",
}
"!bind transport: 0x%p 0x%p 0x%p\n",
}
if (pgr_sema_held) {
}
/*
* Consider it a fatal error if b_error is
* set as a result of DMA binding failure
* vs. a condition of being temporarily out of
* some resource
*/
return (TRAN_FATAL_ERROR);
else
return (TRAN_BUSY);
}
}
return (TRAN_ACCEPT);
}
/*PRINTFLIKE3*/
void
{
}
/* do a PGR out with the information we've saved away */
static int
{
CDB_GROUP1, sizeof (struct scsi_arq_status), 0, 0,
SLEEP_FUNC, NULL);
return (0);
}
if (rval != 1) {
&(((struct scsi_arq_status *)(uintptr_t)
if ((skey == KEY_UNIT_ATTENTION) ||
(skey == KEY_NOT_READY)) {
int max_retry;
if (rval == SCSI_SENSE_NOT_READY) {
} else {
/* chk for state change and update */
if (rval == SCSI_SENSE_STATE_CHANGED) {
int held;
VH_NOSLEEP, held);
if (!held) {
} else {
/* chk for alua first */
(void *)vlun);
}
}
}
"!vhci_do_prout retry 0x%x "
"(0x%x 0x%x 0x%x)",
goto again;
}
rval = 0;
"!vhci_do_prout 0x%x "
"(0x%x 0x%x 0x%x)",
} else if (skey == KEY_ILLEGAL_REQUEST)
}
} else {
rval = 1;
}
return (rval);
}
static void
{
int fail = 0;
int rval;
goto done;
}
fail++;
"vhci_run_cmd: no path! 0x%p\n", (void *)svp));
goto done;
}
do {
"vhci_run_cmd: no "
"client priv! 0x%p offlined?\n",
(void *)npip));
goto next_path;
}
goto next_path;
} else {
fail++;
}
if (fail) {
"couldn't be replicated on all paths",
}
} else {
}
done:
if (svp)
}
}
/*
* The PROUT commands are not included in the automatic retry
* mechanism, therefore, vpkt_org_vpkt should never be set here.
*/
}
/*
* Get the keys registered with this target. Since we will have
* registered the same key with multiple initiators, strip out
* any duplicate keys.
*
* The pointers which will be used to filter the registered keys from
* the device will be stored in filter_prin and filter_pkt. If the
* allocation length of the buffer was sufficient for the number of
* parameter data bytes available to be returned by the device then the
* key filtering will use the keylist returned from the original
* request. If the allocation length of the buffer was not sufficient,
* then the filtering will use the keylist returned from the request
* that is resent below.
*
* If the device returns an additional length field that is greater than
* the allocation length of the buffer, then allocate a new buffer which
* can accommodate the number of parameter data bytes available to be
* returned. Resend the scsi PRIN command, filter out the duplicate
* keys and return as many of the unique keys found that was originally
* requested and set the additional length field equal to the data bytes
* of unique reservation keys available to be returned.
*
* If the device returns an additional length field that is less than or
* equal to the allocation length of the buffer, then all the available
* keys registered were returned by the device. Filter out the
* duplicate keys and return all of the unique keys found and set the
* additional length field equal to the data bytes of the reservation
* keys to be returned.
*/
/*
 * NOTE(review): vhci_do_prin() fragment -- the signature, local
 * declarations and many statements are missing from this extraction.
 * Per the block comment above, it filters duplicate registration keys
 * out of a PERSISTENT RESERVE IN (READ KEYS) response, reissuing the
 * command with a larger buffer when needed (VHCI_CMD_RETRY path).
 * Code preserved byte-for-byte; only review comments added.
 */
static int
{
/*
 * If the caller only asked for an amount of data that would not
 * be enough to include any key data it is likely that they will
 * send the next command with a buffer size based on the information
 * from this header. Doing recovery on this would be a duplication
 * of efforts.
 */
goto exit;
}
/*
 * Can fail as sleep is not allowed.
 */
prin = (vhci_prin_readkeys_t *)
} else {
/*
 * The retry buf doesn't need to be mapped in.
 */
prin = (vhci_prin_readkeys_t *)
}
"vhci_do_prin: bp_mapin_common failed."));
goto fail;
}
/*
 * According to SPC-3r22, sec 4.3.4.6: "If the amount of
 * information to be transferred exceeds the maximum value
 * that the ALLOCATION LENGTH field is capable of specifying,
 * the device server shall...terminate the command with CHECK
 * CONDITION status". The ALLOCATION LENGTH field of the
 * PERSISTENT RESERVE IN command is 2 bytes. We should never
 * get here with an ADDITIONAL LENGTH greater than 0xFFFF
 * so if we do, then it is an error!
 */
"vhci_do_prin: Device returned invalid "
"length 0x%x\n", prin_length));
goto fail;
}
/*
 * If prin->length is greater than the byte count allocated in the
 * original buffer, then resend the request with enough buffer
 * allocated to get all of the available registered keys.
 */
goto fail;
}
/*
 * This is the buf with buffer pointer
 * where the prin readkeys will be
 * returned from the device
 */
if (new_bp) {
}
goto fail;
}
}
if (rval == VHCI_CMD_RETRY) {
/*
 * There were more keys then the original request asked for.
 */
/*
 * Release the old path because it does not matter which path
 * this command is sent down. This allows the normal bind
 * transport mechanism to be used.
 */
}
/*
 * Dispatch the retry command
 */
if (path_holder) {
}
goto fail;
}
/*
 * If we return VHCI_CMD_RETRY, that means the caller
 * is going to bail and wait for the reissued command
 * to complete. In that case, we need to decrement
 * the path command count right now. In any other
 * case, it'll be decremented by the caller.
 */
goto exit;
}
if (rval == VHCI_CMD_CMPLT) {
/*
 * The original request got all of the keys or the recovery
 * packet returns.
 */
int new;
int old;
num_keys));
#ifdef DEBUG
if (vhci_debug == 5)
"vhci_do_prin: MPxIO old keys:\n"));
if (vhci_debug == 5)
#endif
/*
 * Filter out all duplicate keys returned from the device
 * We know that we use a different key for every host, so we
 * can simply strip out duplicates. Otherwise we would need to
 * do more bookkeeping to figure out which keys to strip out.
 */
new = 0;
/*
 * If we got at least 1 key copy it.
 */
if (num_keys > 0) {
new++;
}
/*
 * find next unique key.
 */
int j;
int match = 0;
if (new >= VHCI_NUM_RESV_KEYS)
break;
for (j = 0; j < new; j++) {
sizeof (mhioc_resv_key_t)) == 0) {
match = 1;
break;
}
}
if (!match) {
new++;
}
}
/* Stored Big Endian */
/* Stored Big Endian */
/*
 * If we arrived at this point after issuing a retry, make sure
 * that we put everything back the way it originally was so
 * that the target driver can complete the command correctly.
 */
/*
 * Make sure the original buffer is mapped into kernel
 * space before we try to copy the filtered keys into
 * it.
 */
}
/*
 * Now copy the desired number of prin keys into the original
 * target buffer.
 */
/*
 * It is safe to return all of the available unique
 * keys
 */
} else {
/*
 * Not all of the available keys were requested by the
 * original command.
 */
}
#ifdef DEBUG
"vhci_do_prin: To Application:\n"));
if (vhci_debug == 5)
"vhci_do_prin: MPxIO new keys:\n"));
if (vhci_debug == 5)
#endif
}
fail:
if (rval == VHCI_CMD_ERROR) {
/*
 * If we arrived at this point after issuing a
 * retry, make sure that we put everything back
 * the way it originally was so that ssd can
 * complete the command correctly.
 */
}
}
/*
 * Mark this command completion as having an error so that
 * ssd will retry the command.
 */
}
exit:
/*
 * Make sure that the semaphore is only released once.
 */
if (rval == VHCI_CMD_CMPLT) {
}
return (rval);
}
/*
 * NOTE(review): vhci_intr() fragment -- packet-completion handler.  The
 * signature and most statements are missing from this extraction.  What
 * remains shows the pkt_reason switch (CMD_CMPLT / CMD_TIMEOUT /
 * CMD_TRAN_ERR / CMD_DEV_GONE), sense-based failover detection, and
 * SCSI-II RESERVE/RELEASE bookkeeping.  Code preserved byte-for-byte;
 * only review comments added / comment typos fixed.
 */
static void
{
char *cpath;
/*
 * sync up the target driver's pkt with the pkt that
 * we actually used
 */
/* Return path_instance information back to the target driver. */
if (scsi_pkt_allocated_correctly(tpkt)) {
if (scsi_pkt_allocated_correctly(pkt)) {
/*
 * If both packets were correctly allocated,
 * return path returned by pHCI.
 */
} else {
/* Otherwise return path of pHCI we used */
}
}
}
}
}
switch (pkt->pkt_reason) {
case CMD_CMPLT:
/*
 * cmd completed successfully, check for scsi errors
 */
case STATUS_CHECK:
&(((struct scsi_arq_status *)(uintptr_t)
"Received sns key %x esc %x escq %x\n",
/*
 * if we are here it means we are
 * through a passive path; this
 * case is exempt from sense analysis
 * for detection of ext. failover
 * because that would unnecessarily
 * increase attach time.
 */
break;
}
if (asc == VHCI_SCSI_PERR) {
/*
 * parity error
 */
break;
}
if ((rval == SCSI_SENSE_NOFAILOVER) ||
(rval == SCSI_SENSE_UNKNOWN) ||
(rval == SCSI_SENSE_NOT_READY)) {
break;
} else if (rval == SCSI_SENSE_STATE_CHANGED) {
if (!held) {
/*
 * looks like some other thread
 * has already detected this
 * condition
 */
break;
}
(void) taskq_dispatch(
} else {
/*
 * externally initiated failover
 * has occurred or is in progress
 */
if (!held) {
/*
 * looks like some other thread
 * has already detected this
 * condition
 */
break;
} else {
if (rval == BUSY_RETURN) {
break;
}
break;
}
}
}
break;
/*
 * If this is a good SCSI-II RELEASE cmd completion then restore
 * the load balancing policy and reset VLUN_RESERVE_ACTIVE_FLG.
 * If this is a good SCSI-II RESERVE cmd completion then set
 * VLUN_RESERVE_ACTIVE_FLG.
 */
case STATUS_GOOD:
"!vhci_intr: vlun 0x%p release path 0x%p",
}
"!vhci_intr: vlun 0x%p reserved path 0x%p",
}
break;
"!vhci_intr: vlun 0x%p "
"reserve conflict on path 0x%p",
/* FALLTHROUGH */
default:
break;
}
/*
 * Update I/O completion statistics for the path
 */
/*
 * Command completed successfully, release the dma binding and
 * destroy the transport side of the packet.
 */
return;
}
}
/*
 * If the action (value in byte 1 of the cdb) is zero,
 * we're reading keys, and that's the only condition
 * where we need to be concerned with filtering keys
 * and potential retries. Otherwise, we simply signal
 * the semaphore and move on.
 */
/*
 * If this is the completion of an internal
 * retry then we need to make sure that the
 * pkt and tpkt pointers are readjusted so
 * the calls to scsi_destroy_pkt and pkt_comp
 * below work * correctly.
 */
/*
 * If this command was issued through
 * the taskq then we need to clear
 * this flag for proper processing in
 * the case of a retry from the target
 * driver.
 */
vpkt->vpkt_state &=
}
/*
 * if vhci_do_prin returns VHCI_CMD_CMPLT then
 * vpkt will contain the address of the
 * original vpkt
 */
/*
 * The command has been resent to get
 * all the keys from the device. Don't
 * complete the command with ssd until
 * the retry completes.
 */
return;
}
} else {
}
}
break;
case CMD_TIMEOUT:
if ((pkt->pkt_statistics &
"!scsi vhci timeout invoked\n"));
}
break;
case CMD_TRAN_ERR:
/*
 * This status is returned if the transport has sent the cmd
 * down the link to the target and then some error occurs.
 * In case of SCSI-II RESERVE cmd, we don't know if the
 * reservation been accepted by the target or not, so we need
 * to clear the reservation.
 */
" cmd_tran_err for scsi-2 reserve cmd\n"));
TRUE, VHCI_DEPTH_TARGET)) {
"!vhci_intr cmd_tran_err reset failed!"));
}
}
break;
case CMD_DEV_GONE:
/*
 * If this is the last path then report CMD_DEV_GONE to the
 * target driver, otherwise report BUSY to trigger retry.
 */
"cmd_dev_gone on last path\n"));
break;
}
/* Report CMD_CMPLT-with-BUSY to cause retry. */
"cmd_dev_gone\n"));
break;
default:
break;
}
/*
 * SCSI-II RESERVE cmd has been serviced by the lower layers clear
 * the flag so the lun is not QUIESCED any longer.
 * Also clear the VHCI_PKT_THRU_TASKQ flag, to ensure that if this pkt
 * is retried, a taskq shall again be dispatched to service it. Else
 * it may lead to a system hang if the retry is within interrupt
 * context.
 */
}
/*
 * vpkt_org_vpkt should always be NULL here if the retry command
 * has been successfully processed. If vpkt_org_vpkt != NULL at
 * this point, it is an error so restore the original vpkt and
 * return an error to the target driver so it can retry the
 * command as appropriate.
 */
/*
 * Mark this command completion as having an error so that
 * ssd will retry the command.
 */
}
svp->svp_last_pkt_reason)) {
}
/*
 * For PARTIAL_DMA, vhci should not free the path.
 * Target driver will call into vhci_scsi_dmafree or
 * destroy pkt to release this path.
 */
}
}
}
/*
* two possibilities: (1) failover has completed
* or (2) is in progress; update our path states for
* the former case; for the latter case,
* initiate a scsi_watch request to
* determine when failover completes - vlun is HELD
* until failover completes; BUSY is returned to upper
* layer in both the cases
*/
/*
 * NOTE(review): fragment of the externally-initiated-failover handler
 * described by the comment above (likely vhci_handle_ext_fo) -- the
 * signature and most statements are missing.  Returns PKT_RETURN on
 * allocation failure and BUSY_RETURN otherwise, per the visible code.
 * Code preserved byte-for-byte; only review comments added.
 */
static int
{
char *path;
if (fostat == SCSI_SENSE_INACTIVE) {
"detected for %s; updating path states...\n",
vlun->svl_lun_wwn));
/*
 * set the vlun flag to indicate to the task that the target
 * port group needs updating
 */
} else {
"!%s (%s%d): Waiting for externally initiated failover "
"request packet allocation for %s failed....\n",
vlun->svl_lun_wwn));
return (PKT_RETURN);
}
swarg->svs_release_lun = 0;
/*
 * place a hold on the path...we don't want it to
 * vanish while scsi_watch is in progress
 */
}
return (BUSY_RETURN);
}
/*
* vhci_efo_watch_cb:
* Callback from scsi_watch request to check the failover status.
* Completion is either due to successful failover or timeout.
* Upon successful completion, vhci_update_path_states is called.
* For timeout condition, vhci_efo_done is called.
* Always returns 0 to scsi_watch to keep retrying till vhci_efo_done
* terminates this request properly in a separate thread.
*/
/*
 * NOTE(review): vhci_efo_watch_cb() fragment (see header comment above)
 * -- the signature and several statements are missing.  Always returns 0
 * so scsi_watch keeps retrying until vhci_efo_done terminates the watch.
 * Code preserved byte-for-byte; only review comments added / comment
 * grammar fixed.
 */
static int
{
/*
 * Already completed failover or timedout.
 * Waiting for vhci_efo_done to terminate this scsi_watch.
 */
return (0);
}
vlun->svl_efo_update_path = 0;
updt_paths = 0;
goto done;
}
return (0);
}
if (*((unsigned char *)statusp) == STATUS_CHECK) {
switch (rval) {
/*
 * Only update path states in case path is definitely
 * inactive, or no failover occurred. For all other
 * check conditions continue pinging. An unexpected
 * check condition shouldn't cause pinging to complete
 * prematurely.
 */
case SCSI_SENSE_INACTIVE:
case SCSI_SENSE_NOFAILOVER:
updt_paths = 1;
break;
default:
>= VHCI_EXTFO_TIMEOUT) {
goto done;
}
return (0);
}
} else if (*((unsigned char *)statusp) ==
updt_paths = 1;
} else if ((*((unsigned char *)statusp)) &
(STATUS_BUSY | STATUS_QFULL)) {
return (0);
}
if ((*((unsigned char *)statusp) == STATUS_GOOD) ||
(updt_paths == 1)) {
/*
 * we got here because we had detected an
 * externally initiated failover; things
 * have settled down now, so let's
 * start up a task to update the
 * path states and target port group
 */
vhci_update_pathstates, (void *)vlun,
KM_SLEEP);
return (0);
}
goto done;
}
return (0);
done:
return (0);
}
/*
* vhci_efo_done:
* cleanly terminates scsi_watch and free up resources.
* Called as taskq function in vhci_efo_watch_cb for EFO timeout condition
* or by vhci_update_path_states invoked during external initiated
* failover completion.
*/
/*
 * NOTE(review): vhci_efo_done() fragment (see header comment above) --
 * signature and most statements missing.  Terminates the EFO scsi_watch
 * and releases resources.  Code preserved byte-for-byte.
 */
static void
{
/* Wait for clean termination of scsi_watch */
/* release path and freeup resources to indicate failover completion */
if (swarg->svs_release_lun) {
}
}
/*
* Update the path states
* vlun should be HELD when this is invoked.
* Calls vhci_efo_done to cleanup resources allocated for EFO.
*/
/*
 * NOTE(review): vhci_update_pathstates() fragment -- the signature and
 * many statements are missing from this extraction.  Walks the paths of
 * a vlun, marks them ONLINE/STANDBY per the current target-port-group
 * state, and clears any stale SCSI-II reservation via a recovery reset
 * on the path recorded in svp_conflict.  Code preserved byte-for-byte;
 * only review comments added.
 */
void
{
char *cpath;
goto done;
}
do {
vlun->svl_fops_ctpriv) != 0) {
continue;
}
MDI_SUCCESS) {
"!vhci_update_pathstates: prop lookup failed for "
"path 0x%p\n", (void *)pip));
continue;
}
/*
 * Need to update the "path-class" property
 * value in the device tree if different
 * from the existing value.
 */
}
/*
 * Only change the state if needed. i.e. Don't call
 * mdi_pi_set_state to ONLINE a path if its already
 * ONLINE. Same for STANDBY paths.
 */
if (!(MDI_PI_IS_ONLINE(pip))) {
"!vhci_update_pathstates: marking path"
" 0x%p as ONLINE\n", (void *)pip));
"(%s%d): path %s "
"is now ONLINE because of "
"an externally initiated failover",
}
if (vlun->svl_waiting_for_activepath) {
}
} else if (MDI_PI_IS_ONLINE(pip)) {
!= 0) {
vlun->svl_active_pclass) != 0) {
KM_SLEEP);
+1));
} else {
/*
 * No need to update
 * svl_active_pclass
 */
}
if (tptr) {
if (vlun->svl_active_pclass
!= NULL) {
+1);
}
}
}
}
/* Check for Reservation Conflict */
if (!bp) {
"!vhci_update_pathstates: No resources "
"(buf)\n"));
goto done;
}
CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
if (pkt) {
(void) scsi_setup_cdb((union scsi_cdb *)
0);
"!vhci_update_pathstates: reserv. "
"conflict to be resolved on 0x%p\n",
(void *)pip));
svp_conflict = svp;
}
}
!(MDI_PI_IS_STANDBY(pip))) {
"!vhci_update_pathstates: marking path"
" 0x%p as STANDBY\n", (void *)pip));
"(%s%d): path %s "
"is now STANDBY because of "
"an externally initiated failover",
opinfo.opinfo_path_attr) == 0) {
svl_active_pclass)+1);
}
}
}
(void) mdi_prop_free(pclass);
/*
 * Check to see if this vlun has an active SCSI-II RESERVE. If so
 * clear the reservation by sending a reset, so the host doesn't
 * receive a reservation conflict. The reset has to be sent via a
 * working path. Let's use a path referred to by svp_conflict as it
 * should be working.
 * Reset VLUN_RESERVE_ACTIVE_FLG for this vlun. Also notify ssd
 * of the reset, explicitly.
 */
" sending recovery reset on 0x%p, path_state: %x",
(void) vhci_recovery_reset(vlun,
}
}
/*
 * Update the AccessState of related MP-API TPGs
 */
}
done:
if (vlun->svl_efo_update_path) {
vlun->svl_efo_update_path = 0;
}
}
/* ARGSUSED */
/*
 * NOTE(review): vhci_pathinfo_init() fragment -- the signature, the
 * "failure:" label and many statements are missing from this extraction
 * (note the unlabeled cleanup code after "return (MDI_SUCCESS)").
 * Code preserved byte-for-byte; only review comments added / a garbled
 * comment repaired.
 */
static int
{
char *guid;
int vlun_alloced = 0;
"vhci_pathinfo_init: lun guid property failed"));
goto failure;
}
/*
 * Initialize svl_lb_policy_save only for newly allocated vlun. Writing
 * to svl_lb_policy_save later could accidentally overwrite saved lb
 * policy.
 */
if (vlun_alloced) {
}
/*
 * For a SCSI_HBA_ADDR_COMPLEX transport we store a pointer to
 * scsi_device in the scsi_address structure. This allows an
 * an HBA driver to find its scsi_device(9S) and
 * per-scsi_device(9S) HBA private data given a
 * scsi_address(9S) by using scsi_address_device(9F) and
 * scsi_device_hba_private_get(9F)).
 */
/*
 * Clone transport structure if requested. Self-enumerating
 * HBAs always need to use cloning.
 */
} else {
/*
 * SPI pHCI unit-address. If we ever need to support this
 * we could set a.spi.a_target/a.spi.a_lun based on pathinfo
 * node unit-address properties. For now we fail...
 */
goto failure;
}
/*
 * Mark scsi_device as being associated with a pathinfo node. For
 * a scsi_device structure associated with a devinfo node,
 * scsi_ctlops_initchild sets this field to NULL.
 */
/*
 * LEGACY: sd_private: set for older mpxio-capable pHCI drivers with
 * mpxio-capable pHCI drivers use SCSA enumeration services (or at
 * least have been changed to use sd_pathinfo instead).
 */
/* See scsi_hba.c for info on sd_tran_safe kludge */
/*
 * call hba's target init entry point if it exists
 */
psd->sd_tran_tgt_free_done = 0;
"!vhci_pathinfo_init: tran_tgt_init failed for "
goto failure;
}
}
(void *)pip));
return (MDI_SUCCESS);
if (psd) {
}
if (svp) {
}
if (vlun_alloced)
return (rval);
}
/* ARGSUSED */
/*
 * NOTE(review): vhci_pathinfo_uninit() fragment -- signature and most
 * statements missing.  Frees per-path scsi_device state (mirrors the
 * devinfo uninitchild path in scsi_hba.c, per the comment below).
 * Code preserved byte-for-byte; only review comments added.
 */
static int
{
/* path already freed. Nothing to do. */
return (MDI_SUCCESS);
}
/* Verify plumbing */
/* Switch to cloned scsi_hba_tran(9S) structure */
}
}
}
/*
 * Free the pathinfo related scsi_device inquiry data. Note that this
 * matches what happens for scsi_hba.c devinfo case at uninitchild time.
 */
(void *)pip));
return (MDI_SUCCESS);
}
/* ARGSUSED */
/*
 * NOTE(review): vhci_pathinfo_state_change() fragment -- the signature
 * and many statements are missing from this extraction.  Handles
 * MDI_BEFORE/MDI_AFTER state-change hooks: holds the LUN, issues a
 * recovery reset on disable of a reserved path, initiates auto-failback
 * on enable of a preferred standby path, and re-runs callbacks after
 * the transition.  Code preserved byte-for-byte; only review comments
 * added.
 */
static int
{
int held;
if (flags & MDI_EXT_STATE_CHANGE) {
/*
 * We do not want to issue any commands down the path in case
 * sync flag is set. Lower layers might not be ready to accept
 * any I/O commands.
 */
if (op == DRIVER_DISABLE)
return (MDI_SUCCESS);
return (MDI_FAILURE);
}
if (flags & MDI_BEFORE_STATE_CHANGE) {
/*
 * Hold the LUN.
 */
if (flags & MDI_DISABLE_OP) {
/*
 * Issue scsi reset if it happens to be
 * reserved path.
 */
/*
 * if reservation pending on
 * this path, dont' mark the
 * path busy
 */
if (op == DRIVER_DISABLE_TRANSIENT) {
"!vhci_pathinfo"
"_state_change (pip:%p): "
" reservation: fail busy\n",
(void *)pip));
return (MDI_FAILURE);
}
if (vhci_recovery_reset(
TRUE,
VHCI_DEPTH_TARGET) == 0) {
VHCI_DEBUG(1,
"!vhci_pathinfo"
"_state_change "
" (pip:%p): "
"reset failed, "
"give up!\n",
(void *)pip));
}
}
}
} else if (flags & MDI_ENABLE_OP) {
if (((vhci->vhci_conf_flags &
MDI_PI_IS_STANDBY(pip)) {
/*
 * Failback if enabling a standby path
 * and it is the primary class or
 * preferred class
 */
if (best_class == 0) {
/*
 * if not preferred - compare
 * path-class with class
 */
(void) fo->sfo_pathclass_next(
NULL, &best_pclass,
"path-class", &pclass);
if (rv != MDI_SUCCESS ||
"!path-class "
" lookup "
"failed. rv: %d"
"class: %p", rv,
(void *)pclass);
strlen(best_pclass)) == 0) {
best_class = 1;
}
if (rv == MDI_SUCCESS &&
rv = mdi_prop_free(
pclass);
if (rv !=
vdip,
"!path-"
"class"
" free"
" failed"
" rv: %d"
" class: "
"%p",
rv,
(void *)
pclass);
}
}
}
if (best_class == 1) {
"preferred path: %p "
"USER_DISABLE->USER_ENABLE "
"transition for lun %s\n",
(void *)pip,
vlun->svl_lun_wwn));
(void) taskq_dispatch(
}
}
/*
 * if PGR is active, revalidate key and
 * register on this path also, if key is
 * still valid
 */
if (vlun->svl_pgr_active)
(void)
/*
 * Inform target driver about any
 * reservations to be reinstated if target
 * has dropped reservation during the busy
 * period.
 */
&vhci->vhci_mutex,
}
}
if (flags & MDI_AFTER_STATE_CHANGE) {
if (flags & MDI_ENABLE_OP) {
}
if (vlun->svl_setcap_done) {
1, pip);
}
/*
 * Release the LUN
 */
/*
 * Path transition is complete.
 * Run callback to indicate target driver to
 * retry to prevent IO starvation.
 */
if (scsi_callback_id != 0) {
}
}
} else {
switch (state) {
break;
break;
default:
break;
}
/*
 * Path transition is complete.
 * Run callback to indicate target driver to
 * retry to prevent IO starvation.
 */
}
return (rval);
}
return (MDI_SUCCESS);
}
/*
* Parse the mpxio load balancing options. The datanameptr
* will point to a string containing the load-balance-options value.
* The load-balance-options value will be a property that
* defines the load-balance algorithm and any arguments to that
* algorithm.
* For example:
* device-type-mpxio-options-list=
* "device-type=SUN SENA", "load-balance-options=logical-block-options"
* "device-type=SUN SE6920", "round-robin-options";
* logical-block-options="load-balance=logical-block", "region-size=15";
* round-robin-options="load-balance=round-robin";
*
* If the load-balance is not defined the load balance algorithm will
* default to the global setting. There will be default values assigned
* to the arguments (region-size=18) and if an argument is one
* that is not known, it will be ignored.
*/
/*
 * NOTE(review): vhci_parse_mpxio_lb_options() fragment (contract in the
 * header comment above) -- the signature and several statements are
 * missing.  Parses load-balance / region-size options and applies them
 * via mdi_set_lb_policy()/mdi_set_lb_region_size().  Code preserved
 * byte-for-byte; only review comments added.
 */
static void
{
return;
}
while (config_list_len > 0) {
strlen(mdi_load_balance)) == 0) {
/* get the load-balance scheme */
LOAD_BALANCE_PROP_LBA) == 0) {
(void) mdi_set_lb_policy(cdip,
LOAD_BALANCE_PROP_NONE) == 0) {
(void) mdi_set_lb_policy(cdip,
}
strlen(LOGICAL_BLOCK_REGION_SIZE)) == 0) {
int i = 0;
char *ptr;
char *tmp;
/* check for numeric value */
"Illegal region size: %s."
" Setting to default value: %d",
tmp,
break;
}
}
}
}
}
#ifdef DEBUG
"!vhci_parse_mpxio_lb_options: region-size: %d"
"only valid for load-balance=logical-block\n",
region_size));
}
#endif
"!vhci_parse_mpxio_lb_options: No region-size"
" defined load-balance=logical-block."
" Default to: %d\n", LOAD_BALANCE_DEFAULT_REGION_SIZE));
(void) mdi_set_lb_region_size(cdip,
}
if (list_len > 0) {
}
}
/*
* Parse the device-type-mpxio-options-list looking for the key of
* "load-balance-options". If found, parse the load balancing options.
* Check the comment of the vhci_get_device_type_mpxio_options()
* for the device-type-mpxio-options-list.
*/
/*
 * NOTE(review): vhci_parse_mpxio_options() fragment (contract in the
 * header comment above) -- signature and most statements missing.
 * Scans the data list for "load-balance-options" and hands it off for
 * parsing.  Code preserved byte-for-byte; only review comments added.
 */
static void
{
char *dataptr;
int len;
/*
 * get the data list
 */
len = 0;
!= 0) {
strlen(LOAD_BALANCE_OPTIONS)) == 0) {
}
}
}
/*
* Check the inquiry string returned from the device with the device-type
* Check for the existence of the device-type-mpxio-options-list and
* if found parse the list checking for a match with the device-type
* value and the inquiry string returned from the device. If a match
* is found, parse the mpxio options list. The format of the
* device-type-mpxio-options-list is:
* device-type-mpxio-options-list=
* "device-type=SUN SENA", "load-balance-options=logical-block-options"
* "device-type=SUN SE6920", "round-robin-options";
* logical-block-options="load-balance=logical-block", "region-size=15";
* round-robin-options="load-balance=round-robin";
*/
/*
 * NOTE(review): vhci_get_device_type_mpxio_options() fragment (format of
 * device-type-mpxio-options-list documented above) -- the first part of
 * the signature and many statements are missing.  Matches inquiry VID
 * against each "device-type=" duplet and parses its mpxio options.
 * Code preserved byte-for-byte; only review comments added.
 */
void
struct scsi_device *devp)
{
/*
 * look up the device-type-mpxio-options-list and walk thru
 * the list compare the vendor ids of the earlier inquiry command and
 * with those vids in the list if there is a match, lookup
 * the mpxio-options value
 */
/*
 * Compare vids in each duplet - if it matches,
 * parse the mpxio options list.
 */
dupletlen = 0;
strlen(DEVICE_TYPE_STR)) == 0) {
/* point to next duplet */
/* add len of this duplet */
/* get to device type */
if ((vidlen != 0) &&
break;
}
/* get to next duplet */
}
/* get to the next device-type */
strlen(DEVICE_TYPE_STR)) != 0) {
}
}
if (config_list_len > 0) {
}
}
}
/*
 * NOTE(review): vhci_update_pathinfo() fragment -- the signature is
 * split and most statements are missing from this extraction.  Syncs a
 * path's MDI state with the failover module's operation info, handles
 * ALUA vs. non-ALUA active-class bookkeeping, forced re-reservation,
 * and auto-failback dispatch.  Code preserved byte-for-byte; only
 * review comments added / comment punctuation fixed.
 */
static int
struct scsi_failover_ops *fo,
{
int force_rereserve = 0;
int update_pathinfo_done = 0;
"Failed to get operation info for path:%p\n", (void *)pip));
return (MDI_FAILURE);
}
/* set the xlf capable flag in the vlun for future use */
/*
 * Externally initiated failover has happened;
 * next IO will trigger failover and thus
 * sync-up the pathstates. Reason we don't
 * sync-up immediately by invoking
 * vhci_update_pathstates() is because it
 * needs a VHCI_HOLD_LUN() and we don't
 * want to block here.
 *
 * Further, if the device is an ALUA device,
 * then failure to exactly match 'pclass' and
 * 'svl_active_pclass'(as is the case here)
 * indicates that the currently active path
 * is a 'non-optimized' path - which means
 * that 'svl_active_pclass' needs to be
 * replaced with opinfo.opinfo_path_state
 * value.
 */
char *tptr;
/*
 * The device is ALUA compliant. The
 * state need to be changed to online
 * rather than standby state which is
 * done typically for a asymmetric
 * device that is non ALUA compliant.
 */
KM_SLEEP);
+1));
} else {
/*
 * Non ALUA device case.
 */
}
update_pathinfo_done = 1;
}
/*
 * Find out a class of currently reserved path if there
 * is any.
 */
"!vhci_update_pathinfo: prop lookup "
"failed for path 0x%p\n",
(void *)vlun->svl_resrv_pip));
/*
 * Something is wrong with the reserved path.
 * We can't do much with that right here. Just
 * force re-reservation to another path.
 */
force_rereserve = 1;
}
/*
 * Inform target driver that a reservation
 * should be reinstated because the reserved
 * path is not the most preferred one.
 */
&vhci->vhci_mutex,
}
if (update_pathinfo_done == 1) {
return (MDI_SUCCESS);
}
} else {
char *tptr;
/*
 * lets release the mutex before we try to
 * allocate since the potential to sleep is
 * possible.
 */
}
char *tptr;
}
/*
 * externally initiated failover has happened;
 * force state to ONLINE (see comment above)
 */
return (MDI_SUCCESS);
}
}
/*
 * Initiate auto-failback, if enabled, for path if path-state
 * is transitioning from OFFLINE->STANDBY and pathclass is the
 * preferred pathclass for this storage.
 * NOTE: In case where opinfo_path_state is SCSI_PATH_ACTIVE
 * (above), where the pi state is set to STANDBY, we don't
 * initiate auto-failback as the next IO shall take care of
 * this. See comment above.
 */
" OFFLINE->STANDBY transition for lun %s\n",
vhci_initiate_auto_failback, (void *) vlun,
KM_SLEEP);
}
}
" opinfo_path_state = %x opinfo_preferred = %x, opinfo_mode = %x\n",
return (MDI_SUCCESS);
}
/*
* Form the kstat name and call mdi_pi_kstat_create()
*/
/*
 * NOTE(review): vhci_kstat_create_pathinfo() fragment -- the signature
 * and many statements are missing.  Builds the per-path kstat name from
 * the bidirectional 'target-port' <-> <pid> map described below and
 * calls mdi to create the kstats.  Code preserved byte-for-byte; only
 * review comments added.
 */
void
{
char *guid;
int by_id;
/* return if we have already allocated kstats */
if (mdi_pi_kstat_exists(pip))
return;
/*
 * We need instance numbers to create a kstat name, return if we don't
 * have instance numbers assigned yet.
 */
return;
/*
 * A path oriented kstat has a ks_name of the form:
 *
 * <client-driver><instance>.t<pid>.<pHCI-driver><instance>
 *
 * We maintain a bidirectional 'target-port' to <pid> map,
 * called targetmap. All pathinfo nodes with the same
 * 'target-port' map to the same <pid>. The iostat(1M) code,
 * when parsing a path oriented kstat name, uses the <pid> as
 * a SCSI_VHCI_GET_TARGET_LONGNAME ioctl argument in order
 * to get the 'target-port'. For KSTAT_FLAG_PERSISTENT kstats,
 * this ioctl needs to translate a <pid> to a 'target-port'
 * even after all pathinfo nodes associated with the
 * 'target-port' have been destroyed. This is needed to support
 * consistent first-iteration activity-since-boot iostat(1M)
 * output. Because of this requirement, the mapping can't be
 * based on pathinfo information in a devinfo snapshot.
 */
/* determine 'target-port' */
(void) mdi_prop_free(target_port);
by_id = 1;
} else {
/*
 * If the pHCI did not set up 'target-port' on this
 * pathinfo node, assume that our client is the only
 * one with paths to the device by using the guid
 * value as the 'target-port'. Since no other client
 * will have the same guid, no other client will use
 * the same <pid>. NOTE: a client with an instance
 * number always has a guid.
 */
/*
 * For this type of mapping we don't want the
 * <id> -> 'target-port' mapping to be made. This
 * will cause the SCSI_VHCI_GET_TARGET_LONGNAME ioctl
 * to fail, and the iostat(1M) long '-n' output will
 * still use the <pid>. We do this because we just
 * made up the 'target-port' using the guid, and we
 * don't want to expose that fact in iostat output.
 */
by_id = 0;
}
} else {
(void) mod_hash_insert(vhci_targetmap_byport,
if (by_id) {
(void) mod_hash_insert(vhci_targetmap_bypid,
}
}
/* form kstat name */
"kstat %s: pid %x <-> port %s\n", (void *)pip,
if (target_port_dup)
/* call mdi to create kstats with the name we built */
}
/* ARGSUSED */
/*
 * NOTE(review): vhci_pathinfo_online() fragment -- the signature, the
 * "failure:" label and many statements are missing from this extraction
 * (goto failure targets are not visible).  Probes the failover module
 * for the path, revalidates PGR keys, issues a one-time SCSI-2 RELEASE
 * on a new path, and initializes MP-API data and kstats.  Code
 * preserved byte-for-byte; only review comments added / comment typos
 * fixed.
 */
static int
{
char *guid;
"property failed"));
goto failure;
}
/*
 * Get inquiry data into pathinfo related scsi_device structure.
 * Free sq_inq when pathinfo related scsi_device structure is destroyed
 * by vhci_pathinfo_uninit(). In other words, vhci maintains its own
 * copy of scsi_device and scsi_inquiry data on a per-path basis.
 */
rval = MDI_FAILURE;
goto failure;
}
/*
 * See if we have a failover module to support the device.
 *
 * We re-probe to determine the failover ops for each path. This
 * is done in case there are any path-specific side-effects associated
 * with the sfo_device_probe implementation.
 *
 * Give the first successful sfo_device_probe the opportunity to
 * then be passed into the failover module on all other sfo_device_*()
 * operations (and must be freed by sfo_device_unprobe implementation).
 *
 * NOTE: While sfo_device_probe is done once per path,
 * sfo_device_unprobe only occurs once - when the vlun is destroyed.
 *
 * NOTE: We don't currently support per-path fops private data
 * mechanism.
 */
/* check path configuration result with current vlun state */
/* Getting different results for different paths. */
"!vhci_pathinfo_online: dev (path 0x%p) contradiction\n",
(void *)pip));
"'%s'.vs.'%s': path %s\n",
goto done;
/* No failover module - device not supported under vHCI. */
"!vhci_pathinfo_online: dev (path 0x%p) not "
"supported\n", (void *)pip));
/* XXX does this contradict vhci_is_dev_supported ? */
goto done;
}
/* failover supported for device - save failover_ops in vlun */
/*
 * Obtain the device-type based mpxio options as specified in
 * scsi_vhci.conf file.
 *
 * NOTE: currently, the end result is a call to
 * mdi_set_lb_region_size().
 */
/*
 * if PGR is active, revalidate key and register on this path also,
 * if key is still valid
 */
if (vlun->svl_pgr_active) {
if (rval != 1) {
rval = MDI_FAILURE;
goto failure;
}
}
if (svp->svp_new_path) {
/*
 * Last chance to perform any cleanup operations on this
 * new path before making this path completely online.
 */
svp->svp_new_path = 0;
/*
 * If scsi_vhci knows the lun is already RESERVE'd,
 * then skip the issue of RELEASE on new path.
 */
/*
 * Issue SCSI-2 RELEASE only for the first time on
 * a new path just in case the host rebooted and
 * a reservation is still pending on this path.
 * IBM Shark storage does not clear RESERVE upon
 * host reboot.
 */
sizeof (struct scsi_arq_status), 0, 0,
SLEEP_FUNC, NULL);
"!vhci_pathinfo_online: "
"Release init_pkt failed :%p\n",
(void *)pip));
rval = MDI_FAILURE;
goto failure;
}
"!vhci_path_online: path:%p "
"Issued SCSI-2 RELEASE\n", (void *)pip));
/* Ignore the return value */
(void) vhci_do_scsi_cmd(pkt);
}
}
if (rval == MDI_FAILURE) {
goto failure;
}
/* Initialize MP-API data */
/*
 * MP-API also needs the Inquiry data to be maintained in the
 * mp_vendor_prop_t structure, so find the lun and update its
 * structure with this data.
 */
MP_OBJECT_TYPE_MULTIPATH_LU, (void *)vlun);
} else {
"mpapi_lu_data_t is NULL"));
}
/* create kstats for path */
done:
if (vlun->svl_setcap_done) {
}
(void *)pip));
return (rval);
}
/*
* path offline handler. Release all bindings that will not be
* released by the normal packet transport/completion code path.
* Since we don't (presently) keep any bindings alive outside of
* the in-transport packets (which will be released on completion)
* there is not much to do here.
*/
/* ARGSUSED */
/*
 * NOTE(review): vhci_pathinfo_offline() fragment (contract in the header
 * comment above) -- the signature and many statements are missing.
 * Waits (with timeout) for in-flight commands to drain, then clears any
 * active SCSI-II reservation on this path via reset before offlining.
 * Code preserved byte-for-byte; only review comments added.
 */
static int
{
"phci dip", (void *)pip));
return (MDI_FAILURE);
}
"client dip", (void *)pip));
return (MDI_FAILURE);
}
/*
 * mdi_pathinfo node in INIT state can have vHCI private
 * information set to null
 */
"svp is NULL for pip 0x%p\n", (void *)pip));
return (MDI_SUCCESS);
}
TR_CLOCK_TICK) == -1) {
/*
 * The timeout time reached without the condition
 * being signaled.
 */
"Timeout reached on path 0x%p without the cond\n",
(void *)pip));
"%d cmds still pending on path: 0x%p\n",
break;
}
}
/*
 * Check to see if this vlun has an active SCSI-II RESERVE. And this
 * is the pip for the path that has been reserved.
 * If so clear the reservation by sending a reset, so the host will not
 * get a reservation conflict. Reset the flag VLUN_RESERVE_ACTIVE_FLG
 * for this lun. Also a reset notify is sent to the target driver
 * just in case the POR check condition is cleared by some other layer
 * in the stack.
 */
VHCI_DEPTH_TARGET) == 0) {
"!vhci_pathinfo_offline (pip:%p):"
"reset failed, retrying\n", (void *)pip));
VHCI_DEPTH_TARGET) == 0) {
"!vhci_pathinfo_offline "
"(pip:%p): reset failed, "
"giving up!\n", (void *)pip));
}
}
}
}
"!vhci_pathinfo_offline: offlined path 0x%p\n", (void *)pip));
return (MDI_SUCCESS);
}
/*
* routine for SCSI VHCI IOCTL implementation.
*/
/* ARGSUSED */
static int
{
int retval = 0;
caddr_t s;
char *pclass;
/* Check for validity of vhci structure */
return (ENXIO);
}
return (ENXIO);
}
/* Get the vhci dip */
/* Allocate memory for getting parameters from userland */
/*
* Set a local variable indicating the ioctl name. Used for
* printing debug strings.
*/
switch (cmd) {
s = "GET_CLIENT_MULTIPATH_INFO";
break;
s = "GET_PHCI_MULTIPATH_INFO";
break;
s = "GET_CLIENT_NAME";
break;
case SCSI_VHCI_PATH_ONLINE:
s = "PATH_ONLINE";
break;
case SCSI_VHCI_PATH_OFFLINE:
s = "PATH_OFFLINE";
break;
case SCSI_VHCI_PATH_STANDBY:
s = "PATH_STANDBY";
break;
case SCSI_VHCI_PATH_TEST:
s = "PATH_TEST";
break;
s = "SWITCH_TO_CNTLR";
break;
case SCSI_VHCI_PATH_DISABLE:
s = "PATH_DISABLE";
break;
case SCSI_VHCI_PATH_ENABLE:
s = "PATH_ENABLE";
break;
s = "GET_TARGET_LONGNAME";
break;
#ifdef DEBUG
case SCSI_VHCI_CONFIGURE_PHCI:
s = "CONFIGURE_PHCI";
break;
s = "UNCONFIGURE_PHCI";
break;
#endif
default:
s = "Unknown";
break;
}
if (retval != 0) {
goto end;
}
/*
* Get IOCTL parameters from userland
*/
switch (cmd) {
case SCSI_VHCI_PATH_ONLINE:
case SCSI_VHCI_PATH_OFFLINE:
case SCSI_VHCI_PATH_STANDBY:
case SCSI_VHCI_PATH_TEST:
case SCSI_VHCI_PATH_DISABLE:
case SCSI_VHCI_PATH_ENABLE:
#ifdef DEBUG
case SCSI_VHCI_CONFIGURE_PHCI:
#endif
break;
mode, s);
break;
}
if (retval != 0) {
goto end;
}
/*
* Process the IOCTL
*/
switch (cmd) {
{
break;
}
/* Get client device path from user land */
break;
}
"client <%s>", s, client_path));
/* Get number of paths to this client device */
== NULL) {
"client dip doesn't exist. invalid path <%s>",
s, client_path));
break;
}
"num_paths copyout failed", s));
break;
}
/* If user just wanted num_paths, then return */
num_paths == 0) {
break;
}
/* Set num_paths to value as much as can be sent to userland */
}
/* Allocate memory and get userland pointers */
break;
}
/*
* Get the path information and send it to userland.
*/
!= MDI_SUCCESS) {
break;
}
break;
}
/* Free the memory allocated for path information */
break;
}
{
break;
}
/* Get PHCI device path from user land */
break;
}
"!vhci_ioctl: ioctl <%s> phci <%s>", s, phci_path));
/* Get number of devices associated with this PHCI device */
"phci dip doesn't exist. invalid path <%s>",
s, phci_path));
break;
}
"num_paths copyout failed", s));
break;
}
/* If user just wanted num_paths, then return */
num_paths == 0) {
break;
}
/* Set num_paths to value as much as can be sent to userland */
}
/* Allocate memory and get userland pointers */
break;
}
/*
* Get the path information and send it to userland.
*/
!= MDI_SUCCESS) {
break;
}
break;
}
/* Free the memory allocated for path information */
break;
}
{
/* Get PHCI path and device address from user land */
break;
}
/* Get the PHCI dip */
"phci dip doesn't exist. invalid path <%s>",
s, phci_path));
break;
}
"pathinfo doesn't exist. invalid device addr", s));
break;
}
/* Get the client device pathname and send to userland */
"client <%s>", s, client_path));
break;
}
break;
}
case SCSI_VHCI_PATH_ONLINE:
case SCSI_VHCI_PATH_OFFLINE:
case SCSI_VHCI_PATH_STANDBY:
case SCSI_VHCI_PATH_TEST:
{
/* Get PHCI path and device address from user land */
break;
}
/* Get the PHCI dip */
"phci dip doesn't exist. invalid path <%s>",
s, phci_path));
break;
}
"pathinfo doesn't exist. invalid device addr", s));
break;
}
"Calling MDI function to change device state", s));
switch (cmd) {
case SCSI_VHCI_PATH_ONLINE:
break;
case SCSI_VHCI_PATH_OFFLINE:
break;
case SCSI_VHCI_PATH_STANDBY:
break;
case SCSI_VHCI_PATH_TEST:
break;
}
break;
}
{
/* Get the client device pathname */
MAXPATHLEN, mode)) {
"client_path copyin failed", s));
break;
}
/* Get the path class to which user wants to switch */
"controller_class copyin failed", s));
break;
}
/* Perform validity checks */
client_path)) == NULL) {
"client dip doesn't exist. invalid path <%s>",
s, client_path));
break;
}
"to switch controller"));
"invalid path class <%s>", s, paddr));
break;
}
"invalid scsi device <%s>", s, client_path));
break;
}
/*
* Checking to see if device has only one pclass, PRIMARY.
* If so this device doesn't support failovers. Assumed
* that devices with one pclass are PRIMARY, as that's the
* case today. If this is not true and in future other
* symmetric devices are supported with other pclass, this
* IOCTL shall have to be overhauled anyways as now the only
* arguments it accepts are PRIMARY and SECONDARY.
*/
vlun->svl_fops_ctpriv)) {
break;
}
break;
}
}
/* Call mdi function to cause a switch over */
if (retval == MDI_SUCCESS) {
retval = 0;
} else {
}
break;
}
case SCSI_VHCI_PATH_ENABLE:
case SCSI_VHCI_PATH_DISABLE:
{
/*
* Get client device path from user land
*/
break;
}
/*
* Get Phci device path from user land
*/
break;
}
/*
* Get the devinfo for the Phci.
*/
"phci dip doesn't exist. invalid path <%s>",
s, phci_path));
break;
}
/*
* If the client path is set to /scsi_vhci then we need
* to do the operation on all the clients so set cdip to NULL.
* Else, try to get the client dip.
*/
} else {
client_path)) == NULL) {
"!vhci_ioctl: ioctl <%s> client dip "
"doesn't exist. invalid path <%s>",
s, client_path));
break;
}
}
if (cmd == SCSI_VHCI_PATH_ENABLE)
else
break;
}
{
char *target_port;
/* targetmap lookup of 'target-port' by <pid> */
/*
* NOTE: failure to find the mapping is OK for guid
* based 'target-port' values.
*/
"targetport mapping doesn't exist: pid %d",
s, pid));
break;
}
/* copyout 'target-port' result */
target_port = (char *)hv;
"targetport copyout failed: len: %d",
s, (int)strlen(target_port)));
}
break;
}
#ifdef DEBUG
case SCSI_VHCI_CONFIGURE_PHCI:
{
/* Get PHCI path and device address from user land */
break;
}
"phci <%s>", s, phci_path));
/* Get the PHCI dip */
"phci dip doesn't exist. invalid path <%s>",
s, phci_path));
break;
}
if (ndi_devi_config(pdip,
}
break;
}
{
/* Get PHCI path and device address from user land */
break;
}
"phci <%s>", s, phci_path));
/* Get the PHCI dip */
"phci dip doesn't exist. invalid path <%s>",
s, phci_path));
break;
}
if (ndi_devi_unconfig(pdip,
}
break;
}
#endif
}
end:
/* Free the memory allocated above */
}
if (client_path != NULL) {
}
}
return (retval);
}
/*
* devctl IOCTL support for client device DR
*/
/* ARGSUSED */
int
int *rvalp)
{
int rv = 0;
int retval = 0;
return (ENXIO);
/*
* check if :devctl minor device has been opened
*/
return (ENXIO);
}
return (ENXIO);
/*
* We can use the generic implementation for these ioctls
*/
switch (cmd) {
case DEVCTL_DEVICE_GETSTATE:
case DEVCTL_DEVICE_ONLINE:
case DEVCTL_DEVICE_OFFLINE:
case DEVCTL_DEVICE_REMOVE:
case DEVCTL_BUS_GETSTATE:
}
/*
* read devctl ioctl data
*/
return (EFAULT);
switch (cmd) {
case DEVCTL_DEVICE_RESET:
/*
* lookup and hold child device
*/
break;
}
"Unable to get a path, dip 0x%p", (void *)child));
break;
}
VHCI_DEPTH_TARGET) == 0) {
"!vhci_ioctl(pip:%p): "
"reset failed\n", (void *)pip));
}
break;
case DEVCTL_BUS_QUIESCE:
case DEVCTL_BUS_UNQUIESCE:
case DEVCTL_BUS_RESET:
case DEVCTL_BUS_RESETALL:
#ifdef DEBUG
case DEVCTL_BUS_CONFIGURE:
case DEVCTL_BUS_UNCONFIGURE:
#endif
break;
default:
} /* end of outer switch */
return (rv);
}
/*
* Routine to get the PHCI pathname from ioctl structures in userland
*/
/* ARGSUSED */
static int
{
int retval = 0;
"phci_path copyin failed", s));
}
return (retval);
}
/*
* Routine to get the Client device pathname from ioctl structures in userland
*/
/* ARGSUSED */
static int
{
int retval = 0;
"ioctl <%s> client_path copyin failed", s));
}
return (retval);
}
/*
* Routine to get physical device address from ioctl structure in userland
*/
/* ARGSUSED */
static int
{
int retval = 0;
"ioctl <%s> device addr copyin failed", s));
}
return (retval);
}
/*
* Routine to send client device pathname to userland.
*/
/* ARGSUSED */
static int
{
int retval = 0;
"ioctl <%s> client_path copyout failed", s));
}
return (retval);
}
/*
* Routine to translate a dev_info pointer (dip) to a device pathname.
*/
static void
{
}
/*
* vhci_get_phci_path_list:
* get information about devices associated with a
* given PHCI device.
*
* Return Values:
* path information elements
*/
int
{
int status;
int circular;
/*
* Get the PHCI structure and retrieve the path information
* from the GUID hash table.
*/
count = 0;
&ret_pip->ret_ext_state);
}
#ifdef DEBUG
if (status != MDI_SUCCESS) {
"!vhci_get_phci_path_list: "
"phci <%s>, prop size failure 0x%x",
}
#endif /* DEBUG */
#ifdef DEBUG
if (status != MDI_SUCCESS) {
"!vhci_get_phci_path_list: "
"phci <%s>, prop pack failure 0x%x",
}
#endif /* DEBUG */
}
ret_pip++;
count++;
}
return (MDI_SUCCESS);
}
/*
* vhci_get_client_path_list:
* get information about various paths associated with a
* given client device.
*
* Return Values:
* path information elements
*/
int
{
int status;
int circular;
count = 0;
&ret_pip->ret_ext_state);
}
#ifdef DEBUG
if (status != MDI_SUCCESS) {
"!vhci_get_client_path_list: "
"phci <%s>, prop size failure 0x%x",
}
#endif /* DEBUG */
#ifdef DEBUG
if (status != MDI_SUCCESS) {
"!vhci_get_client_path_list: "
"phci <%s>, prop pack failure 0x%x",
}
#endif /* DEBUG */
}
ret_pip++;
count++;
}
return (MDI_SUCCESS);
}
/*
* Routine to get ioctl argument structure from userland.
*/
/* ARGSUSED */
static int
{
int retval = 0;
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
break;
}
break;
}
case DDI_MODEL_NONE:
break;
}
break;
}
#else /* _MULTI_DATAMODEL */
}
#endif /* _MULTI_DATAMODEL */
#ifdef DEBUG
if (retval) {
"iocdata copyin failed", s));
}
#endif
return (retval);
}
/*
* Routine to get the ioctl argument for ioctl causing controller switchover.
*/
/* ARGSUSED */
static int
{
int retval = 0;
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
break;
}
break;
}
case DDI_MODEL_NONE:
}
break;
}
#else /* _MULTI_DATAMODEL */
}
#endif /* _MULTI_DATAMODEL */
#ifdef DEBUG
if (retval) {
"switch_to_cntlr_iocdata copyin failed", s));
}
#endif
return (retval);
}
/*
* Routine to allocate memory for the path information structures.
* It allocates two chunks of memory - one for keeping userland
* pointers/values and one for kernel-side structures, also
* allocating kernel memory for path properties. These path
* properties are finally copied to userland.
*/
/* ARGSUSED */
static int
{
int retval = 0;
int index;
/* Allocate memory */
*upibuf = (sv_path_info_t *)
*kpibuf = (sv_path_info_t *)
/*
* Get the path info structure from the user space.
* We are interested in the following fields:
* - user size of buffer for per path properties.
* - user address of buffer for path info properties.
* - user pointer for returning actual buffer size
* Keep these fields in the 'upibuf' structures.
* Allocate buffer for per path info properties in kernel
* structure ('kpibuf').
* Size of these buffers will be equal to the size of buffers
* in the user space.
*/
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
{
break;
}
}
break;
}
case DDI_MODEL_NONE:
}
break;
}
#else /* _MULTI_DATAMODEL */
}
#endif /* _MULTI_DATAMODEL */
if (retval != 0) {
"ioctl <%s> normal: path_info copyin failed", s));
return (retval);
}
/*
* Allocate memory for per path properties.
*/
} else {
}
} else {
}
}
return (0);
}
/*
* Routine to free memory for the path information structures.
* This is the memory which was allocated earlier.
*/
/* ARGSUSED */
static void
{
int index;
/* Free memory for per path properties */
}
}
}
/* Free memory for path info structures */
}
/*
* Routine to copy path information and path properties to userland.
*/
/* ARGSUSED */
static int
{
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
goto copy_32bit;
case DDI_MODEL_NONE:
goto copy_normal;
}
#else /* _MULTI_DATAMODEL */
goto copy_normal;
#endif /* _MULTI_DATAMODEL */
/*
* Copy path information and path properties to user land.
* Pointer fields inside the path property structure were
* saved in the 'upibuf' structure earlier.
*/
break;
}
break;
}
mode)) {
break;
}
break;
}
break;
}
break;
}
}
#ifdef DEBUG
if (retval) {
"normal: path_info copyout failed", s));
}
#endif
return (retval);
/*
* Copy path information and path properties to user land.
* Pointer fields inside the path property structure were
* saved in the 'upibuf' structure earlier.
*/
break;
}
break;
}
break;
}
break;
}
break;
}
break;
}
}
#ifdef DEBUG
if (retval) {
"normal: path_info copyout failed", s));
}
#endif
return (retval);
}
/*
* vhci_failover()
* This routine expects VHCI_HOLD_LUN before being invoked. It can be invoked
* as MDI_FAILOVER_ASYNC or MDI_FAILOVER_SYNC. For Asynchronous failovers
* this routine shall VHCI_RELEASE_LUN on exiting. For synchronous failovers
* it is the caller's responsibility to release the lun.
*/
/* ARGSUSED */
static int
{
char *guid;
int activation_done = 0;
/*
* Lets maintain a local copy of the vlun->svl_active_pclass
* for the rest of the processing. Accessing the field
* directly in the loop below causes loop logic to break
* especially when the field gets updated by other threads that
* update path status, etc., causing the 'paths are not currently
* available' condition to be declared prematurely.
*/
sizeof (active_pclass_copy));
active_pclass_ptr) != 0) {
}
} else {
/*
* can happen only when the available path to device
* discovered is a STANDBY path.
*/
active_pclass_copy[0] = '\0';
}
"failed, no more pathclasses\n", guid));
goto done;
} else {
}
"device %s (GUID %s): Invalid path-class %s",
goto done;
}
/*
* paths are not currently available
*/
" for device %s (GUID %s)",
goto done;
}
goto next_pathclass;
}
do {
pclass) != 0)) {
"!vhci_failover(5.5)(%s): skipping path "
(void) mdi_prop_free(pclass);
continue;
}
/*
* Issue READ at a non-zero block on this STANDBY path.
* Purple returns
* 1. RESERVATION_CONFLICT if reservation is pending
* 2. POR check condition if a reset happened.
* 3. failover check conditions if one is already in progress.
*/
reserve_pending = 0;
check_condition = 0;
UA_condition = 0;
if (!bp) {
"vhci_failover !No resources (buf)\n"));
goto done;
}
CDB_GROUP1, sizeof (struct scsi_arq_status), 0,
if (pkt) {
switch (pkt->pkt_reason) {
case CMD_CMPLT:
case STATUS_GOOD:
/* Already failed over */
activation_done = 1;
break;
reserve_pending = 1;
break;
case STATUS_CHECK:
check_condition = 1;
break;
}
}
}
if (check_condition &&
&(((struct scsi_arq_status *)(uintptr_t)
if (skey == KEY_UNIT_ATTENTION &&
asc == 0x29) {
/* Already failed over */
"!vhci_failover(7)(%s): "
"path 0x%p POR UA condition\n",
if (UA_condition == 0) {
UA_condition = 1;
goto check_path_again;
}
} else {
activation_done = 0;
"!vhci_failover(%s): path 0x%p "
"unhandled chkcond %x %x %x\n",
}
}
}
if (activation_done) {
"path 0x%p already failedover\n", guid,
(void *)npip));
break;
}
(void) vhci_recovery_reset(vlun,
}
vlun->svl_fops_ctpriv) == 0) {
activation_done = 1;
"path 0x%p successfully activated\n", guid,
(void *)npip));
break;
}
if (activation_done == 0) {
goto next_pathclass;
}
/*
* if we are here, we have succeeded in activating path npip of
* pathclass pclass2; let us validate all paths of pclass2 by
* "ping"-ing each one and mark the good ones ONLINE
* Also, set the state of the paths belonging to the previously
* active pathclass to STANDBY
*/
"device %s (GUID %s): paths may be busy\n",
goto done;
}
do {
!= MDI_SUCCESS) {
continue;
}
if (pi_state == MDI_PATHINFO_STATE_STANDBY) {
svp = (scsi_vhci_priv_t *)
"!vhci_failover(8)(%s): "
"pinging path 0x%p\n",
"!vhci_failover(9)(%s): "
"path 0x%p ping successful, "
"marked online\n", guid,
(void *)npip));
}
}
== 0)) {
if (pi_state == MDI_PATHINFO_STATE_ONLINE) {
"!vhci_failover(10)(%s): path 0x%p marked "
}
}
(void) mdi_prop_free(pclass);
/*
* Update the AccessState of related MP-API TPGs
*/
"for device %s (GUID %s): failed over from %s to %s",
if (ptr2) {
}
/* All reservations are cleared upon these resets. */
done:
if (flags == MDI_FAILOVER_ASYNC) {
"releasing lun, as failover was ASYNC\n"));
} else {
"NOT releasing lun, as failover was SYNC\n"));
}
return (retval);
}
/*
* vhci_client_attached is called after the successful attach of a
* client devinfo node.
*/
static void
{
int circular;
/*
* At this point the client has attached and its instance number is
* valid, so we can set up kstats. We need to do this here because it
* is possible for paths to go online prior to client attach, in which
* case the call to vhci_kstat_create_pathinfo in vhci_pathinfo_online
* was a noop.
*/
}
/*
* quiesce all of the online paths
*/
static int
char *guid, char *active_pclass_ptr)
{
int sps;
/* quiesce currently active paths */
return (1);
}
do {
&s_pclass) != MDI_SUCCESS) {
"for device %s (GUID %s) due to an internal "
return (1);
}
/*
* quiesce path. Free s_pclass since
* we don't need it anymore
*/
"!vhci_failover(2)(%s): failing over "
"from %s; quiescing path %p\n",
(void) mdi_prop_free(s_pclass);
svp = (scsi_vhci_priv_t *)
"!vhci_failover(2.5)(%s): no "
"client priv! %p offlined?\n",
continue;
}
== 0) {
(void) vhci_recovery_reset(vlun,
}
"!vhci_failover(3)(%s):"
} else {
"!vhci_failover(3.cv)(%s):"
"quiesced path %p\n", guid,
(void *)npip));
}
}
} else {
/*
* make sure we freeup the memory
*/
(void) mdi_prop_free(s_pclass);
}
return (0);
}
static struct scsi_vhci_lun *
{
return ((struct scsi_vhci_lun *)
}
static struct scsi_vhci_lun *
{
return (svl);
}
*didalloc = 1;
"vhci_lun_lookup_alloc: guid %s vlun 0x%p\n",
return (svl);
}
static void
{
char *guid;
}
}
if (dvlp->svl_fops_name) {
}
}
/*
* vhci_lun_free may be called before the tgt_dip
* initialization so check if the sd is NULL.
*/
}
int
{
int err = 0;
int retry_cnt = 0;
#ifdef DEBUG
if (vhci_debug > 5) {
}
#endif
if (err) {
"!v_s_do_s_c: RELEASE conflict\n"));
return (0);
}
}
if (retry_cnt++ < 6) {
"!v_s_do_s_c:retry packet 0x%p "
"status 0x%x reason %s",
&(((struct scsi_arq_status *)(uintptr_t)
"!v_s_do_s_c:retry "
"packet 0x%p sense data %s", (void *)pkt,
scsi_sname(skey)));
}
goto retry;
}
"!v_s_do_s_c: failed transport 0x%p 0x%x",
return (0);
}
switch (pkt->pkt_reason) {
case CMD_TIMEOUT:
"out (pkt 0x%p)", (void *)pkt));
return (0);
case CMD_CMPLT:
case STATUS_GOOD:
break;
case STATUS_CHECK:
struct scsi_arq_status *)
if ((skey ==
(skey ==
KEY_NOT_READY)) {
/*
* clear unit attn.
*/
VHCI_DEBUG(1,
"!v_s_do_s_c: "
"retry "
"packet 0x%p sense "
"data %s",
(void *)pkt,
(skey)));
goto retry;
}
"!ARQ while "
"transporting "
"(pkt 0x%p)",
(void *)pkt));
return (0);
}
return (0);
default:
"!Bad status returned "
"(pkt 0x%p, status %x)",
return (0);
}
break;
case CMD_INCOMPLETE:
case CMD_RESET:
case CMD_ABORTED:
case CMD_TRAN_ERR:
if (retry_cnt++ < 1) {
"!v_s_do_s_c: retry packet 0x%p %s",
goto retry;
}
/* FALLTHROUGH */
default:
"complete successfully (pkt 0x%p,"
return (0);
}
return (1);
}
static int
{
int circular;
if (pstate != MDI_PATHINFO_STATE_ONLINE) {
continue;
}
TR_CLOCK_TICK) == -1) {
"Quiesce of lun is not successful "
"vlun: 0x%p.", (void *)vlun));
return (0);
}
}
}
return (1);
}
static int
{
/*
* see if there are any other paths available; if none,
* then there is nothing to do.
*/
"%s%d: vhci_pgr_validate_and_register: first path\n",
return (1);
}
/*
* cleared this key. Validate key on some other path first.
* If it fails, return failure.
*/
success = 0;
/* Save the res key */
/*
* Sometimes CDB from application can be a Register_And_Ignore.
* Instead of validation, this cdb would result in force registration.
* Convert it to normal cdb for validation.
* After that be sure to restore the cdb.
*/
do {
"vhci_pgr_validate_and_register: no "
"client priv! 0x%p offlined?\n",
(void *)npip));
goto next_path_1;
}
"vhci_pgr_validate_and_register: same svp 0x%p"
" npip 0x%p vlun 0x%p\n",
goto next_path_1;
}
"vhci_pgr_validate_and_register: First validate on"
" osvp 0x%p being done. vlun 0x%p thread 0x%p Before bcopy"
(void *)vlun));
if (rval == 1) {
"%s%d: vhci_pgr_validate_and_register: key"
success = 1;
break;
} else {
"vhci_pgr_validate_and_register: First validation"
}
/*
* Try other paths
*/
/* Be sure to restore original cdb */
/* Restore the res_key */
/*
* If key could not be registered on any path for the first time,
* return success as online should still continue.
*/
if (success == 0) {
return (1);
}
/*
* Force register on new path
*/
if (rval != 1) {
"vhci_pgr_validate_and_register: register on new"
" path 0x%p svp 0x%p failed %x\n",
return (0);
}
"vhci_pgr_validate_and_register: zero service key\n"));
return (rval);
}
/*
* While the key was force registered, some other host might have
* cleared the key. Re-validate key on another pre-existing path
* before declaring success.
*/
/*
* Sometimes CDB from application can be Register and Ignore.
* Instead of validation, it would result in force registration.
* Convert it to normal cdb for validation.
* After that be sure to restore the cdb.
*/
success = 0;
do {
osvp = (scsi_vhci_priv_t *)
"vhci_pgr_validate_and_register: no "
"client priv! 0x%p offlined?\n",
(void *)npip));
goto next_path_2;
}
"vhci_pgr_validate_and_register: same osvp 0x%p"
" npip 0x%p vlun 0x%p\n",
goto next_path_2;
}
"vhci_pgr_validate_and_register: Re-validation on"
" osvp 0x%p being done. vlun 0x%p Before bcopy cdb1 %x\n",
if (rval == 1) {
"%s%d: vhci_pgr_validate_and_register: key"
success = 1;
break;
} else {
"vhci_pgr_validate_and_register: Re-validation on"
"v_pgr_val_reg: reval failed: ");
}
/*
* Try other paths
*/
/* Be sure to restore original cdb */
if (success == 1) {
/* Successfully validated registration */
return (1);
}
/*
* key invalid, back out by registering key value of 0
*/
"vhci_pgr_validate_and_register: backout on"
" svp 0x%p being done\n", (void *)svp));
/*
* Get a new path
*/
"%s%d: vhci_pgr_validate_and_register: no valid pip\n",
return (0);
}
"vhci_pgr_validate_and_register: backout on"
" svp 0x%p failed\n", (void *)svp));
"%s%d: vhci_pgr_validate_and_register: key"
ddi_get_instance(cdip)));
if (rval == VHCI_PGR_ILLEGALOP) {
"%s%d: vhci_pgr_validate_and_register: key"
ddi_get_instance(cdip)));
rval = 1;
} else
rval = 0;
} else {
"%s%d: vhci_pgr_validate_and_register: key"
" validation failed, key backed out\n",
}
return (rval);
}
/*
* taskq routine to dispatch a scsi cmd to vhci_scsi_start. This ensures
* that vhci_scsi_start is not called in interrupt context.
* As the upper layer gets TRAN_ACCEPT when the command is dispatched, we
* need to complete the command if something goes wrong.
*/
static void
{
" scsi-2 reserve for 0x%p\n",
/*
* To prevent the taskq from being called recursively we set the
* VHCI_PKT_THRU_TASKQ bit in the vhci_pkt_states.
*/
/*
* Wait for the transport to get ready to send packets
* and if it times out, it will return something other than
* TRAN_BUSY. The vhci_reserve_delay may want to
* get tuned for other transports and is therefore a global.
* Using delay since this routine is called by taskq dispatch
* and not called during interrupt context.
*/
}
switch (rval) {
case TRAN_ACCEPT:
return;
default:
/*
* This pkt shall be retried, and to ensure another taskq
* is dispatched for it, clear the VHCI_PKT_THRU_TASKQ
* flag.
*/
/* Ensure that the pkt is retried without a reset */
"TRAN_rval %d returned for dip 0x%p", rval,
break;
}
/*
* vpkt_org_vpkt should always be NULL here if the retry command
* has been successfully dispatched. If vpkt_org_vpkt != NULL at
* this point, it is an error so restore the original vpkt and
* return an error to the target driver so it can retry the
* command as appropriate.
*/
}
}
static void
{
int held;
/*
* Perform a final check to see if the active path class is indeed
* not the preferred path class. As in the time the auto failback
* was dispatched, an external failover could have been detected.
* [Some other host could have detected this condition and triggered
* the auto failback before].
* In such a case if we go ahead with failover we will be negating the
* whole purpose of auto failback.
*/
char *best_pclass;
"auto failback for %s as %s pathclass already "
return;
}
}
== MDI_SUCCESS) {
"succeeded for device %s (GUID %s)",
} else {
"failed for device %s (GUID %s)",
}
}
#ifdef DEBUG
static void
{
}
#endif
static void
{
int i;
for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
for (i = 0; i < MHIOC_RESV_KEY_SIZE; i++)
prout->active_service_key[i]);
/* Printing all in one go. Otherwise it will jumble up */
"res_key: : %s\n"
"service_key : %s\n"
"active_res_key : %s\n"
"active_service_key: %s\n",
}
/*
* Called from vhci_scsi_start to update the pHCI pkt with target packet.
*/
static void
{
/*
* Polled Command is requested or HBA is in
* suspended state
*/
} else {
}
/* Re-initialize the following pHCI packet state information */
}
static int
{
/*
* Generic processing in MPxIO framework
*/
switch (ret) {
case MDI_SUCCESS:
ret = DDI_SUCCESS;
break;
case MDI_FAILURE:
ret = DDI_FAILURE;
break;
default:
break;
}
return (ret);
}
static int
{
int sps;
int mps_flag;
int rval = 0;
if (pip) {
/*
* If the call is from vhci_pathinfo_state_change,
* then this path was busy and is becoming ready to accept IO.
*/
"!vhci_pHCI_cap: only on path %p, ap %p, rval %x\n",
return (rval);
}
/*
* Set capability on all the pHCIs.
* If any path is busy, then the capability would be set by
* vhci_pathinfo_state_change.
*/
"!vhci_pHCI_cap: Unable to get a path, dip 0x%p",
(void *)cdip));
return (0);
}
"priv is NULL, pip 0x%p", (void *)pip));
return (rval);
}
"psd is NULL, pip 0x%p, svp 0x%p",
return (rval);
}
"!vhci_pHCI_cap: path %p, ap %p, rval %x\n",
/*
* Select next path and issue the setcap, repeat
* until all paths are exhausted
*/
return (1);
}
goto again;
}
return (rval);
}
static int
{
char *guid;
flags |= NDI_DEVI_DEBUG;
else
== MDI_SUCCESS)
return (NDI_SUCCESS);
else
return (NDI_FAILURE);
}
static int
void *arg)
{
flags |= NDI_DEVI_DEBUG;
}
/*
* Take the original vhci_pkt, create a duplicate of the pkt for resending
* as though it originated in ssd.
*/
static struct scsi_pkt *
{
/*
* Ensure consistent data at completion time by setting PKT_CONSISTENT
*/
pkt->pkt_statistics = 0;
pkt->pkt_reason = 0;
/*
* Save a pointer to the original vhci_pkt
*/
}
return (pkt);
}
/*
* Copy the successful completion information from the hba packet into
* the original target pkt from the upper layer. Returns the original
* vpkt and destroys the new vpkt from the internal retry.
*/
static struct vhci_pkt *
{
"completed successfully!\n"));
/*
* Copy the good status into the target driver's packet
*/
/*
* Destroy the internally created vpkt for the retry
*/
vpkt->vpkt_tgt_pkt);
return (ret_vpkt);
}
/* restart the request sense request */
static void
{
"vhci_uscsi_restart_sense: enter: rqpkt: %p", (void *)rqpkt));
/* if it fails - need to wakeup the original command */
}
}
/*
* auto-rqsense is not enabled so we have to retrieve the request sense
* manually.
*/
static int
{
int rval = 0;
"vhci_uscsi_send_sense: enter: bp: %p pkt: %p scmd: %p",
/* set up the packet information and cdb */
return (-1);
}
return (-1);
}
SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0);
/*
* NOTE: This code path is related to MPAPI uscsi(7I), so path
* selection is not based on path_instance.
*/
rqpkt->pkt_path_instance = 0;
/* get her done */
switch (scsi_transport(rqpkt)) {
case TRAN_ACCEPT:
"transport accepted."));
break;
case TRAN_BUSY:
"transport busy, setting timeout."));
break;
default:
"transport failed"));
rval = -1;
}
return (rval);
}
/*
* done routine for the mpapi uscsi command - this is behaving as though
* FLAG_DIAGNOSE is set meaning there are no retries except for a manual
* request sense.
*/
void
{
int err;
"vhci_uscsi_iodone: enter: bp: %p pkt: %p scmd: %p",
/* Save the status and the residual into the uscsi_cmd struct */
/* return on a very successful command */
return;
}
" pkt_resid=%ld pkt_state: 0x%x b_count: %ld b_resid: %ld",
/*
* The command did not complete.
*/
"vhci_uscsi_iodone: command did not complete."
}
/*
* The auto-rqsense happened, and the packet has a filled-in
* scsi_arq_status structure, pointed to by pkt_scbp.
*/
"vhci_uscsi_iodone: received auto-requested sense"));
/* get the amount of data to copy into rqbuf */
*((char *)&arqstat->sts_rqpkt_status);
rqlen != 0) {
}
"vhci_uscsi_iodone: ARQ "
"uscsi_rqstatus=0x%x uscsi_rqresid=%d rqlen: %d "
"xfer: %d rqpkt_resid: %d\n",
}
/* a manual request sense was done - get the information */
/* get the amount of data to copy into rqbuf */
rqlen);
}
}
"uscsi_rqstatus=0x%x uscsi_rqresid=%d\n",
} else {
/*
* Command completed and we're not getting sense. Check for
* errors and decide what to do next.
*/
"vhci_uscsi_iodone: command appears complete: reason: %x",
pkt->pkt_reason));
/* need to manually get the request sense */
return;
}
} else {
"vhci_chk_err: appears complete"));
err = 0;
}
}
}
if (err) {
}
}
/*
* start routine for the mpapi uscsi command
*/
int
{
int retry = 0;
} else {
stat_size = 1;
}
"vhci_uscsi_iostart: rval: EINVAL"));
return (EINVAL);
}
}
"vhci_uscsi_iostart: ap: %p pkt: %p pcdbp: %p uscmdp: %p"
" ucdbp: %p pcdblen: %d bp: %p count: %ld pip: %p"
" stat_size: %d",
/*
* NOTE: This code path is related to MPAPI uscsi(7I), so path
* selection is not based on path_instance.
*/
pkt->pkt_path_instance = 0;
retry++;
}
if (retry >= vhci_uscsi_retry_count) {
"vhci_uscsi_iostart: tran_busy - retry: %d", retry));
}
switch (rval) {
case TRAN_ACCEPT:
rval = 0;
break;
default:
"vhci_uscsi_iostart: rval: %d count: %ld res: %ld",
break;
}
"vhci_uscsi_iostart: exit: rval: %d", rval));
return (rval);
}
/* ARGSUSED */
static struct scsi_failover_ops *
{
char *sfo_name;
char *override;
"!vhci_dev_fo:return NULL no scsi_device or inquiry"));
return (NULL);
}
/*
* Determine if device is supported under scsi_vhci, and select
* failover module.
*
* See if there is a scsi_vhci.conf file override for this devices's
*
* NULL If the NULL is returned then there is no scsi_vhci.conf
* override. For NULL, we determine the failover_ops for
* this device by checking the sfo_device_probe entry
* point for each 'fops' module, in order.
*
* NOTE: Correct operation may depend on module ordering
* of 'specific' (failover modules that are completely
* that are based on T10 standards like TPGS). Currently,
* the value of 'ddi-forceload' in scsi_vhci.conf is used
* to establish the module list and probe order.
*
* "NONE" If value "NONE" is returned then there is a
* scsi_vhci.conf VID/PID override to indicate the device
* should not be supported under scsi_vhci (even if there
* is an 'fops' module supporting the device).
*
* "<other>" If another value is returned then that value is the
* name of the 'fops' module that should be used.
*/
/* NULL: default: select based on sfo_device_probe results */
continue;
/* found failover module, supported under scsi_vhci */
KM_SLEEP);
}
break;
}
/* !"NONE": select based on driver.conf specified name */
continue;
/*
* NOTE: If sfo_device_probe() has side-effects,
* including setting *ctprivp, these are not going
* to occur with override config.
*/
/* found failover module, supported under scsi_vhci */
}
break;
}
}
if (override)
return (sfo);
}
/*
* Determine whether the device described by cinfo should be enumerated
* the vHCI or the pHCI - if there is a failover ops then device is
* supported under vHCI. By agreement with SCSA cinfo is a pointer
* to a scsi_device structure associated with a decorated pHCI probe node.
*/
/* ARGSUSED */
int
{
}
#ifdef DEBUG
extern struct scsi_key_strings scsi_cmds[];
static char *
{
char *cpnt;
/* tmp goes out of scope on return and caller sees garbage */
cpnt = "Unknown Command";
}
return (cpnt);
}
extern uchar_t scsi_cdb_size[];
static void
{
return;
}
}
static void
int len)
{
int i;
int c;
char *format;
for (i = 0; i < len; ) {
buf[0] = 0;
for (c = 0; c < 8 && i < len; c++, i++) {
if (byte < 0x10)
format = "0x0%x ";
else
format = "0x%x ";
}
}
}
#endif
static void
{
char *svl_wwn;
return;
} else {
}
"vhci_invalidate_mpapi_lu: "
"Invalidated LU(%s)", svl_wwn));
return;
}
}
"Could not find LU(%s) to invalidate.", svl_wwn));
}