/* stmf.c revision af40a12383722896f102f17fd227ed8a0de0bd15 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/byteorder.h>
#include <sys/ethernet.h>
#include <stmf.h>
#include <lpif.h>
#include <portif.h>
#include <stmf_ioctl.h>
#include <stmf_impl.h>
#include <lun_map.h>
#include <stmf_state.h>
#include <pppt_ic_if.h>
static uint64_t stmf_session_counter = 0;
static uint16_t stmf_rtpid_counter = 0;
/* start messages at 1 */
static uint64_t stmf_proxy_msg_id = 1;
#define	MSG_ID_TM_BIT	0x8000000000000000
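#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): proxy message ids increase monotonically starting at 1 (see
 * above); task management commands get MSG_ID_TM_BIT set so their
 * completion can be recognized on return from the peer.  The helper
 * name is hypothetical.
 */
static uint64_t
stmf_example_next_proxy_msg_id(int is_tm)
{
	uint64_t id = stmf_proxy_msg_id++;

	return (is_tm ? (id | MSG_ID_TM_BIT) : id);
}
#endif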
static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **result);
static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
    cred_t *credp, int *rval);
static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
    char *info);
void stmf_svc_init();
stmf_status_t stmf_svc_fini();
void stmf_check_freetask();
stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
    int target_reset);
void stmf_delete_all_ppds();
void stmf_trace_clear();
void stmf_worker_init();
stmf_status_t stmf_worker_fini();
void stmf_worker_mgmt();
void stmf_worker_task(void *arg);
/* pppt modhandle */
/* pppt modload imported functions */
static void stmf_update_kstat_lu_q(scsi_task_t *, void());
static void stmf_update_kstat_lport_q(scsi_task_t *, void());
extern struct mod_ops mod_driverops;
/* =====[ Tunables ]===== */
/* Internal tracing */
volatile int stmf_trace_on = 1;
/*
 * The default task timeout is 75 seconds because we want the host to
 * time out first; host-side timeouts are typically 60 seconds.
 */
volatile int stmf_default_task_timeout = 75;
/*
 * Setting this to one means you are responsible for loading the config
 * and keeping it in sync with the persistent database.
 */
volatile int stmf_allow_modunload = 0;
volatile int stmf_max_nworkers = 256;
volatile int stmf_min_nworkers = 4;
volatile int stmf_worker_scale_down_delay = 20;
/* === [ Debugging and fault injection ] === */
#ifdef DEBUG
volatile int stmf_drop_task_counter = 0;
volatile int stmf_drop_buf_counter = 0;
#endif
static uint8_t stmf_first_zero[] =
{ 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
static uint8_t stmf_first_one[] =
{ 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
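#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): the nibble tables above find the first clear (or set) bit
 * of a byte without a loop; 0xff in the table means "no such bit in
 * this nibble".  The helper name is hypothetical.
 */
static int
stmf_example_first_zero_bit(uint8_t b)
{
	uint8_t ndx = stmf_first_zero[b & 0xf];

	if (ndx != 0xff)
		return (ndx);
	/* all four low bits are set, so probe the high nibble */
	ndx = stmf_first_zero[(b >> 4) & 0xf];
	return ((ndx == 0xff) ? -1 : ndx + 4);
}
#endif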
static kmutex_t trace_buf_lock;
static int trace_buf_size;
static int trace_buf_curndx;
static char *stmf_trace_buf;
static enum {
	STMF_WORKERS_DISABLED = 0,
	STMF_WORKERS_ENABLING,
	STMF_WORKERS_ENABLED
} stmf_workers_state = STMF_WORKERS_DISABLED;
static stmf_worker_t *stmf_workers = NULL;
static int stmf_i_max_nworkers;
static int stmf_i_min_nworkers;
static int stmf_nworkers_cur; /* # of workers currently running */
static int stmf_nworkers_needed; /* # of workers need to be running */
static int stmf_worker_sel_counter = 0;
static uint32_t stmf_cur_ntasks = 0;
static clock_t stmf_wm_last = 0;
/*
* This is equal to stmf_nworkers_cur while we are increasing # workers and
* stmf_nworkers_needed while we are decreasing the worker count.
*/
static int stmf_nworkers_accepting_cmds;
static clock_t stmf_worker_scale_down_timer = 0;
static int stmf_worker_scale_down_qd = 0;
static struct cb_ops stmf_cb_ops = {
stmf_open, /* open */
stmf_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
stmf_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* chpoll */
ddi_prop_op, /* cb_prop_op */
0, /* streamtab */
D_NEW | D_MP, /* cb_flag */
CB_REV, /* rev */
nodev, /* aread */
nodev /* awrite */
};
static struct dev_ops stmf_ops = {
	DEVO_REV,
	0,		/* refcnt */
	stmf_getinfo,	/* getinfo */
	nulldev,	/* identify */
	nulldev,	/* probe */
	stmf_attach,	/* attach */
	stmf_detach,	/* detach */
	nodev,		/* reset */
	&stmf_cb_ops,	/* cb_ops */
	NULL,		/* bus_ops */
	NULL		/* power */
};
#define	STMF_NAME		"COMSTAR STMF"
#define	STMF_MODULE_NAME	"stmf"

static struct modldrv modldrv = {
	&mod_driverops, STMF_NAME, &stmf_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
int
_init(void)
{
int ret;
if (ret)
return (ret);
trace_buf_curndx = 0;
/* STMF service is off by default */
return (ret);
}
int
_fini(void)
{
int ret;
return (EBUSY);
if ((!stmf_allow_modunload) &&
return (EBUSY);
}
return (EBUSY);
}
if (stmf_dlun_fini() != STMF_SUCCESS)
return (EBUSY);
if (stmf_worker_fini() != STMF_SUCCESS) {
return (EBUSY);
}
if (stmf_svc_fini() != STMF_SUCCESS) {
return (EBUSY);
}
if (ret) {
return (ret);
}
return (ret);
}
int
{
}
/* ARGSUSED */
static int
{
switch (cmd) {
case DDI_INFO_DEVT2DEVINFO:
break;
case DDI_INFO_DEVT2INSTANCE:
*result =
break;
default:
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
switch (cmd) {
case DDI_ATTACH:
DDI_NT_STMF, 0) != DDI_SUCCESS) {
break;
}
return (DDI_SUCCESS);
}
return (DDI_FAILURE);
}
static int
{
switch (cmd) {
case DDI_DETACH:
ddi_remove_minor_node(dip, 0);
return (DDI_SUCCESS);
}
return (DDI_FAILURE);
}
/* ARGSUSED */
static int
{
if (stmf_state.stmf_exclusive_open) {
return (EBUSY);
}
if (stmf_state.stmf_opened) {
return (EBUSY);
}
}
return (0);
}
/* ARGSUSED */
static int
{
stmf_state.stmf_opened = 0;
if (stmf_state.stmf_exclusive_open &&
}
return (0);
}
int
{
int ret;
if (ret)
return (EFAULT);
goto copyin_iocdata_done;
}
if ((*iocd)->stmf_ibuf_size) {
}
if ((*iocd)->stmf_obuf_size)
if (ret == 0)
return (0);
if (*obuf) {
}
if (*ibuf) {
}
return (ret);
}
int
{
int ret;
if (iocd->stmf_obuf_size) {
if (ret)
return (EFAULT);
}
if (ret)
return (EFAULT);
return (0);
}
/* ARGSUSED */
static int
{
int ret = 0;
uint32_t n;
int i;
return (ENOTTY);
}
return (EPERM);
}
if (ret)
return (ret);
iocd->stmf_error = 0;
switch (cmd) {
case STMF_IOCTL_LU_LIST:
/* retrieves both registered/unregistered */
for (i = 0; i < n; i++) {
}
if (i < n) {
sizeof (slist_lu_t));
i++;
}
}
}
iocd->stmf_obuf_nentries = i;
break;
case STMF_IOCTL_REG_LU_LIST:
iocd->stmf_obuf_nentries = n;
for (i = 0; i < n; i++) {
}
break;
case STMF_IOCTL_VE_LU_LIST:
iocd->stmf_obuf_nentries = n;
for (i = 0; i < n; i++) {
}
break;
iocd->stmf_obuf_nentries = n;
for (i = 0; i < n; i++) {
}
break;
case STMF_IOCTL_SESSION_LIST:
break;
}
ilport->ilport_next) {
break;
}
}
break;
}
iocd->stmf_obuf_nentries = n;
for (i = 0; i < n; i++) {
} else {
}
}
break;
(p_id[0] == 0)) {
break;
}
break;
}
break;
}
} else {
}
break;
(iocd->stmf_obuf_size <
sizeof (sioc_target_port_props_t))) {
break;
}
break;
}
break;
}
} else {
}
break;
break;
}
break;
break;
}
break;
break;
}
break;
break;
}
break;
case STMF_IOCTL_SET_LU_STATE:
break;
}
if (stmf_state.stmf_inventory_locked) {
break;
}
break;
}
break;
}
if (ctl_ret == STMF_ALREADY)
ret = 0;
else if (ctl_ret != STMF_SUCCESS)
break;
break;
}
if (stmf_state.stmf_inventory_locked) {
break;
}
break;
}
}
break;
}
if (ctl_ret == STMF_ALREADY)
ret = 0;
else if (ctl_ret != STMF_SUCCESS)
break;
case STMF_IOCTL_ADD_HG_ENTRY:
/* FALLTHROUGH */
case STMF_IOCTL_ADD_TG_ENTRY:
break;
}
if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
}
break;
}
break; /* not allowed */
}
grp_entry->ident + 4,
grp_entry->ident[3],
&iocd->stmf_error);
break;
/* FALLTHROUGH */
break;
}
if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
}
break;
}
break; /* not allowed */
}
grp_entry->ident + 4,
grp_entry->ident[3],
&iocd->stmf_error);
break;
/* FALLTHROUGH */
break;
}
if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
break;
}
break; /* not allowed */
}
break;
/* FALLTHROUGH */
break;
}
if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
break;
}
break; /* not allowed */
}
break;
case STMF_IOCTL_VALIDATE_VIEW:
break;
}
break;
}
if (!ve->ve_lu_number_valid)
if (ve->ve_all_hosts) {
}
if (ve->ve_all_targets) {
}
if (ve->ve_ndx_valid)
else
veid = 0xffffffff;
if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
&veid,
&iocd->stmf_error);
} else { /* STMF_IOCTL_VALIDATE_VIEW */
&iocd->stmf_error);
}
if (ret == 0 &&
if (!ve->ve_ndx_valid) {
}
if (!ve->ve_lu_number_valid) {
}
}
break;
break;
}
break;
}
if (!ve->ve_ndx_valid) {
break;
}
&iocd->stmf_error);
break;
case STMF_IOCTL_GET_HG_LIST:
/* FALLTHROUGH */
case STMF_IOCTL_GET_TG_LIST:
if (cmd == STMF_IOCTL_GET_TG_LIST)
iocd->stmf_obuf_nentries = n;
for (i = 0; i < n; i++) {
if (iocd->stmf_obuf_nentries > 0) {
}
continue;
}
grpname++;
}
break;
/* FALLTHROUGH */
break;
}
if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
}
if (!id_entry)
else {
iocd->stmf_obuf_nentries = n;
for (i = 0; i < n; i++) {
grp_entry++;
}
}
break;
case STMF_IOCTL_GET_VE_LIST:
for (view_entry = (stmf_view_entry_t *)
if (iocd->stmf_obuf_nentries >= n)
continue;
} else {
}
} else {
}
ve++;
}
}
break;
case STMF_IOCTL_LU_VE_LIST:
break;
}
continue;
for (view_entry = (stmf_view_entry_t *)
if (iocd->stmf_obuf_nentries >= n)
continue;
} else {
}
} else {
}
ve++;
}
break;
}
break;
case STMF_IOCTL_LOAD_PP_DATA:
break;
}
break;
}
/* returned token */
break;
}
break;
case STMF_IOCTL_GET_PP_DATA:
break;
}
break;
}
break;
}
break;
case STMF_IOCTL_CLEAR_PP_DATA:
break;
}
break;
}
break;
case STMF_IOCTL_CLEAR_TRACE:
break;
case STMF_IOCTL_ADD_TRACE:
}
break;
*((int *)obuf) = trace_buf_curndx;
} else {
}
break;
case STMF_IOCTL_GET_TRACE:
break;
}
i = *((int *)ibuf);
trace_buf_size)) {
break;
}
break;
default:
}
if (ret == 0) {
} else if (iocd->stmf_error) {
}
if (obuf) {
}
if (ibuf) {
}
return (ret);
}
static int
{
int online = 0;
int offline = 0;
int onlining = 0;
int offlining = 0;
offline++;
online++;
onlining++;
offlining++;
}
offline++;
online++;
onlining++;
offlining++;
}
if (stmf_state.stmf_service_running) {
if (onlining)
return (STMF_STATE_ONLINING);
else
return (STMF_STATE_ONLINE);
}
if (offlining) {
return (STMF_STATE_OFFLINING);
}
return (STMF_STATE_OFFLINE);
}
static int
{
int svc_state;
if (!stmf_state.stmf_exclusive_open) {
return (EACCES);
}
if (stmf_state.stmf_inventory_locked) {
return (EBUSY);
}
return (EINVAL);
}
if ((svc_state == STMF_STATE_OFFLINING) ||
(svc_state == STMF_STATE_ONLINING)) {
return (EBUSY);
}
if (svc_state == STMF_STATE_OFFLINE) {
return (EINVAL);
}
return (0);
}
return (EINVAL);
}
}
return (0);
}
return (EINVAL);
}
continue;
(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
}
continue;
}
return (0);
}
/* svc_state is STMF_STATE_ONLINE here */
return (EACCES);
}
continue;
(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
}
continue;
}
return (0);
}
static int
{
return (0);
}
/*
* handles registration message from pppt for a logical unit
*/
{
return (STMF_SUCCESS);
}
}
return (STMF_SUCCESS);
}
/*
* handles de-registration message from pppt for a logical unit
*/
{
return (STMF_SUCCESS);
}
}
return (STMF_SUCCESS);
}
/*
* helper function to find a task that matches a task_msgid
*/
{
break;
}
}
return (NULL);
}
continue;
}
break;
}
}
return (itask->itask_task);
} else {
/* task not found. Likely already aborted. */
return (NULL);
}
}
/*
*/
{
return (STMF_FAILURE);
}
switch (msg->icm_msg_type) {
case STMF_ICM_REGISTER_LUN:
(void) stmf_ic_lu_reg(
break;
case STMF_ICM_LUN_ACTIVE:
(void) stmf_ic_lu_reg(
break;
case STMF_ICM_DEREGISTER_LUN:
(void) stmf_ic_lu_dereg(
break;
case STMF_ICM_SCSI_DATA:
(void) stmf_ic_rx_scsi_data(
break;
case STMF_ICM_SCSI_STATUS:
(void) stmf_ic_rx_scsi_status(
break;
case STMF_ICM_STATUS:
(void) stmf_ic_rx_status(
break;
default:
msg->icm_msg_type);
return (STMF_FAILURE);
}
return (STMF_SUCCESS);
}
{
/* for now, ignore other message status */
return (STMF_SUCCESS);
}
return (STMF_SUCCESS);
}
break;
}
}
return (STMF_SUCCESS);
}
/*
* handles scsi status message from pppt
*/
{
/* is this a task management command? */
return (STMF_SUCCESS);
}
return (STMF_SUCCESS);
}
return (STMF_SUCCESS);
}
/*
* handles scsi data message from pppt
*/
{
/* is this a task management command? */
return (STMF_SUCCESS);
}
static uint64_t data_msg_id;
/*
* send xfer done status to pppt
* for now, set the session id to 0 as we cannot
* ascertain it since we cannot find the task
*/
if (ic_xfer_done_msg) {
if (ic_ret != STMF_IC_MSG_SUCCESS) {
}
}
return (STMF_FAILURE);
}
if (task->task_additional_flags &
}
return (STMF_FAILURE);
}
return (STMF_FAILURE);
}
return (STMF_SUCCESS);
}
{
return (STMF_FAILURE);
}
if (ilport->ilport_proxy_registered == 0) {
return (STMF_FAILURE);
}
/*
* stmf will now take over the task handling for this task
* but it still needs to be treated differently from other
* default handled tasks, hence the ITASK_PROXY_TASK.
* If this is a task management function, we're really just
* duping the command to the peer. Set the TM bit so that
* we can recognize this on return since we won't be completing
* the proxied task in that case.
*/
if (task->task_mgmt_function) {
} else {
}
if (dbuf) {
} else {
}
if (ic_cmd_msg) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
ret = STMF_SUCCESS;
}
}
return (ret);
}
{
int error;
return (STMF_FAILURE);
}
"Unable to find symbol - stmf_ic_reg_port_msg_alloc");
return (STMF_FAILURE);
}
"Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
return (STMF_FAILURE);
}
"Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
return (STMF_FAILURE);
}
"Unable to find symbol - stmf_ic_lun_active_msg_alloc");
return (STMF_FAILURE);
}
"Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
return (STMF_FAILURE);
}
"Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
return (STMF_FAILURE);
}
if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
"Unable to find symbol -"
"stmf_ic_scsi_data_xfer_done_msg_alloc");
return (STMF_FAILURE);
}
if (ic_session_reg_msg_alloc == NULL &&
"Unable to find symbol -"
"stmf_ic_session_create_msg_alloc");
return (STMF_FAILURE);
}
if (ic_session_dereg_msg_alloc == NULL &&
"Unable to find symbol -"
"stmf_ic_session_destroy_msg_alloc");
return (STMF_FAILURE);
}
return (STMF_FAILURE);
}
return (STMF_FAILURE);
}
return (STMF_SUCCESS);
}
static void
{
}
static int
{
int ret = 0;
return (EINVAL);
}
if (pppt_modload() == STMF_FAILURE) {
goto err;
}
if (alua_state->alua_node != 0) {
/* reset existing rtpids to new base */
stmf_rtpid_counter = 255;
}
/* register existing local ports with pppt */
/* skip standby ports */
continue;
}
if (alua_state->alua_node != 0) {
}
0, NULL, stmf_proxy_msg_id);
if (ic_reg_port) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
} else {
"error on port registration "
"port - %s",
}
}
}
/* register existing logical units */
continue;
}
/* register with proxy module */
/* allocate the register message */
/* send the message */
if (ic_reg_lun) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
}
}
}
}
} else {
}
err:
return (ret);
}
typedef struct {
void *bp; /* back pointer from internal struct to main struct */
int alloc_size;
} __istmf_t;
typedef struct {
__istmf_t *fp; /* framework private */
void *cp; /* Caller private */
void *ss; /* struct specific */
} __stmf_t;
static struct {
int shared;
int fw_private;
} stmf_sizes[] = { { 0, 0 },
};
void *
{
int stmf_size;
int kmem_flag;
return (NULL);
} else {
}
return (NULL);
/*
* In principle, the implementation inside stmf_alloc should not
* be changed anyway. But the original order of framework private
* data and caller private data does not support sglist in the caller
* private data.
* To work around this, the memory segments of framework private
* data and caller private data are re-ordered here.
* A better solution is to provide a specific interface to allocate
* the sglist, then we will not need this workaround any more.
* But before the new interface is available, the memory segment
* ordering should be kept as is.
*/
/* Just store the total size instead of storing additional size */
return (sh);
}
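#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): how a single zeroed allocation is carved into the
 * re-ordered segments the comment above describes -- shared struct
 * first, framework private data next, caller private data (which may
 * end in an sglist) last.  The helper name is hypothetical.
 */
static void *
stmf_example_carve(int struct_id, int additional_size, int kmem_flag)
{
	int sz = stmf_sizes[struct_id].shared +
	    stmf_sizes[struct_id].fw_private + additional_size;
	__stmf_t *sh = kmem_zalloc(sz, kmem_flag);

	if (sh == NULL)
		return (NULL);
	/* framework private data sits right after the shared struct */
	sh->fp = (__istmf_t *)((uint8_t *)sh + stmf_sizes[struct_id].shared);
	/* caller private data (and any sglist) comes last */
	sh->cp = (uint8_t *)sh->fp + stmf_sizes[struct_id].fw_private;
	sh->fp->bp = sh;
	sh->fp->alloc_size = sz;	/* total size, for the free side */
	return (sh);
}
#endif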
void
{
/*
* So far we don't need any struct-specific processing. If such
* a need ever arises, then store the struct id in the framework
* private section and get it here as sh->fp->struct_id.
*/
}
/*
* Given a pointer to stmf_lu_t, verifies if this lu is registered with the
* framework and returns a pointer to framework private data for the lu.
* Returns NULL if the lu was not found.
*/
{
return (ilu);
}
return (NULL);
}
/*
* Given a pointer to stmf_local_port_t, verifies if this lport is registered
* with the framework and returns a pointer to framework private data for
* the lport.
* Returns NULL if the lport was not found.
*/
{
return (ilport);
}
return (NULL);
}
{
return (STMF_FAILURE);
/* See if we need to do a callback */
break;
}
}
goto rlp_bail_out;
}
goto rlp_bail_out;
ilp->ilp_cb_in_progress = 0;
return (STMF_SUCCESS);
}
{
return (STMF_BUSY);
}
}
return (STMF_SUCCESS);
}
}
return (STMF_NOT_FOUND);
}
{
return (STMF_FAILURE);
/* See if we need to do a callback */
break;
}
}
goto rpp_bail_out;
}
goto rpp_bail_out;
ipp->ipp_cb_in_progress = 0;
return (STMF_SUCCESS);
}
{
return (STMF_BUSY);
}
}
return (STMF_SUCCESS);
}
}
return (STMF_NOT_FOUND);
}
int
{
int s;
int ret;
*err_ret = 0;
return (EINVAL);
}
if (ppi->ppi_lu_provider) {
if (!ppd->ppd_lu_provider)
continue;
} else if (ppi->ppi_port_provider) {
if (!ppd->ppd_port_provider)
continue;
}
break;
}
/* New provider */
if (s > 254) {
return (EINVAL);
}
s += sizeof (stmf_pp_data_t) - 7;
return (ENOMEM);
}
ppd->ppd_alloc_size = s;
/* See if this provider already exists */
if (ppi->ppi_lu_provider) {
break;
}
}
} else {
break;
}
}
}
/* Link this ppd in */
}
/*
* User is requesting that the token be checked.
* If there was another set after the user's get,
* it's an error.
*/
if (ppi->ppi_token_valid) {
return (EINVAL);
}
}
return (ret);
}
/* Free any existing lists and add this one to the ppd */
/* set the token for writes */
/* return token to caller */
if (ppi_token) {
}
/* If there is a provider registered, do the notifications */
if (ppd->ppd_provider) {
if (ppi->ppi_lu_provider) {
goto bail_out;
ilp->ilp_cb_in_progress = 0;
} else {
goto bail_out;
ipp->ipp_cb_in_progress = 0;
}
}
return (0);
}
void
{
if (ppd->ppd_provider) {
if (ppd->ppd_lu_provider) {
((stmf_i_lu_provider_t *)
} else {
}
}
break;
}
return;
}
int
{
return (EINVAL);
}
if (ppi->ppi_lu_provider) {
if (!ppd->ppd_lu_provider)
continue;
} else if (ppi->ppi_port_provider) {
if (!ppd->ppd_port_provider)
continue;
}
break;
}
if (ppd) {
ret = 0;
}
return (ret);
}
int
{
return (EINVAL);
}
if (ppi->ppi_lu_provider) {
if (!ppd->ppd_lu_provider)
continue;
} else if (ppi->ppi_port_provider) {
if (!ppd->ppd_port_provider)
continue;
}
break;
}
NV_ENCODE_XDR)) != 0) {
goto done;
}
goto done;
}
NV_ENCODE_XDR, 0)) != 0) {
goto done;
}
ret = 0;
}
done:
return (ret);
}
void
{
}
}
/*
* 16 is the max string length of a protocol_ident, increase
* the size if needed.
*/
typedef struct stmf_kstat_lu_info {
typedef struct stmf_kstat_tgt_info {
/*
* This array matches the Protocol Identifier in stmf_ioctl.h
*/
char *protocol_ident[PROTOCOL_ANY] = {
"Fibre Channel",
"Parallel SCSI",
"SSA",
"IEEE_1394",
"SRP",
"iSCSI",
"SAS",
"ADT",
"ATAPI",
"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
};
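#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): bounded lookup into protocol_ident[]; ids at or beyond
 * PROTOCOL_ANY fall back to "UNKNOWN", matching the kstat code below.
 * The helper name is hypothetical.
 */
static const char *
stmf_example_protocol_name(uint8_t id)
{
	return ((id < PROTOCOL_ANY) ? protocol_ident[id] : "UNKNOWN");
}
#endif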
/*
*/
static void
{
return;
}
}
}
/*
*/
static void
{
}
}
}
static void
{
}
}
}
static void
{
}
}
}
static void
{
char ks_nm[KSTAT_STRLEN];
/* create kstat lun info */
return;
}
sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL)) == NULL) {
return;
}
/* convert guid to hex string */
int i;
for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
}
(const char *)ilu->ilu_ascii_hex_guid);
/* create kstat lun io */
return;
}
}
static void
{
char ks_nm[KSTAT_STRLEN];
/* create kstat lport info */
return;
}
sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL)) == NULL) {
return;
}
/* ident might not be null terminated */
(const char *)ilport->ilport_kstat_tgt_name);
/* protocol */
id = PROTOCOL_ANY;
}
/* create kstat lport io */
return;
}
}
/*
* set the asymmetric access state for a logical unit
* caller is responsible for establishing SCSI unit attention on
* state change
*/
{
if ((access_state != STMF_LU_STANDBY) &&
(access_state != STMF_LU_ACTIVE)) {
return (STMF_INVALID_ARG);
}
if (stmf_state.stmf_inventory_locked) {
return (STMF_BUSY);
}
break;
}
}
if (!ilu) {
} else {
/*
* We're changing access state on an existing logical unit
* Send the proxy registration message for this logical unit
* if we're in alua mode.
* If the requested state is STMF_LU_ACTIVE, we want to register
* this logical unit.
* If the requested state is STMF_LU_STANDBY, we're going to
* abort all tasks for this logical unit.
*/
access_state == STMF_LU_ACTIVE) {
/* allocate the register message */
/* send the message */
if (ic_reg_lun) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
}
}
}
access_state == STMF_LU_STANDBY) {
/* abort all tasks for this lu */
}
}
return (STMF_SUCCESS);
}
{
return (STMF_INVALID_ARG);
}
if (stmf_state.stmf_inventory_locked) {
return (STMF_BUSY);
}
return (STMF_ALREADY);
}
}
if (luid) {
}
((stmf_i_lu_provider_t *)
}
/*
* register with proxy module if available and logical unit
* is in active state
*/
/* allocate the register message */
/* send the message */
if (ic_reg_lun) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
}
}
}
}
/* XXX we should probably check if this lu can be brought online */
if (stmf_state.stmf_service_running) {
}
/* XXX: Generate event */
return (STMF_SUCCESS);
}
{
if (stmf_state.stmf_inventory_locked) {
return (STMF_BUSY);
}
return (STMF_INVALID_ARG);
}
}
if (ilu->ilu_ntasks) {
do {
}
/* de-register with proxy if available */
/* de-register with proxy module */
/* allocate the de-register message */
/* send the message */
if (ic_dereg_lun) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
}
}
}
}
else
}
}
((stmf_i_lu_provider_t *)
}
NULL;
}
} else {
return (STMF_BUSY);
}
if (ilu->ilu_kstat_info) {
}
if (ilu->ilu_kstat_io) {
}
return (STMF_SUCCESS);
}
void
{
}
{
int start_workers = 0;
if (stmf_state.stmf_inventory_locked) {
return (STMF_BUSY);
}
if (ilport->ilport_next)
}
/*
* only register ports that are not standby (proxy) ports
*/
if (ilport->ilport_standby == 0) {
}
ilport->ilport_standby == 0) {
0, NULL, stmf_proxy_msg_id);
if (ic_reg_port) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
} else {
}
}
}
if (stmf_workers_state == STMF_WORKERS_DISABLED) {
start_workers = 1;
}
if (start_workers)
/* XXX we should probably check if this lport can be brought online */
if (stmf_state.stmf_service_running) {
}
/* XXX: Generate event */
return (STMF_SUCCESS);
}
{
if (stmf_state.stmf_inventory_locked) {
return (STMF_BUSY);
}
/*
* deregister ports that are not standby (proxy)
*/
ilport->ilport_standby == 0) {
if (ic_dereg_port) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
}
}
}
if (ilport->ilport_nsessions == 0) {
if (ilport->ilport_next)
if (ilport->ilport_prev)
else
}
} else {
return (STMF_BUSY);
}
if (ilport->ilport_kstat_info) {
}
if (ilport->ilport_kstat_io) {
}
return (STMF_SUCCESS);
}
/*
* Port provider has to make sure that register/deregister session and
* port are serialized calls.
*/
{
/*
* Port state has to be online to register a scsi session. It is
* possible that we started an offline operation and a new SCSI
* session started at the same time (in that case also we are going
* to fail the registration). But any other state is simply
* a bad port provider implementation.
*/
"register a session while the state is neither "
"online nor offlining");
}
return (STMF_FAILURE);
}
/* sessions use the ilport_lock. No separate lock is required */
/* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
stmf_scsi_session_t *, ss);
return (STMF_SUCCESS);
}
void
{
int found = 0;
stmf_scsi_session_t *, ss);
if (ss->ss_rport_alias) {
}
delay(1);
goto try_dereg_ss_again;
}
/* dereg proxy session if not standby port */
if (ic_session_dereg) {
if (ic_ret == STMF_IC_MSG_SUCCESS) {
}
}
}
found = 1;
break;
}
}
if (!found) {
" session");
}
}
{
if (!stay_locked)
return (iss);
}
}
}
return (NULL);
}
void
{
break;
}
}
{
uint16_t n;
return (STMF_NOT_FOUND);
} else {
}
return (STMF_NOT_FOUND);
}
return (STMF_ALREADY);
}
return (STMF_ALLOC_FAILURE);
}
return (STMF_SUCCESS);
}
void
{
do {
if (old & STMF_ITL_BEING_TERMINATED)
return;
return;
drv_usecwait(10);
if (itl->itl_counter)
return;
}
{
int i;
if (nmaps == 0)
return (STMF_NOT_FOUND);
/* Something changed, start all over */
goto dereg_itl_start;
}
nu = 0;
if (!lm)
continue;
for (i = 0; i < lm->lm_nentries; i++) {
continue;
(ent->ent_itl_datap)) {
goto dai_scan_done;
}
}
} /* lun table for a session */
} /* sessions */
} /* ports */
for (i = 0; i < nu; i++) {
}
return (STMF_SUCCESS);
}
{
int i;
uint16_t n;
if (session_id == STMF_SESSION_ID_NONE)
return (STMF_INVALID_ARG);
return (STMF_NOT_FOUND);
} else {
}
return (STMF_NOT_FOUND);
}
if (lun) {
ent = (stmf_lun_map_ent_t *)
} else {
if (itl_handle == NULL) {
return (STMF_INVALID_ARG);
}
for (i = 0; i < lm->lm_nentries; i++) {
continue;
if (ent->ent_itl_datap &&
break;
}
}
}
return (STMF_NOT_FOUND);
}
return (STMF_SUCCESS);
}
{
int i;
uint16_t n;
return (STMF_NOT_FOUND);
} else {
}
for (i = 0; i < lm->lm_nentries; i++) {
continue;
break;
}
} else {
ent = (stmf_lun_map_ent_t *)
}
ret = STMF_SUCCESS;
} else {
}
return (ret);
}
{
if (ndx == 0xff)
return (NULL);
if (dbuf) {
task->task_cur_nbufs++;
return (dbuf);
}
return (NULL);
}
void
{
task->task_cur_nbufs--;
}
{
if (h > 3)
return (NULL);
return (itask->itask_dbufs[h]);
}
/* ARGSUSED */
struct scsi_task *
{
uint64_t *p;
uint8_t *l;
/*
* We allocate 7 extra bytes for CDB to provide a cdb pointer which
* is guaranteed to be 8 byte aligned. Some LU providers like OSD
* depend upon this alignment.
*/
if (cdb_length_in >= 16)
else
if (!lun_map_ent) {
} else {
}
return (NULL);
}
do {
new_task = 1;
break;
}
;
if (*ppitask) {
ilu->ilu_ntasks_free--;
} else {
new_task = 1;
}
/* CONSTCOND */
} while (0);
if (!new_task) {
task->task_timeout = 0;
*p++ = 0; *p++ = 0; p++; p++; *p++ = 0; *p++ = 0; *p = 0;
itask->itask_ncmds = 0;
} else {
return (NULL);
}
l = task->task_lun_no;
l[0] = lun[0];
}
}
if (new_task) {
return (NULL);
}
return (NULL);
}
/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
ilu->ilu_ntasks++;
}
} else {
}
return (task);
}
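#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): the "7 extra bytes" allocated above let the CDB pointer be
 * rounded up to the next 8-byte boundary regardless of where the task
 * allocation landed.  The helper name is hypothetical.
 */
static uint8_t *
stmf_example_align8(uint8_t *p)
{
	return ((uint8_t *)(((uintptr_t)p + 7) & ~(uintptr_t)7));
}
#endif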
static void
{
itask->itask_proxy_msg_id = 0;
ilu->ilu_ntasks_free++;
}
void
{
/* free half of the minimum number of free tasks seen recently */
if (!num_to_release) {
return;
}
break;
}
ilu->ilu_ntasks_free--;
if (itask->itask_lu_next)
if (itask->itask_lu_prev)
else
ilu->ilu_ntasks--;
}
}
/*
* Called with stmf_lock held
*/
void
{
/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
if (!ilu->ilu_ntasks_min_free) {
continue;
}
/*
* we do not care about the accuracy of
* ilu_ntasks_min_free, so we don't lock here
*/
if (ddi_get_lbolt() >= endtime)
break;
}
}
void
{
clock_t l = ddi_get_lbolt();
continue;
}
if (task->task_timeout == 0)
else
continue;
STMF_TIMEOUT, NULL);
}
}
/*
* Called with stmf_lock held
*/
void
{
/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
if (ilu->ilu_task_cntr2 == 0) {
continue;
}
} else {
if (ilu->ilu_task_cntr1 == 0) {
continue;
}
}
/*
* If we are here then it means that there is some slowdown
* in tasks on this lu. We need to check.
*/
if (ddi_get_lbolt() >= endtime)
break;
}
}
/*
* Kills all tasks on a lu except tm_task
*/
void
{
continue;
continue;
}
}
void
{
int i;
for (i = 0; i < 4; i++) {
if (map & 1) {
if (dbuf->db_lu_private) {
}
}
map >>= 1;
}
itask->itask_allocated_buf_map = 0;
}
}
void
{
if (itask->itask_itl_datap) {
-1) == 0) {
}
}
if (itask->itask_worker) {
}
/*
* After calling stmf_task_lu_free, the task pointer can no longer
* be trusted.
*/
}
void
{
int nv;
stmf_worker_t *w, *w1;
task->task_cur_nbufs = 0;
/* Latest value of currently running tasks */
/* Select the next worker using round robin */
if (nv >= stmf_nworkers_accepting_cmds) {
int s = nv;
do {
} while (nv >= stmf_nworkers_accepting_cmds);
if (nv < 0)
nv = 0;
/* It's OK if this CAS fails */
s, nv);
}
w = &stmf_workers[nv];
/*
* A worker can be pinned by interrupt. So select the next one
* if it has lower load.
*/
w1 = stmf_workers;
} else {
}
w = w1;
mutex_enter(&w->worker_lock);
if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
(w->worker_flags & STMF_WORKER_TERMINATE)) {
/*
* Maybe we are in the middle of a change. Just go to
* the 1st worker.
*/
mutex_exit(&w->worker_lock);
w = stmf_workers;
mutex_enter(&w->worker_lock);
}
itask->itask_worker = w;
/*
* Track max system load inside the worker as we already have the
* worker lock (no point implementing another lock). The service
* thread will do the comparisons and figure out the max overall
* system load.
*/
if (w->worker_max_sys_qdepth_pu < ct)
w->worker_max_sys_qdepth_pu = ct;
do {
if (task->task_mgmt_function) {
if ((tm == TM_TARGET_RESET) ||
(tm == TM_TARGET_COLD_RESET) ||
(tm == TM_TARGET_WARM_RESET)) {
}
}
new &= ~ITASK_IN_TRANSITION;
if (w->worker_task_tail) {
} else {
w->worker_task_head = itask;
}
w->worker_task_tail = itask;
if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
w->worker_max_qdepth_pu = w->worker_queue_depth;
}
if (dbuf) {
} else {
itask->itask_allocated_buf_map = 0;
}
if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
mutex_exit(&w->worker_lock);
/*
* This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
* was set between checking of ILU_RESET_ACTIVE and clearing of the
* ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
*/
}
}
/*
 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
 * ITASK_KNOWN_TO_LU may already have been reset before ITASK_BEING_ABORTED
 * is set. But if it was not, it cannot be reset until the LU explicitly
 * calls stmf_task_lu_aborted(). Of course the LU will make this call only
 * if we call the LU's abort entry point, and we will only call that entry
 * point if ITASK_KNOWN_TO_LU was set.
 *
 * The same logic applies to the port.
 *
 * Also, ITASK_BEING_ABORTED is not allowed to be set if both KNOWN_TO_LU
 * and KNOWN_TO_TGT_PORT are reset.
 *
 * +++++++++++++++++++++++++++++++++++++++++++++++
 */
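#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): the lock-free flag transition pattern the abort logic
 * above relies on -- re-read itask_flags, compute the new value, and
 * retry the compare-and-swap until no concurrent update races us.
 * The helper name is hypothetical.
 */
static void
stmf_example_set_itask_flag(stmf_i_scsi_task_t *itask, uint32_t flag)
{
	uint32_t old, new;

	do {
		old = itask->itask_flags;
		new = old | flag;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
}
#endif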
{
if (ioflags & STMF_IOF_LU_DONE) {
do {
if (new & ITASK_BEING_ABORTED)
return (STMF_ABORTED);
new &= ~ITASK_KNOWN_TO_LU;
}
return (STMF_ABORTED);
#ifdef DEBUG
if (stmf_drop_buf_counter > 0) {
1)
return (STMF_SUCCESS);
}
#endif
stmf_data_buf_t *, dbuf);
stmf_data_buf_t *, dbuf);
return (ret);
}
void
{
mutex_enter(&w->worker_lock);
do {
if (old & ITASK_BEING_ABORTED) {
mutex_exit(&w->worker_lock);
return;
}
free_it = 0;
kstat_it = 0;
if (iof & STMF_IOF_LPORT_DONE) {
free_it = 1;
kstat_it = 1;
}
/*
* If the task is known to LU then queue it. But if
* it is already queued (multiple completions) then
* just update the buffer information by grabbing the
* worker lock. If the task is not known to LU,
* free this task.
*/
if (old & ITASK_KNOWN_TO_LU) {
free_it = 0;
update_queue_flags = 1;
if (old & ITASK_IN_WORKER_QUEUE) {
queue_it = 0;
} else {
queue_it = 1;
}
} else {
update_queue_flags = 0;
queue_it = 0;
}
if (kstat_it) {
}
if (update_queue_flags) {
if (queue_it) {
if (w->worker_task_tail) {
} else {
w->worker_task_head = itask;
}
w->worker_task_tail = itask;
if (++(w->worker_queue_depth) >
w->worker_max_qdepth_pu) {
w->worker_max_qdepth_pu = w->worker_queue_depth;
}
if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
}
}
mutex_exit(&w->worker_lock);
if (free_it) {
ITASK_BEING_ABORTED)) == 0) {
}
}
}
{
if (ioflags & STMF_IOF_LU_DONE) {
do {
if (new & ITASK_BEING_ABORTED)
return (STMF_ABORTED);
new &= ~ITASK_KNOWN_TO_LU;
}
return (STMF_SUCCESS);
}
return (STMF_ABORTED);
task->task_status_ctrl = 0;
task->task_resid = 0;
} else if (task->task_cmd_xfer_length >
} else if (task->task_nbytes_transferred <
} else {
task->task_status_ctrl = 0;
task->task_resid = 0;
}
}
void
{
mutex_enter(&w->worker_lock);
do {
if (old & ITASK_BEING_ABORTED) {
mutex_exit(&w->worker_lock);
return;
}
free_it = 0;
kstat_it = 0;
if (iof & STMF_IOF_LPORT_DONE) {
free_it = 1;
kstat_it = 1;
}
/*
* If the task is known to LU then queue it. But if
* it is already queued (multiple completions) then
* just update the buffer information by grabbing the
* worker lock. If the task is not known to LU,
* free this task.
*/
if (old & ITASK_KNOWN_TO_LU) {
free_it = 0;
queue_it = 1;
if (old & ITASK_IN_WORKER_QUEUE) {
" when task is already in worker queue "
" task = %p", (void *)task);
}
} else {
queue_it = 0;
}
task->task_completion_status = s;
if (kstat_it) {
}
if (queue_it) {
if (w->worker_task_tail) {
} else {
w->worker_task_head = itask;
}
w->worker_task_tail = itask;
if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
w->worker_max_qdepth_pu = w->worker_queue_depth;
}
if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
}
mutex_exit(&w->worker_lock);
if (free_it) {
ITASK_BEING_ABORTED)) == 0) {
} else {
" is not done, itask %p itask_flags %x",
}
}
}
void
{
mutex_enter(&w->worker_lock);
do {
if (old & ITASK_BEING_ABORTED) {
mutex_exit(&w->worker_lock);
return;
}
if (old & ITASK_IN_WORKER_QUEUE) {
" when task is in worker queue "
" task = %p", (void *)task);
}
new &= ~ITASK_KNOWN_TO_LU;
mutex_exit(&w->worker_lock);
ITASK_BEING_ABORTED)) == 0) {
} else {
" the task is still not done, task = %p", (void *)task);
}
}
void
{
stmf_worker_t *w;
do {
if ((old & ITASK_BEING_ABORTED) ||
((old & (ITASK_KNOWN_TO_TGT_PORT |
ITASK_KNOWN_TO_LU)) == 0)) {
return;
}
task->task_completion_status = s;
return;
}
/* Queue it and get out */
mutex_enter(&w->worker_lock);
mutex_exit(&w->worker_lock);
return;
}
if (w->worker_task_tail) {
} else {
w->worker_task_head = itask;
}
w->worker_task_tail = itask;
if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
w->worker_max_qdepth_pu = w->worker_queue_depth;
}
if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
mutex_exit(&w->worker_lock);
}
void
{
stmf_status_t, s);
switch (abort_cmd) {
case STMF_QUEUE_ABORT_LU:
return;
case STMF_QUEUE_TASK_ABORT:
return;
break;
f = ITASK_KNOWN_TO_LU;
break;
default:
return;
}
f |= ITASK_BEING_ABORTED | rf;
do {
if ((old & f) != f) {
return;
}
}
void
{
char info[STMF_CHANGE_INFO_LEN];
unsigned long long st;
st = s; /* gcc fix */
if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
} else if ((iof & STMF_IOF_LU_DONE) == 0) {
"Task aborted but LU is not finished, task ="
} else {
/*
* LU abort completed successfully
*/
return;
}
}
void
{
char info[STMF_CHANGE_INFO_LEN];
unsigned long long st;
st = s;
if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
"task %p, tgt port failed to abort ret=%llx", (void *)task,
st);
} else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
"Task aborted but tgt port is not finished, "
} else {
/*
* LPORT abort completed successfully
*/
do {
if (!(old & ITASK_KNOWN_TO_TGT_PORT))
return;
} else {
}
return;
}
}
{
int i;
mutex_enter(&w->worker_lock);
mutex_exit(&w->worker_lock);
return (STMF_BUSY);
}
for (i = 0; i < itask->itask_ncmds; i++) {
mutex_exit(&w->worker_lock);
return (STMF_SUCCESS);
}
}
if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
} else {
if (t == 0)
t = 1;
}
if (w->worker_task_tail) {
} else {
w->worker_task_head = itask;
}
w->worker_task_tail = itask;
if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
w->worker_max_qdepth_pu = w->worker_queue_depth;
}
if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
}
mutex_exit(&w->worker_lock);
return (STMF_SUCCESS);
}
{
int i;
mutex_enter(&w->worker_lock);
mutex_exit(&w->worker_lock);
return (STMF_BUSY);
}
for (i = 0; i < itask->itask_ncmds; i++) {
mutex_exit(&w->worker_lock);
return (STMF_SUCCESS);
}
}
if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
} else {
if (t == 0)
t = 1;
}
if (w->worker_task_tail) {
} else {
w->worker_task_head = itask;
}
w->worker_task_tail = itask;
if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
w->worker_max_qdepth_pu = w->worker_queue_depth;
}
if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
}
mutex_exit(&w->worker_lock);
return (STMF_SUCCESS);
}
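#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): converting a poll timeout (assumed here to be in
 * milliseconds) into an absolute lbolt deadline, clamped to at least
 * one tick as in the poll routines above.  The helper name is
 * hypothetical.
 */
static clock_t
stmf_example_poll_deadline(uint32_t timeout_msec)
{
	clock_t t = drv_usectohz((clock_t)timeout_msec * 1000);

	if (t == 0)
		t = 1;
	return (ddi_get_lbolt() + t);
}
#endif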
void
{
unsigned long long ret;
char info[STMF_CHANGE_INFO_LEN];
do {
call_lu_abort = 1;
} else {
call_lu_abort = 0;
}
if (call_lu_abort) {
} else {
}
} else if (ret != STMF_SUCCESS) {
}
"lu abort timed out");
}
}
do {
if ((old & (ITASK_KNOWN_TO_TGT_PORT |
call_port_abort = 1;
} else {
call_port_abort = 0;
}
if (call_port_abort) {
} else if (ret != STMF_SUCCESS) {
"Abort failed by tgt port %p ret %llx",
}
"lport abort timed out");
}
}
}
{
if (cmd & STMF_CMD_LU_OP) {
goto stmf_ctl_lock_exit;
}
} else if (cmd & STMF_CMD_LPORT_OP) {
goto stmf_ctl_lock_exit;
}
} else {
goto stmf_ctl_lock_exit;
}
switch (cmd) {
case STMF_CMD_LU_ONLINE:
ret = STMF_ALREADY;
goto stmf_ctl_lock_exit;
}
goto stmf_ctl_lock_exit;
}
break;
goto stmf_ctl_lock_exit;
}
STMF_SUCCESS) {
} else {
/* XXX: should throw a message and record more data */
}
ret = STMF_SUCCESS;
goto stmf_ctl_lock_exit;
case STMF_CMD_LU_OFFLINE:
ret = STMF_ALREADY;
goto stmf_ctl_lock_exit;
}
goto stmf_ctl_lock_exit;
}
break;
goto stmf_ctl_lock_exit;
}
STMF_SUCCESS) {
} else {
}
break;
/*
* LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
*/
case STMF_CMD_LPORT_ONLINE:
ret = STMF_ALREADY;
goto stmf_ctl_lock_exit;
}
goto stmf_ctl_lock_exit;
}
/*
* Only a user request can recover the port from the
* FORCED_OFFLINE state
*/
ret = STMF_FAILURE;
goto stmf_ctl_lock_exit;
}
}
/*
* Avoid overly frequent online requests
*/
ilport->ilport_online_times = 0;
ilport->ilport_avg_interval = 0;
}
ret = STMF_FAILURE;
"online the port");
"online the port, set FORCED_OFFLINE now");
goto stmf_ctl_lock_exit;
}
if (ilport->ilport_online_times > 0) {
} else {
ddi_get_lbolt() -
}
}
/*
* Submit online service request
*/
break;
goto stmf_ctl_lock_exit;
}
STMF_SUCCESS) {
(stmf_local_port_t *)obj,
} else {
}
ret = STMF_SUCCESS;
goto stmf_ctl_lock_exit;
case STMF_CMD_LPORT_OFFLINE:
ret = STMF_ALREADY;
goto stmf_ctl_lock_exit;
}
goto stmf_ctl_lock_exit;
}
break;
goto stmf_ctl_lock_exit;
}
STMF_SUCCESS) {
(stmf_local_port_t *)obj,
} else {
}
break;
default:
goto stmf_ctl_lock_exit;
}
return (STMF_SUCCESS);
return (ret);
}
/* ARGSUSED */
{
return (STMF_NOT_SUPPORTED);
}
/* ARGSUSED */
{
}
bufsizep));
}
return (STMF_NOT_SUPPORTED);
}
/*
* Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
* stmf to register local ports. The ident should have 20 bytes in buffer
* space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
*/
void
{
/* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
"wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
}
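#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): formatting an 8-byte port WWN the way the comment above
 * describes.  "wwn." plus 16 hex digits is 20 bytes of ident, 21 with
 * the terminating NUL.  The helper name is hypothetical.
 */
static void
stmf_example_format_wwn(const uint8_t wwn[8], char buf[21])
{
	(void) snprintf(buf, 21, "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
	    wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
}
#endif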
{
uint8_t *p;
/* check if any ports are standby and create second group */
} else {
nports++;
}
}
/* The spec only allows for 255 ports to be reported per group */
if (nports_standby && ilu_alua) {
}
return (NULL);
}
p += 4;
p[0] = 0x80; /* PREF */
p[1] = 5; /* AO_SUP, S_SUP */
p[3] = 1; /* Group 1 */
} else {
p[3] = 0; /* Group 0 */
}
p += 8;
continue;
}
p += 4;
}
if (nports_standby && ilu_alua) {
p[0] = 0x02; /* Non PREF, Standby */
p[1] = 5; /* AO_SUP, S_SUP */
p[3] = 0; /* Group 0 */
} else {
p[3] = 1; /* Group 1 */
}
p += 8;
if (ilport->ilport_standby == 0) {
continue;
}
p += 4;
}
}
return (xd);
}
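#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): one target port group descriptor as built above (SPC-3
 * REPORT TARGET PORT GROUPS).  Byte 0 carries the PREF bit plus the
 * asymmetric access state, byte 1 the supported states
 * (AO_SUP | S_SUP = 5), bytes 2-3 the group number, byte 7 the
 * relative target port count; 4-byte port ids follow.  The helper
 * name is hypothetical.
 */
static void
stmf_example_fill_tpg_desc(uint8_t *p, int preferred, uint16_t grp,
    uint8_t nports)
{
	bzero(p, 8);
	p[0] = preferred ? 0x80 : 0x02;	/* PREF + active, or standby */
	p[1] = 5;			/* AO_SUP, S_SUP */
	p[2] = (grp >> 8) & 0xff;
	p[3] = grp & 0xff;
	p[7] = nports;			/* target port count */
}
#endif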
struct scsi_devid_desc *
{
}
break;
}
}
return (devid);
}
{
break;
}
}
return (rtpid);
}
static uint16_t stmf_lu_id_gen_number = 0;
{
}
{
uint8_t *p;
struct timeval32 timestamp32;
struct ether_addr mac;
if (company_id == COMPANY_ID_NONE)
return (STMF_INVALID_ARG);
p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
}
if (hid != 0) {
e[4] = e[5] = 0;
}
*t = BE_32(*t);
return (STMF_SUCCESS);
}
/*
* saa is sense key, ASC, ASCQ
*/
void
{
if (st == 2) {
sd[0] = 0x70;
} else {
task->task_sense_length = 0;
}
}
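#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): 'saa' packs the sense key, ASC and ASCQ into one 32-bit
 * value (key << 16 | ASC << 8 | ASCQ).  Fixed-format sense data
 * (response code 0x70) carries them at bytes 2, 12 and 13.  The
 * helper name is hypothetical.
 */
static void
stmf_example_fill_sense(uint8_t sd[18], uint32_t saa)
{
	bzero(sd, 18);
	sd[0] = 0x70;			/* current error, fixed format */
	sd[2] = (saa >> 16) & 0xf;	/* sense key */
	sd[7] = 10;			/* additional sense length */
	sd[12] = (saa >> 8) & 0xff;	/* ASC */
	sd[13] = saa & 0xff;		/* ASCQ */
}
#endif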
{
uint32_t n = 4;
uint32_t m = 0;
if (page_len < 4)
return (0);
if (page_len > 65535)
page_len = 65535;
/* CONSTCOND */
while (1) {
m += sz;
n += copysz;
}
if (vpd_mask == 0)
break;
if (vpd_mask & STMF_VPD_LU_ID) {
continue;
} else if (vpd_mask & STMF_VPD_TARGET_ID) {
continue;
} else if (vpd_mask & STMF_VPD_TP_GROUP) {
p = small_buf;
bzero(p, 8);
p[0] = 1;
p[1] = 0x15;
p[3] = 4;
ilport = (stmf_i_local_port_t *)
p[7] = 1; /* Group 1 */
}
sz = 8;
continue;
} else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
p = small_buf;
bzero(p, 8);
p[0] = 1;
p[1] = 0x14;
p[3] = 4;
ilport = (stmf_i_local_port_t *)
sz = 8;
continue;
} else {
break;
}
}
return (n);
}
void
{
stmf_i_lu_t *ilu =
if (task->task_additional_flags &
}
if (task->task_cmd_xfer_length == 0) {
return;
}
return;
}
return;
}
return;
}
}
void
{
switch (task->task_mgmt_function) {
/*
 * For now we abort all I/Os on the LU for ABORT_TASK_SET, CLEAR_TASK_SET
 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
 * in these cases. This needs to be changed to abort only the required
 * set.
 */
case TM_ABORT_TASK:
case TM_ABORT_TASK_SET:
case TM_CLEAR_TASK_SET:
case TM_LUN_RESET:
/* issue the reset to the proxy node as well */
}
return;
case TM_TARGET_RESET:
case TM_TARGET_COLD_RESET:
case TM_TARGET_WARM_RESET:
return;
default:
/* We don't support this task mgmt function */
return;
}
}
void
{
/*
* To sync with target reset, grab this lock. The LU is not going
* anywhere as there is at least one task pending (this task).
*/
return;
}
/*
* Mark this task as the one causing LU reset so that we know who
* was responsible for setting the ILU_RESET_ACTIVE. In case this
* task itself gets aborted, we will clear ILU_RESET_ACTIVE.
*/
/* Initiate abort on all commands on this LU except this one */
/* Start polling on this task */
!= STMF_SUCCESS) {
NULL);
return;
}
}
void
{
int i, lf;
/*
* To sync with LUN reset, grab this lock. The session is not going
* anywhere as there is at least one task pending (this task).
*/
/* Grab the session lock as a writer to prevent any changes in it */
return;
}
/*
* Now go through each LUN in this session and make sure all of them
* can be reset.
*/
continue;
lf++;
return;
}
}
if (lf == 0) {
/* No luns in this session */
return;
}
/* ok, start the damage */
for (i = 0; i < lm->lm_nentries; i++) {
continue;
}
for (i = 0; i < lm->lm_nentries; i++) {
continue;
}
/* Start polling on this task */
!= STMF_SUCCESS) {
NULL);
return;
}
}
int
{
return (0);
}
return (0);
}
return (1);
}
void
{
uint32_t i;
/* Make local copy of global tunables */
if (stmf_i_min_nworkers < 4) {
stmf_i_min_nworkers = 4;
}
if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
}
for (i = 0; i < stmf_i_max_nworkers; i++) {
stmf_worker_t *w = &stmf_workers[i];
}
/* Workers will be started by stmf_worker_mgmt() */
/* Let's wait for at least one worker to start */
while (stmf_nworkers_cur == 0)
}
{
int i;
return (STMF_SUCCESS);
/* Wait for all the threads to die */
while (stmf_nworkers_cur != 0) {
if (ddi_get_lbolt() > sb) {
return (STMF_BUSY);
}
}
for (i = 0; i < stmf_i_max_nworkers; i++) {
stmf_worker_t *w = &stmf_workers[i];
mutex_destroy(&w->worker_lock);
cv_destroy(&w->worker_cv);
}
stmf_workers = NULL;
return (STMF_SUCCESS);
}
void
stmf_worker_task(void *arg)
{
stmf_worker_t *w;
clock_t wait_timer = 0;
w = (stmf_worker_t *)arg;
mutex_enter(&w->worker_lock);
if ((w->worker_ref_count == 0) &&
(w->worker_flags & STMF_WORKER_TERMINATE)) {
w->worker_flags &= ~(STMF_WORKER_STARTED |
w->worker_tid = NULL;
mutex_exit(&w->worker_lock);
thread_exit();
}
/* CONSTCOND */
while (1) {
dec_qdepth = 0;
wait_timer = 0;
wait_delta = 0;
if (w->worker_wait_head) {
ASSERT(w->worker_wait_tail);
if (w->worker_task_head == NULL)
w->worker_task_head =
w->worker_wait_head;
else
w->worker_wait_head;
w->worker_task_tail = w->worker_wait_tail;
w->worker_wait_head = w->worker_wait_tail =
NULL;
}
}
break;
}
if (w->worker_task_head == NULL)
w->worker_task_tail = NULL;
wait_queue = 0;
abort_free = 0;
if (itask->itask_ncmds > 0) {
} else {
}
do {
if (old & ITASK_BEING_ABORTED) {
goto out_itask_flag_loop;
} else if ((curcmd & ITASK_CMD_MASK) ==
/*
* set ITASK_KSTAT_IN_RUNQ, this flag
* will not reset until task completed
*/
} else {
goto out_itask_flag_loop;
}
/*
* we can decrement the itask_cmd_stack.
*/
if (curcmd == ITASK_CMD_ABORT) {
wait_queue = 1;
} else {
abort_free = 1;
}
} else if ((curcmd & ITASK_CMD_POLL) &&
wait_queue = 1;
}
if (wait_queue) {
if (w->worker_wait_tail) {
} else {
w->worker_wait_head = itask;
}
w->worker_wait_tail = itask;
if (wait_timer == 0) {
}
} else if ((--(itask->itask_ncmds)) != 0) {
if (w->worker_task_tail) {
} else {
w->worker_task_head = itask;
}
w->worker_task_tail = itask;
} else {
/*
* This is where the queue depth should go down by
* one but we delay that on purpose to account for
* the call into the provider. The actual decrement
* happens after the worker has done its job.
*/
dec_qdepth = 1;
}
/* We made it here means we are going to call LU */
else
mutex_exit(&w->worker_lock);
curcmd &= ITASK_CMD_MASK;
switch (curcmd) {
case ITASK_CMD_NEW_TASK:
iss = (stmf_i_scsi_session_t *)
break;
}
#ifdef DEBUG
if (stmf_drop_task_counter > 0) {
if (atomic_add_32_nv(
-1) == 1) {
break;
}
}
#endif
break;
case ITASK_CMD_DATA_XFER_DONE:
break;
case ITASK_CMD_STATUS_DONE:
break;
case ITASK_CMD_ABORT:
if (abort_free) {
} else {
}
break;
case ITASK_CMD_POLL_LU:
if (!wait_queue) {
}
break;
case ITASK_CMD_POLL_LPORT:
if (!wait_queue)
break;
case ITASK_CMD_SEND_STATUS:
/* case ITASK_CMD_XFER_DATA: */
break;
}
mutex_enter(&w->worker_lock);
if (dec_qdepth) {
w->worker_queue_depth--;
}
}
if (w->worker_ref_count == 0)
goto stmf_worker_loop;
else {
wait_delta = 1;
}
}
w->worker_flags &= ~STMF_WORKER_ACTIVE;
if (wait_timer) {
} else {
}
w->worker_flags |= STMF_WORKER_ACTIVE;
goto stmf_worker_loop;
}
void
{
int i;
int workers_needed;
uint32_t cur_max_ntasks = 0;
stmf_worker_t *w;
/* Check if we are trying to increase the # of threads */
for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
} else {
/* Wait for transition to complete */
return;
}
}
/* Check if we are trying to decrease the # of workers */
/*
* stmf_nworkers_accepting_cmds has already been
* updated by the request to reduce the # of workers.
*/
} else {
/* Wait for transition to complete */
return;
}
}
/* Check if we are being asked to quit */
if (stmf_workers_state != STMF_WORKERS_ENABLED) {
if (stmf_nworkers_cur) {
workers_needed = 0;
}
return;
}
/* Check if we are starting */
if (stmf_nworkers_cur < stmf_i_min_nworkers) {
}
if ((stmf_wm_last != 0) &&
qd = 0;
for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
stmf_workers[i].worker_max_qdepth_pu = 0;
if (stmf_workers[i].worker_max_sys_qdepth_pu >
}
stmf_workers[i].worker_max_sys_qdepth_pu = 0;
}
}
if (d <= tps) {
/* still ramping up */
return;
}
/* max qdepth cannot be more than max tasks */
if (qd > cur_max_ntasks)
qd = cur_max_ntasks;
/* See if we have more workers */
if (qd < stmf_nworkers_accepting_cmds) {
/*
* Since we don't reduce the worker count right away, monitor
* the highest load during the scale_down_delay.
*/
if (qd > stmf_worker_scale_down_qd)
if (stmf_worker_scale_down_timer == 0) {
1000 * 1000);
return;
}
if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
return;
}
/* It's time to reduce the workers */
return;
}
if (qd > stmf_i_max_nworkers)
if (qd < stmf_i_min_nworkers)
if (qd == stmf_nworkers_cur)
return;
workers_needed = qd;
/* NOTREACHED */
return;
if (workers_needed > stmf_nworkers_cur) {
for (i = stmf_nworkers_cur; i < workers_needed; i++) {
w = &stmf_workers[i];
}
return;
}
/* At this point we know that we are decreasing the # of workers */
/* Signal the workers that its time to quit */
w = &stmf_workers[i];
mutex_enter(&w->worker_lock);
if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
mutex_exit(&w->worker_lock);
}
}
/*
* Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
* If all the data has been filled out, frees the xd and makes
* db_lu_private NULL.
*/
void
{
uint8_t *p;
int i;
uint32_t s;
dbuf->db_data_size = 0;
for (i = 0; i < dbuf->db_sglist_length; i++) {
dbuf->db_data_size += s;
return;
}
}
}
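#ifdef	STMF_EXAMPLE_SKETCH
/*
 * Illustrative sketch, not part of the original file (compiled out by
 * default): the copy loop described above, draining the staged xfer
 * data into the dbuf's scatter/gather list until either side runs
 * out.  It assumes stmf_xfer_data_t exposes size_left/size_done/buf;
 * the helper name is hypothetical.
 */
static void
stmf_example_xd_copy(stmf_xfer_data_t *xd, stmf_data_buf_t *dbuf)
{
	int i;

	dbuf->db_data_size = 0;
	for (i = 0; i < dbuf->db_sglist_length && xd->size_left; i++) {
		uint32_t s = dbuf->db_sglist[i].seg_length;

		if (s > xd->size_left)
			s = xd->size_left;
		bcopy(&xd->buf[xd->size_done], dbuf->db_sglist[i].seg_addr, s);
		xd->size_left -= s;
		xd->size_done += s;
		dbuf->db_data_size += s;
	}
}
#endif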
/* ARGSUSED */
{
return (STMF_SUCCESS);
}
void
{
uint8_t *p;
if (task->task_mgmt_function) {
return;
}
switch (cdbp[0]) {
case SCMD_INQUIRY:
/*
* Basic protocol checks. In addition, only reply to
* standard inquiry. Otherwise, the LU provider needs
* to respond.
*/
return;
}
if (task->task_additional_flags &
}
minsz = 36;
if (sz == 0) {
return;
}
/*
* Ignore any preallocated dbuf if the size is less
* than 36. It will be freed during the task_free.
*/
}
return;
}
/*
* Standard inquiry handling only.
*/
p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
p[2] = 5;
p[3] = 0x12;
p[4] = inq_page_length;
p[6] = 0x80;
dbuf->db_relative_offset = 0;
return;
case SCMD_REPORT_LUNS:
if (task->task_additional_flags &
}
if (sz < 16) {
return;
}
iss = (stmf_i_scsi_session_t *)
return;
}
return;
}
return;
}
}
void
{
return;
}
if (dbuf->db_lu_private) {
/* There is more */
return;
}
/*
* If this is a proxy task, it will need to be completed from the
* proxy port provider. This message lets pppt know that the xfer
* is complete. When we receive the status from pppt, we will
* then relay that status back to the lport.
*/
/* send xfer done status to pppt */
if (ic_xfer_done_msg) {
if (ic_ret != STMF_IC_MSG_SUCCESS) {
}
}
/* task will be completed from pppt */
return;
}
}
/* ARGSUSED */
void
{
}
/* ARGSUSED */
void
{
}
/* ARGSUSED */
{
int i;
switch (task->task_mgmt_function) {
case TM_ABORT_TASK:
case TM_ABORT_TASK_SET:
case TM_CLEAR_TASK_SET:
case TM_LUN_RESET:
break;
case TM_TARGET_RESET:
case TM_TARGET_COLD_RESET:
case TM_TARGET_WARM_RESET:
break;
}
return (STMF_ABORT_SUCCESS);
}
/*
* OK so its not a task mgmt. Make sure we free any xd sitting
* inside any dbuf.
*/
for (i = 0; i < 4; i++) {
if ((map & 1) &&
}
map >>= 1;
}
}
return (STMF_ABORT_SUCCESS);
}
void
{
/* Right now we only do this for handling task management functions */
switch (task->task_mgmt_function) {
case TM_ABORT_TASK:
case TM_ABORT_TASK_SET:
case TM_CLEAR_TASK_SET:
case TM_LUN_RESET:
return;
case TM_TARGET_RESET:
case TM_TARGET_COLD_RESET:
case TM_TARGET_WARM_RESET:
return;
}
}
/* ARGSUSED */
void
{
/* This function will never be called */
}
void
{
}
{
if (ilu->ilu_ntasks) {
do {
}
return (STMF_SUCCESS);
}
void
{
int i;
for (i = 0; i < lm->lm_nentries; i++) {
continue;
}
}
}
/*
* The return value is only used by the function managing target reset.
*/
{
int ntasks_pending;
/*
* This function is also used during Target reset. The idea is that
* once all the commands are aborted, call the LU's reset entry
* point (abort entry point with a reset flag). But if this Task
* mgmt is running on this LU then all the tasks cannot be aborted.
* One task (this task) will still be running, which is OK.
*/
(ntasks_pending == 1))) {
} else {
ret = STMF_SUCCESS;
}
if (ret == STMF_SUCCESS) {
}
if (target_reset) {
return (ret);
}
if (ret == STMF_SUCCESS) {
return (ret);
}
return (ret);
}
}
if (target_reset) {
/* Tell target reset polling code that we are not done */
return (STMF_BUSY);
}
!= STMF_SUCCESS) {
return (STMF_SUCCESS);
}
return (STMF_SUCCESS);
}
void
{
int i;
int not_done = 0;
for (i = 0; i < lm->lm_nentries; i++) {
continue;
if (ret == STMF_SUCCESS)
continue;
not_done = 1;
STMF_ABORTED, NULL);
return;
}
}
}
if (not_done) {
!= STMF_SUCCESS) {
return;
}
return;
}
}
{
return (STMF_INVALID_ARG);
}
return (STMF_SUCCESS);
}
{
if (eventid == STMF_EVENT_ALL) {
return (STMF_SUCCESS);
}
return (STMF_INVALID_ARG);
}
return (STMF_SUCCESS);
}
{
return (STMF_INVALID_ARG);
}
return (STMF_SUCCESS);
}
{
if (eventid == STMF_EVENT_ALL) {
return (STMF_SUCCESS);
}
return (STMF_INVALID_ARG);
}
return (STMF_SUCCESS);
}
void
{
}
}
void
{
}
}
void
{
return;
TASKQ_DEFAULTPRI, 0);
}
{
uint32_t i;
}
/* Wait for 5 seconds */
for (i = 0; i < 500; i++) {
else
break;
}
if (i == 500)
return (STMF_BUSY);
return (STMF_SUCCESS);
}
/* ARGSUSED */
void
{
clock_t worker_delay = 0;
int deq;
return;
}
if (stmf_state.stmf_svc_active) {
int waitq_add = 0;
case STMF_CMD_LPORT_ONLINE:
/* Fallthrough */
case STMF_CMD_LPORT_OFFLINE:
/* Fallthrough */
case STMF_CMD_LU_ONLINE:
/* Nothing to do */
waitq_add = 1;
break;
case STMF_CMD_LU_OFFLINE:
/* Remove all mappings of this LU */
waitq_add = 1;
break;
default:
}
if (waitq_add) {
/* Put it in the wait queue */
}
}
/* The waiting list is not going to be modified by anybody else */
deq = 0;
case STMF_CMD_LU_ONLINE:
deq = 1;
break;
case STMF_CMD_LU_OFFLINE:
break;
deq = 1;
break;
case STMF_CMD_LPORT_OFFLINE:
/* Fallthrough */
case STMF_CMD_LPORT_ONLINE:
deq = 1;
break;
}
if (deq) {
} else {
}
}
/* Do timeouts */
if (stmf_state.stmf_nlus &&
if (!stmf_state.stmf_svc_ilu_timing) {
/* we are starting a new round */
}
if (!stmf_state.stmf_svc_ilu_timing) {
/* we finished a complete round */
} else {
/* we still have some ilu items to check */
}
goto stmf_svc_loop;
}
/* Check if there are free tasks to clear */
if (stmf_state.stmf_nlus &&
if (!stmf_state.stmf_svc_ilu_draining) {
/* we are starting a new round */
drain_start = ddi_get_lbolt();
}
if (!stmf_state.stmf_svc_ilu_draining) {
/* we finished a complete round */
} else {
/* we still have some ilu items to check */
}
goto stmf_svc_loop;
}
/* Check if we need to run worker_mgmt */
if (ddi_get_lbolt() > worker_delay) {
worker_delay = ddi_get_lbolt() +
}
/* Check if any active session got its 1st LUN */
int stmf_level = 0;
int port_level;
ilport = next_ilport) {
if ((ilport->ilport_flags &
ILPORT_SS_GOT_INITIAL_LUNS) == 0) {
continue;
}
port_level = 0;
ISS_GOT_INITIAL_LUNS) == 0) {
continue;
}
port_level++;
stmf_level++;
/*
* scan all the ilports again as the
* ilport list might have changed.
*/
break;
}
if (port_level == 0) {
}
/* drop the lock if we are holding it. */
/* Max 4 sessions at a time */
if (stmf_level >= 4) {
break;
}
}
if (stmf_level == 0) {
}
}
}
goto stmf_svc_loop;
}
void
{
int s;
s = sizeof (stmf_svc_req_t);
if (info->st_additional_info) {
}
if (info->st_additional_info) {
sizeof (stmf_svc_req_t)));
}
req->svc_req_alloc_size = s;
}
}
void
{
char tbuf[160];
int len;
if (!stmf_trace_on)
return;
ddi_get_lbolt());
if (len > 158) {
len = 158;
}
trace_buf_curndx += len;
trace_buf_curndx = 0;
}
void
{
if (!stmf_trace_on)
return;
trace_buf_curndx = 0;
if (trace_buf_size > 0)
stmf_trace_buf[0] = 0;
}
static void
{
void *ctl_private;
int msg = 0;
if (offline_lu) {
if (((stmf_i_lu_t *)
msg = 1;
}
} else {
if (((stmf_i_local_port_t *)
msg = 1;
}
}
if (msg) {
stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
"<no additional info>");
}
}