/* sf.c revision 3d19cdae966d9ac4218dd9859640463bd7da19d8 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* sf - Solaris Fibre Channel driver
*
* This module implements some of the Fibre Channel FC-4 layer, converting
* from FC frames to SCSI and back. (Note: no sequence management is done
* here, though.)
*/
#define DEBUG 1
#endif
/*
* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
* Need to use the ugly RAID LUN mappings in FCP Annex D
* to prevent SCSA from barfing. This *REALLY* needs to
* be addressed by the standards committee.
*/
#define RAID_LUNS 1
#ifdef DEBUG
static int sfdebug = 0;
#else
#endif
static int sf_bus_config_debug = 0;
/* Why do I have to do this? */
#include <sys/socal_cq_defs.h>
static void sf_softstate_unlink(struct sf *);
scsi_hba_tran_t *, struct scsi_device *);
scsi_hba_tran_t *, struct scsi_device *);
int, int, int);
static int sf_scsi_reset_notify(struct scsi_address *, int,
static int sf_scsi_get_name(struct scsi_device *, char *, int);
static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
static int sf_add_cr_pool(struct sf *);
static void sf_crpool_free(struct sf *);
static int sf_kmem_cache_constructor(void *, void *, int);
static void sf_kmem_cache_destructor(void *, void *);
static void sf_statec_callback(void *, int);
static void sf_els_callback(struct fcal_packet *);
struct sf_target *);
static void sf_reportlun_callback(struct fcal_packet *);
struct sf_target *);
static void sf_inq_callback(struct fcal_packet *);
static void sf_els_free(struct fcal_packet *);
struct sf_els_hdr *, int, int64_t);
#ifdef RAID_LUNS
#else
#endif
static void sf_finish_init(struct sf *, int);
static int sf_commoncap(struct scsi_address *, char *, int, int, int);
static int sf_getcap(struct scsi_address *, char *, int);
static int sf_setcap(struct scsi_address *, char *, int, int);
static int sf_reset(struct scsi_address *, int);
static void sf_cmd_callback(struct fcal_packet *);
static void sf_throttle(struct sf *);
static void sf_watch(void *);
static void sf_throttle_start(struct sf *);
static void sf_check_targets(struct sf *);
static void sf_check_reset_delay(void *);
static void sf_force_lip(struct sf *);
/*PRINTFLIKE3*/
static int sf_kstat_update(kstat_t *, int);
ddi_eventcookie_t, void *);
static void sf_hp_daemon(void *);
/*
* this is required to be able to supply a control node
* where ioctls can be executed
*/
sf_open, /* open */
sf_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
sf_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* cb_prop_op */
0, /* streamtab */
};
/*
* autoconfiguration routines.
*/
DEVO_REV, /* devo_rev, */
0, /* refcnt */
sf_info, /* info */
nulldev, /* identify */
nulldev, /* probe */
sf_attach, /* attach */
sf_detach, /* detach */
nodev, /* reset */
&sf_cb_ops, /* driver operations */
NULL, /* bus operations */
NULL /* power management */
};
/* to ensure this module gets loaded in memory when we do */
char _depends_on[] = "misc/scsi";
&mod_driverops, /* Type of module. This one is a driver */
SF_NAME "%I%",
&sf_ops, /* driver ops */
};
static struct modlinkage modlinkage = {
};
/* XXXXXX The following is here to handle broken targets -- remove it later */
static int sf_reportlun_forever = 0;
/* XXXXXX */
/* bool: presumably force a LIP when a PLOGO is received -- confirm at use site */
static int sf_lip_on_plogo = 0;
/* retry limit for ELS (extended link service) requests */
static int sf_els_retries = SF_ELS_RETRIES;
/* per-pass scan counts/intervals used by the periodic watch logic */
static int sf_target_scan_cnt = 4;
static int sf_pkt_scan_cnt = 5;
static int sf_pool_scan_cnt = 1800;
/*
 * watchdog bookkeeping: sf_watchdog_init counts live instances (incremented
 * in attach/resume, decremented in detach/suspend; the first up/last down
 * starts/stops the watchdog timeout)
 */
static int sf_watchdog_init = 0;
static int sf_watchdog_time = 0;
static int sf_watchdog_timeout = 1;
static int sf_watchdog_tick;
/* bool: set while sf_watch() is walking the softstate list (see sf_softstate_unlink) */
static int sf_watch_running = 0;
/* timeout(9F) ids for the watchdog and the reset-delay callback */
static timeout_id_t sf_watchdog_id;
static timeout_id_t sf_reset_timeout_id;
/* upper bound on valid target numbers (rejected in tgt init if exceeded) */
static int sf_max_targets = SF_MAX_TARGETS;
/* protects the module-global state above; sf_watch_cv is signalled under it */
static kmutex_t sf_global_mutex;
/* debug aid; cleared in sf_statec_callback() -- exact semantics not visible here */
static int sf_core = 0;
static kcondvar_t sf_watch_cv;
extern pri_t minclsyspri;
/* NDI event cookies -- presumably the hotplug insert/remove events; verify */
static ddi_eventcookie_t sf_insert_eid;
static ddi_eventcookie_t sf_remove_eid;
/* NDI event definition table (contents not visible in this revision) */
static ndi_event_definition_t sf_event_defs[] = {
};
#define SF_N_NDI_EVENTS \
(sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))
#ifdef DEBUG
static int sf_abort_flag = 0; /* bool: to do just one abort */
#endif
/*
 * Table translating a target number (hard-address "switch" setting,
 * 0..125) to the corresponding FC-AL AL_PA (arbitrated-loop physical
 * address).  There are 126 entries -- the valid AL_PA values in
 * descending priority order.  sf_alpa_to_switch below is the inverse
 * mapping.
 */
static uchar_t sf_switch_to_alpa[] = {
0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
/*
 * Inverse of sf_switch_to_alpa above: indexed by AL_PA (0x00-0xff),
 * yields the target number ("switch" setting).  Example: AL_PA 0x01
 * maps to 0x7d (125) and AL_PA 0xe8 maps to 0x01, matching the table
 * above.  A value of 0 means either switch 0 (for AL_PA 0xef) or an
 * AL_PA that is not a valid loop address.
 */
static uchar_t sf_alpa_to_switch[] = {
0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/*
 * these macros call the proper transport-layer function given
 * a particular transport
 *
 * 'a' is a transport pointer (dereferenced as a->fcal_ops), so each
 * macro dispatches through that transport's fcal_ops function vector;
 * the remaining arguments are forwarded unchanged.  Note that the
 * macro arguments are NOT parenthesized in the expansion, so callers
 * must pass simple expressions (in particular 'a' must not be a
 * binary expression).
 */
#define soc_transport_poll(a, b, c, d)\
(*a->fcal_ops->fcal_transport_poll)(b, c, d)
#define soc_get_lilp_map(a, b, c, d, e)\
(*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
#define soc_force_lip(a, b, c, d, e)\
(*a->fcal_ops->fcal_force_lip)(b, c, d, e)
#define soc_abort(a, b, c, d, e)\
(*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
#define soc_force_reset(a, b, c, d)\
(*a->fcal_ops->fcal_force_reset)(b, c, d)
#define soc_add_ulp(a, b, c, d, e, f, g, h)\
(*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
#define soc_remove_ulp(a, b, c, d, e)\
(*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
/* power management property defines (should be in a common include file?) */
#define PM_HARDWARE_STATE_PROP "pm-hardware-state"
#define PM_NEEDS_SUSPEND_RESUME "needs-suspend-resume"
/* node properties */
#define NODE_WWN_PROP "node-wwn"
#define PORT_WWN_PROP "port-wwn"
#define LIP_CNT_PROP "lip-count"
#define TARGET_PROP "target"
#define LUN_PROP "lun"
/*
* initialize this driver and install this module
*/
int
_init(void)
{
	/*
	 * BUG FIX: 'i' was declared uninitialized and then immediately
	 * read by "if (i != 0)" -- undefined behavior.  The assignment
	 * that originally set it (presumably a soft-state init call)
	 * appears to have been lost from this revision; initializing to
	 * zero keeps the happy path well-defined.  TODO(review): restore
	 * the missing ddi_soft_state_init()-style call and its error
	 * path from the original driver source.
	 */
	int i = 0;

	if (i != 0)
		return (i);

	/* register this module with the SCSA HBA framework */
	if ((i = scsi_hba_init(&modlinkage)) != 0) {
		return (i);
	}

	/* no watchdog is running until an instance attaches */
	sf_watch_running = 0;

	/*
	 * NOTE(review): on mod_install() failure the scsi_hba_init()
	 * registration is not unwound here (no scsi_hba_fini() call is
	 * visible in this revision) -- confirm against the original.
	 */
	if ((i = mod_install(&modlinkage)) != 0) {
		return (i);
	}

	return (i);
}
/*
* remove this driver module from the system
*/
int
_fini(void)
{
	/* let mod_remove() veto the unload if the driver is still busy */
	int status = mod_remove(&modlinkage);

	if (status == 0) {
		/*
		 * NOTE(review): per-module teardown (HBA/soft-state
		 * release) presumably lived here originally; the branch
		 * is empty in this revision.
		 */
	}
	return (status);
}
int
{
}
/*
* Given the device number return the devinfo pointer or instance
*/
/*ARGSUSED*/
static int
{
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
else {
return (DDI_FAILURE);
}
break;
case DDI_INFO_DEVT2INSTANCE:
break;
default:
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* either attach or resume this driver
*/
static int
{
int instance;
int mutex_initted = FALSE;
struct fcal_transport *handle;
char buf[64];
int handle_bound = FALSE;
switch ((int)cmd) {
case DDI_RESUME:
/*
* we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
* so time to undo that and get going again by forcing a
* lip
*/
"sf_attach: DDI_RESUME for sf%d\n", instance));
return (DDI_FAILURE);
}
/*
* clear suspended flag so that normal operations can resume
*/
/*
* force a login by setting our state to offline
*/
/*
* call transport routine to register state change and
* ELS callback routines (to register us as a ULP)
*/
/*
* call transport routine to force loop initialization
*/
/*
* increment watchdog init flag, setting watchdog timeout
* if we are the first (since somebody has to do it)
*/
if (!sf_watchdog_init++) {
(caddr_t)0, sf_watchdog_tick);
} else {
}
return (DDI_SUCCESS);
case DDI_ATTACH:
/*
* this instance attaching for the first time
*/
DDI_SUCCESS) {
instance);
return (DDI_FAILURE);
}
"sf_attach: DDI_ATTACH for sf%d\n", instance));
/* this shouldn't happen since we just allocated it */
return (DDI_FAILURE);
}
/*
* from this point on, if there's an error, we must de-allocate
* soft state before returning DDI_FAILURE
*/
"sf%d: failed to obtain transport handle",
instance);
goto fail;
}
/* fill in our soft state structure */
sf->sf_check_n_close = 0;
instance);
goto fail;
}
/* create a a cache for this instance */
sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
sizeof (struct scsi_pkt), 8,
instance);
goto fail;
}
/* set up a handle and allocate memory for DMA */
sf_lilp_dmahandle) != DDI_SUCCESS) {
"sf%d: failed to allocate dma handle for lilp map",
instance);
goto fail;
}
i = sizeof (struct fcal_lilp_map) + 1;
i, sf->sf_sochandle->
instance);
goto fail;
}
if (real_size < i) {
/* no error message ??? */
goto fail; /* trouble allocating memory */
}
/*
* set up the address for the DMA transfers (getting a cookie)
*/
"sf%d: failed to bind dma handle for lilp map",
instance);
goto fail;
}
handle_bound = TRUE;
/* ensure only one cookie was allocated */
if (ccount != 1) {
goto fail;
}
/* ensure LILP map and DMA cookie addresses are even?? */
/* set up all of our mutexes and condition variables */
/* create our devctl minor node */
DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
" for devctl", instance);
goto fail;
}
/* create fc minor node */
0) != DDI_SUCCESS) {
" for fc", instance);
goto fail;
}
/* allocate a SCSI transport structure */
/* remove all minor nodes created */
instance);
goto fail;
}
/* save ptr to new transport structure and fill it in */
/*
* register event notification routines with scsa
*/
/*
* register bus configure/unconfigure
*/
/*
* allocate an ndi event handle
*/
sizeof (sf_event_defs));
goto fail;
}
/* setup and attach SCSI hba transport */
instance);
goto fail;
}
/* set up kstats */
KSTAT_FLAG_VIRTUAL)) == NULL) {
instance);
} else {
}
/* create the hotplug thread */
/* add this soft state instance to the head of the list */
/*
* find entry in list that has the same FC-AL handle (if any)
*/
break; /* found matching entry */
}
}
/* if we found a matching entry keep track of it */
}
/*
* increment watchdog init flag, setting watchdog timeout
* if we are the first (since somebody has to do it)
*/
if (!sf_watchdog_init++) {
drv_usectohz(1000000);
} else {
}
/*
* set up matching entry to be our sibling
*/
}
/*
* create this property so that PM code knows we want
* to be suspended at PM time
*/
/* log the fact that we have a new device */
/*
* force a login by setting our state to offline
*/
/*
* call transport routine to register state change and
* ELS callback routines (to register us as a ULP)
*/
/*
* call transport routine to force loop initialization
*/
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
fail:
/*
* Unbind and free event set
*/
if (sf->sf_event_hdl) {
}
if (sf->sf_event_defs) {
}
}
}
if (handle_bound) {
}
}
}
}
}
if (mutex_initted) {
}
/*
* kill off the watchdog if we are the last instance
*/
if (!--sf_watchdog_init) {
} else {
}
/* remove all minor nodes */
}
return (DDI_FAILURE);
}
/* ARGSUSED */
static int
{
int instance;
int i;
/* NO OTHER THREADS ARE RUNNING */
return (DDI_FAILURE);
}
switch (cmd) {
case DDI_SUSPEND:
/*
* suspend our instance
*/
"sf_detach: DDI_SUSPEND for sf%d\n", instance));
/*
* There is a race condition in socal where while doing
* callbacks if a ULP removes it self from the callback list
* the for loop in socal may panic as cblist is junk and
* while trying to get cblist->next the system will panic.
*/
/* call transport to remove our unregister our callbacks */
/*
* begin process of clearing outstanding commands
* by issuing a lip
*/
/*
* toggle the device OFFLINE in order to cause
* outstanding commands to drain
*/
sf->sf_lip_cnt++;
for (i = 0; i < sf_max_targets; i++) {
}
/* do this for all LUNs as well */
}
}
}
}
/*
* kill off the watchdog if we are the last instance
*/
if (!--sf_watchdog_init) {
} else {
}
return (DDI_SUCCESS);
case DDI_DETACH:
/*
* detach this instance
*/
"sf_detach: DDI_DETACH for sf%d\n", instance));
/* remove this "sf" from the list of sf softstates */
/*
* prior to taking any DDI_DETACH actions, toggle the
* device OFFLINE in order to cause outstanding
* commands to drain
*/
sf->sf_lip_cnt++;
for (i = 0; i < sf_max_targets; i++) {
}
}
}
}
}
/* call transport to remove and unregister our callbacks */
/*
* kill off the watchdog if we are the last instance
*/
if (!--sf_watchdog_init) {
} else {
}
/* signal sf_hp_daemon() to exit and wait for exit */
/*
* Unbind and free event set
*/
if (sf->sf_event_hdl) {
}
if (sf->sf_event_defs) {
}
/* detach this instance of the HBA driver */
/* deallocate/unbind DMA handle for lilp map */
}
}
/*
* the kmem cache must be destroyed before free'ing
* up the crpools
*
* our finagle of "ntot" and "nfree"
* causes an ASSERT failure in "sf_cr_free()"
* if the kmem cache is free'd after invoking
* "sf_crpool_free()".
*/
"sf_detach: sf_crpool_free() for instance 0x%x\n",
instance));
/*
* set ntot to nfree for this particular entry
*
* this causes sf_crpool_free() to update
* the cr_pool list when deallocating this entry
*/
}
/*
* now that the cr_pool's are gone it's safe
* to destroy all softstate mutex's and cv's
*/
/* remove all minor nodes from the device tree */
/* remove properties created during attach() */
/* remove kstat's if present */
}
"sf_detach: ddi_soft_state_free() for instance 0x%x\n",
instance));
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*
* sf_softstate_unlink() - remove an sf instance from the list of softstates
*/
static void
{
struct sf *sf_found_sibling;
while (sf_watch_running) {
/* Busy working the list -- wait */
}
/*
* we have a sibling so NULL out its reference to us
*/
}
/* remove our instance from the global list */
/* we were at at head of the list */
} else {
/* find us in the list */
break;
}
/* remember this place */
}
}
}
static int
{
if (reset_delay < 0)
reset_delay = 0;
if (sf_bus_config_debug)
flag |= NDI_DEVI_DEBUG;
}
static int
{
if (sf_bus_config_debug)
flag |= NDI_DEVI_DEBUG;
}
/*
* called by transport to initialize a SCSI target
*/
/* ARGSUSED */
static int
{
#ifdef RAID_LUNS
int lun;
#else
#endif
int i, t_len;
unsigned int lip_cnt;
unsigned char wwn[FC_WWN_SIZE];
/* get and validate our SCSI target ID */
if (i >= sf_max_targets) {
return (DDI_NOT_WELL_FORMED);
}
/* get our port WWN property */
/* no port WWN property - ignore the OBP stub node */
return (DDI_NOT_WELL_FORMED);
}
/* get our LIP count property */
return (DDI_FAILURE);
}
/* and our LUN property */
return (DDI_FAILURE);
}
/* find the target structure for this instance */
return (DDI_FAILURE);
}
& SF_TARGET_INIT_DONE)) {
/*
* set links between HBA transport and target structures
* and set done flag
*/
} else {
/* already initialized ?? */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* called by transport to free a target
*/
/* ARGSUSED */
static void
{
}
}
/*
* allocator for non-std size cdb/pkt_private/status -- return TRUE iff
* success, else return FALSE
*/
/*ARGSUSED*/
static int
{
if (tgtlen > PKT_PRIV_LEN) {
} else {
}
}
if (statuslen > EXTCMDS_STATUS_SIZE) {
} else {
}
}
if (failure) {
}
return (failure);
}
/*
* deallocator for non-std size cdb/pkt_private/status
*/
static void
{
"sf_scsi_impl_pktfree: freeing free packet");
/* NOTREACHED */
}
}
}
}
/*
* create or initialize a SCSI packet -- called internally and
* by the transport
*/
static struct scsi_pkt *
{
int kf;
struct fcal_packet *fpkt;
/*
* If we've already allocated a pkt once,
* this request is for dma allocation only.
*/
/*
* First step of sf_scsi_init_pkt: pkt allocation
*/
if (cmdlen > FCP_CDB_SIZE) {
return (NULL);
}
/*
* Selective zeroing of the pkt.
*/
sizeof (struct sf_pkt) + sizeof (struct
fcal_packet));
sizeof (struct sf_pkt));
/* zero pkt_private */
} else {
}
if (failure ||
(tgtlen > PKT_PRIV_LEN) ||
(statuslen > EXTCMDS_STATUS_SIZE)) {
if (!failure) {
/* need to allocate more space */
}
if (failure) {
return (NULL);
}
}
return (NULL);
}
/* fill in the FC-AL packet */
fpkt->fcal_pkt_flags = 0;
sizeof (struct fcp_cmd);
/* Fill in the Fabric Channel Header */
/* Establish the LUN */
}
} else {
/* pkt already exists -- just a request for DMA allocation */
}
/* zero cdb (bzero is too slow) */
/*
* Second step of sf_scsi_init_pkt: dma allocation
* Set up dma info
*/
int rval = 0;
/* there is a buffer and some data to transfer */
/* set up command and DMA flags */
/* a read */
cmd_flags &= ~CFLAG_DMASEND;
} else {
/* a write */
}
if (flags & PKT_CONSISTENT) {
}
/* ensure we have a DMA handle */
&cmd->cmd_dmahandle);
}
if (rval == 0) {
/* bind our DMA handle to our buffer */
}
if (rval != 0) {
/* DMA failure */
switch (rval) {
case DDI_DMA_NORESOURCES:
break;
case DDI_DMA_BADATTR:
case DDI_DMA_NOMAPPING:
break;
case DDI_DMA_TOOBIG:
default:
break;
}
/* clear valid flag */
/* destroy packet if we just created it */
}
return (NULL);
}
/* set up amt to transfer and set valid flag */
}
/* set up FC-AL packet */
/* DMA write */
} else {
/* DMA read */
}
} else {
/* not a read or write */
sizeof (struct fcp_cmd);
fcmd->fcp_data_len = 0;
}
}
/*
* destroy a SCSI packet -- called internally and by the transport
*/
static void
{
/* DMA was set up -- clean up */
}
/* take this packet off the doubly-linked list */
fpkt->fcal_pkt_flags = 0;
/* free the packet */
/* just a regular packet */
} else {
/* a packet with extra memory */
}
}
/*
* called by transport to unbind DMA handle
*/
/* ARGSUSED */
static void
{
}
}
/*
* called by transport to synchronize CPU and I/O views of memory
*/
/* ARGSUSED */
static void
{
DDI_SUCCESS) {
}
}
}
/*
* routine for reset notification setup, to register or cancel. -- called
* by transport
*/
static int
{
}
/*
* called by transport to get port WWN property (except sun4u)
*/
/* ARGSUSED */
static int
{
unsigned char wwn[FC_WWN_SIZE];
int i, lun;
i = sizeof (wwn);
name[0] = '\0';
return (0);
}
i = sizeof (lun);
name[0] = '\0';
return (0);
}
for (i = 0; i < FC_WWN_SIZE; i++)
return (1);
}
/*
* called by transport to get target soft AL-PA (except sun4u)
*/
/* ARGSUSED */
static int
{
return (0);
return (1);
}
/*
*/
static int
{
int cmd_buf_size;
int rsp_buf_size;
struct sf_cr_pool *ptr;
struct sf_cr_free_elem *cptr;
/* allocate room for the pool */
NULL) {
return (DDI_FAILURE);
}
/* allocate a DMA handle for the command pool */
goto fail;
}
/*
* Get a piece of memory in which to put commands
*/
goto fail;
}
/* bind the DMA handle to an address */
goto fail;
}
/* ensure only one cookie was allocated */
if (ccount != 1) {
goto fail;
}
/* allocate a DMA handle for the response pool */
goto fail;
}
/*
* Get a piece of memory in which to put responses
*/
goto fail;
}
/* bind the DMA handle to an address */
goto fail;
}
/* ensure only one cookie was allocated */
if (ccount != 1) {
goto fail;
}
/*
*/
/* ensure ptr points to start of long word (8-byte block) */
/* keep track of actual size after moving pointer */
/* set actual total number of entries */
/* set up DMA for each pair of entries */
i = 0;
i++;
}
/* terminate the list */
/* add this list at front of current one */
sf->sf_cr_pool_cnt++;
return (DDI_SUCCESS);
fail:
/* we failed so clean up */
if (cmd_bound) {
}
}
if (rsp_bound) {
}
}
}
}
return (DDI_FAILURE);
}
/*
* in the pool as needed
*/
static int
{
struct sf_cr_pool *ptr;
struct sf_cr_free_elem *cptr;
/* find a free buffer in the existing pool */
break;
} else {
}
}
/* did we find a free buffer ? */
/* we found a free buffer -- take it off the free list */
/* set up the command to use the buffer pair */
return (DDI_SUCCESS); /* success */
}
/* no free buffer available -- can we allocate more ? */
/* we need to allocate more buffer pairs */
if (sf->sf_cr_flag) {
/* somebody already allocating for this instance */
if (func == SLEEP_FUNC) {
/* user wants to wait */
/* we've been woken so go try again */
goto try_again;
}
/* user does not want to wait */
return (DDI_FAILURE); /* give up */
}
/* set flag saying we're allocating */
/* add to our pool */
/* couldn't add to our pool for some reason */
sf->sf_cr_flag = 0;
return (DDI_FAILURE); /* give up */
}
/*
* clear flag saying we're allocating and tell all other
* that care
*/
sf->sf_cr_flag = 0;
/* now that we have more buffers try again */
goto try_again;
}
/* we don't have room to allocate any more buffers */
return (DDI_FAILURE); /* give up */
}
/*
*/
static void
{
struct sf_cr_free_elem *elem;
}
/*
*/
static void
{
} else {
}
sf->sf_cr_pool_cnt--;
return;
}
}
}
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static void
{
}
}
}
/*
* called by transport when a state change occurs
*/
static void
{
int i;
switch (msg) {
case FCAL_STATUS_LOOP_ONLINE: {
int ret; /* ret value from getmap */
int lip_cnt; /* to save current count */
int cnt; /* map length */
/*
* the loop has gone online
*/
sf->sf_lip_cnt++;
/* scan each target hash queue */
for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
/*
* foreach target, if it's not offline then
* mark it as busy
*/
| SF_TARGET_MARK);
#ifdef DEBUG
/*
* for debugging, print out info on any
* pending commands (left hanging)
*/
sft_pkt_head) {
"cmd 0x%p pending "
"after lip\n",
(void *)cmd->cmd_fp_pkt));
}
}
#endif
}
}
/*
* since the loop has just gone online get a new map from
* the transport
*/
sf_core = 0;
}
"!soc lilp map failed status=0x%x\n", ret);
sf->sf_lip_cnt++;
return;
}
/* ensure consistent view of DMA memory */
/* how many entries in map ? */
if (cnt >= SF_MAX_LILP_ENTRIES) {
return;
}
/*
* since the last entry of the map may be mine (common) check
* for that, and if it is we have one less entry to look at
*/
cnt--;
}
/* If we didn't get a valid loop map enable all targets */
for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
cnt = i;
}
if (sf->sf_device_count == 0) {
break;
}
"!statec_callback: starting with %d targets\n",
sf->sf_device_count));
/* scan loop map, logging into all ports (except mine) */
for (i = 0; i < cnt; i++) {
"!lilp map entry %d = %x,%x\n", i,
/* is this entry for somebody else ? */
/* do a PLOGI to this port */
lip_cnt)) {
/* a problem logging in */
/*
* problem not from a new LIP
*/
sf->sf_device_count--;
>= 0);
if (sf->sf_device_count == 0) {
lip_cnt);
}
}
}
}
}
break;
}
case FCAL_STATUS_ERR_OFFLINE:
/*
* loop has gone offline due to an error
*/
sf->sf_lip_cnt++;
if (!sf->sf_online_timer) {
}
/*
* if we are suspended, preserve the SF_STATE_SUSPENDED flag,
* since throttling logic in sf_watch() depends on
* preservation of this flag while device is suspended
*/
"sf_statec_callback, sf%d: "
"got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
} else {
}
/* scan each possible target on the loop */
for (i = 0; i < sf_max_targets; i++) {
| SF_TARGET_MARK);
}
}
break;
case FCAL_STATE_RESET: {
/*
* a transport reset
*/
sf->sf_lip_cnt++;
/*
* if we are suspended, preserve the SF_STATE_SUSPENDED flag,
* since throttling logic in sf_watch() depends on
* preservation of this flag while device is suspended
*/
"sf_statec_callback, sf%d: "
"got FCAL_STATE_RESET during DDI_SUSPEND\n",
} else {
}
/*
* scan each possible target on the loop, looking for targets
* that need callbacks ran
*/
for (i = 0; i < sf_max_targets; i++) {
| SF_TARGET_MARK);
/*
* run remove event callbacks for lun
*
* We have a nasty race condition here
* 'cause we're dropping this mutex to
* run the callback and expect the
* linked list to be the same.
*/
(void) ndi_event_retrieve_cookie(
(void) ndi_event_run_callbacks(
}
}
}
/*
* scan for ELS commands that are in transport, not complete,
* and have a valid timeout, building a private list
*/
/*
* cmd in transport && not complete &&
* timeout valid
*
* move this entry from ELS input list to our
* private list
*/
/* push this on private list head */
/* remove this entry from input list */
/*
* remove this entry from somewhere in
* the middle of the list
*/
}
} else {
/*
* remove this entry from the head
* of the list
*/
}
}
} else {
}
}
/*
* foreach cmd in our list free the ELS packet associated
* with it
*/
}
/*
* scan for commands from each possible target
*/
for (i = 0; i < sf_max_targets; i++) {
/*
* scan all active commands for this target,
* looking for commands that have been issued,
* are in transport, and are not yet complete
* (so we can terminate them because of the
* reset)
*/
sft_pkt_head) {
SF_STATE_ISSUED) &&
(fpkt->fcal_cmd_state &
(!(fpkt->fcal_cmd_state &
FCAL_CMD_COMPLETE))) {
/* a command to be reset */
pkt->pkt_statistics |=
mutex_exit(&cmd->
mutex_exit(&target->
}
} else {
mutex_exit(&cmd->
/* get next command */
}
}
}
}
/*
* get packet queue for this target, resetting all remaining
* commands
*/
}
}
break;
}
default:
break;
}
}
/*
* called to send a PLOGI (N_port login) ELS request to a destination ID,
* returning TRUE upon success, else returning FALSE
*/
static int
int lip_cnt)
{
struct la_els_logi *logi;
struct sf_els_hdr *privp;
sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
return (FALSE);
}
if (els_code == LA_ELS_PLOGI) {
- 4);
} else if (els_code == LA_ELS_LOGO) {
}
}
/*
* send an ELS IU via the transport,
* returning TRUE upon success, else returning FALSE
*/
static int
{
}
/* call the transport to send a packet */
}
}
}
return (FALSE); /* failure */
}
return (TRUE); /* success */
}
/*
* called as the pkt_comp routine for ELS FC packets
*/
static void
{
int tgt_id;
short ncmds;
/*
* we've received an ELS callback, i.e. an ELS packet has arrived
*/
/* take the current packet off of the queue */
return;
}
}
}
}
/* get # pkts in this callback */
/* sync idea of memory */
/* was this an OK ACC msg ?? */
/*
* this was an OK ACC pkt
*/
case LA_ELS_PLOGI:
/*
* was able to to an N_port login
*/
"!PLOGI to al_pa %x succeeded, wwn %x%x\n",
/* try to do a process login */
goto fail; /* PRLI failed */
}
break;
case LA_ELS_PRLI:
/*
* was able to do a process login
*/
"!PRLI to al_pa %x succeeded\n",
privp->dest_nport_id));
/* try to do address discovery */
goto fail; /* ADISC failed */
}
break;
case LA_ELS_ADISC:
/*
* found a target via ADISC
*/
"!ADISC to al_pa %x succeeded\n",
privp->dest_nport_id));
/* create the target info */
(int64_t)0))
== NULL) {
goto fail; /* can't create target */
}
/*
* ensure address discovered matches what we thought
* it would be
*/
privp->dest_nport_id) {
"target 0x%x, AL-PA 0x%x and "
"hard address 0x%x don't match\n",
goto fail; /* addr doesn't match */
}
/*
* get inquiry data from the target
*/
goto fail; /* inquiry failed */
}
break;
default:
"!ELS %x to al_pa %x succeeded\n",
break;
}
} else {
/*
* oh oh -- this was not an OK ACC packet
*/
/* get target ID from dest loop address */
/* keep track of failures */
if (fpkt->fcal_pkt_status ==
}
}
"!ELS %x to al_pa %x failed, retrying",
}
/* device busy? wait a bit ... */
if (fpkt->fcal_pkt_status ==
return;
}
/* call the transport to send a pkt */
FCAL_NOSLEEP, CQ_REQUEST_1) !=
}
}
}
goto fail;
} else
return;
} else {
goto fail;
}
} else {
#ifdef DEBUG
"LS_RJT reason = %x\n",
} else
"fc packet status = %x",
fpkt->fcal_pkt_status));
}
#endif
goto fail;
}
}
return; /* success */
fail:
sf->sf_device_count--;
if (sf->sf_device_count == 0) {
}
}
if (free_pkt) {
}
}
/*
* send a PRLI (process login) ELS IU via the transport,
* returning TRUE upon success, else returning FALSE
*/
static int
{
sizeof (struct la_els_prli);
fprli->data_overlay_allowed = 0;
fprli->cmd_data_mixed = 0;
fprli->data_resp_mixed = 0;
fprli->write_xfer_rdy_disabled = 0;
}
/*
* send an ADISC (address discovery) ELS IU via the transport,
* returning TRUE upon success, else returning FALSE
*/
static int
{
sizeof (struct la_els_adisc);
}
static struct fcal_packet *
{
struct fcal_packet *fpkt;
struct sf_els_hdr *privp;
"Could not allocate fcal_packet for ELS\n"));
return (NULL);
}
KM_NOSLEEP)) == NULL) {
"Could not allocate sf_els_hdr for ELS\n"));
goto fail;
}
"Could not allocate DMA handle for ELS\n"));
goto fail;
}
"Could not allocate DMA memory for ELS\n"));
goto fail;
}
"DMA memory too small for ELS\n"));
goto fail;
}
"Could not bind DMA memory for ELS\n"));
goto fail;
}
if (ccount != 1) {
"Wrong cookie count for ELS\n"));
goto fail;
}
"Could not allocate DMA handle for ELS rsp\n"));
goto fail;
}
"Could not allocate DMA memory for ELS rsp\n"));
goto fail;
}
"DMA memory too small for ELS rsp\n"));
goto fail;
}
"Could not bind DMA memory for ELS rsp\n"));
goto fail;
}
if (ccount != 1) {
"Wrong cookie count for ELS rsp\n"));
goto fail;
}
fpkt->fcal_pkt_flags = 0;
/* Fill in the Fabric Channel Header */
return (fpkt);
fail:
if (cmd_dma_handle != NULL) {
if (cmd_bound) {
(void) ddi_dma_unbind_handle(cmd_dma_handle);
}
}
if (rsp_dma_handle != NULL) {
if (rsp_bound) {
(void) ddi_dma_unbind_handle(rsp_dma_handle);
}
}
return (NULL);
}
static void
{
}
}
}
}
if (privp->data_dma_handle) {
}
}
}
}
static struct sf_target *
{
int hash;
#ifdef RAID_LUNS
/* XXXX Work around SCSA limitations. */
#endif
return (NULL);
}
if (lun != 0) {
/*
* Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
* and enqueue the new LUN.
*/
/*
* Yeep -- no LUN 0?
*/
return (NULL);
}
/* LUN 0 already finished, duplicate its state */
return (target);
/*
* LUN 0 online or not examined yet.
* Try to bring the LUN back online
*/
return (target);
}
return (NULL);
}
/* Initialize new target structure */
#ifdef RAID_LUNS
#endif
/* Don't let anyone use this till we finishup init. */
/* Traverse the list looking for this target */
}
return (ntarget);
}
/* It's been touched this LIP -- duplicate WWNs */
tnum);
}
return (NULL);
}
/* Someone else is in our slot */
"target 0x%x, duplicate switch settings\n", tnum);
return (NULL);
}
tnum);
sizeof (struct sf_target_stats));
}
}
return (NULL);
}
#ifdef RAID_LUNS
#endif
} else {
}
return (target);
}
/*
* find the target for a given sf instance
*/
/* ARGSUSED */
static struct sf_target *
#ifdef RAID_LUNS
#else
#endif
{
int hash;
#ifndef RAID_LUNS
sizeof (target->sft_port_wwn)) == 0 &&
break;
#else
sizeof (target->sft_port_wwn)) == 0 &&
break;
#endif
}
return (target);
}
/*
* Send out a REPORT_LUNS command.
*/
static int
{
int handle_bound = 0;
char *msg = "Transport";
msg = "ddi_dma_alloc_handle()";
goto fail;
}
msg = "ddi_dma_mem_alloc()";
goto fail;
}
if (real_size < REPORT_LUNS_SIZE) {
msg = "DMA mem < REPORT_LUNS_SIZE";
goto fail;
}
msg = "ddi_dma_addr_bind_handle()";
goto fail;
}
handle_bound = 1;
if (ccount != 1) {
msg = "ccount != 1";
goto fail;
}
sizeof (struct fcp_cmd);
/* Now set the buffer size. If DDI gave us extra, that's O.K. */
(real_size&0x0ff);
/* We know he's there, so this should be fast */
return (1);
fail:
"%s failure for REPORTLUN to target 0x%x\n",
if (lun_dma_handle != NULL) {
if (handle_bound)
(void) ddi_dma_unbind_handle(lun_dma_handle);
}
}
return (0);
}
/*
* Handle the results of a REPORT_LUNS command:
* Create additional targets if necessary
* Initiate INQUIRYs on all LUNs.
*/
static void
{
struct scsi_report_luns *ptr =
int delayed_retry = 0;
int i, free_pkt = 1;
short ncmds;
/* use as temporary state variable */
return;
}
}
"!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
/* See if target simply does not support REPORT_LUNS. */
rsp->fcp_sense_len >=
struct scsi_extended_sense *sense;
sense = (struct scsi_extended_sense *)
+ rsp->fcp_response_len);
/* Fake LUN 0 */
"!REPORTLUN Faking good "
"completion for alpa %x\n",
privp->dest_nport_id));
"!REPORTLUN device alpa %x "
"key %x code %x\n",
goto fail;
}
"!REPORTLUN device alpa %x was reset\n",
privp->dest_nport_id));
} else {
"!REPORTLUN device alpa %x "
"key %x code %x\n",
/* XXXXXX The following is here to handle broken targets -- remove it later */
if (sf_reportlun_forever &&
goto retry;
/* XXXXXX */
delayed_retry = 1;
}
}
struct fcp_rsp_info *bep;
fcp_response_len + 1);
0, 0, DDI_DMA_SYNC_FORKERNEL);
/* Convert from #bytes to #ints */
"!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
if (!ptr->lun_list_len) {
/* No LUNs? Ya gotta be kidding... */
"SCSI violation -- "
"target 0x%x reports no LUNs\n",
privp->dest_nport_id]);
}
}
sf->sf_lip_cnt; i++) {
struct sf_els_hdr *nprivp;
struct fcal_packet *nfpkt;
/* LUN 0 is already in `target' */
}
if (target) {
sizeof (struct sf_els_hdr),
sizeof (union sf_els_cmd),
sizeof (union sf_els_rsp),
if (nprivp)
}
0)) {
lip_cnt) {
sf->sf_device_count --;
}
}
}
return;
} else {
"!REPORTLUN al_pa %x fcp failure, "
"fcp_rsp_code %x scsi status %x\n",
goto fail;
}
}
delayed_retry = 1;
}
/* XXXXXX The following is here to handle broken targets -- remove it later */
/* XXXXXX */
if (delayed_retry) {
} else {
}
if (!delayed_retry)
"!REPORTLUN to al_pa %x failed, retrying\n",
privp->dest_nport_id));
goto fail;
} else
return;
} else {
}
} else {
fail:
/* REPORT_LUN failed -- try inquiry */
return;
} else {
free_pkt = 0;
}
sf->sf_device_count--;
if (sf->sf_device_count == 0)
}
}
if (free_pkt) {
}
}
static int
{
int handle_bound = FALSE;
char *msg = "Transport";
msg = "ddi_dma_alloc_handle()";
goto fail;
}
msg = "ddi_dma_mem_alloc()";
goto fail;
}
if (real_size < SUN_INQSIZE) {
msg = "DMA mem < inquiry size";
goto fail;
}
msg = "ddi_dma_addr_bind_handle()";
goto fail;
}
handle_bound = TRUE;
if (ccount != 1) {
msg = "ccount != 1";
goto fail;
}
sizeof (struct fcp_cmd);
fail:
"%s failure for INQUIRY to target 0x%x\n",
if (inq_dma_handle != NULL) {
if (handle_bound) {
(void) ddi_dma_unbind_handle(inq_dma_handle);
}
}
}
return (FALSE);
}
/*
* called as the pkt_comp routine for INQ packets
*/
static void
{
int delayed_retry = FALSE;
short ncmds;
/* use as temporary state variable */
return;
}
}
}
}
(size_t)0, DDI_DMA_SYNC_FORKERNEL);
"!INQUIRY to al_pa %x scsi status %x",
struct fcp_rsp_info *bep;
fcp_response_len + 1);
"!INQUIRY to al_pa %x lun %" PRIx64
" succeeded\n",
sizeof (*prt));
sf->sf_device_count--;
if (sf->sf_device_count == 0) {
}
}
return;
}
STATUS_BUSY) ||
}
} else {
}
}
delayed_retry = 1;
}
if (delayed_retry) {
} else {
}
if (!delayed_retry) {
"INQUIRY to al_pa %x failed, retrying",
privp->dest_nport_id));
}
}
/* if not delayed call transport to send a pkt */
if (!delayed_retry &&
FCAL_NOSLEEP, CQ_REQUEST_1) !=
}
}
}
goto fail;
}
return;
}
} else {
fail:
"Retry Count: %d\n",
sf->sf_device_count--;
if (sf->sf_device_count == 0) {
}
}
}
}
static void
{
int i; /* loop index */
int cflag;
/* scan all hash queues */
for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
/* see if target is not offline */
/*
* target already offline
*/
goto next_entry;
}
/*
* target is not already offline -- see if it has
* already been marked as ready to go offline
*/
/*
* target already marked, so take it offline
*/
goto next_entry;
}
/* clear target busy flag */
/* is target init not yet done ?? */
/* get pointer to target dip */
/*
* target init not yet done &&
* devinfo not yet created
*/
goto next_entry;
}
/*
* target init already done || devinfo already created
*/
/* a problem creating properties */
goto next_entry;
}
/* create a new element for the hotplug list */
KM_NOSLEEP)) != NULL) {
/* fill in the new element */
/* add the new element into the hotplug list */
} else {
/* this is the first element in list */
elem;
}
} else {
/* could not allocate memory for element ?? */
(void) ndi_devi_online_async(dip, 0);
}
/* ensure no new LIPs have occurred */
return;
}
}
/* done scanning all targets in this queue */
}
/* done with all hash queues */
sf->sf_online_timer = 0;
}
/*
* create devinfo node
*/
static void
{
char **compatible = NULL;
int ncompatible;
char *scsi_binding_set;
/* get the 'scsi-binding-set' property */
/* determine the node name and compatible */
if (scsi_binding_set)
/* if nodename can't be determined then print a message and skip it */
#ifndef RAID_LUNS
"@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
" compatible: %s",
#else
"@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
" compatible: %s",
#endif
goto fail;
}
/* allocate the node */
goto fail;
}
/* decorate the node with compatible */
goto fail;
}
/* add addressing properties to the node */
goto fail;
}
goto fail;
}
goto fail;
}
return;
fail:
} else {
}
}
}
}
/*
* create required properties, returning TRUE iff we succeed, else
* returning FALSE
*/
static int
{
return (FALSE);
}
return (FALSE);
}
return (FALSE);
}
return (FALSE);
}
#ifndef RAID_LUNS
return (0);
}
#else
return (0);
}
#endif
return (TRUE);
}
/*
* called by the transport to offline a target
*/
/* ARGSUSED */
static void
{
struct sf_hp_elem *elem;
sf_core = 0;
}
/* XXXX if this is LUN 0, offline all other LUNs */
/* abort all cmds for this target */
} else {
elem;
}
} else {
/* don't do NDI_DEVI_REMOVE for now */
"device offline failed",
} else {
"device offline succeeded\n",
}
}
} else {
}
}
}
/*
*
* returning:
* 1 (TRUE) boolean capability is true (on get)
* 0 (FALSE) invalid capability, can't set capability (on set),
* or boolean capability is false (on get)
* -1 (UNDEFINED) can't find capability (SCSA) or unsupported capability
* 3 when getting SCSI version number
* AL_PA when getting port initiator ID
*/
static int
{
int cidx;
return (rval);
}
/* get index of capability string */
/* can't find capability */
return (UNDEFINED);
}
if (doset) {
/*
* Process setcap request.
*/
/*
* At present, we can only set binary (0/1) values
*/
switch (cidx) {
case SCSI_CAP_ARQ: /* can't set this capability */
break;
default:
"sf_setcap: unsupported %d", cidx));
break;
}
"set cap: cap=%s,val=0x%x,tgtonly=0x%x"
",doset=0x%x,rval=%d\n",
} else {
/*
* Process getcap request.
*/
switch (cidx) {
case SCSI_CAP_DMA_MAX:
break; /* don't' have this capability */
case SCSI_CAP_INITIATOR_ID:
break;
case SCSI_CAP_ARQ:
break;
case SCSI_CAP_TAGGED_QING:
break;
case SCSI_CAP_SCSI_VERSION:
rval = 3;
break;
break;
default:
"sf_scsi_getcap: unsupported"));
break;
}
"get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
"doset=0x%x,rval=%d\n",
}
return (rval);
}
/*
* called by the transport to get a capability
*/
static int
{
}
/*
* called by the transport to set a capability
*/
static int
{
}
/*
* called by the transport to abort a target
*/
static int
{
struct fcal_packet *fpkt;
int old_target_state;
int lip_cnt;
int tgt_id;
int deferred_destroy;
deferred_destroy = 0;
- sizeof (struct fcal_packet));
sizeof (struct sf_pkt));
(void *)fpkt));
} else {
}
break;
} else {
}
}
/* call transport to abort command */
(rval == FCAL_ABORT_FAILED)) {
} else if (rval == FCAL_BAD_ABORT) {
+ 20;
} else {
"Command Abort failed\n"));
}
} else {
}
}
} else {
return (rval);
}
/* prepare the packet for transport */
/*
* call transport to send a pkt polled
*
* if that fails call the transport to abort it
*/
(void) ddi_dma_sync(
(off_t)
if (((struct fcp_rsp_info *)
(&cmd->cmd_rsp_block->
fcp_response_len + 1))->
rsp_code == FCP_NO_FAILURE) {
/* abort cmds for this targ */
} else {
"Target %d Abort Task "
}
} else {
+ 20;
FCAL_ABORTED &&
(t != FCAL_ABORT_FAILED)) {
"sf_abort failed, "
"initiating LIP\n");
deferred_destroy = 1;
}
} else {
}
}
}
if (!deferred_destroy) {
}
}
}
}
return (my_rval);
}
/*
* called by the transport and internally to reset a target
*/
static int
{
struct fcal_packet *fpkt;
int lip_cnt;
int deferred_destroy;
/* We don't support RESET_LUN yet. */
if (level == RESET_TARGET) {
struct sf_reset_list *p;
== NULL)
return (rval);
/* All target resets go to LUN 0 */
}
kmem_free(p, sizeof (struct sf_reset_list));
return (rval);
}
/*
* XXXX If we supported RESET_LUN we should check here
* to see if any LUN were being reset and somehow fail
* that operation.
*/
}
deferred_destroy = 0;
/* prepare the packet for transport */
/* call transport to send a pkt polled */
CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
if ((fpkt->fcal_pkt_status ==
FCAL_STATUS_OK) &&
(((struct fcp_rsp_info *)
(&cmd->cmd_rsp_block->
fcp_response_len + 1))->
rsp_code == FCP_NO_FAILURE)) {
"!sf%d: Target 0x%x Reset "
"successful\n",
} else {
"!sf%d: Target 0x%x "
"Reset failed."
"Status code 0x%x "
"Resp code 0x%x\n",
((struct fcp_rsp_info *)
(&cmd->cmd_rsp_block->
fcp_response_len + 1))->
rsp_code);
}
} else {
"0x%x Reset Failed. Ret=%x\n",
/* call the transport to abort a cmd */
+ 20;
FCAL_ABORTED) &&
(t != FCAL_ABORT_FAILED)) {
"!sf%d: Target 0x%x Reset "
"failed. Abort Failed, "
"forcing LIP\n",
deferred_destroy = 1;
}
} else {
}
}
}
/*
* Defer releasing the packet if we abort returned with
* a BAD_ABORT or timed out, because there is a
* possibility that the ucode might return it.
* We wait for at least 20s and let it be released
* by the sf_watch thread
*/
if (!deferred_destroy) {
/* for cache */
}
} else {
"Resource allocation error.\n",
}
p->timeout = ddi_get_lbolt() +
sf->sf_reset_list = p;
if (sf_reset_timeout_id == 0) {
}
} else {
}
}
kmem_free(p, sizeof (struct sf_reset_list));
}
} else {
/*
* We are currently in a lip, so let this one
* finish before forcing another one.
*/
return (TRUE);
}
}
return (rval);
}
/*
* abort all commands for a target
*
* if try_abort is set then send an abort
* if abort is set then this is abort, else this is a reset
*/
static void
{
struct fcal_packet *fpkt;
int rval = FCAL_ABORTED;
/*
* First pull all commands for all LUNs on this target out of the
* overflow list. We can tell it's the same target by comparing
* the node WWN.
*/
else
}
} else {
}
} else {
}
}
}
/*
* Now complete all the commands on our list. In the process,
* the completion routine may take the commands off the target
* lists.
*/
if (abort) {
} else {
}
/*
* call the packet completion routine only for
* non-polled commands. Ignore the polled commands as
* they timeout and will be handled differently
*/
}
/*
* Finally get all outstanding commands for each LUN, and abort them if
* they've been issued, and call the completion routine.
* For the case where sf_offline_target is called from sf_watch
* due to a Offline Timeout, it is quite possible that the soc+
* ucode is hosed and therefore cannot return the commands.
* Clear up all the issued commands as well.
* Try_abort will be false only if sf_abort_all is coming from
* sf_target_offline.
*/
(fpkt->fcal_cmd_state &
if (try_abort) {
/* call the transport to abort a pkt */
fpkt, 1);
}
if ((rval == FCAL_ABORTED) ||
(rval == FCAL_ABORT_FAILED)) {
if (abort) {
pkt->pkt_statistics |=
} else {
pkt->pkt_statistics |=
}
}
return;
}
} else {
}
}
}
}
/*
* called by the transport to start a packet
*/
static int
{
int rval;
(void *)cmd);
}
/* prepare the packet for transport */
return (rval);
}
return (TRAN_FATAL_ERROR);
}
return (TRAN_BUSY);
}
goto enque;
}
/* if no interrupts then do polled I/O */
}
/* regular interrupt-driven I/O */
if (!sf->sf_use_lock) {
/* locking no needed */
/* call the transport to send a pkt */
return (TRAN_BADPKT);
}
return (TRAN_ACCEPT);
}
/* regular I/O using locking */
/*
* either we're throttling back or there are already commands
* on the queue, so enqueue this one for later
*/
/* add to the queue */
} else {
/* this is the first entry in the queue */
}
return (TRAN_ACCEPT);
}
/*
* start this packet now
*/
/* still have cmd mutex */
}
/*
* internal routine to start a packet from the queue now
*
* enter with cmd mutex held and leave with it released
*/
static int
{
/* we have the cmd mutex */
/* call transport to send the pkt */
return (TRAN_BADPKT);
}
return (TRAN_ACCEPT);
}
/*
* prepare a packet for transport
*/
static int
{
/* XXXX Need to set the LUN ? */
return (TRAN_BADPKT);
}
/* invalidate imp field(s) of rsp block */
/* set up amt of I/O to do */
}
} else {
}
/* set up the Tagged Queuing type */
}
/*
* Sync the cmd segment
*/
sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
return (TRAN_ACCEPT);
}
/*
* fill in packet hdr source and destination IDs and hdr byte count
*/
static void
{
}
/*
* do polled I/O using transport
*/
static int
{
int timeout;
int rval;
cmd->cmd_timeout = 0;
/* call transport to send a pkt polled */
if (rval != FCAL_TRANSPORT_SUCCESS) {
if (rval == FCAL_TRANSPORT_TIMEOUT) {
} else {
}
return (TRAN_BADPKT);
}
return (TRAN_ACCEPT);
}
/* a shortcut for defining debug messages below */
#ifdef DEBUG
#else
#define SF_DMSG1(s) /* do nothing */
#endif
/*
* the pkt_comp callback for command packets
*/
static void
{
short ncmds;
int tgt_id;
int good_scsi_status = TRUE;
(void *)cmd);
}
/* cmd already being aborted -- nothing to do */
return;
}
}
} else {
}
!= STATUS_GOOD)) {
/*
* The next two checks make sure that if there
* is no sense data or a valid response and
* the command came back with check condition,
* the command should be retried
*/
}
}
(uint_t)0, DDI_DMA_SYNC_FORCPU);
}
/*
* Update the transfer resid, if appropriate
*/
/*
* Check to see if the SCSI command failed.
*
*/
/*
* First see if we got a FCP protocol error.
*/
struct fcp_rsp_info *bep;
bep =
case FCP_CMND_INVALID:
SF_DMSG1("FCP_RSP FCP_CMND "
"fields invalid");
break;
case FCP_TASK_MGMT_NOT_SUPPTD:
SF_DMSG1("FCP_RSP Task"
"Management Function"
"Not Supported");
break;
case FCP_TASK_MGMT_FAILED:
SF_DMSG1("FCP_RSP Task "
"Management Function"
"Failed");
break;
case FCP_DATA_RO_MISMATCH:
SF_DMSG1("FCP_RSP FCP_DATA RO "
"mismatch with "
"FCP_XFER_RDY DATA_RO");
break;
case FCP_DL_LEN_MISMATCH:
SF_DMSG1("FCP_RSP FCP_DATA length "
"different than BURST_LEN");
break;
default:
SF_DMSG1("FCP_RSP invalid RSP_CODE");
break;
}
}
}
/*
* See if we got a SCSI error with sense data
*/
sizeof (struct scsi_extended_sense));
struct scsi_arq_status *arq;
struct scsi_extended_sense *sensep =
(struct scsi_extended_sense *)sense;
STATUS_GOOD) {
== STATUS_CHECK) {
good_scsi_status = 1;
/* REPORT_LUNS_HAS_CHANGED */
"!REPORT_LUNS_HAS_CHANGED\n");
}
}
}
(cmd->cmd_scblen >=
sizeof (struct scsi_arq_status))) {
/*
* copy out sense information
*/
rqlen);
sizeof (struct scsi_extended_sense) -
arq->sts_rqpkt_reason = 0;
arq->sts_rqpkt_statistics = 0;
}
}
/*
* The firmware returns the number of bytes actually
* we asked and if it is different, we lost frames ?
*/
(good_scsi_status) &&
int byte_cnt =
if (byte_cnt != 0) {
"!sf_cmd_callback: Lost Frame: "
"(write) received 0x%x expected"
" 0x%x target 0x%x\n",
}
"!sf_cmd_callback: Lost Frame: (read) "
"received 0x%x expected 0x%x "
"target 0x%x\n", byte_cnt,
}
}
}
} else {
/* pkt status was not ok */
switch (fpkt->fcal_pkt_status) {
case FCAL_STATUS_ERR_OFFLINE:
SF_DMSG1("Fibre Channel Offline");
| SF_TARGET_MARK);
}
break;
break;
case FCAL_STATUS_TIMEOUT:
SF_DMSG1("Fibre Channel Timeout");
break;
case FCAL_STATUS_ERR_OVERRUN:
SF_DMSG1("CMD_DATA_OVR");
break;
SF_DMSG1("Unknown CQ type");
break;
case FCAL_STATUS_BAD_SEG_CNT:
SF_DMSG1("Bad SEG CNT");
break;
case FCAL_STATUS_BAD_XID:
SF_DMSG1("Fibre Channel Invalid X_ID");
break;
case FCAL_STATUS_XCHG_BUSY:
SF_DMSG1("Fibre Channel Exchange Busy");
break;
SF_DMSG1("Insufficient CQEs");
break;
case FCAL_STATUS_ALLOC_FAIL:
SF_DMSG1("ALLOC FAIL");
break;
case FCAL_STATUS_BAD_SID:
SF_DMSG1("Fibre Channel Invalid S_ID");
break;
sf_core = 0;
}
msg2 =
"INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
break;
case FCAL_STATUS_CRC_ERR:
msg2 = "Fibre Channel CRC Error on frames";
break;
case FCAL_STATUS_NO_SEQ_INIT:
SF_DMSG1("Fibre Channel Seq Init Error");
break;
case FCAL_STATUS_OPEN_FAIL:
SF_DMSG1("Fibre Channel Open Failure");
SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
"forcing LIP\n",
}
break;
SF_DMSG1("Fibre Channel Online Timeout");
break;
default:
SF_DMSG1("Unknown FC Status");
break;
}
}
#ifdef DEBUG
/*
* msg1 will be non-NULL if we've detected some sort of error
*/
"!Transport error on cmd=0x%p target=0x%x: %s\n",
}
#endif
}
#ifdef DEBUG
if (!sf->sf_use_lock) {
}
#endif
}
(void *)pkt));
}
}
/*
* start throttling for this instance
*/
static void
{
/* this command is busy -- go to next */
continue;
}
/* this cmd not busy and not issued */
/* remove this packet from the queue */
/* this was the first packet */
/* this was the last packet */
}
} else {
/* some packet in the middle of the queue */
}
}
} else {
}
}
}
}
}
/*
* called when the max exchange value is exceeded to throttle back commands
*/
static void
{
} else {
} else {
/*
* This case is just a safeguard, should not really
* happen(ncmds < SF_DECR_DELTA and MAX_EXCHG exceed
*/
}
}
} else {
} else {
}
}
}
}
/*
* sf watchdog routine, called for a timeout
*/
/*ARGSUSED*/
static void
{
struct sf_els_hdr *privp;
static int count = 0, pscan_count = 0;
count++;
pscan_count++;
sf_watch_running = 1;
/* disable throttling while we're suspended */
"sf_watch, sf%d:throttle disabled "
"due to DDI_SUSPEND\n",
continue;
}
if (sf->sf_take_core) {
}
cmdmax) {
}
} else {
}
>> 2;
#ifdef DEBUG
if (sf->sf_use_lock) {
"use lock flag off\n"));
}
#endif
}
}
if (pscan_count >= sf_pool_scan_cnt) {
}
}
/* timeout this command */
}
}
}
} else {
}
}
for (i = 0; i < sf_max_targets; i++) {
"Unstable: Failed to bring "
"Loop Online\n");
mescount = 1;
}
}
}
sf->sf_online_timer = 0;
}
if (count >= sf_pkt_scan_cnt) {
}
for (i = 0; i < sf_max_targets; i++) {
SF_TARGET_BUSY)) {
"!Offline Timeout\n");
(void) soc_take_core(
sf_core = 0;
}
break;
}
}
} else {
}
}
if (count >= sf_pkt_scan_cnt) {
count = 0;
}
if (pscan_count >= sf_pool_scan_cnt) {
pscan_count = 0;
}
/* reset timeout */
/* signal waiting thread */
sf_watch_running = 0;
}
/*
* called during a timeout to check targets
*/
static void
{
int i;
int lip_cnt;
/* check scan all possible targets */
for (i = 0; i < sf_max_targets; i++) {
target->sft_scan_count++;
return;
}
target->sft_scan_count = 0;
#ifdef DEBUG
sf_abort_flag = 0;
#else
cmd->cmd_timeout))) {
#endif
cmd->cmd_timeout = 0;
/* prevent reset from getting at this packet */
return;
else {
return;
} else {
}
}
/*
* if the abort and lip fail, a reset will be carried out.
* But the reset will ignore this packet. We have waited at least
* 20 seconds after the initial timeout. Now, complete it here.
* This also takes care of spurious bad aborts.
*/
<= sf_watchdog_time)) {
pkt->pkt_statistics |=
/* handle deferred_destroy case */
} else {
cntl_reset == 1) ||
cntl_reset = 0;
cntl_abort_tsk = 0;
/* for cache */
}
}
} else {
}
}
}
}
}
/*
* a command to a target has timed out
* return TRUE iff cmd abort failed or timed out, else return FALSE
*/
static int
{
int rval;
struct fcal_packet *fpkt;
int tgt_id;
sf_core = 0;
}
/* call the transport to abort a command */
switch (rval) {
case FCAL_ABORTED:
}
break; /* success */
case FCAL_ABORT_FAILED:
}
break;
case FCAL_BAD_ABORT:
sf_core = 0;
}
+ 20;
break;
case FCAL_TIMEOUT:
break;
default:
"Command Abort failed target 0x%x, forcing a LIP\n", tgt_id);
sf_core = 0;
}
break;
}
return (retval);
}
/*
* an ELS command has timed out
* return ???
*/
static struct sf_els_hdr *
{
struct fcal_packet *fpkt;
char what[64];
/* use as temporary state variable */
/*
* take socal core if required. Timeouts for IB and hosts
* are not very interesting, so we take socal core only
* if the timeout is *not* for a IB or host.
*/
sf_core = 0;
}
sf_core = 0;
}
sf_core = 0;
}
} else {
}
if (dflag) {
/* delayed retry */
"!sf%d: %s to target %x delayed retry\n",
goto try_again;
}
}
}
}
"!%s to target 0x%x retrying\n",
what,
}
}
(els_code != LA_ELS_LOGO)) {
}
sf->sf_device_count--;
if (sf->sf_device_count == 0) {
sf->sf_lip_cnt);
}
}
}
}
} else {
}
} else {
sf_core = 0;
}
"status=0x%x, forcing LIP\n", what,
}
}
return (privp);
}
/*
* called by timeout when a reset times out
*/
/*ARGSUSED*/
static void
sf_check_reset_delay(void *arg)
{
lb = ddi_get_lbolt();
sf_reset_timeout_id = 0;
/* is this type cast needed? */
/* abort all cmds for this target */
while (target) {
}
}
tp = (struct sf_reset_list *)
&sf->sf_reset_list;
lb = ddi_get_lbolt();
} else {
}
}
}
if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
}
}
/*
* called to "reset the bus", i.e. force loop initialization (and address
* re-negotiation)
*/
static void
{
int i;
/* disable restart of lip if we're suspended */
"sf_force_lip, sf%d: lip restart disabled "
"due to DDI_SUSPEND\n",
return;
}
for (i = 0; i < sf_max_targets; i++) {
}
}
sf->sf_lip_cnt++;
#ifdef DEBUG
/* are we allowing LIPs ?? */
if (sf_lip_flag != 0) {
#endif
/* call the transport to force loop initialization */
FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
(i != FCAL_TIMEOUT)) {
/* force LIP failed */
sf_core = 0;
}
#ifdef DEBUG
/* are we allowing reset after LIP failed ?? */
if (sf_reset_flag != 0) {
#endif
/* restart socal after resetting it */
"!Force lip failed Status code 0x%x. Reseting\n",
i);
/* call transport to force a reset */
#ifdef DEBUG
}
#endif
}
#ifdef DEBUG
}
#endif
}
/*
* called by the transport when an unsolicited ELS is received
*/
static void
{
struct la_els_rjt *rsp;
int i, tgt_id;
struct fcal_packet *fpkt;
struct sf_els_hdr *privp;
return;
}
if (i > SOC_CQE_PAYLOAD) {
i = SOC_CQE_PAYLOAD;
}
case LA_ELS_LOGO:
/*
* logout received -- log the fact
*/
if (sf_lip_on_plogo) {
}
break;
default: /* includes LA_ELS_PLOGI */
/*
* something besides a logout received -- we don't handle
* this so send back a reject saying its unsupported
*/
/* allocate room for a response */
sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
break;
}
/* fill in pkt header */
/* fill in response */
break;
}
}
/*
* Error logging, printing, and debug print routines
*/
/*PRINTFLIKE3*/
static void
{
char buf[256];
} else {
}
}
/*
* called to get some sf kstats -- return 0 on success else return errno
*/
static int
{
if (rw == KSTAT_WRITE) {
/* can't write */
return (EACCES);
}
return (0); /* success */
}
/*
* Unix Entry Points
*/
/*
* driver entry point for opens on control device
*/
/* ARGSUSED */
static int
{
/* just ensure soft state exists for this device */
return (ENXIO);
}
++(sf->sf_check_n_close);
return (0);
}
/*
* driver entry point for last close on control device
*/
/* ARGSUSED */
static int
{
return (ENXIO);
}
return (ENODEV);
} else {
--(sf->sf_check_n_close);
}
return (0);
}
/*
* driver entry point for sf ioctl commands
*/
/* ARGSUSED */
static int
{
int cnt, i;
int retval; /* return value */
struct devctl_iocdata *dcp;
struct scsi_address ap;
return (ENXIO);
}
/* handle all ioctls */
switch (cmd) {
/*
* We can use the generic implementation for these ioctls
*/
case DEVCTL_DEVICE_GETSTATE:
case DEVCTL_DEVICE_ONLINE:
case DEVCTL_DEVICE_OFFLINE:
case DEVCTL_BUS_GETSTATE:
/*
* return FC map
*/
case SFIOCGMAP:
goto dun;
}
int i, j = 0;
/* Need to generate a fake lilp map */
for (i = 0; i < sf_max_targets; i++) {
if (sf->sf_targets[i])
sf->sf_targets[i]->
}
}
sizeof (la_wwn_t));
sizeof (la_wwn_t));
for (i = 0; i < cnt; i++) {
sizeof (la_wwn_t));
sizeof (la_wwn_t));
continue;
}
al_pa]];
SF_TARGET_BUSY))) {
[i].sf_node_wwn,
sizeof (la_wwn_t));
[i].sf_port_wwn,
sizeof (la_wwn_t));
map.sf_addr_pair[i].
map.sf_addr_pair[i].
continue;
}
}
sf_node_wwn, sizeof (la_wwn_t));
sf_port_wwn, sizeof (la_wwn_t));
}
goto dun;
}
break;
/*
* handle device control ioctls
*/
case DEVCTL_DEVICE_RESET:
goto dun;
}
goto dun;
}
goto dun;
}
goto dun;
}
goto dun;
}
goto dun;
}
break;
case DEVCTL_BUS_QUIESCE:
case DEVCTL_BUS_UNQUIESCE:
goto dun;
case DEVCTL_BUS_RESET:
case DEVCTL_BUS_RESETALL:
break;
default:
goto dun;
}
retval = 0; /* success */
dun:
return (retval);
}
/*
* get the target given a DIP
*/
static struct sf_target *
{
int i;
/* scan each hash queue for the DIP in question */
for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
return (target); /* success: target found */
}
}
}
return (NULL); /* failure: target not found */
}
/*
* called by the transport to get an event cookie
*/
static int
{
/* can't find instance for this device */
return (DDI_FAILURE);
}
}
/*
* called by the transport to add an event callback
*/
static int
{
/* can't find instance for this device */
return (DDI_FAILURE);
}
}
/*
* called by the transport to remove an event callback
*/
static int
{
/* can't find instance for this device */
return (DDI_FAILURE);
}
}
/*
* called by the transport to post an event
*/
static int
{
/* is this a remove event ?? */
if (remove_cookie == eventid) {
/* handle remove event */
/* no sf instance for this device */
return (NDI_FAILURE);
}
/* get the target for this event */
/*
* clear device info for this target and mark as
* not done
*/
return (NDI_SUCCESS); /* event handled */
}
/* no target for this event */
return (NDI_FAILURE);
}
/* an insertion event */
!= NDI_SUCCESS) {
return (NDI_FAILURE);
}
}
/*
* the sf hotplug daemon, one thread per sf instance
*/
static void
sf_hp_daemon(void *arg)
{
struct sf_hp_elem *elem;
int tgt_id;
callb_generic_cpr, "sf_hp_daemon");
do {
/* save ptr to head of list */
/* take element off of list */
/* element only one in list -- list now empty */
} else {
/* remove element from head of list */
}
case SF_ONLINE:
/* online this target */
(void) ndi_event_retrieve_cookie(
break;
case SF_OFFLINE:
/* offline this target */
/* don't do NDI_DEVI_REMOVE for now */
NDI_SUCCESS) {
"device offline failed", tgt_id));
} else {
"device offline succeeded\n",
tgt_id));
}
break;
}
}
/* if exit is not already signaled */
if (sf->sf_hp_exit == 0) {
/* wait to be signaled by work or exit */
}
} while (sf->sf_hp_exit == 0);
/* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
thread_exit(); /* no more hotplug thread */
/* NOTREACHED */
}