/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2004-2012 Emulex. All rights reserved.
* Use is subject to license terms.
*/
#include <emlxs.h>
/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
/*
* emlxs_handle_fcp_event
*
* Description: Process an FCP Rsp Ring completion
*
*/
/* ARGSUSED */
extern void
{
#ifdef SAN_DIAG_SUPPORT
#endif
/* Initialize the status */
localstat = 0;
scsi_status = 0;
asc = 0;
ascq = 0;
sense = 0;
check_underrun = 0;
fix_it = 0;
if (!sbp) {
/* completion with missing xmit command */
return;
}
#ifdef SAN_DIAG_SUPPORT
#endif /* SAN_DIAG_SUPPORT */
data_rx = 0;
/* Sync data in data buffer only on FC_PKT_FCP_READ */
#ifdef TEST_SUPPORT
hba->underrun_counter--;
/* Report 512 bytes missing by adapter */
/* Corrupt 512 bytes of Data buffer */
/* Set FCP response to STATUS_GOOD */
}
#endif /* TEST_SUPPORT */
}
/* Process the pkt */
/* Check for immediate return */
if ((iostat == IOSTAT_SUCCESS) &&
PACKET_IN_ABORT | PACKET_POLLED))) {
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
#endif /* EMLXS_MODREV2X */
#ifdef FMA_SUPPORT
#endif /* FMA_SUPPORT */
cp->ulpCmplCmd++;
#ifdef FMA_SUPPORT
}
#endif /* FMA_SUPPORT */
return;
}
/*
* A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
* is reported.
*/
/* Check if a response buffer was not provided */
goto done;
}
/* Get the response buffer pointer */
/* Validate the response payload */
}
rsp->fcp_response_len = 0;
}
rsp->fcp_sense_len = 0;
}
goto done;
}
/* Set the valid response flag */
#ifdef SAN_DIAG_SUPPORT
if (scsi_status == SCSI_STAT_QUE_FULL) {
} else if (scsi_status == SCSI_STAT_BUSY) {
}
#endif
/*
* Convert a task abort to a check condition with no data
* transferred. We saw a data corruption when Solaris received
* a Task Abort from a tape.
*/
if (scsi_status == SCSI_STAT_TASK_ABORT) {
"Task Abort. "
"Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
if (pkt->pkt_datalen) {
} else {
}
}
/*
* We only need to check underrun if data could
* have been sent
*/
/* Always check underrun if status is good */
if (scsi_status == SCSI_STAT_GOOD) {
check_underrun = 1;
}
/* Check the sense codes if this is a check condition */
else if (scsi_status == SCSI_STAT_CHECK_COND) {
check_underrun = 1;
/* Check if sense data was provided */
}
#ifdef SAN_DIAG_SUPPORT
#endif
}
/* Status is not good and this is not a check condition */
/* No data should have been sent */
else {
check_underrun = 0;
}
/* Initialize the resids */
pkt->pkt_resp_resid = 0;
pkt->pkt_data_resid = 0;
/* Check if no data was to be transferred */
if (pkt->pkt_datalen == 0) {
goto done;
}
/* Get the residual underrun count reported by the SCSI reply */
/* Set the pkt_data_resid to what the scsi response resid */
/* Adjust the pkt_data_resid field if needed */
/*
* Get the residual underrun count reported by
* our adapter
*/
#ifdef SAN_DIAG_SUPPORT
}
#endif
/* Get the actual amount of data transferred */
/*
* If the residual being reported by the adapter is
* greater than the residual being reported in the
* reply, then we have a true underrun.
*/
switch (scsi_opcode) {
case SCSI_INQUIRY:
break;
case SCSI_RX_DIAG:
scsi_dl =
scsi_cmd[16];
break;
default:
}
#ifdef FCP_UNDERRUN_PATCH1
/*
* If status is not good and no data was
* actually transferred, then we must fix
* the issue
*/
fix_it = 1;
"Underrun(1). Fixed. "
"did=0x%06x sbp=%p cmd=%02x "
"dl=%d,%d rx=%d rsp=%d",
(pkt->pkt_datalen -
}
}
#endif /* FCP_UNDERRUN_PATCH1 */
#ifdef FCP_UNDERRUN_PATCH2
if (scsi_status == SCSI_STAT_GOOD) {
/*
* If status is good and this is an
* inquiry request and the amount of
* data
*/
/*
* requested <= data received, then we
* must fix the issue.
*/
if ((scsi_opcode == SCSI_INQUIRY) &&
fix_it = 1;
"Underrun(2). Fixed. "
"did=0x%06x sbp=%p "
"cmd=%02x dl=%d,%d "
"rx=%d rsp=%d",
}
/*
* If status is good and this is an
* inquiry request and the amount of
* data requested >= 128 bytes, but
* only 128 bytes were received,
* then we must fix the issue.
*/
else if ((scsi_opcode == SCSI_INQUIRY) &&
fix_it = 1;
"Underrun(3). Fixed. "
"did=0x%06x sbp=%p "
"cmd=%02x dl=%d,%d "
"rx=%d rsp=%d",
}
}
}
#endif /* FCP_UNDERRUN_PATCH2 */
/*
* Check if SCSI response payload should be
* fixed or if a DATA_UNDERRUN should be
* reported
*/
if (fix_it) {
/*
* Fix the SCSI response payload itself
*/
} else {
/*
* Change the status from
* IOSTAT_FCP_RSP_ERROR to
* IOSTAT_DATA_UNDERRUN
*/
}
}
/*
* If the residual being reported by the adapter is
* less than the residual being reported in the reply,
* then we have a true overrun. Since we don't know
* where the extra data came from or went to then we
* cannot trust anything we received
*/
/*
* Change the status from
* IOSTAT_FCP_RSP_ERROR to
* IOSTAT_DATA_OVERRUN
*/
}
/*
* Get the residual underrun count reported by
* our adapter
*/
#ifdef SAN_DIAG_SUPPORT
}
#endif /* SAN_DIAG_SUPPORT */
/* Get the actual amount of data transferred */
/*
* If the residual being reported by the adapter is
* greater than the residual being reported in the
* reply, then we have a true underrun.
*/
#ifdef FCP_UNDERRUN_PATCH1
/*
* If status is not good and no data was
* actually transferred, then we must fix
* the issue
*/
fix_it = 1;
"Underrun(1). Fixed. "
"did=0x%06x sbp=%p cmd=%02x "
"dl=%d,%d rx=%d rsp=%d",
(pkt->pkt_datalen -
}
}
#endif /* FCP_UNDERRUN_PATCH1 */
/*
* Check if SCSI response payload should be
* fixed or if a DATA_UNDERRUN should be
* reported
*/
if (fix_it) {
/*
* Fix the SCSI response payload itself
*/
} else {
/*
* Change the status from
* IOSTAT_FCP_RSP_ERROR to
* IOSTAT_DATA_UNDERRUN
*/
}
}
/*
* If the residual being reported by the adapter is
* less than the residual being reported in the reply,
* then we have a true overrun. Since we don't know
* where the extra data came from or went to then we
* cannot trust anything we received
*/
/*
* Change the status from
* IOSTAT_FCP_RSP_ERROR to
* IOSTAT_DATA_OVERRUN
*/
}
}
done:
/* Print completion message */
switch (iostat) {
case IOSTAT_SUCCESS:
/* Build SCSI GOOD status */
if (pkt->pkt_rsplen) {
}
break;
case IOSTAT_FCP_RSP_ERROR:
break;
case IOSTAT_REMOTE_STOP:
break;
case IOSTAT_LOCAL_REJECT:
switch (localstat) {
case IOERR_SEQUENCE_TIMEOUT:
"Local reject. "
"%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
break;
default:
"Local reject. %s 0x%06x %p %02x (%x)(%x)",
}
break;
case IOSTAT_NPORT_RJT:
break;
case IOSTAT_FABRIC_RJT:
break;
case IOSTAT_NPORT_BSY:
#ifdef SAN_DIAG_SUPPORT
#endif
break;
case IOSTAT_FABRIC_BSY:
#ifdef SAN_DIAG_SUPPORT
#endif
break;
case IOSTAT_INTERMED_RSP:
"Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
sbp, scsi_opcode);
break;
case IOSTAT_LS_RJT:
break;
case IOSTAT_DATA_UNDERRUN:
"Underrun. did=0x%06x sbp=%p cmd=%02x "
"dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
break;
case IOSTAT_DATA_OVERRUN:
"Overrun. did=0x%06x sbp=%p cmd=%02x "
"dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
break;
case IOSTAT_RSP_INVALID:
"Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d"
"(%d, %d, %d)",
break;
default:
"Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
break;
}
if (iostat == IOSTAT_SUCCESS) {
} else {
}
return;
} /* emlxs_handle_fcp_event() */
/*
* emlxs_post_buffer
*
* This routine will post count buffers to the
* ring with the QUE_RING_BUF_CN command. This
* allows 2 buffers / command to be posted.
* Returns the number of buffers NOT posted.
*/
/* SLI3 */
extern int
{
int32_t i;
int32_t j;
mp = 0;
maxqbuf = 2;
}
#ifdef SFCT_SUPPORT
seg = MEM_FCTBUF;
}
#endif /* SFCT_SUPPORT */
else {
return (0);
}
/*
* While there are buffers to post
*/
while (cnt) {
return (cnt);
}
/*
* Max buffers can be posted per command
*/
for (i = 0; i < maxqbuf; i++) {
if (cnt <= 0)
break;
/* fill in BDEs for command */
== 0) {
icmd->ULPBDECOUNT = i;
for (j = 0; j < i; j++) {
if (mp) {
(void *)mp);
}
}
return (cnt + i);
}
/*
* map that page and save the address pair for lookup
* later
*/
rp,
mp,
/*
* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
* "UB Post: ring=%d addr=%08x%08x size=%d",
* rp->ringno, icmd->un.cont64[i].addrHigh,
* icmd->un.cont64[i].addrLow, size);
*/
cnt--;
}
icmd->ULPBDECOUNT = i;
/* used for delimiter between commands */
}
rp->fc_missbufcnt = 0;
return (0);
} /* emlxs_post_buffer() */
static void
{
int i;
/* We will process all nodes with this tag later */
for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
}
}
}
static NODELIST *
{
int i;
/* Find first node */
tagged = 0;
for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
continue;
}
continue;
}
break;
}
if (tagged) {
break;
}
}
return (tagged);
}
extern int
{
int i;
/* Target mode only uses this routine for linkdowns */
return (0);
}
linkdown = 0;
vlinkdown = 0;
unreg_vpi = 0;
update = 0;
clear_all = 0;
return (0);
}
switch (format) {
case 0: /* Port */
mask = 0x00ffffff;
break;
case 1: /* Area */
mask = 0x00ffff00;
break;
case 2: /* Domain */
mask = 0x00ff0000;
break;
case 3: /* Network */
mask = 0x00000000;
break;
#ifdef DHCHAP_SUPPORT
case 0xfe: /* Virtual link down */
mask = 0x00000000;
vlinkdown = 1;
break;
#endif /* DHCHAP_SUPPORT */
case 0xff: /* link is down */
mask = 0x00000000;
linkdown = 1;
break;
case 0xfd: /* New fabric */
default:
mask = 0x00000000;
linkdown = 1;
clear_all = 1;
break;
}
/*
* If link is down then this is a hard shutdown and flush
* If link not down then this is a soft shutdown and flush
* (e.g. RSCN)
*/
if (linkdown) {
sizeof (SERV_PARM));
update = 1;
}
/* Tell ULP about it */
if (update) {
}
}
#ifdef SFCT_SUPPORT
}
#endif /* SFCT_SUPPORT */
} else {
&emlxs_link_down_msg, "*");
}
}
}
unreg_vpi = 1;
#ifdef DHCHAP_SUPPORT
/* Stop authentication with all nodes */
#endif /* DHCHAP_SUPPORT */
/* Flush the base node */
/* Flush any pending ub buffers */
}
#ifdef DHCHAP_SUPPORT
/* virtual link down */
else if (vlinkdown) {
update = 1;
}
/* Tell ULP about it */
if (update) {
"Switch authentication failed.");
}
}
#ifdef SFCT_SUPPORT
}
#endif /* SFCT_SUPPORT */
} else {
"Switch authentication failed. *");
}
}
}
/* Flush the base node */
}
#endif /* DHCHAP_SUPPORT */
else {
}
/* Set the node tags */
unreg_vpi = 0;
(void) emlxs_rpi_pause_notify(port,
/*
* In port_online we need to resume
* these RPIs before we can use them.
*/
}
}
goto done;
}
/* Set the node tags */
} else {
adisc_support = 0;
}
/* Check ADISC support level */
switch (adisc_support) {
case 0: /* No support - Flush all IO to all matching nodes */
for (;;) {
/*
* We need to hold the locks this way because
* EMLXS_SLI_UNREG_NODE and the flush routines enter the
* same locks. Also, when we release the lock the list
* can change out from under us.
*/
/* Find first node */
action = 0;
for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
continue;
}
/*
* Check for any device that matches
* our mask
*/
if (linkdown) {
action = 1;
break;
} else { /* Must be an RSCN */
action = 2;
break;
}
}
}
if (action) {
break;
}
}
/* Check if nothing was found */
if (action == 0) {
break;
} else if (action == 1) {
} else if (action == 2) {
#ifdef DHCHAP_SUPPORT
#endif /* DHCHAP_SUPPORT */
/*
* Close the node for any further normal IO
* A PLOGI will reopen the node
*/
/* Flush tx queue */
/* Flush chip queue */
}
}
break;
case 1: /* Partial support - Flush IO for non-FCP2 matching nodes */
for (;;) {
/*
* We need to hold the locks this way because
* EMLXS_SLI_UNREG_NODE and the flush routines enter the
* same locks. Also, when we release the lock the list
* can change out from under us.
*/
action = 0;
for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
continue;
}
/*
* Check for special FCP2 target device
* that matches our mask
*/
if ((nlp->nlp_fcp_info &
(nlp-> nlp_fcp_info &
NLP_FCP_2_DEVICE) &&
aff_d_id) {
action = 3;
break;
}
/*
* Check for any other device that
* matches our mask
*/
aff_d_id) {
if (linkdown) {
action = 1;
break;
} else { /* Must be an RSCN */
action = 2;
break;
}
}
}
if (action) {
break;
}
}
/* Check if nothing was found */
if (action == 0) {
break;
} else if (action == 1) {
} else if (action == 2) {
#ifdef DHCHAP_SUPPORT
#endif /* DHCHAP_SUPPORT */
/*
* Close the node for any further normal IO
* A PLOGI will reopen the node
*/
/* Flush tx queue */
/* Flush chip queue */
unreg_vpi = 0;
(void) emlxs_rpi_pause_notify(port,
}
#ifdef DHCHAP_SUPPORT
#endif /* DHCHAP_SUPPORT */
/*
* Close the node for any further normal IO
* An ADISC or a PLOGI will reopen the node
*/
((linkdown) ? 0 : 60));
/* Flush tx queues except for FCP ring */
/* Flush chip queues except for FCP ring */
(void) emlxs_chipq_node_flush(port,
(void) emlxs_chipq_node_flush(port,
(void) emlxs_chipq_node_flush(port,
}
}
break;
case 2: /* Full support - Hold FCP IO to FCP target matching nodes */
break;
}
for (;;) {
/*
* We need to hold the locks this way because
* EMLXS_SLI_UNREG_NODE and the flush routines enter the
* same locks. Also, when we release the lock the list
* can change out from under us.
*/
action = 0;
for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
continue;
}
/*
* Check for FCP target device that
* matches our mask
*/
if ((nlp-> nlp_fcp_info &
aff_d_id) {
action = 3;
break;
}
/*
* Check for any other device that
* matches our mask
*/
aff_d_id) {
if (linkdown) {
action = 1;
break;
} else { /* Must be an RSCN */
action = 2;
break;
}
}
}
if (action) {
break;
}
}
/* Check if nothing was found */
if (action == 0) {
break;
} else if (action == 1) {
} else if (action == 2) {
/*
* Close the node for any further normal IO
* A PLOGI will reopen the node
*/
/* Flush tx queue */
/* Flush chip queue */
unreg_vpi = 0;
(void) emlxs_rpi_pause_notify(port,
}
/*
* Close the node for any further normal IO
* An ADISC or a PLOGI will reopen the node
*/
((linkdown) ? 0 : 60));
/* Flush tx queues except for FCP ring */
/* Flush chip queues except for FCP ring */
(void) emlxs_chipq_node_flush(port,
(void) emlxs_chipq_node_flush(port,
(void) emlxs_chipq_node_flush(port,
}
}
break;
} /* switch() */
done:
if (unreg_vpi) {
(void) emlxs_mb_unreg_vpi(port);
}
return (0);
} /* emlxs_port_offline() */
extern void
{
/*
* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
* "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
*/
return;
}
return;
}
/* Check for mode */
/* Set the node tags */
/* The RPI was paused in port_offline */
(void) emlxs_rpi_resume_notify(vport,
}
}
} else {
}
/* Check for loop topology */
} else {
}
/* Set the link speed */
case 0:
break;
case LA_1GHZ_LINK:
break;
case LA_2GHZ_LINK:
break;
case LA_4GHZ_LINK:
break;
case LA_8GHZ_LINK:
break;
case LA_10GHZ_LINK:
break;
case LA_16GHZ_LINK:
break;
default:
break;
}
npiv_linkup = 0;
update = 0;
update = 1;
npiv_linkup = 1;
}
}
if (update) {
} else if (npiv_linkup) {
&emlxs_npiv_link_up_msg, "%s%s%s",
}
}
#ifdef SFCT_SUPPORT
}
#endif /* SFCT_SUPPORT */
} else {
} else if (npiv_linkup) {
&emlxs_npiv_link_up_msg, "%s%s%s *",
}
}
/* Check for waiting threads */
}
}
/* Flush any pending ub buffers */
}
return;
} /* emlxs_port_online() */
/* SLI3 */
extern void
{
int i;
}
/* Set scope */
/* Filter hba flags */
hba->discovery_timer = 0;
hba->linkup_timer = 0;
for (i = 0; i < MAX_VPORTS; i++) {
continue;
}
}
return;
} /* emlxs_linkdown() */
/* SLI3 */
extern void
{
/* Check for any mode changes */
#ifdef MENLO_SUPPORT
/*
* Trigger linkup CV and don't start linkup & discovery
* timers
*/
return;
}
#endif /* MENLO_SUPPORT */
/* Set the linkup & discovery timers */
return;
} /* emlxs_linkup() */
/*
* emlxs_reset_link
*
* Description:
* Called to reset the link with an init_link
*
* Returns:
*
*/
extern int
{
int rval = 0;
int tmo;
int rc;
/*
* Get a buffer to use for the mailbox command
*/
== NULL) {
"Unable to allocate mailbox buffer.");
rval = 1;
goto reset_link_fail;
}
if (linkup) {
"Resetting link...");
} else {
"Disabling link...");
}
/* Bring link down first */
if (wait) {
} else {
wait = MBX_NOWAIT;
}
(rc != MBXERR_LINK_DOWN)) {
rval = 1;
goto reset_link_fail;
}
tmo = 120;
do {
tmo--;
if (!tmo) {
rval = 1;
"Linkdown timeout.");
goto reset_link_fail;
}
if (linkup) {
/*
* Setup and issue mailbox INITIALIZE LINK command
*/
if (wait == MBX_NOWAIT) {
== NULL) {
"Unable to allocate mailbox buffer.");
rval = 1;
goto reset_link_fail;
}
} else {
/* Reuse mbq from previous mbox */
}
/* Clear the loopback mode */
hba->loopback_tics = 0;
rval = 1;
goto reset_link_fail;
}
}
}
return (rval);
} /* emlxs_reset_link() */
extern int
{
uint32_t i = 0;
/* Make sure adapter is offline or exit trying (30 seconds) */
while (i++ < 30) {
/* Check if adapter is already going online */
return (0);
}
/* Check again */
return (0);
}
/* Check if adapter is offline */
/* Mark it going online */
/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
break;
}
BUSYWAIT_MS(1000);
}
"Going online...");
rval);
/* Set FC_OFFLINE_MODE */
return (rval);
}
/* Start the timer */
/* Set FC_ONLINE_MODE */
#ifdef SFCT_SUPPORT
(void) emlxs_fct_port_initialize(port);
}
#endif /* SFCT_SUPPORT */
return (rval);
} /* emlxs_online() */
extern int
{
uint32_t i = 0;
/* Make sure adapter is online or exit trying (30 seconds) */
while (i++ < 30) {
/* Check if adapter is already going offline */
return (0);
}
/* Check again */
return (0);
}
/* Check if adapter is online */
/* Mark it going offline */
/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
break;
}
BUSYWAIT_MS(1000);
}
"Going offline...");
/* Declare link down */
} else {
}
#ifdef SFCT_SUPPORT
(void) emlxs_fct_port_shutdown(port);
}
#endif /* SFCT_SUPPORT */
/* Check if adapter was shutdown */
/*
* Force mailbox cleanup
* This will wake any sleeping or polling threads
*/
}
/* Pause here for the IO to settle */
/* Unregister all nodes */
#ifdef FMA_SUPPORT
/* Access handle validation */
#endif /* FMA_SUPPORT */
}
/* Stop the timer */
/* For safety flush every iotag list */
if (emlxs_iotag_flush(hba)) {
/* Pause here for the IO to flush */
}
/* Wait for poll command request to settle */
while (hba->io_poll_count > 0) {
}
/* Shutdown the adapter interface */
rval = 0;
done:
return (rval);
} /* emlxs_offline() */
extern int
{
#ifdef FMA_SUPPORT
#endif /* FMA_SUPPORT */
return (rval);
}
#ifdef FMA_SUPPORT
!= DDI_FM_OK) {
return (1);
}
#endif /* FMA_SUPPORT */
return (0);
} /* End emlxs_power_down */
extern int
{
#ifdef FMA_SUPPORT
#endif /* FMA_SUPPORT */
#ifdef FMA_SUPPORT
!= DDI_FM_OK) {
return (1);
}
#endif /* FMA_SUPPORT */
/* Bring adapter online */
/* Put chip in D3 state */
}
return (rval);
}
return (rval);
} /* emlxs_power_up() */
/*
*
* NAME: emlxs_ffcleanup
*
* FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
*
* EXECUTION ENVIRONMENT: process only
*
* CALLED FROM: CFG_TERM
*
* INPUT: hba - pointer to the dev_ctl area.
*
* RETURNS: none
*/
extern void
{
uint32_t i;
/* Disable all but the mailbox interrupt */
/* Make sure all port nodes are destroyed */
for (i = 0; i < MAX_VPORTS; i++) {
if (port->node_count) {
(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
}
}
/* Clear all interrupt enable conditions */
return;
} /* emlxs_ffcleanup() */
extern uint16_t
{
uint32_t i;
"Pkt already registered! channel=%d iotag=%d sbp=%p",
}
iotag = 0;
}
break;
}
iotag = 0;
}
/*
* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
* "register_pkt: channel=%d iotag=%d sbp=%p",
* cp->channelno, iotag, sbp);
*/
return (iotag);
} /* emlxs_register_pkt() */
extern emlxs_buf_t *
{
/* Check the iotag range */
return (NULL);
}
/* Remove the sbp from the table */
return (sbp);
}
/* Clean up the sbp */
hba->channel_tx_count--;
}
}
}
return (sbp);
} /* emlxs_unregister_pkt() */
/* Flush all IO's to all nodes for a given IO Channel */
extern uint32_t
{
Q abort;
uint32_t i;
/* While a node needs servicing */
/* Check if priority queue is not empty */
/* Transfer all iocb's to local queue */
} else {
}
}
/* Check if tx queue is not empty */
/* Transfer all iocb's to local queue */
} else {
}
}
/* Clear the queue pointers */
/* Remove node from service queue */
/* If this is the last node on list */
} else {
/* Remove node from head */
}
/* Clear node */
}
/* First cleanup the iocb's while still holding the lock */
while (iocbq) {
/* Free the IoTag and the bmp */
if (sbp) {
}
} else {
}
/*
* If the fpkt is already set, then we will leave it
* alone. This ensures that this pkt is only accounted
* for on one fpkt->flush_count
*/
fpkt->flush_count++;
}
}
} /* end of while */
/* Now abort the iocb's */
while (iocbq) {
/* Save the next iocbq for now */
/* Unlink this iocbq */
/* Get the pkt */
if (sbp) {
} else {
IOERR_LINK_DOWN, 1);
}
}
/* Free the iocb and its associated buffers */
else {
/* SLI3 */
(FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
/* HBA is detaching or offlining */
if (icmd->ULPCOMMAND !=
void *tmp;
for (i = 0;
i < icmd->ULPBDECOUNT;
i++) {
if (mp) {
}
}
}
(void *)iocbq);
} else {
/* repost the unsolicited buffer */
iocbq);
}
}
}
} /* end of while */
/* Now trigger channel service */
continue;
}
}
} /* emlxs_tx_channel_flush() */
/* Flush all IO's on all or a given ring for a given node */
extern uint32_t
{
Q abort;
uint32_t i;
/* Flush all I/O's on tx queue to this target */
ndlp->nlp_active = 0;
}
continue;
}
/* Check if priority queue is not empty */
/* Transfer all iocb's to local queue */
} else {
}
}
}
/* Check if tx queue is not empty */
/* Transfer all iocb's to local queue */
} else {
}
}
/* Clear the queue pointers */
/* If this node was on the channel queue, remove it */
/* If this is the only node on list */
} else {
/*
* This is a little more difficult find the
* previous node in the circular channel queue
*/
}
}
}
/* Clear node */
}
}
/* First cleanup the iocb's while still holding the lock */
while (iocbq) {
/* Free the IoTag and the bmp */
if (sbp) {
}
} else {
}
/*
* If the fpkt is already set, then we will leave it
* alone. This ensures that this pkt is only accounted
* for on one fpkt->flush_count
*/
fpkt->flush_count++;
}
}
} /* end of while */
/* Now abort the iocb's outside the locks */
while (iocbq) {
/* Save the next iocbq for now */
/* Unlink this iocbq */
/* Get the pkt */
if (sbp) {
} else {
IOERR_LINK_DOWN, 1);
}
}
/* Free the iocb and its associated buffers */
else {
/* CMD_CLOSE_XRI_CN should also free the memory */
/* SLI3 */
(FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
/* HBA is detaching or offlining */
if (icmd->ULPCOMMAND !=
void *tmp;
int ch;
for (i = 0;
i < icmd->ULPBDECOUNT;
i++) {
if (mp) {
}
}
}
(void *)iocbq);
} else {
/* repost the unsolicited buffer */
}
/*
* Resend the abort iocbq if any
*/
}
}
} /* end of while */
/* Now trigger channel service */
continue;
}
}
} /* emlxs_tx_node_flush() */
/* Check for IO's on all or a given ring for a given node */
extern uint32_t
{
count = 0;
/* Flush all I/O's on tx queue to this target */
continue;
}
/* Check if priority queue is not empty */
}
/* Check if tx queue is not empty */
}
}
return (count);
} /* emlxs_tx_node_check() */
/* Flush all IO's on the any ring for a given node's lun */
extern uint32_t
{
Q abort;
uint32_t i;
if (lun == EMLXS_LUN_NONE) {
return (0);
}
/* Flush I/O's on txQ to this target's lun */
/* Scan the priority queue first */
while (iocbq) {
/* Check if this IO is for our lun */
/* Remove iocb from the node's ptx queue */
if (next == 0) {
}
if (prev == 0) {
} else {
}
/*
* Add this iocb to our local abort Q
*/
} else {
}
} else {
}
} /* while (iocbq) */
/* Scan the regular queue */
while (iocbq) {
/* Check if this IO is for our lun */
/* Remove iocb from the node's tx queue */
if (next == 0) {
}
if (prev == 0) {
} else {
}
/*
* Add this iocb to our local abort Q
*/
} else {
}
} else {
}
} /* while (iocbq) */
} /* for loop */
/* First cleanup the iocb's while still holding the lock */
while (iocbq) {
/* Free the IoTag and the bmp */
if (sbp) {
}
} else {
}
/*
* If the fpkt is already set, then we will leave it
* alone. This ensures that this pkt is only accounted
* for on one fpkt->flush_count
*/
fpkt->flush_count++;
}
}
} /* end of while */
/* Now abort the iocb's outside the locks */
while (iocbq) {
/* Save the next iocbq for now */
/* Unlink this iocbq */
/* Get the pkt */
if (sbp) {
} else {
IOERR_LINK_DOWN, 1);
}
}
/* Free the iocb and its associated buffers */
else {
/* Should never happen! */
/* SLI3 */
(FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
/* HBA is detaching or offlining */
if (icmd->ULPCOMMAND !=
void *tmp;
int ch;
for (i = 0;
i < icmd->ULPBDECOUNT;
i++) {
if (mp) {
}
}
}
(void *)iocbq);
} else {
/* repost the unsolicited buffer */
}
/*
* Resend the abort iocbq if any
*/
}
}
} /* end of while */
/* Now trigger channel service */
continue;
}
}
} /* emlxs_tx_lun_flush() */
extern void
{
/* Set node to base node by default */
if (sbp) {
}
}
if (lock) {
}
if (sbp) {
} else {
}
if (lock) {
}
} else {
IOERR_LINK_DOWN, 1);
}
return;
} else {
if (lock) {
}
}
return;
}
if (sbp) {
if (lock) {
}
return;
}
hba->channel_tx_count++;
}
/* Check iocbq priority */
/* Add the iocb to the bottom of the node's ptx queue */
} else {
}
} else { /* Normal priority */
/* Add the iocb to the bottom of the node's tx queue */
} else {
}
}
/*
* Check if the node is not already on channel queue and
* (is not closed or is a priority request)
*/
/* If so, then add it to the channel queue */
/*
* If this is not the base node then add it
* to the tail
*/
} else { /* Otherwise, add it to the head */
/* The command node always gets priority */
}
} else {
}
}
/* Adjust the channel timeout timer */
if (lock) {
}
return;
} /* emlxs_tx_put() */
extern IOCBQ *
{
if (lock) {
}
/* Check if a node needs servicing */
/* Get next iocb from node's priority queue */
/* Check if this is last entry */
} else {
/* Remove iocb from head */
}
}
/* Get next iocb from node tx queue if node not closed */
/* Check if this is last entry */
} else {
/* Remove iocb from head */
}
}
/* Now deal with node itself */
/* Check if node still needs servicing */
/*
* If this is the base node, then don't shift the
* pointers. We want to drain the base node before
* moving on
*/
/*
* Just shift channel queue pointers to next
* node
*/
}
} else {
/* Remove node from channel queue */
/* If this is the last node on list */
} else {
/* Remove node from head */
}
/* Clear node */
}
/*
* If no iocbq was found on this node, then it will have
* been removed. So try again.
*/
if (!iocbq) {
goto begin;
}
if (sbp) {
/*
* Check flags before we enter mutex in case this
* has been flushed and destroyed
*/
(PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
goto begin;
}
(PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
goto begin;
}
hba->channel_tx_count--;
}
}
if (iocbq) {
}
/* Adjust the ring timeout timer */
if (lock) {
}
return (iocbq);
} /* emlxs_tx_get() */
/*
* Move all cmds from from_rp's txq to to_rp's txq for ndlp.
* The old IoTag has to be released, the new one has to be
* allocated. Others no change
* TX_CHANNEL lock is held
*/
extern void
{
if (lock) {
}
/* Scan the ndlp's fchanno txq to get the iocb of fcp cmd */
while (iocbq) {
/* Check if this iocb is fcp cmd */
switch (iocb->ULPCOMMAND) {
/* FCP commands */
case CMD_FCP_ICMND_CR:
case CMD_FCP_ICMND_CX:
case CMD_FCP_IREAD_CR:
case CMD_FCP_IREAD_CX:
case CMD_FCP_IWRITE_CR:
case CMD_FCP_IWRITE_CX:
case CMD_FCP_ICMND64_CR:
case CMD_FCP_ICMND64_CX:
case CMD_FCP_IREAD64_CR:
case CMD_FCP_IREAD64_CX:
case CMD_FCP_IWRITE64_CR:
case CMD_FCP_IWRITE64_CX:
/* We found a fcp cmd */
break;
default:
/* this is not fcp cmd continue */
continue;
}
/* found a fcp cmd iocb in fchanno txq, now deque it */
/* This is the last iocbq */
}
/* This is the first one then remove it from head */
} else {
}
/* Add this iocb to our local toberemovedq */
/* This way we do not hold the TX_CHANNEL lock too long */
} else {
}
} /* While (iocbq) */
/* from_chan->nodeq.q_first must be non NULL */
/* nodeq is not empty, now deal with the node itself */
(void *)nlp;
}
} else {
/* If this is the only node on list */
(void *)nlp) {
NULL;
NULL;
} else {
}
/* Clear node */
} else {
count--;
do {
n_next =
break;
}
} while (count--);
if (count != 0) {
if (n_next ==
n_prev->
=
((NODELIST *)
[fchanno];
} else {
n_prev->
=
[fchanno];
}
/* Clear node */
NULL;
}
}
}
}
}
/* Now cleanup the iocb's */
while (iocbq) {
/* Free the IoTag and the bmp */
if (sbp) {
}
} else {
}
/*
* If the fpkt is already set, then we will leave it
* alone. This ensures that this pkt is only accounted
* for on one fpkt->flush_count
*/
fpkt->flush_count++;
}
}
} /* end of while */
while (iocbq) {
/* Save the next iocbq for now */
/* Unlink this iocbq */
/* Get the pkt */
if (sbp) {
} else {
IOERR_LINK_DOWN, 1);
}
}
/* Free the iocb and its associated buffers */
else {
/* SLI3 */
(FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
/* HBA is detaching or offlining */
if (icmd->ULPCOMMAND !=
void *tmp;
int ch;
for (i = 0;
i < icmd->ULPBDECOUNT;
i++) {
if (mp) {
hba,
tmp);
}
}
}
(void *)iocbq);
} else {
/* repost the unsolicited buffer */
}
}
}
} /* end of while */
/* Now flush the chipq if any */
}
if (lock) {
}
return;
} /* emlxs_tx_move */
extern uint32_t
{
Q abort;
continue;
}
fpkt);
}
}
} /* for */
/* Now put the iocb's on the tx queue */
while (iocbq) {
/* Save the next iocbq for now */
/* Unlink this iocbq */
/* Send this iocbq */
}
/* Now trigger channel service */
continue;
}
}
} /* emlxs_chipq_node_flush() */
/* Flush all IO's left on all iotag lists */
extern uint32_t
{
Q abort;
count = 0;
/* Check if the slot is empty */
continue;
}
/* We are building an abort list per channel */
continue;
}
/* Check if IO is valid */
"iotag_flush: Invalid IO found. iotag=%d",
iotag);
continue;
}
/* Set IOCB status */
"iotag_flush: iotag=%d sbp=%p "
"xrip=%p state=%x flag=%x",
} else {
"iotag_flush: iotag=%d sbp=%p "
}
} else {
/* Clean up the sbp */
hba->channel_tx_count --;
}
}
}
}
/* At this point all nodes are assumed destroyed */
/* Add this iocb to our local abort Q */
} else {
}
}
/* Trigger deferred completion */
} else {
}
"iotag_flush: channel=%d count=%d",
}
}
return (count);
} /* emlxs_iotag_flush() */
/* Checks for IO's on all or a given channel for a given node */
extern uint32_t
{
count = 0;
continue;
}
count++;
}
}
} /* for */
return (count);
} /* emlxs_chipq_node_check() */
/* Flush all IO's for a given node's lun (on any channel) */
extern uint32_t
{
Q abort;
if (lun == EMLXS_LUN_NONE) {
return (0);
}
}
}
/* Now put the iocb's on the tx queue */
while (iocbq) {
/* Save the next iocbq for now */
/* Unlink this iocbq */
/* Send this iocbq */
}
/* Now trigger channel service */
continue;
}
}
} /* emlxs_chipq_lun_flush() */
/*
* Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
* This must be called while holding the EMLXS_FCTAB_LOCK
*/
extern IOCBQ *
{
return (NULL);
}
/*
* set up an iotag using special Abort iotags
*/
}
/* Try to issue abort by XRI if possible */
} else {
}
} else {
}
return (iocbq);
} /* emlxs_create_abort_xri_cn() */
/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
{
return (NULL);
}
/*
* set up an iotag using special Abort iotags
*/
}
} else {
}
return (iocbq);
} /* emlxs_create_abort_xri_cx() */
/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
{
return (NULL);
}
/*
* set up an iotag using special Abort iotags
*/
}
/* Try to issue close by XRI if possible */
} else {
}
} else {
}
return (iocbq);
} /* emlxs_create_close_xri_cn() */
/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
{
return (NULL);
}
/*
* set up an iotag using special Abort iotags
*/
}
} else {
}
return (iocbq);
} /* emlxs_create_close_xri_cx() */
void
{
return;
}
"Closing ELS exchange: xid=%x", rxid);
return;
}
}
/* Create the abort IOCB */
if (iocbq) {
"Closing ELS exchange: xid=%x iotag=%d", rxid,
}
} /* emlxs_close_els_exchange() */
void
{
return;
}
"Aborting ELS exchange: xid=%x", rxid);
/* We have no way to abort unsolicited exchanges */
/* that we have not responded to at this time */
/* So we will return for now */
return;
}
}
/* Create the abort IOCB */
} else {
}
if (iocbq) {
"Aborting ELS exchange: xid=%x iotag=%d", rxid,
}
} /* emlxs_abort_els_exchange() */
void
{
return;
}
"Aborting CT exchange: xid=%x", rxid);
/* We have no way to abort unsolicited exchanges */
/* that we have not responded to at this time */
/* So we will return for now */
return;
}
}
/* Create the abort IOCB */
} else {
}
if (iocbq) {
"Aborting CT exchange: xid=%x iotag=%d", rxid,
}
} /* emlxs_abort_ct_exchange() */
/* This must be called while holding the EMLXS_FCTAB_LOCK */
static void
{
/* Create the close XRI IOCB */
} else {
}
/*
* Add this iocb to our local abort Q
* This way we don't hold the CHIPQ lock too long
*/
if (iocbq) {
} else {
}
}
/* set the flags */
sbp->abort_attempts++;
/*
* If the fpkt is already set, then we will leave it alone
* This ensures that this pkt is only accounted for on one
* fpkt->flush_count
*/
fpkt->flush_count++;
}
return;
} /* emlxs_sbp_abort_add() */