pmcs_subr.c revision ec2c44b8ba99d683354835779a251ce942c2dddc
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file contains various support routines.
*/
/*
* Local static data
*/
/*
 * Target-map stabilization interval in microseconds (MICROSEC == 1 second
 * in the Solaris DDI). NOTE(review): the consumer is not visible in this
 * fragment -- presumably passed as the stabilization time to
 * scsi_hba_tgtmap_create(); confirm against the tgtmap setup code.
 */
static int tgtmap_usec = MICROSEC;
/*
* SAS Topology Configuration
*/
pmcs_phy_t *);
pmcs_phy_t *);
static void pmcs_lock_phy_impl(pmcs_phy_t *, int);
static void pmcs_unlock_phy_impl(pmcs_phy_t *, int);
static void pmcs_reap_dead_phy(pmcs_phy_t *);
char *reason_string);
/*
* Often used strings
*/
/*
 * Shared format strings for frequently-logged failures; the "%s" is filled
 * with the calling function's name (__func__ is the convention elsewhere in
 * this file). The leading '!' in pmcs_timeo is a cmn_err()-style log-level
 * prefix (log only, no console) -- NOTE(review): confirm against the
 * driver's logging wrapper.
 */
const char pmcs_nowrk[] = "%s: unable to get work structure";
const char pmcs_nomsg[] = "%s: unable to get Inbound Message entry";
const char pmcs_timeo[] = "!%s: command timed out";
extern const ddi_dma_attr_t pmcs_dattr;
/*
* Some Initial setup steps.
*/
int
{
/*
* Check current state. If we're not at READY state,
* we can't go further.
*/
return (-1);
}
"%s: AAP unit not ready (state 0x%x)",
return (-1);
}
/*
* Read the offset from the Message Unit scratchpad 0 register.
* This allows us to read the MPI Configuration table.
*
* Check its signature for validity.
*/
"(register offset=0x%08x, passed offset=0x%08x)", __func__,
return (-1);
}
"BAROFF=0x%08x, passed BAROFF=0x%08x)", __func__,
return (-1);
}
"%s: Bad MPI Configuration Table Signature 0x%x", __func__,
return (-1);
}
"%s: Bad MPI Configuration Revision 0x%x", __func__,
return (-1);
}
/*
* Generate offsets for the General System, Inbound Queue Configuration
* and Outbound Queue configuration tables. This way the macros to
* access those tables will work correctly.
*/
PMCS_NIQ);
return (-1);
}
PMCS_NOQ);
return (-1);
}
__func__);
return (-1);
}
}
}
/*
* Verify that ioq_depth is valid (> 0 and not so high that it
* would cause us to overrun the chip with commands).
*/
"%s: I/O queue depth set to 0. Setting to %d",
}
"%s: I/O queue depth set too low (%d). Setting to %d",
}
"%s: I/O queue depth set too high (%d). Setting to %d",
}
/*
* Allocate consistent memory for OQs and IQs.
*/
/*
* The Rev C chip has the ability to do PIO to or from consistent
* memory anywhere in a 64 bit address space, but the firmware is
* not presently set up to do so.
*/
for (i = 0; i < PMCS_NIQ; i++) {
&pwp->iqp_acchdls[i],
"Failed to setup DMA for iqp[%d]", i);
return (-1);
}
}
for (i = 0; i < PMCS_NOQ; i++) {
&pwp->oqp_acchdls[i],
"Failed to setup DMA for oqp[%d]", i);
return (-1);
}
}
/*
* Install the IQ and OQ addresses (and null out the rest).
*/
if (i < PMCS_NIQ) {
if (i != PMCS_IQ_OTHER) {
} else {
(PMCS_QENTRY_SIZE << 16));
}
} else {
}
}
if (i < PMCS_NOQ) {
} else {
}
}
/*
* Set up logging, if defined.
*/
}
/*
* Interrupt vectors, outbound queues, and odb_auto_clear
*
* If we got 4 interrupt vectors, we'll assign one to each outbound
* queue as well as the fatal interrupt, and auto clear can be set
* for each.
*
* If we only got 2 vectors, one will be used for I/O completions
* and the other for the other two vectors. In this case, auto_
* interrupt will be mapped to the PMCS_FATAL_INTERRUPT bit, which
* is not an interrupt vector.
*
* If we only got 1 interrupt vector, auto_clear must be set to 0,
* and again the fatal interrupt will be mapped to the
* PMCS_FATAL_INTERRUPT bit (again, not an interrupt vector).
*/
case PMCS_INT_MSIX:
case PMCS_INT_MSI:
case 1:
pwp->odb_auto_clear = 0;
break;
case 2:
(1 << PMCS_MSIX_IODONE);
break;
case 4:
(PMCS_MSIX_FATAL << PMCS_FERIV_SHIFT));
(1 << PMCS_MSIX_EVENTS);
break;
}
break;
case PMCS_INT_FIXED:
pwp->odb_auto_clear = 0;
break;
}
/*
* Enable Interrupt Reassertion
* Default Delay 1000us
*/
if ((ferr & PMCS_MPI_IRAE) == 0) {
}
return (0);
}
/*
* Start the Message Passing protocol with the PMC chip.
*/
int
{
int i;
for (i = 0; i < 1000; i++) {
PMCS_MSGU_IBDB_MPIINI) == 0) {
break;
}
drv_usecwait(1000);
}
return (-1);
}
drv_usecwait(500000);
/*
* Check to make sure we got to INIT state.
*/
"DBCLR 0x%x)", __func__,
return (-1);
}
return (0);
}
/*
* Stop the Message Passing protocol with the PMC chip.
*/
int
{
int i;
}
}
for (i = 0; i < 2000; i++) {
PMCS_MSGU_IBDB_MPICTU) == 0) {
break;
}
drv_usecwait(1000);
}
return (-1);
}
return (0);
}
/*
* Do a sequence of ECHO messages to test for MPI functionality,
* all inbound and outbound queue functionality and interrupts.
*/
int
{
int iterations;
/*
* We want iterations to be max_cmd * 3 to ensure that we run the
* echo test enough times to iterate through every inbound queue
* at least twice.
*/
echo_total = 0;
count = 0;
while (count < iterations) {
rval = -1;
break;
}
rval = -1;
break;
}
if (iqe == PMCS_IQ_OTHER) {
/* This is on the high priority queue */
} else {
}
echo_start = gethrtime();
iqe = 0;
}
iqo = 0;
}
if (result) {
"%s: command timed out on echo test #%d",
rval = -1;
break;
}
}
/*
* The intr_threshold is adjusted by PMCS_INTR_THRESHOLD in order to
* remove the overhead of things like the delay in getting signaled
* for completion.
*/
if (echo_total != 0) {
}
return (rval);
}
/*
* Start the (real) phys
*/
int
{
int result;
return (0);
}
return (-1);
}
return (-1);
}
if (pwp->separate_ports) {
} else {
}
if (result) {
} else {
}
return (0);
}
int
{
int i;
if (pmcs_start_phy(pwp, i,
return (-1);
}
if (pmcs_clear_diag_counters(pwp, i)) {
"reset counters on PHY (%d)", __func__, i);
}
}
}
return (0);
}
/*
* Called with PHY locked
*/
int
{
const char *mbar;
if (level > 0) {
}
return (ENOMEM);
}
/*
* If level > 0, we need to issue an SMP_REQUEST with a PHY_CONTROL
* function to do either a link reset or hard reset. If level == 0,
* root (local) PHY
*/
if (level) {
stsoff = 2;
/*
*/
iomb[5] = 0;
if (type == PMCS_PHYOP_HARD_RESET) {
(PMCS_PHYOP_HARD_RESET << 16));
} else {
(PMCS_PHYOP_LINK_RESET << 16));
}
"%s: sending %s to %s for phy 0x%x",
amt = 7;
} else {
/*
* Unlike most other Outbound messages, status for
* a local phy operation is in DWORD 3.
*/
stsoff = 3;
if (type == PMCS_PHYOP_LINK_RESET) {
mbar = "LOCAL PHY LINK RESET";
} else {
mbar = "LOCAL PHY HARD RESET";
}
amt = 3;
}
return (ENOMEM);
}
if (result) {
"%s: Unable to issue SMP abort for htag 0x%08x",
} else {
"%s: Issuing SMP ABORT for htag 0x%08x",
}
return (EIO);
}
if (status != PMCOUT_STATUS_OK) {
char buf[32];
status);
}
return (EIO);
}
return (0);
}
/*
* Stop the (real) phys. No PHY or softstate locks are required as this only
* happens during detach.
*/
void
{
int result;
return;
}
return;
}
return;
}
/*
* Make this unconfigured now.
*/
if (result) {
}
}
pptr->configured = 0;
}
/*
* No locks should be required as this is only called during detach
*/
void
{
int i;
pmcs_stop_phy(pwp, i);
}
}
}
/*
* Run SAS_DIAG_EXECUTE with cmd and cmd_desc passed.
* ERR_CNT_RESET: return status of cmd
* DIAG_REPORT_GET: return value of the counter
*/
int
{
int result;
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
if (result) {
return (DDI_FAILURE);
}
/* Return for counter reset */
if (cmd == PMCS_ERR_CNT_RESET)
return (status);
/* Return for counter value */
if (status) {
return (DDI_FAILURE);
}
}
/* Get the current value of the counter for desc on phynum and return it. */
int
{
}
/* Clear all of the counters for phynum. Returns the status of the command. */
int
{
return (DDI_FAILURE);
return (DDI_FAILURE);
return (DDI_FAILURE);
return (DDI_FAILURE);
return (DDI_SUCCESS);
}
/*
* Get firmware timestamp
*/
int
{
int result;
return (-1);
}
return (-1);
}
if (result) {
return (-1);
}
return (0);
}
/*
* Dump all pertinent registers
*/
void
{
int i;
"OBDB (intr): 0x%08x (mask): 0x%08x (clear): 0x%08x",
for (i = 0; i < PMCS_NIQ; i++) {
}
for (i = 0; i < PMCS_NOQ; i++) {
}
"GST TABLE BASE: 0x%08x (STATE=0x%x QF=%d GSTLEN=%d HMI_ERR=0x%x)",
PMCS_HMI_ERR(val));
if (pinfo & 1) {
started = 1;
}
"GST TABLE PHY%d STARTED=%d LINK=%d RERR=0x%08x",
}
}
/*
* Handle SATA Abort and other error processing
*/
int
{
int r, level = 0;
while (pptr) {
/*
* XXX: Need to make sure this doesn't happen
* XXX: when non-NCQ commands are running.
*/
if (pptr->need_rl_ext) {
goto next_phy;
}
if (r == ENOMEM) {
goto next_phy;
}
if (r) {
if (r == ENOMEM) {
goto next_phy;
}
/* what if other failures happened? */
pptr->abort_sent = 0;
}
}
goto next_phy;
}
pptr->abort_pending = 0;
goto next_phy;
}
} else {
}
}
}
return (0);
}
/*
* Register a device (get a device handle for it).
* Called with PHY lock held.
*/
int
{
int result = 0;
goto out;
}
tmp = PMCS_DEVREG_TLR |
if (IS_ROOT_PHY(pptr)) {
} else {
}
if (IS_ROOT_PHY(pptr)) {
} else {
}
} else {
}
if (result) {
goto out;
}
switch (status) {
case PMCS_DEVREG_OK:
goto out;
} else if (status != PMCS_DEVREG_OK) {
"%s: phy %s already has bogus devid 0x%x",
goto out;
} else {
"%s: phy %s already has a device id 0x%x",
}
}
break;
default:
goto out;
}
out:
return (result);
}
/*
* Deregister a device (remove a device handle).
* Called with PHY locked.
*/
void
{
int result;
return;
}
return;
}
if (result) {
return;
}
if (status != PMCOUT_STATUS_OK) {
} else {
pptr->valid_device_id = 0;
}
}
/*
* Deregister all registered devices.
*/
void
{
/*
* Start at the maximum level and walk back to level 0. This only
* gets done during detach after all threads and timers have been
* destroyed, so there's no need to hold the softstate or PHY lock.
*/
while (phyp) {
}
if (phyp->valid_device_id) {
}
}
}
/*
* Perform a 'soft' reset on the PMC chip
*/
int
{
int i;
/*
* Disable interrupts
*/
if (pwp->locks_initted) {
}
/*
* Step 1
*/
if ((s2 & PMCS_MSGU_HOST_SOFT_RESET_READY) == 0) {
for (i = 0; i < 100; i++) {
if (s2) {
break;
}
drv_usecwait(10000);
}
if (s2 == 0) {
"SOFT_RESET_READY never came ready", __func__);
PMCS_MSGU_CPU_SOFT_RESET_READY) == 0 ||
PMCS_MSGU_CPU_SOFT_RESET_READY) == 0) {
if (pwp->locks_initted) {
}
return (-1);
}
}
}
/*
* Step 2
*/
drv_usecwait(10);
drv_usecwait(10);
drv_usecwait(10);
drv_usecwait(10);
drv_usecwait(10);
drv_usecwait(10);
/*
* Step 3
*/
gsm & ~PMCS_SOFT_RESET_BITS);
/*
* Step 4
*/
rapchk, 0);
wapchk, 0);
wdpchk, 0);
/*
* Step 5
*/
drv_usecwait(100);
/*
* Step 5.5 (Temporary workaround for 1.07.xx Beta)
*/
drv_usecwait(10);
/*
* Step 6
*/
drv_usecwait(10);
/*
* Step 7
*/
/*
* Step 8
*/
drv_usecwait(100);
/*
* Step 9
*/
/*
* Step 10
*/
drv_usecwait(100);
/*
* Step 11
*/
drv_usecwait(10);
/*
* Step 12
*/
drv_usecwait(10);
drv_usecwait(10);
drv_usecwait(10);
/*
* Step 13
*/
/*
* Step 14
*/
drv_usecwait(100);
/*
* Step 15
*/
for (spc = 0, i = 0; i < 1000; i++) {
drv_usecwait(1000);
break;
}
}
"SFR didn't toggle (sfr 0x%x)", spc);
if (pwp->locks_initted) {
}
return (-1);
}
/*
* Step 16
*/
/*
* Wait for up to 5 seconds for AAP state to come either ready or error.
*/
for (i = 0; i < 50; i++) {
if (spc == PMCS_MSGU_AAP_STATE_ERROR ||
spc == PMCS_MSGU_AAP_STATE_READY) {
break;
}
drv_usecwait(100000);
}
"soft reset failed (state 0x%x)", spc);
if (pwp->locks_initted) {
}
return (-1);
}
if (pwp->locks_initted) {
}
return (0);
}
/*
* Return at this point if we don't need to start up.
*/
if (no_restart) {
return (0);
}
/*
* Clean up various soft state.
*/
}
continue;
}
}
}
for (i = 0; i < PMCS_NIQ; i++) {
pmcs_wr_iqpi(pwp, i, 0);
pmcs_wr_iqci(pwp, i, 0);
}
}
for (i = 0; i < PMCS_NOQ; i++) {
pmcs_wr_oqpi(pwp, i, 0);
pmcs_wr_oqci(pwp, i, 0);
}
}
}
}
/*
* Clear out any leftover commands sitting in the work list
*/
case PMCS_TAG_TYPE_WAIT:
break;
case PMCS_TAG_TYPE_CBACK:
case PMCS_TAG_TYPE_NONE:
break;
default:
break;
}
} else {
/*
* The other states of NIL, READY and INTR
* should not be visible outside of a lock being held.
*/
}
}
/*
* Restore Interrupt Mask
*/
pwp->mpi_table_setup = 0;
/*
* Set up MPI again.
*/
if (pmcs_setup(pwp)) {
msg = "unable to setup MPI tables again";
goto fail_restart;
}
/*
* Restart MPI
*/
if (pmcs_start_mpi(pwp)) {
msg = "unable to restart MPI again";
goto fail_restart;
}
/*
* Run any completions
*/
/*
* Delay
*/
drv_usecwait(1000000);
return (0);
return (-1);
}
/*
* Reset a device or a logical unit.
*/
int
{
int rval = 0;
return (ENXIO);
}
/*
* Some devices do not support SAS_I_T_NEXUS_RESET as
* it is not a mandatory (in SAM4) task management
* function, while LOGIC_UNIT_RESET is mandatory.
*
* The problem here is that we need to iterate over
* all known LUNs to emulate the semantics of
* "RESET_TARGET".
*
* XXX: FIX ME
*/
lun = 0;
}
NULL);
return (EINVAL);
}
} else {
"%s: cannot reset a SMP device yet (%s)",
return (EINVAL);
}
/*
* Now harvest any commands killed by this action
* by issuing an ABORT for all commands on this device.
*
* We do this even if the tmf or reset fails (in case there
* are any dead commands around to be harvested *anyway*).
* We don't have to wait for the abort to complete.
*/
}
return (rval);
}
/*
* Called with PHY locked.
*/
static int
{
if (pptr->valid_device_id == 0) {
/*
* If we changed while registering, punt
*/
return (-1);
}
/*
* If we had a failure to register, check against errors.
* An ENOMEM error means we just retry (temp resource shortage).
*/
return (-1);
}
/*
* An ETIMEDOUT error means we retry (if our counter isn't
* exhausted)
*/
} else {
"%s: Retries exhausted for %s, killing",
pptr->config_stop = 0;
}
return (-1);
}
/*
* Other errors or no valid device id is fatal, but don't
* preclude a future action.
*/
return (-1);
}
}
return (0);
}
int
{
return (B_FALSE);
/* create target map */
"%s: failed to create tgtmap", __func__);
return (B_FALSE);
}
return (B_TRUE);
}
int
{
return (B_FALSE);
/* destroy target map */
return (B_TRUE);
}
/*
* Query the phymap and populate the iport handle passed in.
* Called with iport lock held.
*/
int
{
int phynum;
int inst;
/*
* Query the phymap regarding the phys in this iport and populate
* the iport's phys list. Hereafter this list is maintained via
* port up and down events in pmcs_intr.c
*/
/* Grab the phy pointer from root_phys */
/*
* Set a back pointer in the phy to this iport.
*/
/*
* If this phy is the primary, set a pointer to it on our
* iport handle, and set our portid from it.
*/
if (!pptr->subsidiary) {
}
/*
* Finally, insert the phy into our list
*/
}
return (DDI_SUCCESS);
}
/*
* Return the iport that ua is associated with, or NULL. If an iport is
* returned, it will be held and the caller must release the hold.
*/
static pmcs_iport_t *
{
break;
}
}
return (iport);
}
/*
* Return the iport that pptr is associated with, or NULL.
* If an iport is returned, there is a hold that the caller must release.
*/
{
char *ua;
if (ua) {
if (iport) {
"found iport [0x%p] on ua (%s) for phy [0x%p], "
}
}
return (iport);
}
void
{
/*
* Release a refcnt on this iport. If this is the last reference,
* signal the potential waiter in pmcs_iport_unattach().
*/
}
}
void
{
return;
}
pwp->phymap_active++;
DDI_SUCCESS) {
} else {
"(%d), added iport handle on unit address [%s]", __func__,
}
/* Set the HBA softstate as our private data for this unit address */
/*
* We are waiting on attach for this iport node, unless it is still
* attached. This can happen if a consumer has an outstanding open
* on our iport node, but the port is down. If this is the case, we
* need to configure our iport here for reuse.
*/
if (iport) {
"failed to configure phys on iport [0x%p] at "
}
}
}
void
{
pwp->phymap_active--;
DDI_SUCCESS) {
} else {
"count (%d), removed iport handle on unit address [%s]",
}
return;
}
}
/*
* Top-level discovery function
*/
void
{
return;
}
/* Ensure we have at least one phymap active */
if (pwp->phymap_active == 0) {
"%s: phymap inactive, exiting", __func__);
return;
}
/*
* If no iports have attached, but we have PHYs that are up, we
* are waiting for iport attach to complete. Restart discovery.
*/
if (!pwp->iports_attached) {
"%s: no iports attached, retry discovery", __func__);
return;
}
if (pwp->configuring) {
"%s: configuration already in progress", __func__);
return;
}
"%s: cannot allocate scratch", __func__);
return;
}
/*
* The order of the following traversals is important.
*
* The first one checks for changed expanders.
*
* The second one aborts commands for dead devices and deregisters them.
*
* The third one clears the contents of dead expanders from the tree
*
* The fourth one clears now dead devices in expanders that remain.
*/
/*
* 1. Check expanders marked changed (but not dead) to see if they still
* have the same number of phys and the same SAS address. Mark them,
* their subsidiary phys (if wide) and their descendents dead if
* anything has changed. Check the devices they contain to see if
* *they* have changed. If they've changed from type NOTHING we leave
* them marked changed to be configured later (picking up a new SAS
* address and link rate if possible). Otherwise, any change in type,
* SAS address or removal of target role will cause us to mark them
* (and their descendents) as dead (and cause any pending commands
* and associated devices to be removed).
*
* NOTE: We don't want to bail on discovery if the config has
* changed until *after* we run pmcs_kill_devices.
*/
/*
* 2. Descend the tree looking for dead devices and kill them
* by aborting all active commands and then deregistering them.
*/
goto out;
}
/*
* 3. Check for dead expanders and remove their children from the tree.
* By the time we get here, the devices and commands for them have
* already been terminated and removed.
*
* We do this independent of the configuration count changing so we can
* free any dead device PHYs that were discovered while checking
* expanders. We ignore any subsidiary phys as pmcs_clear_expander
* will take care of those.
*
* NOTE: pmcs_clear_expander requires softstate lock
*/
/*
* Call pmcs_clear_expander for every root PHY. It will
* recurse and determine which (if any) expanders actually
* need to be cleared.
*/
}
/*
* 4. Check for dead devices and nullify them. By the time we get here,
* the devices and commands for them have already been terminated
* and removed. This is different from step 2 in that this just nulls
* phys that are part of expanders that are still here but used to
* be something but are no longer something (e.g., after a pulled
* disk drive). Note that dead expanders had their contained phys
* removed from the tree- here, the expanders themselves are
* nullified (unless they were removed by being contained in another
* expander phy).
*/
/*
* 5. Now check for and configure new devices.
*/
goto restart;
}
out:
/*
* Observation is stable, report what we currently see to
* the tgtmaps for delta processing. Start by setting
* BEGIN on all tgtmaps.
*/
goto restart;
}
} else {
/*
* If config_changed is TRUE, we need to reschedule
* discovery now.
*/
"%s: Config has changed, will re-run discovery", __func__);
}
pwp->configuring = 0;
#ifdef DEBUG
"PHY %s dead=%d changed=%d configured=%d "
}
}
#endif
return;
/* Clean up and restart discovery */
pwp->configuring = 0;
}
/*
* Return any PHY that needs to have scheduled work done. The PHY is returned
* locked.
*/
static pmcs_phy_t *
{
while (pptr) {
return (pptr);
}
if (cphyp) {
return (cphyp);
}
} else {
}
}
return (NULL);
}
/*
* Report current observations to SCSA.
*/
static boolean_t
{
char *ap;
/*
* Observation is stable, report what we currently see to the tgtmaps
* for delta processing. Start by setting BEGIN on all tgtmaps.
*/
/*
* Unless we have at least one phy up, skip this iport.
* Note we don't need to lock the iport for report_skip
* since it is only used here. We are doing the skip so that
* the phymap and iportmap stabilization times are honored -
* giving us the ability to recover port operation within the
* stabilization time without unconfiguring targets using the
* port.
*/
continue; /* skip set_begin */
}
iport->report_skip = 0;
"%s: cannot set_begin tgtmap ", __func__);
return (B_FALSE);
}
"%s: set begin on tgtmap [0x%p]", __func__,
(void *)tgtmap);
}
/*
* Now, cycle through all levels of all phys and report
* observations into their respective tgtmaps.
*/
while (pptr) {
/*
* Skip PHYs that have nothing attached or are dead.
*/
continue;
}
"%s: oops, PHY %s changed; restart discovery",
return (B_FALSE);
}
/*
* Get the iport for this root PHY, then call the helper
* to report observations for this iport's targets
*/
/* No iport for this tgt */
"%s: no iport for this target",
__func__);
continue;
}
if (!iport->report_skip) {
return (B_FALSE);
}
}
}
/*
* The observation is complete, end sets. Note we will skip any
* iports that are active, but have no PHYs in them (i.e. awaiting
* unconfigure). Set to restart discovery if we find this.
*/
if (iport->report_skip)
continue; /* skip set_end */
"%s: cannot set_end tgtmap ", __func__);
return (B_FALSE);
}
"%s: set end on tgtmap [0x%p]", __func__,
(void *)tgtmap);
}
/*
* Now that discovery is complete, set up the necessary
* DDI properties on each iport node.
*/
/* Set up the DDI properties on each phy */
/* Set up the 'attached-port' property on the iport */
/*
* This iport is down, but has not been
* removed from our list (unconfigured).
* Set our value to '0'.
*/
} else {
/* Otherwise, set it to remote phy's wwn */
}
__func__);
}
}
return (B_TRUE);
}
/*
* Report observations into a particular iport's target map
*
* Called with phyp (and all descendents) locked
*/
static boolean_t
{
char *ua;
while (lphyp) {
default: /* Skip unknown PHYs. */
/* for non-root phys, skip to sibling */
goto next_phy;
case SATA:
case SAS:
break;
case EXPANDER:
break;
}
goto next_phy;
}
"iport_observation: adding %s on tgtmap [0x%p] phy [0x%p]",
DDI_SUCCESS) {
return (B_FALSE);
}
return (B_FALSE);
}
}
/* for non-root phys, report siblings too */
if (IS_ROOT_PHY(lphyp)) {
} else {
}
}
return (B_TRUE);
}
/*
* Check for and configure new devices.
*
* If the changed device is a SATA device, add a SATA device.
*
* If the changed device is a SAS device, add a SAS device.
*
* If the changed device is an EXPANDER device, do a REPORT
* GENERAL SMP command to find out the number of contained phys.
*
* For each number of contained phys, allocate a phy, do a
* DISCOVERY SMP command to find out what kind of device it
* is and add it to the linked list of phys on the *next* level.
*
* NOTE: pptr passed in by the caller will be a root PHY
*/
static int
{
int rval = 0;
/*
* First, walk through each PHY at this level
*/
while (pptr) {
/*
* Set the new dtype if it has changed
*/
}
goto next_phy;
}
/*
* Confirm that this target's iport is configured
*/
/* No iport for this tgt, restart */
"%s: iport not yet configured, "
"retry discovery", __func__);
rval = -1;
goto next_phy;
}
case NOTHING:
break;
case SATA:
case SAS:
break;
case EXPANDER:
break;
}
if (pwp->config_changed) {
goto next_phy;
}
}
if (rval != 0) {
return (rval);
}
/*
* Now walk through each PHY again, recalling ourselves if they
* have children
*/
while (pptr) {
if (pchild) {
if (rval != 0) {
break;
}
}
}
return (rval);
}
/*
* Set all phys and descendent phys as changed if changed == B_TRUE, otherwise
* mark them all as not changed.
*
* Called with parent PHY locked.
*/
void
int level)
{
if (level == 0) {
if (changed) {
} else {
}
}
level + 1);
}
} else {
while (pptr) {
if (changed) {
} else {
}
}
level + 1);
}
}
}
}
/*
* Take the passed phy mark it and its descendants as dead.
* Fire up reconfiguration to abort commands and bury it.
*
* Called with the parent PHY locked.
*/
void
{
while (pptr) {
pptr->abort_sent = 0;
pptr->need_rl_ext = 0;
}
}
/*
* Only kill siblings at level > 0
*/
if (level == 0) {
return;
}
}
}
/*
* Go through every PHY and clear any that are dead (unless they're expanders)
*/
static void
{
while (phyp) {
if (IS_ROOT_PHY(phyp)) {
}
}
}
if (IS_ROOT_PHY(phyp)) {
}
}
}
/*
* Clear volatile parts of a phy. Called with PHY locked.
*/
void
{
/* keep sibling */
/* keep children */
/* keep parent */
/* keep hw_event_ack */
/* keep phynum */
pptr->ds_recovery_retries = 0;
/* keep dtype */
pptr->config_stop = 0;
pptr->spinup_hold = 0;
/* keep portid */
pptr->valid_device_id = 0;
pptr->abort_sent = 0;
pptr->abort_pending = 0;
pptr->need_rl_ext = 0;
pptr->subsidiary = 0;
pptr->configured = 0;
/* Only mark dead if it's not a root PHY and its dtype isn't NOTHING */
/* XXX: What about directly attached disks? */
/* keep SAS address */
/* keep path */
/* keep ref_count */
/* Don't clear iport on root PHYs - they are handled in pmcs_intr.c */
if (!IS_ROOT_PHY(pptr)) {
}
/* keep target */
}
/*
* Allocate softstate for this target if there isn't already one. If there
* is, just redo our internal configuration. If it is actually "new", we'll
* soon get a tran_tgt_init for it.
*
* Called with PHY locked.
*/
static void
{
/*
* If the config failed, mark the PHY as changed.
*/
"%s: pmcs_configure_phy failed for phy 0x%p", __func__,
(void *)pptr);
return;
}
/* Mark PHY as no longer changed */
/*
* If the PHY has no target pointer, see if there's a dead PHY that
* matches.
*/
}
/*
* Only assign the device if there is a target for this PHY with a
* matching SAS address. If an iport is disconnected from one piece
* of storage and connected to another within the iport stabilization
*
* Otherwise, it'll get done in tran_tgt_init.
*/
if (!IS_ROOT_PHY(pptr)) {
}
"%s: Not assigning existing tgt %p for PHY %p "
(void *)pptr);
return;
}
"%s: pmcs_assign_device failed for target 0x%p",
}
}
}
/*
* Called with PHY lock held.
*/
static boolean_t
{
char *dtype;
/*
* Mark this device as no longer changed.
*/
/*
* If we don't have a device handle, get one.
*/
return (B_FALSE);
}
case SAS:
dtype = "SAS";
break;
case SATA:
dtype = "SATA";
break;
case EXPANDER:
dtype = "SMP";
break;
default:
dtype = "???";
}
return (B_TRUE);
}
/*
* Called with PHY locked
*/
static void
{
/*
* to resource shortages, we'll set it again. While we're doing
* configuration, other events may set it again as well. If the PHY
* is a root PHY and is currently marked as having changed, reset the
* config_stop timer as well.
*/
}
/*
* Step 2- make sure we don't overflow
*/
"%s: SAS expansion tree too deep", __func__);
return;
}
/*
* Step 3- Check if this expander is part of a wide phy that has
* already been configured.
*
* This is known by checking this level for another EXPANDER device
* with the same SAS address and isn't already marked as a subsidiary
* phy and a parent whose SAS address is the same as our SAS address
* (if there are parents).
*/
if (!IS_ROOT_PHY(pptr)) {
/*
* No need to lock the parent here because we're in discovery
* and the only time a PHY's children pointer can change is
* in discovery; either in pmcs_clear_expander (which has
* already been called) or here, down below. Plus, trying to
* grab the parent's lock here can cause deadlock.
*/
} else {
}
while (ctmp) {
/*
* If we've checked all PHYs up to pptr, we stop. Otherwise,
* we'll be checking for a primary PHY with a higher PHY
* number than pptr, which will never happen. The primary
* PHY on non-root expanders will ALWAYS be the lowest
* numbered PHY.
*/
break;
}
/*
* If pptr and ctmp are root PHYs, just grab the mutex on
* ctmp. No need to lock the entire tree. If they are not
* root PHYs, there is no need to lock since a non-root PHY's
* SAS address and other characteristics can only change in
* discovery anyway.
*/
if (root_phy) {
}
int widephy = 0;
/*
* If these phys are not root PHYs, compare their SAS
* addresses too.
*/
if (!root_phy) {
widephy = 1;
}
} else {
widephy = 1;
}
if (widephy) {
"%s part of wide PHY %s (now %d wide)",
if (root_phy) {
}
return;
}
}
if (root_phy) {
}
}
/*
* Step 4- If we don't have a device handle, get one. Since this
* is the primary PHY, make sure subsidiary is cleared.
*/
pptr->subsidiary = 0;
goto out;
}
/*
* Step 5- figure out how many phys are in this expander.
*/
if (nphy <= 0) {
} else {
"%s: Retries exhausted for %s, killing", __func__,
pptr->config_stop = 0;
}
goto out;
}
/*
* Step 6- Allocate a list of phys for this expander and figure out
* what each one is.
*/
for (i = 0; i < nphy; i++) {
}
if (pwp->config_changed) {
/*
* Clean up the newly allocated PHYs and return
*/
while (clist) {
}
return;
}
/*
* Step 7- Now fill in the rest of the static portions of the phy.
*/
if (ctmp->tolerates_sas2) {
ASSERT(i < SAS2_PHYNUM_MAX);
} else {
ASSERT(i < SAS_PHYNUM_MAX);
}
}
/*
* Step 8- Discover things about each phy in the expander.
*/
if (result <= 0) {
} else {
pptr->config_stop = 0;
"%s: Retries exhausted for %s, killing",
}
goto out;
}
/* Set pend_dtype to dtype for 1st time initialization */
}
/*
* Step 9- Install the new list on the next level. There should be
* no children pointer on this PHY. If there is, we'd need to know
* how it happened (The expander suddenly got more PHYs?).
*/
" to PHY %s: This should never happen", __func__,
goto out;
} else {
}
/*
* We only set width if we're greater than level 0.
*/
}
/*
* Now tell the rest of the world about us, as an SMP node.
*/
out:
while (clist) {
}
}
/*
* 2. Check expanders marked changed (but not dead) to see if they still have
* the same number of phys and the same SAS address. Mark them, their subsidiary
* phys (if wide) and their descendents dead if anything has changed. Check the
* devices they contain to see if *they* have changed. If they've changed
* from type NOTHING we leave them marked changed to be configured later
* (picking up a new SAS address and link rate if possible). Otherwise, any
* change in type, SAS address or removal of target role will cause us to
* mark them (and their descendents) as dead and cause any pending commands
* and associated devices to be removed.
*
* Called with PHY (pptr) locked.
*/
static void
{
/*
* Step 1: Mark phy as not changed. We will mark it changed if we need
* to retry.
*/
/*
* Reset the config_stop time. Although we're not actually configuring
* anything here, we do want some indication of when to give up trying
* if we can't communicate with the expander.
*/
/*
* Step 2: Figure out how many phys are in this expander. If
* pmcs_expander_get_nphy returns 0 we ran out of resources,
* so reschedule and try later. If it returns another error,
* just return.
*/
if (nphy <= 0) {
} else {
pptr->config_stop = 0;
"%s: Retries exhausted for %s, killing", __func__,
}
return;
}
/*
* Step 3: If the number of phys don't agree, kill the old sub-tree.
*/
"%s: number of contained phys for %s changed from %d to %d",
/*
* Force a rescan of this expander after dead contents
* are cleared and removed.
*/
return;
}
/*
* Step 4: if we're at the bottom of the stack, we're done
* (we can't have any levels below us)
*/
return;
}
/*
* Step 5: Discover things about each phy in this expander. We do
* this by walking the current list of contained phys and doing a
* content discovery for it to a local phy.
*/
"%s: No children attached to expander @ %s?", __func__,
return;
}
while (ctmp) {
/*
* Allocate a local PHY to contain the proposed new contents
* and link it to the rest of the local PHYs so that they
* can all be freed later.
*/
if (local_list == NULL) {
local_list = local;
local_tail = local;
} else {
local_tail = local;
}
/*
* Need to lock the local PHY since pmcs_expander_content_
* discovery may call pmcs_clear_phy on it, which expects
* the PHY to be locked.
*/
if (result <= 0) {
} else {
pptr->config_stop = 0;
"%s: Retries exhausted for %s, killing",
}
/*
* Release all the local PHYs that we allocated.
*/
return;
}
}
/*
* Step 6: Compare the local PHY's contents to our current PHY. If
* there are changes, take the appropriate action.
* This is done in two steps (step 5 above, and 6 here) so that if we
* have to bail during this process (e.g. pmcs_expander_content_discover
* fails), we haven't actually changed the state of any of the real
* PHYs. Next time we come through here, we'll be starting over from
* scratch. This keeps us from marking a changed PHY as no longer
* changed, but then having to bail only to come back next time and
* think that the PHY hadn't changed. If this were to happen, we
* would fail to properly configure the device behind this PHY.
*/
local = local_list;
while (ctmp) {
/*
* We set local to local_list prior to this loop so that we
* can simply walk the local_list while we walk this list. The
* two lists should be completely in sync.
*
* Clear the changed flag here.
*/
"type changed from %s to %s (killing)",
/*
* Force a rescan of this expander after dead
* contents are cleared and removed.
*/
} else {
"%s: %s type changed from NOTHING to %s",
}
"device type changed from %d to %d (killing)",
/*
* Force a rescan of this expander after dead
* contents are cleared and removed.
*/
}
/* If the speed changed from invalid, force rescan */
} else {
/* Just update to the new link rate */
}
}
sizeof (ctmp->sas_address)) != 0) {
/*
* Force a rescan of this expander after dead
* contents are cleared and removed.
*/
} else {
"%s: %s looks the same (type %s)",
/*
* If EXPANDER, still mark it changed so we
* re-evaluate its contents. If it's not an expander,
* but it hasn't been configured, also mark it as
* changed so that it will undergo configuration.
*/
!ctmp->configured) {
} else {
/* It simply hasn't changed */
}
}
/*
* If the PHY changed, call pmcs_kill_changed if indicated,
* update its contents to reflect its current state and mark it
* as changed.
*/
if (changed) {
/*
* pmcs_kill_changed will mark the PHY as changed, so
* only do PHY_CHANGED if we did not do kill_changed.
*/
if (kill_changed) {
} else {
/*
* If we're not killing the device, it's not
* dead. Mark the PHY as changed.
*/
"%s: Unmarking PHY %s dead, "
"restarting discovery",
}
}
/*
* If the dtype of this PHY is now NOTHING, mark it as
* unconfigured. Set pend_dtype to what the new dtype
* is. It'll get updated at the end of the discovery
* process.
*/
sizeof (local->sas_address));
ctmp->configured = 0;
} else {
sizeof (local->sas_address));
}
}
}
/*
* If we got to here, that means we were able to see all the PHYs
* and we can now update all of the real PHYs with the information
* we got on the local PHYs. Once that's done, free all the local
* PHYs.
*/
}
/*
* Top level routine to check expanders. We call pmcs_check_expander for
* each expander. Since we're not doing any configuration right now, it
* doesn't matter if this is breadth-first.
*/
static boolean_t
{
/*
* Check each expander at this level
*/
while (phyp && !config_changed) {
phyp->configured) {
}
}
if (config_changed) {
return (config_changed);
}
/*
* Now check the children
*/
while (phyp && !config_changed) {
if (pchild) {
}
}
/*
* We're done
*/
return (config_changed);
}
/*
* Called with softstate and PHY locked
*/
static void
{
while (ctmp) {
/*
* If the expander is dead, mark its children dead
*/
}
}
}
/*
* If this expander is not dead, we're done here.
*/
return;
}
/*
* Now snip out the list of children below us and release them
*/
while (ctmp) {
"%s: dead PHY 0x%p (%s) (ref_count %d)", __func__,
/*
* Put this PHY on the dead PHY list for the watchdog to
* clean up after any outstanding work has completed.
*/
}
/*
* Clear subsidiary phys as well. Getting the parent's PHY lock
* is only necessary if level == 0 since otherwise the parent is
* already locked.
*/
if (!IS_ROOT_PHY(pptr)) {
if (level == 0) {
}
if (level == 0) {
}
} else {
}
while (ctmp) {
continue;
}
/*
* We only need to lock subsidiary PHYs on the level 0
* expander. Any children of that expander, subsidiaries or
* not, will already be locked.
*/
if (level == 0) {
}
sizeof (ctmp->sas_address)) != 0) {
if (level == 0) {
}
continue;
}
if (level == 0) {
}
}
}
/*
* Called with PHY locked and with scratch acquired. We return 0 if
* we fail to allocate resources or notice that the configuration
* count changed while we were running the command. We return
* less than zero if we had an I/O error or received an unsupported
* configuration. Otherwise we return the number of phys in the
* expander.
*/
static int
{
char buf[64];
int result;
ival = 0x40001100;
result = 0;
goto out;
}
__func__);
result = 0;
goto out;
}
/*
* Send SMP REPORT GENERAL (of either SAS1.1 or SAS2 flavors).
*/
msg[5] = 0;
msg[6] = 0;
msg[7] = 0;
msg[8] = 0;
msg[9] = 0;
msg[10] = 0;
msg[11] = 0;
msg[15] = 0;
if (pwp->config_changed) {
result = 0;
goto out;
}
if (result) {
"%s: Unable to issue SMP ABORT for htag 0x%08x",
} else {
"%s: Issuing SMP ABORT for htag 0x%08x",
}
result = 0;
goto out;
}
if (status == PMCOUT_STATUS_UNDERFLOW ||
status == PMCOUT_STATUS_OVERFLOW) {
}
if (status != PMCOUT_STATUS_OK) {
switch (status) {
/* FALLTHROUGH */
/* FALLTHROUGH */
/* FALLTHROUGH */
/* FALLTHROUGH */
/* FALLTHROUGH */
/* FALLTHROUGH */
"%s: expander %s SMP operation failed (%s)",
break;
/*
* For the IO_DS_NON_OPERATIONAL case, we need to kick off
* device state recovery and return 0 so that the caller
* doesn't assume this expander is dead for good.
*/
"%s: expander %s device state non-operational",
"%s: No target to do DS recovery for PHY "
"%p (%s), attempting PHY hard reset",
break;
}
break;
}
default:
break;
}
"%s: bad response frame type 0x%x",
} else if (srf->srf_result != 0) {
/*
* Check to see if we have a value of 3 for failure and
* whether we were using a SAS2.0 allocation length value
* and retry without it.
*/
ival &= ~0xff00;
"%s: err 0x%x with SAS2 request- retry with SAS1",
goto again;
}
} else if (srgr->srgr_configuring) {
"%s: expander at phy %s is still configuring",
result = 0;
} else {
if (ival & 0xff00) {
}
}
out:
return (result);
}
/*
* Called with expander locked (and thus, pptr) as well as all PHYs up to
* the root, and scratch acquired. Return 0 if we fail to allocate resources
* or notice that the configuration changed while we were running the command.
*
* We return less than zero if we had an I/O error or received an
* unsupported configuration.
*/
static int
{
char buf[64];
int result;
result = 0;
goto out;
}
/*
* Send SMP DISCOVER (of either SAS1.1 or SAS2 flavors).
*/
if (expander->tolerates_sas2) {
} else {
}
msg[5] = 0;
msg[7] = 0;
msg[8] = 0;
msg[9] = 0;
msg[10] = 0;
msg[11] = 0;
msg[15] = 0;
result = 0;
goto out;
}
/*
* Drop PHY lock while waiting so other completions aren't potentially
* blocked.
*/
if (pwp->config_changed) {
result = 0;
goto out;
}
if (result) {
"%s: Unable to issue SMP ABORT for htag 0x%08x",
} else {
"%s: Issuing SMP ABORT for htag 0x%08x",
}
goto out;
}
/*
* Point roff to the DMA offset for returned data
*/
if (status == PMCOUT_STATUS_UNDERFLOW ||
status == PMCOUT_STATUS_OVERFLOW) {
}
if (status != PMCOUT_STATUS_OK) {
switch (status) {
/* FALLTHROUGH */
/* FALLTHROUGH */
/* FALLTHROUGH */
/* FALLTHROUGH */
/* FALLTHROUGH */
"%s: expander %s SMP operation failed (%s)",
break;
default:
break;
}
goto out;
"%s: bad response frame type 0x%x",
goto out;
goto out;
/* Need not fail if PHY is Vacant */
if (result != SMP_RES_PHY_VACANT) {
goto out;
}
}
switch (sdr->sdr_attached_device_type) {
case SAS_IF_DTYPE_ENDPOINT:
"exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
if (sdr->sdr_attached_sata_device ||
} else if (sdr->sdr_attached_ssp_target) {
} else if (tgt_support || ini_support) {
"tgt support=%x init support=(%x)",
}
break;
case SAS_IF_DTYPE_EDGE:
case SAS_IF_DTYPE_FANOUT:
"exp_content: %s atdt=0x%x lr=%x is=%x ts=%x SAS="
if (sdr->sdr_attached_smp_target) {
/*
* Avoid configuring phys that just point back
* at a parent phy
*/
"%s: skipping port back to parent "
break;
}
} else if (tgt_support || ini_support) {
"tgt support=%x init support=(%x)",
}
break;
default:
break;
}
/*
* If the attached device is a SATA device and the expander
* is (possibly) a SAS2 compliant expander, check for whether
* there is a NAA=5 WWN field starting at this offset and
* use that for the SAS Address for this device.
*/
} else {
}
/*
* Now run up from the expander's parent up to the top to
* make sure we only use the least common link_rate.
*/
"%s: derating link rate from %x to %x due "
}
}
} else {
}
result = 1;
out:
return (result);
}
/*
* Get a work structure and assign it a tag with type and serial number
* If a structure is returned, it is returned locked.
*/
{
pmcwork_t *p;
if (p == NULL) {
/*
* If we couldn't get a work structure, it's time to bite
* the bullet, grab the pfree_lock and copy over all the
* work structures from the pending free list to the actual
* free list. This shouldn't happen all that often.
*/
if (p == NULL) {
return (NULL);
}
}
mutex_enter(&p->lock);
p->state = PMCS_WORK_STATE_READY;
p->ssp_event = 0;
p->dead = 0;
if (phyp) {
}
return (p);
}
/*
* Called with pwrk lock held. Returned with lock released.
*/
void
{
p->last_state = p->state;
if (p->phy) {
}
p->state = PMCS_WORK_STATE_NIL;
p->htag = PMCS_TAG_FREE;
p->timer = 0;
mutex_exit(&p->lock);
} else {
}
}
/*
* Find a work structure based upon a tag and make sure that the tag
* serial number matches the work structure we've found.
* If a structure is found, its lock is held upon return.
*/
{
pmcwork_t *p;
mutex_enter(&p->lock);
return (p);
}
mutex_exit(&p->lock);
return (NULL);
}
/*
* Issue an abort for a command or for all commands.
*
* Since this can be called from interrupt context,
* we don't wait for completion if wait is not set.
*
* Called with PHY lock held.
*/
int
int wait)
{
if (pptr->abort_all_start) {
return (EBUSY);
}
case SAS:
break;
case SATA:
break;
case EXPANDER:
break;
default:
return (0);
}
pptr);
return (ENOMEM);
}
if (wait) {
}
if (pptr->valid_device_id == 0) {
return (ENODEV);
}
if (all_cmds) {
msg[3] = 0;
} else {
msg[4] = 0;
}
return (ENOMEM);
}
if (all_cmds) {
"%s: aborting all commands for %s device %s. (htag=0x%x)",
msg[1]);
} else {
"%s: aborting tag 0x%x for %s device %s. (htag=0x%x)",
msg[1]);
}
if (!wait) {
return (0);
}
"%s: Abort complete (result=0x%x), but "
"aq not empty (tgt 0x%p), waiting",
}
}
if (all_cmds) {
pptr->abort_all_start = 0;
}
if (result) {
"%s: Abort (htag 0x%08x) request timed out",
"%s: Trying DS error recovery for tgt 0x%p",
(void) pmcs_send_err_recovery_cmd(pwp,
}
}
return (ETIMEDOUT);
}
if (status != PMCOUT_STATUS_OK) {
/*
* The only non-success status are IO_NOT_VALID &
* IO_ABORT_IN_PROGRESS.
* In case of IO_ABORT_IN_PROGRESS, the other ABORT cmd's
* status is of concern and this duplicate cmd status can
* be ignored.
* If IO_NOT_VALID, that's not an error per-se.
* For abort of single I/O complete the command anyway.
* If, however, we were aborting all, that is a problem
* as IO_NOT_VALID really means that the IO or device is
* not there. So, discovery process will take of the cleanup.
*/
if (all_cmds) {
} else {
return (EINVAL);
}
return (0);
}
"%s: Restoring OPERATIONAL dev_state for tgt 0x%p",
(void) pmcs_send_err_recovery_cmd(pwp,
}
}
return (0);
}
/*
* Issue a task management function to an SSP device.
*
* Called with PHY lock held.
* statlock CANNOT be held upon entry.
*/
int
{
static const uint8_t ssp_rsp_evec[] = {
0x58, 0x61, 0x56, 0x72, 0x00
};
return (ENOMEM);
}
/*
* NB: We use the PMCS_OQ_GENERAL outbound queue
* NB: so as to not get entangled in normal I/O
* NB: processing.
*/
} else {
msg[3] = 0;
}
return (ENOMEM);
}
return (EIO);
}
}
"%s: sending '%s' to %s (lun %llu) tag 0x%x", __func__,
/*
* This is a command sent to the target device, so it can take
* significant amount of time to complete when path & device is busy.
* Set a timeout to 20 seconds
*/
if (result) {
return (ETIMEDOUT);
}
return (ETIMEDOUT);
}
if (status != PMCOUT_STATUS_OK) {
"%s: status %s for TMF %s action to %s, lun %llu",
if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
} else if (status == PMCOUT_STATUS_IO_DS_IN_RECOVERY) {
/*
* If the status is IN_RECOVERY, it's an indication
* that it's now time for us to request to have the
* device state set to OPERATIONAL since we're the ones
* that requested recovery to begin with.
*/
} else {
}
"%s: Sending err recovery cmd"
" for tgt 0x%p (status = %s)",
}
}
return (EIO);
} else {
"%s: Sending err recovery cmd"
" for tgt 0x%p (status = %s)",
}
}
}
return (EIO);
}
xd += SAS_RSP_HDR_SIZE;
"%s: TMF response not RESPONSE DATA (0x%x)",
return (EIO);
}
"Bad SAS RESPONSE DATA LENGTH", msg);
return (EIO);
}
/*
* The status is actually in the low-order byte. The upper three
* bytes contain additional information for the TMFs that support them.
* However, at this time we do not issue any of those. In the other
* cases, the upper three bytes are supposed to be 0, but it appears
* they aren't always. Just mask them off.
*/
switch (status & 0xff) {
case SAS_RSP_TMF_COMPLETE:
result = 0;
break;
case SAS_RSP_TMF_SUCCEEDED:
result = 0;
break;
case SAS_RSP_INVALID_FRAME:
"%s: TMF returned INVALID FRAME", __func__);
break;
"%s: TMF returned TMF NOT SUPPORTED", __func__);
break;
case SAS_RSP_TMF_FAILED:
"%s: TMF returned TMF FAILED", __func__);
break;
"%s: TMF returned INCORRECT LUN", __func__);
break;
"%s: TMF returned OVERLAPPED INITIATOR PORT TRANSFER TAG "
"ATTEMPTED", __func__);
break;
default:
break;
}
return (result);
}
/*
* Called with PHY lock held and scratch acquired
*/
int
{
const char *utag_fail_fmt = "%s: untagged NCQ command failure";
const char *tag_fail_fmt = "%s: NCQ command failure (tag 0x%x)";
return (ENOMEM);
}
msg[9] = 0;
msg[10] = 0;
msg[11] = 0;
msg[15] = 0;
return (ENOMEM);
}
if (result) {
return (EIO);
}
"%s: cannot find target for phy 0x%p for "
return (EIO);
}
if ((status == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) ||
} else {
}
" Recovery for tgt(0x%p) for status(%s)",
}
return (EIO);
}
fis[4] = 0;
if (fp[0] & 0x80) {
} else {
fp[0] & 0x1f);
}
pptr->need_rl_ext = 0;
return (0);
}
/*
* Transform a structure from CPU to Device endian format, or
* vice versa, based upon a transformation vector.
*
* A transformation vector is an array of bytes, each byte
* of which is defined thusly:
*
* bit 7: from CPU to desired endian, otherwise from desired endian
* to CPU format
* bit 6: Big Endian, else Little Endian
* bits 5-4:
* 00 Undefined
* 01 One Byte quantities
* 02 Two Byte quantities
* 03 Four Byte quantities
*
* bits 3-0:
* 00 Undefined
* Number of quantities to transform
*
* The vector is terminated by a 0 value.
*/
void
{
return;
}
return;
}
return;
}
while ((c = *xfvec++) != 0) {
int nbyt = (c & 0xf);
switch (size) {
case 1:
{
while (nbyt-- > 0) {
}
break;
}
case 2:
{
while (nbyt-- > 0) {
if (bige) {
} else {
}
}
break;
}
case 3:
{
while (nbyt-- > 0) {
if (bige) {
} else {
}
}
break;
}
default:
return;
}
}
}
/*
 * Translate a SAS link rate code into a human-readable speed string
 * ("1.5", "3.0" or "6.0" Gbit/s).  Any unrecognized rate code yields
 * the placeholder "???".  The returned pointer is a string literal
 * and must not be freed or modified by the caller.
 */
const char *
pmcs_get_rate(unsigned int linkrt)
{
	if (linkrt == SAS_LINK_RATE_1_5GBIT) {
		return ("1.5");
	}
	if (linkrt == SAS_LINK_RATE_3GBIT) {
		return ("3.0");
	}
	if (linkrt == SAS_LINK_RATE_6GBIT) {
		return ("6.0");
	}
	return ("???");
}
const char *
{
switch (type) {
case NOTHING:
return ("NIL");
case SATA:
return ("SATA");
case SAS:
return ("SSP");
case EXPANDER:
return ("EXPANDER");
}
return ("????");
}
/*
 * Translate a SAS task management function (TMF) code into a
 * descriptive string suitable for log messages.  Unrecognized codes
 * map to "Unknown".  The returned pointer is a string literal and
 * must not be freed or modified by the caller.
 */
const char *
pmcs_tmf2str(int tmf)
{
	/* Table of known TMF codes and their display names. */
	static const struct {
		int tmf_code;
		const char *tmf_name;
	} tmf_tab[] = {
		{ SAS_ABORT_TASK,		"Abort Task" },
		{ SAS_ABORT_TASK_SET,		"Abort Task Set" },
		{ SAS_CLEAR_TASK_SET,		"Clear Task Set" },
		{ SAS_LOGICAL_UNIT_RESET,	"Logical Unit Reset" },
		{ SAS_I_T_NEXUS_RESET,		"I_T Nexus Reset" },
		{ SAS_CLEAR_ACA,		"Clear ACA" },
		{ SAS_QUERY_TASK,		"Query Task" },
		{ SAS_QUERY_TASK_SET,		"Query Task Set" },
		{ SAS_QUERY_UNIT_ATTENTION,	"Query Unit Attention" }
	};
	unsigned int i;

	for (i = 0; i < sizeof (tmf_tab) / sizeof (tmf_tab[0]); i++) {
		if (tmf_tab[i].tmf_code == tmf) {
			return (tmf_tab[i].tmf_name);
		}
	}
	return ("Unknown");
}
const char *
{
switch (status) {
case PMCOUT_STATUS_OK:
return ("OK");
case PMCOUT_STATUS_ABORTED:
return ("ABORTED");
case PMCOUT_STATUS_OVERFLOW:
return ("OVERFLOW");
case PMCOUT_STATUS_UNDERFLOW:
return ("UNDERFLOW");
case PMCOUT_STATUS_FAILED:
return ("FAILED");
return ("ABORT_RESET");
return ("IO_NOT_VALID");
case PMCOUT_STATUS_NO_DEVICE:
return ("NO_DEVICE");
return ("ILLEGAL_PARAMETER");
return ("LINK_FAILURE");
case PMCOUT_STATUS_PROG_ERROR:
return ("PROG_ERROR");
return ("EDC_IN_ERROR");
return ("EDC_OUT_ERROR");
return ("ERROR_HW_TIMEOUT");
return ("XFER_ERR_BREAK");
return ("XFER_ERR_PHY_NOT_READY");
return ("OPEN_CNX_PROTOCOL_NOT_SUPPORTED");
return ("OPEN_CNX_ERROR_ZONE_VIOLATION");
return ("OPEN_CNX_ERROR_BREAK");
return ("OPEN_CNX_ERROR_IT_NEXUS_LOSS");
return ("OPENCNX_ERROR_BAD_DESTINATION");
return ("OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED");
return ("OPEN_CNX_ERROR_STP_RESOURCES_BUSY");
return ("OPEN_CNX_ERROR_WRONG_DESTINATION");
return ("OPEN_CNX_ERROR_UNKNOWN_EROOR");
return ("IO_XFER_ERROR_NAK_RECEIVED");
return ("XFER_ERROR_ACK_NAK_TIMEOUT");
return ("XFER_ERROR_PEER_ABORTED");
return ("XFER_ERROR_RX_FRAME");
return ("IO_XFER_ERROR_DMA");
return ("XFER_ERROR_CREDIT_TIMEOUT");
return ("XFER_ERROR_SATA_LINK_TIMEOUT");
return ("XFER_ERROR_SATA");
return ("XFER_ERROR_REJECTED_NCQ_MODE");
return ("XFER_ERROR_ABORTED_DUE_TO_SRST");
return ("XFER_ERROR_ABORTED_NCQ_MODE");
return ("IO_XFER_OPEN_RETRY_TIMEOUT");
return ("SMP_RESP_CONNECTION_ERROR");
return ("XFER_ERROR_UNEXPECTED_PHASE");
return ("XFER_ERROR_RDY_OVERRUN");
return ("XFER_ERROR_RDY_NOT_EXPECTED");
return ("XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT");
return ("XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK");
return ("XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK");
return ("XFER_ERROR_OFFSET_MISMATCH");
return ("XFER_ERROR_ZERO_DATA_LEN");
return ("XFER_CMD_FRAME_ISSUED");
return ("ERROR_INTERNAL_SMP_RESOURCE");
return ("IO_PORT_IN_RESET");
return ("DEVICE STATE NON-OPERATIONAL");
return ("DEVICE STATE IN RECOVERY");
default:
return (NULL);
}
}
{
int i;
for (i = 0; i < 8; i++) {
result <<= 8;
}
return (result);
}
void
{
int i;
for (i = 0; i < 8; i++) {
wwn >>= 8;
}
}
void
{
const char *fwsupport;
switch (PMCS_FW_TYPE(pwp)) {
case PMCS_FW_TYPE_RELEASED:
fwsupport = "Released";
break;
case PMCS_FW_TYPE_DEVELOPMENT:
fwsupport = "Development";
break;
case PMCS_FW_TYPE_ALPHA:
fwsupport = "Alpha";
break;
case PMCS_FW_TYPE_BETA:
fwsupport = "Beta";
break;
default:
fwsupport = "Special";
break;
}
}
void
{
} else {
}
}
/*
* Implementation for pmcs_find_phy_by_devid.
* If the PHY is found, it is returned locked.
*/
static pmcs_phy_t *
{
while (phyp) {
return (phyp);
}
if (match) {
return (match);
}
}
if (IS_ROOT_PHY(phyp)) {
} else {
}
}
return (NULL);
}
/*
* If the PHY is found, it is returned locked
*/
{
while (phyp) {
if (match) {
return (match);
}
}
return (NULL);
}
/*
* This function is called as a sanity check to ensure that a newly registered
* PHY doesn't have a device_id that exists with another registered PHY.
*/
static boolean_t
{
while (pptr) {
"%s: phy %s already exists as %s with "
return (B_FALSE);
}
return (rval);
}
}
}
/* This PHY and device_id are valid */
return (B_TRUE);
}
/*
* If the PHY is found, it is returned locked
*/
static pmcs_phy_t *
{
while (phyp) {
if (phyp->valid_device_id) {
return (phyp);
}
}
if (matched_phy) {
return (matched_phy);
}
}
/*
* Only iterate through non-root PHYs
*/
if (IS_ROOT_PHY(phyp)) {
} else {
}
}
return (NULL);
}
{
while (pptr) {
if (matched_phy) {
return (matched_phy);
}
}
return (NULL);
}
/*
* pmcs_find_phy_by_sas_address
*
* Find a PHY that both matches "sas_addr" and is on "iport".
* If a matching PHY is found, it is returned locked.
*/
{
int ua_form = 1;
char addr[PMCS_MAX_UA_SIZE];
} else {
}
while (pptr) {
/*
* If the PHY is dead or does not have a valid device ID,
* skip it.
*/
goto next_phy;
}
goto next_phy;
}
return (pptr);
}
sas_addr);
if (pnext) {
return (pnext);
}
}
}
return (NULL);
}
void
{
switch (fis[0] & 0xff) {
case FIS_REG_H2DEV:
"OP=0x%02x Feature=0x%04x Count=0x%04x Device=0x%02x "
(unsigned long long)
break;
case FIS_REG_D2H:
"us=0x%02x Error=0x%02x Dev=0x%02x Count=0x%04x LBA=%llu",
break;
default:
"0x%08x 0x%08x 0x%08x",
break;
}
}
void
{
size_t i;
}
}
/*
* If phyp == NULL we're being called from the worker thread, in which
* case we need to check all the PHYs. In this case, the softstate lock
* will be held.
* If phyp is non-NULL, just issue the spinup release for the specified PHY
* (which will already be locked).
*/
void
{
"%s: Issuing spinup release only for PHY %s", __func__,
return;
}
phyp->spinup_hold = 0;
return;
}
while (tphyp) {
if (tphyp->spinup_hold == 0) {
continue;
}
"%s: Issuing spinup release for PHY %s", __func__,
break;
}
tphyp->spinup_hold = 0;
}
}
/*
* Abort commands on dead PHYs and deregister them as well as removing
* the associated targets.
*/
static int
{
int rval = 0;
while (phyp) {
if (pchild) {
if (rval) {
return (rval);
}
}
/*
* pmcs_remove_device requires the softstate lock.
*/
} else {
}
if (remove_device) {
if (rval) {
return (rval);
}
} else {
}
}
return (rval);
}
/*
* Called with PHY locked
*/
int
{
int r, result;
/*
* There may be an outstanding ABORT_ALL running, which we wouldn't
* know just by checking abort_pending. We can, however, check
* abort_all_start. If it's non-zero, there is one, and we'll just
* sit here and wait for it to complete. If we don't, we'll remove
* the device while there are still commands pending.
*/
if (pptr->abort_all_start) {
while (pptr->abort_all_start) {
"%s: Waiting for outstanding ABORT_ALL on PHY 0x%p",
}
} else if (pptr->abort_pending) {
if (r) {
"%s: ABORT_ALL returned non-zero status (%d) for "
return (r);
}
pptr->abort_pending = 0;
}
if (pptr->valid_device_id == 0) {
return (0);
}
return (ENOMEM);
}
return (ENOMEM);
}
if (result) {
return (ETIMEDOUT);
}
if (status != PMCOUT_STATUS_OK) {
"%s: status 0x%x when trying to deregister device %s",
}
pptr->valid_device_id = 0;
return (0);
}
/*
* Acknowledge the SAS h/w events that need acknowledgement.
* This is only needed for first level PHYs.
*/
void
{
if (pptr->hw_event_ack == 0) {
continue;
}
break;
}
pptr->hw_event_ack = 0;
}
}
/*
* Load DMA
*/
int
{
/*
* If we have no data segments, we're done.
*/
return (0);
}
/*
* Get the S/G list pointer.
*/
/*
* If we only have one dma segment, we can directly address that
* data within the Inbound message itself.
*/
msg[15] = 0;
return (0);
}
/*
* Otherwise, we'll need one or more external S/G list chunks.
* Get the first one and its dma address into the Inbound message.
*/
return (-1);
}
msg[14] = 0;
tsc = 0;
/*
* If the current segment count for this chunk is one less than
* the number s/g lists per chunk and we have more than one seg
* to go, we need another chunk. Get it, and make sure that the
* tail end of the the previous chunk points the new chunk
* (if remembering an offset can be called 'pointing to').
*
* Note that we can store the offset into our command area that
* represents the new chunk in the length field of the part
* that points the PMC chip at the next chunk- the PMC chip
* ignores this field when the EXTENSION bit is set.
*
* This is required for dma unloads later.
*/
"%s: out of SG lists", __func__);
return (-1);
}
tsc = 0;
}
sg++;
}
return (0);
}
/*
* Unload DMA
*/
void
{
}
}
/*
* Take a chunk of consistent memory that has just been allocated and inserted
* into the cip indices and prepare it for DMA chunk usage and add it to the
* freelist.
*
* Called with dma_lock locked (except during attach when it's unnecessary)
*/
void
{
unsigned long off, n;
} else {
}
}
/*
* Install offsets into chunk lists.
*/
}
}
"added %lu DMA chunks ", n);
}
/*
* Change the value of the interrupt coalescing timer. This is done currently
* only for I/O completions. If we're using the "auto clear" feature, it can
* be turned back on when interrupt coalescing is turned off and must be
* turned off when the coalescing timer is on.
* NOTE: PMCS_MSIX_GENERAL and PMCS_OQ_IODONE are the same value. As long
* as that's true, we don't need to distinguish between them.
*/
void
{
if (adj == DECREASE_TIMER) {
/* If the timer is already off, nothing to do. */
return;
}
/* Disable the timer */
}
} else {
}
} else {
/*
* If the timer isn't on yet, do the setup for it now.
*/
/* If auto clear is being used, turn it off. */
(pwp->odb_auto_clear &
~(1 << PMCS_MSIX_IODONE)));
}
(1 << PMCS_MSIX_IODONE));
} else {
}
}
}
/*
* Adjust the interrupt threshold based on the current timer value
*/
}
/*
* Register Access functions
*/
{
DDI_SUCCESS) {
__func__);
}
return (iqci);
}
{
DDI_SUCCESS) {
__func__);
}
return (oqpi);
}
{
off &= GSM_BASE_MASK;
drv_usecwait(10);
}
drv_usecwait(10);
}
return (rv);
}
void
{
off &= GSM_BASE_MASK;
drv_usecwait(10);
}
drv_usecwait(10);
}
}
{
switch (off) {
case PMCS_SPC_RESET:
case PMCS_SPC_BOOT_STRAP:
case PMCS_SPC_DEVICE_ID:
case PMCS_DEVICE_REVISION:
break;
default:
break;
}
return (off);
}
void
{
switch (off) {
case PMCS_SPC_RESET:
case PMCS_DEVICE_REVISION:
break;
default:
break;
}
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
void
{
}
void
{
}
void
{
}
void
{
}
void
{
}
void
{
DDI_SUCCESS) {
__func__);
}
}
void
{
}
void
{
}
void
{
DDI_SUCCESS) {
__func__);
}
}
/*
* Check the status value of an outbound IOMB and report anything bad
*/
void
{
int offset;
return;
}
switch (opcode) {
/*
* The following have no status field, so ignore them
*/
case PMCOUT_ECHO:
case PMCOUT_SAS_HW_EVENT:
case PMCOUT_GET_DEVICE_HANDLE:
case PMCOUT_SATA_EVENT:
case PMCOUT_SSP_EVENT:
case PMCOUT_GPIO:
case PMCOUT_GPIO_EVENT:
case PMCOUT_GET_TIME_STAMP:
case PMCOUT_SKIP_ENTRIES:
case PMCOUT_GET_NVMD_DATA: /* Actually lower 16 bits of word 3 */
case PMCOUT_SET_NVMD_DATA: /* but ignore - we don't use these */
return;
case PMCOUT_GENERAL_EVENT:
offset = 1;
break;
case PMCOUT_SSP_COMPLETION:
case PMCOUT_SMP_COMPLETION:
case PMCOUT_SATA_COMPLETION:
case PMCOUT_DEVICE_INFO:
case PMCOUT_FW_FLASH_UPDATE:
case PMCOUT_SSP_ABORT:
case PMCOUT_SATA_ABORT:
case PMCOUT_SMP_ABORT:
case PMCOUT_SET_DEVICE_STATE:
case PMCOUT_GET_DEVICE_STATE:
case PMCOUT_SET_DEVICE_INFO:
offset = 2;
break;
case PMCOUT_LOCAL_PHY_CONTROL:
case PMCOUT_SAS_DIAG_EXECUTE:
case PMCOUT_PORT_CONTROL:
offset = 3;
break;
case PMCOUT_GET_INFO:
case PMCOUT_GET_VPD:
case PMCOUT_SET_VPD:
case PMCOUT_TWI:
"Got response for deprecated opcode", iomb);
return;
default:
"Got response for unknown opcode", iomb);
return;
}
"bad status on TAG_TYPE_NONE command", iomb);
}
}
/*
* Called with statlock held
*/
void
{
(void *)xp);
/*
* Clear the dip now. This keeps pmcs_remove_device from attempting
* to call us on the same device while we're still flushing queues.
* The only side effect is we can no longer update SM-HBA properties,
* but this device is going away anyway, so no matter.
*/
xp->special_running = 0;
xp->recovering = 0;
xp->recover_wait = 0;
xp->event_recovery = 0;
/* Don't clear xp->phy */
/* Don't clear xp->actv_cnt */
/*
* Flush all target queues
*/
}
static int
{
switch (result) {
case SMP_RES_UNKNOWN_FUNCTION:
"Function Result: Unknown SMP Function(0x%x)",
break;
case SMP_RES_FUNCTION_FAILED:
"Function Result: SMP Function Failed(0x%x)",
break;
"Function Result: Invalid Request Frame Length(0x%x)",
break;
"Function Result: Incomplete Descriptor List(0x%x)",
break;
"Function Result: PHY does not exist(0x%x)",
break;
case SMP_RES_PHY_VACANT:
"Function Result: PHY Vacant(0x%x)",
break;
default:
"Function Result: (0x%x)",
break;
}
return (result);
}
/*
* Do all the repetitive stuff necessary to setup for DMA
*
* pwp: Used for dip
* dma_attr: ddi_dma_attr_t to use for the mapping
* acch: ddi_acc_handle_t to use for the mapping
* dmah: ddi_dma_handle_t to use
* length: Amount of memory for mapping
* kvp: Pointer filled in with kernel virtual address on successful return
* dma_addr: Pointer filled in with DMA address on successful return
*/
{
};
DDI_SUCCESS) {
return (B_FALSE);
}
return (B_FALSE);
}
!= DDI_DMA_MAPPED) {
return (B_FALSE);
}
if (cookie_cnt != 1) {
}
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Flush requested queues for a particular target. Called with statlock held
*/
void
{
pmcs_cmd_t *sp;
"%s: Flushing queues (%d) for target 0x%p", __func__,
/*
* Commands on the wait queue (or the special queue below) don't have
* work structures associated with them.
*/
if (queues & PMCS_TGT_WAIT_QUEUE) {
"%s: Removing cmd 0x%p from wq for target 0x%p",
}
}
/*
* Commands on the active queue will have work structures associated
* with them.
*/
if (queues & PMCS_TGT_ACTIVE_QUEUE) {
/*
* If we found a work structure, mark it as dead
* and complete it
*/
}
"%s: Removing cmd 0x%p from aq for target 0x%p",
}
}
if (queues & PMCS_TGT_SPECIAL_QUEUE) {
"%s: Removing cmd 0x%p from sq for target 0x%p",
}
}
}
void
{
case PMCS_TAG_TYPE_CBACK:
{
break;
}
case PMCS_TAG_TYPE_WAIT:
}
break;
case PMCS_TAG_TYPE_NONE:
#ifdef DEBUG
#endif
break;
default:
/*
* We will leak a structure here if we don't know
* what happened
*/
break;
}
}
/*
* Determine if iport still has targets. During detach(9E), if SCSA is
* successfull in its guarantee of tran_tgt_free(9E) before detach(9E),
* this should always return B_FALSE.
*/
{
int i;
return (B_FALSE);
}
continue;
}
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Called with softstate lock held
*/
void
{
"%s: target %p iport addres is null",
}
"%s: no iport associated with tgt(0x%p)",
return;
}
}
}
/*
* Get device state. Called with statlock and PHY lock held.
*/
int
{
int result;
return (-1);
}
return (-1);
}
if (phyp->valid_device_id == 0) {
return (-1);
}
return (-1);
}
if (result) {
__func__);
return (-1);
}
"%s: retrieved_ds=0x%x, target_ds=0x%x", __func__,
}
return (0);
} else {
"%s: cmd failed Status(0x%x), returning ", __func__,
return (-1);
}
}
/*
* Set device state. Called with target's statlock and PHY lock held.
*/
int
{
int result;
return (-1);
}
return (-1);
}
__func__);
return (-1);
}
if (phyp->valid_device_id == 0) {
"%s: Invalid DeviceID", __func__);
return (-1);
}
return (-1);
}
if (result) {
"%s: cmd timed out, returning", __func__);
return (-1);
}
return (0);
} else {
"%s: cmd failed Status(0x%x), returning ", __func__,
return (-1);
}
}
void
{
int rc;
/*
* First time, check to see if we're already performing recovery
*/
if (pwp->ds_err_recovering) {
return;
}
} else {
}
while (pptr) {
/*
* Since ds_err_recovering is set, we can be assured these
* PHYs won't disappear on us while we do this.
*/
if (pchild) {
}
goto next_phy;
}
"%s: no target for DS error recovery for "
}
goto next_phy;
}
if (tgt->recover_wait == 0) {
goto next_phy;
}
/*
* Step 1: Put the device into the IN_RECOVERY state
*/
if (rc != 0) {
"%s: pmcs_get_dev_state on PHY %s "
"failed (rc=%d)",
goto next_phy;
}
(ds == PMCS_DEVICE_STATE_IN_RECOVERY)) {
"%s: Target 0x%p already IN_RECOVERY", __func__,
(void *)tgt);
} else {
"%s: pmcs_send_err_recovery_cmd "
"result(%d) tgt(0x%p) ds(0x%x) tgt->ds(0x%x)",
if (rc) {
"%s: pmcs_send_err_recovery_cmd to PHY %s "
"failed (rc=%d)",
"pmcs_send_err_recovery_cmd");
goto next_phy;
}
}
/*
* Step 2: Perform a hard reset on the PHY
*/
/*
* Must release statlock here because pmcs_reset_phy will
* drop and reacquire the PHY lock.
*/
if (rc) {
"%s: HARD_RESET to PHY %s failed (rc=%d)",
goto next_phy;
}
/*
*/
if (pptr->abort_all_start) {
while (pptr->abort_all_start) {
"%s: Waiting for outstanding ABORT_ALL on "
}
} else {
if (rc != 0) {
"%s: pmcs_abort to PHY %s failed (rc=%d)",
goto next_phy;
}
}
/*
* Step 4: Set the device back to OPERATIONAL state
*/
if (rc == 0) {
tgt->recover_wait = 0;
pptr->ds_recovery_retries = 0;
/*
* Don't bother to run the work queues if the PHY
* is dead.
*/
pwp, DDI_NOSLEEP);
}
} else {
"%s: Failed to SET tgt 0x%p to OPERATIONAL state",
goto next_phy;
}
if (tgt) {
}
}
/*
* Only clear ds_err_recovering if we're exiting for good and not
* just unwinding from recursion
*/
pwp->ds_err_recovering = 0;
}
}
/*
* Called with target's statlock and PHY lock held.
*/
int
{
int rc = -1;
if (tgt->recovering) {
return (0);
}
__func__);
return (-1);
}
switch (dev_state) {
"%s: Target 0x%p already IN_RECOVERY", __func__,
(void *)tgt);
rc = 0; /* This is not an error */
goto no_action;
}
if (rc != 0) {
"%s(1): Failed to SET tgt(0x%p) to _IN_RECOVERY",
}
break;
"%s: Target 0x%p not ready to go OPERATIONAL",
goto no_action;
}
if (rc != 0) {
"%s(2): Failed to SET tgt(0x%p) to OPERATIONAL",
tgt->reset_success = 0;
}
break;
"%s: Device at %s is non-operational",
rc = 0;
break;
default:
"%s: Invalid state requested (%d)", __func__,
break;
}
tgt->recovering = 0;
return (rc);
}
/*
* pmcs_lock_phy_impl
*
* This function is what does the actual work for pmcs_lock_phy. It will
* lock all PHYs from phyp down in a top-down fashion.
*
* Locking notes:
* 1. level starts from 0 for the PHY ("parent") that's passed in. It is
* not a reflection of the actual level of the PHY in the SAS topology.
* 2. If parent is an expander, then parent is locked along with all its
* descendents.
* 3. Expander subsidiary PHYs at level 0 are not locked. It is the
* responsibility of the caller to individually lock expander subsidiary PHYs
* at level 0 if necessary.
* 4. Siblings at level 0 are not traversed due to the possibility that we're
* locking a PHY on the dead list. The siblings could be pointing to invalid
* PHYs. We don't lock siblings at level 0 anyway.
*/
static void
{
/*
* Start walking the PHYs.
*/
while (tphyp) {
/*
* If we're at the top level, only lock ourselves. For anything
* at level > 0, traverse children while locking everything.
*/
"%s: PHY 0x%p parent 0x%p path %s lvl %d",
}
}
if (level == 0) {
return;
}
}
}
/*
* pmcs_lock_phy
*
* This function is responsible for locking a PHY and all its descendents
*/
void
{
#ifdef DEBUG
char *callername = NULL;
if (callername == NULL) {
"%s: PHY 0x%p path %s caller: unknown", __func__,
} else {
"%s: PHY 0x%p path %s caller: %s+%lx", __func__,
}
#else
#endif
pmcs_lock_phy_impl(phyp, 0);
}
/*
* pmcs_unlock_phy_impl
*
* Unlock all PHYs from phyp down in a bottom-up fashion.
*/
static void
{
/*
* Recurse down to the bottom PHYs
*/
if (level == 0) {
}
} else {
while (phy_next) {
level + 1);
}
}
}
/*
* Iterate through PHYs unlocking all at level > 0 as well the top PHY
*/
while (phy_next) {
"%s: PHY 0x%p parent 0x%p path %s lvl %d",
}
if (level == 0) {
return;
}
}
}
/*
* pmcs_unlock_phy
*
* Unlock a PHY and all its descendents
*/
void
{
#ifdef DEBUG
char *callername = NULL;
if (callername == NULL) {
"%s: PHY 0x%p path %s caller: unknown", __func__,
} else {
"%s: PHY 0x%p path %s caller: %s+%lx", __func__,
}
#else
#endif
pmcs_unlock_phy_impl(phyp, 0);
}
/*
* pmcs_get_root_phy
*
* For a given phy pointer return its root phy.
* The caller must be holding the lock on every PHY from phyp up to the root.
*/
{
while (phyp) {
if (IS_ROOT_PHY(phyp)) {
break;
}
}
return (phyp);
}
/*
* pmcs_free_dma_chunklist
*
* Free DMA S/G chunk list
*/
void
{
while (pwp->dma_chunklist) {
if (pchunk->dma_handle) {
DDI_SUCCESS) {
}
}
}
}
/*
* Start ssp event recovery. We have to schedule recovery operation because
* it involves sending multiple commands to device and we should not do it
* in the interrupt context.
* If it is failure of a recovery command, let the recovery thread deal with it.
* Called with pmcwork lock held.
*/
void
{
if (pptr) {
}
}
}
/*
* No target, need to run RE-DISCOVERY here.
*/
}
/*
* Although we cannot mark phy to force abort nor mark phy
* as changed, killing of a target would take care of aborting
* commands for the device.
*/
"processing found. Scheduling RECONFIGURE", __func__);
return;
} else {
"%s is non-operational", __func__,
}
return;
}
/*
* If this command is run in WAIT mode, it is a failing recovery
* command. If so, just wake up recovery thread waiting for
* command completion.
*/
if (tag == PMCS_TAG_TYPE_WAIT) {
}
return;
}
/*
* To recover from primary failures,
* we need to schedule handling events recovery.
*/
"%s: Scheduling SSP event recovery for tgt(0x%p) "
}
/* Work cannot be completed until event recovery is completed. */
}
/*
* SSP target event recovery
* Entered with a phy lock held
* Pwrk lock is not needed - pwrk is on the target aq and no other thread
* will do anything with it until this thread starts the chain of recovery.
* Statlock may be acquired and released.
*/
void
{
int rv;
if (event == PMCOUT_STATUS_XFER_ERR_BREAK ||
/* Command may be still pending on device */
if (rv != 0) {
goto out;
}
if (status == SAS_RSP_TMF_COMPLETE) {
/* Command NOT pending on a device */
"%s: No pending command for tgt 0x%p",
/* Nothing more to do, just abort it on chip */
htag = 0;
}
}
/*
* All other events left the command pending in the host
* Send abort task and abort it on the chip
*/
if (htag != 0) {
goto out;
}
/*
* Abort either took care of work completion, or put device in
* a recovery state
*/
return;
out:
/* Abort failed, do full device recovery */
"%s: Setting IN_RECOVERY for tgt 0x%p",
(void) pmcs_send_err_recovery_cmd(pwp,
}
}
/*
* SSP event recovery task.
*/
void
{
int idx;
pmcs_cmd_t *cp;
int er_flag;
continue;
}
"%s: found target(0x%p)", __func__,
(void *) tgt);
/* Check what cmd expects recovery */
/*
* Since work structure is on this
* target aq, and only this thread
* is accessing it now, we do not need
* to lock it
*/
/*
* aq may contain TMF commands,
* so we may not find work
* structure with htag
*/
break;
}
"%s: pwrk(%p) ctag(0x%x)",
/*
* We dropped statlock, so
* restart scanning from scratch
*/
goto restart;
}
}
tgt->event_recovery = 0;
"%s: end of SSP event recovery for "
}
}
}
"%s: end of SSP event recovery for pwp(0x%p)", __func__,
(void *) pwp);
}
/*ARGSUSED2*/
int
{
return (0);
}
/*ARGSUSED1*/
void
{
}
/*
* Free all PHYs from the kmem_cache starting at phyp as well as everything
* on the dead_phys list.
*
* NOTE: This function does not free root PHYs as they are not allocated
* from the kmem_cache.
*
* No PHY locks are acquired as this should only be called during DDI_DETACH
* or soft reset (while pmcs interrupts are disabled).
*/
void
{
return;
}
while (tphyp) {
}
if (!IS_ROOT_PHY(tphyp)) {
}
}
while (tphyp) {
}
}
/*
* Free a list of PHYs linked together by the sibling pointer back to the
* kmem cache from whence they came. This function does not recurse, so the
* caller must ensure there are no children.
*/
void
{
while (phyp) {
}
}
/*
* Make a copy of an existing PHY structure. This is used primarily in
* discovery to compare the contents of an existing PHY with what gets
* reported back by an expander.
*
* This function must not be called from any context where sleeping is
* not possible.
*
* The new PHY is returned unlocked.
*/
static pmcs_phy_t *
{
/*
* Go ahead and just copy everything...
*/
/*
* But the following must be set appropriately for this copy
*/
return (local);
}
int
{
return (DDI_FAILURE);
}
return (de.fme_status);
}
int
{
return (DDI_FAILURE);
}
return (de.fme_status);
}
void
{
char buf[FM_MAX_CLASS];
}
}
int
{
int i;
/* check all acc & dma handles allocated in attach */
goto check_failed;
}
for (i = 0; i < PMCS_NIQ; i++) {
if ((pmcs_check_dma_handle(
goto check_failed;
}
}
for (i = 0; i < PMCS_NOQ; i++) {
if ((pmcs_check_dma_handle(
goto check_failed;
}
}
goto check_failed;
}
goto check_failed;
}
!= DDI_SUCCESS))) {
goto check_failed;
}
while (pchunk) {
!= DDI_SUCCESS) ||
!= DDI_SUCCESS)) {
goto check_failed;
}
}
return (0);
return (1);
}
/*
* pmcs_handle_dead_phys
*
* If the PHY has no outstanding work associated with it, remove it from
* the dead PHY list and free it.
*
* If pwp->ds_err_recovering or pwp->configuring is set, don't run.
* This keeps routines that need to submit work to the chip from having to
* hold PHY locks to ensure that PHYs don't disappear while they do their work.
*/
void
{
return;
}
/*
* Check every PHY in the dead PHY list
*/
/*
* Check for outstanding work
*/
"%s: Not freeing PHY 0x%p: target 0x%p is not free",
} else {
/*
* No outstanding work or target references. Remove it
* from the list and free it
*/
"%s: Freeing inactive dead PHY 0x%p @ %s "
/*
* If pphyp is NULL, then phyp was the head of the list,
* so just reset the head to nphyp. Otherwise, the
* previous PHY will now point to nphyp (the next PHY)
*/
} else {
}
/*
* If the target still points to this PHY, remove
* that linkage now.
*/
}
}
}
}
}
void
{
}
void
{
}
/*
* pmcs_reap_dead_phy
*
* This function is called from pmcs_new_tport when we have a PHY
* without a target pointer. It's possible in that case that this PHY
* may have a "brother" on the dead_phys list. That is, it may be the same as
* this one but with a different root PHY number (e.g. pp05 vs. pp04). If
* that's the case, update the dead PHY and this new PHY. If that's not the
* case, we should get a tran_tgt_init on this after it's reported to SCSA.
*
* Called with PHY locked.
*/
static void
{
/*
* Check the dead PHYs list
*/
while (ctmp) {
continue;
}
/*
* Same SAS address on same iport. Now check to see if
* the PHY path is the same with the possible exception
* of the root PHY number.
* The "5" is the string length of "pp00."
*/
break;
}
}
}
/*
* Found a match. Remove the target linkage and drop the
* ref count on the old PHY. Then, increment the ref count
* on the new PHY to compensate.
*/
if (ctmp) {
"%s: Found match in dead PHY list for new PHY %s",
/*
* If there is a pointer to the target in the dead
* PHY, and that PHY's ref_count drops to 0, we can
* clear the target linkage now. If the PHY's
* ref_count is > 1, then there may be multiple
* LUNs still remaining, so leave the linkage.
*/
/*
* Update the target's linkage as well
*/
}
}
}
}
/*
* Called with iport lock held
*/
void
{
}
/*
* Called with the iport lock held
*/
void
{
/*
* If phyp is NULL, remove all PHYs from the iport
*/
}
return;
}
}
/*
* This function checks to see if the target pointed to by phyp is still
* correct. This is done by comparing the target's unit address with the
* SAS address in phyp.
*
* Called with PHY locked and target statlock held
*/
static boolean_t
{
char unit_address[PMCS_MAX_UA_SIZE];
}
return (rval);
}
void
{
if (xp->recover_wait == 0) {
/*
* Rather than waiting for the watchdog timer, we'll
* kick it right now.
*/
}
}
/*
* Increment the phy ds error retry count.
* If too many retries, mark phy dead and restart discovery;
* otherwise schedule ds recovery.
*/
static void
{
"%s: retry limit reached after %s to PHY %s failed",
tgt->recover_wait = 0;
} else {
}
}