mptsas.c revision 96c4a178a18cd52ee5001195f1552d9cef0c38f0
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2000 to 2009, LSI Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms of all code within
* this file that is exclusively owned by LSI, with or without
* modification, is permitted provided that, in addition to the CDDL 1.0
* License requirements, the following conditions are met:
*
* Neither the name of the author nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
*
*/
#define MPTSAS_DEBUG
#endif
/*
* standard header files.
*/
#include <sys/sysevent.h>
#pragma pack(1)
#pragma pack()
/*
* private header files.
*
*/
#include <sys/raidioctl.h>
/*
* FMA header files
*/
/*
* autoconfiguration data and routines.
*/
/*
* cb_ops function
*/
#ifndef __sparc
#endif /* __sparc */
/*
* Resource initialization for hardware
*/
/*
* SCSA function prototypes
*/
int tgtonly);
/*
* SMP functions
*/
/*
* internal function prototypes.
*/
static void mptsas_ncmds_checkdrain(void *arg);
static void mptsas_handle_event(void *args);
static int mptsas_handle_event_sync(void *args);
static void mptsas_handle_dr(void *args);
dev_info_t *pdip);
static void mptsas_restart_cmd(void *);
static void mptsas_watch(void *arg);
int kmflags);
mptsas_cmd_t *cmd);
int *resid);
static void mptsas_start_watch_reset_delay();
static void mptsas_watch_reset_delay(void *arg);
/*
* helper functions
*/
int lun);
int lun);
int *lun);
/*
* Enumeration / DR functions
*/
dev_info_t **lundip);
dev_info_t **lundip);
dev_info_t **dip);
int lun);
dev_info_t **smp_dip);
static void mptsas_record_event(void *args);
mptsas_smp_t *data);
dev_info_t **smp_dip);
/*
* Power management functions
*/
static void mptsas_idle_pm(void *arg);
/*
* MPT MSI tunable:
*
* By default MSI is enabled on all supported platforms.
*/
static int mptsas_add_intrs(mptsas_t *, int);
static void mptsas_rem_intrs(mptsas_t *);
/*
* FMA Prototypes
*/
/*
* This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
* under this device that the paths to a physical device are created when
* MPxIO is used.
*/
extern dev_info_t *scsi_vhci_dip;
/*
* Tunable timeout value for Inquiry VPD page 0x83
* By default the value is 30 seconds.
*/
int mptsas_inq83_retry_timeout = 30;
/*
* This is used to allocate memory for message frame storage, not for
* data I/O DMA. All message frames must be stored in the first 4G of
* physical memory.
*/
DMA_ATTR_V0, /* attribute layout version */
0x0ull, /* address low - should be 0 (longlong) */
0xffffffffull, /* address high - 32-bit max range */
0x00ffffffull, /* count max - max DMA object size */
4, /* allocation alignment requirements */
0x78, /* burstsizes - binary encoded values */
1, /* minxfer - gran. of DMA engine */
0x00ffffffull, /* maxxfer - gran. of DMA engine */
0xffffffffull, /* max segment size (DMA boundary) */
MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
512, /* granularity - device transfer size */
0 /* flags, set to 0 */
};
/*
* This is used for data I/O DMA memory allocation. (full 64-bit DMA
* physical addresses are supported.)
*/
DMA_ATTR_V0, /* attribute layout version */
0x0ull, /* address low - should be 0 (longlong) */
0xffffffffffffffffull, /* address high - 64-bit max */
0x00ffffffull, /* count max - max DMA object size */
4, /* allocation alignment requirements */
0x78, /* burstsizes - binary encoded values */
1, /* minxfer - gran. of DMA engine */
0x00ffffffull, /* maxxfer - gran. of DMA engine */
0xffffffffull, /* max segment size (DMA boundary) */
MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
512, /* granularity - device transfer size */
DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */
};
};
static struct cb_ops mptsas_cb_ops = {
scsi_hba_open, /* open */
scsi_hba_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
mptsas_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* chpoll */
ddi_prop_op, /* cb_prop_op */
NULL, /* streamtab */
D_MP, /* cb_flag */
CB_REV, /* rev */
nodev, /* aread */
nodev /* awrite */
};
static struct dev_ops mptsas_ops = {
DEVO_REV, /* devo_rev, */
0, /* refcnt */
ddi_no_info, /* info */
nulldev, /* identify */
nulldev, /* probe */
mptsas_attach, /* attach */
mptsas_detach, /* detach */
nodev, /* reset */
&mptsas_cb_ops, /* driver operations */
NULL, /* bus operations */
mptsas_power, /* power management */
#ifdef __sparc
#else
mptsas_quiesce /* quiesce */
#endif /* __sparc */
};
#define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.20"
&mod_driverops, /* Type of module. This one is a driver */
MPTSAS_MOD_STRING, /* Name of the module. */
&mptsas_ops, /* driver ops */
};
static struct modlinkage modlinkage = {
};
#define TARGET_PROP "target"
#define LUN_PROP "lun"
#define SAS_PROP "sas-mpt"
#define MDI_GUID "wwn"
#define NDI_GUID "guid"
#define MPTSAS_DEV_GONE "mptsas_dev_gone"
/*
* Local static data
*/
#if defined(MPTSAS_DEBUG)
#endif /* defined(MPTSAS_DEBUG) */
static kmutex_t mptsas_global_mutex;
static void *mptsas_state; /* soft state ptr */
static krwlock_t mptsas_global_rwlock;
static kmutex_t mptsas_log_mutex;
static char mptsas_log_buf[256];
static clock_t mptsas_scsi_watchdog_tick;
static clock_t mptsas_tick;
static timeout_id_t mptsas_reset_watch;
static timeout_id_t mptsas_timeout_id;
static int mptsas_timeouts_enabled = 0;
/*
* warlock directives
*/
#ifdef MPTSAS_DEBUG
void debug_enter(char *);
#endif
/*
* Notes:
* - scsi_hba_init(9F) initializes SCSI HBA modules
* - must call scsi_hba_fini(9F) if modload() fails
*/
int
_init(void)
{
int status;
/* CONSTCOND */
NDBG0(("_init"));
if (status != 0) {
return (status);
}
return (status);
}
}
return (status);
}
/*
* Notes:
* - scsi_hba_fini(9F) uninitializes SCSI HBA modules
*/
int
_fini(void)
{
int status;
/* CONSTCOND */
NDBG0(("_fini"));
}
return (status);
}
/*
* The loadable-module _info(9E) entry point
*/
int
{
/* CONSTCOND */
NDBG0(("mptsas _info"));
}
static int
{
char phymask[8];
int physport = -1;
int dynamic_port = 0;
int rval = DDI_FAILURE;
int i = 0;
/* CONSTCOND */
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
/*
* If this a scsi-iport node, nothing to do here.
*/
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
NULL) {
"get tran vector for the HBA node");
return (DDI_FAILURE);
}
return (DDI_FAILURE);
NULL) {
"get tran vector for the iport node");
return (DDI_FAILURE);
}
/*
* Overwrite parent's tran_hba_private to iport's tran vector
*/
/*
* Get SAS address for initiator port according to dev_handle
*/
return (DDI_SUCCESS);
}
for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
break;
}
}
if (i == MPTSAS_MAX_PHYS) {
"seems not exist", iport);
return (DDI_FAILURE);
}
dynamic_port = 1;
else
dynamic_port = 0;
if (rval != DDI_SUCCESS) {
"SAS address of initiator failed!", iport);
return (DDI_FAILURE);
}
wwid);
"initiator-port", initiator_wwnstr) !=
return (DDI_FAILURE);
}
"phymask", phy_mask) !=
return (DDI_FAILURE);
}
"dynamic-port", dynamic_port) !=
return (DDI_FAILURE);
}
/*
*/
dip, 0) == MDI_SUCCESS) {
}
return (DDI_SUCCESS);
}
/*
* Notes:
* Set up all device state and allocate data structures,
* mutexes, condition variables, etc. for device operation.
* Add interrupts needed.
* Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
*/
static int
{
int instance, i, j;
int doneq_thread_num;
char buf[64];
char intr_added = 0;
char map_setup = 0;
char config_setup = 0;
char hba_attach_setup = 0;
char smp_attach_setup = 0;
char mutex_init_done = 0;
char event_taskq_create = 0;
char dr_taskq_create = 0;
char doneq_thread_create = 0;
int intr_types;
int tran_flags = 0;
int rval = DDI_FAILURE;
/* CONSTCOND */
if (scsi_hba_iport_unit_address(dip)) {
}
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
return (DDI_FAILURE);
if (!mpt) {
return (DDI_FAILURE);
}
/*
* Reset hardware and softc to "no outstanding commands"
* Note that a check condition can result on first command
* to a target.
*/
/*
* raise power.
*/
(void) pm_busy_component(dip, 0);
} else {
}
if (rval == DDI_SUCCESS) {
} else {
/*
* The pm_raise_power() call above failed,
* and that can only occur if we were unable
* to reset the hardware. This is probably
* due to unhealthy hardware, and because
* important filesystems (such as the root
* filesystem) could be on the attached disks,
* it would not be a good idea to continue,
* as we won't be entirely certain we are
* writing correct data. So we panic() here
* to not only prevent possible data corruption,
* but to give developers or end users a hope
* of identifying and correcting any problems.
*/
fm_panic("mptsas could not reset hardware "
"during resume");
}
}
mpt->m_suspended = 0;
/*
* Reinitialize ioc
*/
(void) pm_idle_component(dip, 0);
}
fm_panic("mptsas init chip fail during resume");
}
/*
* mptsas_update_driver_data needs interrupts so enable them
* first.
*/
/* start requests, if possible */
/*
* Restart watch thread
*/
if (mptsas_timeout_id == 0) {
}
/* report idle status to pm framework */
(void) pm_idle_component(dip, 0);
}
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
/*
* Allocate softc information.
*/
"mptsas%d: cannot allocate soft state", instance);
goto fail;
}
"mptsas%d: cannot get soft state", instance);
goto fail;
}
/* Allocate a transport structure */
/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
/* Make a per-instance copy of the structures */
/*
* Initialize FMA
*/
goto fail;
}
config_setup++;
sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
goto fail;
}
/*
* This is a workaround for a XMITS ASIC bug which does not
* drive the CBE upper bits.
*/
}
/*
* Setup configuration space
*/
goto fail;
}
goto fail;
}
map_setup++;
/*
* A taskq is created for dealing with the event handler
*/
goto fail;
}
/*
* A taskq is created for dealing with dr events
*/
"mptsas_dr_taskq",
"failed");
goto fail;
}
0, "mptsas_doneq_thread_threshold_prop", 10);
0, "mptsas_doneq_length_threshold_prop", 8);
0, "mptsas_doneq_thread_n_prop", 8);
if (mpt->m_doneq_thread_n) {
kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
for (j = 0; j < mpt->m_doneq_thread_n; j++) {
MUTEX_DRIVER, NULL);
}
}
/* Get supported interrupt types */
"failed\n");
goto fail;
}
/*
* Try MSI, but fall back to FIXED
*/
NDBG0(("Using MSI interrupt type"));
goto intr_done;
}
}
if (intr_types & DDI_INTR_TYPE_FIXED) {
NDBG0(("Using FIXED interrupt type"));
goto intr_done;
}
NDBG0(("FIXED interrupt registration failed"));
}
goto fail;
intr_added++;
/* Initialize mutex used in interrupt handler */
/*
* Disable hardware interrupt since we're not ready to
* handle it yet.
*/
/*
* Enable interrupts
*/
/* Call ddi_intr_block_enable() for MSI interrupts */
} else {
/* Call ddi_intr_enable for MSI or FIXED interrupts */
for (i = 0; i < mpt->m_intr_cnt; i++) {
}
}
/*
* Initialize power management component
*/
if (mptsas_init_pm(mpt)) {
"failed");
goto fail;
}
}
/*
* Initialize chip
*/
goto fail;
}
/*
* initialize SCSI HBA transport structure
*/
goto fail;
}
/*
* Register the iport for multiple port HBA
*/
/*
* initial value of mask is 0
*/
for (i = 0; i < mpt->m_num_phys; i++) {
char phy_mask_name[8];
continue;
if ((mask & (1 << i)) != 0)
continue;
for (j = 0; j < mpt->m_num_phys; j++) {
phy_mask |= (1 << j);
}
}
for (j = 0; j < mpt->m_num_phys; j++) {
if ((phy_mask >> j) & 0x01) {
}
}
/*
* register a iport
*/
}
/*
* register a virtual port for RAID volume always
*/
/*
* All children of the HBA are iports. We need the tran to be
* cloned, so we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
* inherited by the iport's tran vector.
*/
goto fail;
}
goto fail;
}
/*
* Initialize smp hash table
*/
/*
* create kmem cache for packets
*/
goto fail;
}
/*
* create kmem cache for extra SGL frames if SGL cannot
* be accommodated into main request frame.
*/
sizeof (mptsas_cache_frames_t), 8,
goto fail;
}
if (mpt->m_scsi_reset_delay == 0) {
"scsi_reset_delay of 0 is not recommended,"
" resetting to SCSI_DEFAULT_RESET_DELAY\n");
}
/*
* Initialize the wait and done FIFO queue
*/
mpt->m_tx_draining = 0;
/*
* ioc cmd queue initialize
*/
/*
* enable event notification
*/
goto fail;
}
/* Check all dma handles allocated in attach */
!= DDI_SUCCESS) ||
!= DDI_SUCCESS) ||
!= DDI_SUCCESS) ||
!= DDI_SUCCESS) ||
!= DDI_SUCCESS)) {
goto fail;
}
/* Check all acc handles allocated in attach */
!= DDI_SUCCESS) ||
!= DDI_SUCCESS) ||
!= DDI_SUCCESS) ||
!= DDI_SUCCESS) ||
!= DDI_SUCCESS) ||
!= DDI_SUCCESS)) {
goto fail;
}
/*
* After this point, we are not going to fail the attach.
*/
/*
* used for mptsas_watch
*/
if (mptsas_head == NULL) {
mptsas_head = mpt;
} else {
}
mptsas_tail = mpt;
if (mptsas_timeouts_enabled == 0) {
}
/* Print message of HBA present */
/* report idle status to pm framework */
(void) pm_idle_component(dip, 0);
}
return (DDI_SUCCESS);
fail:
if (mpt) {
mptsas_timeout_id = 0;
}
/* deallocate in reverse order */
if (mpt->m_cache_frames) {
}
if (mpt->m_kmem_cache) {
}
if (hba_attach_setup) {
(void) scsi_hba_detach(dip);
}
if (smp_attach_setup) {
(void) smp_hba_detach(dip);
}
if (intr_added) {
}
if (doneq_thread_create) {
for (j = 0; j < mpt->m_doneq_thread_n; j++) {
}
while (mpt->m_doneq_thread_n) {
&mpt->m_doneq_mutex);
}
for (j = 0; j < doneq_thread_num; j++) {
}
sizeof (mptsas_doneq_thread_list_t)
* doneq_thread_num);
}
if (event_taskq_create) {
}
if (dr_taskq_create) {
}
if (mutex_init_done) {
}
if (map_setup) {
}
if (config_setup) {
}
}
}
}
return (DDI_FAILURE);
}
static int
{
if (scsi_hba_iport_unit_address(devi)) {
return (DDI_SUCCESS);
}
return (DDI_SUCCESS);
if (!mpt) {
return (DDI_SUCCESS);
}
if (mpt->m_suspended++) {
return (DDI_SUCCESS);
}
/*
* Cancel timeout threads for this mpt
*/
if (mpt->m_quiesce_timeid) {
mpt->m_quiesce_timeid = 0;
}
if (mpt->m_restart_cmd_timeid) {
mpt->m_restart_cmd_timeid = 0;
}
if (mpt->m_pm_timeid != 0) {
mpt->m_pm_timeid = 0;
/*
* Report idle status for last ioctl since
* calls to pm_busy_component(9F) are stacked.
*/
}
/*
* Cancel watch threads if all mpts suspended
*/
if (!g->m_suspended)
break;
}
if (g == NULL) {
if (mptsas_timeout_id) {
mptsas_timeout_id = 0;
}
if (mptsas_reset_watch) {
mptsas_reset_watch = 0;
}
}
/*
* If this mpt is not in full power(PM_LEVEL_D0), just return.
*/
return (DDI_SUCCESS);
}
/* Disable HBA interrupts in hardware */
/* drain the taskq */
return (DDI_SUCCESS);
}
/*
* quiesce(9E) entry point.
*
* This function is called when the system is single-threaded at high
* PIL with preemption disabled. Therefore, this function must not be
* blocked.
*
* This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
* DDI_FAILURE indicates an error condition and should almost never happen.
*/
#ifndef __sparc
static int
{
return (DDI_SUCCESS);
return (DDI_SUCCESS);
/* Disable HBA interrupts in hardware */
return (DDI_SUCCESS);
}
#endif /* __sparc */
/*
* detach(9E). Remove all device allocations and system resources;
* disable device interrupts.
* Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
*/
static int
{
/* CONSTCOND */
switch (cmd) {
case DDI_DETACH:
return (mptsas_do_detach(devi));
case DDI_SUSPEND:
return (mptsas_suspend(devi));
default:
return (DDI_FAILURE);
}
/* NOTREACHED */
}
static int
{
int circ = 0;
int circ1 = 0;
int i;
int doneq_thread_num = 0;
return (DDI_FAILURE);
if (!mpt) {
return (DDI_FAILURE);
}
/*
* Still have pathinfo child, should not detach mpt driver
*/
if (scsi_hba_iport_unit_address(dip)) {
if (mpt->m_mpxio_enable) {
/*
* MPxIO enabled for the iport
*/
continue;
}
NDBG12(("detach failed because of "
"outstanding path info"));
return (DDI_FAILURE);
}
(void) mdi_phci_unregister(dip, 0);
}
return (DDI_SUCCESS);
}
/* Make sure power level is D0 before accessing registers */
(void) pm_busy_component(dip, 0);
DDI_SUCCESS) {
"mptsas%d: Raise power request failed.",
mpt->m_instance);
(void) pm_idle_component(dip, 0);
return (DDI_FAILURE);
}
}
}
if (mpt->m_doneq_thread_n) {
for (i = 0; i < mpt->m_doneq_thread_n; i++) {
}
while (mpt->m_doneq_thread_n) {
&mpt->m_doneq_mutex);
}
for (i = 0; i < doneq_thread_num; i++) {
}
sizeof (mptsas_doneq_thread_list_t)
* doneq_thread_num);
}
/*
* Remove device instance from the global linked list
*/
if (mptsas_head == mpt) {
} else {
break;
}
}
if (m == NULL) {
}
}
if (mptsas_tail == mpt) {
mptsas_tail = m;
}
/*
* Cancel timeout threads for this mpt
*/
if (mpt->m_quiesce_timeid) {
mpt->m_quiesce_timeid = 0;
}
if (mpt->m_restart_cmd_timeid) {
mpt->m_restart_cmd_timeid = 0;
}
if (mpt->m_pm_timeid != 0) {
mpt->m_pm_timeid = 0;
/*
* Report idle status for last ioctl since
* calls to pm_busy_component(9F) are stacked.
*/
}
/*
* last mpt? ... if active, CANCEL watch threads.
*/
if (mptsas_head == NULL) {
/*
* Clear mptsas_timeouts_enable so that the watch thread
* gets restarted on DDI_ATTACH
*/
if (mptsas_timeout_id) {
mptsas_timeout_id = 0;
}
if (mptsas_reset_watch) {
mptsas_reset_watch = 0;
}
}
/*
* Delete nt_active.
*/
if (active) {
}
/* deallocate everything that was allocated in mptsas_attach */
(void) scsi_hba_detach(dip);
(void) smp_hba_detach(dip);
/* Lower the power informing PM Framework */
"!mptsas%d: Lower power request failed "
"during detach, ignoring.",
mpt->m_instance);
}
}
}
return (DDI_SUCCESS);
}
static int
{
/* allocate Task Management ddi_dma resources */
return (DDI_FAILURE);
}
!= DDI_SUCCESS) {
return (DDI_FAILURE);
}
!= DDI_DMA_MAPPED) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
mpt->m_hshk_dma_size = 0;
}
}
static int
{
#ifndef __lock_lint
#endif
int rval = DDI_SUCCESS;
int polls = 0;
if (scsi_hba_iport_unit_address(dip) != 0)
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* If the device is busy, don't lower its power level
*/
return (DDI_FAILURE);
}
switch (level) {
case PM_LEVEL_D0:
/*
* Wait up to 30 seconds for IOC to come out of reset.
*/
if (polls++ > 3000) {
break;
}
}
/*
* If IOC is not in operational state, try to hard reset it.
*/
if ((ioc_status & MPI2_IOC_STATE_MASK) !=
"mptsas_power: hard reset failed");
return (DDI_FAILURE);
}
}
break;
case PM_LEVEL_D3:
break;
default:
rval = DDI_FAILURE;
break;
}
return (rval);
}
/*
* Initialize configuration space and figure out which
* chip and revision of the chip the mpt driver is using.
*/
int
{
NDBG0(("mptsas_config_space_init"));
/*
* Get the chip device id:
*/
/*
* Save the revision.
*/
/*
* Save the SubSystem Vendor and Device IDs
*/
/*
* Set the latency timer to 0x40 as specified by the upa -> pci
* bridge chip design team. This may be done by the sparc pci
* bus nexus driver, but the driver should make sure the latency
* timer is correct for performance reasons.
*/
/*
* Check if capabilities list is supported and if so,
* get initial capabilities pointer and clear bits 0,1.
*/
& PCI_STAT_CAP) {
PCI_CONF_CAP_PTR), 4);
} else {
}
/*
* Walk capabilities if supported.
*/
/*
* Check that we haven't exceeded the maximum number of
* capabilities and that the pointer is in a valid range.
*/
if (++cap_count > 48) {
"too many device capabilities.\n");
return (FALSE);
}
if (caps_ptr < 64) {
"capabilities pointer 0x%x out of range.\n",
caps_ptr);
return (FALSE);
}
/*
* Get next capability and check that it is valid.
* For now, we only support power management.
*/
switch (cap) {
case PCI_CAP_ID_PM:
"?mptsas%d supports power management.\n",
mpt->m_instance);
/* Save PMCSR offset */
break;
/*
* 0x5 is Message signaled interrupts and 0x7
* is pci-x capable. Both are unsupported for now
* but supported by the 1030 chip so we don't
* need to keep printing out the notice.
* 0x10 is PCI-E support (1064E/1068E)
* 0x11 is MSIX supported by the 1064/1068
*/
case 0x5:
case 0x7:
case 0x10:
case 0x11:
break;
default:
"?mptsas%d unrecognized capability "
break;
}
/*
* Get next capabilities pointer and clear bits 0,1.
*/
}
return (TRUE);
}
static void
{
/*
* Set the command register to the needed values.
*/
cmdreg &= ~PCI_COMM_IO;
}
static void
{
/*
* Clear the master enable bit in the PCI command register.
* This prevents any bus mastering activity like DMA.
*/
cmdreg &= ~PCI_COMM_ME;
}
int
{
"unable to allocate dma handle.");
return (DDI_FAILURE);
}
DDI_SUCCESS) {
"unable to allocate memory for dma xfer.");
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
void
{
}
}
int
{
int rval;
"unable to allocate dma handle.");
return (DDI_FAILURE);
}
"unable to allocate request structure.");
return (DDI_FAILURE);
}
(void) ddi_dma_mem_free(&accessp);
return (DDI_FAILURE);
}
rval = DDI_FAILURE;
}
if (dma_handle != NULL) {
(void) ddi_dma_unbind_handle(dma_handle);
(void) ddi_dma_mem_free(&accessp);
}
return (rval);
}
static int
{
/*
* The size of the request frame pool is:
* Number of Request Frames * Request Frame Size
*/
/*
* set the DMA attributes. System Request Message Frames must be
* aligned on a 16-byte boundary.
*/
/*
* allocate the request frame pool.
*/
"Unable to allocate dma handle.");
return (DDI_FAILURE);
}
!= DDI_SUCCESS) {
"Unable to allocate request frames.");
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
/*
* Store the request frame memory address. This chip uses this
* address to dma to and from the driver's frame. The second
* address is the address mpt uses to fill in the frame.
*/
/*
* Clear the request frame pool.
*/
return (DDI_SUCCESS);
}
static int
{
/*
* The size of the reply frame pool is:
* Number of Reply Frames * Reply Frame Size
*/
/*
* set the DMA attributes. System Reply Message Frames must be
* aligned on a 4-byte boundary. This is the default.
*/
/*
* allocate the reply frame pool
*/
"Unable to allocate dma handle.");
return (DDI_FAILURE);
}
!= DDI_SUCCESS) {
"Unable to allocate reply frames.");
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
/*
* Store the reply frame memory address. This chip uses this
* address to dma to and from the driver's frame. The second
* address is the address mpt uses to process the frame.
*/
/*
* Clear the reply frame pool.
*/
return (DDI_SUCCESS);
}
static int
{
/*
* The reply free queue size is:
* Reply Free Queue Depth * 4
* The "4" is the size of one 32 bit address (low part of 64-bit
* address)
*/
/*
* set the DMA attributes. The Reply Free Queue must be aligned on a
* 16-byte boundary.
*/
/*
* allocate the reply free queue
*/
"Unable to allocate dma handle.");
return (DDI_FAILURE);
}
!= DDI_SUCCESS) {
"Unable to allocate free queue.");
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
/*
* Store the reply free queue memory address. This chip uses this
* address to read from the reply free queue. The second address
* is the address mpt uses to manage the queue.
*/
/*
* Clear the reply free queue memory.
*/
return (DDI_SUCCESS);
}
static int
{
/*
* The reply descriptor post queue size is:
* Reply Descriptor Post Queue Depth * 8
* The "8" is the size of each descriptor (8 bytes or 64 bits).
*/
/*
* set the DMA attributes. The Reply Descriptor Post Queue must be
* aligned on a 16-byte boundary.
*/
/*
* allocate the reply post queue
*/
"Unable to allocate dma handle.");
return (DDI_FAILURE);
}
!= DDI_SUCCESS) {
"Unable to allocate post queue.");
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
/*
* Store the reply descriptor post queue memory address. This chip
* uses this address to write to the reply descriptor post queue. The
* second address is the address mpt uses to manage the queue.
*/
/*
* Clear the reply post queue memory.
*/
return (DDI_SUCCESS);
}
static int
{
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS);
}
static void
{
if (cmd->cmd_extra_frames) {
(void *)cmd->cmd_extra_frames);
}
}
static void
{
NDBG0(("mptsas_cfg_fini"));
}
static void
{
NDBG0(("mptsas_hba_fini"));
/*
* Disable any bus mastering ability (i.e: DMA) prior to freeing any
* allocated DMA resources.
*/
/*
* Free up any allocated memory
*/
}
}
}
}
* mpt->m_max_replies);
}
}
static int
{
int lun = 0;
int phynum = -1;
int reallen = 0;
/* Get the target num */
LUN_PROP, 0);
/*
* Stick in the address of the form "wWWN,LUN"
*/
/*
* Stick in the address of form "pPHY,LUN"
*/
} else {
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS);
}
/*
* tran_tgt_init(9E) - target device instance initialization
*/
static int
{
#ifndef __lock_lint
#endif
/*
* At this point, the scsi_device structure already exists
* and has been initialized.
*
* Use this function to allocate target-private data structures,
* if needed by this HBA. Add revised flow-control and queue
* properties for child here, if desired and if you can tell they
* support tagged queueing by now.
*/
int phymask = 0;
NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
return (DDI_FAILURE);
}
/*
* phymask is 0 means the virtual port for RAID
*/
"phymask", 0);
/*
* Very bad news if this occurs. Somehow scsi_vhci has
* lost the pathinfo node for this target.
*/
return (DDI_NOT_WELL_FORMED);
}
return (DDI_FAILURE);
}
&psas_wwn) == MDI_SUCCESS) {
sas_wwn = 0;
}
(void) mdi_prop_free(psas_wwn);
}
} else {
DDI_PROP_DONTPASS, LUN_PROP, 0);
sas_wwn = 0;
}
} else {
sas_wwn = 0;
}
}
sas_wwn);
return (DDI_FAILURE);
}
KM_SLEEP);
}
return (DDI_SUCCESS);
}
if (ptgt->m_deviceinfo &
int inq89_len = 0x238;
int reallen = 0;
int rval = 0;
int i;
/*
* chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
* DEVICE data or ATA IDENTIFY PACKET DEVICE data.
*/
if (rval != 0) {
}
return (DDI_SUCCESS);
}
model[SATA_ID_MODEL_LEN] = 0;
fw[SATA_ID_FW_LEN] = 0;
/*
*/
break;
if (i < SATA_ID_MODEL_LEN) {
/*
* terminate vid, establish pid
*/
*pid++ = 0;
} else {
/*
* vid will stay "ATA ", the rule is same
* as sata framework implementation.
*/
/*
* model is all pid
*/
}
/*
* override SCSA "inquiry-*" properties
*/
if (vid)
(void) scsi_device_prop_update_inqstring(sd,
if (pid)
(void) scsi_device_prop_update_inqstring(sd,
(void) scsi_device_prop_update_inqstring(sd,
}
} else {
}
return (DDI_SUCCESS);
}
/*
* tran_tgt_free(9E) - target device instance deallocation
*/
static void
{
#ifndef __lock_lint
#endif
if (tgt_private != NULL) {
}
}
/*
* scsi_pkt handling
*
* Visible to the external world via the transport structure.
*/
/*
* Notes:
* - normal operation is to schedule the command to be transported,
* and return TRAN_ACCEPT if this is successful.
* - if NO_INTR, tran_start must poll device for command completion
*/
static int
{
#ifndef __lock_lint
#endif
int rval;
return (TRAN_FATAL_ERROR);
/*
* prepare the pkt before taking mutex.
*/
if (rval != TRAN_ACCEPT) {
return (rval);
}
/*
* If busy, return TRAN_BUSY; if there's some other formatting error
* in the packet, return TRAN_BADPKT; otherwise, fall through to the
* return of TRAN_ACCEPT.
*
* Remember that access to shared resources, including the mptsas_t
* data structure and the HBA hardware registers, must be protected
* with mutexes, here and everywhere.
*
* Also remember that at interrupt time, you'll get an argument
* to the interrupt handler which is a pointer to your mptsas_t
* structure; you'll have to remember which commands are outstanding
* and which scsi_pkt is the currently-running command so the
* interrupt handler can refer to the pkt to set completion
* status, call the target driver back through pkt_comp, etc.
*
* If the instance lock is held by other thread, don't spin to wait
* for it. Instead, queue the cmd and next time when the instance lock
* is not held, accept all the queued cmd. An extra tx_waitq is
* introduced to protect the queue.
*
* The polled cmd will not be queued and accepted as usual.
*
* Under the tx_waitq mutex, record whether a thread is draining
* the tx_waitq. An IO requesting thread that finds the instance
* mutex contended appends to the tx_waitq and while holding the
* tx_waitq mutex, if the draining flag is not set, sets it and then
* proceeds to spin for the instance mutex. This scheme ensures that
* the last cmd in a burst is processed.
*
* we enable this feature only when the helper threads are enabled,
* at which we think the loads are heavy.
*
* per instance mutex m_tx_waitq_mutex is introduced to protect the
* m_tx_waitqtail, m_tx_waitq, m_tx_draining.
*/
if (mpt->m_doneq_thread_n) {
} else {
/*
* ptgt->m_dr_flag is protected by m_mutex or
* m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
* is acquired.
*/
/*
* The command should be allowed to
* retry by returning TRAN_BUSY
* to stall the I/O's which come from
* scsi_vhci, since the device/path is
* in unstable state now.
*/
return (TRAN_BUSY);
} else {
/*
* The device is offline, just fail the
* command by returning
* TRAN_FATAL_ERROR.
*/
return (TRAN_FATAL_ERROR);
}
}
if (mpt->m_tx_draining) {
} else { /* drain the queue */
}
}
} else {
/*
* ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
* in this case, m_mutex is acquired.
*/
/*
* commands should be allowed to retry by
* returning TRAN_BUSY to stall the I/O's
* which come from scsi_vhci since the device/
* path is in unstable state now.
*/
return (TRAN_BUSY);
} else {
/*
* The device is offline, just fail the
* command by returning TRAN_FATAL_ERROR.
*/
return (TRAN_FATAL_ERROR);
}
}
}
return (rval);
}
/*
* Accept all the queued cmds(if any) before accept the current one.
*/
static int
{
int rval;
/*
* The call to mptsas_accept_tx_waitq() must always be performed
* because that is where mpt->m_tx_draining is cleared.
*/
/*
* ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
* in this case, m_mutex is acquired.
*/
/*
* The command should be allowed to retry by returning
* TRAN_BUSY to stall the I/O's which come from
* scsi_vhci, since the device/path is in unstable state
* now.
*/
return (TRAN_BUSY);
} else {
/*
* The device is offline, just fail the command by
* return TRAN_FATAL_ERROR.
*/
return (TRAN_FATAL_ERROR);
}
}
return (rval);
}
static int
{
int rval = TRAN_ACCEPT;
if (rval != TRAN_ACCEPT) {
return (rval);
}
}
/*
* reset the throttle if we were draining
*/
NDBG23(("reset throttle"));
}
/*
* If device handle has already been invalidated, just
* fail the command. In theory, a command from a scsi_vhci
* client cannot be sent down with an invalid
* devhdl, since the devhdl is set after path offline and the target
* driver is not supposed to select an offlined path.
*/
NDBG20(("rejecting command, it might because invalid devhdl "
"request."));
return (rval);
} else {
return (TRAN_FATAL_ERROR);
}
}
/*
* The first case is the normal case. mpt gets a command from the
* target driver and starts it.
* Since SMID 0 is reserved and the TM slot is reserved, the actual max
* commands is m_max_requests - 2.
*/
(ptgt->m_reset_delay == 0) &&
} else {
}
} else {
/*
* Add this pkt to the work queue
*/
/*
* Only flush the doneq if this is not a TM
* cmd. For TM cmds the flushing of the
* doneq will be done in those routines.
*/
}
}
}
return (rval);
}
int
{
int slot;
/*
* Account for reserved TM request slot and reserved SMID of 0.
*/
/*
* m_tags is equivalent to the SMID when sending requests. Since the
* SMID cannot be 0, start out at one if rolling over past the size
* of the request queue depth. Also, don't use the last SMID, which is
* reserved for TM requests.
*/
}
/* Validate tag, should never fail. */
/*
* Make sure SMID is not using reserved value of 0
* and the TM request slot.
*/
/*
* only increment per target ncmds if this is not a
* command that has no target associated with it (i.e. a
* event acknoledgment)
*/
}
/*
* If initial timout is less than or equal to one tick, bump
* the timeout by a tick so that command doesn't timeout before
* its allotted time.
*/
}
return (TRUE);
} else {
int i;
/*
* If slot in use, scan until a free one is found. Don't use 0
* or final slot, which is reserved for TM requests.
*/
}
goto alloc_tag;
}
}
}
return (FALSE);
}
/*
* prepare the pkt:
* the pkt may have been resubmitted or just reused so
* initialize some fields and do some checks.
*/
static int
{
/*
* Reinitialize some fields that need it; the packet may
* have been resubmitted
*/
pkt->pkt_statistics = 0;
/*
* zero status byte.
*/
/*
* consistent packets need to be sync'ed first
* (only for data going out)
*/
}
}
return (TRAN_ACCEPT);
}
/*
* tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
*
* One of three possibilities:
* - allocate scsi_pkt
* - allocate scsi_pkt and DMA resources
* - allocate DMA resources to an already-allocated pkt
*/
static struct scsi_pkt *
{
int failure = 1;
int rval;
int kf;
if (tgt_private == NULL) {
return (NULL);
}
return (NULL);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
#endif
NDBG3(("mptsas_scsi_init_pkt:\n"
"\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
/*
* Allocate the new packet.
*/
struct buf *save_arq_bp;
if (cmd) {
sizeof (struct mptsas_cmd));
failure = 0;
}
(tgtlen > PKT_PRIV_LEN) ||
(statuslen > EXTCMDS_STATUS_SIZE)) {
if (failure == 0) {
/*
* if extern alloc fails, all will be
* deallocated, including cmd
*/
}
if (failure) {
/*
* if extern allocation fails, it will
* deallocate the new pkt as well
*/
return (NULL);
}
}
} else {
}
/* grab cmd->cmd_cookiec here as oldcookiec */
/*
* If the dma was broken up into PARTIAL transfers cmd_nwin will be
* greater than 0 and we'll need to grab the next dma window
*/
/*
* SLM-not doing extra command frame right now; may add later
*/
/*
* Make sure we havn't gone past the the total number
* of windows
*/
return (NULL);
}
return (NULL);
}
goto get_dma_cookies;
}
}
/*
* DMA resource allocation. This version assumes your
* HBA has some sort of bus-mastering or onboard DMA capability, with a
* scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
* ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
*/
/*
* Set up DMA memory and position to the next DMA segment.
*/
} else {
}
if (flags & PKT_CONSISTENT) {
}
if (flags & PKT_DMA_PARTIAL) {
}
/*
* workaround for byte hole issue on psycho and
* schizo pre 2.1
*/
}
if (rval == DDI_DMA_PARTIAL_MAP) {
cmd->cmd_winindex = 0;
&cmd->cmd_cookiec);
switch (rval) {
case DDI_DMA_NORESOURCES:
break;
case DDI_DMA_BADATTR:
case DDI_DMA_NOMAPPING:
break;
case DDI_DMA_TOOBIG:
default:
break;
}
if (new_cmd) {
}
}
cmd->cmd_cookiec);
if (new_cmd) {
}
}
/*
* Allocate extra SGL buffer if needed.
*/
DDI_FAILURE) {
"failed");
if (new_cmd) {
}
}
}
/*
* Always use scatter-gather transfer
* Use the loop below to store physical addresses of
* DMA segments, from the DMA cookies, into your HBA's
* scatter-gather list.
* We need to ensure we have enough kmem alloc'd
* for the sg entries since we are no longer using an
* array inside mptsas_cmd_t.
*
* We check cmd->cmd_cookiec against oldcookiec so
* the scatter-gather list is correctly allocated
*/
}
}
"unable to kmem_alloc enough memory "
/*
* if we have an ENOMEM condition we need to behave
* the same way as the rest of this routine
*/
if (new_cmd) {
}
}
}
/*
* store the first segment into the S/G list
*/
/*
* dmacount counts the size of the dma for this window
* (if partial dma is being used). totaldmacount
* keeps track of the total amount of dma we have
* transferred for all the windows (needed to calculate
* the resid value below).
*/
/*
* We already stored the first DMA scatter gather segment,
* start at 1 if we need to store more.
*/
/*
* Get next DMA cookie
*/
&cmd->cmd_cookie);
dmap++;
/*
* store the segment parms into the S/G list
*/
}
/*
* If this was partially allocated we set the resid
* the amount of data NOT transferred in this window
* If there is only one window, the resid will be 0
*/
}
return (pkt);
}
/*
* tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
*
* Notes:
* - also frees DMA resources if allocated
* - implicit DMA synchonization
*/
static void
{
NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
}
}
CFLAG_SCBEXTERN)) == 0) {
} else {
}
}
/*
* kmem cache constructor and destructor:
* When constructing, we bzero the cmd and allocate the dma handle
* When destructing, just free the dma handle
*/
static int
{
struct scsi_address ap;
NDBG4(("mptsas_kmem_cache_constructor"));
/*
* allocate a dma handle
*/
return (-1);
}
return (-1);
}
/*
* allocate a arq handle
*/
return (-1);
}
return (-1);
}
return (0);
}
static void
{
#ifndef __lock_lint
#endif
NDBG4(("mptsas_kmem_cache_destructor"));
if (cmd->cmd_arqhandle) {
}
if (cmd->cmd_arq_buf) {
}
if (cmd->cmd_dmahandle) {
}
}
static int
{
mptsas_cache_frames_t *p = buf;
&p->m_dma_hdl) != DDI_SUCCESS) {
" extra SGL.");
return (DDI_FAILURE);
}
ddi_dma_free_handle(&p->m_dma_hdl);
" extra SGL.");
return (DDI_FAILURE);
}
(void) ddi_dma_mem_free(&p->m_acc_hdl);
ddi_dma_free_handle(&p->m_dma_hdl);
" extra SGL");
return (DDI_FAILURE);
}
/*
* Store the SGL memory address. This chip uses this
* address to dma to and from the driver. The second
* address is the address mpt uses to fill in the SGL.
*/
return (DDI_SUCCESS);
}
static void
{
#ifndef __lock_lint
#endif
mptsas_cache_frames_t *p = buf;
(void) ddi_dma_unbind_handle(p->m_dma_hdl);
(void) ddi_dma_mem_free(&p->m_acc_hdl);
ddi_dma_free_handle(&p->m_dma_hdl);
p->m_phys_addr = NULL;
p->m_frames_addr = NULL;
}
}
/*
* allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
* for non-standard length cdb, pkt_private, status areas
* if allocation fails, then deallocate all external space and the pkt
*/
/* ARGSUSED */
static int
{
struct scsi_address ap;
NDBG3(("mptsas_pkt_alloc_extern: "
"cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
goto fail;
}
}
if (tgtlen > PKT_PRIV_LEN) {
goto fail;
}
}
if (statuslen > EXTCMDS_STATUS_SIZE) {
goto fail;
}
/* allocate sense data buf for DMA */
struct scsi_arq_status, sts_sensedata);
goto fail;
}
/*
* allocate a extern arq handle and bind the buf
*/
goto fail;
}
&cookiec)
!= DDI_SUCCESS) {
goto fail;
}
}
return (0);
fail:
return (1);
}
/*
* deallocate external pkt space and deallocate the pkt
*/
static void
{
"mptsas_pkt_destroy_extern: freeing free packet");
/* NOTREACHED */
}
}
}
if (cmd->cmd_ext_arqhandle) {
}
if (cmd->cmd_ext_arq_buf)
}
}
}
/*
* tran_sync_pkt(9E) - explicit DMA synchronization
*/
/*ARGSUSED*/
static void
{
NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
if (cmd->cmd_dmahandle) {
}
}
/*
* tran_dmafree(9E) - deallocate DMA resources allocated for command
*/
/*ARGSUSED*/
static void
{
NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
}
}
}
static void
{
}
}
static void
{
/*
* Save the number of entries in the DMA
*/
/*
*/
} else {
}
/*
* We have 2 cases here. First where we can fit all the
* SG elements into the main frame, and the case
* where we can't.
* If we have more cookies than we can attach to a frame
* we will need to use a chain element to point
* a location of memory where the rest of the S/G
* elements reside.
*/
while (cookiec--) {
/*
* If this is the last cookie, we set the flags
* to indicate so
*/
if (cookiec == 0) {
flags |=
}
flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
} else {
flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
}
dmap++;
sge++;
}
} else {
/*
* Hereby we start to deal with multiple frames.
* The process is as follows:
* 1. Determine how many frames are needed for SGL element
* storage; Note that all frames are stored in contiguous
* memory space and in 64-bit DMA mode each element is
* 3 double-words (12 bytes) long.
* 2. Fill up the main frame. We need to do this separately
* since it contains the SCSI IO request header and needs
* dedicated processing. Note that the last 4 double-words
* of the SCSI IO header is for SGL element storage
* (MPI2_SGE_IO_UNION).
* 3. Fill the chain element in the main frame, so the DMA
* engine can use the following frames.
* 4. Enter a loop to fill the remaining frames. Note that the
* last frame contains no chain element. The remaining
* frames go into the mpt SGL buffer allocated on the fly,
* not immediately following the main message frame, as in
* Gen1.
* Some restrictions:
* 1. For 64-bit DMA, the simple element and chain element
* are both of 3 double-words (12 bytes) in size, even
* though all frames are stored in the first 4G of mem
* range and the higher 32-bits of the address are always 0.
* 2. On some controllers (like the 1064/1068), a frame can
* hold SGL elements with the last 1 or 2 double-words
* (4 or 8 bytes) un-used. On these controllers, we should
* recognize that there's not enough room for another SGL
* element and move the sge pointer to the next frame.
*/
int temp;
/*
* Sgemax is the number of SGE's that will fit
* each extra frame and frames is total
* number of frames we'll need. 1 sge entry per
* frame is reseverd for the chain element thus the -1 below.
*/
- 1);
/*
* A little check to see if we need to round up the number
* of frames we need
*/
sgemax) > 1) {
} else {
}
/*
* First fill in the main frame
*/
/*
* If this is the last SGE of this frame
* we set the end of list flag
*/
}
flags |=
} else {
flags |=
}
dmap++;
sge++;
}
/*
* Fill in the chain element in the main frame.
* About calculation on ChainOffset:
* 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
* in the end reserved for SGL element storage
* (MPI2_SGE_IO_UNION); we should count it in our
* calculation. See its definition in the header file.
* 2. Constant j is the counter of the current SGL element
* that will be processed, and (j - 1) is the number of
* SGL elements that have been processed (stored in the
* main frame).
* 3. ChainOffset value should be in units of double-words (4
* bytes) so the last value should be divided by 4.
*/
(sizeof (MPI2_SCSI_IO_REQUEST) -
sizeof (MPI2_SGE_IO_UNION) +
/*
* The size of the next frame is the accurate size of space
* (in bytes) used to store the SGL elements. j is the counter
* of SGL elements. (j - 1) is the number of SGL elements that
* have been processed (stored in frames).
*/
if (frames >= 2) {
sizeof (MPI2_SGE_SIMPLE64) *
sizeof (MPI2_SGE_SIMPLE64);
} else {
sizeof (MPI2_SGE_SIMPLE64));
}
p = cmd->cmd_extra_frames;
p->m_phys_addr);
/* SGL is allocated in the first 4G mem range */
/*
* If there are more than 2 frames left we have to
* fill in the next chain offset to the location of
* the chain element in the next frame.
* sgemax is the number of simple elements in an extra
* frame. Note that the value NextChainOffset should be
* in double-words (4 bytes).
*/
if (frames >= 2) {
} else {
}
/*
* Jump to next frame;
* Starting here, chain buffers go into the per command SGL.
* This buffer is allocated when chain buffers are needed.
*/
i = cookiec;
/*
* Start filling in frames with SGE's. If we
* reach the end of frame and still have SGE's
* to fill we need to add a chain element and
* use another frame. j will be our counter
* for what cookie we are at and i will be
* the total cookiec. k is the current frame
*/
for (k = 1; k <= frames; k++) {
/*
* If we have reached the end of frame
* and we have more SGE's to fill in
* we have to fill the final entry
* with a chain element and then
* continue to the next frame
*/
j--;
chainflags = (
/*
* k is the frame counter and (k + 1)
* is the number of the next frame.
* Note that frames are in contiguous
* memory space.
*/
(p->m_phys_addr +
(mpt->m_req_frame_size * k)));
/*
* If there are more than 2 frames left
* we have to next chain offset to
* the location of the chain element
* in the next frame and fill in the
* length of the next chain
*/
if ((frames - k) >= 2) {
(sgemax *
sizeof (MPI2_SGE_SIMPLE64))
>> 2);
sizeof (MPI2_SGE_SIMPLE64) *
sizeof (MPI2_SGE_SIMPLE64));
} else {
/*
* This is the last frame. Set
* the NextChainOffset to 0 and
* Length is the total size of
* all remaining simple elements
*/
0);
(cookiec - j) *
sizeof (MPI2_SGE_SIMPLE64));
}
/* Jump to the next frame */
((char *)p->m_frames_addr +
(int)mpt->m_req_frame_size * k);
continue;
}
&sge->FlagsLength);
/*
* If we are at the end of the frame and
* there is another frame to fill in
* we set the last simple element as last
* element
*/
}
/*
* If this is the final cookie we
* indicate it by setting the flags
*/
if (j == i) {
}
flags |=
} else {
flags |=
}
dmap++;
sge++;
}
}
/*
* Sync DMA with the chain buffers that were just created
*/
}
}
/*
* Interrupt handling
* Utility routine. Poll for status of a command sent to HBA
* without interrupts (a FLAG_NOINTR command).
*/
int
{
}
/*
* Wait, using drv_usecwait(), long enough for the command to
* reasonably return from the target if the target isn't
* "dead". A polled command may well be sent from scsi_poll, and
* there are retries built in to scsi_poll if the transport
* accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
* and retries the transport up to scsi_poll_busycnt times
* (currently 60) if
* 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
* 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
*
* limit the waiting to avoid a hang in the event that the
* cmd never gets started but we are still receiving interrupts
*/
NDBG5(("mptsas_poll: command incomplete"));
break;
}
}
/*
* this isn't supposed to happen, the hba must be wedged
* Mark this cmd as a timeout.
*/
NDBG5(("mptsas_poll: not on waitq"));
} else {
/* find and remove it from the waitq */
NDBG5(("mptsas_poll: delete from waitq"));
}
}
NDBG5(("mptsas_poll: done"));
return (rval);
}
/*
* Used for polling cmds and TM function
*/
static int
{
int cnt;
NDBG5(("mptsas_wait_intr"));
/*
* Get the current interrupt mask. When re-enabling ints, set mask to
* saved value.
*/
/*
* Keep polling for at least (polltime * 1000) seconds
*/
drv_usecwait(1000);
continue;
}
/*
* The reply is valid, process it according to its
* type.
*/
mpt->m_post_index = 0;
}
mpt->m_polled_intr = 0;
/*
* Re-enable interrupts and quit.
*/
int_mask);
return (TRUE);
}
/*
* Clear polling flag, re-enable interrupts and quit.
*/
mpt->m_polled_intr = 0;
return (FALSE);
}
static void
{
/*
* This is a success reply so just complete the IO. First, do a sanity
* check on the SMID. The final slot is used for TM requests, which
* would not come into this reply handler.
*/
SMID);
return;
}
/*
* print warning and return if the slot is empty
*/
"in slot %d", SMID);
return;
}
}
return;
} else {
}
/*
* The target returned QFULL or busy, do not add tihs
* pkt to the doneq since the hba will retry
* this cmd.
*
* The pkt has already been resubmitted in
* mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
* Remove this cmd_flag here.
*/
} else {
}
}
static void
{
int reply_frame_no;
/*
* If reply frame is not in the proper range we should ignore this
* message and exit the interrupt handler.
*/
mpt->m_reply_frame_size != 0)) {
"address 0x%x\n", reply_addr);
return;
}
/*
* don't get slot information and command for events since these values
* don't exist
*/
if (function != MPI2_FUNCTION_EVENT_NOTIFICATION) {
/*
* This could be a TM reply, which use the last allocated SMID,
* so allow for that.
*/
"%d\n", SMID);
return;
}
/*
* print warning and return if the slot is empty
*/
"reply in slot %d", SMID);
return;
}
return;
}
}
/*
* Depending on the function, we need to handle
* the reply frame (and cmd) differently.
*/
switch (function) {
break;
cmd);
break;
break;
/*
* Record the event if its type is enabled in
* this mpt instance by ioctl.
*/
/*
* Handle time critical events
* NOT_RESPONDING/ADDED only now
*/
/*
* Would not return main process,
* just let taskq resolve ack action
* and ack would be sent in taskq thread
*/
NDBG20(("send mptsas_handle_event_sync success"));
}
"for dispatch taskq");
/*
* Return the reply frame to the free queue.
*/
&((uint32_t *)(void *)
mpt->m_free_index = 0;
}
}
return;
default:
break;
}
/*
* Return the reply frame to the free queue.
*/
mpt->m_free_index = 0;
}
mpt->m_free_index);
return;
/*
* The target returned QFULL or busy, do not add tihs
* pkt to the doneq since the hba will retry
* this cmd.
*
* The pkt has already been resubmitted in
* mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
* Remove this cmd_flag here.
*/
} else {
}
}
static void
{
struct scsi_arq_status *arqstat;
} else {
}
&reply->ResponseInfo);
&reply->IOCLogInfo);
"?Log info 0x%x received for target %d.\n"
"\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
}
NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
if (loginfo == 0x31170000) {
/*
* if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
* 0x31170000 comes, that means the device missing delay
* is in progressing, the command need retry later.
*/
return;
}
if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
((ioc_status & MPI2_IOCSTATUS_MASK) ==
if (ptgt->m_reset_delay == 0) {
}
return;
}
responsedata &= 0x000000FF;
return;
}
}
switch (scsi_status) {
}
}
cmd->cmd_rqslen));
/*
* Set proper status for pkt if autosense was valid
*/
struct scsi_status zero_status = { 0 };
}
/*
* ASC=0x47 is parity error
* ASC=0x48 is initiator detected error received
*/
}
/*
*/
sizeof (mptsas_topo_change_list_t),
"resource for handle SAS dynamic"
"reconfigure.\n");
break;
}
(void *)topo_node,
DDI_NOSLEEP)) != DDI_SUCCESS) {
"for handle SAS dynamic reconfigure"
"failed. \n");
}
}
break;
case MPI2_SCSI_STATUS_GOOD:
switch (ioc_status & MPI2_IOCSTATUS_MASK) {
if (ptgt->m_reset_delay == 0) {
}
NDBG31(("lost disk for target%d, command:%x",
break;
break;
}
break;
break;
break;
break;
case MPI2_IOCSTATUS_BUSY:
/*
* set throttles to drain
*/
}
/*
* retry command
*/
break;
default:
"unknown ioc_status = %x\n", ioc_status);
"count = %x, scsi_status = %x", scsi_state,
break;
}
break;
break;
case MPI2_SCSI_STATUS_BUSY:
NDBG31(("scsi_status busy received"));
break;
NDBG31(("scsi_status reservation conflict received"));
break;
default:
"mptsas_process_intr: invalid scsi status\n");
break;
}
}
static void
{
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
"failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
return;
}
switch (task_type) {
break;
break;
default:
break;
}
}
static void
{
}
}
if (pkt) {
}
}
mpt->m_doneq_thread_n--;
}
/*
* mpt interrupt handler.
*/
static uint_t
{
/*
* If interrupts are shared by two channels then
* check whether this interrupt is genuinely for this
* channel by making sure first the chip is in high
* power state.
*/
return (DDI_INTR_UNCLAIMED);
}
/*
* If polling, interrupt was triggered by some shared interrupt because
* IOC interrupts are disabled during polling, so polling routine will
* handle any replies. Considering this, if polling is happening,
* return with interrupt unclaimed.
*/
if (mpt->m_polled_intr) {
return (DDI_INTR_UNCLAIMED);
}
/*
* Read the istat register.
*/
if ((INTPENDING(mpt)) != 0) {
/*
* read fifo until empty.
*/
#ifndef __lock_lint
#endif
while (TRUE) {
break;
}
/*
* The reply is valid, process it according to its
* type. Also, set a flag for updated the reply index
* after they've all been processed.
*/
mpt->m_post_index = 0;
}
}
/*
* Update the global reply index if at least one reply was
* processed.
*/
if (did_reply) {
}
} else {
return (DDI_INTR_UNCLAIMED);
}
NDBG1(("mptsas_intr complete"));
/*
* If no helper threads are created, process the doneq in ISR.
* If helpers are created, use the doneq length as a metric to
* measure the load on the interrupt CPU. If it is long enough,
* which indicates the load is heavy, then we deliver the IO
* completions to the helpers.
* this measurement has some limitations although, it is simple
* and straightforward and works well for most of the cases at
* present.
*/
if (!mpt->m_doneq_thread_n ||
} else {
}
/*
* If there are queued cmd, start them now.
*/
}
return (DDI_INTR_CLAIMED);
}
static void
{
/*
* The reply is valid, process it according to its
* type. Also, set a flag for updated the reply index
* after they've all been processed.
*/
} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
} else {
}
/*
* Clear the reply descriptor for re-use and increment
* index.
*/
0xFFFFFFFFFFFFFFFF);
}
/*
* handle qfull condition
*/
static void
{
(ptgt->m_qfull_retries == 0)) {
/*
* We have exhausted the retries on QFULL, or,
* the target driver has indicated that it
* wants to handle QFULL itself by setting
* qfull-retries capability to 0. In either case
* we want the target driver's QFULL handling
* to kick in. We do this by having pkt_reason
* as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
*/
} else {
if (ptgt->m_reset_delay == 0) {
ptgt->m_t_throttle =
}
/*
* when target gives queue full status with no commands
* outstanding (m_t_ncmds == 0), throttle is set to 0
* (HOLD_THROTTLE), and the queue full handling start
* (see psarc/1994/313); if there are commands outstanding,
* throttle is set to (m_t_ncmds - 2)
*/
/*
* By setting throttle to QFULL_THROTTLE, we
* avoid submitting new commands and in
* mptsas_restart_cmd find out slots which need
* their throttles to be cleared.
*/
if (mpt->m_restart_cmd_timeid == 0) {
}
}
}
}
{
int i;
/*
* RAID doesn't have valid phymask and physport so we use phymask == 0
* and physport == 0xff to indicate that it's RAID.
*/
if (phymask == 0) {
return (0xff);
}
for (i = 0; i < 8; i++) {
if (phymask & (1 << i)) {
break;
}
}
}
{
uint8_t i = 0;
/*
* If physport is 0xFF, this is a RAID volume. Use phymask of 0.
*/
if (physport == 0xFF) {
return (0);
}
for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
break;
}
}
NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
return (phy_mask);
}
/*
* mpt free device handle after device gone, by use of passthrough
*/
static int
{
int ret;
/*
* Need to compose a SAS IO Unit Control request message
* and call mptsas_do_passthru() function
*/
if (ret != 0) {
"Control error %d", ret);
return (DDI_FAILURE);
}
/* do passthrough success, check the ioc status */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
char *phy_mask_name;
int i, j;
(void) mptsas_get_sas_io_unit_page(mpt);
for (i = 0; i < mpt->m_num_phys; i++) {
phy_mask = 0x00;
continue;
if ((mask & (1 << i)) != 0)
continue;
for (j = 0; j < mpt->m_num_phys; j++) {
phy_mask |= (1 << j);
}
}
for (j = 0; j < mpt->m_num_phys; j++) {
if ((phy_mask >> j) & 0x01) {
}
}
/*
* register a iport, if the port has already been existed
* SCSA will do nothing and just return.
*/
}
}
/*
* mptsas_handle_dr is a task handler for DR, the DR action includes:
* 4. LUNs of a existing device status change.
* 6. Member of RAID volume is released because of RAID deletion.
* 7. Physical disks are removed because of RAID creation.
*/
static void
mptsas_handle_dr(void *args) {
char *phy_mask_name;
uint8_t port_update = 0;
switch (event) {
if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
/*
* Direct attached or expander attached device added
* into system or a Phys Disk that is being unhidden.
*/
port_update = 1;
}
break;
/*
* New expander added into system, it must be the head
* of topo_change_list_t
*/
port_update = 1;
break;
default:
port_update = 0;
break;
}
/*
* All cases port_update == 1 may cause initiator port form change
*/
/*
* mpt->m_port_chng flag indicates some PHYs of initiator
* port have changed to online. So when expander added or
* directly attached device online event come, we force to
* update port information by issueing SAS IO Unit Page and
* update PHYMASKs.
*/
(void) mptsas_update_phymask(mpt);
mpt->m_port_chng = 0;
}
while (topo_node) {
phymask = 0;
if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
/*
* For all offline events, phymask is known
*/
goto find_parent;
}
if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
goto handle_topo_change;
}
if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
goto find_parent;
}
if ((flags ==
(event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
/*
* There is no any field in IR_CONFIG_CHANGE
* parent after SAS Device Page0 request.
*/
goto handle_topo_change;
}
if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
/*
* If the direct attached device added or a
* phys disk is being unhidden, argument
* physport actually is PHY#, so we have to get
* phymask according PHY#.
*/
}
/*
* Translate physport to phymask so that we can search
* parent dip.
*/
physport);
/*
* For RAID topology change node, write the iport name
* as v0.
*/
if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
} else {
/*
* phymask can bo 0 if the drive has been
* pulled by the time an add event is
* processed. If phymask is 0, just skip this
* event and continue.
*/
if (phymask == 0) {
sizeof (mptsas_topo_change_list_t));
continue;
}
}
"iport, should not happen!");
goto out;
}
}
if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
/*
* If direct attached device associated, make sure
* reset the parent before start the next one. But
* all devices associated with expander shares the
* parent. Also, reset parent if this is for RAID.
*/
}
}
out:
}
static void
{
int rval = 0;
{
char *phy_mask_name;
/*
* Get latest RAID info.
*/
(void) mptsas_get_raid_info(mpt);
break;
} else {
}
/*
* If a Phys Disk was deleted, RAID info needs to be
* updated to reflect the new topology.
*/
(void) mptsas_get_raid_info(mpt);
/*
* Get sas device page 0 by DevHandle to make sure if
*/
if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
"mptsas_handle_topo_change: target %d is "
} else if (rval == DEV_INFO_FAIL_ALLOC) {
"mptsas_handle_topo_change: could not "
"allocate memory. \n");
}
/*
* If rval is DEV_INFO_PHYS_DISK than there is nothing
* else to do, just leave.
*/
if (rval != DEV_INFO_SUCCESS) {
return;
}
}
"iport for PD, should not happen!");
break;
}
}
if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
&lundip);
} else {
/*
* hold nexus for bus configure
*/
/*
* release nexus for bus configure
*/
}
NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
break;
}
{
break;
if (sas_wwn) {
} else {
}
/*
* Get latest RAID info, if RAID volume status change
*/
(void) mptsas_get_raid_info(mpt);
}
/*
* Abort all outstanding command on the device
*/
if (rval) {
NDBG20(("mptsas%d handle_topo_change to reset target "
"before offline devhdl:%x, phymask:%x, rval:%x",
rval));
}
NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
if (rval == DDI_SUCCESS) {
} else {
/*
* clean DR_INTRANSITION flag to allow I/O down to
* PHCI driver since failover finished.
* Invalidate the devhdl
*/
}
/*
* Send SAS IO Unit Control to free the dev handle
*/
if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
NDBG20(("mptsas%d handle_topo_change to remove "
rval));
}
break;
}
{
/*
* If this is the remove handle event, do a reset first.
*/
if (rval) {
NDBG20(("mpt%d reset target before remove "
}
}
/*
* Send SAS IO Unit Control to free the dev handle
*/
NDBG20(("mptsas%d handle_topo_change to remove "
rval));
break;
}
{
if (rval != DDI_SUCCESS) {
"handle %x", devhdl);
return;
}
return;
}
break;
}
{
break;
/*
* The mptsas_smp_t data is released only if the dip is offlined
* successfully.
*/
NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
if (rval == DDI_SUCCESS) {
} else {
}
break;
}
default:
return;
}
}
/*
* Record the event if its type is enabled in mpt instance by ioctl.
*/
static void
mptsas_record_event(void *args)
{
int i, j;
/*
* Generate a system event to let anyone who cares know that a
* LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
* event mask is set to.
*/
if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
}
/*
* Record the event only if it is not masked. Determine which dword
* and bit of event mask to test.
*/
i = mpt->m_event_index;
if (event_data_len > 0) {
/*
* Limit data to size in m_event entry
*/
}
for (j = 0; j < event_data_len; j++) {
&(eventreply->EventData[j]));
}
/*
* check for index wrap-around
*/
if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
i = 0;
}
/*
* Set flag to send the event.
*/
}
}
/*
* Generate a system event if flag is set to let anyone who cares know
* that an event has occurred.
*/
if (sendAEN) {
}
}
/*
* handle sync events from ioc in interrupt
* return value:
* DDI_SUCCESS: The event is handled by this func
* DDI_FAILURE: Event is not handled
*/
static int
mptsas_handle_event_sync(void *args)
{
&eventreply->IOCStatus)) {
"!mptsas_handle_event_sync: IOCStatus=0x%x, "
"IOCLogInfo=0x%x", iocstatus,
&eventreply->IOCLogInfo));
} else {
"mptsas_handle_event_sync: IOCStatus=0x%x, "
"IOCLogInfo=0x%x", iocstatus,
&eventreply->IOCLogInfo));
}
}
/*
* figure out what kind of event we got and handle accordingly
*/
switch (event) {
{
NDBG20(("mptsas_handle_event_sync: SAS topology change"));
string[0] = 0;
if (expd_handle) {
switch (expstatus) {
/*
* New expander device added
*/
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
"removed");
break;
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
break;
"delaying removal");
break;
default:
break;
}
} else {
}
NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
for (i = 0; i < num_entries; i++) {
phy = i + start_phy_num;
/*
* Filter out processing of Phy Vacant Status unless
* the reason code is "Not Responding". Process all
* other combinations of Phy Status and Reason Codes.
*/
if ((phystatus &
(reason_code !=
continue;
}
curr[0] = 0;
prev[0] = 0;
string[0] = 0;
switch (reason_code) {
{
NDBG20(("mptsas%d phy %d physical_port %d "
physport, dev_handle));
switch (state) {
break;
"failed speed negotiation");
break;
"complete");
break;
case SMP_RESET_IN_PROGRESS:
"progress");
break;
"1.5 Gbps");
break;
"Gbps");
break;
"Gbps");
break;
default:
"unknown");
break;
}
/*
* New target device added into the system.
* Set association flag according to if an
* expander is used or not.
*/
exp_flag =
if (flags ==
}
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
if (expd_handle == 0) {
/*
* Per MPI 2, if expander dev handle
* is 0, it's a directly attached
* device. So driver use PHY to decide
* which iport is associated
*/
}
} else {
}
break;
}
{
NDBG20(("mptsas%d phy %d physical_port %d "
/*
* Set association flag according to if an
* expander is used or not.
*/
exp_flag =
if (flags ==
}
/*
* Target device is removed from the system
* Before the device is really offline from
* from system.
*/
/*
* If ptgt is NULL here, it means that the
* DevHandle is not in the hash table. This is
* reasonable sometimes. For example, if a
* disk was pulled, then added, then pulled
* again, the disk will not have been put into
* the hash table because the add event will
* have an invalid phymask. BUT, this does not
* mean that the DevHandle is invalid. The
* controller will still have a valid DevHandle
* that must be removed. To do this, use the
* MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
*/
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
}
/*
* Update DR flag immediately avoid I/O failure
* before failover finish. Pay attention to the
* mutex protect, we need grab m_tx_waitq_mutex
* during set m_dr_flag because we won't add
* the following command into waitq, instead,
* we need return TRAN_BUSY in the tran_start
* context.
*/
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
}
switch (state) {
break;
"failed speed negotiation");
break;
"complete");
break;
case SMP_RESET_IN_PROGRESS:
"progress");
break;
"1.5 Gbps");
if ((expd_handle == 0) &&
(enc_handle == 1)) {
}
break;
"Gbps");
if ((expd_handle == 0) &&
(enc_handle == 1)) {
}
break;
"6.0 Gbps");
if ((expd_handle == 0) &&
(enc_handle == 1)) {
}
break;
default:
"unknown");
break;
}
switch (state) {
break;
"failed speed negotiation");
break;
"complete");
break;
case SMP_RESET_IN_PROGRESS:
"in progress");
break;
"1.5 Gbps");
break;
"3.0 Gbps");
break;
"6.0 Gbps");
break;
default:
break;
}
"changed, ");
break;
continue;
"target not responding, delaying "
"removal");
break;
}
NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
prev));
}
/*
* Launch DR taskq to handle topology change
*/
mptsas_handle_dr, (void *)topo_head,
DDI_NOSLEEP)) != DDI_SUCCESS) {
"for handle SAS DR event failed. \n");
}
}
break;
}
{
NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
mpt->m_instance));
for (i = 0; i < num_entries; i++) {
switch (reason) {
{
NDBG20(("mptsas %d volume added\n",
mpt->m_instance));
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
}
{
NDBG20(("mptsas %d volume deleted\n",
mpt->m_instance));
break;
/*
* Clear any flags related to volume
*/
/*
* Update DR flag immediately avoid I/O failure
*/
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
}
{
break;
/*
* Update DR flag immediately avoid I/O failure
*/
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
}
{
/*
* The physical drive is released by a IR
* volume. But we cannot get the the physport
* or phynum from the event data, so we only
* Device Page0 request for the devhdl.
*/
sizeof (mptsas_topo_change_list_t),
KM_SLEEP);
} else {
}
break;
}
default:
break;
}
}
/*
* Launch DR taskq to handle topology change
*/
mptsas_handle_dr, (void *)topo_head,
DDI_NOSLEEP)) != DDI_SUCCESS) {
"for handle SAS DR event failed. \n");
}
}
break;
}
default:
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* handle events from ioc
*/
static void
mptsas_handle_event(void *args)
{
&eventreply->IOCStatus)) {
"!mptsas_handle_event: IOCStatus=0x%x, "
"IOCLogInfo=0x%x", iocstatus,
&eventreply->IOCLogInfo));
} else {
"mptsas_handle_event: IOCStatus=0x%x, "
"IOCLogInfo=0x%x", iocstatus,
&eventreply->IOCLogInfo));
}
}
/*
* figure out what kind of event we got and handle accordingly
*/
switch (event) {
break;
case MPI2_EVENT_LOG_DATA:
&eventreply->IOCLogInfo);
iocloginfo));
break;
case MPI2_EVENT_STATE_CHANGE:
break;
break;
case MPI2_EVENT_SAS_DISCOVERY:
{
char string[80];
string[0] = 0;
switch (rc) {
break;
break;
default:
break;
}
break;
}
case MPI2_EVENT_EVENT_CHANGE:
break;
case MPI2_EVENT_TASK_SET_FULL:
{
NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
&taskfull->CurrentDepth)));
break;
}
{
/*
* SAS TOPOLOGY CHANGE LIST Event has already been handled
* in mptsas_handle_event_sync() of interrupt context
*/
break;
}
{
char string[80];
&encstatus->ReasonCode);
switch (rc) {
break;
break;
default:
break;
}
NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
break;
}
/*
* MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
* mptsas_handle_event_sync,in here just send ack message.
*/
{
wwn));
switch (rc) {
&statuschange->ASC),
&statuschange->ASCQ)));
break;
NDBG20(("Device not supported"));
break;
NDBG20(("IOC internally generated the Target Reset "
"for devhdl:%x", devhdl));
break;
NDBG20(("IOC's internally generated Target Reset "
"completed for devhdl:%x", devhdl));
break;
NDBG20(("IOC internally generated Abort Task"));
break;
NDBG20(("IOC's internally generated Abort Task "
"completed"));
break;
NDBG20(("IOC internally generated Abort Task Set"));
break;
NDBG20(("IOC internally generated Clear Task Set"));
break;
NDBG20(("IOC internally generated Query Task"));
break;
NDBG20(("Device sent an Asynchronous Notification"));
break;
default:
break;
}
break;
}
{
/*
* IR TOPOLOGY CHANGE LIST Event has already been handled
* in mpt_handle_event_sync() of interrupt context
*/
break;
}
{
char reason_str[80];
switch (rc) {
break;
"expansion");
break;
break;
default:
rc);
}
NDBG20(("mptsas%d raid operational status: (%s)"
"\thandle(0x%04x), percent complete(%d)\n",
break;
}
case MPI2_EVENT_IR_VOLUME:
{
&irVolume->VolDevHandle);
NDBG20(("EVENT_IR_VOLUME event is received"));
/*
* Get latest RAID info and then find the DevHandle for this
* event in the configuration. If the DevHandle is not found
* just exit the event.
*/
(void) mptsas_get_raid_info(mpt);
config++) {
m_raidhandle == devhandle) {
break;
}
}
}
if (!found) {
break;
}
switch (irVolume->ReasonCode) {
{
uint32_t i;
", auto-config of hot-swap drives is %s"
", write caching is %s"
", hot-spare pool mask is %02x\n",
? "disabled" : "enabled",
? "controlled by member disks" :
? "disabled" :
? "enabled" :
"incorrectly set",
break;
}
{
"Volume %d is now %s\n", vol,
? "optimal" :
? "degraded" :
? "online" :
? "initializing" :
? "failed" :
? "missing" :
"state unknown");
break;
}
{
" Volume %d is now %s%s%s%s%s%s%s%s%s\n",
vol,
? ", enabled" : ", disabled",
? ", quiesced" : "",
? ", inactive" : ", active",
state &
? ", bad block table is full" : "",
state &
? ", resync in progress" : "",
? ", background initialization in progress" : "",
state &
? ", capacity expansion in progress" : "",
state &
? ", consistency check in progress" : "",
? ", data scrub in progress" : "");
break;
}
default:
break;
}
break;
}
{
&irPhysDisk->Slot);
&irPhysDisk->NewValue);
&irPhysDisk->ReasonCode);
NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
switch (reason) {
" PhysDiskNum %d with DevHandle 0x%x in slot %d "
"for enclosure with handle 0x%x is now in hot "
"spare pool %d",
break;
" PhysDiskNum %d with DevHandle 0x%x in slot %d "
"for enclosure with handle 0x%x is now "
? ", inactive" : ", active",
? ", out of sync" : "",
? ", quiesced" : "",
status &
? ", write cache enabled" : "",
? ", capacity expansion target" : "");
break;
" PhysDiskNum %d with DevHandle 0x%x in slot %d "
"for enclosure with handle 0x%x is now %s\n",
? "optimal" :
? "rebuilding" :
? "degraded" :
? "a hot spare" :
? "online" :
? "offline" :
? "not compatible" :
? "not configured" :
"state unknown");
break;
}
break;
}
default:
NDBG20(("mptsas%d: unknown event %x received",
break;
}
/*
* Return the reply frame to the free queue.
*/
mpt->m_free_index = 0;
}
mpt->m_free_index);
}
/*
* invoked from timeout() to restart qfull cmds with throttle == 0
*/
static void
mptsas_restart_cmd(void *arg)
{
mpt->m_restart_cmd_timeid = 0;
if (ptgt->m_reset_delay == 0) {
}
}
}
}
void
{
int slot;
int t;
/*
* Task Management cmds are removed in their own routines. Also,
* we don't want to modify timeout based on TM cmds.
*/
return;
}
/*
* remove the cmd.
*/
/*
* only decrement per target ncmds if command
* has a target associated with it.
*/
/*
* reset throttle if we just ran an untagged command
* to a tagged target
*/
}
}
}
/*
* This is all we need to do for ioc commands.
*/
return;
}
/*
* Figure out what to set tag Q timeout for...
*
* Optimize: If we have duplicate's of same timeout
* we're using, then we'll use it again until we run
* out of duplicates. This should be the normal case
* for block and raw I/O.
* If no duplicates, we have to scan through tag que and
* find the longest timeout value and use it. This is
* going to take a while...
* Add 1 to m_n_slots to account for TM request.
*/
uint_t n = 0;
ushort_t i;
/*
* This crude check assumes we don't do
* this too often which seems reasonable
* for block and raw I/O.
*/
for (i = 0; i < nslots; i++) {
}
}
ptgt->m_timebase = n;
} else {
ptgt->m_timebase = 0;
}
}
}
}
/*
* accept all cmds on the tx_waitq if any and then
* start a fresh request from the top of the device queue.
*
* since there are always cmds queued on the tx_waitq, and rare cmds on
* the instance waitq, so this function should not be invoked in the ISR,
* the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
* burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
*/
static void
{
if (mpt->m_tx_waitq) {
}
}
/*
* start a fresh request from the top of the device queue
*/
static void
{
/*
* If there is a reset delay, don't start any cmds. Otherwise, start
* as many cmds as possible.
* Since SMID 0 is reserved and the TM slot is reserved, the actual max
* commands is m_max_requests - 2.
*/
/*
* passthru command get slot need
* set CFLAG_PREPARED.
*/
}
continue;
}
/*
* Send the config page request and delete it
* from the waitq.
*/
}
continue;
}
}
ptgt->m_t_throttle))) {
}
}
}
}
/*
* Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
* Accept all those queued cmds before new cmd is accept so that the
* cmds are sent in order.
*/
static void
{
/*
* A Bus Reset could occur at any time and flush the tx_waitq,
* so we cannot count on the tx_waitq to contain even one cmd.
* And when the m_tx_waitq_mutex is released and run
* mptsas_accept_pkt(), the tx_waitq may be flushed.
*/
for (;;) {
mpt->m_tx_draining = 0;
break;
}
}
"to accept cmd on queue\n");
}
}
/*
* mpt tag type lookup
*/
static char mptsas_tag_lookup[] =
static int
{
int n;
/*
* Set SMID and increment index. Rollover to 1 instead of 0 if index
* is at the max. 0 is an invalid SMID, so we call the first index 1.
*/
/*
* It is possible for back to back device reset to
* happen before the reset delay has expired. That's
* ok, just let the device reset go out on the bus.
*/
}
/*
* if a non-tagged cmd is submitted to an active tagged target
* then drain before submitting this cmd; SCSI-2 allows RQSENSE
* to be untagged
*/
NDBG23(("target=%d, untagged cmd, start draining\n",
if (ptgt->m_reset_delay == 0) {
}
}
return (DDI_FAILURE);
}
/*
* Set correct tag bits.
*/
FLAG_TAGMASK) >> 12)]) {
case MSG_SIMPLE_QTAG:
break;
case MSG_HEAD_QTAG:
break;
case MSG_ORDERED_QTAG:
break;
default:
break;
}
} else {
}
}
}
/*
*/
if (cmd->cmd_cookiec > 0) {
} else {
}
/*
* save ARQ information
*/
} else {
}
NDBG31(("starting message=0x%p, with cmd=0x%p",
/*
* Build request descriptor and write it to the request desc post reg.
*/
/*
* Start timeout.
*/
#ifdef MPTSAS_TEST
/*
* Temporarily set timebase = 0; needed for
* timeout torture test.
*/
if (mptsas_test_timeouts) {
ptgt->m_timebase = 0;
}
#endif
if (n == 0) {
} else if (n > 0) {
} else if (n < 0) {
}
#ifdef MPTSAS_TEST
/*
* Set back to a number higher than
* mptsas_scsi_watchdog_tick
* so timeouts will happen in mptsas_watchsubr
*/
if (mptsas_test_timeouts) {
}
#endif
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Select a helper thread to handle current doneq
*/
static void
{
uint64_t t, i;
for (i = 0; i < mpt->m_doneq_thread_n; i++) {
/*
* If the completed command on help thread[i] less than
* doneq_thread_threshold, then pick the thread[i]. Otherwise
* pick a thread which has least completed command.
*/
t = i;
break;
}
t = i;
}
}
mptsas_doneq_mv(mpt, t);
}
/*
* move the current global doneq to the doneq of thead[t]
*/
static void
{
}
mpt->m_doneq_len--;
}
}
void
{
/* Check all acc and dma handles */
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS)) {
pkt->pkt_statistics = 0;
}
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS)) {
pkt->pkt_statistics = 0;
}
if (cmd->cmd_dmahandle &&
pkt->pkt_statistics = 0;
}
if ((cmd->cmd_extra_frames &&
DDI_SUCCESS) ||
DDI_SUCCESS)))) {
pkt->pkt_statistics = 0;
}
if (cmd->cmd_arqhandle &&
pkt->pkt_statistics = 0;
}
if (cmd->cmd_ext_arqhandle &&
pkt->pkt_statistics = 0;
}
}
/*
* These routines manipulate the queue of commands that
* are waiting for their completion routines to be called.
* The queue is usually in FIFO order but on an MP system
* it's possible for the completion routines to get out
* of order. If that's a problem you need to add a global
* mutex around the code that calls the completion routine
* in the interrupt handler.
*/
static void
{
/*
* only add scsi pkts that have completion routines to
* the doneq. no intr cmds do not have callbacks.
*/
mpt->m_doneq_len++;
}
}
static mptsas_cmd_t *
{
/* pop one off the done queue */
/* if the queue is now empty fix the tail pointer */
}
}
return (cmd);
}
static void
{
mpt->m_doneq_len = 0;
/*
* run the completion routines of all the
* completed commands
*/
/* run this command's completion routine */
}
mpt->m_in_callback = 0;
}
}
/*
* These routines manipulate the target's queue of pending requests
*/
void
{
if (ptgt)
}
} else {
}
}
static mptsas_cmd_t *
{
NDBG7(("mptsas_waitq_rm"));
if (cmd) {
if (ptgt) {
}
}
return (cmd);
}
/*
* remove specified cmd from the middle of the wait queue.
*/
static void
{
NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
if (ptgt) {
}
NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
return;
}
NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
return;
}
}
}
static mptsas_cmd_t *
{
NDBG7(("mptsas_tx_waitq_rm"));
return (cmd);
}
/*
* remove specified cmd from the middle of the tx_waitq.
*/
static void
{
NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
return;
}
NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
return;
}
}
}
/*
* device and bus reset handling
*
* Notes:
* - RESET_ALL: reset the controller
* - RESET_TARGET: reset the target specified in scsi_address
*/
static int
{
int rval;
return (FALSE);
}
level));
/*
* if we are not in panic set up a reset delay for this target
*/
if (!ddi_in_panic()) {
} else {
}
/*
* The transport layer expect to only see TRUE and
* FALSE. Therefore, we will adjust the return value
* if mptsas_do_scsi_reset returns FAILED.
*/
return (rval);
}
static int
{
if (mptsas_debug_resets) {
devhdl);
}
/*
* Issue a Target Reset message to the target specified but not to a
* disk making up a raid volume. Just look through the RAID config
* Phys Disk list of DevHandles. If the target's DevHandle is in this
* list, then don't reset this target.
*/
return (TRUE);
}
}
}
return (rval);
}
static int
{
}
static int
{
return (1);
} else {
return (0);
}
}
static int
{
}
void
{
/*
* are allowed. Not allowing change of throttles during draining
* limits error recovery but will reduce draining time
*
* all throttles should have been set to HOLD_THROTTLE
*/
return;
}
if (what == HOLD_THROTTLE) {
} else if (ptgt->m_reset_delay == 0) {
}
}
/*
* Clean up from a device reset.
* For the case of target reset, this function clears the waitq of all
* commands for a particular target. For the case of abort task set, this
*/
static void
{
int slot;
/*
* Make sure the I/O Controller has flushed all cmds
* that are associated with this target for a target reset
* Account for TM requests, which use the last SMID.
*/
continue;
switch (tasktype) {
"discovered non-NULL cmd in slot %d, "
}
break;
stat = STAT_ABORTED;
/*FALLTHROUGH*/
"discovered non-NULL cmd in slot %d, "
stat);
}
break;
default:
break;
}
}
/*
* Flush the waitq and tx_waitq of this target's cmds
*/
switch (tasktype) {
}
}
}
}
break;
stat = STAT_ABORTED;
/*FALLTHROUGH*/
}
}
}
}
break;
default:
tasktype);
break;
}
}
/*
* Clean up hba state, abort all outstanding command and commands in waitq
* reset timeout of all targets.
*/
static void
{
int slot;
NDBG25(("mptsas_flush_hba"));
/*
* The I/O Controller should have already sent back
* all commands via the scsi I/O reply frame. Make
* sure all commands have been flushed.
* Account for TM request, which use the last SMID.
*/
continue;
continue;
"cmd in slot %d", slot);
}
/*
* Flush the waitq.
*/
} else {
}
}
/*
* Flush the tx_waitq
*/
}
}
/*
* set pkt_reason and OR in pkt_statistics flag
*/
static void
{
#ifndef __lock_lint
#endif
NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
if (cmd) {
}
}
}
static void
{
NDBG22(("mptsas_start_watch_reset_delay"));
MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
}
}
static void
{
NDBG22(("mptsas_setup_bus_reset_delay"));
}
}
/*
* mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
* mpt instance for active reset delays
*/
static void
mptsas_watch_reset_delay(void *arg)
{
#ifndef __lock_lint
#endif
int not_done = 0;
NDBG22(("mptsas_watch_reset_delay"));
mptsas_reset_watch = 0;
continue;
}
}
if (not_done) {
}
}
static int
{
int done = 0;
int restart = 0;
if (ptgt->m_reset_delay != 0) {
ptgt->m_reset_delay -=
if (ptgt->m_reset_delay <= 0) {
ptgt->m_reset_delay = 0;
restart++;
} else {
done = -1;
}
}
}
if (restart > 0) {
}
return (done);
}
#ifdef MPTSAS_TEST
static void
{
if (mptsas_rtest == target) {
mptsas_rtest = -1;
}
if (mptsas_rtest == -1) {
NDBG22(("mptsas_test_reset success"));
}
}
}
#endif
/*
* abort handling:
*
* Notes:
* - if pkt is not NULL, abort just that command
* - if pkt is NULL, abort all outstanding commands for target
*/
static int
{
int rval;
return (rval);
}
static int
{
/*
* If you can abort them, return 1, else return 0.
* Each packet that's aborted should be sent back to the target
* driver through the callback routine, with pkt_reason set to
* CMD_ABORTED.
*
* abort cmd pkt on HBA hardware; clean out of outstanding
* command lists, etc.
*/
/* abort the specified packet */
if (sp->cmd_queued) {
NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
(void *)sp));
goto done;
}
/*
* Have mpt firmware abort this command
*/
lun);
/*
* The transport layer expects only TRUE and FALSE.
* Therefore, if mptsas_ioc_task_management returns
* FAILED we will return FALSE.
*/
goto done;
}
}
/*
* If pkt is NULL then abort task set
*/
/*
* The transport layer expects only TRUE and FALSE.
* Therefore, if mptsas_ioc_task_management returns
* FAILED we will return FALSE.
*/
#ifdef MPTSAS_TEST
if (rval && mptsas_test_stop) {
debug_enter("mptsas_do_scsi_abort");
}
#endif
done:
return (rval);
}
/*
* capability handling:
* (*tran_getcap). Get the capability named, and return its value.
*/
static int
{
int ckey;
NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
return (UNDEFINED);
}
switch (ckey) {
case SCSI_CAP_DMA_MAX:
break;
case SCSI_CAP_ARQ:
break;
case SCSI_CAP_MSG_OUT:
case SCSI_CAP_PARITY:
case SCSI_CAP_UNTAGGED_QING:
break;
case SCSI_CAP_TAGGED_QING:
break;
break;
case SCSI_CAP_LINKED_CMDS:
break;
case SCSI_CAP_QFULL_RETRIES:
break;
break;
case SCSI_CAP_CDB_LEN:
rval = CDB_GROUP4;
break;
break;
if (mpt->m_ioc_capabilities &
else
break;
default:
break;
}
return (rval);
}
/*
* (*tran_setcap). Set the capability named to the value given.
*/
static int
{
int ckey;
NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
if (!tgtonly) {
return (rval);
}
return (UNDEFINED);
}
switch (ckey) {
case SCSI_CAP_DMA_MAX:
case SCSI_CAP_MSG_OUT:
case SCSI_CAP_PARITY:
case SCSI_CAP_INITIATOR_ID:
case SCSI_CAP_LINKED_CMDS:
case SCSI_CAP_UNTAGGED_QING:
/*
* None of these are settable via
* the capability interface.
*/
break;
case SCSI_CAP_ARQ:
/*
* We cannot turn off arq so return false if asked to
*/
if (value) {
} else {
}
break;
case SCSI_CAP_TAGGED_QING:
break;
case SCSI_CAP_QFULL_RETRIES:
break;
break;
default:
break;
}
return (rval);
}
/*
* Utility routine for mptsas_ifsetcap/ifgetcap
*/
/*ARGSUSED*/
static int
{
if (!cap)
return (FALSE);
return (TRUE);
}
static int
{
int rval = -1;
NDBG9(("cannot change size of active slots array"));
return (rval);
}
if (new_active == NULL) {
NDBG1(("new active alloc failed"));
} else {
/*
* Since SMID 0 is reserved and the TM slot is reserved, the
* number of slots that can be used at any one time is
* m_max_requests - 2.
*/
if (old_active) {
}
rval = 0;
}
return (rval);
}
/*
* Error logging, printing, and debug print routines.
*/
static char *mptsas_label = "mpt_sas";
/*PRINTFLIKE3*/
void
{
if (mpt) {
} else {
dev = 0;
}
} else {
}
}
#ifdef MPTSAS_DEBUG
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
dev_info_t *dev = 0;
#ifdef PROM_PRINTF
#else
#endif
}
#endif
/*
* timeout handling
*/
static void
mptsas_watch(void *arg)
{
#ifndef __lock_lint
#endif
NDBG30(("mptsas_watch"));
/* Skip device if not powered on */
} else {
continue;
}
}
/*
* For now, always call mptsas_watchsubr.
*/
}
}
}
static void
{
int i;
#ifdef MPTSAS_TEST
if (mptsas_enable_untagged) {
}
#endif
/*
* Check for commands stuck in active slot
* Account for TM requests, which use the last SMID.
*/
if (cmd->cmd_active_timeout <= 0) {
/*
* There seems to be a command stuck
* in the active slot. Drain throttle.
*/
}
}
if (cmd->cmd_active_timeout <= 0) {
/*
* passthrough command timeout
*/
}
}
}
}
/*
* If we were draining due to a qfull condition,
* go back to full throttle.
*/
}
(ptgt->m_timebase)) {
if (ptgt->m_timebase <=
ptgt->m_timebase +=
continue;
}
continue;
}
NDBG23(("pending timeout"));
}
}
}
}
/*
* timeout recovery
*/
static void
{
"Target %d", devhdl);
/*
* If the current target is not the target passed in,
* try to reset that target.
*/
NDBG29(("mptsas_cmd_timeout: device reset"));
"recovery failed!", devhdl);
}
}
/*
* Device / Hotplug control
*/
static int
{
return (-1);
return (mptsas_quiesce_bus(mpt));
}
static int
{
return (-1);
return (mptsas_unquiesce_bus(mpt));
}
static int
{
NDBG28(("mptsas_quiesce_bus"));
/* Set all the throttles to zero */
}
/* If there are any outstanding commands in the queue */
/*
* Quiesce has been interrupted
*/
}
if (mpt->m_quiesce_timeid != 0) {
mpt->m_quiesce_timeid = 0;
return (-1);
}
return (-1);
} else {
/* Bus has been quiesced */
return (0);
}
}
/* Bus was not busy - QUIESCED */
return (0);
}
static int
{
NDBG28(("mptsas_unquiesce_bus"));
}
return (0);
}
static void
mptsas_ncmds_checkdrain(void *arg)
{
mpt->m_quiesce_timeid = 0;
/* Command queue has been drained */
} else {
/*
* The throttle may have been reset because
* of a SCSI bus reset
*/
}
drv_usectohz(1000000)));
}
}
}
static void
{
int i;
char buf[128];
buf[0] = '\0';
for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
}
"?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
}
static void
{
uint32_t i, sense_bufp;
/*
* Store the passthrough message in memory location
* corresponding to our slot number
*/
for (i = 0; i < request_size; i++) {
}
if (data_size || dataout_size) {
if (dataout_size) {
0xffffffffull));
>> 32));
sgep++;
}
if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
} else {
}
0xffffffffull));
}
if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
/*
* Put SGE for data and data_out buffer at the end of
* scsi_io_request message header.(64 bytes in total)
* Following above SGEs, the residual space will be
* used by sense data.
*/
sense_bufp += 64;
/*
* Set SGLOffset0 value
*/
/*
* Setup descriptor info
*/
}
/*
* We must wait till the message has been completed before
* beginning the next message so we wait for this one to
* finish.
*/
}
}
static int
{
int rvalue;
/*
* copy in the request buffer since it could be used by
* another thread when the pt request into waitq
*/
goto out;
}
if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
}
goto out;
}
if (data_size != 0) {
DDI_SUCCESS) {
"resource");
goto out;
}
if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
for (i = 0; i < data_size; i++) {
"copy read data");
goto out;
}
}
}
}
if (dataout_size != 0) {
DDI_SUCCESS) {
"resource");
goto out;
}
for (i = 0; i < dataout_size; i++) {
" data");
goto out;
}
}
}
goto out;
}
/*
*/
/*
* Save the command in a slot
*/
/*
* Once passthru command get slot, set cmd_flags
* CFLAG_PREPARED.
*/
} else {
}
}
}
goto out;
}
/*
* cmd_rfm is zero means the command reply is a CONTEXT
* reply and no PCI Write to post the free reply SMFA
* because no reply message frame is used.
* cmd_rfm is non-zero means the reply is a ADDRESS
* reply and reply message frame is used.
*/
}
goto out;
}
goto out;
}
goto out;
}
if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
reply_len = sizeof (MPI2_SCSI_IO_REPLY);
} else {
sense_len = 0;
}
for (i = 0; i < reply_len; i++) {
mode)) {
"reply data");
goto out;
}
}
for (i = 0; i < sense_len; i++) {
"sense data");
goto out;
}
}
}
if (data_size) {
if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
for (i = 0; i < data_size; i++) {
if (ddi_copyout((uint8_t *)(
mode)) {
"copy out the reply data");
goto out;
}
}
}
}
out:
/*
* Put the reply frame back on the free queue, increment the free
* index, and write the new index to the free index register. But only
* if this reply is an ADDRESS reply.
*/
if (pt_flags & MPTSAS_ADDRESS_REPLY) {
mpt->m_free_index = 0;
}
mpt->m_free_index);
}
pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
}
if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
if (pt_flags & MPTSAS_DATA_ALLOCATED) {
DDI_SUCCESS) {
}
}
if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
DDI_SUCCESS) {
}
}
if (pt_flags & MPTSAS_CMD_TIMEOUT) {
}
}
if (request_msg)
return (status);
}
static int
{
/*
* If timeout is 0, set timeout to default of 60 seconds.
*/
}
(data->DataOutSize != 0))))) {
} else {
data->DataOutSize = 0;
}
/*
* Send passthru request messages
*/
return (mptsas_do_passthru(mpt,
} else {
return (EINVAL);
}
}
/*
* This routine handles the "event query" ioctl.
*/
static int
int *rval)
{
int status;
uint8_t i;
for (i = 0; i < 4; i++) {
}
} else {
status = 0;
}
return (status);
}
/*
* This routine handles the "event enable" ioctl.
*/
static int
int *rval)
{
int status;
uint8_t i;
for (i = 0; i < 4; i++) {
}
status = 0;
} else {
}
return (status);
}
/*
* This routine handles the "event report" ioctl.
*/
static int
int *rval)
{
int status;
mode) == 0) {
} else {
sizeof (driverdata.Size),
mode) != 0) {
} else {
status = 0;
}
} else {
status = 0;
}
}
} else {
status = 0;
}
} else {
}
return (status);
}
static void
{
int *reg_data;
/*
* Lookup the 'reg' property and extract the other data
*/
/*
* Extract the PCI data from the 'reg' property first DWORD.
* The entry looks like the following:
* First DWORD:
* Bits 0 - 7 8-bit Register number
* Bits 8 - 10 3-bit Function number
* Bits 11 - 15 5-bit Device number
* Bits 16 - 23 8-bit Bus number
* Bits 24 - 25 2-bit Address Space type identifier
*
* Store the device number in PCIDeviceHwId.
* Store the function number in MpiPortNumber.
* PciInformation stores bus, device, and function together
*/
ddi_prop_free((void *)reg_data);
} else {
/*
* If we can't determine the PCI data then we fill in FF's for
* the data to indicate this.
*/
}
/*
* Saved in the mpt->m_fwversion
*/
}
static void
{
char *driver_verstr = MPTSAS_MOD_STRING;
adapter_data->BiosVersion = 0;
}
static void
{
int *reg_data, i;
/*
* Lookup the 'reg' property and extract the other data
*/
/*
* Extract the PCI data from the 'reg' property first DWORD.
* The entry looks like the following:
* First DWORD:
* Bits 8 - 10 3-bit Function number
* Bits 11 - 15 5-bit Device number
* Bits 16 - 23 8-bit Bus number
*/
ddi_prop_free((void *)reg_data);
} else {
/*
* If we can't determine the PCI info then we fill in FF's for
* the data to indicate this.
*/
}
/*
* Now get the interrupt vector and the pci header. The vector can
* only be 0 right now. The header is the first 256 bytes of config
* space.
*/
pci_info->InterruptVector = 0;
i);
}
}
static int
int *rval)
{
int status = 0;
int copylen;
}
return (EPERM);
}
/* Make sure power level is D0 before accessing registers */
DDI_SUCCESS) {
"mptsas%d: mptsas_ioctl: Raise power "
return (ENXIO);
}
} else {
}
} else {
}
switch (cmd) {
case MPTIOCTL_UPDATE_FLASH:
sizeof (struct mptsas_update_flash), mode)) {
break;
}
if (mptsas_update_flash(mpt,
}
/*
* Reset the chip to start using the new
* firmware. Reset if failed also.
*/
}
break;
case MPTIOCTL_PASS_THRU:
/*
* The user has requested to pass through a command to
* be executed by the MPT firmware. Call our routine
* which does this. Only allow one passthru IOCTL at
* one time.
*/
sizeof (mptsas_pass_thru_t), mode)) {
break;
}
if (mpt->m_passthru_in_progress) {
return (EBUSY);
}
mpt->m_passthru_in_progress = 0;
break;
/*
* The user has requested to read adapter data. Call
* our routine which does this.
*/
sizeof (mptsas_adapter_data_t), mode)) {
break;
}
if (adapter_data.StructureLength >=
sizeof (mptsas_adapter_data_t)) {
sizeof (mptsas_adapter_data_t);
copylen = sizeof (mptsas_adapter_data_t);
} else {
sizeof (mptsas_adapter_data_t);
}
}
break;
case MPTIOCTL_GET_PCI_INFO:
/*
* The user has requested to read pci info. Call
* our routine which does this.
*/
sizeof (mptsas_pci_info_t), mode) != 0) {
}
break;
case MPTIOCTL_RESET_ADAPTER:
"failed");
}
break;
case MPTIOCTL_EVENT_QUERY:
/*
* The user has done an event query. Call our routine
* which does this.
*/
break;
case MPTIOCTL_EVENT_ENABLE:
/*
* The user has done an event enable. Call our routine
* which does this.
*/
break;
case MPTIOCTL_EVENT_REPORT:
/*
* The user has done an event report. Call our routine
* which does this.
*/
break;
default:
rval);
break;
}
/*
* Report idle status to pm after grace period because
* multiple ioctls may be queued and raising power
* for every ioctl is time consuming. If a timeout is
* pending for the previous ioctl, cancel the timeout and
* report idle status to pm because calls to pm_busy_component(9F)
* are stacked.
*/
if (mpt->m_pm_timeid != 0) {
mpt->m_pm_timeid = 0;
/*
* Report idle status for previous ioctl since
* calls to pm_busy_component(9F) are stacked.
*/
}
}
return (status);
}
int
int rval = DDI_SUCCESS;
/*
* Set all throttles to HOLD
*/
}
/*
* Disable interrupts
*/
/*
* Abort all commands: outstanding commands, commands in waitq and
* tx_waitq.
*/
/*
* Reinitialize the chip.
*/
rval = DDI_FAILURE;
}
/*
* Enable interrupts again
*/
/*
* If mptsas_init_chip was successful, update the driver data.
*/
if (rval == DDI_SUCCESS) {
}
/*
* Reset the throttles
*/
}
if (rval != DDI_SUCCESS) {
}
return (rval);
}
int
{
uint32_t i;
if (first_time == FALSE) {
/*
* De-allocate buffers before re-allocating them using the
* latest IOC facts.
*/
/*
* Setup configuration space
*/
"failed!");
goto fail;
}
}
/*
* Check to see if the firmware image is valid
*/
goto fail;
}
/*
* Reset the chip
*/
goto fail;
}
/*
* IOC facts can change after a diag reset so all buffers that are
* based on these numbers must be de-allocated and re-allocated. Get
* new IOC facts each time chip is initialized.
*/
"failed");
goto fail;
}
/*
* Re-allocate active slots here if not the first reset. Since
* m_active could have a different number of slots allocated after a
* reset, just de-allocate the old m_active structure and re-allocate a
* new one. Save the tables and IR info from the old m_active.
*/
if (first_time == FALSE) {
if (new_active == NULL) {
"failed!");
goto fail;
} else {
for (i = 0; i < new_active->m_num_raid_configs; i++) {
new_active->m_raidconfig[i] =
}
}
}
/*
* Allocate request message frames
*/
"failed");
goto fail;
}
/*
* Allocate reply free queue
*/
"failed!");
goto fail;
}
/*
* Allocate reply descriptor post queue
*/
"failed!");
goto fail;
}
/*
* Allocate reply message frames
*/
"failed!");
goto fail;
}
/*
* Re-Initialize ioc to operational state
*/
goto fail;
}
/*
* Initialize reply post index and request index. Reply free index is
* initialized after the next loop.
*/
mpt->m_post_index = 0;
/*
* Initialize the Reply Free Queue with the physical addresses of our
* reply frames.
*/
}
/*
* Initialize the reply free index to one past the last frame on the
* queue. This will signify that the queue is empty to start with.
*/
mpt->m_free_index = i;
/*
* Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
*/
for (i = 0; i < mpt->m_post_queue_depth; i++) {
0xFFFFFFFFFFFFFFFF);
}
/*
* Enable ports
*/
goto fail;
}
/*
* First, make sure the HBA is set in "initiator" mode. Once that
* is complete, get the base WWID.
*/
if (first_time == TRUE) {
if (mptsas_set_initiator_mode(mpt)) {
"failed!");
goto fail;
}
"mptsas_get_manufacture_page5 failed!");
goto fail;
}
}
/*
* enable events
*/
if (first_time == FALSE) {
goto fail;
}
}
/*
* We need checks in attach and these.
* chip_init is called in mult. places
*/
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS)) {
goto fail;
}
/* Check all acc handles */
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS) ||
DDI_SUCCESS)) {
goto fail;
}
return (DDI_SUCCESS);
fail:
return (DDI_FAILURE);
}
static int
{
char pmc_name[16];
char *pmc[] = {
NULL,
"0=Off (PCI D3 State)",
"3=On (PCI D0 State)",
};
/*
* If power management is supported by this chip, create
* pm-components property for the power management framework
*/
"mptsas%d: pm-component property creation failed.",
mpt->m_instance);
return (DDI_FAILURE);
}
/*
* Power on device.
*/
mpt->m_instance);
}
return (DDI_FAILURE);
}
/*
* Set pm idle delay.
*/
return (DDI_SUCCESS);
}
/*
* mptsas_add_intrs:
*
* Register FIXED or MSI interrupts.
*/
static int
{
/* Get number of interrupts */
return (DDI_FAILURE);
}
/* Get number of available interrupts */
return (DDI_FAILURE);
}
}
/* Mpt only have one interrupt routine */
count = 1;
}
/* Allocate an array of interrupt handles */
/* call ddi_intr_alloc() */
ret);
return (DDI_FAILURE);
}
/* use interrupt count returned or abort? */
}
/*
* Get priority for first msi, assume remaining are all the same
*/
/* Free already allocated intr */
for (i = 0; i < actual; i++) {
}
return (DDI_FAILURE);
}
/* Test for high level mutex */
"Hi level interrupt not supported\n");
/* Free already allocated intr */
for (i = 0; i < actual; i++) {
}
return (DDI_FAILURE);
}
/* Call ddi_intr_add_handler() */
for (i = 0; i < actual; i++) {
"failed %d\n", ret);
/* Free already allocated intr */
for (i = 0; i < actual; i++) {
}
return (DDI_FAILURE);
}
}
!= DDI_SUCCESS) {
/* Free already allocated intr */
for (i = 0; i < actual; i++) {
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* mptsas_rem_intrs:
*
* Unregister FIXED or MSI interrupts
*/
static void
{
int i;
NDBG6(("mptsas_rem_intrs"));
/* Disable all interrupts */
/* Call ddi_intr_block_disable() */
} else {
for (i = 0; i < mpt->m_intr_cnt; i++) {
}
}
/* Call ddi_intr_remove_handler() */
for (i = 0; i < mpt->m_intr_cnt; i++) {
}
}
/*
* The IO fault service error handling callback function
*/
/*ARGSUSED*/
static int
{
/*
* as the driver can always deal with an error in any dma or
* access handle, we can just return the fme_status value.
*/
return (err->fme_status);
}
/*
* mptsas_fm_init - initialize fma capabilities and register with IO
* fault services.
*/
static void
{
/*
* Need to change iblock to priority for new MSI intr
*/
/* Only register with IO Fault Services if we have some capability */
if (mpt->m_fm_capabilities) {
/* Adjust access and dma attributes for FMA */
/*
* Register capabilities with IO Fault Services.
* mpt->m_fm_capabilities will be updated to indicate
* capabilities actually supported (not requested.)
*/
/*
* Initialize pci ereport capabilities if ereport
* capable (should always be.)
*/
}
/*
* Register error callback if error callback capable.
*/
mptsas_fm_error_cb, (void *) mpt);
}
}
}
/*
* mptsas_fm_fini - Releases fma capabilities and un-registers with IO
* fault services.
*
*/
static void
{
/* Only unregister FMA capabilities if registered */
if (mpt->m_fm_capabilities) {
/*
* Un-register error callback if error callback capable.
*/
}
/*
* Release any resources allocated by pci_ereport_setup()
*/
}
/* Unregister from IO Fault Services */
/* Adjust access and dma attributes for FMA */
}
}
int
{
return (DDI_FAILURE);
return (de.fme_status);
}
int
{
return (DDI_FAILURE);
return (de.fme_status);
}
void
{
char buf[FM_MAX_CLASS];
}
}
static int
{
int rval;
if (rval != DDI_SUCCESS) {
return (rval);
}
if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
return (rval);
}
/*
* Get SATA Device Name from SAS device page0 for
* sata device, if device name doesn't exist, set m_sas_wwn to
* 0 for direct attached SATA. For the device behind the expander
* we still can use STP address assigned by expander.
*/
if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
/* alloc a tmp_tgt to send the cmd */
KM_SLEEP);
} else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
sas_wwn = 0;
}
}
/*
* Check if the dev handle is for a Phys Disk. If so, set return value
* and exit. Don't add Phys Disks to hash.
*/
return (rval);
}
}
}
"structure!");
return (rval);
}
return (DEV_INFO_SUCCESS);
}
{
int inq83_len = 0xFF;
int inq83_retry = 3;
int rval = DDI_FAILURE;
if (rval != DDI_SUCCESS) {
goto out;
}
/* According to SAT2, the first descriptor is logic unit name */
goto out;
}
goto out;
NDBG20(("SATA drive has no NAA format GUID."));
goto out;
} else {
/* The data is not ready, wait and retry */
inq83_retry--;
if (inq83_retry <= 0) {
goto out;
}
NDBG20(("The GUID is not ready, retry..."));
goto inq83_retry;
}
out:
return (sata_guid);
}
static int
{
struct scsi_address ap;
int resid = 0;
int ret = DDI_FAILURE;
return (ret);
}
cdb[0] = SCMD_INQUIRY;
cdb[5] = 0;
&resid);
if (ret == DDI_SUCCESS) {
if (reallen) {
}
}
if (data_bp) {
}
return (ret);
}
static int
int *resid)
{
int ret = DDI_FAILURE;
/*
* scsi_hba_tran_t->tran_tgt_private is used to pass the address
* information to scsi_init_pkt, allocate a scsi_hba_tran structure
* to simulate the cmds from sd
*/
sizeof (scsi_hba_tran_t), KM_SLEEP);
if (tran_clone == NULL) {
goto out;
}
sizeof (mptsas_tgt_private_t), KM_SLEEP);
if (tgt_private == NULL) {
goto out;
}
goto out;
}
goto out;
}
goto out;
}
}
ret = DDI_SUCCESS;
out:
if (pktp) {
}
if (tran_clone) {
}
if (tgt_private) {
}
return (ret);
}
static int
{
size_t s = 0;
long lunnum;
long phyid = -1;
int rc = DDI_FAILURE;
ptr++;
return (DDI_FAILURE);
}
wwid_str[s] = '\0';
goto out;
}
lun_str[s] = '\0';
if (name[0] == 'p') {
} else {
}
if (rc != DDI_SUCCESS)
goto out;
if (phyid != -1) {
}
if (rc != 0)
goto out;
rc = DDI_SUCCESS;
out:
if (wwid_str)
if (lun_str)
return (rc);
}
/*
* mptsas_parse_smp_name() is to parse sas wwn string
* which format is "wWWN"
*/
static int
{
if (*ptr != 'w') {
return (DDI_FAILURE);
}
ptr++;
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
int ret = NDI_FAILURE;
int circ = 0;
int circ1 = 0;
int lun = 0;
if (scsi_hba_iport_unit_address(pdip) == 0) {
return (DDI_FAILURE);
}
if (!mpt) {
return (DDI_FAILURE);
}
/*
* Hold the nexus across the bus_config
*/
switch (op) {
case BUS_CONFIG_ONE:
ret = NDI_FAILURE;
break;
}
ptr++;
/*
* This is a SMP target device
*/
if (ret != DDI_SUCCESS) {
ret = NDI_FAILURE;
break;
}
/*
* OBP could pass down a non-canonical form
* bootpath without LUN part when LUN is 0.
* So driver need adjust the string.
*/
ptr++;
}
/*
* The device path is wWWID format and the device
* is not SMP target device.
*/
if (ret != DDI_SUCCESS) {
ret = NDI_FAILURE;
break;
}
if (ptr[0] == 'w') {
} else if (ptr[0] == 'p') {
childp);
}
} else {
ret = NDI_FAILURE;
break;
}
/*
* DDI group instructed us to use this flag.
*/
break;
case BUS_CONFIG_DRIVER:
case BUS_CONFIG_ALL:
ret = NDI_SUCCESS;
break;
}
if (ret == NDI_SUCCESS) {
}
return (ret);
}
static int
{
int rval = DDI_FAILURE;
SUN_INQSIZE, 0, (uchar_t)0);
} else {
rval = DDI_FAILURE;
}
out:
return (rval);
}
static int
dev_info_t **lundip)
{
int rval;
int phymask;
/*
* Get the physical port associated to the iport
*/
"phymask", 0);
/*
* didn't match any device by searching
*/
return (DDI_FAILURE);
}
/*
* If the LUN already exists and the status is online,
* we just return the pointer to dev_info_t directly.
* For the mdi_pathinfo node, we'll handle it in
* mptsas_create_virt_lun()
* TODO should be also in mptsas_handle_dr
*/
/*
* TODO Another senario is, we hotplug the same disk
* on the same slot, the devhdl changed, is this
* possible?
* tgt_private->t_private != ptgt
*/
/*
* The device has changed although the devhdl is the
* same (Enclosure mapping mode, change drive on the
* same slot)
*/
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
if (phymask == 0) {
/*
* Configure IR volume
*/
return (rval);
}
return (rval);
}
static int
dev_info_t **lundip)
{
int rval;
/*
* didn't match any device by searching
*/
return (DDI_FAILURE);
}
/*
* If the LUN already exists and the status is online,
* we just return the pointer to dev_info_t directly.
* For the mdi_pathinfo node, we'll handle it in
* mptsas_create_virt_lun().
*/
return (DDI_SUCCESS);
}
return (rval);
}
static int
{
/* determine report luns addressing type */
/*
* Vendors in the field have been found to be concatenating
* of switching to flat space addressing
*/
/* 00b - peripheral device addressing method */
/* FALLTHRU */
/* 10b - logical unit addressing method */
/* FALLTHRU */
/* 01b - flat space addressing method */
/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
static int
{
struct scsi_address ap;
int ret = DDI_FAILURE;
int retry = 0;
int lun_list_len = 0;
uint8_t lun_addr_type = 0;
int buf_len = 128;
if (sas_wwn == 0) {
/*
* It's a SATA without Device Name
* So don't try multi-LUNs
*/
return (DDI_SUCCESS);
} else {
/*
* need configure and create node
*/
return (DDI_FAILURE);
}
}
/*
* WWN (SAS address or Device Name exist)
*/
if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
/*
* SATA device with Device Name
* So don't try multi-LUNs
*/
return (DDI_SUCCESS);
} else {
return (DDI_FAILURE);
}
}
do {
if (repluns_bp == NULL) {
retry++;
continue;
}
cdb[0] = SCMD_REPORT_LUNS;
repluns_bp, NULL);
if (ret != DDI_SUCCESS) {
retry++;
continue;
}
lun_list_len = BE_32(*(int *)((void *)(
ret = DDI_SUCCESS;
break;
}
} while (retry < 3);
if (ret != DDI_SUCCESS)
return (ret);
/*
* find out the number of luns returned by the SCSI ReportLun call
* and allocate buffer space
*/
if (saved_repluns == NULL) {
return (DDI_FAILURE);
}
continue;
}
ret = DDI_SUCCESS;
else
ptgt);
}
}
return (DDI_SUCCESS);
}
static int
{
int rval = DDI_FAILURE;
"not found.", target);
return (rval);
}
SUN_INQSIZE, 0, (uchar_t)0);
0);
} else {
rval = DDI_FAILURE;
}
out:
return (rval);
}
/*
* configure all RAID volumes for virtual iport
*/
static void
{
int target;
/*
* Get latest RAID info and search for any Volume DevHandles. If any
* are found, configure the volume.
*/
== 1) {
&lundip);
}
}
}
}
static void
{
int lun;
int i;
int find;
char *addr;
char *nodename;
while (child) {
find = 0;
continue;
}
continue;
}
DDI_SUCCESS) {
continue;
}
for (i = 0; i < lun_cnt; i++) {
find = 1;
break;
}
}
} else {
continue;
}
if (find == 0) {
/*
* The lun has not been there already
*/
}
}
while (pip) {
find = 0;
continue;
}
&lun) != DDI_SUCCESS) {
continue;
}
for (i = 0; i < lun_cnt; i++) {
find = 1;
break;
}
}
} else {
continue;
}
if (find == 0) {
/*
* The lun has not been there already
*/
}
}
}
void
{
int rval = 0;
/*
* Get latest RAID info.
*/
(void) mptsas_get_raid_info(mpt);
for (; mpt->m_done_traverse_smp == 0; ) {
!= DDI_SUCCESS) {
break;
}
}
/*
* Config target devices
*/
/*
* Do loop to get sas device page 0 by GetNextHandle till the
* we try to config it.
*/
for (; mpt->m_done_traverse_dev == 0; ) {
&dev_handle, &ptgt);
if ((rval == DEV_INFO_FAIL_PAGE0) ||
(rval == DEV_INFO_FAIL_ALLOC)) {
break;
}
}
}
void
{
data->device_info = 0;
/*
* For tgttbl, clear dr_flag.
*/
}
}
void
{
/*
* TODO after hard reset, update the driver data structures
* 2. invalid all the entries in hash table
* m_devhdl = 0xffff and m_deviceinfo = 0
* 3. call sas_device_page/expander_page to update hash table
*/
/*
* Invalid the existing entries
*/
mpt->m_done_traverse_dev = 0;
mpt->m_done_traverse_smp = 0;
}
static void
{
int phymask = 0;
/*
* Get the phymask associated to the iport
*/
"phymask", 0);
/*
* Enumerate RAID volumes here (phymask == 0).
*/
if (phymask == 0) {
return;
}
}
}
}
}
}
}
static int
{
int rval = DDI_FAILURE;
if (rval != DDI_SUCCESS) {
/*
* The return value means the SCMD_REPORT_LUNS
* did not execute successfully. The target maybe
* doesn't support such command.
*/
}
return (rval);
}
/*
* if there is any path under the HBA, the return value will be always fail
* because we didn't call mdi_pi_free for path
*/
static int
{
size_t s;
while (child) {
continue;
}
continue;
}
continue;
}
if (tmp_rval != DDI_SUCCESS) {
rval = DDI_FAILURE;
prechild, MPTSAS_DEV_GONE) !=
"unable to create property for "
"SAS %s (MPTSAS_DEV_GONE)", addr);
}
}
}
while (pip) {
continue;
}
continue;
}
continue;
}
/*
* driver will not invoke mdi_pi_free, so path will not
* be freed forever, return DDI_FAILURE.
*/
rval = DDI_FAILURE;
}
return (rval);
}
static int
{
int rval = DDI_FAILURE;
char *devname;
} else {
return (DDI_FAILURE);
}
/*
* Make sure node is attached otherwise
* it won't have related cache nodes to
* clean up. i_ddi_devi_attached is
* similiar to i_ddi_node_state(cdip) >=
* DS_ATTACHED.
*/
if (i_ddi_devi_attached(cdip)) {
/* Get full devname */
/* Clean cache */
}
if (MDI_PI_IS_OFFLINE(rpip)) {
rval = DDI_SUCCESS;
} else {
}
} else {
}
return (rval);
}
static dev_info_t *
{
while (child) {
!= DDI_SUCCESS) {
continue;
}
break;
}
}
return (child);
}
static int
{
int rval = DDI_FAILURE;
char *devname;
char wwn_str[MPTSAS_WWN_STRLEN];
return (DDI_SUCCESS);
/*
* Make sure node is attached otherwise
* it won't have related cache nodes to
* clean up. i_ddi_devi_attached is
* similiar to i_ddi_node_state(cdip) >=
* DS_ATTACHED.
*/
if (i_ddi_devi_attached(cdip)) {
/* Get full devname */
/* Clean cache */
}
return (rval);
}
static dev_info_t *
{
int rval = DDI_FAILURE;
while (child) {
if (rval != DDI_SUCCESS) {
continue;
}
break;
}
}
return (child);
}
static dev_info_t *
{
return (child);
}
static dev_info_t *
{
char *addr;
return (child);
}
static mdi_pathinfo_t *
{
return (path);
}
static mdi_pathinfo_t *
{
return (path);
}
static int
{
int i = 0;
int inq83_len1 = 0xFF;
int inq83_len = 0;
int rval = DDI_FAILURE;
/*
* devices, we won't try to enumerate them under
* scsi_vhci, so no need to try page83
*/
goto create_lun;
/*
* The LCA returns good SCSI status, but corrupt page 83 data the first
* time it is queried. The solution is to keep trying to request page83
* and verify the GUID is not (DDI_NOT_WELL_FORMED) in
* mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
* give up to get VPD page at this stage and fail the enumeration.
*/
for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
if (rval != 0) {
goto out;
}
/*
* create DEVID from inquiry data
*/
if ((rval = ddi_devid_scsi_encode(
/*
* extract GUID from DEVID
*/
/*
* Do not enable MPXIO if the strlen(guid) is greater
* than MPTSAS_MAX_GUID_LEN, this constrain would be
* handled by framework later.
*/
"lun:%x doesn't have a valid GUID, "
"multipathing for this drive is "
}
}
/*
* devid no longer needed
*/
break;
} else if (rval == DDI_NOT_WELL_FORMED) {
/*
* return value of ddi_devid_scsi_encode equal to
* DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
* to retry inquiry page 0x83 and get GUID.
*/
NDBG20(("Not well formed devid, retry..."));
continue;
} else {
rval = DDI_FAILURE;
goto create_lun;
}
}
if (i == mptsas_inq83_retry_timeout) {
}
rval = DDI_FAILURE;
}
if (rval != DDI_SUCCESS) {
}
out:
/*
* guid no longer needed
*/
}
return (rval);
}
static int
{
int target;
char **compatible = NULL;
int ncompatible = 0;
int mdi_rtn = MDI_FAILURE;
int rval = DDI_FAILURE;
if (sas_wwn) {
} else {
}
/*
* Same path back online again.
*/
(void) ddi_prop_free(old_guid);
if (!MDI_PI_IS_ONLINE(*pip) &&
!MDI_PI_IS_STANDBY(*pip)) {
} else {
rval = DDI_SUCCESS;
}
if (rval != DDI_SUCCESS) {
"%x, lun:%x online failed!", target,
lun);
}
return (rval);
} else {
/*
* The GUID of the LUN has changed which maybe
* because customer mapped another volume to the
* same LUN.
*/
"target:%x, lun:%x was changed, maybe "
"because someone mapped another volume "
(void) ddi_prop_free(old_guid);
if (!MDI_PI_IS_OFFLINE(*pip)) {
if (rval != MDI_SUCCESS) {
"target:%x, lun:%x offline "
return (DDI_FAILURE);
}
}
"%x, lun:%x free failed!", target,
lun);
return (DDI_FAILURE);
}
}
} else {
return (DDI_FAILURE);
}
}
/*
* if nodename can't be determined then print a message and skip it
*/
return (DDI_FAILURE);
}
/* The property is needed by MPAPI */
if (sas_wwn)
else
0, pip);
if (mdi_rtn == MDI_SUCCESS) {
guid) != DDI_SUCCESS) {
"create property for target %d lun %d (MDI_GUID)",
goto virt_create_done;
}
lun) != DDI_SUCCESS) {
"create property for target %d lun %d (LUN_PROP)",
goto virt_create_done;
}
compatible, ncompatible) !=
"create property for target %d lun %d (COMPATIBLE)",
goto virt_create_done;
}
"create property for target %d lun %d "
goto virt_create_done;
/*
* Direct attached SATA device without DeviceName
*/
"create property for SAS target %d lun %d "
goto virt_create_done;
}
/*
* set obp path for pathinfo
*/
"disk@%s", lun_addr);
DDI_SUCCESS) {
"unable to set obp-path for object %s",
goto virt_create_done;
}
}
if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
"pm-capable", 1)) !=
"failed to create pm-capable "
"property, target %d", target);
goto virt_create_done;
}
}
if (mdi_rtn == MDI_NOT_SUPPORTED) {
}
(void) mdi_pi_free(*pip, 0);
}
}
}
}
}
}
static int
{
int target;
int ndi_rtn = NDI_FAILURE;
char **compatible = NULL;
int ncompatible = 0;
int instance = 0;
/*
* generate compatible property with binding-set "mpt"
*/
/*
* if nodename can't be determined then print a message and skip it
*/
return (DDI_FAILURE);
}
/*
* if lun alloc success, set props
*/
if (ndi_rtn == NDI_SUCCESS) {
"property for target %d lun %d (LUN_PROP)",
goto phys_create_done;
}
!= DDI_PROP_SUCCESS) {
"property for target %d lun %d (COMPATIBLE)",
goto phys_create_done;
}
/*
* We need the SAS WWN for non-multipath devices, so
* we'll use the same property as that multipathing
* devices need to present for MPAPI. If we don't have
* a WWN (e.g. parallel SCSI), don't create the prop.
*/
!= DDI_PROP_SUCCESS) {
"create property for SAS target %d lun %d "
goto phys_create_done;
}
if (sas_wwn && ndi_prop_update_byte_array(
"create property for SAS target %d lun %d "
goto phys_create_done;
} else if ((sas_wwn == 0) && (ndi_prop_update_int(
DDI_PROP_SUCCESS)) {
/*
* Direct attached SATA device without DeviceName
*/
"create property for SAS target %d lun %d "
goto phys_create_done;
}
"create property for SAS target %d lun %d"
goto phys_create_done;
}
"to create guid property for target %d "
goto phys_create_done;
}
/*
* if this is a SAS controller, and the target is a SATA
* drive, set the 'pm-capable' property for sd and if on
* an OPL platform, also check if this is an ATAPI
* device.
*/
if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
NDBG2(("mptsas%d: creating pm-capable property, "
"failed to create pm-capable "
"property, target %d", target);
goto phys_create_done;
}
}
/*
* add 'obp-path' properties for devinfo
*/
if (sas_wwn) {
} else {
}
!= DDI_SUCCESS) {
"unable to set obp-path for SAS "
"object %s", component);
goto phys_create_done;
}
}
/*
* If props were setup ok, online the lun
*/
if (ndi_rtn == NDI_SUCCESS) {
/*
* Try to online the new node
*/
}
/*
* If success set rtn flag, else unwire alloc'd lun
*/
if (ndi_rtn != NDI_SUCCESS) {
NDBG12(("mptsas driver unable to online "
(void) ndi_devi_free(*lun_dip);
}
}
}
}
}
static int
{
struct smp_device smp_sd;
/* XXX An HBA driver should not be allocating an smp_device. */
return (NDI_FAILURE);
return (NDI_SUCCESS);
}
static int
{
int rval;
int phymask;
/*
* Get the physical port associated to the iport
* PHYMASK TODO
*/
"phymask", 0);
/*
* Find the smp node in hash table with specified sas address and
* physical port
*/
return (DDI_FAILURE);
}
return (rval);
}
static int
{
char wwn_str[MPTSAS_WWN_STRLEN];
int ndi_rtn = NDI_FAILURE;
/*
* Probe smp device, prevent the node of removed device from being
* configured succesfully
*/
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* if lun alloc success, set props
*/
if (ndi_rtn == NDI_SUCCESS) {
/*
* Set the flavor of the child to be SMP flavored
*/
"property for smp device %s (sas_wwn)",
wwn_str);
goto smp_create_done;
}
"create property for SMP %s (SMP_PROP) ",
wwn_str);
goto smp_create_done;
}
/*
* If props were setup ok, online the lun
*/
if (ndi_rtn == NDI_SUCCESS) {
/*
* Try to online the new node
*/
}
/*
* If success set rtn flag, else unwire alloc'd lun
*/
if (ndi_rtn != NDI_SUCCESS) {
NDBG12(("mptsas unable to online "
"SMP target %s", wwn_str));
(void) ndi_devi_free(*smp_dip);
}
}
}
/* smp transport routine */
{
int ret;
/*
* Need to compose a SMP request message
* and call mptsas_do_passthru() function
*/
req.PassthroughFlags = 0;
req.ChainOffset = 0;
return (DDI_FAILURE);
}
if (smp_pkt->smp_pkt_rspsize > 0) {
}
if (smp_pkt->smp_pkt_reqsize > 0) {
}
if (ret != 0) {
return (DDI_FAILURE);
}
/* do passthrough success, check the smp status */
break;
break;
break;
default:
break;
}
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
mptsas_idle_pm(void *arg)
{
mpt->m_pm_timeid = 0;
}
/*
* If we didn't get a match, we need to get sas page0 for each device, and
* untill we get a match. If failed, return NULL
* TODO should be implemented similar to mptsas_wwid_to_ptgt?
*/
static mptsas_target_t *
{
int i, j = 0;
int rval = 0;
int phymask;
/*
* Get the physical port associated to the iport
*/
"phymask", 0);
if (phymask == 0)
return (NULL);
/*
* PHY named device must be direct attached and attaches to
* narrow port, if the iport is not parent of the device which
* we are looking for.
*/
for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
if ((1 << i) & phymask)
j++;
}
if (j > 1)
return (NULL);
/*
* Must be a narrow port and single device attached to the narrow port
* So the physical port num of device which is equal to the iport's
* port num is the device what we are looking for.
*/
return (NULL);
return (ptgt);
}
}
if (mpt->m_done_traverse_dev) {
return (NULL);
}
/* If didn't get a match, come here */
for (; ; ) {
&cur_handle, &ptgt);
if ((rval == DEV_INFO_FAIL_PAGE0) ||
(rval == DEV_INFO_FAIL_ALLOC)) {
break;
}
if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
(rval == DEV_INFO_PHYS_DISK)) {
continue;
}
break;
}
}
return (ptgt);
}
/*
* The ptgt->m_sas_wwn contains the wwid for each disk.
* For Raid volumes, we need to check m_raidvol[x].m_raidwwid
* If we didn't get a match, we need to get sas page0 for each device, and
* untill we get a match
* If failed, return NULL
*/
static mptsas_target_t *
{
int rval = 0;
return (tmp_tgt);
}
if (phymask == 0) {
/*
* It's IR volume
*/
if (rval) {
}
return (tmp_tgt);
}
if (mpt->m_done_traverse_dev) {
return (NULL);
}
/* If didn't get a match, come here */
for (; ; ) {
&cur_handle, &tmp_tgt);
if ((rval == DEV_INFO_FAIL_PAGE0) ||
(rval == DEV_INFO_FAIL_ALLOC)) {
break;
}
if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
(rval == DEV_INFO_PHYS_DISK)) {
continue;
}
break;
}
}
return (tmp_tgt);
}
static mptsas_smp_t *
{
int rval = 0;
return (psmp);
}
if (mpt->m_done_traverse_smp) {
return (NULL);
}
/* If didn't get a match, come here */
for (; ; ) {
&smp_node);
if (rval != DDI_SUCCESS) {
break;
}
break;
}
}
return (psmp);
}
/* helper functions using hash */
/*
* Can't have duplicate entries for same devhdl,
* if there are invalid entries, the devhdl should be set to 0xffff
*/
static void *
{
break;
}
}
return (data);
}
{
NDBG20(("Hash item already exist"));
return (tmp_tgt);
}
return (NULL);
}
/* Initialized the tgt structure */
return (tmp_tgt);
}
static void
{
} else {
}
}
/*
* Return the entry in the hash table
*/
static mptsas_smp_t *
{
return (ret_data);
}
return (ret_data);
}
static void
{
} else {
}
}
/*
* Hash operation functions
* key1 is the sas_wwn, key2 is the phymask
*/
static void
{
return;
}
}
static void
{
return;
}
}
}
}
/*
* You must guarantee the element doesn't exist in the hash table
* before you call mptsas_hash_add()
*/
static void
{
return;
}
} else {
}
}
static void *
{
return (NULL);
}
} else {
}
return (data);
} else {
}
}
return (NULL);
}
static void *
{
return (NULL);
}
return (data);
} else {
}
}
return (NULL);
}
static void *
{
return (NULL);
}
if (pos == MPTSAS_HASH_FIRST) {
} else {
return (NULL);
} else {
}
}
/* the traverse reaches the end */
return (NULL);
} else {
}
}
}