/* mr_sas.c revision 34841cc2abc43146ada78560d5f179be666acbda */
/*
* mr_sas.c: source for mr_sas driver
*
* MegaRAID device driver for SAS2.0 controllers
* Copyright (c) 2008-2009, LSI Logic Corporation.
* All rights reserved.
*
* Version:
* Author:
* Arun Chandrashekhar
* Manju R
* Rajesh Prabhakaran
* Seokmann Ju
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
*
* 3. Neither the name of the author nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/byteorder.h>
#include "mr_sas.h"
/*
* FMA header files
*/
/*
* Local static data
*/
/*
 * Local static data and module-linkage structures.
 * NOTE(review): this extract is missing source lines -- the dev_ops and
 * modldrv initializers below have lost their declaration headers, and
 * endian_attr has lost its members. Kept byte-identical; do not build.
 */
static void *mrsas_state = NULL;		/* soft-state anchor (ddi_soft_state) */
static int debug_level_g = CL_NONE;		/* global debug verbosity */
/* weak references: present only when the scsi_hba module provides them */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl
/* generic DMA attributes shared by the driver's DMA allocations */
static ddi_dma_attr_t mrsas_generic_dma_attr = {
DMA_ATTR_V0, /* dma_attr_version */
0, /* low DMA address range */
0xFFFFFFFFU, /* high DMA address range */
0xFFFFFFFFU, /* DMA counter register */
8, /* DMA address alignment */
0x07, /* DMA burstsizes */
1, /* min DMA size */
0xFFFFFFFFU, /* max DMA size */
0xFFFFFFFFU, /* segment boundary */
MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */
512, /* granularity of device */
0 /* bus specific DMA flags */
};
/*
 * cb_ops contains base level routines
 */
static struct cb_ops mrsas_cb_ops = {
mrsas_open, /* open */
mrsas_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
mrsas_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
nodev, /* cb_prop_op */
0, /* streamtab */
CB_REV, /* cb_rev */
nodev, /* cb_aread */
nodev /* cb_awrite */
};
/*
 * dev_ops contains configuration routines
 * NOTE(review): the "static struct dev_ops mrsas_ops = {" line is missing
 * from this extract -- TODO restore from full revision.
 */
DEVO_REV, /* rev, */
0, /* refcnt */
mrsas_getinfo, /* getinfo */
nulldev, /* identify */
nulldev, /* probe */
mrsas_attach, /* attach */
mrsas_detach, /* detach */
mrsas_reset, /* reset */
&mrsas_cb_ops, /* char/block ops */
NULL, /* bus ops */
NULL, /* power */
ddi_quiesce_not_supported, /* quiesce */
};
char _depends_on[] = "misc/scsi";
/* NOTE(review): modldrv declaration header missing from this extract */
&mod_driverops, /* module type - driver */
&mrsas_ops, /* driver ops */
};
static struct modlinkage modlinkage = {
MODREV_1, /* ml_rev - must be MODREV_1 */
&modldrv, /* ml_linkage */
NULL /* end of driver linkage */
};
/* NOTE(review): endian_attr members missing from this extract */
static struct ddi_device_acc_attr endian_attr = {
};
/*
* ************************************************************************** *
* *
* common entry points - for loadable kernel modules *
* *
* ************************************************************************** *
*/
/*
 * _init(9E)/_info(9E)/_fini(9E) loadable-module entry points.
 * NOTE(review): fragmentary extract -- the mod_install/mod_info/mod_remove
 * calls and the _info()/_fini() signatures are missing; braces do not
 * balance. Kept byte-identical.
 */
int
_init(void)
{
int ret;
/* NOTE(review): ddi_soft_state_init() call truncated to its last args */
sizeof (struct mrsas_instance), 0);
if (ret != DDI_SUCCESS) {
return (ret);
}
return (ret);
}
if (ret != DDI_SUCCESS) {
}
return (ret);
}
int
{
}
int
_fini(void)
{
int ret;
return (ret);
return (ret);
}
/*
* ************************************************************************** *
* *
* common entry points - for autoconfiguration *
* *
* ************************************************************************** *
*/
/*
 * mrsas_attach -- attach(9E) entry point (function-name line missing from
 * this extract; identified from the "return failure from mrsas_attach"
 * message below).
 * NOTE(review): fragmentary -- most call expressions are truncated to their
 * trailing argument lines and the fail_attach/fail_initiate_aen labels are
 * gone. Kept byte-identical.
 */
static int
{
int instance_no;
int nregs;
uint8_t added_isr_f = 0;	/* cleanup flag: interrupts registered */
uint8_t added_soft_isr_f = 0;	/* cleanup flag: soft ISR registered */
uint8_t create_ioc_node_f = 0;	/* cleanup flag: ioctl node created */
uint8_t tran_alloc_f = 0;	/* cleanup flag: scsi_hba_tran allocated */
int intr_types = 0;
char *data;
int msi_enable = 0;
struct mrsas_instance *instance;
/* CONSTCOND */
/*
 * check to see whether this device is in a DMA-capable slot.
 */
"mr_sas%d: Device in slave-only slot, unused",
instance_no));
return (DDI_FAILURE);
}
switch (cmd) {
case DDI_ATTACH:
/* allocate the soft state for the instance */
!= DDI_SUCCESS) {
"mr_sas%d: Failed to allocate soft state",
instance_no));
return (DDI_FAILURE);
}
"mr_sas%d: Bad soft state", instance_no));
return (DDI_FAILURE);
}
sizeof (struct mrsas_instance));
sizeof (struct mrsas_func_ptr), KM_SLEEP);
/* Setup the PCI configuration space handles */
DDI_SUCCESS) {
"mr_sas%d: pci config setup failed ",
instance_no));
sizeof (struct mrsas_func_ptr));
return (DDI_FAILURE);
}
"mr_sas: failed to get registers."));
sizeof (struct mrsas_func_ptr));
return (DDI_FAILURE);
}
PCI_CONF_COMM) | PCI_COMM_ME));
"0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
/* enable bus-mastering */
if (!(command & PCI_COMM_ME)) {
command |= PCI_COMM_ME;
"enable bus-mastering", instance_no));
} else {
"bus-mastering already set", instance_no));
}
/* initialize function pointers */
if ((device_id == PCI_DEVICE_ID_LSI_2108VDE) ||
(device_id == PCI_DEVICE_ID_LSI_2108V)) {
"2108V/DE detected", instance_no));
} else {
"mr_sas: Invalid device detected"));
sizeof (struct mrsas_func_ptr));
return (DDI_FAILURE);
}
/* Initialize FMA */
"fm-capable", DDI_FM_EREPORT_CAPABLE |
/* Initialize Interrupts */
return (DDI_FAILURE);
}
if (reglength > DEFAULT_MFI_MEM_SZ) {
"mr_sas: register length to map is "
"0x%lx bytes", reglength));
}
!= DDI_SUCCESS) {
"mr_sas: couldn't map control registers"));
goto fail_attach;
}
/*
 * Disable Interrupt Now.
 * Setup Software interrupt
 */
msi_enable = 0;
msi_enable = 1;
"msi_enable = %d ENABLED",
msi_enable));
}
}
msi_enable));
/* Check for all supported interrupt types */
"ddi_intr_get_supported_types() failed"));
goto fail_attach;
}
"ddi_intr_get_supported_types() ret: 0x%x",
intr_types));
/* Initialize and Setup Interrupt handler */
if (mrsas_add_intrs(instance,
DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) {
"MSIX interrupt query failed"));
goto fail_attach;
}
} else if (msi_enable && (intr_types &
if (mrsas_add_intrs(instance,
DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
"MSI interrupt query failed"));
goto fail_attach;
}
} else if (intr_types & DDI_INTR_TYPE_FIXED) {
msi_enable = 0;
if (mrsas_add_intrs(instance,
DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
"FIXED interrupt query failed"));
goto fail_attach;
}
} else {
"suppport either FIXED or MSI/X "
"interrupts"));
goto fail_attach;
}
added_isr_f = 1;
/* setup the mfi based low level driver */
"could not initialize the low level driver"));
goto fail_attach;
}
/* Initialize all Mutex */
"completed_pool_mtx", MUTEX_DRIVER,
/* Register our soft-isr for highlevel interrupts. */
DDI_SUCCESS) {
" Software ISR did not register"));
goto fail_attach;
}
added_soft_isr_f = 1;
}
/* Allocate a transport structure */
"scsi_hba_tran_alloc failed"));
goto fail_attach;
}
tran_alloc_f = 1;
/* Attach this instance of the hba */
!= DDI_SUCCESS) {
"scsi_hba_attach failed"));
goto fail_attach;
}
/* create devctl node for cfgadm command */
DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
"mr_sas: failed to create devctl node."));
goto fail_attach;
}
create_devctl_node_f = 1;
/* create scsi node for cfgadm command */
DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
DDI_FAILURE) {
"mr_sas: failed to create scsi node."));
goto fail_attach;
}
create_scsi_node_f = 1;
/*
 * Create a node for applications
 * for issuing ioctl to the driver.
 */
DDI_PSEUDO, 0) == DDI_FAILURE) {
"mr_sas: failed to create ioctl node."));
goto fail_attach;
}
create_ioc_node_f = 1;
/* Create a taskq to handle dr events */
"mrsas_dr_taskq", 1,
TASKQ_DEFAULTPRI, 0)) == NULL) {
"mr_sas: failed to create taskq "));
goto fail_attach;
}
/* enable interrupt */
/* initiate AEN */
if (start_mfi_aen(instance)) {
"mr_sas: failed to initiate AEN."));
goto fail_initiate_aen;
}
"AEN started for instance %d.", instance_no));
/* Finally! We are on the air. */
DDI_SUCCESS) {
goto fail_attach;
}
DDI_SUCCESS) {
goto fail_attach;
}
KM_SLEEP);
break;
case DDI_PM_RESUME:
"mr_sas: DDI_PM_RESUME"));
break;
case DDI_RESUME:
"mr_sas: DDI_RESUME"));
break;
default:
"mr_sas: invalid attach cmd=%x", cmd));
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
/* unwind in reverse order of the cleanup flags set above */
if (create_devctl_node_f) {
}
if (create_scsi_node_f) {
}
if (create_ioc_node_f) {
}
if (tran_alloc_f) {
}
if (added_soft_isr_f) {
}
if (added_isr_f) {
}
}
"mr_sas: return failure from mrsas_attach"));
return (DDI_FAILURE);
}
/*ARGSUSED*/
/*
 * mrsas_getinfo -- getinfo(9E) entry point (signature line missing from
 * this extract). Maps DDI_INFO_DEVT2DEVINFO / DDI_INFO_DEVT2INSTANCE
 * queries; fragmentary, kept byte-identical.
 */
static int
{
int rval;
struct mrsas_instance *instance;
switch (cmd) {
case DDI_INFO_DEVT2DEVINFO:
instance = (struct mrsas_instance *)
rval = DDI_FAILURE;
} else {
rval = DDI_SUCCESS;
}
break;
case DDI_INFO_DEVT2INSTANCE:
rval = DDI_SUCCESS;
break;
default:
rval = DDI_FAILURE;
}
return (rval);
}
/*
 * mrsas_detach -- detach(9E) entry point (signature line missing; name
 * taken from the "mrsas_detach: ..." messages below).
 * NOTE(review): fragmentary -- teardown calls truncated; kept byte-identical.
 */
static int
{
int instance_no;
struct mrsas_instance *instance;
/* CONSTCOND */
if (!instance) {
"mr_sas:%d could not get instance in detach",
instance_no));
return (DDI_FAILURE);
}
"mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
switch (cmd) {
case DDI_DETACH:
"mrsas_detach: DDI_DETACH"));
"mr_sas:%d failed to detach",
instance_no));
return (DDI_FAILURE);
}
"failed to abort prevous AEN command"));
return (DDI_FAILURE);
}
}
}
* sizeof (struct mrsas_ld));
sizeof (struct mrsas_func_ptr));
break;
case DDI_PM_SUSPEND:
"mrsas_detach: DDI_PM_SUSPEND"));
break;
case DDI_SUSPEND:
"mrsas_detach: DDI_SUSPEND"));
break;
default:
"invalid detach command:0x%x", cmd));
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* ************************************************************************** *
* *
* common entry points - for character driver types *
* *
* ************************************************************************** *
*/
/*
 * mrsas_open/mrsas_close/mrsas_ioctl -- cb_ops(9S) character-device entry
 * points (signature lines missing from this extract).
 * NOTE(review): fragmentary; kept byte-identical. mrsas_open rejects
 * non-root callers (EPERM) and non-character opens (EINVAL), then chains
 * to the weak scsi_hba_open if present.
 */
static int
{
int rval = 0;
/* Check root permissions */
"mr_sas: Non-root ioctl access denied!"));
return (EPERM);
}
/* Verify we are being opened as a character device */
"mr_sas: ioctl node must be a char node"));
return (EINVAL);
}
== NULL) {
return (ENXIO);
}
if (scsi_hba_open) {
}
return (rval);
}
static int
{
int rval = 0;
/* no need for locks! */
if (scsi_hba_close) {
}
return (rval);
}
/*
 * mrsas_ioctl -- dispatches MRSAS_IOCTL_FIRMWARE / MRSAS_IOCTL_AEN,
 * falling through to scsi_hba_ioctl for unknown commands.
 */
static int
int *rvalp)
{
int rval = 0;
struct mrsas_instance *instance;
struct mrsas_ioctl *ioctl;
/* invalid minor number */
return (ENXIO);
}
KM_SLEEP);
case MRSAS_IOCTL_FIRMWARE:
sizeof (struct mrsas_ioctl), mode)) {
"ERROR IOCTL copyin"));
return (EFAULT);
}
} else {
}
"mrsas_ioctl: copy_to_user failed"));
rval = 1;
}
break;
case MRSAS_IOCTL_AEN:
"mrsas_ioctl: ERROR AEN copyin"));
return (EFAULT);
}
"mrsas_ioctl: copy_to_user failed"));
rval = 1;
}
break;
default:
"scsi_hba_ioctl called, ret = %x.", rval));
}
return (rval);
}
/*
* ************************************************************************** *
* *
* common entry points - for block driver types *
* *
* ************************************************************************** *
*/
/*ARGSUSED*/
/*
 * mrsas_reset -- reset(9E) entry point (signature line missing from this
 * extract); fragmentary, kept byte-identical.
 */
static int
{
int instance_no;
struct mrsas_instance *instance;
if (!instance) {
"in reset", instance_no));
return (DDI_FAILURE);
}
instance_no));
return (DDI_SUCCESS);
}
/*
* ************************************************************************** *
* *
* entry points (SCSI HBA) *
* *
* ************************************************************************** *
*/
/*ARGSUSED*/
/*
 * SCSA target init/free plus child-node lookup helpers (signature lines
 * missing from this extract). NOTE(review): fragmentary; kept
 * byte-identical.
 */
static int
{
struct mrsas_instance *instance;
if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
"ndi_dev_is_persistent_node DDI_FAILURE t = %d l = %d",
return (DDI_FAILURE);
}
}
}
return (DDI_SUCCESS);
}
/*ARGSUSED*/
static void
{
struct mrsas_instance *instance;
}
}
}
/*
 * find a matching child dev_info node by address; returns the child or,
 * presumably, NULL when not found -- body incomplete here.
 */
static dev_info_t *
{
char addr[SCSI_MAXNAMELEN];
char tmp[MAXNAMELEN];
DDI_SUCCESS) {
continue;
}
break;
}
}
(void *)child));
return (child);
}
/*
 * validates target/lun properties read from a child node; -1 means the
 * property was absent.
 */
static int
{
if (tgt == -1) {
return (DDI_FAILURE);
}
"lun", -1);
if (lun == -1) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * tran_init_pkt(9E) and tran_start(9E) entry points (signature lines
 * missing from this extract). NOTE(review): fragmentary; kept
 * byte-identical.
 */
static struct scsi_pkt *
{
struct mrsas_instance *instance;
/* step #1 : pkt allocation */
return (NULL);
}
/*
 * Initialize the new pkt - we redundantly initialize
 * all the fields for illustrative purposes.
 */
acmd->cmd_ncookies = 0;
acmd->cmd_cookie = 0;
acmd->cmd_cookiecnt = 0;
pkt->pkt_statistics = 0;
pkt->pkt_reason = 0;
} else {
}
/* step #2 : dma allocation/move */
callback) == DDI_FAILURE) {
if (new_pkt) {
}
}
} else {
}
}
}
return (pkt);
}
static int
{
/*
 * Check if the command is already completed by the mrsas_build_cmd()
 * routine. In which case the busy_flag would be clear and scb will be
 * NULL and appropriate reason provided in pkt_reason field
 */
if (cmd_done) {
}
return (TRAN_ACCEPT);
}
return (TRAN_BUSY);
}
return (TRAN_BUSY);
}
/* Synchronize the Cmd frame for the controller */
} else {
pkt->pkt_statistics = 0;
&hdr->cmd_status)) {
case MFI_STAT_OK:
break;
pkt->pkt_statistics = 0;
break;
break;
default:
}
}
}
return (TRAN_ACCEPT);
}
/*ARGSUSED*/
/*
 * tran_abort(9E): abort is not supported by the hardware, so this always
 * fails.
 */
static int
{
/* abort command not supported by H/W */
return (DDI_FAILURE);
}
/*ARGSUSED*/
/*
 * tran_reset(9E): reset is likewise unsupported by the hardware.
 */
static int
{
/* reset command not supported by H/W */
return (DDI_FAILURE);
}
/*ARGSUSED*/
/*
 * tran_getcap(9E) -- report SCSI capabilities; -1 means "capability not
 * known/supported". (Signature line missing from this extract.)
 */
static int
{
int rval = 0;
/* we do allow inquiring about capabilities for other targets */
return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_DMA_MAX:
/* Limit to 16MB max transfer */
break;
case SCSI_CAP_MSG_OUT:
rval = 1;
break;
case SCSI_CAP_DISCONNECT:
rval = 0;
break;
case SCSI_CAP_SYNCHRONOUS:
rval = 0;
break;
case SCSI_CAP_WIDE_XFER:
rval = 1;
break;
case SCSI_CAP_TAGGED_QING:
rval = 1;
break;
case SCSI_CAP_UNTAGGED_QING:
rval = 1;
break;
case SCSI_CAP_PARITY:
rval = 1;
break;
case SCSI_CAP_INITIATOR_ID:
break;
case SCSI_CAP_ARQ:
rval = 1;
break;
case SCSI_CAP_LINKED_CMDS:
rval = 0;
break;
/* NOTE(review): case label for this arm lost in the extract */
rval = 1;
break;
case SCSI_CAP_GEOMETRY:
rval = -1;
break;
default:
rval = -1;
break;
}
return (rval);
}
/*ARGSUSED*/
/*
 * tran_setcap(9E) -- most capabilities are read-only here; 1 = handled,
 * -1 = unknown capability.
 */
static int
{
int rval = 1;
/* We don't allow setting capabilities for other targets */
return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_DMA_MAX:
case SCSI_CAP_MSG_OUT:
case SCSI_CAP_PARITY:
case SCSI_CAP_LINKED_CMDS:
case SCSI_CAP_DISCONNECT:
case SCSI_CAP_SYNCHRONOUS:
case SCSI_CAP_UNTAGGED_QING:
case SCSI_CAP_WIDE_XFER:
case SCSI_CAP_INITIATOR_ID:
case SCSI_CAP_ARQ:
/*
 * None of these are settable via
 * the capability interface.
 */
break;
case SCSI_CAP_TAGGED_QING:
rval = 1;
break;
case SCSI_CAP_SECTOR_SIZE:
rval = 1;
break;
case SCSI_CAP_TOTAL_SECTORS:
rval = 1;
break;
default:
rval = -1;
break;
}
return (rval);
}
/*
 * tran_destroy_pkt/tran_dmafree/tran_sync_pkt fragments (signatures and
 * bodies mostly missing from this extract). Kept byte-identical.
 */
static void
{
}
/* free the pkt */
}
/*ARGSUSED*/
static void
{
}
}
/*ARGSUSED*/
static void
{
}
}
/*
* mrsas_isr(caddr_t)
*
* The Interrupt Service Routine
*
* Collect status for all completed commands and do callback
*
*/
/*
 * mrsas_isr -- hardware interrupt handler (signature line missing from
 * this extract). Drains the reply queue, then either triggers the soft
 * ISR or calls mrsas_softintr() directly for low-level interrupts.
 * NOTE(review): fragmentary; kept byte-identical.
 */
static uint_t
{
int need_softintr;
return (DDI_INTR_UNCLAIMED);
}
0, 0, DDI_DMA_SYNC_FORCPU);
!= DDI_SUCCESS) {
return (DDI_INTR_CLAIMED);
}
return (DDI_INTR_CLAIMED);
}
/* reply queue is circular: wrap the consumer index */
consumer++;
consumer = 0;
}
}
0, 0, DDI_DMA_SYNC_FORDEV);
/* only trigger the soft ISR if it is not already running */
if (instance->softint_running) {
need_softintr = 0;
} else {
need_softintr = 1;
}
if (need_softintr) {
}
} else {
/*
 * Not a high-level interrupt, therefore call the soft level
 * interrupt explicitly
 */
(void) mrsas_softintr(instance);
}
return (DDI_INTR_CLAIMED);
}
/*
* ************************************************************************** *
* *
* libraries *
* *
* ************************************************************************** *
*/
/*
* get_mfi_pkt : Get a command from the free pool
* After successful allocation, the caller of this routine
* must clear the frame buffer (memset to zero) before
* using the packet further.
*
* ***** Note *****
* After clearing the frame buffer the context id of the
* frame buffer SHOULD be restored back.
*/
/*
 * get_mfi_pkt -- pop a command from the free pool (body incomplete in
 * this extract; see header comment above for the caller's obligation to
 * clear the frame buffer and restore the context id).
 */
static struct mrsas_cmd *
{
if (!mlist_empty(head)) {
}
return (cmd);
}
/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
{
}
/*
 * destroy_mfi_frame_pool
 */
static void
{
int i;
/* return all frames to pool */
for (i = 0; i < max_cmd+1; i++) {
}
}
/*
 * create_mfi_frame_pool
 * NOTE(review): fragmentary -- allocation calls truncated; kept
 * byte-identical. Returns DDI_SUCCESS, DDI_FAILURE, or ENOMEM.
 */
static int
{
int i = 0;
int cookie_cnt;
sge_sz = sizeof (struct mrsas_sge64);
/* calculated the number of 64byte frames required for SGL */
while (i < max_cmd+1) {
"create_mfi_frame_pool: could not alloc."));
return (DDI_FAILURE);
}
"mr_sas: pci_pool_alloc failed"));
return (ENOMEM);
}
i++;
}
return (DDI_SUCCESS);
}
/*
* free_additional_dma_buffer
*/
/*
 * free_additional_dma_buffer -- release reply queue / evt_detail DMA
 * objects (body incomplete in this extract).
 */
static void
{
(void) mrsas_free_dma_obj(instance,
}
(void) mrsas_free_dma_obj(instance,
}
}
/*
 * alloc_additional_dma_buffer
 * Allocates the reply queue ("max cmds plus 1 + producer & consumer")
 * and the evt_detail buffer. Fragmentary; kept byte-identical.
 */
static int
{
/* max cmds plus 1 + producer & consumer */
0xFFFFFFFFU;
"mr_sas: could not alloc reply queue"));
return (DDI_FAILURE);
}
(reply_q_sz + 8);
(reply_q_sz + 8);
/* allocate evt_detail */
"could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
sizeof (struct mrsas_evt_detail));
return (DDI_SUCCESS);
}
/*
 * free_space_for_mfi -- tear down the MFI frame pool and cmd_list.
 */
static void
{
int i;
/* already freed */
return;
}
/* first free the MFI frame pool */
/* free all the commands in the cmd_list */
sizeof (struct mrsas_cmd));
}
/* free the cmd_list buffer itself */
}
/*
 * alloc_space_for_mfi -- allocate cmd_list, populate the command pool,
 * then build the frame pool and additional DMA buffers.
 */
static int
{
int i;
/* reserve 1 more slot for flush_cache */
/*
 * instance->cmd_list is an array of struct mrsas_cmd pointers.
 * Allocate the dynamic array first and then allocate individual
 * commands.
 */
for (i = 0; i < max_cmd+1; i++) {
KM_SLEEP);
}
/* add all the commands to command pool (instance->cmd_pool) */
for (i = 0; i < max_cmd; i++) {
}
/* single slot for flush_cache won't be added in command pool */
/* create a frame pool and assign one frame to each cmd */
if (create_mfi_frame_pool(instance)) {
return (DDI_FAILURE);
}
/* create a frame pool and assign one frame to each cmd */
if (alloc_additional_dma_buffer(instance)) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* get_ctrl_info
*/
/*
 * get_ctrl_info -- issue a DCMD to fetch controller info into ctrl_info.
 * Returns 0 on success, -1 on failure, DDI_FAILURE when no cmd/buffer.
 * NOTE(review): fragmentary -- DCMD setup truncated; kept byte-identical.
 */
static int
struct mrsas_ctrl_info *ctrl_info)
{
int ret = 0;
struct mrsas_dcmd_frame *dcmd;
struct mrsas_ctrl_info *ci;
if (!cmd) {
"Failed to get a cmd for ctrl info"));
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
if (!ci) {
"Failed to alloc mem for ctrl info"));
return (DDI_FAILURE);
}
/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
sizeof (struct mrsas_ctrl_info));
sizeof (struct mrsas_ctrl_info));
ret = 0;
/* should get more members of ci with ddi_get when needed */
} else {
ret = -1;
}
ret = -1;
}
return (ret);
}
/*
 * abort_aen_cmd -- build and issue an MFI abort frame for cmd_to_abort
 * in synchronous mode. Returns 0 on success, -1 on failure.
 */
static int
struct mrsas_cmd *cmd_to_abort)
{
int ret = 0;
struct mrsas_abort_frame *abort_fr;
if (!cmd) {
"Failed to get a cmd for ctrl info"));
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
/* prepare and issue the abort frame */
&abort_fr->abort_mfi_phys_addr_hi, 0);
"abort_aen_cmd: issue_cmd_in_sync_mode failed"));
ret = -1;
} else {
ret = 0;
}
return (ret);
}
/*
* init_mfi
*/
/*
 * init_mfi -- bring the firmware to READY state, size the command pool
 * from the status register, and issue the INIT frame in polled mode.
 * NOTE(review): fragmentary -- the fail_* labels and most calls are
 * missing; kept byte-identical.
 */
static int
{
struct mrsas_ctrl_info ctrl_info;
struct mrsas_init_frame *init_frame;
struct mrsas_init_queue_info *initq_info;
/* we expect the FW state to be READY */
goto fail_ready_state;
}
/* get various operational parameters from status register */
0xFF0000) >> 0x10;
/*
 * Reduce the max supported cmds by 1. This is to ensure that the
 * reply_q_sz (1 more than the max cmd that driver may send)
 * does not exceed max cmds that the FW can support
 */
/* create a pool of commands */
goto fail_alloc_fw_space;
/*
 * Prepare a init frame. Note the init frame points to queue info
 * structure. Each frame has SGL allocated after first 64 bytes. For
 * this frame - since we don't need any SGL - we use SGL's space as
 * queue info structure
 */
/* Clear the frame buffer and assign back the context id */
initq_info = (struct mrsas_init_queue_info *)
((unsigned long)init_frame + 64);
sizeof (struct mrsas_init_queue_info));
/* issue the init frame in polled mode */
goto fail_fw_init;
}
goto fail_fw_init;
}
/* gather misc FW related information */
} else {
PAGESIZE / 512;
}
goto fail_fw_init;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* mfi_state_transition_to_ready : Move the FW to READY state
*
* @reg_set : MFI register set
*/
/*
 * mfi_state_transition_to_ready -- poll the firmware state machine until
 * it reaches MFI_STATE_READY, choosing a per-state max_wait (seconds) and
 * nudging the firmware via doorbell writes (signature line missing from
 * this extract). Returns DDI_SUCCESS or ENODEV.
 */
static int
{
int i;
fw_state =
"mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
while (fw_state != MFI_STATE_READY) {
"mfi_state_transition_to_ready:FW state%x", fw_state));
switch (fw_state) {
case MFI_STATE_FAULT:
/* unrecoverable: firmware has faulted */
"mr_sas: FW in FAULT state!!"));
return (ENODEV);
case MFI_STATE_WAIT_HANDSHAKE:
/* set the CLR bit in IMR0 */
"mr_sas: FW waiting for HANDSHAKE"));
/*
 * PCI_Hot Plug: MFI F/W requires
 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
 * to be set
 */
/* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
max_wait = 2;
break;
/* NOTE(review): case label (boot message pending) lost in extract */
/* set the CLR bit in IMR0 */
"mr_sas: FW state boot message pending"));
/*
 * PCI_Hot Plug: MFI F/W requires
 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
 * to be set
 */
max_wait = 10;
break;
case MFI_STATE_OPERATIONAL:
/* bring it to READY state; assuming max wait 2 secs */
"mr_sas: FW in OPERATIONAL state"));
/*
 * PCI_Hot Plug: MFI F/W requires
 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
 * to be set
 */
/* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
max_wait = 10;
break;
case MFI_STATE_UNDEFINED:
/* this state should not last for more than 2 seconds */
max_wait = 2;
break;
case MFI_STATE_BB_INIT:
max_wait = 2;
break;
case MFI_STATE_FW_INIT:
max_wait = 2;
break;
case MFI_STATE_DEVICE_SCAN:
max_wait = 10;
break;
default:
"mr_sas: Unknown state 0x%x", fw_state));
return (ENODEV);
}
/* the cur_state should not last for more than max_wait secs */
/* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
fw_state =
} else {
break;
}
}
/* return error if fw_state hasn't changed after max_wait */
"FW state hasn't changed in %d secs", max_wait));
return (ENODEV);
}
};
"mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
/*
 * Write 0xF to the doorbell register to do the following.
 * - Abort all outstanding commands (bit 0).
 * - Transition from OPERATIONAL to READY state (bit 1).
 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
 * - Set to release FW to continue running (i.e. BIOS handshake
 * (bit 3).
 */
return (ENODEV);
}
return (DDI_SUCCESS);
}
/*
* get_seq_num
*/
/*
 * get_seq_num -- issue MRSAS_DCMD_CTRL_EVENT_GET_INFO to read the event
 * log info (latest sequence number) into eli. Fragmentary; kept
 * byte-identical.
 */
static int
struct mrsas_evt_log_info *eli)
{
int ret = DDI_SUCCESS;
struct mrsas_dcmd_frame *dcmd;
struct mrsas_evt_log_info *eli_tmp;
if (!cmd) {
return (ENOMEM);
}
/* Clear the frame buffer and assign back the context id */
/* allocate the data transfer buffer */
"get_seq_num: could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
sizeof (struct mrsas_evt_log_info));
sizeof (struct mrsas_evt_log_info));
sizeof (struct mrsas_evt_log_info));
"failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
ret = DDI_FAILURE;
} else {
ret = DDI_SUCCESS;
}
ret = DDI_FAILURE;
ret = DDI_FAILURE;
}
return (ret);
}
/*
 * start_mfi_aen -- fetch the latest FW sequence number and register for
 * AEN delivery at seq+1. Returns 0 on success, -1 on failure.
 */
static int
{
int ret = 0;
struct mrsas_evt_log_info eli;
/* get the latest sequence number from FW */
return (-1);
}
/* register AEN with FW for latest sequence number plus 1 */
if (ret) {
return (-1);
}
return (ret);
}
/*
 * flush_cache -- issue MFI_DCMD_CTRL_CACHE_FLUSH (body incomplete in
 * this extract; best-effort, no return value).
 */
static void
{
struct mrsas_dcmd_frame *dcmd;
return;
"flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
}
}
/*
* service_mfi_aen- Completes an AEN command
* @instance: Adapter soft state
* @cmd: Command to be completed
*
*/
/*
 * service_mfi_aen -- handle a completed AEN: log a sysevent, react to
 * LD create/delete/config-clear events, then re-register for the next
 * sequence number. NOTE(review): fragmentary; kept byte-identical.
 */
static void
{
struct mrsas_evt_detail *evt_detail =
int rval = 0;
int tgt = 0;
cmd->cmd_status = 0;
}
/*
 * log the MFI AEN event to the sysevent queue so that
 * application will get noticed
 */
"mr_sas%d: Failed to log AEN event", instance_no));
}
/*
 * Check for any ld devices that has changed state. i.e. online
 * or offline.
 */
"AEN: code = %x class = %x locale = %x args = %x",
case MR_EVT_CFG_CLEARED: {
"mr_sas: CFG CLEARED AEN rval = %d "
}
}
break;
}
case MR_EVT_LD_DELETED: {
"tgt id = %d index = %d", rval,
break;
} /* End of MR_EVT_LD_DELETED */
case MR_EVT_LD_CREATED: {
"tgt id = %d index = %d", rval,
break;
} /* End of MR_EVT_LD_CREATED */
} /* End of Main Switch */
seq_num++;
sizeof (struct mrsas_evt_detail));
/* Issue the aen registration frame */
}
/*
 * complete_cmd_in_sync_mode - Completes an internal command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
{
cmd->cmd_status = 0;
}
}
/*
* mrsas_softintr - The Software ISR
* @param arg : HBA soft state
*
* called from high-level interrupt if hi-level interrupt are not there,
* otherwise triggered as a soft interrupt
*/
/*
 * mrsas_softintr -- the soft ISR (signature line missing from this
 * extract): drains the completed-command list, translating MFI status
 * into scsi_pkt status/ARQ data, and dispatching DCMD/ABORT completions.
 * NOTE(review): fragmentary; kept byte-identical.
 */
static uint_t
{
struct mrsas_header *hdr;
struct scsi_arq_status *arqstat;
return (DDI_INTR_CLAIMED);
}
/* perform all callbacks first, before releasing the SCBs */
/* syncronize the Cmd frame for the controller */
0, 0, DDI_DMA_SYNC_FORCPU);
DDI_SUCCESS) {
return (DDI_INTR_CLAIMED);
}
/* remove the internal command from the process list */
case MFI_CMD_OP_PD_SCSI:
case MFI_CMD_OP_LD_SCSI:
case MFI_CMD_OP_LD_READ:
case MFI_CMD_OP_LD_WRITE:
/*
 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
 * could have been issued either through an
 * IO path or an IOCTL path. If it was via IOCTL,
 * we will send it to internal completion.
 */
break;
}
/* regular commands */
}
}
pkt->pkt_statistics = 0;
"CDB[0] = %x completed for %s: size %lx context %x",
struct scsi_inquiry *inq;
if (acmd->cmd_dmacount != 0) {
inq = (struct scsi_inquiry *)
/* don't expose physical drives to OS */
} else if ((hdr->cmd_status ==
DTYPE_DIRECT) {
/* for physical disk */
hdr->cmd_status =
}
}
}
switch (hdr->cmd_status) {
case MFI_STAT_OK:
break;
break;
(CE_WARN, "Initialization in Progress"));
break;
/* NOTE(review): SCSI_DONE_WITH_ERROR arm truncated in extract */
((struct scsi_status *)
(CE_WARN, "TEST_UNIT_READY fail"));
} else {
arqstat->sts_rqpkt_resid = 0;
(uint8_t *)
&(arqstat->sts_sensedata),
acmd->cmd_scblen -
offsetof(struct scsi_arq_status,
}
break;
case MFI_STAT_LD_OFFLINE:
"device not found error"));
break;
((struct scsi_status *)
arqstat->sts_rqpkt_resid = 0;
/*
 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
 * ASC: 0x21h; ASCQ: 0x00h;
 */
break;
default:
break;
}
if (acmd->cmd_dmahandle) {
pkt->pkt_statistics = 0;
}
}
/* Call the callback routine */
}
break;
case MFI_CMD_OP_SMP:
case MFI_CMD_OP_STP:
break;
case MFI_CMD_OP_DCMD:
/* see if got an event notification */
"mrsas_softintr: "
"aborted_aen returned"));
} else {
(-1));
}
} else {
}
break;
case MFI_CMD_OP_ABORT:
/*
 * MFI_CMD_OP_ABORT successfully completed
 * in the synchronous mode
 */
break;
default:
}
}
break;
}
}
instance->softint_running = 0;
return (DDI_INTR_CLAIMED);
}
/*
* mrsas_alloc_dma_obj
*
* Allocate the memory and other resources for an dma object.
*/
/*
 * mrsas_alloc_dma_obj -- allocate handle, memory, and bindings for a DMA
 * object; returns the cookie count on success, -1 on any failure
 * (signature line missing from this extract).
 */
static int
{
int i;
struct ddi_device_acc_attr tmp_endian_attr;
if (i != DDI_SUCCESS) {
switch (i) {
case DDI_DMA_BADATTR :
"Failed ddi_dma_alloc_handle- Bad attribute"));
break;
case DDI_DMA_NORESOURCES :
"Failed ddi_dma_alloc_handle- No Resources"));
break;
default :
"Failed ddi_dma_alloc_handle: "
"unknown status %d", i));
break;
}
return (-1);
}
return (-1);
}
return (-1);
}
return (-1);
}
return (-1);
}
return (cookie_cnt);
}
/*
 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
 *
 * De-allocate the memory and other resources for an dma object, which must
 * have been alloated by a previous call to mrsas_alloc_dma_obj()
 */
static int
{
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
* int, int (*)())
*
* Allocate dma resources for a new scsi command
*/
/*
 * mrsas_dma_alloc -- bind DMA resources for a new SCSI command
 * (signature line missing from this extract), capping the cookie count at
 * instance->max_num_sge. NOTE(review): fragmentary; kept byte-identical.
 */
static int
{
int dma_flags;
int i;
} else {
}
if (flags & PKT_CONSISTENT) {
}
if (flags & PKT_DMA_PARTIAL) {
}
switch (i) {
case DDI_DMA_BADATTR:
return (DDI_FAILURE);
case DDI_DMA_NORESOURCES:
return (DDI_FAILURE);
default:
"impossible result (0x%x)", i));
return (DDI_FAILURE);
}
}
switch (i) {
case DDI_DMA_PARTIAL_MAP:
if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
"DDI_DMA_PARTIAL_MAP impossible"));
goto no_dma_cookies;
}
DDI_FAILURE) {
goto no_dma_cookies;
}
DDI_FAILURE) {
goto no_dma_cookies;
}
goto get_dma_cookies;
case DDI_DMA_MAPPED:
acmd->cmd_dma_len = 0;
acmd->cmd_dma_offset = 0;
i = 0;
acmd->cmd_dmacount = 0;
/* accumulate cookies up to max_num_sge or cmd_ncookies */
for (;;) {
acmd->cmd_dmacount +=
if (i == instance->max_num_sge ||
i == acmd->cmd_ncookies)
break;
&acmd->cmd_dmacookies[i]);
}
acmd->cmd_cookie = i;
acmd->cmd_cookiecnt = i;
} else {
}
return (DDI_SUCCESS);
case DDI_DMA_NORESOURCES:
break;
case DDI_DMA_NOMAPPING:
break;
case DDI_DMA_TOOBIG:
break;
case DDI_DMA_INUSE:
" DDI_DMA_INUSE impossible"));
break;
default:
"impossible result (0x%x)", i));
break;
}
return (DDI_FAILURE);
}
/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 */
static int
{
int i = 0;
/*
 * If there are no more cookies remaining in this window,
 * must move to the next window first.
 */
return (DDI_SUCCESS);
}
/* at last window, cannot move */
return (DDI_FAILURE);
}
DDI_FAILURE) {
return (DDI_FAILURE);
}
acmd->cmd_cookie = 0;
} else {
/* still more cookies in this window - get the next one */
&acmd->cmd_dmacookies[0]);
}
/* get remaining cookies in this window, up to our maximum */
for (;;) {
acmd->cmd_cookie++;
if (i == instance->max_num_sge ||
break;
}
&acmd->cmd_dmacookies[i]);
}
acmd->cmd_cookiecnt = i;
} else {
}
return (DDI_SUCCESS);
}
/*
* build_cmd
*/
/*
 * build_cmd -- translate a scsi_pkt into an MFI command frame (LD I/O or
 * DCDB pass-through), setting *cmd_done when the command is completed
 * in-line (e.g. emulated MODE SENSE pages 3/4).
 * NOTE(review): fragmentary -- the CDB switch has lost most of its case
 * structure; kept byte-identical.
 */
static struct mrsas_cmd *
{
uint32_t i;
struct mrsas_sge64 *mfi_sgl;
struct mrsas_pthru_frame *pthru;
struct mrsas_io_frame *ldio;
/* find out if this is logical or physical drive command. */
*cmd_done = 0;
/* get the command packet */
return (NULL);
}
/* Clear the frame buffer and assign back the context id */
/* lets get the command directions */
}
}
} else {
}
flags |= MFI_FRAME_SGL64;
/*
 * case SCMD_SYNCHRONIZE_CACHE:
 * flush_cache(instance);
 * return_mfi_pkt(instance, cmd);
 * *cmd_done = 1;
 *
 * return (NULL);
 */
case SCMD_READ:
case SCMD_WRITE:
case SCMD_READ_G1:
case SCMD_WRITE_G1:
/*
 * preare the Logical IO frame:
 * 2nd bit is zero for all read cmds
 */
/* Initialize sense Information */
<< 16)));
}
break;
}
default:
case SCMD_MODE_SENSE:
case SCMD_MODE_SENSE_G1: {
switch (page_code) {
case 0x3:
case 0x4:
/* pages 3/4 are synthesized by the driver */
(void) mrsas_mode_sense_build(pkt);
*cmd_done = 1;
return (NULL);
}
break;
}
default:
break;
}
/* prepare the DCDB frame */
acmd->cmd_dmacount);
break;
}
#ifdef lint
#endif
/* prepare the scatter-gather list for the firmware */
}
}
return (cmd);
}
/*
* issue_mfi_pthru
*/
/*
 * issue_mfi_pthru -- service an ioctl pass-through frame: copy user data
 * in (byte-at-a-time via ddi_copyin), issue the frame, copy results back
 * out, free the kernel DMA buffer. Handles ILP32 vs LP64 user models.
 * NOTE(review): fragmentary; kept byte-identical.
 */
static int
{
void *ubuf;
uint32_t kphys_addr = 0;
struct mrsas_pthru_frame *kpthru;
struct mrsas_pthru_frame *pthru;
int i;
if (model == DDI_MODEL_ILP32) {
} else {
#ifdef _ILP32
#else
#endif
}
if (xferlen) {
/* means IOCTL requires DMA */
/* allocate the data transfer buffer */
/* allocate kernel buffer for DMA */
"could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
for (i = 0; i < xferlen; i++) {
1, mode)) {
"issue_mfi_pthru : "
"copy from user space failed"));
return (DDI_FAILURE);
}
}
}
}
/* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
"issue_mfi_pthru: fw_ioctl failed"));
} else {
for (i = 0; i < xferlen; i++) {
if (ddi_copyout(
"issue_mfi_pthru : "
"copy to user space failed"));
return (DDI_FAILURE);
}
}
}
}
if (xferlen) {
/* free kernel buffer */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * issue_mfi_dcmd -- same flow as issue_mfi_pthru, for DCMD ioctl frames.
 */
static int
{
void *ubuf;
uint32_t kphys_addr = 0;
struct mrsas_dcmd_frame *kdcmd;
struct mrsas_dcmd_frame *dcmd;
int i;
if (model == DDI_MODEL_ILP32) {
} else {
#ifdef _ILP32
#else
#endif
}
if (xferlen) {
/* means IOCTL requires DMA */
/* allocate the data transfer buffer */
/* allocate kernel buffer for DMA */
"could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
for (i = 0; i < xferlen; i++) {
1, mode)) {
"issue_mfi_dcmd : "
"copy from user space failed"));
return (DDI_FAILURE);
}
}
}
}
} else {
for (i = 0; i < xferlen; i++) {
if (ddi_copyout(
1, mode)) {
"issue_mfi_dcmd : "
"copy to user space failed"));
return (DDI_FAILURE);
}
}
}
}
if (xferlen) {
/* free kernel buffer */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* issue_mfi_smp
*
* Services an MFI SMP (Serial Management Protocol) frame from an ioctl.
* Unlike pthru/dcmd this handles two user buffers: an SMP request and an
* SMP response, each with its own transfer length, DMA staging buffer and
* byte-wise copyin/copyout. SGL entries use 32-bit SGEs, or 64-bit SGEs
* on non-ILP32 kernels (see the #ifndef _ILP32 declaration below).
*
* NOTE(review): truncated region -- signature and most statements missing.
* Returns DDI_SUCCESS / DDI_FAILURE.
*/
static int
{
void *request_ubuf;
void *response_ubuf;
uint32_t request_xferlen = 0;
uint32_t response_xferlen = 0;
struct mrsas_smp_frame *ksmp;
struct mrsas_smp_frame *smp;
struct mrsas_sge32 *sge32;
#ifndef _ILP32
struct mrsas_sge64 *sge64;
#endif
int i;
/* pick frame layout based on the caller's data model (ILP32 vs LP64) */
if (model == DDI_MODEL_ILP32) {
"response_xferlen = %x, request_xferlen = %x",
"response_ubuf = %p, request_ubuf = %p",
} else {
#ifdef _ILP32
"response_xferlen = %x, request_xferlen = %x",
"response_ubuf = %p, request_ubuf = %p",
#else
#endif
}
if (request_xferlen) {
/* means IOCTL requires DMA */
/* allocate the data transfer buffer */
/* allocate kernel buffer for DMA */
"could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
for (i = 0; i < request_xferlen; i++) {
1, mode)) {
"copy from user space failed"));
return (DDI_FAILURE);
}
}
}
if (response_xferlen) {
/* means IOCTL requires DMA */
/* allocate the data transfer buffer */
/* allocate kernel buffer for DMA */
"could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
for (i = 0; i < response_xferlen; i++) {
1, mode)) {
"copy from user space failed"));
return (DDI_FAILURE);
}
}
}
/* smp->context = ksmp->context; */
sizeof (uint64_t));
if (model == DDI_MODEL_ILP32) {
"issue_mfi_smp: DDI_MODEL_ILP32"));
} else {
#ifdef _ILP32
"issue_mfi_smp: DDI_MODEL_ILP32"));
#else
"issue_mfi_smp: DDI_MODEL_LP64"));
#endif
}
"smp->response_xferlen = %d, smp->request_xferlen = %d "
"issue_mfi_smp: fw_ioctl failed"));
} else {
/* success: copy both request and response buffers back out */
"issue_mfi_smp: copy to user space"));
if (request_xferlen) {
for (i = 0; i < request_xferlen; i++) {
if (ddi_copyout(
i, (uint8_t *)request_ubuf + i,
1, mode)) {
"issue_mfi_smp : copy to user space"
" failed"));
return (DDI_FAILURE);
}
}
}
if (response_xferlen) {
for (i = 0; i < response_xferlen; i++) {
if (ddi_copyout(
+ i, (uint8_t *)response_ubuf
+ i, 1, mode)) {
"issue_mfi_smp : copy to "
"user space failed"));
return (DDI_FAILURE);
}
}
}
}
if (request_xferlen) {
/* free kernel buffer */
return (DDI_FAILURE);
}
if (response_xferlen) {
/* free kernel buffer */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* issue_mfi_stp
*
* Services an MFI STP (SATA Tunneled Protocol) frame from an ioctl.
* Handles two user buffers: the FIS and the data payload, each with its
* own transfer length, DMA staging buffer and byte-wise copyin/copyout.
*
* NOTE(review): truncated region -- signature and most statements missing.
* Returns DDI_SUCCESS / DDI_FAILURE.
*/
static int
{
void *fis_ubuf;
void *data_ubuf;
uint32_t fis_xferlen = 0;
uint32_t data_xferlen = 0;
struct mrsas_stp_frame *kstp;
struct mrsas_stp_frame *stp;
int i;
/* pick frame layout based on the caller's data model (ILP32 vs LP64) */
if (model == DDI_MODEL_ILP32) {
}
else
{
#ifdef _ILP32
#else
#endif
}
if (fis_xferlen) {
/* means IOCTL requires DMA */
/* allocate the data transfer buffer */
/* allocate kernel buffer for DMA */
"could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
for (i = 0; i < fis_xferlen; i++) {
"copy from user space failed"));
return (DDI_FAILURE);
}
}
}
if (data_xferlen) {
/* means IOCTL requires DMA */
/* allocate the data transfer buffer */
/* allocate kernel buffer for DMA */
"could not allocate data transfer buffer."));
return (DDI_FAILURE);
}
/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
for (i = 0; i < data_xferlen; i++) {
"copy from user space failed"));
return (DDI_FAILURE);
}
}
}
} else {
/* success: copy FIS and data buffers back out to user space */
if (fis_xferlen) {
for (i = 0; i < fis_xferlen; i++) {
if (ddi_copyout(
"issue_mfi_stp : copy to "
"user space failed"));
return (DDI_FAILURE);
}
}
}
}
if (data_xferlen) {
for (i = 0; i < data_xferlen; i++) {
if (ddi_copyout(
"issue_mfi_stp : copy to"
" user space failed"));
return (DDI_FAILURE);
}
}
}
if (fis_xferlen) {
/* free kernel buffer */
return (DDI_FAILURE);
}
if (data_xferlen) {
/* free kernel buffer */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* fill_up_drv_ver
*
* Populates a mrsas_drv_ver structure with the driver's version strings
* for the DRIVER_VERSION ioctl -- presumably; the body has been truncated
* out of this view, so the exact fields filled cannot be confirmed here.
*/
static void
{
}
/*
* handle_drv_ioctl
*
* Dispatches driver-specific (non-firmware) ioctls. Two visible opcodes:
* DRIVER_VERSION (copies a mrsas_drv_ver out to user space) and
* PCI_INFORMATION (reads the "reg" style property array, copies PCI config
* space into a mrsas_pci_information and returns it to user space).
* Unknown opcodes fail with DDI_FAILURE.
*
* NOTE(review): truncated region -- the switch statement and most calls
* are missing. Also note the debug string below spells "INFORMAITON";
* it is a runtime string, left untouched here.
*/
static int
int mode)
{
int i;
int rval = DDI_SUCCESS;
void *ubuf;
struct mrsas_dcmd_frame *kdcmd;
struct mrsas_drv_ver dv;
struct mrsas_pci_information pi;
/* report which data model the calling application uses */
if (model == DDI_MODEL_ILP32) {
"handle_drv_ioctl: DDI_MODEL_ILP32"));
} else {
#ifdef _ILP32
"handle_drv_ioctl: DDI_MODEL_ILP32"));
#else
"handle_drv_ioctl: DDI_MODEL_LP64"));
#endif
}
"MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
"MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
"copy to user space failed"));
rval = 1;
} else {
kdcmd->cmd_status = 0;
}
break;
"MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
"MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
"ddi_prop_look_int_array failed"));
rval = DDI_FAILURE;
} else {
ddi_prop_free((void *)props);
}
/* copy PCI config space bytes into the reply structure */
for (i = 0; i < (sizeof (struct mrsas_pci_information) -
i++) {
pci_conf_buf[i] =
}
"MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
"copy to user space failed"));
rval = 1;
} else {
kdcmd->cmd_status = 0;
}
break;
default:
"invalid driver specific IOCTL opcode = 0x%x",
rval = DDI_FAILURE;
break;
}
return (rval);
}
/*
* handle_mfi_ioctl
*
* Dispatches firmware (MFI) ioctls by frame opcode to the matching
* issue_mfi_* helper: DCMD, SMP, STP, and LD/PD SCSI (pass-through).
* Unknown opcodes fail. A command packet is acquired up front and
* (presumably) returned before exit -- the release call is truncated
* out of this view.
*
* Returns DDI_SUCCESS / DDI_FAILURE.
*/
static int
int mode)
{
int rval = DDI_SUCCESS;
struct mrsas_header *hdr;
if (!cmd) {
"failed to get a cmd packet"));
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
case MFI_CMD_OP_DCMD:
break;
case MFI_CMD_OP_SMP:
break;
case MFI_CMD_OP_STP:
break;
case MFI_CMD_OP_LD_SCSI:
case MFI_CMD_OP_PD_SCSI:
break;
default:
rval = DDI_FAILURE;
break;
}
rval = DDI_FAILURE;
return (rval);
}
/*
* AEN (Asynchronous Event Notification) ioctl handler.
*
* NOTE(review): body truncated -- only the return of rval is visible.
* Presumably validates the AEN request and calls register_mfi_aen();
* confirm against the full source.
*/
static int
{
int rval = 0;
return (rval);
}
/*
* register_mfi_aen
*
* Registers with the firmware for asynchronous event notification with a
* given class/locale. If an AEN command is already outstanding and its
* class/locale is a superset of the new request, nothing is done;
* otherwise the old command is aborted and a new registration covering
* the union of both is issued.
*
* Returns 0 on success, ENOMEM if no command packet is available, or the
* abort failure code.
*/
static int
{
int ret_val;
struct mrsas_dcmd_frame *dcmd;
union mrsas_evt_class_locale curr_aen;
union mrsas_evt_class_locale prev_aen;
/*
* If there an AEN pending already (aen_cmd), check if the
* class_locale of that pending AEN is inclusive of the new
* AEN request we currently have. If it is, then we don't have
* to do anything. In other words, whichever events the current
* AEN request is subscribing to, have already been subscribed
* to.
*
* If the old_cmd is _not_ inclusive, then we have to abort
* that command, form a class_locale that is superset of both
* old and current and re-issue to the FW
*/
if (aen_cmd) {
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously
* registered, then a new registration requests for higher
* classes need not be sent to FW. They are automatically
* included.
*
* Locale numbers don't have such hierarchy. They are bitmap
* values
*/
/*
* Previously issued event registration includes
* current request. Nothing to do.
*/
return (0);
} else {
if (ret_val) {
"failed to abort prevous AEN command"));
return (ret_val);
}
}
} else {
}
if (!cmd)
return (ENOMEM);
/* Clear the frame buffer and assign back the context id */
/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
sizeof (struct mrsas_evt_detail));
/* Prepare DCMD for aen registration */
sizeof (struct mrsas_evt_detail));
sizeof (struct mrsas_evt_detail));
/*
* Store reference to the cmd used to register for AEN. When an
* application wants us to register for AEN, we have to abort this
* cmd and re-register with a new EVENT LOCALE supplied by that app
*/
/* Issue the aen registration frame */
/* atomic_add_16 (&instance->fw_outstanding, 1); */
return (0);
}
/*
* Formats and logs a SCSI INQUIRY response in human-readable form:
* vendor (bytes 8-15), product (bytes 16-31), revision (bytes 32-35),
* and the peripheral device type name decoded from byte 0 bits 0-4.
* Types beyond the 14-entry table print as "Unknown".
*
* NOTE(review): truncated -- the signature and the formatting calls into
* inquiry_buf are missing from this view.
*/
static void
{
#define MAX_SCSI_DEVICE_CODE 14
int i;
char inquiry_buf[256] = {0};
int len;
/* indexed by scsi_inq[0] & 0x1f; exactly MAX_SCSI_DEVICE_CODE entries */
const char *const scsi_device_types[] = {
"Direct-Access ",
"Sequential-Access",
"Printer ",
"Processor ",
"WORM ",
"CD-ROM ",
"Scanner ",
"Optical Device ",
"Medium Changer ",
"Communications ",
"Unknown ",
"Unknown ",
"Unknown ",
"Enclosure ",
};
len = 0;
/* vendor identification: INQUIRY bytes 8-15 */
for (i = 8; i < 16; i++) {
scsi_inq[i]);
}
/* product identification: INQUIRY bytes 16-31 */
for (i = 16; i < 32; i++) {
scsi_inq[i]);
}
/* product revision level: INQUIRY bytes 32-35 */
for (i = 32; i < 36; i++) {
scsi_inq[i]);
}
/* peripheral device type is the low 5 bits of byte 0 */
i = scsi_inq[0] & 0x1f;
i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
"Unknown ");
} else {
}
}
/*
* Reads the firmware status from outbound scratch pad register 0.
*/
static int
{
return ((int)RD_OB_SCRATCH_PAD_0(instance));
}
/*
* Issues a command frame to the firmware (fire-and-forget).
* NOTE(review): the inbound-queue write is truncated out of this view.
*/
static void
{
/* Issue the command to the FW */
}
/*
* issue_cmd_in_sync_mode
*
* Issues a command and waits (up to a millisecond budget) for it to
* complete. Success is inferred from the wait loop finishing before the
* timeout: i < msecs - 1 means completion was seen in time.
*
* NOTE(review): the issue/wait statements are truncated out of this view.
*/
static int
{
int i;
}
if (i < (msecs -1)) {
return (DDI_SUCCESS);
} else {
return (DDI_FAILURE);
}
}
/*
* issue_cmd_in_poll_mode
*
* Issues a frame via the inbound queue port and busy-polls the frame
* header's cmd_status until it changes from MFI_CMD_STATUS_POLL_MODE or
* the msecs budget expires. Returns DDI_FAILURE on poll timeout.
*/
static int
{
int i;
struct mrsas_header *frame_hdr;
/* issue the frame using inbound queue port */
/* wait for cmd_status to change from 0xFF */
for (i = 0; i < msecs && (
== MFI_CMD_STATUS_POLL_MODE); i++) {
}
/* status still unchanged after the full budget => firmware timed out */
== MFI_CMD_STATUS_POLL_MODE) {
"cmd polling timed out"));
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Enables controller interrupts by clearing the outbound interrupt mask.
* The commented-out register writes are the older (gen-1) sequence kept
* for reference. The final mask read doubles as a PCI posted-write flush.
*/
static void
{
/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
/* WR_OB_INTR_MASK(~0x80000000, instance); */
/* dummy read to force PCI flush */
"outbound_intr_mask = 0x%x", mask));
}
/*
* Disables controller interrupts by setting the outbound interrupt mask.
* A dummy register read forces the posted write to complete; the lint
* guard consumes the otherwise-unused read value.
*/
static void
{
/* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
/* dummy read to force PCI flush */
#ifdef lint
#endif
}
/*
* Interrupt-acknowledge routine: checks the outbound interrupt status for
* the 2108 message bit to decide whether the interrupt is ours
* (DDI_INTR_CLAIMED vs DDI_INTR_UNCLAIMED), then clears it by writing the
* status value back, followed by a dummy read to flush the write.
*/
static int
{
int ret = DDI_INTR_CLAIMED;
/* check if it is our interrupt */
if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
}
}
if (ret == DDI_INTR_UNCLAIMED) {
return (ret);
}
/* clear the interrupt by writing back the same value */
/* dummy READ */
return (ret);
}
/*
* FMA health check helper: verifies a series of DMA and access handles
* (cmd buffers and register mapping -- the actual handle expressions are
* truncated out of this view). Any failed check downgrades ret to
* DDI_FAILURE; presumably an ereport/service-impact is also posted.
*/
static int
{
int ret = DDI_SUCCESS;
DDI_SUCCESS) {
}
ret = DDI_FAILURE;
}
!= DDI_SUCCESS) {
}
ret = DDI_FAILURE;
}
DDI_SUCCESS) {
}
ret = DDI_FAILURE;
}
}
ret = DDI_FAILURE;
}
return (ret);
}
/*ARGSUSED*/
/*
* FMA error callback registered with ddi_fm_handler_register().
* The driver copes with errors on any DMA/access handle, so it simply
* reflects the framework's assessment back.
*/
static int
{
/*
* as the driver can always deal with an error in any dma or
* access handle, we can just return the fme_status value.
*/
return (err->fme_status);
}
/*
* Registers the instance with IO Fault Services according to its
* fm_capabilities: adjusts access/DMA attributes for FMA, calls
* ddi_fm_init, sets up PCI ereports if ereport-capable, and registers
* mrsas_fm_error_cb if callback-capable. With no capabilities, FMA
* setup is skipped (the else branch is truncated from this view).
*/
static void
{
/* Need to change iblock to priority for new MSI intr */
/* Only register with IO Fault Services if we have some capability */
if (instance->fm_capabilities) {
/* Adjust access and dma attributes for FMA */
/*
* Register capabilities with IO Fault Services.
* fm_capabilities will be updated to indicate
* capabilities actually supported (not requested.)
*/
/*
* Initialize pci ereport capabilities if ereport
* capable (should always be.)
*/
}
/*
* Register error callback if error callback capable.
*/
mrsas_fm_error_cb, (void*) instance);
}
} else {
}
}
/*
* Tears down FMA state set up by mrsas_fm_init: unregisters the error
* callback, releases pci_ereport_setup() resources, calls ddi_fm_fini,
* and restores the non-FMA access/DMA attributes. No-op when the
* instance registered no capabilities.
*/
static void
{
/* Only unregister FMA capabilities if registered */
if (instance->fm_capabilities) {
/*
* Un-register error callback if error callback capable.
*/
}
/*
* Release any resources allocated by pci_ereport_setup()
*/
}
/* Unregister from IO Fault Services */
/* Adjust access and dma attributes for FMA */
}
}
/*
* Queries the FMA error status of a register access handle via
* ddi_fm_acc_err_get (call truncated from this view) and returns
* de.fme_status; DDI_FAILURE when the handle itself is invalid.
*/
int
{
return (DDI_FAILURE);
}
return (de.fme_status);
}
/*
* Queries the FMA error status of a DMA handle via ddi_fm_dma_err_get
* (call truncated from this view) and returns de.fme_status;
* DDI_FAILURE when the handle itself is invalid.
*/
int
{
return (DDI_FAILURE);
}
return (de.fme_status);
}
/*
* Posts an FMA ereport for this instance; the class string is composed
* into buf (the snprintf/ddi_fm_ereport_post calls are truncated out of
* this view -- confirm against the full source).
*/
void
{
char buf[FM_MAX_CLASS];
}
}
/*
* Allocates and registers interrupts of the requested type for the
* instance: queries supported/available counts, caps the count at one
* (single interrupt routine), allocates the handle array, fetches the
* priority (rejecting high-level interrupts, which are unsupported),
* adds the handler, and enables it. Every failure path frees whatever
* was allocated so far and returns DDI_FAILURE.
*
* NOTE(review): truncated -- the ddi_intr_* calls themselves are missing;
* only the error-handling skeleton is visible.
*/
static int
{
intr_type));
/* Get number of interrupts */
return (DDI_FAILURE);
}
/* Get number of available interrupts */
return (DDI_FAILURE);
}
/* Only one interrupt routine. So limit the count to 1 */
if (count > 1) {
count = 1;
}
/*
* Allocate an array of interrupt handlers. Currently we support
* only one interrupt. The framework can be extended later.
*/
/* Allocate interrupt */
"avail = %d", avail));
return (DDI_FAILURE);
}
}
/*
* Get the priority of the interrupt allocated.
*/
"get priority call failed"));
/* free the interrupts allocated so far */
for (i = 0; i < actual; i++) {
}
return (DDI_FAILURE);
}
/*
* Test for high level mutex. we don't support them.
*/
"High level interrupts not supported."));
for (i = 0; i < actual; i++) {
}
return (DDI_FAILURE);
}
/* Call ddi_intr_add_handler() */
for (i = 0; i < actual; i++) {
if (ret != DDI_SUCCESS) {
"failed %d", ret));
/* NOTE(review): inner loop reuses i, clobbering the outer index */
for (i = 0; i < actual; i++) {
}
return (DDI_FAILURE);
}
}
ret));
/* Free already allocated intr */
for (i = 0; i < actual; i++) {
(void) ddi_intr_remove_handler(
instance->intr_htable[i]);
}
return (DDI_FAILURE);
}
} else {
"%d", i));
}
}
return (DDI_SUCCESS);
}
/*
* Tears down the interrupts added by mrsas_add_intrs: disables them
* (block-disable vs. per-vector, branch truncated from this view), then
* removes and frees each handler.
*/
static void
{
int i;
/* Disable all interrupts first */
} else {
}
}
/* Remove all the handlers */
}
}
/*
* SCSA bus_config(9E) entry point. Holds the nexus during configuration,
* then dispatches on the op: BUS_CONFIG_ONE parses the device name into
* target/lun (only lun 0 is configurable here -- LDs are lun 0 only),
* while BUS_CONFIG_DRIVER/ALL configure everything. On success the
* generic ndi_busop_bus_config presumably completes the operation.
*
* NOTE(review): truncated -- name parsing and the config calls are
* missing from this view.
*/
static int
{
struct mrsas_instance *instance;
int config;
int rval;
return (NDI_FAILURE);
}
/* Hold nexus during bus_config */
switch (op) {
case BUS_CONFIG_ONE: {
rval = NDI_FAILURE;
break;
}
ptr++;
rval = NDI_FAILURE;
break;
}
/* only lun 0 is supported for logical drives */
if (lun == 0) {
} else {
rval = NDI_FAILURE;
}
break;
}
case BUS_CONFIG_DRIVER:
case BUS_CONFIG_ALL: {
rval = NDI_SUCCESS;
break;
}
}
if (rval == NDI_SUCCESS) {
}
rval));
return (rval);
}
/*
* Configures all logical devices on the instance -- presumably iterating
* over targets and calling mrsas_config_ld for each; the loop body is
* truncated out of this view. Always reports NDI_SUCCESS in the visible
* tail.
*/
static int
{
}
rval = NDI_SUCCESS;
return (rval);
}
/*
* Parses a "name@addr:minor" style device name into target and lun.
* The '@' introduces the address portion and ':' terminates it; the
* address is then split on ',' into target and lun, each converted with
* a numeric parse (calls truncated from this view). Returns DDI_FAILURE
* on any malformed component, DDI_SUCCESS otherwise.
*/
static int
{
char devbuf[SCSI_MAXNAMELEN];
char *addr;
long num;
/* Parse dev name and address */
addr = "";
for (p = devbuf; *p != '\0'; p++) {
if (*p == '@') {
addr = p + 1;
*p = '\0';
} else if (*p == ':') {
*p = '\0';
break;
}
}
/* Parse target and lun */
if (*p == ',') {
lp = p + 1;
*p = '\0';
break;
}
}
return (DDI_FAILURE); /* Can declare this as constant */
}
}
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS); /* Success case */
}
/*
* Configures a single logical drive child node. If a child devinfo for
* the target/lun already exists, it succeeds immediately; otherwise it
* probes the device and configures it (calls truncated from this view),
* freeing the probe buffer manually since sd_unprobe is a no-op.
* Returns NDI_SUCCESS / NDI_FAILURE.
*/
static int
{
struct scsi_device *sd;
int rval;
if (ldip) {
}
"mrsas_config_ld: Child = %p found t = %d l = %d",
return (NDI_SUCCESS);
}
else
rval = NDI_FAILURE;
/* sd_unprobe is blank now. Free buffer manually */
}
rval));
return (rval);
}
/*
* Creates and brings online a child dev node for a probed SCSI device:
* looks up the compatible/node-name from the inquiry data, allocates the
* devinfo, sets its properties (target/lun/compatible -- calls truncated
* from this view), and attempts online; on failure the node is freed.
* Optionally returns the new dip through dipp. The compatible array is
* freed on all paths (cleanup truncated). Returns NDI_SUCCESS/NDI_FAILURE.
*/
static int
{
char **compatible = NULL;
int ncompatible = 0;
char *childname;
int rval;
rval = NDI_FAILURE;
goto finish;
}
/* Create a dev node */
"mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
if (rval == NDI_SUCCESS) {
rval = NDI_FAILURE;
goto finish;
}
rval = NDI_FAILURE;
goto finish;
}
rval = NDI_FAILURE;
goto finish;
}
if (rval != NDI_SUCCESS) {
/* online failed: release the node we allocated */
(void) ndi_devi_free(ldip);
} else {
}
}
if (dipp) {
}
"mr_sas: config_scsi_device rval = %d t%dL%d",
return (rval);
}
/*ARGSUSED*/
/*
* Queues a hotplug-style event (config/unconfig of a target/lun) onto
* the instance taskq for asynchronous handling; the event structure
* allocation is truncated from this view. Returns ENOMEM if allocation
* fails, DDI_FAILURE if taskq dispatch fails, else DDI_SUCCESS.
*/
static int
{
"mrsas_service_evt called for t%dl%d event = %d",
return (ENOMEM);
}
DDI_SUCCESS) {
"mr_sas: Event task failed for t%dl%d event = %d",
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Taskq callback that services a queued event (presumably mrsas_issue_evt;
* the signature is truncated from this view). Holds the parent nexus
* (circ1) and dispatches on the event type: CONFIG_TGT configures the
* target's child node, UNCONFIG_TGT offlines an attached child.
*/
static void
{
int circ1 = 0;
char *devname;
" tgt %d lun %d event %d",
} else {
return;
}
case MRSAS_EVT_CONFIG_TGT:
0, NULL);
}
"mr_sas: EVT_CONFIG_TGT called:"
" for tgt %d lun %d event %d",
} else {
"mr_sas: EVT_CONFIG_TGT dip != NULL:"
" for tgt %d lun %d event %d",
}
break;
case MRSAS_EVT_UNCONFIG_TGT:
if (dip) {
/* only offline nodes that actually attached */
if (i_ddi_devi_attached(dip)) {
}
"mr_sas: EVT_UNCONFIG_TGT called:"
" for tgt %d lun %d event %d",
} else {
"mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
" for tgt %d lun %d event %d",
}
break;
}
}
/*
* Builds a synthetic MODE SENSE response in the packet's data buffer for
* page 0x3 (format) and page 0x4 (geometry), setting the mode header's
* length field from the corresponding mode-page structure size. Other
* page codes get only the bare header. Returns NULL in all visible paths
* (the declared int return is narrowed by truncation -- confirm against
* the full source).
*/
static int
{
struct mode_header *modehdrp;
/* ADD pkt statistics as Command failed. */
return (NULL);
}
switch (page_code) {
case 0x3: {
(uchar_t)(sizeof (struct mode_format));
break;
}
case 0x4: {
(uchar_t)(sizeof (struct mode_geometry));
break;
}
default:
break;
}
return (NULL);
}