aac.c revision 58bc78c7a7ad65c04aaa3ef379a396df23988691
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright 2005-08 Adaptec, Inc.
* Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
* Copyright (c) 2000 Michael Smith
* Copyright (c) 2001 Scott Long
* Copyright (c) 2000 BSDi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/ddidmareq.h>
#include <sys/byteorder.h>
#include "aac_regs.h"
#include "aac.h"
/*
* FMA header files
*/
/*
* For minor nodes created by the SCSA framework, minor numbers are
* formed by left-shifting the instance by INST_MINOR_SHIFT and ORing in a
* number less than 64.
*
* To support cfgadm, we need to conform to the SCSA framework by creating
* and calling scsi_hba_xxx() functions accordingly.
*/
#define AAC_MINOR 32
#define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK)
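/*
* Illustrative example only (not used by the code): assuming
* INST_MINOR_SHIFT is 6 and TRAN_MINOR_MASK covers the low 6 bits, the
* ioctl node minor for instance 2 would be
* (2 << INST_MINOR_SHIFT) | AAC_MINOR = 128 | 32 = 160,
* which decomposes back into instance = 160 >> INST_MINOR_SHIFT = 2 and
* sub-minor = AAC_SCSA_MINOR(160) = 32.
*/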
#define AAC_PD(t) ((t) - AAC_MAX_LD)
((t) < AAC_MAX_DEV(softs)) ? \
if (!(cond)) { \
while (count) { \
drv_usecwait(100); \
if (cond) \
break; \
count--; \
} \
} \
}
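/*
* AAC_SENSE_DATA_DESCR_LEN below is the size of the descriptor-format
* sense data used for 64-bit LBAs: a scsi_descr_sense_hdr followed by a
* single scsi_information_sense_descr carrying the 8-byte block address,
* hence the sum of the two structure sizes.
*/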
#define AAC_SENSE_DATA_DESCR_LEN \
(sizeof (struct scsi_descr_sense_hdr) + \
sizeof (struct scsi_information_sense_descr))
#define AAC_ARQ64_LENGTH \
(sizeof (struct scsi_arq_status) + \
/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
#define AAC_CDB_INQUIRY_CMDDT 0x02
#define AAC_CDB_INQUIRY_EVPD 0x01
#define AAC_VPD_PAGE_CODE 1
#define AAC_VPD_PAGE_LENGTH 3
#define AAC_VPD_PAGE_DATA 4
#define AAC_VPD_ID_CODESET 0
#define AAC_VPD_ID_TYPE 1
#define AAC_VPD_ID_LENGTH 3
#define AAC_VPD_ID_DATA 4
#define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08
#define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08
#define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0
/* 00b - peripheral device addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00
/* 01b - flat space addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40
/* 10b - logical unit addressing method */
#define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80
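/*
* Usage example: the REPORT LUNS parsing below classifies each 8-byte
* entry by its top two address bits, e.g.
* (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) == AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE
* selects the flat space addressing method.
*/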
/* Return the size of FIB with data part type data_type */
#define AAC_FIB_SIZEOF(data_type) \
(sizeof (struct aac_fib_header) + sizeof (data_type))
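/*
* Usage example: AAC_FIB_SIZEOF(struct aac_Container) is the size of the
* FIB header plus an aac_Container payload, as passed below when sending
* container configuration FIBs.
*/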
/* Return the container size defined in mir */
/* The last entry of aac_cards[] is for unknown cards */
#define AAC_UNKNOWN_CARD \
#define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD)
/* Write host data at valp to device mem[off] repeatedly count times */
/* Read device data at mem[off] to host addr valp repeatedly count times */
#define AAC_ENABLE_INTR(softs) { \
else \
}
#define AAC_FWSTATUS_GET(softs) \
#define AAC_THROTTLE_DRAIN -1
/* Poll time for aac_do_poll_io() */
/* IOP reset */
#define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeeded */
/*
* Hardware access functions
*/
static int aac_rx_get_fwstatus(struct aac_softstate *);
static int aac_rx_get_mailbox(struct aac_softstate *, int);
static int aac_rkt_get_fwstatus(struct aac_softstate *);
static int aac_rkt_get_mailbox(struct aac_softstate *, int);
/*
* SCSA function prototypes
*/
/*
* Interrupt handler functions
*/
static int aac_query_intrs(struct aac_softstate *, int);
static int aac_add_intrs(struct aac_softstate *);
static void aac_remove_intrs(struct aac_softstate *);
/*
* Internal functions in attach
*/
static int aac_check_card_type(struct aac_softstate *);
static int aac_check_firmware(struct aac_softstate *);
static int aac_common_attach(struct aac_softstate *);
static void aac_common_detach(struct aac_softstate *);
static int aac_probe_containers(struct aac_softstate *);
static int aac_alloc_comm_space(struct aac_softstate *);
static int aac_setup_comm_space(struct aac_softstate *);
static void aac_free_comm_space(struct aac_softstate *);
static int aac_hba_setup(struct aac_softstate *);
/*
* Sync FIB operation functions
*/
/*
* Command queue operation functions
*/
static void aac_cmd_initq(struct aac_cmd_queue *);
/*
* FIB queue operation functions
*/
static int aac_fib_dequeue(struct aac_softstate *, int, int *);
/*
* Slot operation functions
*/
static int aac_create_slots(struct aac_softstate *);
static void aac_destroy_slots(struct aac_softstate *);
static void aac_alloc_fibs(struct aac_softstate *);
static void aac_destroy_fibs(struct aac_softstate *);
static void aac_free_fib(struct aac_slot *);
/*
* Internal functions
*/
static void aac_start_waiting_io(struct aac_softstate *);
static void aac_drain_comp_q(struct aac_softstate *);
static int aac_dma_sync_ac(struct aac_cmd *);
static int aac_shutdown(struct aac_softstate *);
static int aac_reset_adapter(struct aac_softstate *);
static void aac_unhold_bus(struct aac_softstate *, int);
int, int);
/*
* Adapter Initiated FIB handling function
*/
/*
* Timeout handling thread function
*/
static void aac_daemon(void *);
/*
* IOCTL interface related functions
*/
/*
* FMA Prototypes
*/
static void aac_fm_init(struct aac_softstate *);
static void aac_fm_fini(struct aac_softstate *);
void aac_fm_ereport(struct aac_softstate *, char *);
/*
* Auto enumeration functions
*/
void *, dev_info_t **);
static int aac_dr_event(struct aac_softstate *, int, int, int);
#ifdef DEBUG
/*
* UART debug output support
*/
#define AAC_PRINT_BUFFER_SIZE 512
#define AAC_FW_DBG_STRLEN_OFFSET 0x00
#define AAC_FW_DBG_FLAGS_OFFSET 0x04
#define AAC_FW_DBG_BLED_OFFSET 0x08
static int aac_get_fw_debug_buffer(struct aac_softstate *);
static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
static char aac_fmt[] = " %s";
static char aac_fmt_header[] = " %s.%d: %s";
static kmutex_t aac_prt_mutex;
/*
* Debug flags to be put into the softstate flags field
* when initialized
*/
/* AACDB_FLAGS_KERNEL_PRINT | */
/* AACDB_FLAGS_FW_PRINT | */
/* AACDB_FLAGS_MISC | */
/* AACDB_FLAGS_FUNC1 | */
/* AACDB_FLAGS_FUNC2 | */
/* AACDB_FLAGS_SCMD | */
/* AACDB_FLAGS_AIF | */
/* AACDB_FLAGS_FIB | */
/* AACDB_FLAGS_IOCTL | */
0;
/* AACDB_FLAGS_FIB_RW | */
/* AACDB_FLAGS_FIB_IOCTL | */
/* AACDB_FLAGS_FIB_SRB | */
/* AACDB_FLAGS_FIB_SYNC | */
/* AACDB_FLAGS_FIB_HEADER | */
/* AACDB_FLAGS_FIB_TIMEOUT | */
0;
#endif /* DEBUG */
static struct cb_ops aac_cb_ops = {
aac_open, /* open */
aac_close, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
aac_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* cb_prop_op */
NULL, /* streamtab */
CB_REV, /* cb_rev */
nodev, /* async I/O read entry point */
nodev /* async I/O write entry point */
};
static struct dev_ops aac_dev_ops = {
0,
NULL,
};
static struct modldrv aac_modldrv = {
"AAC Driver " AAC_DRIVER_VERSION,
};
static struct modlinkage aac_modlinkage = {
};
static struct aac_softstate *aac_softstatep;
/*
* Supported card list
* ordered in vendor id, subvendor id, subdevice id, and device id
*/
static struct aac_card_type aac_cards[] = {
"Dell", "PERC 3/Di"},
"Dell", "PERC 3/Di"},
"Dell", "PERC 3/Si"},
"Dell", "PERC 3/Di"},
"Dell", "PERC 3/Si"},
"Dell", "PERC 3/Di"},
"Dell", "PERC 3/Di"},
"Dell", "PERC 3/Di"},
"Dell", "PERC 3/Di"},
"Dell", "PERC 3/Di"},
"Dell", "PERC 320/DC"},
"Adaptec", "2200S"},
"Adaptec", "2120S"},
"Adaptec", "2200S"},
{0, 0, 0, 0, AAC_HWIF_UNKNOWN,
};
/*
* Hardware access functions for i960 based cards
*/
static struct aac_interface aac_rx_interface = {
};
/*
* Hardware access functions for Rocket based cards
*/
static struct aac_interface aac_rkt_interface = {
};
};
static struct {
int size;
int notify;
} aac_qinfo[] = {
};
/*
* Default aac dma attributes
*/
static ddi_dma_attr_t aac_dma_attr = {
0, /* lowest usable address */
0xffffffffull, /* high DMA address range */
0xffffffffull, /* DMA counter register */
AAC_DMA_ALIGN, /* DMA address alignment */
1, /* DMA burstsizes */
1, /* min effective DMA size */
0xffffffffull, /* max DMA xfer size */
0xffffffffull, /* segment boundary */
1, /* s/g list length */
AAC_BLK_SIZE, /* granularity of device */
0 /* DMA transfer flags */
};
struct aac_drinfo {
struct aac_softstate *softs;
int tgt;
int lun;
int event;
};
/*
* Warlock directives
*
* Different variables with the same types have to be protected by the
* same mutex; otherwise, warlock will complain with "variables don't
* seem to be protected consistently". For example,
* aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected
* by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
* declare them as protected explicitly at aac_cmd_dequeue().
*/
aac_sge))
int
_init(void)
{
int rval = 0;
#ifdef DEBUG
#endif
sizeof (struct aac_softstate), 0)) != 0)
goto error;
ddi_soft_state_fini((void *)&aac_softstatep);
goto error;
}
ddi_soft_state_fini((void *)&aac_softstatep);
goto error;
}
return (rval);
#ifdef DEBUG
#endif
return (rval);
}
int
{
}
/*
* An HBA driver cannot be unloaded unless you reboot,
* so this function will be of no use.
*/
int
_fini(void)
{
int rval;
goto error;
ddi_soft_state_fini((void *)&aac_softstatep);
#ifdef DEBUG
#endif
return (0);
return (rval);
}
static int
{
int instance, i;
int attach_state = 0;
char *data;
int intr_types;
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
return (DDI_FAILURE);
default:
return (DDI_FAILURE);
}
/* Get soft state */
goto error;
}
#ifdef DEBUG
#endif
/* Initialize FMA */
/* Check the card type */
goto error;
}
/* We have found the right card and everything is OK */
/* Map PCI mem space */
goto error;
/* Get the type of device interrupts */
"ddi_intr_get_supported_types() failed");
goto error;
}
"ddi_intr_get_supported_types() ret: 0x%x", intr_types);
if (intr_types & DDI_INTR_TYPE_MSI) {
!= DDI_SUCCESS) {
"MSI interrupt query failed");
goto error;
}
} else if (intr_types & DDI_INTR_TYPE_FIXED) {
!= DDI_SUCCESS) {
"FIXED interrupt query failed");
goto error;
}
} else {
"Device cannot suppport both FIXED and MSI interrupts");
goto error;
}
/* Init mutexes */
/* Check for legacy device naming support */
}
}
/*
* Everything has been set up till now,
* we will do some common attach.
*/
goto error;
/* Init the cmd queues */
for (i = 0; i < AAC_CMDQ_NUM; i++)
goto error;
/* Connect interrupt handlers */
"Can not setup soft interrupt handler!");
goto error;
}
"Interrupt registration failed, intr type: %s",
goto error;
}
goto error;
}
DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
goto error;
}
/* Create aac node for app. to issue ioctls */
DDI_PSEUDO, 0) != DDI_SUCCESS) {
goto error;
}
/* Create a taskq for dealing with dr events */
TASKQ_DEFAULTPRI, 0)) == NULL) {
goto error;
}
/* Create a thread for command timeout */
/* Common attach is OK, so we are attached! */
return (DDI_SUCCESS);
if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
(void) scsi_hba_detach(dip);
}
if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
}
return (DDI_FAILURE);
}
static int
{
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
return (DDI_FAILURE);
default:
return (DDI_FAILURE);
}
softs->timeout_id = 0;
(void) scsi_hba_detach(dip);
return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
{
(void) aac_shutdown(softs);
return (DDI_SUCCESS);
}
/*
* Bring the controller down to a dormant state and detach all child devices.
* This function is called before detach or system shutdown.
* Note: we can assume that the q_wait on the controller is empty, as we
* won't allow shutdown if any device is open.
*/
static int
{
int rval;
/* Flush all caches, set FW to write through mode */
AAC_FIB_SIZEOF(struct aac_close_command));
return (rval);
}
static uint_t
{
return (DDI_INTR_CLAIMED);
} else {
return (DDI_INTR_UNCLAIMED);
}
}
/*
* Setup auto sense data for pkt
*/
static void
{
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_statistics = 0;
if (info <= 0xfffffffful) {
} else { /* 64-bit LBA */
struct scsi_descr_sense_hdr *dsp;
struct scsi_information_sense_descr *isd;
sizeof (struct scsi_information_sense_descr);
}
}
/*
* Setup auto sense data for HARDWARE ERROR
*/
static void
{
}
/*
* Setup auto sense data for UNIT ATTENTION
*/
/*ARGSUSED*/
static void
{
}
}
/*
* Send a command to the adapter in New Comm. interface
*/
static int
{
if (index == 0xffffffffUL) {
if (index == 0xffffffffUL)
return (AACERR);
}
device += 4;
device += 4;
return (AACOK);
}
static void
{
if (dvp) {
softs->total_slots);
}
} else { /* cmd in waiting queue */
}
}
}
static void
{
index >>= 2;
/* Make sure firmware reported index is valid */
"Firmware error: invalid slot index received from FW");
return;
}
/*
* For fast response IO, the firmware does not return any FIB
* data, so we need to fill in the FIB status and state so that
* FIB users can handle it correctly.
*/
if (fast) {
/*
* Update state for CPU not for device, no DMA sync
* needed
*/
}
/* Handle completed ac */
} else {
}
}
}
/*
* Interrupt handler for New Comm. interface
* The New Comm. interface uses a different mechanism for interrupts: no
* explicit message queues; the driver only needs to access the mapped
* PCI mem space to
* find the completed FIB or AIF.
*/
static int
{
if (index == 0xfffffffful)
return (0);
}
if (index != 0xfffffffful) {
do {
if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
} else if (index != 0xfffffffeul) {
/*
* 0xfffffffe means that the controller wants
* more work, ignore it for now. Otherwise,
* AIF received.
*/
index &= ~2;
/*
* Copy AIF from adapter to the empty AIF slot
*/
fib_size);
if (aac_check_acc_handle(softs-> \
else
/*
* AIF memory is owned by the adapter, so let it
* know that we are done with it.
*/
}
} while (index != 0xfffffffful);
/*
* Process waiting cmds before starting new ones to
* ensure first IOs are serviced first.
*/
return (AAC_DB_COMMAND_READY);
} else {
return (0);
}
}
static uint_t
{
if (aac_process_intr_new(softs))
else
return (rval);
}
/*
* Interrupt handler for old interface
* Explicit message queues are used to send FIB to and get completed FIB from
* the adapter in a producer/consumer manner. The driver has to query the
* queues to find the completed FIB.
*/
static int
{
return (DDI_INTR_UNCLAIMED);
}
if (status & AAC_DB_RESPONSE_READY) {
int slot_idx;
/* ACK the intr */
(void) AAC_STATUS_GET(softs);
/*
* Process waiting cmds before starting new ones to
* ensure first IOs are serviced first.
*/
return (AAC_DB_RESPONSE_READY);
} else if (status & AAC_DB_COMMAND_READY) {
int aif_idx;
(void) AAC_STATUS_GET(softs);
AACOK) {
offsetof(struct aac_comm_space, \
(type)); }
/* Copy AIF from adapter to the empty AIF slot */
/* Complete AIF back to adapter with good status */
if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
if (fib_size0 > AAC_FIB_SIZE)
}
/* Put the AIF response on the response queue */
}
return (AAC_DB_COMMAND_READY);
} else if (status & AAC_DB_PRINTF_READY) {
/* ACK the intr */
(void) AAC_STATUS_GET(softs);
else
return (AAC_DB_PRINTF_READY);
} else if (status & AAC_DB_COMMAND_NOT_FULL) {
/*
* Without these two condition statements, the OS could hang
* after a while, especially if there are a lot of AIF's to
* handle, for instance if a drive is pulled from an array
* under heavy load.
*/
return (AAC_DB_COMMAND_NOT_FULL);
} else if (status & AAC_DB_RESPONSE_NOT_FULL) {
return (AAC_DB_RESPONSE_NOT_FULL);
} else {
return (0);
}
}
static uint_t
{
int rval;
if (aac_process_intr_old(softs))
else
return (rval);
}
/*
* Query FIXED or MSI interrupts
*/
static int
{
"aac_query_intrs:interrupt type 0x%x", intr_type);
/* Get number of interrupts */
"ddi_intr_get_nintrs() failed, ret %d count %d",
return (DDI_FAILURE);
}
/* Get number of available interrupts */
"ddi_intr_get_navail() failed, ret %d avail %d",
return (DDI_FAILURE);
}
"ddi_intr_get_nvail returned %d, navail() returned %d",
/* Allocate an array of interrupt handles */
if (intr_type == DDI_INTR_TYPE_MSI) {
} else { /* must be DDI_INTR_TYPE_FIXED */
}
/* Call ddi_intr_alloc() */
"ddi_intr_alloc() failed, ret = %d", ret);
actual = 0;
goto error;
}
goto error;
}
/* Get priority for first msi, assume remaining are all the same */
"ddi_intr_get_pri() failed, ret = %d", ret);
goto error;
}
/* Test for high level mutex */
"aac_query_intrs: Hi level interrupt not supported");
goto error;
}
return (DDI_SUCCESS);
/* Free already allocated intr */
for (i = 0; i < actual; i++)
return (DDI_FAILURE);
}
/*
* Register FIXED or MSI interrupts, and enable them
*/
static int
{
int i, ret;
/* Call ddi_intr_add_handler() */
for (i = 0; i < actual; i++) {
"ddi_intr_add_handler() failed ret = %d", ret);
/* Free already allocated intr */
for (i = 0; i < actual; i++)
return (DDI_FAILURE);
}
}
!= DDI_SUCCESS) {
/* Free already allocated intr */
for (i = 0; i < actual; i++)
return (DDI_FAILURE);
}
/* Enable interrupts */
/* for MSI block enable */
} else {
}
return (DDI_SUCCESS);
}
/*
* Unregister FIXED or MSI interrupts
*/
static void
{
int i;
/* Disable all interrupts */
/* Call ddi_intr_block_disable() */
} else {
}
/* Call ddi_intr_remove_handler() */
}
}
/*
* Set pkt_reason and OR in pkt_statistics flag
*/
static void
{
#ifndef __lock_lint
#endif
}
/*
* Handle a finished pkt of soft SCMD
*/
static void
{
/* AAC_CMD_NO_INTR means no complete callback */
}
}
/*
* Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
*/
/*
* Handle completed logical device IO command
*/
/*ARGSUSED*/
static void
{
struct aac_blockread_response *resp;
/*
* block_read/write has a similar response header, use blockread
* response for both.
*/
} else {
}
}
/*
* Handle completed phys. device IO command
*/
static void
{
struct aac_srb_reply *resp;
/* First check FIB status */
if (resp_status == ST_OK) {
if (scsi_status == STATUS_GOOD) {
/* Next check SRB status */
switch (srb_status & 0x3f) {
case SRB_STATUS_DATA_OVERRUN:
"scmd=%d, xfer=%d, buflen=%d",
switch (cmd) {
case SCMD_READ:
case SCMD_WRITE:
case SCMD_READ_G1:
case SCMD_WRITE_G1:
case SCMD_READ_G4:
case SCMD_WRITE_G4:
case SCMD_READ_G5:
case SCMD_WRITE_G5:
CMD_DATA_OVR, 0);
break;
}
/*FALLTHRU*/
case SRB_STATUS_PENDING:
case SRB_STATUS_SUCCESS:
/*
* pkt_resid should only be calculated if the
* status is ERROR_RECOVERY/PENDING/SUCCESS/
* DATA_OVERRUN.
*/
if (data_xfer_length) {
}
break;
case SRB_STATUS_ABORTED:
"SRB_STATUS_ABORTED, xfer=%d, resid=%d",
break;
case SRB_STATUS_ABORT_FAILED:
"SRB_STATUS_ABORT_FAILED, xfer=%d, " \
"resid=%d", data_xfer_length,
0);
break;
case SRB_STATUS_PARITY_ERROR:
"SRB_STATUS_PARITY_ERROR, xfer=%d, " \
"resid=%d", data_xfer_length,
break;
case SRB_STATUS_NO_DEVICE:
case SRB_STATUS_INVALID_LUN:
#ifdef DEBUG
"SRB_STATUS_NO_DEVICE(%d), " \
"xfer=%d, resid=%d ",
srb_status & 0x3f,
}
#endif
break;
case SRB_STATUS_TIMEOUT:
"SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
"resid=%d", data_xfer_length,
break;
case SRB_STATUS_BUS_RESET:
"SRB_STATUS_BUS_RESET, xfer=%d, " \
"resid=%d", data_xfer_length,
break;
default:
break;
}
} else if (scsi_status == STATUS_CHECK) {
/* CHECK CONDITION */
struct scsi_arq_status *arqstat =
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_statistics = 0;
&resp->sense_data_size);
"CHECK CONDITION: sense len=%d, xfer len=%d",
if (sense_data_size > SENSE_LENGTH)
} else {
"scsi_status=%d, srb_status=%d",
}
} else {
}
}
/*
* Handle completed IOCTL command
*/
/*ARGSUSED*/
void
{
/*
* NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
* may wait on softs->event, so use cv_broadcast() instead
* of cv_signal().
*/
/* Get the size of the response FIB from its FIB.Header.Size field */
}
/*
* Handle completed Flush command
*/
/*ARGSUSED*/
static void
{
struct aac_synchronize_reply *resp;
}
/*
* Access PCI space to see if the driver can support the card
*/
static int
{
int card_index;
/* Map pci configuration space */
DDI_SUCCESS) {
return (AACERR);
}
card_index = 0;
while (!CARD_IS_UNKNOWN(card_index)) {
break;
}
card_index++;
}
/*
* Unknown aac card
* do a generic match based on the VendorID and DeviceID to
* support the new cards in the aac family
*/
if (CARD_IS_UNKNOWN(card_index)) {
goto error;
}
case 0x285:
break;
case 0x286:
break;
default:
goto error;
}
}
/* Set hardware dependent interface */
case AAC_HWIF_I960RX:
break;
case AAC_HWIF_RKT:
break;
default:
goto error;
}
/* Set card names */
/* Set up quirks */
/* Force the busmaster enable bit on */
if ((pci_cmd & PCI_COMM_ME) == 0) {
pci_cmd |= PCI_COMM_ME;
if ((pci_cmd & PCI_COMM_ME) == 0) {
goto error;
}
}
/* Set memory base to map */
return (AACOK); /* card type detected */
return (AACERR); /* no matched card found */
}
/*
* Check the firmware to determine the features to support and the FIB
* parameters to use.
*/
static int
{
/* Get supported options */
if (status != SRB_STATUS_INVALID_REQUEST) {
"?Fatal error: request adapter info error");
return (AACERR);
}
options = 0;
atu_size = 0;
} else {
}
return (AACOK);
"?Fatal error: firmware changed, system needs reboot");
return (AACERR);
}
/*
* The following critical settings are initialized only once during
* driver attachment.
*/
/* Process supported options */
if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
} else {
/*
* Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
* only. IO is handled by the DMA engine which does not suffer
* from the ATU window programming workarounds necessary for
* CPU copy operations.
*/
}
if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
}
if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
}
if (options & AAC_SUPPORTED_NONDASD) {
"!Enable Non-DASD access");
}
}
}
/* Read preferred settings */
max_fib_size = 0;
}
/* Enable new comm. and rawio at the same time */
(max_fib_size != 0)) {
/* read out and save PCI MBR */
&pci_handle) == DDI_SUCCESS)) {
}
"!Enable New Comm. interface");
}
}
/* Set FIB parameters */
} else {
sizeof (struct aac_blockwrite64) +
sizeof (struct aac_sg_entry64)) /
sizeof (struct aac_sg_entry64);
else
sizeof (struct aac_blockwrite) +
sizeof (struct aac_sg_entry)) /
sizeof (struct aac_sg_entry);
}
}
/*
* 64K maximum segment size in scatter gather list is controlled by
* the NEW_COMM bit in the adapter information. If not set, the card
* can only accept a maximum of 64K. It is not recommended to permit
* more than 128KB of total transfer size to the adapters because
* performance is negatively impacted.
*
* For new comm, segment size equals max xfer size. For old comm,
* we use 64K for both.
*/
/* Setup FIB operations */
else
/* 64-bit LBA needs descriptor format sense data */
"!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
return (AACOK);
}
static void
{
}
/*
* The following function comes from Adaptec:
*
* Query adapter information and supplement adapter information
*/
static int
{
struct aac_adapter_info *ainfp;
struct aac_supplement_adapter_info *sinfp;
sizeof (struct aac_fib_header)) != AACOK) {
return (AACERR);
}
if (ainfr) {
&ainfr->KernelRevision);
&ainfr->MonitorRevision);
&ainfr->BIOSRevision);
}
if (sinfr) {
if (!(softs->support_opt &
"SupplementAdapterInfo not supported");
return (AACERR);
}
sizeof (struct aac_fib_header)) != AACOK) {
"RequestSupplementAdapterInfo failed");
return (AACERR);
}
sizeof (struct vpd_info));
}
return (AACOK);
}
static int
{
struct aac_ctcfg_resp *c_resp;
struct aac_bus_info *cmd;
struct aac_bus_info_response *resp;
int rval;
/* Detect MethodId */
AAC_FIB_SIZEOF(struct aac_ctcfg));
"VM_ContainerConfig command fail");
return (AACERR);
}
/* Detect phys. bus count and max. target id first */
/*
* For VM_Ioctl, the firmware uses the Header.Size filled in by the
* driver as the size to be returned. Therefore the driver has to use
* sizeof (struct aac_bus_info_response) because it is greater than
* sizeof (struct aac_bus_info).
*/
AAC_FIB_SIZEOF(struct aac_bus_info_response));
/* Scan all coordinates with INQUIRY */
return (AACERR);
}
return (AACOK);
}
/*
* The following function comes from Adaptec:
*
* Routine to be called during initialization of communications with
* the adapter to handle possible adapter configuration issues. When
* the adapter first boots up, it examines attached drives, etc, and
* potentially comes up with a new or revised configuration (relative to
* what's stored in its NVRAM). Additionally it may discover problems
* that make the current physical configuration unworkable (currently
* applicable only to cluster configuration issues).
*
* If there are no configuration issues or the issues are considered
* trivial by the adapter, it will set its configuration status to
* "FSACT_CONTINUE" and execute the "commit configuration" action
* automatically on its own.
*
* However, if there are non-trivial issues, the adapter will set its
* internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
* and wait for some agent on the host to issue the "\ContainerCommand
* \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
* un-inhibited operation. The host agent should first issue the
* "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
* command to obtain information about config issues detected by
* the adapter.
*
* Normally the adapter's PC BIOS will execute on the host following
* adapter power-on and reset and will be responsible for querying the
* adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
* command if appropriate.
*
* However, with the introduction of IOP reset support, the adapter may
* boot up without the benefit of the adapter's PC BIOS host agent.
* This routine is intended to take care of these issues in situations
* where BIOS doesn't execute following adapter power-on or reset. The
* CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
* there is no harm in doing this when it's already been done.
*/
static int
{
struct aac_Container *cmd;
struct aac_Container_resp *resp;
struct aac_cf_status_header *cfg_sts_hdr;
int rval;
/* Get adapter config status */
sizeof (struct aac_cf_status_header));
AAC_FIB_SIZEOF(struct aac_Container));
/* Commit configuration if it's reasonable to do so. */
if (cfg_stat_action <= CFACT_PAUSE) {
AAC_FIB_SIZEOF(struct aac_Container));
/* Successful completion */
else
/* Auto-commit aborted due to error(s). */
} else {
/*
* Auto-commit aborted due to adapter indicating
* configuration issue(s) too dangerous to auto-commit.
*/
}
} else {
}
return (rval);
}
/*
* Hardware initialization and resource allocation
*/
static int
{
int i;
/*
* Do a little check here to make sure there aren't any outstanding
* FIBs in the message queue. At this point there should not be and
* if there are they are probably left over from another instance of
* the driver like when the system crashes and the crash dump driver
* gets loaded.
*/
;
/*
* Wait for the card to complete booting up before doing anything that
* attempts to communicate with it.
*/
goto error;
if (i == 0) {
goto error;
}
/* Read and set card supported options and settings */
goto error;
}
/* Clear out all interrupts */
AAC_STATUS_CLR(softs, ~0);
/* Setup communication space with the card */
goto error;
}
goto error;
}
#ifdef DEBUG
#endif
/* Allocate slots */
goto error;
}
/* Allocate FIBs */
if (softs->total_fibs == 0)
goto error;
softs->total_fibs);
}
/* Get adapter names */
struct aac_supplement_adapter_info sinf;
} else {
/*
* Now find the controller name in supp_adapter_info->
* AdapterTypeText. Use the first word as the vendor
* and the other words as the product name.
*/
p = sinf.AdapterTypeText;
/* Skip leading spaces */
while (*p && (*p == ' ' || *p == '\t'))
p++;
p0 = p;
while (*p && (*p != ' ' && *p != '\t'))
p++;
/* Remove middle spaces */
while (*p && (*p == ' ' || *p == '\t'))
*p++ = 0;
p1 = p;
/* Remove trailing spaces */
*p-- = 0;
} else {
"?adapter name mis-formatted\n");
if (*p0)
}
}
}
"!aac driver %d.%02d.%02d-%d, found card: " \
"%s %s(pci0x%x.%x.%x.%x) at 0x%x",
/* Perform acceptance of adapter-detected config changes if possible */
goto error;
}
/* Setup containers (logical devices) */
goto error;
}
/* Setup phys. devices */
int index;
goto error;
}
"?Fatal error: bus map changed");
goto error;
}
sizeof (struct aac_nondasd));
}
sizeof (struct aac_nondasd), KM_SLEEP);
index = 0;
struct aac_nondasd *dvp =
}
}
}
}
/* Check dma & acc handles allocated in attach */
goto error;
}
goto error;
}
for (i = 0; i < softs->total_slots; i++) {
DDI_SUCCESS) {
goto error;
}
}
return (AACOK);
return (AACERR);
sizeof (struct aac_nondasd));
}
if (softs->total_fibs > 0)
if (softs->total_slots > 0)
if (softs->comm_space_dma_handle)
return (AACERR);
}
/*
* Hardware shutdown and resource release
*/
static void
{
(void) aac_shutdown(softs);
sizeof (struct aac_nondasd));
}
}
/*
* Send a synchronous command to the controller and wait for a result.
* Indicate if the controller completed the command with an error status.
*/
int
{
int timeout;
/* Fill in mailbox */
/* Ensure the sync command doorbell flag is cleared */
/* Then set it to signal the adapter */
/* Spin waiting for the command to complete */
if (!timeout) {
"Sync command timed out after %d seconds (0x%x)!",
return (AACERR);
}
/* Clear the completion flag */
/* Get the command status */
if (status != SRB_STATUS_SUCCESS) {
"Sync command fail: status = 0x%x", status);
return (AACERR);
}
return (AACOK);
}
/*
* Send a synchronous FIB to the adapter and wait for its completion
*/
static int
{
int rval;
/* Sync fib only supports 512 bytes */
if (fibsize > AAC_FIB_SIZE)
return (AACERR);
/*
* Setup sync fib
* There is no need to reinitialize the FIB header if it has already been
* filled in by others, such as aac_cmd_fib_scsi via aac_cmd.
*/
/* Give the FIB to the controller, wait for a response. */
"Send sync fib to controller failed");
return (AACERR);
}
return (AACERR);
}
return (AACOK);
}
static void
aac_cmd_initq(struct aac_cmd_queue *q)
{
}
/*
* Remove a cmd from the head of q
*/
static struct aac_cmd *
aac_cmd_dequeue(struct aac_cmd_queue *q)
{
_NOTE(ASSUMING_PROTECTED(*q))
else
}
return (acp);
}
/*
* Add a cmd to the tail of q
*/
static void
{
}
/*
* Remove the cmd ac from q
*/
static void
{
} else {
}
}
/* ac is not in the queue */
}
/*
* Atomically insert an entry into the nominated queue, returns 0 on success or
* AACERR if the queue is full.
*
* Note: it would be more efficient to defer notifying the controller in
* the case where we may be inserting several entries in rapid succession,
* but implementing this usefully may be difficult (it would involve a
* separate queue/notify interface).
*/
static int
{
return (AACERR);
}
/*
* Wrap the queue first before we check the queue to see
* if it is full
*/
pi = 0;
/* XXX queue full */
return (AACERR);
/* Fill in queue entry */
/* Update producer index */
pi + 1);
(void) ddi_dma_sync(dma,
return (AACOK);
}
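/*
* Ring arithmetic sketch for the enqueue above (illustrative only): the
* producer index pi wraps to 0 once it reaches the queue size, the queue
* is considered full when the wrapped pi would catch up with the consumer
* index, and on success the entry is written at pi, pi + 1 is stored back
* as the new producer index, and the adapter is notified per aac_qinfo[].
*/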
/*
* Atomically remove one entry from the nominated queue, returns 0 on
* success or AACERR if the queue is empty.
*/
static int
{
int unfull = 0;
/* Check for queue empty */
return (AACERR);
pi = 0;
/* Check for queue full */
unfull = 1;
/*
* The controller does not wrap the queue,
* so we have to do it by ourselves
*/
ci = 0;
/* Fetch the entry */
return (AACERR);
}
switch (queue) {
case AAC_HOST_NORM_RESP_Q:
case AAC_HOST_HIGH_RESP_Q:
break;
case AAC_HOST_NORM_CMD_Q:
case AAC_HOST_HIGH_CMD_Q:
break;
default:
return (AACERR);
}
/* Update consumer index */
ci + 1);
(void) ddi_dma_sync(dma,
return (AACOK);
}
/*
* Request information of the container cid
*/
static struct aac_mntinforesp *
{
struct aac_mntinforesp *mir;
return (NULL);
}
return (mir);
return (NULL);
}
static int
{
struct aac_mntinforesp *mir;
return (AACERR);
if (*count > AAC_MAX_LD) {
"container count(%d) > AAC_MAX_LD", *count);
return (AACERR);
}
return (AACOK);
}
static int
{
return (AACERR);
return (AACERR);
return (AACOK);
}
static int
{
struct aac_mntinforesp *mir;
/* Get container basic info */
return (AACERR);
">>> Container %d deleted", cid);
}
} else {
/* Get container UID */
"query container %d uid failed", cid);
return (AACERR);
}
">>> Container %u uid changed to %d",
}
">>> Container %u size changed to %"PRIu64,
}
} else { /* Init new container */
">>> Container %d added: " \
"size=0x%x.%08x, type=%d, name=%s",
cid,
}
}
return (AACOK);
}
/*
* Do a rescan of all the possible containers and update the container list
*/
static int
{
/* Loop over possible containers */
return (AACERR);
total++;
}
struct aac_container *dvp;
continue;
}
}
return (AACOK);
}
static int
{
/* Allocate DMA for comm. space */
if (ddi_dma_alloc_handle(
NULL,
"Cannot alloc dma handle for communication area");
goto error;
}
if (ddi_dma_mem_alloc(
sizeof (struct aac_comm_space),
NULL,
&rlen,
"Cannot alloc mem for communication area");
goto error;
}
NULL,
sizeof (struct aac_comm_space),
NULL,
&cookie,
&cookien) != DDI_DMA_MAPPED) {
"DMA bind failed for communication area");
goto error;
}
/* Setup sync FIB space */
return (AACOK);
if (softs->comm_space_acc_handle) {
}
if (softs->comm_space_dma_handle) {
}
return (AACERR);
}
static void
{
}
/*
* Initialize the data structures that are required for the communication
* interface to operate
*/
static int
{
struct aac_adapter_init *initp;
int qoffset;
/* Setup adapter init struct */
/* Setup the preferred settings */
} else {
/*
* Tells the adapter about the physical location of various
* important shared data structures
*/
qoffset = (comm_space_phyaddr + \
if (qoffset)
/* Init queue table */
/* Init queue entries */
}
/* Send init structure to the card */
"Cannot send init structure to adapter");
return (AACERR);
}
return (AACOK);
}
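/*
* Alignment note (illustrative): if the queue table's physical address
* leaves a remainder r (0 < r < alignment) with respect to the required
* queue alignment, advancing it by (alignment - r) bytes yields the next
* aligned address; a remainder of 0 needs no adjustment, which is why
* qoffset is only applied when non-zero.
*/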
static uchar_t *
{
return (buf + AAC_VENDOR_LEN);
}
static uchar_t *
{
return (buf + AAC_PRODUCT_LEN);
}
/*
* Construct unit serial number from container uid
*/
static uchar_t *
{
int i, d;
for (i = 7; i >= 0; i--) {
d = uid & 0xf;
uid >>= 4;
}
return (buf + 8);
}
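/*
* Example (assuming the elided loop body converts the low nibble d to an
* ASCII hex digit and stores it at buf[i]): a container uid of 0x1A2B3C4D
* is rendered as the 8 characters "1A2B3C4D", most significant nibble
* first, which is why i counts down from 7 while uid is shifted right by
* 4 each pass.
*/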
/*
* SPC-3 7.5 INQUIRY command implementation
*/
static void
{
/* Command Support Data is not supported */
return;
}
}
/* SPC-3 8.4 Vital product data parameters */
switch (page) {
case 0x00:
/* Supported VPD pages */
return;
break;
case 0x80:
/* Unit serial number page */
return;
break;
case 0x83:
/* Device identification page */
return;
/*
* SPC-3 Table 111 - Identifier type
* One recommended method of constructing the remainder
* of the identifier field is to concatenate the product
* identification field from the standard INQUIRY data
* field and the product serial number field from the
* unit serial number page.
*/
break;
default:
0x24, 0x00, 0);
break;
}
} else {
if (page != 0) {
0x24, 0x00, 0);
return;
}
return;
/*
* For "sd-max-xfer-size" property which may impact performance
* when IO threads increase.
*/
}
}
/*
* SPC-3 7.10 MODE SENSE command implementation
*/
static void
{
struct mode_header *headerp;
struct mode_header_g1 *g1_headerp;
unsigned int ncyl;
int unsupport_page = 0;
return;
/* calculate the size of needed buffer */
else /* must be SCMD_MODE_SENSE_G1 */
pages_size = 0;
switch (pagecode) {
case SD_MODE_SENSE_PAGE3_CODE:
pages_size += sizeof (struct mode_format);
break;
case SD_MODE_SENSE_PAGE4_CODE:
pages_size += sizeof (struct mode_geometry);
break;
case MODEPAGE_CTRL_MODE:
pages_size += sizeof (struct mode_control_scsi3);
} else {
unsupport_page = 1;
}
break;
case MODEPAGE_ALLPAGES:
pages_size += sizeof (struct mode_format) +
sizeof (struct mode_geometry) +
sizeof (struct mode_control_scsi3);
} else {
pages_size += sizeof (struct mode_format) +
sizeof (struct mode_geometry);
}
break;
default:
/* unsupported pages */
unsupport_page = 1;
}
/* allocate buffer to fill the send data */
sdata_size += pages_size;
headerp->bdesc_length = 0;
} else {
g1_headerp = (void *)sense_data;
sizeof (g1_headerp->length));
g1_headerp->bdesc_length = 0;
}
if (unsupport_page)
goto finish;
if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
pagecode == MODEPAGE_ALLPAGES) {
/* SBC-3 7.1.3.3 Format device page */
struct mode_format *page3p;
next_page += sizeof (struct mode_format);
}
if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
pagecode == MODEPAGE_ALLPAGES) {
/* SBC-3 7.1.3.8 Rigid disk device geometry page */
struct mode_geometry *page4p;
next_page += sizeof (struct mode_geometry);
}
/* 64-bit LBA need large sense data */
struct mode_control_scsi3 *mctl;
sizeof (struct mode_control_scsi3) -
sizeof (struct mode_page);
}
/* copyout the valid data. */
}
static int
{
if (tgt == -1)
return (DDI_FAILURE);
if (lun == -1)
return (DDI_FAILURE);
return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
{
#if defined(DEBUG) || defined(__lock_lint)
#endif
struct aac_device *dvp;
if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
/*
* If no persistent node exist, we don't allow .conf node
* to be created.
*/
/* Create this .conf node */
return (DDI_SUCCESS);
}
return (DDI_FAILURE);
}
/*
* Only support a container/phys. device that has been
* detected and is valid
*/
return (DDI_FAILURE);
}
if (tgt < AAC_MAX_LD) {
return (DDI_FAILURE);
}
/*
* Save the tgt_dip for the given target if one doesn't exist
* already. Dips for non-existent tgts will be cleared in
* tgt_free.
*/
} else {
}
return (DDI_SUCCESS);
}
static void
{
#ifndef __lock_lint
#endif
if (tgt < AAC_MAX_LD) {
} else {
}
}
/*
* Check if the firmware is Up And Running. If it is in the Kernel Panic
* state, (BlinkLED code + 1) is returned.
* 0 -- firmware up and running
* -1 -- firmware dead
* >0 -- firmware kernel panic
*/
static int
{
int rval;
if (rval & AAC_KERNEL_UP_AND_RUNNING) {
rval = 0;
} else if (rval & AAC_KERNEL_PANIC) {
} else {
rval = -1;
}
return (rval);
}
static void
{
/*
* Each lun should generate a unit attention
* condition when reset.
* Phys. drives are treated as logical ones
* during error recovery.
*/
}
switch (reason) {
case CMD_TIMEOUT:
acp);
break;
case CMD_RESET:
/* aac supports only RESET_ALL */
break;
case CMD_ABORTED:
acp);
break;
}
}
}
/*
* Abort all the pending commands of type iocmd or just the command pkt
* corresponding to pkt
*/
static void
int reason)
{
int i;
} else {
}
/*
* a) outstanding commands on the controller
* Note: should abort outstanding commands only after one
* IOP reset has been done.
*/
if (iocmd & AAC_IOCMD_OUTSTANDING) {
for (i = 0; i < AAC_MAX_LD; i++) {
}
}
/* b) commands in the waiting queues */
for (i = 0; i < AAC_CMDQ_NUM; i++) {
if (iocmd & (1 << i)) {
if (ac_arg) {
} else {
}
}
}
}
/*
* The draining thread is shared among quiesce threads. It terminates
* when the adapter is quiesced or stopped by aac_stop_drain().
*/
static void
aac_check_drain(void *arg)
{
softs->drain_timeid = 0;
/*
* If both ASYNC and SYNC bus throttle are held,
* wake up threads only when both are drained out.
*/
else
}
}
/*
* If not draining the outstanding cmds, drain them. Otherwise,
* only update ndrains.
*/
static void
{
}
}
/*
* Stop the draining thread when no other threads use it any longer.
* Side effect: io_lock may be released in the middle.
*/
static void
{
if (softs->drain_timeid != 0) {
softs->drain_timeid = 0;
}
}
}
/*
* The following function comes from Adaptec:
*
* Once an IOP reset is done, the driver basically has to re-initialize the
* card as if it were coming up from a cold boot, and the driver is
* responsible for any IO that was outstanding to the adapter at the time of
* the IOP RESET. Prepare for the IOP RESET by making the init code modular,
* with the ability to call it from multiple places.
*/
static int
{
int health;
int rval = AAC_IOP_RESET_FAILED;
/* Disable interrupt */
if (health == -1) {
goto finish;
}
if (health == 0) /* flush drives if possible */
(void) aac_shutdown(softs);
/* Execute IOP reset */
struct aac_pause_command *pc;
/*
* Sunrise Lake has dual cores and we must drag the
* other core with us to reset simultaneously. There
* are 2 bits in the Inbound Reset Control and Status
* Register (offset 0x38) of the Sunrise Lake to reset
* the chip without clearing out the PCI configuration
* info (COMMAND & BARS).
*/
/*
* We need to wait for 5 seconds before accessing the MU
* again: 10000 * 100us = 1,000,000us = 1000ms = 1s
*/
while (wait_count) {
wait_count--;
}
} else {
if (status == SRB_STATUS_INVALID_REQUEST)
else /* probably timeout */
/* Unwind aac_shutdown() */
AAC_FIB_SIZEOF(struct aac_pause_command));
if (aac_check_adapter_health(softs) != 0)
else
/*
* IOP reset not supported or IOP not reset
*/
goto finish;
}
}
/*
* Re-read and renegotiate the FIB parameters, as one of the actions
* that can result from an IOP reset is the running of a new firmware
* image.
*/
goto finish;
return (rval);
}
static void
int throttle)
{
/*
* If the bus is being drained or quiesced, no changes to the throttles
* are allowed. All throttles should have been set to 0.
*/
return;
}
static void
{
int i, q;
/* Hold bus by holding every device on the bus */
for (q = 0; q < AAC_CMDQ_NUM; q++) {
if (iocmds & (1 << q)) {
softs->bus_throttle[q] = 0;
for (i = 0; i < AAC_MAX_LD; i++)
for (i = 0; i < AAC_MAX_PD(softs); i++)
}
}
}
static void
{
int i, q;
for (q = 0; q < AAC_CMDQ_NUM; q++) {
if (iocmds & (1 << q)) {
/*
* Do not unhold the AAC_IOCMD_ASYNC bus if it has been
* quiesced or is being drained by quiesce threads.
*/
continue;
for (i = 0; i < AAC_MAX_LD; i++)
q, softs->total_slots);
for (i = 0; i < AAC_MAX_PD(softs); i++)
q, softs->total_slots);
}
}
}
static int
{
int health;
int rval;
/*
* Hold off new io commands and wait for all outstanding io
* commands to complete.
*/
if (health == 0) {
if (sync_cmds == 0 && async_cmds == 0) {
goto finish;
}
/*
* Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
* to complete the outstanding io commands
*/
int (*intr_handler)(struct aac_softstate *);
/*
* Poll the adapter ourselves in case interrupts are disabled
* and to avoid releasing the io_lock.
*/
drv_usecwait(100);
(void) intr_handler(softs);
timeout--;
}
/* Cmds drained out */
goto finish;
/* Cmds not drained out, adapter overloaded */
goto finish;
}
}
/*
* If a longer waiting time still can't drain any outstanding io
* commands, do IOP reset.
*/
return (rval);
}
static int
{
int rval;
return (0);
}
case AAC_IOP_RESET_SUCCEED:
break;
case AAC_IOP_RESET_FAILED:
/* Abort IOCTL cmds when adapter is dead */
break;
case AAC_IOP_RESET_ABNORMAL:
}
return (rval == 0);
}
static int
{
return (1);
}
void
{
/* Free dma mapping */
}
}
if (acp->buf_dma_handle) {
}
}
static void
{
}
/*
* Handle command to logical device
*/
static int
{
struct aac_container *dvp;
int rval;
case SCMD_INQUIRY: /* inquiry */
rval = TRAN_ACCEPT;
break;
case SCMD_READ_CAPACITY: /* read capacity */
struct scsi_capacity cap;
/* check 64-bit LBA */
if (last_lba > 0xffffffffull) {
} else {
}
}
rval = TRAN_ACCEPT;
break;
case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
/* Check if containers need 64-bit LBA support */
struct scsi_capacity_16 cap16;
int cap_len = sizeof (struct scsi_capacity_16);
}
} else {
}
rval = TRAN_ACCEPT;
break;
case SCMD_READ_G4: /* read_16 */
case SCMD_WRITE_G4: /* write_16 */
/* NOTE: GETG4ADDRTL(cdbp) is int32_t */
goto do_io;
}
rval = TRAN_ACCEPT;
break;
case SCMD_READ: /* read_6 */
case SCMD_WRITE: /* write_6 */
goto do_io;
case SCMD_READ_G5: /* read_12 */
case SCMD_WRITE_G5: /* write_12 */
goto do_io;
case SCMD_READ_G1: /* read_10 */
case SCMD_WRITE_G1: /* write_10 */
/*
* If LBA > array size AND rawio, the
* adapter may hang. So check it before
* sending.
* NOTE: (blkno + blkcnt) may overflow
*/
AAC_BLK_SIZE) <= cnt_size)) {
} else {
/*
* Request exceeds the capacity of disk,
* set error block number to last LBA
* + 1.
*/
KEY_ILLEGAL_REQUEST, 0x21,
0x00, cnt_size);
rval = TRAN_ACCEPT;
}
/* For 0 length IO, just return ok */
rval = TRAN_ACCEPT;
} else {
rval = TRAN_BADPKT;
}
break;
case SCMD_MODE_SENSE: /* mode_sense_6 */
case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
int capacity;
else
rval = TRAN_ACCEPT;
break;
}
case SCMD_TEST_UNIT_READY:
case SCMD_REQUEST_SENSE:
case SCMD_FORMAT:
case SCMD_START_STOP:
}
}
rval = TRAN_ACCEPT;
break;
case SCMD_SYNCHRONIZE_CACHE:
break;
case SCMD_DOORLOCK:
rval = TRAN_ACCEPT;
break;
default: /* unknown command */
rval = TRAN_ACCEPT;
break;
}
return (rval);
}
static int
{
int rval;
/*
* Reinitialize some fields of ac and pkt; the packet may
* have been resubmitted
*/
#ifdef DEBUG
#endif
pkt->pkt_statistics = 0;
/* Consistent packets need to be sync'ed first */
return (TRAN_BADPKT);
}
} else {
}
else
goto error;
} else {
}
} else {
#ifdef DEBUG
"Cannot send cmd to target t%dL%d: %s",
"target invalid");
} else {
"Cannot send cmd to target t%dL%d: %s",
"adapter dead");
}
#endif
}
return (rval);
}
static int
{
struct aac_device *dvp;
int rval;
/* We don't allow inquiring about capabilities for other targets */
return (-1);
}
return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_ARQ: /* auto request sense */
rval = 1;
break;
case SCSI_CAP_UNTAGGED_QING:
case SCSI_CAP_TAGGED_QING:
rval = 1;
break;
case SCSI_CAP_DMA_MAX:
break;
default:
rval = -1;
break;
}
return (rval);
}
/*ARGSUSED*/
static int
{
struct aac_device *dvp;
int rval;
/* We don't allow inquiring about capabilities for other targets */
return (-1);
}
return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_ARQ:
/* Force auto request sense */
break;
case SCSI_CAP_UNTAGGED_QING:
case SCSI_CAP_TAGGED_QING:
break;
default:
rval = -1;
break;
}
return (rval);
}
static void
{
acp->left_cookien);
}
}
int
{
int bioerr;
int rval;
/* Move window to build s/g map */
if (acp->total_nwin > 0) {
if (rval == DDI_SUCCESS)
goto get_dma_cookies;
"ddi_dma_getwin() fail %d", rval);
return (AACERR);
}
return (AACERR);
}
/* We need to transfer data, so we alloc DMA resources for this pkt */
/*
* We will still use this point to fake some
* information in tran_start
*/
/* Set dma flags */
if (BUF_IS_READ(bp)) {
} else {
}
if (flags & PKT_CONSISTENT)
if (flags & PKT_DMA_PARTIAL)
/* Alloc buf dma handle */
if (!acp->buf_dma_handle) {
&acp->buf_dma_handle);
if (rval != DDI_SUCCESS) {
"Can't allocate DMA handle, errno=%d",
rval);
goto error_out;
}
}
/* Bind buf */
&acp->left_cookien);
} else {
"non-aligned buffer: addr=0x%p, cnt=%lu",
if (rval != DDI_SUCCESS) {
"Cannot alloc DMA to non-aligned buf");
bioerr = 0;
goto error_out;
}
}
switch (rval) {
case DDI_DMA_PARTIAL_MAP:
"Cannot get number of DMA windows");
bioerr = 0;
goto error_out;
}
acp->left_cookien);
break;
case DDI_DMA_MAPPED:
acp->left_cookien);
break;
case DDI_DMA_NORESOURCES:
bioerr = 0;
"Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
goto error_out;
case DDI_DMA_BADATTR:
case DDI_DMA_NOMAPPING:
"Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
goto error_out;
case DDI_DMA_TOOBIG:
"Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
goto error_out;
default:
"Cannot bind buf for DMA: %d", rval);
goto error_out;
}
acp->left_cookien);
goto error_out;
}
}
"sgt kmem_alloc fail");
goto error_out;
}
}
}
/*
* Note: The old DMA engine does not correctly handle the
* dma_attr_maxxfer attribute, so we have to ensure
* it ourselves.
*/
goto error_out;
}
/* Return remaining byte count */
} else {
/*
* Allocated DMA size is greater than the buf
* size of bp. This is caused by devices like
* tape. We have extra bytes allocated, but
* the packet residual has to stay correct.
*/
}
"bp=0x%p, xfered=%d/%d, resid=%d",
}
}
return (AACOK);
return (AACERR);
}
static struct scsi_pkt *
{
/* Allocate pkt */
int slen;
/* Force auto request sense */
return (NULL);
}
} else {
}
} else {
}
return (pkt);
if (new_acp)
return (NULL);
}
/*
* tran_sync_pkt(9E) - explicit DMA synchronization
*/
/*ARGSUSED*/
static void
{
}
/*
* tran_dmafree(9E) - deallocate DMA resources allocated for command
*/
/*ARGSUSED*/
static void
{
}
static int
{
do {
/* Quiesce has been interrupted */
return (AACERR);
}
}
return (AACOK);
}
static int
{
int rval;
rval = 0;
else
rval = 1;
return (rval);
}
static int
{
return (AACOK);
}
static int
{
int rval;
rval = 0;
else
rval = 1;
return (rval);
}
static int
{
int rval;
return (AACERR);
hba_tran, 0);
if (rval != DDI_SUCCESS) {
return (AACERR);
}
return (AACOK);
}
/*
* FIB setup operations
*/
/*
* Init FIB header
*/
static void
{
xfer_state |=
AAC_FIBSTATE_FAST_RESPONSE /* enable fast io */;
} else {
}
}
/*
* Init FIB for raw IO command
*/
static void
{
struct aac_sg_entryraw *sgp;
/* Calculate FIB size */
sizeof (struct aac_sg_entryraw);
/* Fill SG table */
}
}
/* Init FIB for 64-bit block IO command */
static void
{
struct aac_sg_entry64 *sgp;
sizeof (struct aac_sg_entry64);
/*
* The definitions for aac_blockread64 and aac_blockwrite64
* are the same.
*/
/* Fill SG table */
}
}
/* Init FIB for block IO command */
static void
{
struct aac_sg_entry *sgp;
sizeof (struct aac_sg_entry);
} else {
sizeof (struct aac_sg_entry);
}
/*
* aac_blockread and aac_blockwrite have similar
* structure headers, so use br for bw here
*/
/* Fill SG table */
}
}
/*ARGSUSED*/
void
{
}
static void
{
struct aac_synchronize_command *sync =
sizeof (struct aac_synchronize_command);
sizeof (((struct aac_synchronize_reply *)0)->Data));
}
/*
* Init FIB for pass-through SCMD
*/
static void
{
} else {
}
}
static void
{
struct aac_sg_entry *sgp;
sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
/* Fill FIB and SRB headers, and copy cdb */
/* Fill SG table */
}
}
static void
{
struct aac_sg_entry64 *sgp;
sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
/* Fill FIB and SRB headers, and copy cdb */
/* Fill SG table */
}
}
static int
{
return (AACOK);
}
return (AACERR);
}
static int
{
if (dvp) {
}
ASSERT(q == AAC_CMDQ_ASYNC);
}
} else {
goto do_bind;
}
return (AACERR);
}
static void
{
int rval;
/* Set ac and pkt */
}
} else {
/*
* If the fib cannot be enqueued, the adapter is in an abnormal
* state and there will be no interrupt to us.
*/
}
/*
* NOTE: We send commands only when slots are available, so we should never
* reach here.
*/
}
}
}
static void
{
/* Serve as many waiting io's as possible */
aac_cmd_delete(q, acp);
}
break;
}
}
static void
{
/*
* Sync FIB io is served before async FIB io so that io requests
* sent by interactive userland commands get responded to asap.
*/
}
static void
{
/*CONSTCOND*/
while (1) {
/*
* Consistent packets need to be sync'ed first
*/
pkt->pkt_statistics = 0;
}
}
if ((aac_check_acc_handle(softs-> \
comm_space_acc_handle) != DDI_SUCCESS) ||
pci_mem_handle) != DDI_SUCCESS)) {
pkt->pkt_statistics = 0;
}
if (aac_check_dma_handle(softs-> \
comm_space_dma_handle) != DDI_SUCCESS) {
pkt->pkt_statistics = 0;
}
}
} else {
break;
}
}
}
static int
{
/* Allocate FIB dma resource */
if (ddi_dma_alloc_handle(
NULL,
"Cannot alloc dma handle for slot fib area");
goto error;
}
if (ddi_dma_mem_alloc(
NULL,
&rlen,
"Cannot alloc mem for slot fib area");
goto error;
}
NULL,
NULL,
&cookie,
&cookien) != DDI_DMA_MAPPED) {
"dma bind failed for slot fib area");
goto error;
}
/* Check dma handles allocated in fib attach */
goto error;
}
/* Check acc handles allocated in fib attach */
goto error;
}
return (AACOK);
if (slotp->fib_acc_handle) {
}
if (slotp->fib_dma_handle) {
}
return (AACERR);
}
static void
{
slotp->fib_phyaddr = 0;
}
static void
{
int i;
for (i = 0; i < softs->total_slots &&
if (slotp->fib_phyaddr)
continue;
break;
/* Insert the slot to the free slot list */
softs->total_fibs++;
}
}
static void
{
softs->total_fibs--;
}
}
static int
{
int i;
return (AACERR);
}
for (i = 0; i < softs->total_slots; i++)
softs->total_fibs = 0;
return (AACOK);
}
static void
{
softs->total_slots);
softs->total_slots = 0;
}
struct aac_slot *
{
}
return (slotp);
}
static void
{
}
int
{
else
return (TRAN_ACCEPT);
/*
* Because sync FIB is always 512 bytes and used for critical
* functions, async FIB is used for poll IO.
*/
return (TRAN_ACCEPT);
} else {
return (TRAN_ACCEPT);
}
return (TRAN_BADPKT);
}
static int
{
int (*intr_handler)(struct aac_softstate *);
/*
* Interrupts are disabled, so we have to poll the adapter ourselves.
*/
int i = AAC_POLL_TIME * 1000;
if (i == 0)
}
return (AACOK);
return (AACERR);
}
static int
{
return (AACOK);
return (AACERR);
}
static int
{
if (acp->buf_dma_handle) {
} else {
return (AACERR);
}
}
return (AACOK);
}
/*
* The following function comes from Adaptec:
*
* When the driver sees a particular event that means containers have changed,
* it will rescan containers. However a change may not be complete until some
* other event is received. For example, creating or deleting an array will
* incur as many as six AifEnConfigChange events which would generate six
* container rescans. To diminish rescans, the driver sets a flag to wait for
* another particular event. When it sees that event come in, it will do the
* rescan.
*/
static int
{
struct aac_aif_command *aif;
int en_type;
int devcfg_needed;
if (fib_command != AifRequest) {
return (AACERR);
}
/* Update internal container state */
devcfg_needed = 0;
case AifCmdDriverNotify: {
switch (en_type) {
case AifDenMorphComplete:
break;
}
devcfg_needed = 1;
break;
}
case AifCmdEventNotify:
switch (en_type) {
case AifEnAddContainer:
case AifEnDeleteContainer:
break;
case AifEnContainerChange:
if (!softs->devcfg_wait_on)
break;
case AifEnContainerEvent:
devcfg_needed = 1;
break;
}
devcfg_needed = 1;
break;
case AifCmdJobProgress:
int pr_status;
(pr_status == AifJobStsSuccess))
else if ((pr_ctick == 0) &&
(pr_status == AifJobStsRunning))
}
break;
}
if (devcfg_needed) {
softs->devcfg_wait_on = 0;
(void) aac_probe_containers(softs);
}
/* Modify AIF contexts */
if (next == 0) {
struct aac_fib_context *ctx;
}
}
}
/* Wakeup applications */
return (AACOK);
}
/*
* Timeout recovery
*/
/*ARGSUSED*/
static void
{
#ifdef DEBUG
#endif
/*
* Besides the firmware in unhealthy state, an overloaded
* adapter may also incur pkt timeout.
* There is a chance for an adapter with a slower IOP to take
* longer than 60 seconds to process the commands, such as when
* the adapter is doing a build on a RAID-5 while being required
* to perform IOs, so longer completion times should be
* tolerated.
*/
switch (aac_do_reset(softs)) {
case AAC_IOP_RESET_SUCCEED:
break;
case AAC_IOP_RESET_FAILED:
/* Abort all waiting cmds when adapter is dead */
break;
case AAC_IOP_RESET_ABNORMAL:
}
}
/*
* The following function comes from Adaptec:
*
* Time sync. command added to synchronize time with firmware every 30
* minutes (required for correct AIF timestamps etc.)
*/
static int
{
}
static void
aac_daemon(void *arg)
{
/* Check slot for timeout pkts */
aac_timebase += aac_tick;
}
break;
}
}
/* Time sync. with firmware every AAC_SYNC_TICK */
if (aac_sync_time <= aac_timebase) {
else
}
}
/*
* Architecture dependent functions
*/
static int
{
}
static int
{
}
static void
{
}
static int
{
}
static int
{
}
static void
{
}
/*
* cb_ops functions
*/
static int
{
struct aac_softstate *softs;
int instance;
return (EINVAL);
if (AAC_IS_SCSA_NODE(minor))
if (instance >= AAC_MAX_ADAPTERS)
return (ENXIO);
return (ENXIO);
return (0);
}
/*ARGSUSED*/
static int
{
int instance;
return (EINVAL);
if (AAC_IS_SCSA_NODE(minor))
if (instance >= AAC_MAX_ADAPTERS)
return (ENXIO);
return (0);
}
static int
int *rval_p)
{
struct aac_softstate *softs;
int instance;
return (EPERM);
if (AAC_IS_SCSA_NODE(minor))
if (instance < AAC_MAX_ADAPTERS) {
}
return (ENXIO);
}
/*
* The IO fault service error handling callback function
*/
/*ARGSUSED*/
static int
{
/*
* as the driver can always deal with an error in any dma or
* access handle, we can just return the fme_status value.
*/
return (err->fme_status);
}
/*
* aac_fm_init - initialize fma capabilities and register with IO
* fault services.
*/
static void
{
/*
* Need to change iblock to priority for new MSI intr
*/
/* Only register with IO Fault Services if we have some capability */
if (softs->fm_capabilities) {
/* Adjust access and dma attributes for FMA */
/*
* Register capabilities with IO Fault Services.
* fm_capabilities will be updated to indicate
* capabilities actually supported (not requested.)
*/
/*
* Initialize pci ereport capabilities if ereport
* capable (should always be.)
*/
}
/*
* Register error callback if error callback capable.
*/
aac_fm_error_cb, (void *) softs);
}
}
}
/*
* aac_fm_fini - Releases fma capabilities and un-registers with IO
* fault services.
*/
static void
{
/* Only unregister FMA capabilities if registered */
if (softs->fm_capabilities) {
/*
* Un-register error callback if error callback capable.
*/
}
/*
* Release any resources allocated by pci_ereport_setup()
*/
}
/* Unregister from IO Fault Services */
/* Adjust access and dma attributes for FMA */
}
}
int
{
return (de.fme_status);
}
int
{
return (de.fme_status);
}
void
{
char buf[FM_MAX_CLASS];
}
}
/*
* Autoconfiguration support
*/
static int
{
char devbuf[SCSI_MAXNAMELEN];
char *addr;
long num;
/* Parse dev name and address */
addr = "";
for (p = devbuf; *p != '\0'; p++) {
if (*p == '@') {
addr = p + 1;
*p = '\0';
} else if (*p == ':') {
*p = '\0';
break;
}
}
/* Parse target and lun */
if (*p == ',') {
lp = p + 1;
*p = '\0';
break;
}
}
return (AACERR);
}
return (AACERR);
}
return (AACOK);
}
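/*
 * Illustrative sketch of the "tgt,lun" split performed above, not the
 * actual parser.  aac_parse_tgtlun_sketch() is a hypothetical helper and
 * assumes hexadecimal unit-address components.
 */
static int
aac_parse_tgtlun_sketch(char *addr, long *tgtp, long *lunp)
{
	char *lp = NULL;
	char *p;

	*lunp = 0;
	for (p = addr; *p != '\0'; p++) {
		if (*p == ',') {		/* target and lun separator */
			lp = p + 1;
			*p = '\0';
			break;
		}
	}
	if (ddi_strtol(addr, NULL, 16, tgtp) != 0)
		return (AACERR);
	if (lp != NULL && ddi_strtol(lp, NULL, 16, lunp) != 0)
		return (AACERR);
	return (AACOK);
}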
static dev_info_t *
{
char addr[SCSI_MAXNAMELEN];
char tmp[MAXNAMELEN];
if (tgt < AAC_MAX_LD) {
if (lun == 0) {
}
} else {
/* We don't care about non-persistent nodes */
if (ndi_dev_is_persistent_node(child) == 0)
continue;
continue;
break;
}
}
return (child);
}
static int
dev_info_t **dipp)
{
char **compatible = NULL;
int ncompatible = 0;
char *childname;
int rval;
rval = NDI_FAILURE;
goto finish;
}
/* Create dev node */
&ldip);
if (rval == NDI_SUCCESS) {
!= DDI_PROP_SUCCESS) {
rval = NDI_FAILURE;
goto finish;
}
!= DDI_PROP_SUCCESS) {
rval = NDI_FAILURE;
goto finish;
}
!= DDI_PROP_SUCCESS) {
rval = NDI_FAILURE;
goto finish;
}
if (rval != NDI_SUCCESS) {
(void) ndi_devi_free(ldip);
}
}
if (dipp)
return (rval);
}
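/*
 * A sketch of the node creation flow above, not the driver's code:
 * allocate a child node, attach the "target" and "lun" properties,
 * online it, and free it again if anything fails.  "nodename", "tgt"
 * and "lun" are hypothetical arguments.
 */
static int
config_node_sketch(dev_info_t *parent, char *nodename, int tgt, int lun,
    dev_info_t **dipp)
{
	dev_info_t *dip = NULL;

	if (ndi_devi_alloc(parent, nodename, DEVI_SID_NODEID, &dip) !=
	    NDI_SUCCESS)
		return (NDI_FAILURE);

	if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, "target", tgt) !=
	    DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip, "lun", lun) !=
	    DDI_PROP_SUCCESS ||
	    ndi_devi_online(dip, NDI_ONLINE_ATTACH) != NDI_SUCCESS) {
		(void) ndi_devi_free(dip);	/* undo the allocation */
		return (NDI_FAILURE);
	}

	if (dipp)
		*dipp = dip;
	return (NDI_SUCCESS);
}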
/*ARGSUSED*/
static int
{
if (tgt < AAC_MAX_LD) {
int rval;
if (lun == 0) {
return (NDI_SUCCESS);
}
}
return (NDI_FAILURE);
} else {
int dtype;
return (NDI_FAILURE);
"Phys. device found: tgt %d dtype %d: %s",
/* Only non-DASD exposed */
return (NDI_FAILURE);
return (NDI_SUCCESS);
}
}
static int
dev_info_t **ldip)
{
struct scsi_device sd;
int rval;
if (ldip)
return (NDI_SUCCESS);
}
scsi_unprobe(&sd);
return (rval);
}
static int
{
struct scsi_address ap;
int list_len = 0;
int lun_total = 0;
int i;
for (i = 0; i < 2; i++) {
uchar_t *p;
return (AACERR);
}
sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
return (AACERR);
}
cdb[0] = SCMD_REPORT_LUNS;
/* Convert buffer len from local to LE_32 */
*p = data & 0xff;
data >>= 8;
}
break;
}
/* Convert list_len from LE_32 to local */
data <<= 8;
data |= *p;
}
}
}
if (i >= 2) {
for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
/* Determine report luns addressing type */
switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
/*
* Vendors in the field have been found to report the
* complete lun value instead of switching to
* flat space addressing
*/
"abnormal lun number: %d", lun);
break;
}
lun_total++;
break;
}
}
} else {
/* The target may not support SCMD_REPORT_LUNS. */
lun_total++;
}
return (lun_total);
}
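/*
 * Sketch only, not the driver's code: the byte-at-a-time conversion used
 * above stores the 32-bit length least-significant byte first, which
 * keeps the handling independent of host endianness, and each returned
 * 8-byte LUN entry carries its addressing method in the top two bits of
 * byte 0.
 */
static void
store_le32_sketch(uchar_t *p, uint32_t data)
{
	int i;

	for (i = 0; i < 4; i++) {
		p[i] = data & 0xff;	/* least significant byte first */
		data >>= 8;
	}
}

static int
rptluns_addr_method_sketch(const uchar_t *lun_entry)
{
	/* AAC_SCSI_RPTLUNS_ADDR_MASK selects the two addressing-method bits */
	return (lun_entry[0] & AAC_SCSI_RPTLUNS_ADDR_MASK);
}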
static void
{
struct aac_device *dvp;
if (en)
else
}
static int
{
struct aac_softstate *softs;
int circ = 0;
int rval;
return (NDI_FAILURE);
/* Commands for bus config should be blocked as the bus is quiesced */
"bus_config abroted because bus is quiesced");
return (NDI_FAILURE);
}
/* Hold the nexus across the bus_config */
switch (op) {
case BUS_CONFIG_ONE: {
rval = NDI_FAILURE;
break;
}
break;
}
case BUS_CONFIG_DRIVER:
case BUS_CONFIG_ALL: {
}
/* Config the non-DASD devices connected to the card */
total = 0;
index = AAC_MAX_LD;
total++;
}
}
"?Total %d phys. device(s) found", total);
rval = NDI_SUCCESS;
break;
}
}
if (rval == NDI_SUCCESS)
return (rval);
}
static void
{
struct aac_device *dvp;
int valid;
int circ1 = 0;
/* Hold the nexus across the bus_config */
case AAC_EVT_ONLINE:
case AAC_EVT_OFFLINE:
/* Device onlined */
}
/* Device offlined */
(void) aac_do_reset(softs);
}
break;
}
}
static int
{
struct aac_drinfo *drp;
return (AACERR);
return (AACERR);
}
return (AACOK);
}
#ifdef DEBUG
/* -------------------------debug aid functions-------------------------- */
#define AAC_FIB_CMD_KEY_STRINGS \
TestCommandResponse, "TestCommandResponse", \
TestAdapterCommand, "TestAdapterCommand", \
LastTestCommand, "LastTestCommand", \
ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
InterfaceShutdown, "InterfaceShutdown", \
DmaCommandFib, "DmaCommandFib", \
StartProfile, "StartProfile", \
TermProfile, "TermProfile", \
SpeedTest, "SpeedTest", \
TakeABreakPt, "TakeABreakPt", \
RequestPerfData, "RequestPerfData", \
SetInterruptDefTimer, "SetInterruptDefTimer", \
SetInterruptDefCount, "SetInterruptDefCount", \
GetInterruptDefStatus, "GetInterruptDefStatus", \
LastCommCommand, "LastCommCommand", \
NuFileSystem, "NuFileSystem", \
UFS, "UFS", \
HostFileSystem, "HostFileSystem", \
LastFileSystemCommand, "LastFileSystemCommand", \
ContainerCommand, "ContainerCommand", \
ContainerCommand64, "ContainerCommand64", \
ClusterCommand, "ClusterCommand", \
ScsiPortCommand, "ScsiPortCommand", \
ScsiPortCommandU64, "ScsiPortCommandU64", \
AifRequest, "AifRequest", \
CheckRevision, "CheckRevision", \
FsaHostShutdown, "FsaHostShutdown", \
RequestAdapterInfo, "RequestAdapterInfo", \
IsAdapterPaused, "IsAdapterPaused", \
SendHostTime, "SendHostTime", \
LastMiscCommand, "LastMiscCommand"
#define AAC_CTVM_SUBCMD_KEY_STRINGS \
VM_Null, "VM_Null", \
VM_NameServe, "VM_NameServe", \
VM_ContainerConfig, "VM_ContainerConfig", \
VM_Ioctl, "VM_Ioctl", \
VM_FilesystemIoctl, "VM_FilesystemIoctl", \
VM_CloseAll, "VM_CloseAll", \
VM_CtBlockRead, "VM_CtBlockRead", \
VM_CtBlockWrite, "VM_CtBlockWrite", \
VM_SliceBlockRead, "VM_SliceBlockRead", \
VM_SliceBlockWrite, "VM_SliceBlockWrite", \
VM_DriveBlockRead, "VM_DriveBlockRead", \
VM_DriveBlockWrite, "VM_DriveBlockWrite", \
VM_EnclosureMgt, "VM_EnclosureMgt", \
VM_Unused, "VM_Unused", \
VM_CtBlockVerify, "VM_CtBlockVerify", \
VM_CtPerf, "VM_CtPerf", \
VM_CtBlockRead64, "VM_CtBlockRead64", \
VM_CtBlockWrite64, "VM_CtBlockWrite64", \
VM_CtBlockVerify64, "VM_CtBlockVerify64", \
VM_CtHostRead64, "VM_CtHostRead64", \
VM_CtHostWrite64, "VM_CtHostWrite64", \
VM_NameServe64, "VM_NameServe64"
#define AAC_CT_SUBCMD_KEY_STRINGS \
CT_Null, "CT_Null", \
CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
CT_WRITE_MBR, "CT_WRITE_MBR", \
CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
CT_READ_MBR, "CT_READ_MBR", \
CT_READ_PARTITION, "CT_READ_PARTITION", \
CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
CT_SLICE_SIZE, "CT_SLICE_SIZE", \
CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
CT_UNMIRROR, "CT_UNMIRROR", \
CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
CT_GEN_MIRROR, "CT_GEN_MIRROR", \
CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
CT_MOVE2, "CT_MOVE2", \
CT_SPLIT, "CT_SPLIT", \
CT_SPLIT2, "CT_SPLIT2", \
CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
CT_RECONFIG, "CT_RECONFIG", \
CT_BREAK2, "CT_BREAK2", \
CT_BREAK, "CT_BREAK", \
CT_MERGE2, "CT_MERGE2", \
CT_MERGE, "CT_MERGE", \
CT_FORCE_ERROR, "CT_FORCE_ERROR", \
CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
CT_VOLUME_ADD, "CT_VOLUME_ADD", \
CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
CT_COPY_STATUS, "CT_COPY_STATUS", \
CT_COPY, "CT_COPY", \
CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
CT_SET, "CT_SET", \
CT_GET, "CT_GET", \
CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
CT_GET_DELAY, "CT_GET_DELAY", \
CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
CT_SCRUB, "CT_SCRUB", \
CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
CT_PAUSE_IO, "CT_PAUSE_IO", \
CT_RELEASE_IO, "CT_RELEASE_IO", \
CT_SCRUB2, "CT_SCRUB2", \
CT_MCHECK, "CT_MCHECK", \
CT_CORRUPT, "CT_CORRUPT", \
CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
CT_PROMOTE, "CT_PROMOTE", \
CT_SET_DEAD, "CT_SET_DEAD", \
CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
CT_GET_PARAM, "CT_GET_PARAM", \
CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
CT_STOP_DATA, "CT_STOP_DATA", \
CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
CT_GET_TIME, "CT_GET_TIME", \
CT_READ_DATA, "CT_READ_DATA", \
CT_CTR, "CT_CTR", \
CT_CTL, "CT_CTL", \
CT_DRAINIO, "CT_DRAINIO", \
CT_RELEASEIO, "CT_RELEASEIO", \
CT_GET_NVRAM, "CT_GET_NVRAM", \
CT_GET_MEMORY, "CT_GET_MEMORY", \
CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
CT_ADD_LEVEL, "CT_ADD_LEVEL", \
CT_NV_ZERO, "CT_NV_ZERO", \
CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
CT_THROTTLE_ON, "CT_THROTTLE_ON", \
CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
CT_MONITOR, "CT_MONITOR", \
CT_GEN_MORPH, "CT_GEN_MORPH", \
CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
CT_CACHE_SET, "CT_CACHE_SET", \
CT_CACHE_STAT, "CT_CACHE_STAT", \
CT_TRACE_START, "CT_TRACE_START", \
CT_TRACE_STOP, "CT_TRACE_STOP", \
CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
CT_STOP_DUMPS, "CT_STOP_DUMPS", \
CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
CT_READ_NAME, "CT_READ_NAME", \
CT_WRITE_NAME, "CT_WRITE_NAME", \
CT_TOSS_CACHE, "CT_TOSS_CACHE", \
CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
CT_FLUSH, "CT_FLUSH", \
CT_REBUILD, "CT_REBUILD", \
CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
CT_RESTART, "CT_RESTART", \
CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
CT_TRACE_FLAG, "CT_TRACE_FLAG", \
CT_RESTART_MORPH, "CT_RESTART_MORPH", \
CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
"CT_IS_CONTAINER_MEATADATA_STANDARD", \
CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
#define AAC_CL_SUBCMD_KEY_STRINGS \
CL_NULL, "CL_NULL", \
DS_INIT, "DS_INIT", \
DS_RESCAN, "DS_RESCAN", \
DS_CREATE, "DS_CREATE", \
DS_DELETE, "DS_DELETE", \
DS_ADD_DISK, "DS_ADD_DISK", \
DS_REMOVE_DISK, "DS_REMOVE_DISK", \
DS_MOVE_DISK, "DS_MOVE_DISK", \
DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
DS_GET_DRIVES, "DS_GET_DRIVES", \
DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
DS_ONLINE, "DS_ONLINE", \
DS_OFFLINE, "DS_OFFLINE", \
DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
DS_FSAPRINT, "DS_FSAPRINT", \
CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
CC_GET_BUSINFO, "CC_GET_BUSINFO", \
CC_GET_PORTINFO, "CC_GET_PORTINFO", \
CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
CQ_QUORUM_OP, "CQ_QUORUM_OP"
#define AAC_AIF_SUBCMD_KEY_STRINGS \
AifCmdEventNotify, "AifCmdEventNotify", \
AifCmdJobProgress, "AifCmdJobProgress", \
AifCmdAPIReport, "AifCmdAPIReport", \
AifCmdDriverNotify, "AifCmdDriverNotify", \
AifReqJobList, "AifReqJobList", \
AifReqJobsForCtr, "AifReqJobsForCtr", \
AifReqJobsForScsi, "AifReqJobsForScsi", \
AifReqJobReport, "AifReqJobReport", \
AifReqTerminateJob, "AifReqTerminateJob", \
AifReqSuspendJob, "AifReqSuspendJob", \
AifReqResumeJob, "AifReqResumeJob", \
AifReqSendAPIReport, "AifReqSendAPIReport", \
AifReqAPIJobStart, "AifReqAPIJobStart", \
AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
AifReqAPIJobFinish, "AifReqAPIJobFinish"
#define AAC_IOCTL_SUBCMD_KEY_STRINGS \
Reserved_IOCTL, "Reserved_IOCTL", \
GetDeviceHandle, "GetDeviceHandle", \
BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
RescanBus, "RescanBus", \
GetDeviceProbeInfo, "GetDeviceProbeInfo", \
GetDeviceCapacity, "GetDeviceCapacity", \
GetContainerProbeInfo, "GetContainerProbeInfo", \
GetRequestedMemorySize, "GetRequestedMemorySize", \
GetBusInfo, "GetBusInfo", \
GetVendorSpecific, "GetVendorSpecific", \
EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
EnhancedGetBusInfo, "EnhancedGetBusInfo", \
SetupExtendedCounters, "SetupExtendedCounters", \
GetPerformanceCounters, "GetPerformanceCounters", \
ResetPerformanceCounters, "ResetPerformanceCounters", \
ReadModePage, "ReadModePage", \
WriteModePage, "WriteModePage", \
ReadDriveParameter, "ReadDriveParameter", \
WriteDriveParameter, "WriteDriveParameter", \
ResetAdapter, "ResetAdapter", \
ResetBus, "ResetBus", \
ResetBusDevice, "ResetBusDevice", \
ExecuteSrb, "ExecuteSrb", \
Create_IO_Task, "Create_IO_Task", \
Delete_IO_Task, "Delete_IO_Task", \
Get_IO_Task_Info, "Get_IO_Task_Info", \
Check_Task_Progress, "Check_Task_Progress", \
InjectError, "InjectError", \
GetDeviceDefectCounts, "GetDeviceDefectCounts", \
GetDeviceDefectInfo, "GetDeviceDefectInfo", \
GetDeviceStatus, "GetDeviceStatus", \
ClearDeviceStatus, "ClearDeviceStatus", \
DiskSpinControl, "DiskSpinControl", \
DiskSmartControl, "DiskSmartControl", \
WriteSame, "WriteSame", \
ReadWriteLong, "ReadWriteLong", \
FormatUnit, "FormatUnit", \
TargetDeviceControl, "TargetDeviceControl", \
TargetChannelControl, "TargetChannelControl", \
FlashNewCode, "FlashNewCode", \
DiskCheck, "DiskCheck", \
RequestSense, "RequestSense", \
DiskPERControl, "DiskPERControl", \
Read10, "Read10", \
Write10, "Write10"
#define AAC_AIFEN_KEY_STRINGS \
AifEnGeneric, "Generic", \
AifEnTaskComplete, "TaskComplete", \
AifEnConfigChange, "Config change", \
AifEnContainerChange, "Container change", \
AifEnDeviceFailure, "device failed", \
AifEnMirrorFailover, "Mirror failover", \
AifEnContainerEvent, "container event", \
AifEnFileSystemChange, "File system changed", \
AifEnConfigPause, "Container pause event", \
AifEnConfigResume, "Container resume event", \
AifEnFailoverChange, "Failover space assignment changed", \
AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
AifEnEnclosureManagement, "Enclosure management event", \
AifEnBatteryEvent, "battery event", \
AifEnAddContainer, "Add container", \
AifEnDeleteContainer, "Delete container", \
AifEnSMARTEvent, "SMART Event", \
AifEnBatteryNeedsRecond, "battery needs reconditioning", \
AifEnClusterEvent, "cluster event", \
AifEnDiskSetEvent, "disk set event occurred", \
AifDenMorphComplete, "morph operation completed", \
AifDenVolumeExtendComplete, "VolumeExtendComplete"
struct aac_key_strings {
int key;
char *message;
};
extern struct scsi_key_strings scsi_cmds[];
static struct aac_key_strings aac_fib_cmds[] = {
-1, NULL
};
static struct aac_key_strings aac_ctvm_subcmds[] = {
-1, NULL
};
static struct aac_key_strings aac_ct_subcmds[] = {
-1, NULL
};
static struct aac_key_strings aac_cl_subcmds[] = {
-1, NULL
};
static struct aac_key_strings aac_aif_subcmds[] = {
-1, NULL
};
static struct aac_key_strings aac_ioctl_subcmds[] = {
-1, NULL
};
static struct aac_key_strings aac_aifens[] = {
-1, NULL
};
/*
* The following function comes from Adaptec:
*
* Get the firmware print buffer parameters from the firmware,
* if the command was successful map in the address.
*/
static int
{
if (mondrv_buf_size) {
/*
* See if the address is already mapped in, and
* if so set it up from the base address
*/
if ((mondrv_buf_paddrh == 0) &&
softs->debug_fw_flags = 0;
return (AACOK);
}
}
}
return (AACERR);
}
int
{
return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
}
static void
{
if (noheader) {
if (sl) {
} else {
}
} else {
if (sl) {
aac_fmt_header[0] = sl;
} else {
}
}
}
/*
* The following function comes from Adaptec:
*
* Format and print out the data passed in to UART or console
* as specified by debug flags.
*/
void
{
char sl; /* system log character */
/* Set up parameters and call sprintf function to format the data */
sl = 0;
} else {
fmt++;
}
/* Make sure the softs structure has been passed in for this section */
if (softs) {
/* If we are set up for a Firmware print */
(softs->debug_buf_size)) {
/* Make sure the string size is within boundaries */
/*
* Wait for no more than AAC_PRINT_TIMEOUT for the
* previous message length to clear (the handshake).
*/
for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
if (!PCI_MEM_GET32(softs,
softs->debug_buf_offset + \
break;
drv_usecwait(1000);
}
/*
* If the length is clear, copy over the message, the
* flags, and the length. Make sure the length is the
* last because that is the signal for the Firmware to
* pick it up.
*/
softs->debug_buf_offset + \
aac_prt_buf, count);
softs->debug_buf_offset + \
softs->debug_buf_offset + \
} else {
}
}
/*
* If the Kernel Debug Print flag is set, send it off
* to the Kernel Debugger
*/
} else {
/* Driver not initialized yet, no firmware or header output */
}
}
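/*
 * A minimal sketch of the print-buffer handshake described above, not the
 * driver's implementation.  The structure layout, field names and timeout
 * are hypothetical; the essential point is that the length word is written
 * last, because a non-zero length is the firmware's cue that a new message
 * is ready.
 */
struct fw_print_buf_sketch {
	volatile uint32_t length;	/* handshake word, 0 means free */
	uint32_t flags;
	char text[256];
};

static int
fw_print_sketch(struct fw_print_buf_sketch *pb, const char *msg,
    uint32_t len, uint32_t flags, int timeout_ms)
{
	int i;

	/* Wait for the previous message length to clear (the handshake) */
	for (i = 0; i < timeout_ms; i++) {
		if (pb->length == 0)
			break;
		drv_usecwait(1000);
	}
	if (pb->length != 0)
		return (AACERR);	/* firmware never consumed the last one */

	bcopy(msg, pb->text, len);	/* message body first */
	pb->flags = flags;		/* then the flags */
	pb->length = len;		/* length last: the "go" signal */
	return (AACOK);
}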
/*
* Translate command number to description string
*/
static char *
{
int i;
}
return (NULL);
}
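/*
 * Sketch of the lookup done above (not the exact code): the
 * aac_key_strings tables are terminated by a { -1, NULL } entry, so a
 * linear scan either finds the matching message or falls off the end
 * and returns NULL.
 */
static char *
key_to_string_sketch(struct aac_key_strings *table, int key)
{
	int i;

	for (i = 0; table[i].message != NULL; i++)
		if (table[i].key == key)
			return (table[i].message);
	return (NULL);
}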
static void
{
int is_pd = 0;
char *desc;
if (tgt >= AAC_MAX_LD) {
is_pd = 1;
lun = 0;
}
"SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
return;
}
switch (cmd) {
case SCMD_READ:
case SCMD_WRITE:
"SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
break;
case SCMD_READ_G1:
case SCMD_WRITE_G1:
"SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
break;
case SCMD_READ_G4:
case SCMD_WRITE_G4:
"SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
break;
case SCMD_READ_G5:
case SCMD_WRITE_G5:
"SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
break;
default:
}
}
void
{
char *caller;
int i;
if (acp) {
return;
caller = "SCMD";
caller = "IOCTL";
caller = "SRB";
else
return;
} else {
return;
caller = "SYNC";
}
/* Print FIB header */
}
/* Print FIB data */
switch (fib_cmd) {
case ContainerCommand:
break;
switch (sub_cmd) {
case VM_ContainerConfig: {
struct aac_Container *pContainer =
break;
return;
}
case VM_Ioctl:
break;
case VM_CtBlockRead:
case VM_CtBlockWrite: {
struct aac_blockread *br =
"FIB> %s Container %d 0x%x/%d", subcmdstr,
for (i = 0; i < sgcount; i++)
" %d: 0x%08x/%d", i,
SgByteCount));
return;
}
}
break;
case ContainerCommand64: {
struct aac_blockread64 *br =
if (sub_cmd == VM_CtHostRead64)
subcmdstr = "VM_CtHostRead64";
else if (sub_cmd == VM_CtHostWrite64)
subcmdstr = "VM_CtHostWrite64";
else
break;
"FIB> %s Container %d 0x%x/%d", subcmdstr,
for (i = 0; i < sgcount; i++) {
" %d: 0x%08x.%08x/%d", i,
SgByteCount));
}
return;
}
case RawIo: {
"FIB> RawIo Container %d 0x%llx/%d 0x%x",
for (i = 0; i < sgcount; i++) {
}
return;
}
case ClusterCommand:
break;
case AifRequest:
break;
default:
break;
}
if (subcmdstr)
else if (cmdstr)
else
}
static void
{
int aif_command;
int aif_en_type;
char *str;
switch (aif_command) {
case AifCmdEventNotify:
if (str)
else
break;
case AifCmdJobProgress:
case AifJobStsSuccess:
str = "success"; break;
case AifJobStsFinished:
str = "finished"; break;
case AifJobStsAborted:
str = "aborted"; break;
case AifJobStsFailed:
str = "failed"; break;
case AifJobStsSuspended:
str = "suspended"; break;
case AifJobStsRunning:
str = "running"; break;
default:
str = "unknown"; break;
}
"AIF! JobProgress (%d) - %s (%d, %d)",
break;
case AifCmdAPIReport:
break;
case AifCmdDriverNotify:
break;
default:
break;
}
}
#endif /* DEBUG */