/* arcmsr.c revision 7d14b8f218dc86a431d15e352b4bf15fbb1b3596 */
/*
* O.S : Solaris
* FILE NAME : arcmsr.c
* BY : Erich Chen, C.L. Huang
* Description: SCSI RAID Device Driver for
* ARECA RAID Host adapter
*
* Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
* Copyright (C) 2002,2010 Erich Chen
* Web site: www.areca.com.tw
* E-mail: erich@areca.com.tw; ching2048@areca.com.tw
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The party using or redistributing the source code and binary forms
* agrees to the disclaimer below and the terms and conditions set forth
* herein.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
*/
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/ddidmareq.h>
#include "arcmsr.h"
int whom);
struct scsi_device *sd);
dev_info_t **dipp);
dev_info_t **ldip);
/*PRINTFLIKE3*/
/*PRINTFLIKE2*/
static void arcmsr_ccbs_timeout(void* arg);
static void arcmsr_devMap_monitor(void* arg);
static int arcmsr_add_intr(struct ACB *, int);
static void *arcmsr_soft_state = NULL;
/*
 * DMA attributes for SCSI data transfers (ddi_dma_attr(9S)).
 * NOTE(review): this initializer appears to carry fewer entries than
 * ddi_dma_attr_t declares (e.g. dma_attr_burstsizes is not visible
 * between the alignment and minimum-transfer entries) — compare
 * against the original source before relying on field positions.
 */
static ddi_dma_attr_t arcmsr_dma_attr = {
DMA_ATTR_V0, /* ddi_dma_attr version */
0, /* low DMA address range */
0xffffffffffffffffull, /* high DMA address range */
0x00ffffff, /* DMA counter upper bound */
1, /* DMA address alignment requirements */
1, /* minimum effective DMA size */
ARCMSR_MAX_XFER_LEN, /* maximum DMA xfer size */
/*
 * The boundary restriction applies to each scatter/gather
 * list element's "address+length". The Intel IOP331 can not use
 * segments over the 4G boundary due to segment boundary restrictions
 */
0xffffffff, /* segment boundary */
ARCMSR_MAX_SG_ENTRIES, /* scatter/gather list count */
1, /* device granularity */
DDI_DMA_FORCE_PHYSICAL /* Bus specific DMA flags */
};
/*
 * DMA attributes for the CCB (command control block) pool:
 * 32-bit addressable, byte-aligned, physically-addressed memory.
 * NOTE(review): as with arcmsr_dma_attr above, the entry count looks
 * short of ddi_dma_attr_t's field list (dma_attr_sgllen is not
 * visible) — verify against the original source.
 */
static ddi_dma_attr_t arcmsr_ccb_attr = {
DMA_ATTR_V0, /* ddi_dma_attr version */
0, /* low DMA address range */
0xffffffff, /* high DMA address range */
0x00ffffff, /* DMA counter upper bound */
1, /* default byte alignment */
1, /* minimum effective DMA size */
0xffffffff, /* maximum DMA xfer size */
0x00ffffff, /* max segment size, segment boundary restrictions */
1, /* device granularity */
DDI_DMA_FORCE_PHYSICAL /* Bus specific DMA flags */
};
/*
 * Character/block device entry points (cb_ops(9S)).
 * Only open/close (delegated to the SCSA framework) and ioctl are
 * functional; everything else is nodev/NULL.
 * NOTE(review): cb_rev (normally CB_REV, between cb_flag and cb_aread)
 * is not visible in this initializer — possibly a truncation artifact;
 * compare against the original source.
 */
static struct cb_ops arcmsr_cb_ops = {
scsi_hba_open, /* open(9E) */
scsi_hba_close, /* close(9E) */
nodev, /* strategy(9E), returns ENXIO */
nodev, /* print(9E) */
nodev, /* dump(9E) Cannot be used as a dump device */
nodev, /* read(9E) */
nodev, /* write(9E) */
arcmsr_cb_ioctl, /* ioctl(9E) */
nodev, /* devmap(9E) */
nodev, /* mmap(9E) */
nodev, /* segmap(9E) */
NULL, /* chpoll(9E) returns ENXIO */
nodev, /* prop_op(9E) */
NULL, /* streamtab(9S) */
D_MP, /* cb_flag: driver is MT-safe */
nodev, /* aread(9E) */
nodev /* awrite(9E) */
};
/*
 * Autoconfiguration entry points (dev_ops(9S)) for this driver
 * instance: attach/detach/reset are implemented, bus and power
 * management operations are not.
 */
static struct dev_ops arcmsr_ops = {
DEVO_REV, /* devo_rev */
0, /* reference count */
nodev, /* getinfo */
nulldev, /* identify */
nulldev, /* probe */
arcmsr_attach, /* attach */
arcmsr_detach, /* detach */
arcmsr_reset, /* reset, shutdown, reboot notify */
&arcmsr_cb_ops, /* driver operations */
NULL, /* bus operations */
NULL /* power */
};
/*
 * Loadable-module linkage of type "driver" (modldrv(9S)); normally
 * referenced from the modlinkage passed to mod_install(9F) in _init().
 */
static struct modldrv arcmsr_modldrv = {
&mod_driverops, /* Type of module. This is a driver. */
"ARECA RAID Controller", /* module name, from arcmsr.h */
&arcmsr_ops, /* driver ops */
};
/*
 * Module linkage passed to mod_install(9F)/mod_remove(9F).
 * NOTE(review): this initializer is empty — the original presumably
 * contained { MODREV_1, &arcmsr_modldrv, NULL }; restore before use.
 */
static struct modlinkage arcmsr_modlinkage = {
};
/*
 * _init(9E): module installation entry point.
 * NOTE(review): the body appears truncated — `ret` is tested and
 * returned without ever being assigned; the ddi_soft_state_init()/
 * scsi_hba_init()/mod_install() calls that should set it are missing.
 * Restore from the original source before building.
 */
int
_init(void)
{
int ret;
if (ret != 0) {
return (ret);
}
return (ret);
}
if (arcmsr_soft_state != NULL) {
}
}
return (ret);
}
/*
 * _fini(9E): module removal entry point.
 * NOTE(review): the body appears truncated — `ret` is read
 * uninitialized (the mod_remove() call that should set it is missing)
 * and the soft-state cleanup branch is empty; presumably
 * ddi_soft_state_fini(&arcmsr_soft_state) belongs there. Restore
 * from the original source before building.
 */
int
_fini(void)
{
int ret;
if (ret == 0) {
/* if ret = 0 , said driver can remove */
if (arcmsr_soft_state != NULL) {
}
}
return (ret);
}
/*
 * NOTE(review): truncated remnant — judging by its position after
 * _fini(9E) this is likely _info(9E), but the signature and body
 * (mod_info(&arcmsr_modlinkage, modinfop)) are missing; as written,
 * a non-void function with no return statement. Restore from the
 * original source.
 */
int
{
}
/*
* Function: arcmsr_attach(9E)
* Description: Set up all device state and allocate data structures,
* mutexes, condition variables, etc. for device operation.
* Set mt_attr property for driver to indicate MT-safety.
* Add interrupts needed.
* Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
* Output: Return DDI_SUCCESS if device is ready,
* else return DDI_FAILURE
*/
static int
{
switch (cmd) {
case DDI_ATTACH:
return (arcmsr_do_ddi_attach(dev_info,
case DDI_RESUME:
/*
* There is no hardware state to restart and no
* timeouts to restart since we didn't DDI_SUSPEND with
* active cmds or active timeouts We just need to
* unblock waiting threads and restart I/O the code
*/
return (DDI_FAILURE);
}
/* restart ccbs "timeout" watchdog */
acb->timeout_count = 0;
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*
* Function: arcmsr_detach(9E)
* Description: Remove all device allocation and system resources, disable
* device interrupt.
* Input: dev_info_t *dev_info
* ddi_detach_cmd_t cmd
* Output: Return DDI_SUCCESS if done,
* else return DDI_FAILURE
*/
static int
int instance;
return (DDI_FAILURE);
switch (cmd) {
case DDI_DETACH:
if (acb->timeout_id != 0) {
acb->timeout_id = 0;
}
if (acb->timeout_sc_id != 0) {
acb->timeout_sc_id = 0;
}
/* Remove interrupt set up by ddi_add_intr */
/* unbind mapping object to handle */
/* Free ccb pool memory */
/* Free DMA handle */
"(should not happen)");
/* free scsi_hba_transport from scsi_hba_tran_alloc */
return (DDI_SUCCESS);
case DDI_SUSPEND:
if (acb->timeout_id != 0) {
acb->timeout_id = 0;
}
if (acb->timeout_sc_id != 0) {
acb->timeout_sc_id = 0;
}
/* disable all outbound interrupt */
(void) arcmsr_disable_allintr(acb);
/* stop adapter background rebuild */
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
}
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
static int
{
if (scsi_hba_transport == NULL)
return (DDI_FAILURE);
if (!acb)
return (DDI_FAILURE);
return (DDI_SUCCESS);
}
static int
{
struct CMD_MESSAGE_FIELD *pktioctlfld;
int retvalue = 0;
if (instance < 0)
return (ENXIO);
return (EPERM);
return (ENXIO);
sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
goto ioctl_out;
}
/* validity check */
goto ioctl_out;
}
switch ((unsigned int)ioctl_cmd) {
{
int32_t allxfer_len = 0;
/* copy READ QBUFFER to srb */
acb->rqbuf_firstidx++;
/* if last index number set it to 0 */
ptmpQbuffer++;
allxfer_len++;
}
/*
* this iop data does no chance to make me overflow
* again here, so just do it
*/
while (iop_len > 0) {
acb->rqbuf_lastidx++;
/* if last index number set it to 0 */
iop_data++;
iop_len--;
}
/* let IOP know data has been read */
}
sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
break;
}
{
(void) memcpy(ptmpuserbuffer,
/*
* check ifdata xfer length of this request will overflow
* my array qbuffer
*/
if (wqbuf_lastidx != wqbuf_firstidx) {
} else {
& (ARCMSR_MAX_QBUFFER - 1);
if (my_empty_len >= user_len) {
while (user_len > 0) {
/* copy srb data to wqbuffer */
pQbuffer =
ptmpuserbuffer, 1);
acb->wqbuf_lastidx++;
/* iflast index number set it to 0 */
acb->wqbuf_lastidx %=
user_len--;
}
/* post first Qbuffer */
}
} else {
}
}
sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
break;
}
{
}
acb->rqbuf_firstidx = 0;
acb->rqbuf_lastidx = 0;
/* report success */
sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
break;
}
{
}
acb->wqbuf_firstidx = 0;
acb->wqbuf_lastidx = 0;
/* report success */
sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
break;
}
{
}
acb->rqbuf_firstidx = 0;
acb->rqbuf_lastidx = 0;
acb->wqbuf_firstidx = 0;
acb->wqbuf_lastidx = 0;
/* report success */
sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
break;
}
sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
break;
/* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
break;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
}
break;
default:
rvalp));
}
return (retvalue);
}
/*
* Function: arcmsr_tran_tgt_init
* Description: Called when initializing a target device instance. If
* no per-target initialization is required, the HBA
* may leave tran_tgt_init to NULL
* Input:
* dev_info_t *host_dev_info,
* dev_info_t *target_dev_info,
* scsi_hba_tran_t *tran,
* struct scsi_device *sd
*
* Return: DDI_SUCCESS if success, else return DDI_FAILURE
*
* target resources.
* It also enables the HBA to qualify the device's address as valid and
* supportable for that particular HBA.
* By returning DDI_FAILURE, the instance of the target driver for that
* device will not be probed or attached.
* This entry point is not required, and if none is supplied,
* the framework will attempt to probe and attach all possible instances
* of the appropriate target drivers.
*/
static int
{
return (DDI_FAILURE);
}
if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
/*
* If no persistent node exist, we don't allow .conf node
* to be created.
*/
arcmsr_name_node) != DDI_SUCCESS)) {
return (DDI_SUCCESS);
}
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Function: arcmsr_tran_getcap(9E)
* Description: Get the capability named, and return its value.
* Return Values: current value of capability, if defined
* -1 if capability is not defined
* ------------------------------------------------------
* Common Capability Strings Array
* ------------------------------------------------------
* #define SCSI_CAP_DMA_MAX 0
* #define SCSI_CAP_MSG_OUT 1
* #define SCSI_CAP_DISCONNECT 2
* #define SCSI_CAP_SYNCHRONOUS 3
* #define SCSI_CAP_WIDE_XFER 4
* #define SCSI_CAP_PARITY 5
* #define SCSI_CAP_INITIATOR_ID 6
* #define SCSI_CAP_UNTAGGED_QING 7
* #define SCSI_CAP_TAGGED_QING 8
* #define SCSI_CAP_ARQ 9
* #define SCSI_CAP_LINKED_CMDS 10 a
* #define SCSI_CAP_SECTOR_SIZE 11 b
* #define SCSI_CAP_TOTAL_SECTORS 12 c
* #define SCSI_CAP_GEOMETRY 13 d
* #define SCSI_CAP_RESET_NOTIFICATION 14 e
* #define SCSI_CAP_QFULL_RETRIES 15 f
* #define SCSI_CAP_QFULL_RETRY_INTERVAL 16 10
* #define SCSI_CAP_SCSI_VERSION 17 11
* #define SCSI_CAP_INTERCONNECT_TYPE 18 12
* #define SCSI_CAP_LUN_RESET 19 13
*/
static int
{
int capability = 0;
return (DDI_FAILURE);
}
return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_MSG_OUT:
case SCSI_CAP_DISCONNECT:
case SCSI_CAP_WIDE_XFER:
case SCSI_CAP_TAGGED_QING:
case SCSI_CAP_UNTAGGED_QING:
case SCSI_CAP_PARITY:
case SCSI_CAP_ARQ:
capability = 1;
break;
case SCSI_CAP_SECTOR_SIZE:
break;
case SCSI_CAP_DMA_MAX:
/* Limit to 16MB max transfer */
break;
case SCSI_CAP_INITIATOR_ID:
break;
case SCSI_CAP_GEOMETRY:
/* head , track , cylinder */
break;
default:
capability = -1;
break;
}
return (capability);
}
/*
* Function: arcmsr_tran_setcap(9E)
* Description: Set the specific capability.
* Return Values: 1 - capability exists and can be set to new value
* 0 - capability could not be set to new value
* -1 - no such capability
*/
static int
{
int supported = 0;
return (-1);
}
return (-1);
}
case SCSI_CAP_ARQ: /* 9 auto request sense */
case SCSI_CAP_UNTAGGED_QING: /* 7 */
case SCSI_CAP_TAGGED_QING: /* 8 */
/* these are always on, and cannot be turned off */
break;
case SCSI_CAP_TOTAL_SECTORS: /* c */
supported = 1;
break;
case SCSI_CAP_DISCONNECT: /* 2 */
case SCSI_CAP_WIDE_XFER: /* 4 */
case SCSI_CAP_INITIATOR_ID: /* 6 */
case SCSI_CAP_DMA_MAX: /* 0 */
case SCSI_CAP_MSG_OUT: /* 1 */
case SCSI_CAP_PARITY: /* 5 */
case SCSI_CAP_LINKED_CMDS: /* a */
case SCSI_CAP_RESET_NOTIFICATION: /* e */
case SCSI_CAP_SECTOR_SIZE: /* b */
/* these are not settable */
supported = 0;
break;
default:
supported = -1;
break;
}
return (supported);
}
/*
* Function: arcmsr_tran_init_pkt
* Return Values: pointer to scsi_pkt, or NULL
* Description: simultaneously allocate both a scsi_pkt(9S) structure and
* DMA resources for that pkt.
* Called by kernel on behalf of a target driver
* calling scsi_init_pkt(9F).
* Refer to tran_init_pkt(9E) man page
* Context: Can be called from different kernel process threads.
* Can be called by interrupt thread.
* Allocates SCSI packet and DMA resources
*/
static struct
{
struct ARCMSR_CDB *arcmsr_cdb;
int old_pkt_flag;
return (NULL);
}
/* get free CCB */
return (NULL);
}
if (statuslen < sizeof (struct scsi_arq_status)) {
statuslen = sizeof (struct scsi_arq_status);
}
return (NULL);
}
/* Initialize CCB */
/* record how many sg are needed to xfer on this pkt */
ccb->pkt_ncookies = 0;
/* record how many sg we got from this window */
ccb->pkt_cookie = 0;
/* record how many windows have partial dma map set */
/* record current sg window position */
ccb->pkt_curwin = 0;
ccb->pkt_dma_len = 0;
ccb->pkt_dma_offset = 0;
/*
* we will still use this point for we want to fake some
* information in tran_start
*/
/* Initialize arcmsr_cdb */
arcmsr_cdb->Bus = 0;
/* Fill in the rest of the structure */
pkt->pkt_statistics = 0;
pkt->pkt_reason = 0;
old_pkt_flag = 0;
} else {
return (NULL);
}
}
/*
* you cannot update CdbLength with cmdlen here, it would
* cause a data compare error
*/
old_pkt_flag = 1;
}
/* Second step : dma allocation/move */
/*
* system had a lot of data trunk need to xfer, from...20 byte
* to 819200 byte.
* arcmsr_dma_alloc will get pkt_dma_handle (not null) until
* this lot of data trunk xfer done this mission will be done
* by some of continue READ or WRITE scsi command, till this
* lot of data trunk xfer completed.
* arcmsr_dma_move do the action repeatedly, and use the same
* ccb till this lot of data trunk xfer complete notice.
* when after the arcmsr_tran_init_pkt returns the solaris
* kernel is by your pkt_resid and its b_bcount to give you
* which type of scsi command descriptor to implement the
* length of following arcmsr_tran_start scsi cdb (data length)
*
* Each transfer should be aligned on a 512 byte boundary
*/
DDI_FAILURE) {
/*
* the HBA driver is unable to allocate DMA
* resources, it must free the allocated
* scsi_pkt(9S) before returning
*/
if (old_pkt_flag == 0) {
"allocation failed to free "
"scsi hba pkt");
}
return (NULL);
}
} else {
/* DMA resources to next DMA window, for old pkt */
return (NULL);
}
}
} else {
}
return (pkt);
}
/*
* Function: arcmsr_tran_start(9E)
* Description: Transport the command in pktp to the target device.
* The command is not finished when this returns, only
* sent to the target; arcmsr_intr_handler will call
* scsi_hba_pkt_comp(pktp) when the target device has done.
*
* Input: struct scsi_address *ap, struct scsi_pkt *pktp
* Output: TRAN_ACCEPT if pkt is OK and driver not busy
* TRAN_BUSY if driver is
* TRAN_BADPKT if pkt is invalid
*/
static int
{
0, 0, DDI_DMA_SYNC_FORCPU);
return (TRAN_ACCEPT);
}
/* IMPORTANT: Target 16 is a virtual device for iop message transfer */
if (target == 16) {
switch (scsicmd) {
case SCMD_INQUIRY: {
if (lun != 0) {
arcmsr_ccb_complete(ccb, 0);
return (TRAN_ACCEPT);
}
/* The EVDP and pagecode is not supported */
} else {
/* Periph Qualifier & Periph Dev Type */
inqdata[0] = DTYPE_PROCESSOR;
/* rem media bit & Dev Type Modifier */
inqdata[1] = 0;
/* ISO, ECMA, & ANSI versions */
inqdata[2] = 0;
inqdata[3] = 0;
/* length of additional data */
/* Vendor Identification */
/* Product Identification */
PIDLEN);
/* Product Revision */
}
}
arcmsr_ccb_complete(ccb, 0);
return (TRAN_ACCEPT);
}
case SCMD_WRITE_BUFFER:
case SCMD_READ_BUFFER: {
/* error just for retry */
}
arcmsr_ccb_complete(ccb, 0);
return (TRAN_ACCEPT);
}
default:
arcmsr_ccb_complete(ccb, 0);
return (TRAN_ACCEPT);
}
}
}
return (TRAN_ACCEPT);
}
}
return (TRAN_BUSY);
return (TRAN_FATAL_ERROR);
}
return (TRAN_ACCEPT);
}
/*
* Function name: arcmsr_tran_destroy_pkt
* Return Values: none
* Description: Called by kernel on behalf of a target driver
* calling scsi_destroy_pkt(9F).
* Refer to tran_destroy_pkt(9E) man page
* Context: Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
static void
{
return;
}
return;
}
if (pkt_dma_handle) {
}
}
if (pkt_dma_handle) {
(void) ddi_dma_free_handle(&pkt_dma_handle);
}
if (ccb) {
} else {
}
} else {
}
}
}
/*
* Function name: arcmsr_tran_dmafree()
* Return Values: none
* Description: free dvma resources
* Context: Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
static void
{
return;
}
}
}
}
/*
* Function name: arcmsr_tran_sync_pkt()
* Return Values: none
* Description: sync dma
* Context: Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
static void
{
return;
}
DDI_SUCCESS) {
"sync pkt failed for target %d lun %d",
}
}
}
/*
* Function: arcmsr_tran_abort(9E)
* SCSA interface routine to abort pkt(s) in progress.
* Aborts the pkt specified. If NULL pkt, aborts ALL pkts.
* Output: Return 1 if success
* Return 0 if failure
*/
static int
{
int return_code;
while (acb->ccboutstandingcount != 0) {
drv_usecwait(10000);
}
if (return_code != DDI_SUCCESS) {
return (0);
}
return (1);
}
/*
* Function: arcmsr_tran_reset(9E)
* SCSA interface routine to perform scsi resets on either
* a specified target or the bus (default).
* Output: Return 1 if success
* Return 0 if failure
*/
static int
int return_code = 1;
/* Are we in the middle of dumping core? */
if (ddi_in_panic())
return (return_code);
switch (level) {
case RESET_ALL: /* 0 */
acb->num_resets++;
if (acb->timeout_count) {
if (arcmsr_iop_reset(acb) != 0) {
}
}
break;
case RESET_TARGET: /* 1 */
return_code = 0;
break;
case RESET_BUS: /* 2 */
return_code = 0;
break;
case RESET_LUN: /* 3 */
return_code = 0;
break;
default:
return_code = 0;
}
return (return_code);
}
static int
{
int circ = 0;
int rval;
return (NDI_FAILURE);
switch (op) {
case BUS_CONFIG_ONE:
rval = NDI_FAILURE;
break;
}
}
break;
case BUS_CONFIG_DRIVER:
case BUS_CONFIG_ALL:
}
rval = NDI_SUCCESS;
break;
}
if (rval == NDI_SUCCESS)
return (rval);
}
/*
* Function name: arcmsr_dma_alloc
* Return Values: 0 if successful, -1 if failure
* Description: allocate DMA resources
* Context: Can only be called from arcmsr_tran_init_pkt()
* register struct scsi_address *ap = &((pkt)->pkt_address);
*/
static int
{
int resid = 0;
int total_ccb_xferlen = 0;
uint8_t i;
/*
* at this point the PKT SCSI CDB is empty, and dma xfer length
* is bp->b_bcount
*/
} else {
}
if (flags & PKT_CONSISTENT) {
}
if (flags & PKT_DMA_PARTIAL) {
}
if (alloc_result != DDI_SUCCESS) {
return (DDI_FAILURE);
}
switch (map_method) {
case DDI_DMA_PARTIAL_MAP:
/*
* When your main memory size larger then 4G
* DDI_DMA_PARTIAL_MAP will be touched.
*
* We've already set DDI_DMA_PARTIAL in dma_flags,
* so if it's now missing, there's something screwy
* happening. We plow on....
*/
if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
"dma partial mapping lost ...impossible case!");
}
DDI_FAILURE) {
}
DDI_FAILURE) {
}
i = 0;
/* first cookie is accessed from ccb->pkt_dmacookies[0] */
for (;;) {
i++;
if ((i == ARCMSR_MAX_SG_ENTRIES) ||
(i == ccb->pkt_ncookies) ||
(total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
break;
}
/*
* next cookie will be retrieved from
* ccb->pkt_dmacookies[i]
*/
&ccb->pkt_dmacookies[i]);
}
ccb->pkt_cookie = i;
if (total_ccb_xferlen > 512) {
if (resid != 0) {
i--;
/* modify last sg length */
}
}
return (DDI_SUCCESS);
case DDI_DMA_MAPPED:
ccb->pkt_dma_len = 0;
ccb->pkt_dma_offset = 0;
i = 0;
/* first cookie is accessed from ccb->pkt_dmacookies[0] */
for (;;) {
i++;
if ((i == ARCMSR_MAX_SG_ENTRIES) ||
(i == ccb->pkt_ncookies) ||
(total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
break;
}
/*
* next cookie will be retrieved from
* ccb->pkt_dmacookies[i]
*/
&ccb->pkt_dmacookies[i]);
}
ccb->pkt_cookie = i;
if (total_ccb_xferlen > 512) {
if (resid != 0) {
i--;
/* modify last sg length */
}
}
return (DDI_SUCCESS);
case DDI_DMA_NORESOURCES:
break;
case DDI_DMA_NOMAPPING:
break;
case DDI_DMA_TOOBIG:
break;
case DDI_DMA_INUSE:
"(should not happen)");
break;
default:
break;
}
return (DDI_FAILURE);
}
/*
* Function name: arcmsr_dma_move
* Return Values: 0 if successful, -1 if failure
* Description: move DMA resources to next DMA window
* Context: Can only be called from arcmsr_tran_init_pkt()
*/
static int
{
uint8_t i = 0;
int resid = 0;
int total_ccb_xferlen = 0;
i++;
}
/*
* If there are no more cookies remaining in this window,
* move to the next window.
*/
/*
* only dma map "partial" arrive here
*/
return (DDI_SUCCESS);
}
/* At last window, cannot move */
return (DDI_FAILURE);
}
DDI_FAILURE) {
return (DDI_FAILURE);
}
/* reset cookie pointer */
ccb->pkt_cookie = 0;
} else {
/*
* only dma map "all" arrive here
* We still have more cookies in this window,
* get the next one
* access the pkt_dma_handle remain cookie record at
* ccb->pkt_dmacookies array
*/
&ccb->pkt_dmacookies[i]);
}
/* Get remaining cookies in this window, up to our maximum */
/* retrieve and store cookies, start at ccb->pkt_dmacookies[0] */
for (;;) {
i++;
/* handled cookies count level indicator */
ccb->pkt_cookie++;
if ((i == ARCMSR_MAX_SG_ENTRIES) ||
(total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
break;
}
&ccb->pkt_dmacookies[i]);
}
if (total_ccb_xferlen > 512) {
if (resid != 0) {
i--;
/* modify last sg length */
}
}
return (DDI_SUCCESS);
}
/*ARGSUSED*/
static void
{
struct ARCMSR_CDB *arcmsr_cdb;
char *psge;
int arccdbsize = 0x30;
if (sgcount != 0) {
int length, i;
int cdb_sgcount = 0;
int total_xfer_length = 0;
/* map stor port SG list to our iop SG List. */
for (i = 0; i < sgcount; i++) {
/* Get physaddr of the current data pointer */
if (address_hi == 0) {
arccdbsize += sizeof (struct SG32ENTRY);
} else {
arccdbsize += sizeof (struct SG64ENTRY);
}
cdb_sgcount++;
}
if (arccdbsize > 256) {
}
} else {
arcmsr_cdb->DataLength = 0;
}
}
/*
* arcmsr_post_ccb - Send a protocol specific ARC send postcard to a AIOC.
*
* handle: Handle of registered ARC protocol driver
* adapter_id: AIOC unique identifier(integer)
* pPOSTCARD_SEND: Pointer to ARC send postcard
*
* This routine posts a ARC send postcard to the request post FIFO of a
* specific ARC adapter.
*/
static int
{
struct ARCMSR_CDB *arcmsr_cdb;
/* TODO: Use correct offset and size for syncing? */
return (DDI_FAILURE);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
} else {
}
if (pkt_flags & FLAG_NOINTR)
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
int ending_index, index;
} else {
}
index++;
/* if last index number set it to 0 */
if (pkt_flags & FLAG_NOINTR)
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
if (acb->cdb_phyaddr_hi32) {
} else {
}
if (pkt_flags & FLAG_NOINTR)
break;
}
}
return (DDI_SUCCESS);
}
static void
{
return;
}
}
/*
* TODO: This represents a potential race condition, and is
* ultimately a poor design decision. Revisit this code
* and solve the mutex ownership issue correctly.
*/
} else {
}
if (flag == 1) {
}
}
static void
{
if (!error) {
}
} else {
"target %d lun %d selection "
}
&acb->ccb_complete_list);
break;
case ARCMSR_DEV_ABORTED:
case ARCMSR_DEV_INIT_FAIL:
" 'ARCMSR_DEV_INIT_FAIL'");
&acb->ccb_complete_list);
break;
case SCSISTAT_CHECK_CONDITION:
&acb->ccb_complete_list);
break;
default:
"target %d lun %d isr received CMD_DONE"
" with unknown DeviceStatus (0x%x)",
/* unknown error or crc error just for retry */
&acb->ccb_complete_list);
break;
}
}
}
static void
{
return;
}
case ARCMSR_CCB_TIMEOUT:
if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
else
return;
case ARCMSR_CCB_ABORTED:
if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
else
return;
case ARCMSR_CCB_RESET:
if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
else
return;
default:
return;
}
}
}
static void
{
struct SENSE_DATA *cdb_sensedata;
struct scsi_arq_status *arq_status;
arq_status->sts_rqpkt_resid = 0;
/* auto rqsense took place */
if (err_blkno <= 0xfffffffful) {
struct scsi_extended_sense *sts_sensedata;
/* must eq CLASS_EXTENDED_SENSE (0x07) */
sts_sensedata->es_cmd_info[0] =
} else { /* 64-bit LBA */
struct scsi_descr_sense_hdr *dsp;
struct scsi_information_sense_descr *isd;
dsp = (struct scsi_descr_sense_hdr *)
dsp->ds_qual_code =
sizeof (struct scsi_information_sense_descr);
}
}
}
static int
{
uint32_t intmask_org = 0;
int i = 0;
acb->num_aborts++;
/*
* We don't support abort of a single packet. All
* callers in our kernel always do a global abort, so
* there is no point in having code to support it
* here.
*/
return (DDI_FAILURE);
}
/*
* if abortpkt is NULL, the upper layer needs us
* to abort all commands
*/
if (acb->ccboutstandingcount != 0) {
/* disable all outbound interrupt */
/* clear and abort all outbound posted Q */
/* talk to iop 331 outstanding command aborted */
(void) arcmsr_abort_host_command(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
/*
* this ccb will complete at
* hwinterrupt
*/
/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
}
}
/*
* enable outbound Post Queue, outbound
* doorbell Interrupt
*/
}
return (DDI_SUCCESS);
}
/*
* Autoconfiguration support
*/
static int
char devbuf[SCSI_MAXNAMELEN];
char *addr;
long num;
/* Parse dev name and address */
addr = "";
for (p = devbuf; *p != '\0'; p++) {
if (*p == '@') {
addr = p + 1;
*p = '\0';
} else if (*p == ':') {
*p = '\0';
break;
}
}
/* Parse target and lun */
if (*p == ',') {
lp = p + 1;
*p = '\0';
break;
}
}
return (-1);
}
return (-1);
}
return (0);
}
static int
{
-1);
if (tgt == -1)
return (DDI_FAILURE);
-1);
if (lun == -1)
return (DDI_FAILURE);
return (DDI_SUCCESS);
}
static dev_info_t *
{
char addr[SCSI_MAXNAMELEN];
char tmp[SCSI_MAXNAMELEN];
/* We don't care about non-persistent node */
if (ndi_dev_is_persistent_node(child) == 0)
continue;
continue;
break;
}
return (child);
}
static int
{
char **compatible = NULL;
int ncompatible = 0;
int rval;
rval = NDI_FAILURE;
goto finish;
}
/* Create dev node */
if (rval == NDI_SUCCESS) {
"unable to create target property for T%dL%d",
rval = NDI_FAILURE;
goto finish;
}
"unable to create lun property for T%dL%d",
rval = NDI_FAILURE;
goto finish;
}
"unable to create compatible property for T%dL%d",
rval = NDI_FAILURE;
goto finish;
}
if (rval != NDI_SUCCESS) {
(void) ndi_devi_free(ldip);
} else {
}
}
if (dipp)
return (rval);
}
static int
{
struct scsi_device sd;
int rval;
if (ldip) {
}
return (NDI_SUCCESS);
}
if (rval == SCSIPROBE_EXISTS)
scsi_unprobe(&sd);
return (rval);
}
static int
{
const char *type_str;
switch (intr_type) {
case DDI_INTR_TYPE_MSI:
type_str = "MSI";
break;
case DDI_INTR_TYPE_MSIX:
type_str = "MSIX";
break;
case DDI_INTR_TYPE_FIXED:
type_str = "FIXED";
break;
default:
type_str = "unknown";
break;
}
/* Determine number of supported interrupts */
"no interrupts of type %s, rc=0x%x, count=%d",
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
}
/*
* Get priority for first msi, assume remaining are all the same
*/
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
for (int x = 0; x < acb->intr_count; x++) {
type_str);
return (DDI_FAILURE);
}
}
/* Call ddi_intr_block_enable() for MSI */
} else {
/* Call ddi_intr_enable() for MSI non block enable */
for (int x = 0; x < acb->intr_count; x++) {
}
}
return (DDI_SUCCESS);
}
static void
{
int x;
return;
/* Disable all interrupts */
/* Call ddi_intr_block_disable() */
} else {
for (x = 0; x < acb->intr_count; x++) {
}
}
/* Call ddi_intr_remove_handler() */
for (x = 0; x < acb->intr_count; x++) {
}
}
static void
{
}
static void
{
}
static int
{
switch (wval) {
case PCI_DEVICE_ID_ARECA_1880:
case PCI_DEVICE_ID_ARECA_1882:
{
(caddr_t *)&iop_mu_regs_map0, 0,
return (DDI_FAILURE);
}
DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
&count) != DDI_DMA_MAPPED) {
return (DDI_FAILURE);
}
- PtrToNum(arcmsr_ccbs_area));
/* ioport base */
break;
}
case PCI_DEVICE_ID_ARECA_1201:
{
struct HBB_msgUnit *phbbmu;
sizeof (struct HBB_msgUnit);
/* Allocate memory for the ccb */
DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
- PtrToNum(arcmsr_ccbs_area));
/* setup device register */
(caddr_t *)&iop_mu_regs_map0, 0,
return (DDI_FAILURE);
}
/* ARCMSR_DRV2IOP_DOORBELL */
(caddr_t *)&iop_mu_regs_map1, 0,
return (DDI_FAILURE);
}
/* ARCMSR_MSGCODE_RWBUFFER */
break;
}
case PCI_DEVICE_ID_ARECA_1110:
case PCI_DEVICE_ID_ARECA_1120:
case PCI_DEVICE_ID_ARECA_1130:
case PCI_DEVICE_ID_ARECA_1160:
case PCI_DEVICE_ID_ARECA_1170:
case PCI_DEVICE_ID_ARECA_1210:
case PCI_DEVICE_ID_ARECA_1220:
case PCI_DEVICE_ID_ARECA_1230:
case PCI_DEVICE_ID_ARECA_1231:
case PCI_DEVICE_ID_ARECA_1260:
case PCI_DEVICE_ID_ARECA_1261:
case PCI_DEVICE_ID_ARECA_1270:
case PCI_DEVICE_ID_ARECA_1280:
case PCI_DEVICE_ID_ARECA_1212:
case PCI_DEVICE_ID_ARECA_1222:
case PCI_DEVICE_ID_ARECA_1380:
case PCI_DEVICE_ID_ARECA_1381:
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
{
(caddr_t *)&iop_mu_regs_map0, 0,
return (DDI_FAILURE);
}
DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
&count) != DDI_DMA_MAPPED) {
return (DDI_FAILURE);
}
- PtrToNum(arcmsr_ccbs_area));
/* ioport base */
break;
}
default:
return (DDI_FAILURE);
}
/* here we can not access pci configuration again */
/* physical address of acb->pccb_pool */
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
}
/* disable all outbound interrupt */
if (!arcmsr_iop_confirm(acb)) {
return (DDI_FAILURE);
}
}
}
/* enable outbound Post Queue, outbound doorbell Interrupt */
return (0);
}
static int
{
int raid6 = 1;
char *type;
int intr_types;
/*
* Soft State Structure
* The driver should allocate the per-device-instance
* soft state structure, being careful to clean up properly if
* an error occurs. Allocate data structure.
*/
return (DDI_FAILURE);
}
/* acb is already zalloc()d so we don't need to bzero() it */
/*
* The driver, if providing DMA, should also check that its hardware is
* installed in a DMA-capable slot
*/
" a DMA-capable slot");
goto error_level_0;
}
goto error_level_0;
}
if (wval != PCI_VENDOR_ID_ARECA) {
"'vendorid (0x%04x) does not match 0x%04x "
"(PCI_VENDOR_ID_ARECA)",
goto error_level_0;
}
switch (wval) {
case PCI_DEVICE_ID_ARECA_1110:
case PCI_DEVICE_ID_ARECA_1210:
case PCI_DEVICE_ID_ARECA_1201:
raid6 = 0;
/*FALLTHRU*/
case PCI_DEVICE_ID_ARECA_1120:
case PCI_DEVICE_ID_ARECA_1130:
case PCI_DEVICE_ID_ARECA_1160:
case PCI_DEVICE_ID_ARECA_1170:
case PCI_DEVICE_ID_ARECA_1220:
case PCI_DEVICE_ID_ARECA_1230:
case PCI_DEVICE_ID_ARECA_1260:
case PCI_DEVICE_ID_ARECA_1270:
case PCI_DEVICE_ID_ARECA_1280:
type = "SATA 3G";
break;
case PCI_DEVICE_ID_ARECA_1380:
case PCI_DEVICE_ID_ARECA_1381:
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
type = "SAS 3G";
break;
case PCI_DEVICE_ID_ARECA_1880:
type = "SAS 6G";
break;
default:
type = "X-TYPE";
goto error_level_0;
}
/* we disable iop interrupt here */
goto error_level_1;
}
/* Allocate a transport structure */
goto error_level_2;
}
/* init scsi host adapter transport entry */
/*
* If no per-target initialization is required, the HBA can leave
* tran_tgt_init set to NULL.
*/
/* iop init and enable interrupt here */
/* Get supported interrupt types */
DDI_SUCCESS) {
goto error_level_3;
}
if (intr_types & DDI_INTR_TYPE_FIXED) {
goto error_level_5;
} else if (intr_types & DDI_INTR_TYPE_MSI) {
goto error_level_5;
}
/*
* The driver should attach this instance of the device, and
* perform error cleanup if necessary
*/
goto error_level_5;
}
/* Create a taskq for dealing with dr events */
TASKQ_DEFAULTPRI, 0)) == NULL) {
goto error_level_8;
}
acb->timeout_count = 0;
/* active ccbs "timeout" watchdog */
/* report device info */
return (DDI_SUCCESS);
(void) scsi_hba_detach(dev_info);
if (acb->scsi_hba_transport)
if (acb->ccbs_acc_handle)
if (acb->ccbs_pool_handle)
if (acb->pci_acc_handle)
return (DDI_FAILURE);
}
static void
{
char buf[256];
}
}
static void
{
}
static void
{
}
static void
{
}
static void
{
}
static void
{
}
static void
{
}
static struct list_head *
{
return (NULL);
}
return (one);
}
static struct CCB *
{
&acb->ccb_complete_list);
if (first_complete_ccb_list == NULL) {
return (NULL);
}
return (ccb);
}
static struct CCB *
{
int ccb_get_index, ccb_put_index;
if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
if (ccb_put_index != ccb_get_index) {
} else {
}
return (ccb);
}
static void
{
return;
}
acb->ccb_put_index++;
acb->ccb_put_index =
}
static void
arcmsr_ccbs_timeout(void* arg)
{
int i, instance, timeout_count = 0;
if (acb->ccboutstandingcount != 0) {
/* check each ccb */
if (i != DDI_SUCCESS) {
if ((acb->timeout_id != 0) &&
/* do pkt timeout check each 60 secs */
(void*)acb, (ARCMSR_TIMEOUT_WATCH *
drv_usectohz(1000000)));
}
return;
}
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
break;
}
continue;
}
continue;
}
continue;
}
continue;
}
"scsi target %d lun %d cmd=0x%x "
"command timeout, ccb=0x%p",
/* acb->devstate[id][lun] = ARECA_RAID_GONE; */
continue;
}
}
}
if ((acb->timeout_id != 0) &&
/* do pkt timeout check each 60 secs */
}
}
static void
{
int i;
/* disable all outbound interrupts */
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
}
}
}
/* enable outbound Post Queue, outbound doorbell Interrupt */
}
static int
{
struct scsi_device sd;
int rval;
if (rval == SCSIPROBE_EXISTS) {
if (rval != NDI_SUCCESS) {
} else {
}
}
} else {
if (rval == SCSIPROBE_EXISTS)
}
scsi_unprobe(&sd);
return (rval);
}
static void
{
char *devicemap;
char temp;
char diff;
int circ = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
devicemap = (char *)
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
break;
}
}
if (diff != 0) {
*acb_dev_map = temp;
(void) arcmsr_scsi_device_probe(acb,
} else if ((temp & 0x01) == 0 &&
lun);
if (mutex_owned(&acb->
isr_mutex)) {
(void)
dip,
} else {
mutex_enter(&acb->
(void)
dip,
mutex_exit(&acb->
}
}
}
temp >>= 1;
diff >>= 1;
}
}
devicemap++;
acb_dev_map++;
}
}
static void
arcmsr_devMap_monitor(void* arg)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
break;
}
}
if ((acb->timeout_id != 0) &&
/* do pkt timeout check each 5 secs */
}
}
static uint32_t
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
/* disable all outbound interrupt */
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
/* disable all outbound interrupt */
/* disable all interrupts */
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
/* disable all outbound interrupt */
break;
}
}
return (intmask_org);
}
static void
int mask;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
/*
* enable outbound Post Queue, outbound doorbell message0
* Interrupt
*/
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
/* 1=interrupt enable, 0=interrupt disable */
intmask_org | mask);
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
/* enable outbound Post Queue,outbound doorbell Interrupt */
break;
}
}
}
static void
{
/* stop adapter background rebuild */
/* disable all outbound interrupt */
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
}
/*
* enable outbound Post Queue
* enable outbound doorbell Interrupt
*/
}
}
static uint8_t
{
uint32_t i;
struct HBA_msgUnit *phbamu;
do {
for (i = 0; i < 100; i++) {
&phbamu->outbound_intstatus) &
/* clear interrupt */
return (TRUE);
}
drv_usecwait(10000);
if (ddi_in_panic()) {
/* clear interrupts */
return (TRUE);
}
} /* max 1 second */
return (FALSE);
}
static uint8_t
{
struct HBB_msgUnit *phbbmu;
uint32_t i;
do {
for (i = 0; i < 100; i++) {
/* clear interrupt */
return (TRUE);
}
drv_usecwait(10000);
if (ddi_in_panic()) {
/* clear interrupts */
return (TRUE);
}
} /* max 1 second */
return (FALSE);
}
static uint8_t
{
uint32_t i;
struct HBC_msgUnit *phbcmu;
do {
for (i = 0; i < 100; i++) {
&phbcmu->outbound_doorbell) &
/* clear interrupt */
&phbcmu->outbound_doorbell_clear, c);
return (TRUE);
}
drv_usecwait(10000);
if (ddi_in_panic()) {
/* clear interrupts */
&phbcmu->outbound_doorbell_clear, c);
return (TRUE);
}
} /* max 1 second */
return (FALSE);
}
static void
struct HBA_msgUnit *phbamu;
int retry_count = 30;
/* enlarge wait flush adapter cache time: 10 minutes */
do {
if (arcmsr_hba_wait_msgint_ready(acb)) {
break;
} else {
retry_count--;
}
} while (retry_count != 0);
}
static void
struct HBB_msgUnit *phbbmu;
int retry_count = 30;
/* enlarge wait flush adapter cache time: 10 minutes */
do {
if (arcmsr_hbb_wait_msgint_ready(acb)) {
break;
} else {
retry_count--;
}
} while (retry_count != 0);
}
static void
{
struct HBC_msgUnit *phbcmu;
int retry_count = 30;
/* enlarge wait flush adapter cache time: 10 minutes */
do {
if (arcmsr_hbc_wait_msgint_ready(acb)) {
break;
} else {
retry_count--;
}
} while (retry_count != 0);
}
static uint8_t
{
if (!arcmsr_hba_wait_msgint_ready(acb)) {
"timeout while waiting for 'abort all "
"outstanding commands'");
return (0xff);
}
return (0x00);
}
static uint8_t
{
if (!arcmsr_hbb_wait_msgint_ready(acb)) {
"timeout while waiting for 'abort all "
"outstanding commands'");
return (0x00);
}
return (0x00);
}
static uint8_t
{
if (!arcmsr_hbc_wait_msgint_ready(acb)) {
"timeout while waiting for 'abort all "
"outstanding commands'");
return (0xff);
}
return (0x00);
}
static void
{
int i = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
/* clear and abort all outbound posted Q */
/* clear interrupt */
(i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
/* frame must be 32 bytes aligned */
/* the CDB is the first field of the CCB */
/* check if command done with no error */
}
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
/* clear all outbound posted Q */
/* clear doorbell interrupt */
for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
phbbmu->done_qbuffer[i] = 0;
/* frame must be 32 bytes aligned */
(flag_ccb << 5)));
/* check if command done with no error */
error =
(flag_ccb &
}
phbbmu->post_qbuffer[i] = 0;
} /* drain reply FIFO */
phbbmu->doneq_index = 0;
phbbmu->postq_index = 0;
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
&phbcmu->host_int_status) &
(i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
/* need to do */
/* frame must be 32 bytes aligned */
}
break;
}
}
}
/*
* Routine Description: try to get echo from iop.
* Arguments:
* Return Value: Nothing.
*/
static uint8_t
{
struct HBA_msgUnit *phbamu;
if (!arcmsr_hba_wait_msgint_ready(acb)) {
"... timeout ...");
rtnval = 0xFF;
}
/* enable all outbound interrupt */
}
return (rtnval);
}
/*
* Routine Description: Reset 80331 iop.
* Arguments:
* Return Value: Nothing.
*/
static uint8_t
{
int i = 0;
if (acb->ccboutstandingcount > 0) {
/* disable all outbound interrupt */
/* clear and abort all outbound posted Q */
/* talk to iop 331 outstanding command aborted */
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
/* ccb->ccb_state = ARCMSR_CCB_RESET; */
}
}
/* enable all outbound interrupt */
} else {
}
return (rtnval);
}
static struct QBUFFER *
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
break;
}
}
return (qb);
}
static struct QBUFFER *
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
break;
}
}
return (qbuffer);
}
static void
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
/* let IOP know the data has been read */
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
/* let IOP know the data has been read */
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
/* let IOP know data has been read */
break;
}
}
}
static void
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct HBA_msgUnit *phbamu;
/*
* push inbound doorbell tell iop, driver data write ok
* and wait reply on next hwinterrupt for next Qbuffer post
*/
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
/*
* push inbound doorbell tell iop, driver data was writen
* successfully, then await reply on next hwinterrupt for
* next Qbuffer post
*/
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
/*
* push inbound doorbell tell iop, driver data write ok
* and wait reply on next hwinterrupt for next Qbuffer post
*/
break;
}
}
}
static void
{
int32_t allxfer_len = 0;
(allxfer_len < 124)) {
acb->wqbuf_firstidx++;
/* if last index number set it to 0 */
iop_data++;
allxfer_len++;
}
/*
* push inbound doorbell and wait reply at hwinterrupt
* routine for next Qbuffer post
*/
}
}
static void
{
struct HBA_msgUnit *phbamu;
if (!arcmsr_hba_wait_msgint_ready(acb))
"timeout while waiting for background rebuild completion");
}
static void
{
struct HBB_msgUnit *phbbmu;
if (!arcmsr_hbb_wait_msgint_ready(acb))
"timeout while waiting for background rebuild completion");
}
static void
{
struct HBC_msgUnit *phbcmu;
if (!arcmsr_hbc_wait_msgint_ready(acb))
"timeout while waiting for background rebuild completion");
}
static int
{
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
int retvalue = 0, transfer_len = 0;
char *buffer;
/* 4 bytes: Areca io control code */
if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
goto message_out;
}
switch (controlcode) {
{
unsigned long *ver_addr;
int32_t allxfer_len = 0;
acb->rqbuf_firstidx++;
ptmpQbuffer++;
allxfer_len++;
}
while (iop_len > 0) {
acb->rqbuf_lastidx++;
iop_data++;
iop_len--;
}
}
break;
}
{
(void) memcpy(ptmpuserbuffer,
if (wqbuf_lastidx != wqbuf_firstidx) {
struct scsi_arq_status *arq_status;
arq_status = (struct scsi_arq_status *)
sizeof (struct scsi_arq_status));
arq_status->sts_rqpkt_resid = 0;
struct scsi_extended_sense *sts_sensedata;
/* has error report sensedata */
/* AdditionalSenseLength */
/* AdditionalSenseCode */
}
} else {
(ARCMSR_MAX_QBUFFER - 1);
if (my_empty_len >= user_len) {
while (user_len > 0) {
acb->wqbuf_lastidx];
ptmpuserbuffer, 1);
acb->wqbuf_lastidx++;
acb->wqbuf_lastidx %=
user_len--;
}
}
} else {
struct scsi_arq_status *arq_status;
/* has error report sensedata */
arq_status = (struct scsi_arq_status *)
sizeof (struct scsi_arq_status));
arq_status->sts_rqpkt_resid = 0;
struct scsi_extended_sense *
/* has error report sensedata */
/* AdditionalSenseLength */
/* AdditionalSenseCode */
}
}
}
break;
}
}
acb->rqbuf_firstidx = 0;
acb->rqbuf_lastidx = 0;
break;
}
acb->wqbuf_firstidx = 0;
acb->wqbuf_lastidx = 0;
break;
}
acb->rqbuf_firstidx = 0;
acb->rqbuf_lastidx = 0;
acb->wqbuf_firstidx = 0;
acb->wqbuf_lastidx = 0;
break;
break;
/*
* Not supported - ARCMSR_MESSAGE_SAY_HELLO
*/
break;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
}
break;
default:
}
return (retvalue);
}
static void
{
int i = 0;
/* disable all outbound interrupts */
(void) arcmsr_disable_allintr(acb);
/* stop adapter background rebuild */
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
}
/* abort all outstanding commands */
if (acb->ccboutstandingcount != 0) {
/* clear and abort all outbound posted Q */
/* talk to iop outstanding command aborted */
(void) arcmsr_abort_host_command(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
/* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
}
}
}
}
/* get firmware miscellaneous data */
static void
{
struct HBA_msgUnit *phbamu;
char *acb_firm_model;
char *acb_firm_version;
char *acb_device_map;
char *iop_firm_model;
char *iop_firm_version;
char *iop_device_map;
int count;
/* firm_model, 15 */
/* firm_version, 17 */
/* device_map, 21 */
if (!arcmsr_hba_wait_msgint_ready(acb))
"timeout while waiting for adapter firmware "
"miscellaneous data");
count = 8;
while (count) {
count--;
}
count = 16;
while (count) {
count--;
}
count = 16;
while (count) {
count--;
}
acb->firm_version);
/* firm_request_len, 1 */
/* firm_numbers_queue, 2 */
/* firm_sdram_size, 3 */
/* firm_ide_channels, 4 */
}
/* get firmware miscellaneous data */
static void
{
struct HBB_msgUnit *phbbmu;
char *acb_firm_model;
char *acb_firm_version;
char *acb_device_map;
char *iop_firm_model;
char *iop_firm_version;
char *iop_device_map;
int count;
/* firm_model, 15 */
iop_firm_model = (char *)
/* firm_version, 17 */
iop_firm_version = (char *)
/* device_map, 21 */
iop_device_map = (char *)
if (!arcmsr_hbb_wait_msgint_ready(acb))
"timeout while waiting for adapter firmware "
"miscellaneous data");
count = 8;
while (count) {
count--;
}
count = 16;
while (count) {
count--;
}
count = 16;
while (count) {
count--;
}
acb->firm_version);
/* firm_request_len, 1 */
/* firm_numbers_queue, 2 */
/* firm_sdram_size, 3 */
/* firm_ide_channels, 4 */
}
/* get firmware miscellaneous data */
static void
{
struct HBC_msgUnit *phbcmu;
char *acb_firm_model;
char *acb_firm_version;
char *acb_device_map;
char *iop_firm_model;
char *iop_firm_version;
char *iop_device_map;
int count;
/* firm_model, 15 */
/* firm_version, 17 */
/* device_map, 21 */
/* post "get config" instruction */
if (!arcmsr_hbc_wait_msgint_ready(acb))
"timeout while waiting for adapter firmware "
"miscellaneous data");
count = 8;
while (count) {
count--;
}
count = 16;
while (count) {
count--;
}
count = 16;
while (count) {
count--;
}
acb->firm_version);
/* firm_request_len, 1, 04-07 */
/* firm_numbers_queue, 2, 08-11 */
/* firm_sdram_size, 3, 12-15 */
/* firm_ide_channels, 4, 16-19 */
/* firm_cfg_version, 25, 100-103 */
}
/* start background rebuild */
static void
struct HBA_msgUnit *phbamu;
if (!arcmsr_hba_wait_msgint_ready(acb))
"timeout while waiting for background rebuild to start");
}
static void
struct HBB_msgUnit *phbbmu;
if (!arcmsr_hbb_wait_msgint_ready(acb))
"timeout while waiting for background rebuild to start");
}
static void
struct HBC_msgUnit *phbcmu;
if (!arcmsr_hbc_wait_msgint_ready(acb))
"timeout while waiting for background rebuild to start");
}
static void
{
struct HBA_msgUnit *phbamu;
uint32_t poll_count = 0;
/* TODO: Use correct offset and size for syncing? */
return;
for (;;) {
if (poll_ccb_done) {
/* chip FIFO no ccb for completion already */
break;
} else {
drv_usecwait(25000);
break;
}
if (acb->ccboutstandingcount == 0) {
break;
}
poll_count++;
&phbamu->outbound_intstatus) &
outbound_intstatus); /* clear interrupt */
}
}
/* frame must be 32 bytes aligned */
/* check if command done with no error */
continue;
}
continue;
}
"polling op got unexpected ccb command done");
continue;
}
} /* drain reply FIFO */
}
static void
{
struct HBB_msgUnit *phbbmu;
uint32_t poll_count = 0;
int index;
/* Use correct offset and size for syncing */
return;
for (;;) {
if (poll_ccb_done) {
/* chip FIFO no ccb for completion already */
break;
} else {
drv_usecwait(25000);
break;
if (acb->ccboutstandingcount == 0)
break;
poll_count++;
/* clear doorbell interrupt */
}
}
index++;
/* if last index number set it to 0 */
/* check if command done with no error */
/* frame must be 32 bytes aligned */
/* check if command done with no error */
continue;
}
continue;
}
"polling op got unexpect ccb command done");
continue;
}
} /* drain reply FIFO */
}
static void
{
struct HBC_msgUnit *phbcmu;
uint32_t poll_count = 0;
/* Use correct offset and size for syncing */
return;
for (;;) {
&phbcmu->host_int_status) &
if (poll_ccb_done) {
/* chip FIFO no ccb for completion already */
break;
} else {
drv_usecwait(25000);
break;
}
if (acb->ccboutstandingcount == 0) {
break;
}
poll_count++;
}
}
/* frame must be 32 bytes aligned */
/* the CDB is the first field of the CCB */
/* check if command done with no error */
continue;
}
continue;
}
"polling op got unexpected ccb command done");
continue;
}
} /* drain reply FIFO */
}
/*
* Function: arcmsr_hba_hardware_reset()
* Bug Fix for Intel IOP cause firmware hang on.
* and kernel panic
*/
static void
{
struct HBA_msgUnit *phbamu;
int i;
/* backup pci config data */
for (i = 0; i < 64; i++) {
}
/* hardware reset signal */
if ((PCI_DEVICE_ID_ARECA_1680 ==
} else {
}
drv_usecwait(1000000);
/* write back pci config data */
for (i = 0; i < 64; i++) {
}
drv_usecwait(1000000);
}
/*
* Function: arcmsr_abort_host_command
*/
static uint8_t
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
}
return (rtnval);
}
/*
* Function: arcmsr_handle_iop_bus_hold
*/
static void
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
int retry_count = 0;
acb->timeout_count = 0;
drv_usecwait(1000000);
&phbamu->outbound_msgaddr1) &
ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
if (retry_count > 60) {
"waiting for hardware"
"bus reset return, RETRY TERMINATED!!");
return;
}
retry_count++;
goto sleep_again;
}
break;
}
}
}
static void
int my_empty_len, iop_len;
int rqbuf_firstidx, rqbuf_lastidx;
/* check this iop data if overflow my rqbuffer */
(ARCMSR_MAX_QBUFFER - 1);
if (my_empty_len >= iop_len) {
while (iop_len > 0) {
/* if last index number set it to 0 */
iop_data++;
iop_len--;
}
/* signature, let IOP know data has been read */
} else {
}
}
static void
/*
* check if there are any mail packages from user space program
* in my post bag, now is the time to send them into Areca's firmware
*/
int allxfer_len = 0;
(allxfer_len < 124)) {
acb->wqbuf_firstidx++;
/* if last index number set it to 0 */
iop_data++;
allxfer_len++;
}
/*
* push inbound doorbell, tell iop driver data write ok
* await reply on next hwinterrupt for next Qbuffer post
*/
}
}
static void
{
struct HBA_msgUnit *phbamu;
/*
* Maybe here we need to check wrqbuffer_lock is locked or not
* DOORBELL: ding! dong!
* check if there are any mail need to pack from firmware
*/
/* clear doorbell interrupt */
}
static void
{
struct HBC_msgUnit *phbcmu;
/*
* Maybe here we need to check wrqbuffer_lock is locked or not
* DOORBELL: ding! dong!
* check if there are any mail need to pick from firmware
*/
outbound_doorbell); /* clear interrupt */
}
}
/* messenger of "driver to iop commands" */
}
}
static void
{
(void (*)(void *))arcmsr_dr_handle,
}
}
static void
{
/* clear interrupts */
(void (*)(void *))arcmsr_dr_handle,
}
}
static void
{
(void (*)(void *))arcmsr_dr_handle,
}
}
static void
{
struct HBA_msgUnit *phbamu;
/* areca cdb command done */
/* Use correct offset and size for syncing */
/* frame must be 32 bytes aligned */
/* check if command done with no error */
} /* drain reply FIFO */
}
static void
{
struct HBB_msgUnit *phbbmu;
int index;
/* areca cdb command done */
return;
/* frame must be 32 bytes aligned */
/* the CDB is the first field of the CCB */
/* check if command done with no error */
index++;
/* if last index number set it to 0 */
} /* drain reply FIFO */
}
static void
{
struct HBC_msgUnit *phbcmu;
/* areca cdb command done */
/* Use correct offset and size for syncing */
&phbcmu->host_int_status) &
/* check if command done with no error */
/* frame must be 32 bytes aligned */
/* the CDB is the first field of the CCB */
/* check if command done with no error */
if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
break;
}
throttling++;
} /* drain reply FIFO */
}
static uint_t
struct HBA_msgUnit *phbamu;
if (outbound_intstatus == 0) /* it must be a shared irq */
return (DDI_INTR_UNCLAIMED);
outbound_intstatus); /* clear interrupt */
/* MU doorbell interrupts */
/* MU post queue interrupts */
}
return (DDI_INTR_CLAIMED);
}
static uint_t
struct HBB_msgUnit *phbbmu;
if (outbound_doorbell == 0) /* it must be a shared irq */
return (DDI_INTR_UNCLAIMED);
/* clear doorbell interrupt */
/* wait a cycle */
/* MU ioctl transfer doorbell interrupts */
/* MU post queue interrupts */
/* MU message interrupt */
}
return (DDI_INTR_CLAIMED);
}
static uint_t
{
struct HBC_msgUnit *phbcmu;
/* check outbound intstatus */
if (host_interrupt_status == 0) /* it must be share irq */
return (DDI_INTR_UNCLAIMED);
/* MU ioctl transfer doorbell interrupts */
/* messenger of "ioctl message read write" */
}
/* MU post queue interrupts */
/* messenger of "scsi commands" */
}
return (DDI_INTR_CLAIMED);
}
static uint_t
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
default:
/* We should never be here */
ASSERT(0);
break;
}
}
return (retrn);
}
static void
firmware_state = 0;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
{
struct HBA_msgUnit *phbamu;
do {
} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
== 0);
break;
}
case ACB_ADAPTER_TYPE_B:
{
struct HBB_msgUnit *phbbmu;
do {
} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
break;
}
case ACB_ADAPTER_TYPE_C:
{
struct HBC_msgUnit *phbcmu;
do {
} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
== 0);
break;
}
}
}
static void
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct HBA_msgUnit *phbamu;
/* empty doorbell Qbuffer if door bell rung */
/* clear doorbell interrupt */
break;
}
case ACB_ADAPTER_TYPE_B: {
struct HBB_msgUnit *phbbmu;
/* clear interrupt and message state */
/* let IOP know data has been read */
break;
}
case ACB_ADAPTER_TYPE_C: {
struct HBC_msgUnit *phbcmu;
/* empty doorbell Qbuffer if door bell ringed */
/* clear outbound doobell isr */
/* let IOP know data has been read */
break;
}
}
}
static uint32_t
/*
* here we need to tell iop 331 about our freeccb.HighPart
* if freeccb.HighPart is non-zero
*/
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
if (cdb_phyaddr_hi32 != 0) {
struct HBA_msgUnit *phbamu;
&phbamu->msgcode_rwbuffer[0],
if (!arcmsr_hba_wait_msgint_ready(acb)) {
"timeout setting ccb "
"high physical address");
return (FALSE);
}
}
break;
/* if adapter is type B, set window of "post command queue" */
case ACB_ADAPTER_TYPE_B: {
struct HBB_msgUnit *phbbmu;
phbbmu->postq_index = 0;
phbbmu->doneq_index = 0;
if (!arcmsr_hbb_wait_msgint_ready(acb)) {
"queue window");
return (FALSE);
}
/* driver "set config" signature */
/* normal should be zero */
/* postQ size (256+8)*4 */
/* doneQ size (256+8)*4 */
post_queue_phyaddr+1056);
/* ccb maxQ size must be --> [(256+8)*4] */
if (!arcmsr_hbb_wait_msgint_ready(acb)) {
"timeout setting command queue window");
return (FALSE);
}
if (!arcmsr_hbb_wait_msgint_ready(acb)) {
return (FALSE);
}
break;
}
case ACB_ADAPTER_TYPE_C:
if (cdb_phyaddr_hi32 != 0) {
struct HBC_msgUnit *phbcmu;
&phbcmu->msgcode_rwbuffer[0],
if (!arcmsr_hbc_wait_msgint_ready(acb)) {
"high part physical address' timeout");
return (FALSE);
}
}
break;
}
return (TRUE);
}
/*
* ONLY used for Adapter type B
*/
static void
{
struct HBB_msgUnit *phbbmu;
if (!arcmsr_hbb_wait_msgint_ready(acb))
}
/* start background rebuild */
static void
{
/* disable all outbound interrupt */
(void) arcmsr_iop_confirm(acb);
/* start background rebuild */
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
break;
case ACB_ADAPTER_TYPE_B:
break;
case ACB_ADAPTER_TYPE_C:
break;
}
/* empty doorbell Qbuffer if door bell rang */
/* enable outbound Post Queue, outbound doorbell Interrupt */
}