/* amr.c revision 89b43686db1fe9681d80a7cf5662730cb9378cae */
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright (c) 2011 Bayard G. Bell. All rights reserved.
*/
/*
* Copyright (c) 1999,2000 Michael Smith
* Copyright (c) 2000 BSDi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2002 Eric Moore
* Copyright (c) 2002 LSI Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The party using or redistributing the source code and binary forms
* agrees to the disclaimer below and the terms and conditions set forth
* herein.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/int_types.h>
#include <sys/byteorder.h>
#include "amrreg.h"
#include "amrvar.h"
/* dynamic debug symbol */
/*
 * Set non-zero to enable driver debug output.  Presumably consumed by
 * the driver's debug/trace macros (not visible in this chunk) and
 * intended to be patched at runtime -- TODO confirm against amrvar.h.
 */
int amr_debug_var = 0;
/*
 * NOTE(review): the lines below appear to be the bodies of two
 * busy-wait helper macros -- each polls `cond` for up to `count`
 * iterations, setting `done_flag` to 0 on timeout; the second variant
 * additionally delays 100us per iteration via drv_usecwait().  The
 * `#define` header lines are missing from this extraction, so these
 * fragments do not compile as-is; restore the macro headers from the
 * upstream amr.c before building.
 */
int local_counter = 0; \
done_flag = 1; \
while (!(cond)) { \
if ((local_counter) > count) { \
done_flag = 0; \
break; \
} \
(local_counter)++; \
} \
}
int local_counter = 0; \
done_flag = 1; \
while (!(cond)) { \
drv_usecwait(100); \
if ((local_counter) > count) { \
done_flag = 0; \
break; \
} \
(local_counter)++; \
} \
}
/*
* driver interfaces
*/
/*
* Command wrappers
*/
/*
* Command processing.
*/
unsigned int capacity);
/*
* Status monitoring
*/
static void amr_periodic(void *data);
/*
* Interface-specific shims
*/
static void amr_start_waiting_queue(void *softp);
/*
* SCSI interface
*/
/*
* Function prototypes
*
* SCSA functions exported by means of the transport table
*/
int whom);
/*
 * DMA attributes for data-buffer mappings: scatter/gather up to
 * AMR_NSEG segments, 4-byte alignment, AMR_BLKSIZE granularity, and a
 * 24-bit (0x00ffffff) maximum DMAable byte count per object.
 */
static ddi_dma_attr_t buffer_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffull, /* highest usable address */
0x00ffffffull, /* maximum DMAable byte count */
4, /* alignment */
1, /* burst sizes */
1, /* minimum transfer */
0xffffffffull, /* maximum transfer */
0xffffffffull, /* maximum segment length */
AMR_NSEG, /* maximum number of segments */
AMR_BLKSIZE, /* granularity */
0, /* flags (reserved) */
};
/*
 * DMA attributes for single-segment, address-bound allocations
 * (sgllen == 1, granularity 1).  Presumably used for the driver's
 * control structures (mailbox / s/g tables) rather than I/O data --
 * TODO confirm against the ddi_dma_alloc_handle() call sites, which
 * are elided in this extraction.
 */
static ddi_dma_attr_t addr_dma_attr = {
DMA_ATTR_V0, /* version of this structure */
0, /* lowest usable address */
0xffffffffull, /* highest usable address */
0x7fffffff, /* maximum DMAable byte count */
4, /* alignment */
1, /* burst sizes */
1, /* minimum transfer */
0xffffffffull, /* maximum transfer */
0xffffffffull, /* maximum segment length */
1, /* maximum number of segments */
1, /* granularity */
0, /* flags (reserved) */
};
/*
 * NOTE(review): this initializer body appears to belong to the
 * driver's struct dev_ops (amr_ops -- it is referenced as &amr_ops in
 * the modldrv fragment below, and names amr_info/amr_attach/amr_detach
 * entry points).  The "static struct dev_ops amr_ops = {" header line
 * is missing from this extraction and must be restored before this
 * compiles.
 */
DEVO_REV, /* devo_rev, */
0, /* refcnt */
amr_info, /* info */
nulldev, /* identify */
nulldev, /* probe */
amr_attach, /* attach */
amr_detach, /* detach */
nodev, /* reset */
NULL, /* driver operations */
(struct bus_ops *)0, /* bus operations */
0, /* power */
ddi_quiesce_not_supported, /* devo_quiesce */
};
extern struct mod_ops mod_driverops;
/*
 * NOTE(review): the "static struct modldrv modldrv = {" header line
 * appears to be missing from this extraction ahead of the three
 * initializers below.
 */
&mod_driverops, /* Type of module. driver here */
"AMR Driver", /* Name of the module. */
&amr_ops, /* Driver ops vector */
};
/*
 * NOTE(review): the MODREV_1 member and the NULL terminator of the
 * modlinkage initializer also appear to be elided here.
 */
static struct modlinkage modlinkage = {
&modldrv,
};
/* DMA access attributes */
/*
 * NOTE(review): the member initializers (DDI_DEVICE_ATTR_V0 plus the
 * endianness / data-ordering flags) appear to have been elided from
 * this extraction; an empty initializer list is not valid here.
 */
static ddi_device_acc_attr_t accattr = {
};
/*
 * Per-instance soft-state anchor; torn down with ddi_soft_state_fini()
 * in _init()'s error paths and in _fini() below.
 */
static struct amr_softs *amr_softstatep;
/*
 * _init: loadable-module load entry point.
 *
 * NOTE(review): interior lines are missing from this extraction -- the
 * ddi_soft_state_init() call that takes "sizeof (struct amr_softs), 0"
 * (only its argument tail survives below), the scsi_hba_init()/
 * mod_install() calls that set `error`, and the error_out label are
 * all elided, leaving unbalanced braces and duplicated return paths.
 * What remains shows the intent: on any failure, release the
 * soft-state anchor with ddi_soft_state_fini() and bail out; on
 * success, return the (zero) error code.
 */
int
_init(void)
{
int error;
sizeof (struct amr_softs), 0);
if (error != 0)
goto error_out;
ddi_soft_state_fini((void*)&amr_softstatep);
goto error_out;
}
if (error != 0) {
ddi_soft_state_fini((void*)&amr_softstatep);
goto error_out;
}
return (error);
return (error);
}
int
{
}
/*
 * _fini: loadable-module unload entry point.
 *
 * NOTE(review): the mod_remove() call that sets `error`, and its
 * failure check guarding the first return, are elided in this
 * extraction (hence the unmatched brace).  On successful removal the
 * soft-state anchor is released with ddi_soft_state_fini() before
 * returning.
 */
int
_fini(void)
{
int error;
return (error);
}
ddi_soft_state_fini((void*)&amr_softstatep);
return (error);
}
static int
{
int error;
int instance;
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
return (DDI_FAILURE);
default:
return (DDI_FAILURE);
}
/*
* Initialize softs.
*/
return (DDI_FAILURE);
!= DDI_SUCCESS) {
goto error_out;
}
if (error != DDI_SUCCESS) {
goto error_out;
}
/*
* Determine board type.
*/
/*
* Make sure we are going to be able to talk to this board.
*/
if ((command & PCI_COMM_MAE) == 0) {
goto error_out;
}
/* force the busmaster enable bit on */
if (!(command & PCI_COMM_ME)) {
command |= PCI_COMM_ME;
if (!(command & PCI_COMM_ME))
goto error_out;
}
/*
* Allocate and connect our interrupt.
*/
if (ddi_intr_hilevel(dev, 0) != 0) {
"High level interrupt is not supported!"));
goto error_out;
}
!= DDI_SUCCESS) {
goto error_out;
}
/* sychronize waits for the busy slots via this cv */
/*
* Do bus-independent initialisation, bring controller online.
*/
goto error_out;
goto error_out;
goto error_out;
/*
* A taskq is created for dispatching the waiting queue processing
* thread. The threads number equals to the logic drive number and
* the thread number should be 1 if there is no logic driver is
* configured for this instance.
*/
goto error_out;
}
goto error_out;
}
/* set up the tran interface */
goto error_out;
}
/* schedule a thread for periodic check */
/* print firmware information in verbose mode */
/* clear any interrupts */
return (DDI_SUCCESS);
}
}
for (i = 0; i < softs->sg_max_count; i++) {
(void) ddi_dma_unbind_handle(
(void) ddi_dma_mem_free(
(void) ddi_dma_free_handle(
}
}
}
}
return (DDI_FAILURE);
}
/*
* Bring the controller down to a dormant state and detach all child devices.
* This function is called during detach, system shutdown.
*
* Note that we can assume that the bufq on the controller is empty, as we won't
* allow shutdown if any device is open.
*/
/*ARGSUSED*/
{
int instance;
/* flush the controllor */
return (EIO);
}
/* release the amr timer */
}
for (i = 0; i < softs->sg_max_count; i++) {
(void) ddi_dma_unbind_handle(
(void) ddi_dma_mem_free(
(void) ddi_dma_free_handle(
}
/* disconnect the interrupt handler */
/* wait for the completion of current in-progress interruptes */
if (!done_flag) {
}
(void) scsi_hba_detach(dev);
/* print firmware information in verbose mode */
return (DDI_SUCCESS);
}
/*ARGSUSED*/
{
int instance;
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
return (DDI_SUCCESS);
} else {
return (DDI_FAILURE);
}
case DDI_INFO_DEVT2INSTANCE:
break;
default:
break;
}
return (DDI_SUCCESS);
}
/*
* Take an interrupt, or be poked by other code to look for interrupt-worthy
* status.
*/
static uint_t
{
return (DDI_INTR_UNCLAIMED);
}
/* collect finished commands, queue anything waiting */
return (DDI_INTR_CLAIMED);
}
/*
* Setup the amr mailbox
*/
static int
{
if (ddi_dma_alloc_handle(
NULL,
goto error_out;
}
if (ddi_dma_mem_alloc(
sizeof (struct amr_mailbox) + 16,
&accattr,
NULL,
&mbox_len,
&softs->mbox_acc_handle) !=
DDI_SUCCESS) {
goto error_out;
}
NULL,
NULL,
goto error_out;
}
goto error_out;
/* The phy address of mailbox must be aligned on a 16-byte boundary */
return (DDI_SUCCESS);
if (softs->mbox_dma_cookien)
if (softs->mbox_acc_handle) {
}
if (softs->mbox_dma_handle) {
}
return (DDI_FAILURE);
}
/*
* Perform a periodic check of the controller status
*/
/*
 * amr_periodic: periodic watchdog for outstanding controller commands.
 *
 * Scans the per-slot command table (0 .. sg_max_count-1) looking for
 * packets whose age -- measured against ddi_get_time() -- exceeds the
 * timeout.  A timed-out packet is logged (softs/pkt/slot-index/ac),
 * pulled from the busy index with amr_busyslots decremented, and its
 * pkt completion callback is invoked.  The timer re-arms itself at the
 * end of each pass ("restart the amr timer").
 *
 * NOTE(review): many interior lines are missing from this extraction
 * (the softs/pkt local initialization from `data`, the actual timeout
 * comparison, mutex enter/exit pairs, the cv_broadcast when
 * amr_busyslots reaches 0, and the timeout(9F) rearm call) -- restore
 * from upstream before building.
 */
static void
amr_periodic(void *data)
{
uint32_t i;
register struct amr_command *ac;
for (i = 0; i < softs->sg_max_count; i++) {
continue;
continue;
}
(ddi_get_time() -
"!timed out packet detected,\
sc = %p, pkt = %p, index = %d, ac = %p",
(void *)softs,
(void *)pkt,
i,
/* pull command from the busy index */
if (softs->amr_busyslots > 0)
softs->amr_busyslots--;
if (softs->amr_busyslots == 0)
/* call pkt callback */
}
} else {
}
}
/* restart the amr timer */
}
/*
* Interrogate the controller for the operational parameters we require.
*/
static int
{
struct amr_enquiry3 *aex;
struct amr_prodinfo *ap;
struct amr_enquiry *ae;
int instance;
/*
* If we haven't found the real limit yet, let us have a couple of
* commands in order to be able to probe.
*/
/*
* Try to issue an ENQUIRY3 command
*/
" drive %d: size: %d state %x properties %x\n",
ldrv,
"!instance %d log-drive %d is offline",
else
softs->amr_nlogdrives++;
}
"Cannot obtain product data from controller"));
return (EIO);
}
} else {
so try another way"));
/* failed, try the 8LD ENQUIRY commands */
AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
== NULL) {
AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
== NULL) {
"Cannot obtain configuration data"));
return (EIO);
}
ae->ae_signature = 0;
}
/*
* Fetch current state of logical drives.
*/
" ********* drive %d: %d state %x properties %x",
ldrv,
"!instance %d log-drive %d is offline",
else
softs->amr_nlogdrives++;
}
}
/*
* Mark remaining drives as unused.
*/
/*
* doesn't trust the controller's reported value, and lockups have
* been seen when we do.
*/
return (DDI_SUCCESS);
}
/*
 * Run a generic enquiry-style command.
 */
/*
 * NOTE(review): the function name/parameter line between "static void *"
 * and "{" is missing from this extraction (presumably the amr_enquiry()
 * helper -- it builds a command, runs it, and returns the allocated
 * response buffer or NULL on failure).  The command-building and
 * polling lines are also elided; restore from upstream.
 */
static void *
{
struct amr_command ac;
void *result;
/* set command flags */
/* build the command proper */
return (NULL);
return (NULL);
}
/* allocate the response structure */
return (result);
}
/*
 * Flush the controller's internal cache, return status.
 */
/*
 * NOTE(review): the function name/parameter line between "static int"
 * and "{" is missing from this extraction, as are the command-setup
 * and poll-submission lines.  What remains shows the shape: build a
 * flush command, submit it by polling (the system may be going down,
 * so interrupts cannot be relied upon), and return the error status.
 */
static int
{
struct amr_command ac;
int error = 0;
/* build the command proper */
/* have to poll, as the system may be going down or otherwise damaged */
return (error);
}
return (error);
}
/*
* Take a command, submit it to the controller and wait for it to return.
* Returns nonzero on error. Can be safely called with interrupts enabled.
*/
static int
{
while (softs->amr_busyslots != 0)
/*
*/
sizeof (struct amr_sgentry) * AMR_NSEG);
(void) ddi_dma_sync(
0, 0, DDI_DMA_SYNC_FORDEV);
}
/* sync the dma memory */
/* sync the dma memory */
1000, done_flag);
if (!done_flag) {
return (1);
}
if (!done_flag) {
return (1);
}
/* acknowledge that we have the commands */
if (!done_flag) {
return (1);
}
}
/*
*/
static int
{
uint32_t i;
softs->sg_max_count = 0;
for (i = 0; i < AMR_MAXCMD; i++) {
/* reset the cookien */
cookien = 0;
if (ddi_dma_alloc_handle(
NULL,
"Cannot alloc dma handle for s/g table"));
goto error_out;
}
sizeof (struct amr_sgentry) * AMR_NSEG,
&accattr,
&len,
!= DDI_SUCCESS) {
"Cannot allocate DMA memory"));
goto error_out;
}
NULL,
len,
NULL,
&cookie,
&cookien) != DDI_DMA_MAPPED) {
"Cannot bind communication area for s/g table"));
goto error_out;
}
if (cookien != 1)
goto error_out;
softs->sg_max_count++;
}
return (DDI_SUCCESS);
/*
* Couldn't allocate/initialize all of the sg table entries.
* Clean up the partially-initialized entry before returning.
*/
if (cookien) {
}
}
}
/*
* At least two sg table entries are needed. One is for regular data
* I/O commands, the other is for poll I/O commands.
*/
}
/*
*
* These functions may be safely called multiple times on a given command.
*/
static void
int nsegments)
{
struct amr_sgentry *sg;
size = 0;
/*
* There is no next cookie if the end of the current
* window is reached. Otherwise, the next cookie
* would be found.
*/
}
}
/*
* map the amr command for enquiry, allocate the DMA resource
*/
static int
{
} else {
}
/* process the DMA by address bind mode */
&ac->buffer_dma_handle) !=
DDI_SUCCESS) {
"Cannot allocate addr DMA tag"));
goto error_out;
}
&accattr,
NULL,
&len,
&ac->buffer_acc_handle) !=
DDI_SUCCESS) {
"Cannot allocate DMA memory"));
goto error_out;
}
if ((ddi_dma_addr_bind_handle(
"Cannot bind addr for dma"));
goto error_out;
}
return (DDI_SUCCESS);
if (ac->num_of_cookie)
if (ac->buffer_acc_handle) {
}
if (ac->buffer_dma_handle) {
}
return (DDI_FAILURE);
}
/*
* unmap the amr command for enquiry, free the DMA resource
*/
static void
{
(void *)ac));
/* if the command involved data at all and was mapped */
if (ac->buffer_dma_handle)
(void) ddi_dma_unbind_handle(
if (ac->buffer_acc_handle) {
}
if (ac->buffer_dma_handle) {
(void) ddi_dma_free_handle(
&ac->buffer_dma_handle);
}
}
}
/*
* map the amr command, allocate the DMA resource
*/
static int
{
int error;
} else {
}
}
}
return (DDI_SUCCESS);
}
/* if the command involves data at all, and hasn't been mapped */
/* process the DMA by buffer bind mode */
cb,
arg,
&ac->num_of_cookie);
switch (error) {
case DDI_DMA_PARTIAL_MAP:
"Cannot get dma num win"));
(void) ddi_dma_unbind_handle(
(void) ddi_dma_free_handle(
&ac->buffer_dma_handle);
return (DDI_FAILURE);
}
ac->current_win = 0;
break;
case DDI_DMA_MAPPED:
ac->current_win = 0;
break;
default:
"Cannot bind buf for dma"));
(void) ddi_dma_free_handle(
&ac->buffer_dma_handle);
return (DDI_FAILURE);
}
ac->current_cookie = 0;
/* get the next window */
ac->current_win++;
&ac->num_of_cookie);
ac->current_cookie = 0;
}
} else {
}
return (DDI_SUCCESS);
}
/*
* unmap the amr command, free the DMA resource
*/
static void
{
(void *)ac));
/* if the command involved data at all and was mapped */
}
static int
{
/*
* hba_private always points to the amr_softs struct
*/
SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
return (DDI_FAILURE);
} else {
return (DDI_SUCCESS);
}
}
/*ARGSUSED*/
static int
{
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
static int
{
int ret;
struct amr_command *ac;
AMR_LDRV_OFFLINE)) {
ret = TRAN_BADPKT;
return (ret);
}
case SCMD_READ: /* read */
case SCMD_READ_G1: /* read g1 */
case SCMD_READ_BUFFER: /* read buffer */
case SCMD_WRITE: /* write */
case SCMD_WRITE_G1: /* write g1 */
case SCMD_WRITE_BUFFER: /* write buffer */
(void) amr_poll_command(ac);
} else {
} else {
}
amr_start_waiting_queue((void *)softs);
}
ret = TRAN_ACCEPT;
break;
case SCMD_INQUIRY: /* inquiry */
struct scsi_inquiry inqp;
/*
* The EVDP and pagecode is
* not supported
*/
} else {
/* Enable Tag Queue */
}
sizeof (struct scsi_inquiry));
}
| STATE_SENT_CMD);
ret = TRAN_ACCEPT;
break;
case SCMD_READ_CAPACITY: /* read capacity */
struct scsi_capacity cp;
}
ret = TRAN_ACCEPT;
break;
case SCMD_MODE_SENSE: /* mode sense */
case SCMD_MODE_SENSE_G1: /* mode sense g1 */
ret = TRAN_ACCEPT;
break;
case SCMD_TEST_UNIT_READY: /* test unit ready */
case SCMD_REQUEST_SENSE: /* request sense */
case SCMD_FORMAT: /* format */
case SCMD_START_STOP: /* start stop */
case SCMD_SYNCHRONIZE_CACHE: /* synchronize cache */
}
| STATE_SENT_CMD);
ret = TRAN_ACCEPT;
break;
default: /* any other commands */
| STATE_ARQ_DONE);
ret = TRAN_ACCEPT;
break;
}
return (ret);
}
/*
* functionality according to the "level" in interface. However, we got the
* confirmation from LSI that these HBA cards does not support any commands to
*
* If the tran_reset() return a FAILURE to the sd, the system will not
* continue to dump the core. But core dump is an crucial method to analyze
* problems in panic. Now we adopt a work around solution, that is to return
* a fake SUCCESS to sd during panic, which will force the system continue
* to dump core though the core may have problems in some situtation because
* some on-the-fly commands will continue DMAing data to the memory.
* In addition, the work around core dump method may not be performed
* successfully if the panic is caused by the HBA itself. So the work around
* solution is not a good example for the implementation of tran_reset(),
* the most reasonable approach should send a reset command to the adapter.
*/
/*ARGSUSED*/
static int
{
if (ddi_in_panic()) {
/* Acknowledge the card if there are any significant commands */
while (softs->amr_busyslots > 0) {
if (!done_flag) {
/*
* command not completed, indicate the
* problem and continue get ac
*/
"AMR command is not completed");
return (0);
}
/* wait for the acknowledge from hardware */
if (!done_flag) {
/*
* command is not completed, return from the
* current interrupt and wait for the next one
*/
return (0);
}
}
/* flush the controllor */
/*
* If the system is in panic, the tran_reset() will return a
* fake SUCCESS to sd, then the system would continue dump the
* core by poll commands. This is a work around for dumping
* core in panic.
*
* Note: Some on-the-fly command will continue DMAing data to
* the memory when the core is dumping, which may cause
* some flaws in the dumped core file, so a cmn_err()
* will be printed out to warn users. However, for most
* cases, the core file will be fine.
*/
"that doesn't support software reset. This "
"means that memory being used by the HBA for "
"DMA based reads could have been updated after "
"we panic'd.");
return (1);
} else {
/* return failure to sd */
return (0);
}
}
/*ARGSUSED*/
static int
{
/*
* We don't allow inquiring about capabilities for other targets
*/
return (-1);
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_ARQ:
return (1);
case SCSI_CAP_GEOMETRY:
case SCSI_CAP_SECTOR_SIZE:
return (AMR_DEFAULT_SECTORS);
case SCSI_CAP_TOTAL_SECTORS:
/* number of sectors */
case SCSI_CAP_UNTAGGED_QING:
case SCSI_CAP_TAGGED_QING:
return (1);
default:
return (-1);
}
}
/*ARGSUSED*/
static int
int whom)
{
/*
* We don't allow setting capabilities for other targets
*/
"Set Cap not supported, string = %s, whom=%d",
return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_ARQ:
return (1);
case SCSI_CAP_TOTAL_SECTORS:
return (1);
case SCSI_CAP_SECTOR_SIZE:
return (1);
case SCSI_CAP_UNTAGGED_QING:
case SCSI_CAP_TAGGED_QING:
default:
return (0);
}
}
static struct scsi_pkt *
{
struct amr_command *ac;
AMR_LDRV_OFFLINE)) {
return (NULL);
}
/* force auto request sense */
return (NULL);
}
pkt->pkt_statistics = 0;
pkt->pkt_reason = 0;
return (pkt);
}
"Cannot allocate buffer DMA tag"));
return (NULL);
}
} else {
return (pkt);
}
}
} else {
}
if (flags & PKT_CONSISTENT) {
}
if (flags & PKT_DMA_PARTIAL) {
}
return (NULL);
}
"init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
ac->data_transfered));
return (pkt);
}
static void
{
if (ac->buffer_dma_handle) {
}
}
/*ARGSUSED*/
static void
{
if (ac->buffer_dma_handle) {
}
}
/*ARGSUSED*/
static void
{
}
}
/*ARGSUSED*/
static void
{
cmd = AMR_CMD_LREAD;
} else {
}
}
static void
{
struct mode_format *page3p;
struct mode_geometry *page4p;
struct mode_header *headerp;
return;
switch (pagecode) {
case SD_MODE_SENSE_PAGE3_CODE:
return;
case SD_MODE_SENSE_PAGE4_CODE:
return;
default:
return;
}
}
static void
{
struct scsi_arq_status *arqstat;
arqstat->sts_rqpkt_resid = 0;
arqstat->sts_rqpkt_statistics = 0;
}
/*
 * amr_start_waiting_queue: taskq-dispatched worker that drains the
 * driver's waiting-command queue into free controller slots.
 *
 * Under the per-softs mutex ("only one command allowed at the same
 * time"), it searches for a free slot -- the last slot is reserved for
 * poll I/O commands -- claims it (amr_busyslots++), syncs the
 * command's s/g table (sizeof (struct amr_sgentry) * AMR_NSEG) and
 * mailbox memory for the device, dequeues the command, and submits it
 * to the hardware, waiting (bounded busy-wait; `done_flag`) for the
 * mailbox handshake.  If the handshake times out the failure is logged
 * and the loop is abandoned; if no slot is free the function returns
 * and will be re-dispatched later.
 *
 * NOTE(review): substantial interior lines are elided in this
 * extraction -- the softs initialization from `softp`, the queue
 * head/tail manipulation, the mutex enter/exit calls, the slot-search
 * loop header, and the actual AMR_QPUT/mailbox submission -- restore
 * from upstream before building.
 */
static void
amr_start_waiting_queue(void *softp)
{
struct amr_command *ac;
/* only one command allowed at the same time */
/*
 * Find an available slot, the last slot is
 * occupied by poll I/O command.
 */
/*
 * only one command allowed at the
 * same time
 */
return;
}
softs->amr_busyslots++;
sizeof (struct amr_sgentry) *
AMR_NSEG);
(void) ddi_dma_sync(
0, 0, DDI_DMA_SYNC_FORDEV);
}
/* take the cmd from the queue */
if (!done_flag) {
/*
 * command not completed, indicate the
 * problem and continue get ac
 */
"AMR command is not completed");
break;
}
0, 0, DDI_DMA_SYNC_FORDEV);
/*
 * current ac is submitted
 * so quit 'for-loop' to get next ac
 */
break;
}
}
/* no slot, finish our task */
break;
}
/* only one command allowed at the same time */
}
static void
{
/* acknowledge interrupt */
(void) AMR_QGET_ODB(softs);
0, 0, DDI_DMA_SYNC_FORCPU);
/* save mailbox, which contains a list of completed commands */
/* wait for the acknowledge from hardware */
if (!done_flag) {
/*
* command is not completed, return from the current
* interrupt and wait for the next one
*/
return;
}
for (i = 0; i < mbox->mb_nstatus; i++) {
/* pull the command from the busy index */
if (softs->amr_busyslots > 0)
softs->amr_busyslots--;
if (softs->amr_busyslots == 0)
/* enqueue here */
if (head) {
} else {
}
} else {
"ac in mailbox is NULL!"));
}
}
} else {
}
}
/* dispatch a thread to process the pending I/O if there is any */
}
}
static void
{
while (localhead) {
} else {
}
}
}
}