/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright (c) 2011 Bayard G. Bell. All rights reserved.
*/
/*
* SCSA HBA nexus driver that emulates an HBA connected to SCSI target
* devices (large disks).
*/
#ifdef DEBUG
#define EMUL64DEBUG
#endif
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>
int emul64debug = 0;
#ifdef EMUL64DEBUG
static int emul64_cdb_debug = 0;
#endif
/*
* cb_ops function prototypes
*/
/*
 * dev_ops function prototypes
*/
/*
* Function prototypes
*
* SCSA functions exported by means of the transport table
*/
static void emul64_pkt_comp(void *);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
int whom);
/*
* internal functions
*/
static int emul64_get_tgtrange(struct emul64 *,
intptr_t,
emul64_tgt_t **,
emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *,
emul64_tgt_t *,
emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *,
emul64_tgt_t *,
emul64_tgt_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
diskaddr_t start_block,
size_t blkcnt,
emul64_rng_overlap_t *overlap,
emul64_nowrite_t ***prevp);
#ifdef EMUL64DEBUG
#endif
#ifdef _DDICT
static int ddi_in_panic(void);
static int ddi_in_panic() { return (0); }
#ifndef SCSI_CAP_RESET_NOTIFICATION
#define	SCSI_CAP_RESET_NOTIFICATION	14
#endif
#ifndef SCSI_RESET_NOTIFY
#define	SCSI_RESET_NOTIFY	0x01
#endif
#ifndef SCSI_RESET_CANCEL
#define	SCSI_RESET_CANCEL	0x02
#endif
#endif
/*
* Tunables:
*
* emul64_max_task
* The taskq facility is used to queue up SCSI start requests on a per
* controller basis. If the maximum number of queued tasks is hit,
* taskq_ent_alloc() delays for a second, which adversely impacts our
* performance. This value establishes the maximum number of task
* queue entries when taskq_create is called.
*
* emul64_task_nthreads
* Specifies the number of threads that should be used to process a
* controller's task queue. Our init function sets this to the number
* of CPUs on the system, but this can be overridden in emul64.conf.
*/
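/*
 * Illustrative sketch, not part of the original source: how the tunables
 * described above are typically declared and then consumed when the
 * per-controller task queue is created during attach.  The initializers,
 * the taskq name, and the soft-state field name are assumptions.
 */
#if 0
int emul64_max_task = 16;	/* maxalloc handed to taskq_create() */
int emul64_task_nthreads = 1;	/* recomputed from the CPU count in _init() */

static void
emul64_create_taskq_sketch(struct emul64 *emul64)
{
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task,
	    TASKQ_PREPOPULATE);
}
#endif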
/*
* Local static data
*/
/*
 * Character/block device entry points (cb_ops) for this nexus driver.
 */
static struct cb_ops emul64_cbops = {
scsi_hba_open, /* cb_open */
scsi_hba_close, /* cb_close */
nodev, /* cb_strategy */
nodev, /* cb_print */
nodev, /* cb_dump */
nodev, /* cb_read */
nodev, /* cb_write */
emul64_ioctl, /* cb_ioctl */
nodev, /* cb_devmap */
nodev, /* cb_mmap */
nodev, /* cb_segmap */
nochpoll, /* cb_chpoll */
ddi_prop_op, /* cb_prop_op */
NULL, /* cb_str */
D_MP | D_64BIT, /* cb_flag */
CB_REV, /* cb_rev */
nodev, /* cb_aread */
nodev /* cb_awrite */
};
/*
* autoconfiguration routines.
*/
static struct dev_ops emul64_ops = {
DEVO_REV, /* rev */
0, /* refcnt */
emul64_info, /* getinfo */
nulldev, /* identify */
nulldev, /* probe */
emul64_attach, /* attach */
emul64_detach, /* detach */
nodev, /* reset */
&emul64_cbops, /* char/block ops */
NULL, /* bus ops */
NULL, /* power */
ddi_quiesce_not_needed, /* quiesce */
};
static struct modldrv modldrv = {
&mod_driverops, /* module type - driver */
"emul64 SCSI Host Bus Adapter", /* module name */
&emul64_ops, /* driver ops */
};
static struct modlinkage modlinkage = {
MODREV_1, /* ml_rev - must be MODREV_1 */
&modldrv, /* ml_linkage */
NULL /* end of driver linkage */
};
int
_init(void)
{
int ret;
if (ret != 0)
return (ret);
return (ret);
}
/* Set the number of task threads to the number of CPUs */
if (boot_max_ncpus == -1) {
	emul64_task_nthreads = max_ncpus;
} else {
	emul64_task_nthreads = boot_max_ncpus;
}
if (ret != 0) {
}
return (ret);
}
int
_fini(void)
{
int ret;
return (ret);
return (ret);
}
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*
* Given the device number return the devinfo pointer
* from the scsi_device structure.
*/
/*ARGSUSED*/
static int
{
switch (cmd) {
case DDI_INFO_DEVT2DEVINFO:
else {
return (DDI_FAILURE);
}
break;
case DDI_INFO_DEVT2INSTANCE:
break;
default:
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Attach an instance of an emul64 host adapter. Allocate data structures,
* initialize the emul64 and we're on the air.
*/
/*ARGSUSED*/
static int
{
int mutex_initted = 0;
int instance;
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
if (!tran) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
default:
"emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
return (DDI_FAILURE);
}
/*
* Allocate emul64 data structure.
*/
"emul64%d: Failed to alloc soft state",
instance);
return (DDI_FAILURE);
}
instance);
return (DDI_FAILURE);
}
/*
* Allocate a transport structure
*/
goto fail;
}
/*
* Attach this instance of the hba
*/
0) != DDI_SUCCESS) {
goto fail;
}
/*
* Look up the scsi-options property
*/
/* mutexes to protect the emul64 request and response queue */
mutex_initted = 1;
/*
 * Initialize the default Target Capabilities and Sync Rates
 */
emul64_i_initcap(emul64);
return (DDI_SUCCESS);
fail:
if (mutex_initted) {
}
if (tran) {
}
return (DDI_FAILURE);
}
/*ARGSUSED*/
static int
{
/* get transport structure pointer from the dip */
return (DDI_FAILURE);
}
/* get soft state from transport structure */
if (!emul64) {
return (DDI_FAILURE);
}
switch (cmd) {
case DDI_DETACH:
(void) scsi_hba_detach(dip);
return (DDI_SUCCESS);
case DDI_SUSPEND:
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*
* Function name : emul64_tran_tgt_init
*
* Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
*
*/
/*ARGSUSED*/
static int
{
/*
 * We get called for each target driver.conf node, possibly multiple
 * times. Check to see if transport to tgt,lun is already established.
 */
if (tgt) {
ret = DDI_SUCCESS;
goto out;
}
/* see if we have driver.conf specified device for this target,lun */
goto out;
if (length < 2) {
"elements", prop_name);
goto out;
}
/* pick geometry name and vidpid string from string array */
geo = *geo_vidpid;
/* lookup geometry property integer array */
goto out;
}
if (length2 < 6) {
"elements", *geo_vidpid);
goto out;
}
/* allocate and initialize tgt structure for tgt,lun */
/* create avl for data block storage */
/* save scsi_address and vidpid */
/*
* The high order 4 bytes of the sector count always come first in
* emul64.conf. They are followed by the low order 4 bytes. Not
* all CPU types want them in this order, but laddr_t takes care of
* this for us. We then pick up geometry (ncyl X nheads X nsect).
*/
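#if 0
	/*
	 * Sketch, not original code: combining the two 32-bit property
	 * values described above into a 64-bit sector count, then picking
	 * up the geometry.  "geoip" is the integer array looked up above;
	 * the target field and local names stand in for the original
	 * laddr_t handling and are assumptions.
	 */
	tgt->emul64_tgt_sectors =
	    ((uint64_t)(uint32_t)geoip[0] << 32) | (uint32_t)geoip[1];
	ncyl = geoip[2];	/* cylinders */
	nheads = geoip[3];	/* heads */
	nsect = geoip[4];	/* sectors per track */
	/* geoip[5] would carry the block size */
#endif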
/*
* On 32-bit platforms, fix block size if it's greater than the
* allowable maximum.
*/
#if !defined(_LP64)
#endif
/* insert target structure into list */
ret = DDI_SUCCESS;
if (geoip)
if (geo_vidpid)
return (ret);
}
/*
* Function name : emul64_i_initcap
*
* Return Values : NONE
* Description : Initializes the default target capabilities and
* Sync Rates.
*
* Context : Called from the user thread through attach.
*
*/
static void
{
int i;
cap = 0;
synch = 0;
for (i = 0; i < NTARGETS_WIDE; i++) {
}
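#if 0
	/*
	 * Sketch, not original code: the per-target stores implied by the
	 * loop above; the emul64_cap/emul64_synch array names are
	 * assumptions.
	 */
	for (i = 0; i < NTARGETS_WIDE; i++) {
		emul64->emul64_cap[i] = cap;
		emul64->emul64_synch[i] = synch;
	}
#endif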
}
/*
* Function name : emul64_scsi_getcap()
*
* Return Values : current value of capability, if defined
* -1 if capability is not defined
* Description : returns current capability value
*
* Context : Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
static int
{
int rval = 0;
/*
* We don't allow inquiring about capabilities for other targets
*/
if (cap == NULL || whom == 0) {
	return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_DMA_MAX:
break;
case SCSI_CAP_MSG_OUT:
rval = 1;
break;
case SCSI_CAP_DISCONNECT:
rval = 1;
break;
case SCSI_CAP_SYNCHRONOUS:
rval = 1;
break;
case SCSI_CAP_WIDE_XFER:
rval = 1;
break;
case SCSI_CAP_TAGGED_QING:
rval = 1;
break;
case SCSI_CAP_UNTAGGED_QING:
rval = 1;
break;
case SCSI_CAP_PARITY:
rval = 1;
break;
case SCSI_CAP_INITIATOR_ID:
break;
case SCSI_CAP_ARQ:
rval = 1;
break;
case SCSI_CAP_LINKED_CMDS:
break;
case SCSI_CAP_RESET_NOTIFICATION:
	rval = 1;
break;
default:
rval = -1;
break;
}
return (rval);
}
/*
* Function name : emul64_scsi_setcap()
*
* Return Values : 1 - capability exists and can be set to new value
* 0 - capability could not be set to new value
* -1 - no such capability
*
* Description : sets a capability for a target
*
* Context : Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
static int
{
int rval = 0;
/*
* We don't allow setting capabilities for other targets
*/
if (cap == NULL || whom == 0) {
	return (-1);
}
switch (scsi_hba_lookup_capstr(cap)) {
case SCSI_CAP_DMA_MAX:
case SCSI_CAP_MSG_OUT:
case SCSI_CAP_PARITY:
case SCSI_CAP_UNTAGGED_QING:
case SCSI_CAP_LINKED_CMDS:
/*
* None of these are settable via
* the capability interface.
*/
break;
case SCSI_CAP_DISCONNECT:
rval = 1;
break;
case SCSI_CAP_SYNCHRONOUS:
rval = 1;
break;
case SCSI_CAP_TAGGED_QING:
rval = 1;
break;
case SCSI_CAP_WIDE_XFER:
rval = 1;
break;
case SCSI_CAP_INITIATOR_ID:
rval = -1;
break;
case SCSI_CAP_ARQ:
rval = 1;
break;
case SCSI_CAP_TOTAL_SECTORS:
break;
case SCSI_CAP_SECTOR_SIZE:
break;
default:
rval = -1;
break;
}
return (rval);
}
/*
* Function name : emul64_scsi_init_pkt
*
* Return Values : pointer to scsi_pkt, or NULL
* Description : Called by kernel on behalf of a target driver
* calling scsi_init_pkt(9F).
* Refer to tran_init_pkt(9E) man page
*
* Context : Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
/* ARGSUSED */
static struct scsi_pkt *
{
/*
* First step of emul64_scsi_init_pkt: pkt allocation
*/
"scsi_hba_pkt_alloc failed");
return (NULL);
}
/*
* Initialize the new pkt - we redundantly initialize
* all the fields for illustrative purposes.
*/
pkt->pkt_statistics = 0;
pkt->pkt_reason = 0;
} else {
}
/*
* Second step of emul64_scsi_init_pkt: dma allocation/move
*/
} else {
}
}
return (pkt);
}
/*
* Function name : emul64_scsi_destroy_pkt
*
* Return Values : none
* Description : Called by kernel on behalf of a target driver
* calling scsi_destroy_pkt(9F).
* Refer to tran_destroy_pkt(9E) man page
*
* Context : Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
static void
{
/*
* emul64_scsi_dmafree inline to make things faster
*/
/*
* Free the mapping.
*/
}
/*
* Free the pkt
*/
}
/*
* Function name : emul64_scsi_dmafree()
*
* Return Values : none
* Description : free dvma resources
*
* Context : Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
/*ARGSUSED*/
static void
{
}
/*
* Function name : emul64_scsi_sync_pkt()
*
* Return Values : none
* Description : sync dma
*
* Context : Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
/*ARGSUSED*/
static void
{
}
/*
* routine for reset notification setup, to register or cancel.
*/
static int
{
while (p) {
break; /* An entry exists for this target */
beforep = p;
p = p->next;
}
} else {
}
sizeof (struct emul64_reset_notify_entry));
rval = DDI_SUCCESS;
p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
KM_SLEEP);
rval = DDI_SUCCESS;
}
return (rval);
}
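/*
 * Sketch, not original code: the register/cancel bookkeeping described
 * above, written as a stand-alone helper.  The list-head argument and the
 * ap/callback/arg/next members of struct emul64_reset_notify_entry are
 * assumptions.
 */
#if 0
static int
emul64_reset_notify_sketch(struct emul64_reset_notify_entry **listp,
    struct scsi_address *ap, int flag, void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64_reset_notify_entry *p, **prevp = listp;

	if (flag == SCSI_RESET_CANCEL) {
		for (p = *listp; p != NULL; prevp = &p->next, p = p->next) {
			if (p->ap == ap && p->callback == callback &&
			    p->arg == arg) {
				*prevp = p->next;	/* unlink the entry */
				kmem_free(p,
				    sizeof (struct emul64_reset_notify_entry));
				return (DDI_SUCCESS);
			}
		}
		return (DDI_FAILURE);
	}

	/* SCSI_RESET_NOTIFY: remember who to call back after a reset */
	p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry), KM_SLEEP);
	p->ap = ap;
	p->callback = callback;
	p->arg = arg;
	p->next = *listp;
	*listp = p;
	return (DDI_SUCCESS);
}
#endif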
/*
* Function name : emul64_scsi_start()
*
* Return Values : TRAN_FATAL_ERROR - emul64 has been shutdown
* TRAN_BUSY - request queue is full
* TRAN_ACCEPT - pkt has been submitted to emul64
*
* Description : init pkt, start the request
*
* Context : Can be called from different kernel process threads.
* Can be called by interrupt thread.
*/
static int
{
#ifdef EMUL64DEBUG
if (emul64_cdb_debug) {
}
#endif /* EMUL64DEBUG */
/*
* calculate deadline from pkt_time
* Instead of multiplying by 100 (ie. HZ), we multiply by 128 so
* we can shift and at the same time have a 28% grace period
* we ignore the rare case of pkt_time == 0 and deal with it
* in emul64_i_watch()
*/
cur_lbolt = ddi_get_lbolt();
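#if 0
	/*
	 * Sketch, not original code: the deadline computation the comment
	 * above describes; "sp" (the per-command state) and cmd_deadline
	 * are assumed names.
	 */
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);
#endif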
} else {
dispatched = NULL;
if (emul64_collect_stats) {
/*
* If we are collecting statistics, call
* taskq_dispatch in no sleep mode, so that we can
* detect if we are exceeding the queue length that
* was established in the call to taskq_create in
* emul64_attach. If the no sleep call fails
* (returns NULL), the task will be dispatched in
* sleep mode below.
*/
if (dispatched == NULL) {
/* Queue was full. dispatch failed. */
}
}
if (dispatched == NULL) {
}
}
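#if 0
	/*
	 * Sketch, not original code, of the two-step dispatch described
	 * above: try TQ_NOSLEEP first so a full queue can be detected and
	 * counted, then fall back to TQ_SLEEP.  The taskq field name is an
	 * assumption.
	 */
	dispatched = taskq_dispatch(emul64->emul64_taskq,
	    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
	if (dispatched == NULL) {
		(void) taskq_dispatch(emul64->emul64_taskq,
		    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
	}
#endif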
done:
return (rval);
}
void
{
/* got check, no data transferred and ARQ done */
/* for ARQ */
arq->sts_rqpkt_resid = 0;
}
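/*
 * Sketch, not original code: the ARQ bookkeeping a helper like the one
 * above performs when faking a CHECK CONDITION with auto request sense
 * already done.  Field names follow scsi_pkt(9S) and scsi_arq_status(9S);
 * the helper name and sense-key plumbing are assumptions.
 */
#if 0
static void
emul64_check_cond_sketch(struct scsi_pkt *pkt, uchar_t key, uchar_t asc,
    uchar_t ascq)
{
	struct scsi_arq_status *arq = (struct scsi_arq_status *)pkt->pkt_scbp;

	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS | STATE_ARQ_DONE;
	arq->sts_status.sts_chk = 1;		/* CHECK CONDITION */

	/* the emulated REQUEST SENSE "completed" successfully */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}
#endif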
{
/*
* If there is no target, skip the error injection and
* let the packet be handled normally. This would normally
 * never happen since a_target and a_lun are set up in
* emul64_scsi_init_pkt.
*/
return (ERR_INJ_DISABLE);
}
/*
* Calculate available sense buffer length. We could just
* assume sizeof(struct scsi_extended_sense) but hopefully
* that limitation will go away soon.
*/
(sizeof (struct scsi_arq_status) -
sizeof (struct scsi_extended_sense));
}
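#if 0
	/*
	 * Sketch, not original code: the length clamp described above.
	 * "rqlen" is an assumed local; emul64_einj_sense_length and
	 * pkt_scblen appear elsewhere in this file.
	 */
	rqlen = MIN((uint_t)tgt->emul64_einj_sense_length,
	    (uint_t)(pkt->pkt_scblen - (sizeof (struct scsi_arq_status) -
	    sizeof (struct scsi_extended_sense))));
#endif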
/* for ARQ */
arq->sts_rqpkt_resid = 0;
/* Copy sense data */
if (tgt->emul64_einj_sense_data != 0) {
}
}
/* Return current error injection state */
return (tgt->emul64_einj_state);
}
int
{
/* Check args */
return (EINVAL);
}
sizeof (error_inj_req), 0) != 0) {
return (EFAULT);
}
/* Make sure device exists */
return (ENODEV);
}
/* Free old sense buffer if we have one */
tgt->emul64_einj_sense_length = 0;
}
/*
* Now handle error injection request. If error injection
* is requested we will return the sense data provided for
* any I/O to this target until told to stop.
*/
switch (error_inj_req.eccd_inj_state) {
case ERR_INJ_ENABLE:
case ERR_INJ_ENABLE_NODATA:
if (error_inj_req.eccd_sns_dlen) {
/* Copy sense data */
error_inj_req.eccd_sns_dlen, 0) != 0) {
"emul64: sense data copy in failed\n");
return (EFAULT);
}
}
break;
case ERR_INJ_DISABLE:
default:
break;
}
return (0);
}
int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
static void
{
/*
* If error injection is configured to return with
* no data return now without handling the command.
* This is how normal check conditions work.
*
* If the error injection state is ERR_INJ_ENABLE
* (or if error injection is disabled) continue and
* handle the command. This would be used for
* KEY_RECOVERABLE_ERROR type conditions.
*/
return;
}
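#if 0
	/*
	 * Sketch, not original code, of the early return described above;
	 * the error-injection helper name is an assumption, but
	 * ERR_INJ_ENABLE_NODATA is the state being tested.
	 */
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA)
		return;
#endif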
switch (pkt->pkt_cdbp[0]) {
case SCMD_START_STOP:
(void) bsd_scsi_start_stop_unit(pkt);
break;
case SCMD_TEST_UNIT_READY:
(void) bsd_scsi_test_unit_ready(pkt);
break;
case SCMD_REQUEST_SENSE:
(void) bsd_scsi_request_sense(pkt);
break;
case SCMD_INQUIRY:
(void) bsd_scsi_inquiry(pkt);
break;
case SCMD_FORMAT:
(void) bsd_scsi_format(pkt);
break;
case SCMD_READ:
case SCMD_WRITE:
case SCMD_READ_G1:
case SCMD_WRITE_G1:
case SCMD_READ_G4:
case SCMD_WRITE_G4:
(void) bsd_scsi_io(pkt);
break;
case SCMD_LOG_SENSE_G1:
(void) bsd_scsi_log_sense(pkt);
break;
case SCMD_MODE_SENSE:
case SCMD_MODE_SENSE_G1:
(void) bsd_scsi_mode_sense(pkt);
break;
case SCMD_MODE_SELECT:
case SCMD_MODE_SELECT_G1:
(void) bsd_scsi_mode_select(pkt);
break;
case SCMD_READ_CAPACITY:
(void) bsd_scsi_read_capacity(pkt);
break;
case SCMD_SVC_ACTION_IN_G4:
(void) bsd_scsi_read_capacity_16(pkt);
} else {
}
break;
case SCMD_RESERVE:
case SCMD_RESERVE_G1:
(void) bsd_scsi_reserve(pkt);
break;
case SCMD_RELEASE:
case SCMD_RELEASE_G1:
(void) bsd_scsi_release(pkt);
break;
case SCMD_REASSIGN_BLOCK:
(void) bsd_scsi_reassign_block(pkt);
break;
case SCMD_READ_DEFECT_LIST:
(void) bsd_scsi_read_defect_list(pkt);
break;
case SCMD_PRIN:
case SCMD_PROUT:
case SCMD_REPORT_LUNS:
/* ASC 0x24 INVALID FIELD IN CDB */
break;
default:
break;
case SCMD_GET_CONFIGURATION:
case 0x35: /* SCMD_SYNCHRONIZE_CACHE */
/* Don't complain */
break;
}
}
static void
{
if (!tgt) {
} else {
pkt->pkt_statistics = 0;
}
}
/* ARGSUSED */
static int
{
return (1);
}
/* ARGSUSED */
static int
{
return (1);
}
static int
emul64_tgt_t **tgtp,
{
return (EFAULT);
}
return (ENXIO);
}
return (0);
}
static int
int cmd,
int mode,
int *rvalp)
{
int instance;
int rv = 0;
return (ENXIO);
}
switch (cmd) {
case EMUL64_WRITE_OFF:
if (rv == 0) {
}
break;
case EMUL64_WRITE_ON:
if (rv == 0) {
}
break;
case EMUL64_ZERO_RANGE:
if (rv == 0) {
}
break;
case EMUL64_ERROR_INJECT:
break;
default:
break;
}
return (rv);
}
/* ARGSUSED */
static int
{
/* Find spot in list */
/* Insert into list */
}
if (emul64_collect_stats) {
}
} else {
return (EINVAL);
}
return (0);
}
/* ARGSUSED */
static int
{
int rv = 0;
/* Find spot in list */
/* Remove from list */
}
switch (overlap) {
case O_NONE:
break;
case O_SAME:
if (emul64_collect_stats) {
}
break;
case O_OVERLAP:
case O_SUBSET:
break;
}
return (rv);
}
static emul64_nowrite_t *
{
/* Find spot in list */
break;
}
return (cur);
}
static emul64_nowrite_t *
{
(void *) &nw->emul64_blocked,
sizeof (nw->emul64_blocked));
return (nw);
}
static void
{
}
{
return (O_NONE);
return (O_NONE);
return (O_SAME);
return (O_SUBSET);
}
return (O_OVERLAP);
}
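/*
 * Sketch, not original code: the range classification the routine above
 * implements.  emul64_range_t is assumed to carry a starting block
 * (emul64_sb) and a block count (emul64_blkcnt); O_SUBSET is taken to mean
 * the queried range lies entirely inside the stored range.
 */
#if 0
static emul64_rng_overlap_t
emul64_overlap_sketch(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{
	if (rng->emul64_blkcnt == 0 || cnt == 0)
		return (O_NONE);			/* empty range */
	if ((sb >= rng->emul64_sb + rng->emul64_blkcnt) ||
	    (rng->emul64_sb >= sb + cnt))
		return (O_NONE);			/* disjoint */
	if ((sb == rng->emul64_sb) && (cnt == rng->emul64_blkcnt))
		return (O_SAME);			/* identical */
	if ((sb >= rng->emul64_sb) &&
	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt)))
		return (O_SUBSET);			/* fully contained */
	return (O_OVERLAP);				/* partial overlap */
}
#endif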
/*
* Error logging, printing, and debug print routines
*/
/*VARARGS3*/
static void
{
}
#ifdef EMUL64DEBUG
static void
{
char *p;
int i;
*p++ = '[';
if (i != 0)
*p++ = ' ';
}
*p++ = ']';
*p++ = '\n';
*p = 0;
}
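/*
 * Sketch, not original code: a self-contained CDB hex dump of the kind the
 * routine above produces, e.g. "[2a 00 00 00 10 00 00 00 08 00]".
 */
#if 0
static void
emul64_dump_cdb_sketch(uint8_t *cdb, int cdblen)
{
	static const char hex[] = "0123456789abcdef";
	char	buf[3 * 16 + 3];	/* room for a 16-byte CDB */
	char	*p = buf;
	int	i;

	*p++ = '[';
	for (i = 0; i < cdblen && i < 16; i++) {
		if (i != 0)
			*p++ = ' ';
		*p++ = hex[(cdb[i] >> 4) & 0xf];
		*p++ = hex[cdb[i] & 0xf];
	}
	*p++ = ']';
	*p = '\0';
	cmn_err(CE_CONT, "%s\n", buf);
}
#endif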
#endif /* EMUL64DEBUG */