sbd_scsi.c revision fcf3ce441efd61da9bb2884968af01cb7c1452cc
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/byteorder.h>
#include <stmf.h>
#include <lpif.h>
#include <portif.h>
#include <stmf_ioctl.h>
#include <stmf_sbd.h>
#include <sbd_impl.h>
struct stmf_data_buf *initial_dbuf);
/*
* IMPORTANT NOTE:
* =================
* The whole world here is based on the assumption that everything within
* a scsi task executes in a single threaded manner, even the aborts.
* Don't ever change that. There won't be any performance gain, but there
* will be tons of race conditions.
*/
void
struct stmf_data_buf *dbuf)
{
int ndx;
int bufs_to_take;
/* Lets try not to hog all the buffers the port has. */
if (iolen == 0)
break;
/* Do not need to do xfer anymore, just complete it */
dbuf->db_data_size = 0;
return;
}
}
do {
/*
* A bad port implementation can keep on failing the
* request but keep on sending us a false
* minsize.
*/
(minsize >= 512));
return;
}
}
}
void
struct stmf_data_buf *dbuf)
{
return;
}
return; /* wait for all buffers to complete */
else
return;
}
}
void
{
int fast_path;
if (len == 0) {
len = 256;
}
} else if (op == SCMD_READ_G1) {
} else if (op == SCMD_READ_G5) {
} else if (op == SCMD_READ_G4) {
} else {
return;
}
return;
}
}
fast_path = 0;
} else {
fast_path = 1;
}
if (len == 0) {
return;
}
if (initial_dbuf == NULL) {
do {
&minsize, 0);
(minsize >= 512));
if (initial_dbuf == NULL) {
return;
}
}
dbuf = initial_dbuf;
dbuf->db_relative_offset = 0;
} else {
}
return;
}
if (task->task_lu_private) {
} else {
}
scmd->current_ro = 0;
}
void
struct stmf_data_buf *dbuf)
{
int bufs_to_take;
/* Lets try not to hog all the buffers the port has. */
do {
(minsize >= 512));
return;
}
}
}
void
{
int ndx;
return;
}
goto WRITE_XFER_DONE;
}
if (iolen == 0)
break;
break;
}
}
return; /* wait for all buffers to complete */
else
return;
}
if (dbuf_reusable == 0) {
/* free current dbuf and allocate a new one */
do {
(minsize >= 512));
}
return;
}
}
}
void
{
if (op == SCMD_WRITE) {
if (len == 0) {
len = 256;
}
} else if (op == SCMD_WRITE_G1) {
} else if (op == SCMD_WRITE_G5) {
} else if (op == SCMD_WRITE_G4) {
} else {
return;
}
return;
}
}
if (len == 0) {
return;
}
if (initial_dbuf == NULL) {
do {
&minsize, 0);
(minsize >= 512));
if (initial_dbuf == NULL) {
return;
}
if (initial_dbuf->db_data_size >
/* protocol error */
return;
}
}
do_immediate_data = 1;
}
dbuf = initial_dbuf;
if (task->task_lu_private) {
} else {
}
scmd->current_ro = 0;
if (do_immediate_data) {
} else {
}
}
/*
* Utility routine to handle small non performance data transfers to the
* initiators. dbuf is an initial data buf (if any), 'p' points to a data
* buffer which is source of data for transfer, cdb_xfer_size is the
* transfer size based on CDB, cmd_xfer_size is the actual amount of data
* which this command would transfer (the size of data pointed to by 'p').
*/
void
{
} else {
}
if (cmd_xfer_size == 0) {
return;
}
}
return;
}
uint8_t *d;
uint32_t s;
bufsize += s;
}
dbuf->db_relative_offset = 0;
}
}
void
struct stmf_data_buf *dbuf)
{
return;
}
}
void
struct stmf_data_buf *initial_dbuf)
{
uint8_t p[32];
uint64_t s;
s--;
case SCMD_READ_CAPACITY:
if (s & 0xffffffff00000000ull) {
p[0] = p[1] = p[2] = p[3] = 0xFF;
} else {
p[0] = (s >> 24) & 0xff;
p[1] = (s >> 16) & 0xff;
p[2] = (s >> 8) & 0xff;
p[3] = s & 0xff;
}
p[4] = 0; p[5] = 0;
return;
case SCMD_SVC_ACTION_IN_G4:
bzero(p, 32);
p[0] = (s >> 56) & 0xff;
p[1] = (s >> 48) & 0xff;
p[2] = (s >> 40) & 0xff;
p[3] = (s >> 32) & 0xff;
p[4] = (s >> 24) & 0xff;
p[5] = (s >> 16) & 0xff;
p[6] = (s >> 8) & 0xff;
p[7] = s & 0xff;
cdb_len, 32);
return;
}
}
{3, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 2, 0, 0, 0,
0, 0, 0, 0, 0x80, 0, 0, 0};
{4, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0x15, 0x18, 0, 0};
void
struct stmf_data_buf *initial_dbuf)
{
switch (page) {
case 0x03:
break;
case 0x04:
break;
case 0x0A:
break;
case MODEPAGE_ALLPAGES:
+ sizeof (sbd_pa);
/*
* If the buffer is big enough, include the block
* descriptor; otherwise, leave it out.
*/
dbd = 1;
}
if (dbd == 0) {
ps += 8;
}
break;
default:
return;
}
(((task->task_additional_flags &
TASK_AF_NO_EXPECTED_XFER_LENGTH) == 0) &&
return;
}
if (p[0] == SCMD_MODE_SENSE) {
} else {
ps -= 2;
}
switch (page) {
case 0x03:
break;
case 0x0A:
break;
case MODEPAGE_ALLPAGES:
if (dbd == 0) {
}
/* FALLTHROUGH */
case 0x04:
if (s > 1024 * 1024 * 1024) {
} else {
}
break;
}
}
void
{
/*
* Basic protocol checks.
*/
return;
}
/*
* Zero byte allocation length is not an error. Just
* return success.
*/
if (cmd_size == 0) {
task->task_cmd_xfer_length = 0;
if (task->task_additional_flags &
}
return;
}
/*
* Standard inquiry
*/
page_length = 31;
return;
}
/*
* EVPD handling
*/
switch (cdbp[2]) {
case 0x00:
page_length = 3;
p[0] = 0;
p[5] = 0x83;
p[6] = 0x86;
break;
case 0x83:
break;
case 0x86:
page_length = 0x3c;
p[0] = 0;
p[1] = 0x86; /* Page 86 response */
p[3] = page_length;
/*
 * Bits 0, 1, and 2 will need to be updated if/when the
 * corresponding task-management capability is implemented
 * (NOTE: surrounding text appears truncated in this copy).
 * For now, we're going to claim support only for Simple TA.
 */
p[5] = 1;
break;
default:
return;
}
}
{
if ((task->task_lu_private =
return (STMF_SUCCESS);
}
return (STMF_ALLOC_FAILURE);
}
void
{
break;
}
}
}
void
{
/* If we don't have any reservations, just get out. */
return;
}
/* Find the I_T nexus which is holding the reservation. */
break;
}
}
} else {
/*
* We were passed an I_T nexus. If this nexus does not hold
* the reservation, do nothing. This is why this function is
* called "check_and_clear".
*/
return;
}
}
}
/*
* returns non-zero, if this command can be allowed to run even if the
* lu has been reserved by another initiator.
*/
int
{
((cdb0 == SCMD_SVC_ACTION_IN_G4) &&
(cdb1 == SSVC_ACTION_READ_CAPACITY_G4))) {
return (1);
}
return (0);
}
void
{
if (it->sbd_it_session_id ==
return;
}
}
return;
}
!= STMF_SUCCESS) {
return;
}
}
if (task->task_mgmt_function) {
return;
}
if (!sbd_reserve_allow(task)) {
return;
}
}
saa = STMF_SAA_POR;
saa = 0;
} else {
}
} else {
it->sbd_it_ua_conditions = 0;
saa = 0;
}
if (saa) {
return;
}
}
return;
}
return;
}
return;
}
task->task_cmd_xfer_length = 0;
return;
}
return;
}
uint8_t *p;
kmem_free(p, 512);
return;
}
if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
return;
/*
* } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
* sbd_handle_read(task, initial_dbuf);
* return;
*/
}
}
/*
* if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
* if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
* sbd_handle_write(task, initial_dbuf);
* return;
* }
* }
*/
/* XXX Implement power management */
task->task_cmd_xfer_length = 0;
return;
}
#if 0
/* XXX Remove #if 0 above */
return;
}
#endif
return;
}
if (cdb0 == SCMD_REQUEST_SENSE) {
/*
* LU provider needs to store unretrieved sense data
* return good status with no sense.
*/
} else {
}
return;
}
if (cdb0 == SCMD_VERIFY) {
/*
* Something more likely needs to be done here.
*/
task->task_cmd_xfer_length = 0;
return;
}
if (cdb1) {
return;
}
if (it->sbd_it_session_id !=
/*
* This can only happen if things were in
* flux.
*/
return;
}
}
}
if (cdb0 == SCMD_RELEASE) {
return;
}
if (cdb0 == SCMD_RESERVE) {
return;
}
if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
return;
}
/* Report Target Port Groups */
if ((cdb0 == SCMD_MAINTENANCE_IN) &&
return;
}
}
void
{
return;
} else {
}
}
/* ARGSUSED */
void
{
"sbd_send_status_done: this should not have been called");
}
void
{
if (task->task_lu_private) {
(void *)task);
}
}
}
/*
* Aborts are synchronous w.r.t. I/O AND
* All the I/O which SBD does is synchronous AND
* Everything within a task is single threaded.
* IT MEANS
* If this function is called, we are doing nothing with this task
* inside of sbd module.
*/
/* ARGSUSED */
{
if (abort_cmd == STMF_LU_RESET_STATE) {
return (sbd_lu_reset_state(lu));
}
if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
(sbd_it_data_t *)arg);
return (STMF_SUCCESS);
}
if (task->task_lu_private) {
return (STMF_ABORT_SUCCESS);
}
}
return (STMF_NOT_FOUND);
}
/* ARGSUSED */
void
{
(cmd == STMF_CMD_LU_OFFLINE) ||
(cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
(cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
switch (cmd) {
case STMF_CMD_LU_ONLINE:
slu->sl_state_not_acked = 0;
} else {
}
}
break;
case STMF_CMD_LU_OFFLINE:
slu->sl_state_not_acked = 0;
} else {
}
}
break;
/* Fallthrough */
slu->sl_state_not_acked = 0;
break;
}
}
/* ARGSUSED */
{
return (STMF_NOT_SUPPORTED);
}
{
return (STMF_FAILURE);
}
return (STMF_SUCCESS);
}
/* ARGSUSED */
static void
struct stmf_data_buf *initial_dbuf)
{
sbd_store_t *sst =
int is_g4 = 0;
int immed;
/*
* Determine if this is a 10 or 16 byte CDB
*/
is_g4 = 1;
/*
* Determine other requested parameters
*
* We don't have a non-volatile cache, so don't care about SYNC_NV.
* Do not support the IMMED bit.
*/
if (immed) {
return;
}
/*
* Check to be sure we're not being asked to sync an LBA
* that is out of range. While checking, verify reserved fields.
*/
if (is_g4) {
return;
}
} else {
return;
}
}
return;
}
return;
}
}