sbd_scsi.c revision 4558d122136f151d62acbbc02ddb42df89a5ef66
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
#include <sys/byteorder.h>
#include <sys/stmf_ioctl.h>
#include <sys/stmf_sbd_ioctl.h>
#include "stmf_sbd.h"
#include "sbd_impl.h"
#define SCSI2_CONFLICT_FREE_CMDS(cdb) ( \
/* ----------------------- */ \
/* Refer Both */ \
/* SPC-2 (rev 20) Table 10 */ \
/* SPC-3 (rev 23) Table 31 */ \
/* ----------------------- */ \
((cdb[0]) == SCMD_INQUIRY) || \
((cdb[0]) == SCMD_LOG_SENSE_G1) || \
((cdb[0]) == SCMD_RELEASE) || \
((cdb[0]) == SCMD_RELEASE_G1) || \
((cdb[0]) == SCMD_REPORT_LUNS) || \
((cdb[0]) == SCMD_REQUEST_SENSE) || \
/* PREVENT ALLOW MEDIUM REMOVAL with prevent == 0 */ \
/* SERVICE ACTION IN with READ MEDIA SERIAL NUMBER (0x01) */ \
(((cdb[0]) == SCMD_SVC_ACTION_IN_G5) && ( \
/* MAINTENANCE IN with service actions REPORT ALIASES (0x0Bh) */ \
/* REPORT DEVICE IDENTIFIER (0x05) REPORT PRIORITY (0x0Eh) */ \
/* REPORT TARGET PORT GROUPS (0x0A) REPORT TIMESTAMP (0x0F) */ \
(((cdb[0]) == SCMD_MAINTENANCE_IN) && ( \
/* ----------------------- */ \
/* SBC-3 (rev 17) Table 3 */ \
/* ----------------------- */ \
/* READ CAPACITY(10) */ \
((cdb[0]) == SCMD_READ_CAPACITY) || \
/* READ CAPACITY(16) */ \
(((cdb[0]) == SCMD_SVC_ACTION_IN_G4) && ( \
/* START STOP UNIT with START bit 0 and POWER CONDITION 0 */ \
(((cdb[0]) == SCMD_START_STOP) && ( \
/* End of SCSI2_CONFLICT_FREE_CMDS */
struct stmf_data_buf *initial_dbuf);
extern int sbd_pgr_reservation_conflict(scsi_task_t *);
extern void sbd_pgr_reset(sbd_lu_t *);
int first_xfer);
/*
* IMPORTANT NOTE:
* =================
* The whole world here is based on the assumption that everything within
* a scsi task executes in a single threaded manner, even the aborts.
* Don't ever change that. There won't be any performance gain but there
* will be tons of race conditions.
*/
void
struct stmf_data_buf *dbuf)
{
int ndx;
int bufs_to_take;
/* Lets try not to hog all the buffers the port has. */
if (iolen == 0)
break;
/* Do not need to do xfer anymore, just complete it */
dbuf->db_data_size = 0;
return;
}
}
do {
/*
* A bad port implementation can keep on failing the
* request but keep on sending us a false
* minsize.
*/
(minsize >= 512));
return;
}
}
}
/*
* sbd_zcopy: Bail-out switch for reduced copy path.
*
* 0 - read & write off
* 1 - read & write on
* 2 - only read on
* 4 - only write on
*/
static void
{
int ret, final_xfer;
/*
* Calculate the limits on xfer_len to the minimum of :
* - task limit
* - lun limit
* - sbd global limit if set
* - first xfer limit if set
*
* First, protect against silly over-ride value
*/
sbd_max_xfer_len = 0;
}
sbd_1st_xfer_len = 0;
}
if (sbd_max_xfer_len)
/*
* Special case the first xfer if hints are set.
*/
/* global over-ride has precedence */
if (sbd_1st_xfer_len)
else
} else {
first_len = 0;
}
if (first_len) {
first_len = 0;
}
final_xfer = 1;
} else {
/*
* Attempt to end xfer on a block boundary.
* The only way this does not happen is if the
* xfer_len is small enough to stay contained
* within the same block.
*/
final_xfer = 0;
if (xfer_aligned_end > xfer_offset)
}
/*
* Allocate object to track the read and reserve
*/
(nblks * sizeof (stmf_sglist_ent_t));
/*
* Setup the dbuf
*
* XXX Framework does not handle variable length sglists
* properly, so setup db_lu_private and db_port_private
* fields here. db_stmf_private is properly set for
* calls to stmf_free.
*/
/*
* XXX Framework assigns space to PP after db_sglist[0]
*/
}
dbuf->db_xfer_status = 0;
if (final_xfer)
/* Need absolute offset for zvol access */
/*
* Accounting for start of read.
* Note there is no buffer address for the probe yet.
*/
xfer_start = gethrtime();
if (ret != 0) {
/*
* Read failure from the backend.
*/
/* nothing queued, just finish */
} else {
/* process failure when other dbufs finish */
}
return;
}
/*
* Allow PP to do setup
*/
if (xstat != STMF_SUCCESS) {
/*
* This could happen if the driver cannot get the
* DDI resources it needs for this request.
* If other dbufs are queued, try again when the next
* one completes, otherwise give up.
*/
/* completion of previous dbuf will retry */
return;
}
/*
* Done with this command.
*/
if (first_xfer)
else
return;
}
/*
* dbuf is now queued on task
*/
/* XXX leave this in for FW? */
/*
* Do not pass STMF_IOF_LU_DONE so that the zvol
* state can be released in the completion callback.
*/
switch (xstat) {
case STMF_SUCCESS:
break;
case STMF_BUSY:
/*
* The dbuf is queued on the task, but unknown
* to the PP, thus no completion will occur.
*/
/* completion of previous dbuf will retry */
return;
}
/*
* Done with this command.
*/
if (first_xfer)
else
return;
case STMF_ABORTED:
/*
* Completion from task_done will cleanup
*/
return;
}
/*
* Update the xfer progress.
*/
}
}
void
struct stmf_data_buf *dbuf)
{
return;
}
return; /* wait for all buffers to complete */
else
return;
}
/* allocate new dbuf */
do {
(minsize >= 512));
}
return;
}
}
}
/*
* This routine must release the DMU resources and free the dbuf
* in all cases. If this is the final dbuf of the task, then drop
* the reader lock on the LU state. If there are no errors and more
* work to do, then queue more xfer operations.
*/
void
struct stmf_data_buf *dbuf)
{
int scmd_err;
/*
* Release the DMU resources.
*/
/*
* Release the dbuf after retrieving needed fields.
*/
/*
* Release the state lock if this is the last completion.
* If this is the last dbuf on task and all data has been
* transferred or an error encountered, then no more dbufs
* will be queued.
*/
(xfer_status != STMF_SUCCESS));
/* all DMU state has been released */
}
/*
* If there have been no errors, either complete the task
* or issue more data xfer operations.
*/
if (!scmd_err) {
/*
* This chunk completed successfully
*/
/*
* This command completed successfully
*
* Status was sent along with data, so no status
* completion will occur. Tell stmf we are done.
*/
return;
}
/*
* Start more xfers
*/
return;
}
/*
* Sort out the failure
*/
/*
* If a previous error occurred, leave the command active
* and wait for the last completion to send the status check.
*/
}
return;
}
/*
* Must have been a failure on current dbuf
*/
}
}
void
struct stmf_data_buf *dbuf)
{
int ret;
int scmd_err, scmd_xfer_done;
/*
* Allow PP to free up resources before releasing the write bufs
* as writing to the backend could take some time.
*/
/*
* All data was queued and this is the last completion,
* but there could still be an error.
*/
(xfer_status != STMF_SUCCESS));
/* start the accounting clock */
xfer_start = gethrtime();
if (scmd_err) {
/* just return the write buffers */
ret = 0;
} else {
if (scmd_xfer_done)
else
zvio->zvio_flags = 0;
/* write the data */
}
/* finalize accounting */
(gethrtime() - xfer_start));
if (ret != 0) {
/* update the error flag */
scmd_err = 1;
}
/* Release the dbuf */
/*
* Release the state lock if this is the last completion.
* If this is the last dbuf on task and all data has been
* transferred or an error encountered, then no more dbufs
* will be queued.
*/
/* all DMU state has been released */
}
/*
* If there have been no errors, either complete the task
* or issue more data xfer operations.
*/
if (!scmd_err) {
/* This chunk completed successfully */
if (scmd_xfer_done) {
/* This command completed successfully */
} else {
}
return;
}
/*
* Start more xfers
*/
return;
}
/*
* Sort out the failure
*/
}
/*
* Leave the command active until last dbuf completes.
*/
return;
}
}
}
/*
* Handle a copy operation using the zvol interface.
*
* Similar to the sbd_data_read/write path, except it goes directly through
* the zvol interfaces. It can pass a port provider sglist in the
* form of uio which is lost through the vn_rdwr path.
*
* Returns:
* STMF_SUCCESS - request handled
* STMF_FAILURE - request not handled, caller must deal with error
*/
static stmf_status_t
{
/* use the stack for small iovecs */
if (iovcnt > 8) {
} else {
}
/* Convert dbuf sglist to iovec format */
for (i = 0; i < iovcnt; i++) {
tiov++;
}
if (resid != 0) {
return (STMF_FAILURE);
}
/* Setup the uio struct */
/* start the accounting clock */
xfer_start = gethrtime();
scsi_task_t *, task);
/* Fetch the data */
scsi_task_t *, task);
} else {
scsi_task_t *, task);
/* Write the data */
scsi_task_t *, task);
}
/* finalize accounting */
(gethrtime() - xfer_start));
if (ret != 0) {
/* Backend I/O error */
return (STMF_FAILURE);
}
return (STMF_SUCCESS);
}
void
{
int fast_path;
if (len == 0) {
len = 256;
}
} else if (op == SCMD_READ_G1) {
} else if (op == SCMD_READ_G5) {
} else if (op == SCMD_READ_G4) {
} else {
return;
}
return;
}
}
fast_path = 0;
} else {
fast_path = 1;
}
if (len == 0) {
return;
}
/*
* Determine if this read can directly use DMU buffers.
*/
TASK_AF_ACCEPT_LU_DBUF)) /* PP allows it */
{
/*
* Reduced copy path
*/
int ret;
/*
* The sl_access_state_lock will be held shared
* for the entire request and released when all
* dbufs have completed.
*/
return;
}
/*
* Check if setup is more expensive than copying the data.
*
* Use the global over-ride sbd_zcopy_threshold if set.
*/
copy_threshold = (sbd_copy_threshold > 0) ?
if (len < copy_threshold &&
SBD_CMD_SCSI_READ, 0);
/* done with the backend */
if (ret != 0) {
/* backend error */
} else {
/* send along good data */
dbuf->db_relative_offset = 0;
/* XXX keep for FW? */
struct stmf_data_buf *, dbuf,
}
return;
}
/* committed to reduced copy */
if (task->task_lu_private) {
} else {
KM_SLEEP);
}
/*
* Setup scmd to track read progress.
*/
scmd->current_ro = 0;
/*
* Kick-off the read.
*/
return;
}
if (initial_dbuf == NULL) {
do {
&minsize, 0);
(minsize >= 512));
if (initial_dbuf == NULL) {
return;
}
}
dbuf = initial_dbuf;
dbuf->db_relative_offset = 0;
/* XXX keep for FW? */
struct stmf_data_buf *, dbuf,
} else {
}
return;
}
if (task->task_lu_private) {
} else {
}
scmd->current_ro = 0;
}
void
{
int bufs_to_take;
goto DO_WRITE_XFER_DONE;
}
/* Lets try not to hog all the buffers the port has. */
/* free current dbuf and allocate a new one */
}
goto DO_WRITE_XFER_DONE;
}
do {
(minsize >= 512));
}
return;
}
}
}
return;
}
}
void
{
int ret;
/*
* Calculate the limits on xfer_len to the minimum of :
* - task limit
* - lun limit
* - sbd global limit if set
* - first xfer limit if set
*
* First, protect against silly over-ride value
*/
sbd_max_xfer_len = 0;
}
sbd_1st_xfer_len = 0;
}
if (sbd_max_xfer_len)
/*
* Special case the first xfer if hints are set.
*/
/* global over-ride has precedence */
if (sbd_1st_xfer_len)
else
} else {
first_len = 0;
}
if (first_len) {
first_len = 0;
}
/*
* Attempt to end xfer on a block boundary.
* The only way this does not happen is if the
* xfer_len is small enough to stay contained
* within the same block.
*/
if (xfer_aligned_end > xfer_offset)
}
/*
* Allocate object to track the write and reserve
*/
(nblks * sizeof (stmf_sglist_ent_t));
/*
* Setup the dbuf
*
* XXX Framework does not handle variable length sglists
* properly, so setup db_lu_private and db_port_private
* fields here. db_stmf_private is properly set for
* calls to stmf_free.
*/
/*
* XXX Framework assigns space to PP after db_sglist[0]
*/
}
dbuf->db_xfer_status = 0;
/* get the buffers */
if (ret != 0) {
/*
* Could not allocate buffers from the backend;
* treat it like an IO error.
*/
/*
* Nothing queued, so no completions coming
*/
}
/*
* Completions of previous buffers will cleanup.
*/
return;
}
/*
* Allow PP to do setup
*/
if (xstat != STMF_SUCCESS) {
/*
* This could happen if the driver cannot get the
* DDI resources it needs for this request.
* If other dbufs are queued, try again when the next
* one completes, otherwise give up.
*/
/* completion of previous dbuf will retry */
return;
}
/*
* Done with this command.
*/
if (first_xfer)
else
return;
}
/*
* dbuf is now queued on task
*/
switch (xstat) {
case STMF_SUCCESS:
break;
case STMF_BUSY:
/*
* The dbuf is queued on the task, but unknown
* to the PP, thus no completion will occur.
*/
/* completion of previous dbuf will retry */
return;
}
/*
* Done with this command.
*/
if (first_xfer)
else
return;
case STMF_ABORTED:
/*
* Completion code will cleanup.
*/
return;
}
/*
* Update the xfer progress.
*/
}
}
void
{
int ndx;
/*
* Decrement the count to indicate the port xfer
* into the dbuf has completed even though the buf is
* still in use here in the LU provider.
*/
}
return;
}
goto WRITE_XFER_DONE;
}
/*
* Initiate the next port xfer to occur in parallel
* with writing this buf.
*/
}
/*
* If this is going to a zvol, use the direct call to
* sbd_zvol_copy_{read,write}. The direct call interface is
* restricted to PPs that accept sglists, but that is not required.
*/
int commit;
commit) != STMF_SUCCESS)
} else {
if (iolen == 0)
break;
break;
}
}
}
return; /* wait for all buffers to complete */
} else {
/*
* If SYNC_WRITE flag is on then we need to flush
* cache before sending status.
* Note: this may be a no-op because of how
* SL_WRITEBACK_CACHE_DISABLE and
* SL_FLUSH_ON_DISABLED_WRITECACHE are set, but not
* worth code complexity of checking those in this code
* path, SBD_SCSI_CMD_SYNC_WRITE is rarely set.
*/
} else {
}
}
return;
}
}
/*
* Return true if copy avoidance is beneficial.
*/
static int
{
/*
* If there is a global copy threshold over-ride, use it.
* Otherwise use the PP value with the caveat that at least
* 1/2 the data must avoid being copied to be useful.
*/
if (sbd_copy_threshold > 0) {
return (len >= sbd_copy_threshold);
} else {
/* sub-blocksize writes always copy */
return (0);
/*
* Calculate amount of data that will avoid the copy path.
* The calculation is only valid if len >= blksize.
*/
}
}
void
{
uint8_t sync_wr_flag = 0;
return;
}
if (op == SCMD_WRITE) {
if (len == 0) {
len = 256;
}
} else if (op == SCMD_WRITE_G1) {
} else if (op == SCMD_WRITE_G5) {
} else if (op == SCMD_WRITE_G4) {
} else if (op == SCMD_WRITE_VERIFY) {
} else if (op == SCMD_WRITE_VERIFY_G5) {
} else if (op == SCMD_WRITE_VERIFY_G4) {
} else {
return;
}
return;
}
}
if (len == 0) {
return;
}
TASK_AF_ACCEPT_LU_DBUF) && /* PP allows it */
/*
* XXX Note that disallowing initial_dbuf will eliminate
* iSCSI from participating. For small writes, that is
* probably ok. For large writes, it may be best to just
* copy the data from the initial dbuf and use zcopy for
* the rest.
*/
return;
}
/*
* Setup scmd to track the write progress.
*/
if (task->task_lu_private) {
} else {
KM_SLEEP);
}
scmd->current_ro = 0;
return;
}
if (initial_dbuf->db_data_size >
/* protocol error */
return;
}
}
do_immediate_data = 1;
}
dbuf = initial_dbuf;
if (task->task_lu_private) {
} else {
}
scmd->current_ro = 0;
if (do_immediate_data) {
/*
* Account for data passed in this write command
*/
} else {
}
}
/*
* Utility routine to handle small non performance data transfers to the
* initiators. dbuf is an initial data buf (if any), 'p' points to a data
* buffer which is source of data for transfer, cdb_xfer_size is the
* transfer size based on CDB, cmd_xfer_size is the actual amount of data
* which this command would transfer (the size of data pointed to by 'p').
*/
void
{
} else {
}
if (cmd_xfer_size == 0) {
return;
}
}
return;
}
uint8_t *d;
uint32_t s;
bufsize += s;
}
dbuf->db_relative_offset = 0;
}
}
void
struct stmf_data_buf *dbuf)
{
return;
}
}
void
{
} else {
}
if (cdb_xfer_size == 0) {
return;
}
KM_SLEEP);
} else {
}
return;
}
dbuf->db_relative_offset = 0;
} else {
STMF_ABORTED, NULL);
return;
}
}
}
void
{
/*
* For now lets assume we will get only one sglist element
* for short writes. If that ever changes, we should allocate
* a local buffer and copy all the sg elements to one linear space.
*/
return;
}
/* Lets find out who to call */
case SCMD_MODE_SELECT:
case SCMD_MODE_SELECT_G1:
if (st_ret != STMF_SUCCESS) {
}
} else {
}
break;
if (st_ret != STMF_SUCCESS) {
}
} else {
}
break;
default:
/* This should never happen */
STMF_ABORTED, NULL);
}
}
/*
 * NOTE(review): this fragment is missing the function's name line and the
 * switch statement that dispatches on the CDB opcode; from the visible
 * case labels it builds READ CAPACITY parameter data.  Confirm all notes
 * below against the complete source.
 */
void
struct stmf_data_buf *initial_dbuf)
{
/* Response buffer: 8 bytes used for RC(10), 32 bytes for RC(16). */
uint8_t p[32];
uint64_t s;
/* s is decremented, presumably from block count to last addressable LBA. */
s--;
case SCMD_READ_CAPACITY:
/*
 * READ CAPACITY(10): the returned-LBA field is only 4 bytes wide.
 * If the value does not fit in 32 bits, report all-ones so the
 * initiator falls back to READ CAPACITY(16).
 */
if (s & 0xffffffff00000000ull) {
p[0] = p[1] = p[2] = p[3] = 0xFF;
} else {
/* Big-endian encoding of the 32-bit last LBA. */
p[0] = (s >> 24) & 0xff;
p[1] = (s >> 16) & 0xff;
p[2] = (s >> 8) & 0xff;
p[3] = s & 0xff;
}
p[4] = 0; p[5] = 0;
break;
case SCMD_SVC_ACTION_IN_G4:
/*
 * READ CAPACITY(16) via SERVICE ACTION IN(16): 32-byte parameter
 * data with a full 8-byte big-endian last-LBA field.
 */
bzero(p, 32);
p[0] = (s >> 56) & 0xff;
p[1] = (s >> 48) & 0xff;
p[2] = (s >> 40) & 0xff;
p[3] = (s >> 32) & 0xff;
p[4] = (s >> 24) & 0xff;
p[5] = (s >> 16) & 0xff;
p[6] = (s >> 8) & 0xff;
p[7] = s & 0xff;
cdb_len, 32);
break;
}
}
/*
 * Pick synthetic disk geometry values based on capacity.
 * NOTE(review): the parameter-list line is missing from this fragment;
 * the capacity `s` and output pointers `nsectors`/`nheads` must come
 * from it -- confirm against the complete source.
 */
void
{
/* Devices smaller than 4 GiB get a small geometry (32 sectors, 8 heads). */
if (s < (4ull * 1024ull * 1024ull * 1024ull)) {
*nsectors = 32;
*nheads = 8;
} else {
/* Larger devices report 254 sectors and 254 heads. */
*nsectors = 254;
*nheads = 254;
}
}
void
{
uint8_t *p;
p = buf; /* buf is assumed to be zeroed out and large enough */
n = 0;
if (cdb[0] == SCMD_MODE_SENSE) {
header_size = 4;
} else {
header_size = 8;
}
/* Now validate the command */
pc_valid = 1;
} else {
pc_valid = 0;
}
return;
}
/* We will update the length in the mode header at the end */
/* Block dev device specific param in mode param header has wp bit */
p[n + dev_spec_param_offset] = BIT_7;
}
n += header_size;
/* We are not going to return any block descriptor */
p[n] = 0x03;
p[n+1] = 0x16;
if (ctrl != 1) {
p[n + 11] = nsectors;
p[n + 20] = 0x80;
}
n += 24;
}
p[n] = 0x04;
p[n + 1] = 0x16;
if (ctrl != 1) {
p[n + 5] = nheads;
p[n + 20] = 0x15;
p[n + 21] = 0x18;
}
n += 24;
}
struct mode_caching *mode_caching_page;
mode_caching_page = (struct mode_caching *)&p[n];
switch (ctrl) {
case (0):
/* Current */
}
break;
case (1):
/* Changeable */
SL_WRITEBACK_CACHE_SET_UNSUPPORTED) == 0) {
}
break;
default:
SL_SAVED_WRITE_CACHE_DISABLE) == 0) {
}
break;
}
n += (sizeof (struct mode_page) +
}
struct mode_control_scsi3 *mode_control_page;
mode_control_page = (struct mode_control_scsi3 *)&p[n];
if (ctrl != 1) {
/* If not looking for changeable values, report this. */
}
n += (sizeof (struct mode_page) +
}
if (cdb[0] == SCMD_MODE_SENSE) {
if (n > 255) {
return;
}
/*
* Mode parameter header length doesn't include the number
* of bytes in the length field, so adjust the count.
* Byte count minus header length field size.
*/
} else {
/* Byte count minus header length field size. */
}
cmd_size, n);
}
void
{
} else {
}
return;
}
if (cmd_xfer_len == 0) {
/* zero byte mode selects are allowed */
return;
}
}
void
{
int i;
hdr_len = 4;
} else {
hdr_len = 8;
}
goto mode_sel_param_len_err;
goto mode_sel_param_len_err;
goto mode_sel_param_len_err;
}
goto mode_sel_param_field_err;
}
if (buf[i]) {
goto mode_sel_param_field_err;
}
}
sret = SBD_SUCCESS;
/* All good. Lets handle the write cache change, if any */
} else {
}
if (sret != SBD_SUCCESS) {
return;
}
/* set on the device passed, now set the flags */
} else {
}
continue;
}
} else {
}
} else {
}
if (sret == SBD_SUCCESS) {
} else {
}
return;
return;
}
/*
* Command support added from SPC-4 r24
* Supports info type 0, 2, 127
*/
void
{
/* Validate the command */
if (cmd_size < 4) {
return;
}
switch (info_type) {
case 0:
/*
* No value is supplied but this info type
* is mandatory.
*/
xfer_size = 4;
break;
case 2:
/* text info must be null terminated */
if (++param_len > 256)
param_len = 256;
break;
case 127:
/* 0 and 2 descriptor supported */
p += 8;
*p = 4; /* set type to 2 (7 hi bits) */
p += 2;
xfer_size = 12;
break;
default:
return;
}
}
/*
* This function parses through a string, passed to it as a pointer to a string,
* by adjusting the pointer to the first non-space character and returns
* Management URLs are stored as a space delimited string in sl_mgmt_url
* field of sbd_lu_t. This function is used to retrieve one url at a time.
*
* i/p : pointer to pointer to a url string
* o/p : Adjust the pointer to the url to the first non white character
* and returns the length of the URL
*/
/*
 * NOTE(review): several lines of this function appear to be missing in
 * this fragment (the `if` headers inside both while loops, and the
 * initialization of `url`, which is read before any visible assignment).
 * Confirm against the complete source.
 */
sbd_parse_mgmt_url(char **url_addr) {
/* Length in bytes of the URL found at *url_addr (returned to caller). */
uint16_t url_length = 0;
char *url;
/*
 * Per the contract documented above: advance the caller's pointer past
 * leading whitespace to the first non-space character.
 */
while (*url != '\0') {
(*url_addr)++;
} else {
break;
}
}
/* Count characters until the end of this (space-delimited) URL. */
while (*url != '\0') {
break;
}
url++;
url_length++;
}
return (url_length);
}
void
{
uint8_t *p;
uint32_t mgmt_url_size = 0;
/*
* Basic protocol checks.
*/
return;
}
/*
* Zero byte allocation length is not an error. Just
* return success.
*/
if (cmd_size == 0) {
task->task_cmd_xfer_length = 0;
if (task->task_additional_flags &
}
return;
}
/*
* Standard inquiry
*/
int i;
struct scsi_inquiry *inq;
inq = (struct scsi_inquiry *)p;
page_length = 69;
} else {
}
} else {
}
} else {
}
/* Adding Version Descriptors */
i = 0;
/* SAM-3 no version */
i++;
/* transport */
case PROTOCOL_FIBRE_CHANNEL:
i++;
break;
case PROTOCOL_PARALLEL_SCSI:
case PROTOCOL_SSA:
case PROTOCOL_IEEE_1394:
/* Currently no claims of conformance */
break;
case PROTOCOL_SRP:
i++;
break;
case PROTOCOL_iSCSI:
i++;
break;
case PROTOCOL_SAS:
case PROTOCOL_ADT:
case PROTOCOL_ATAPI:
default:
/* Currently no claims of conformance */
break;
}
/* SPC-3 no version */
i++;
/* SBC-2 no version */
return;
}
if (sl->sl_mgmt_url) {
} else if (sbd_mgmt_url) {
}
/*
* EVPD handling
*/
/* Default 512 bytes may not be enough, increase bsize if necessary */
}
switch (cdbp[2]) {
case 0x00:
p[0] = byte0;
p[3] = page_length;
/* Supported VPD pages in ascending order */
{
uint8_t i = 5;
p[i++] = 0x80;
p[i++] = 0x83;
if (mgmt_url_size != 0)
p[i++] = 0x85;
p[i++] = 0x86;
}
break;
case 0x80:
if (sl->sl_serial_no_size) {
} else {
/* if no serial num is specified set 4 spaces */
page_length = 4;
}
p[0] = byte0;
p[1] = 0x80;
p[3] = page_length;
break;
case 0x83:
break;
case 0x85:
if (mgmt_url_size == 0) {
goto err_done;
}
{
char *url;
p[0] = byte0;
p[1] = 0x85;
idx = 4;
/* Creating Network Service Descriptors */
while (url_size != 0) {
/* Null terminated and 4 Byte aligned */
/*
* SPC-3r23 : Table 320 (Sec 7.6.5)
* (Network service descriptor format
*
* Note: Hard coding service type as
* "Storage Configuration Service".
*/
p[idx] = 1;
}
/* skip to next mgmt url if any */
}
/* Total descriptor length */
break;
}
case 0x86:
page_length = 0x3c;
p[0] = byte0;
p[1] = 0x86; /* Page 86 response */
p[3] = page_length;
/*
* Bits 0, 1, and 2 will need to be updated
* that is implemented. For now, we're going
* to claim support only for Simple TA.
*/
p[5] = 1;
break;
default:
goto err_done;
}
}
{
if ((task->task_lu_private =
return (STMF_SUCCESS);
}
return (STMF_ALLOC_FAILURE);
}
void
{
break;
}
}
sbd_it_data_t *, it);
}
void
{
/* If we dont have any reservations, just get out. */
return;
}
/* Find the I_T nexus which is holding the reservation. */
break;
}
}
} else {
/*
* We were passed an I_T nexus. If this nexus does not hold
* the reservation, do nothing. This is why this function is
* called "check_and_clear".
*/
return;
}
}
}
void
{
if (it->sbd_it_session_id ==
return;
}
}
return;
}
!= STMF_SUCCESS) {
return;
}
}
}
if (task->task_mgmt_function) {
return;
}
/*
* if we're transitioning between access
* states, return NOT READY
*/
return;
}
/* Checking ua conditions as per SAM3R14 5.3.2 specified order */
saa = STMF_SAA_POR;
}
if (saa) {
return;
}
}
/* Reservation conflict checks */
if (sbd_pgr_reservation_conflict(task)) {
return;
}
return;
}
}
}
/* Rest of the ua condition checks */
saa = 0;
} else {
}
} else if (it->sbd_it_ua_conditions &
} else if (it->sbd_it_ua_conditions &
} else if (it->sbd_it_ua_conditions &
} else {
it->sbd_it_ua_conditions = 0;
saa = 0;
}
if (saa) {
return;
}
}
if (cdb0 != SCMD_INQUIRY &&
cdb0 != SCMD_MODE_SENSE &&
cdb0 != SCMD_MODE_SENSE_G1 &&
cdb0 != SCMD_MODE_SELECT &&
cdb0 != SCMD_MODE_SELECT_G1 &&
cdb0 != SCMD_RESERVE &&
cdb0 != SCMD_RELEASE &&
cdb0 != SCMD_REQUEST_SENSE &&
cdb0 != SCMD_READ_CAPACITY &&
cdb0 != SCMD_TEST_UNIT_READY &&
cdb0 != SCMD_START_STOP &&
cdb0 != SCMD_READ_G1 &&
cdb0 != SCMD_READ_G4 &&
cdb0 != SCMD_READ_G5 &&
!(cdb0 == SCMD_SVC_ACTION_IN_G4 &&
cdb1 == SSVC_ACTION_READ_CAPACITY_G4) &&
!(cdb0 == SCMD_MAINTENANCE_IN &&
!(cdb0 == SCMD_MAINTENANCE_IN &&
return;
}
/*
* is this a short write?
* if so, we'll need to wait until we have the buffer
* before proxying the command
*/
switch (cdb0) {
case SCMD_MODE_SELECT:
case SCMD_MODE_SELECT_G1:
break;
default:
if (st_ret != STMF_SUCCESS) {
}
return;
}
}
return;
}
return;
}
return;
}
return;
}
if (cdb0 == SCMD_PERSISTENT_RESERVE_OUT) {
return;
}
if (cdb0 == SCMD_PERSISTENT_RESERVE_IN) {
return;
}
if (cdb0 == SCMD_RELEASE) {
if (cdb1) {
return;
}
/* If not owner don't release it, just return good */
if (it->sbd_it_session_id !=
return;
}
}
return;
}
if (cdb0 == SCMD_RESERVE) {
if (cdb1) {
return;
}
/* If not owner, return conflict status */
if (it->sbd_it_session_id !=
return;
}
}
return;
}
if (cdb0 == SCMD_REQUEST_SENSE) {
/*
* LU provider needs to store unretrieved sense data
* return good status with no sense.
*/
} else {
}
return;
}
/* Report Target Port Groups */
if ((cdb0 == SCMD_MAINTENANCE_IN) &&
return;
}
/* Report Identifying Information */
if ((cdb0 == SCMD_MAINTENANCE_IN) &&
return;
}
task->task_cmd_xfer_length = 0;
return;
}
} else {
}
return;
}
uint8_t *p;
kmem_free(p, 512);
return;
}
return;
}
task->task_cmd_xfer_length = 0;
return;
}
return;
}
if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
return;
/*
* } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
* sbd_handle_read(task, initial_dbuf);
* return;
*/
}
}
/*
* if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
* if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
* sbd_handle_write(task, initial_dbuf);
* return;
* }
* }
*/
if (cdb0 == SCMD_VERIFY) {
/*
* Something more likely needs to be done here.
*/
task->task_cmd_xfer_length = 0;
return;
}
if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
return;
}
/*
* Write and Verify use the same path as write, but don't clutter the
* performance path above with checking for write_verify opcodes. We
* rely on zfs's integrity checks for the "Verify" part of Write &
* Verify. (Even if we did a read to "verify" we'd merely be reading
* cache, not actual media.)
* Therefore we
* a) only support this if sbd_is_zvol, and
* b) run the IO through the normal write path with a forced
* sbd_flush_data_cache at the end.
*/
cdb0 == SCMD_WRITE_VERIFY ||
cdb0 == SCMD_WRITE_VERIFY_G4 ||
cdb0 == SCMD_WRITE_VERIFY_G5)) {
return;
}
}
void
{
/*
* Buffers passed in from the LU always complete
* even if the task is no longer active.
*/
case (SBD_CMD_SCSI_READ):
break;
case (SBD_CMD_SCSI_WRITE):
break;
default:
(void *)task);
break;
}
return;
}
return;
case (SBD_CMD_SCSI_READ):
break;
case (SBD_CMD_SCSI_WRITE):
break;
case (SBD_CMD_SMALL_READ):
break;
case (SBD_CMD_SMALL_WRITE):
break;
default:
break;
}
}
/* ARGSUSED */
void
{
"sbd_send_status_done: this should not have been called");
}
void
{
if (task->task_lu_private) {
(void *)task);
}
}
}
/*
* Aborts are synchronous w.r.t. I/O AND
* All the I/O which SBD does is synchronous AND
* Everything within a task is single threaded.
* IT MEANS
* If this function is called, we are doing nothing with this task
* inside of sbd module.
*/
/*
 * Abort entry point for the LU provider.
 * NOTE(review): the signature line is missing from this fragment
 * (parameters `lu`, `abort_cmd`, and `task` are referenced below), and
 * at least one conditional header appears to have been lost before the
 * final return -- confirm against the complete source.
 */
/* ARGSUSED */
{
/* Full LU reset: delegate to the reset-state handler. */
if (abort_cmd == STMF_LU_RESET_STATE) {
return (sbd_lu_reset_state(lu));
}
/* ITL handle removal requires no work here; report success. */
if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
return (STMF_SUCCESS);
}
/* If the task has LU-private state, the abort is considered handled. */
if (task->task_lu_private) {
return (STMF_ABORT_SUCCESS);
}
}
/* Task not known to this module. */
return (STMF_NOT_FOUND);
}
/*
* This function is called during task clean-up if the
* DB_LU_FLAG is set on the dbuf. This should only be called for
* abort processing after sbd_abort has been called for the task.
*/
void
{
} else {
}
}
/* ARGSUSED */
void
{
(cmd == STMF_CMD_LU_OFFLINE) ||
(cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
(cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
switch (cmd) {
case STMF_CMD_LU_ONLINE:
}
break;
case STMF_CMD_LU_OFFLINE:
}
break;
/* Fallthrough */
sl->sl_state_not_acked = 0;
break;
}
}
/* ARGSUSED */
{
return (STMF_NOT_SUPPORTED);
}
{
}
} else {
(void) sbd_wcd_set(0, sl);
}
}
return (STMF_FAILURE);
}
return (STMF_SUCCESS);
}
{
int r = 0;
int ret;
if (fsync_done)
goto over_fsync;
return (SBD_FAILURE);
}
} else if (ret != 0) {
return (SBD_FAILURE);
}
}
return (SBD_SUCCESS);
}
/* ARGSUSED */
static void
struct stmf_data_buf *initial_dbuf)
{
int is_g4 = 0;
int immed;
task->task_cmd_xfer_length = 0;
/*
* Determine if this is a 10 or 16 byte CDB
*/
is_g4 = 1;
/*
* Determine other requested parameters
*
* We don't have a non-volatile cache, so don't care about SYNC_NV.
* Do not support the IMMED bit.
*/
if (immed) {
return;
}
/*
* Check to be sure we're not being asked to sync an LBA
* that is out of range. While checking, verify reserved fields.
*/
if (is_g4) {
return;
}
} else {
return;
}
}
return;
}
if (sret != SBD_SUCCESS) {
return;
}
}