/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* RDMA channel interface for Solaris SCSI RDMA Protocol Target (SRP)
* transport port provider module for the COMSTAR framework.
*/
#include <sys/sysmacros.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/ib/ibtl/ibti.h>
#include <sys/stmf.h>
#include <sys/stmf_ioctl.h>
#include <sys/portif.h>
#include "srp.h"
#include "srpt_impl.h"
#include "srpt_ioc.h"
#include "srpt_stp.h"
#include "srpt_ch.h"
extern srpt_ctxt_t *srpt_ctxt;
extern uint16_t srpt_send_msg_depth;
/*
* Prototypes.
*/
/*
* srpt_ch_alloc()
*/
{
ch->ch_cv_waiters = 0;
ch->ch_req_lim_delta = 0;
ch->ch_ti_iu_len = 0;
&cq_real_size);
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L1("ch_alloc, send CQ alloc error (%d)",
status);
goto scq_alloc_err;
}
&cq_real_size);
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_alloc, receive CQ alloc error (%d)",
status);
goto rcq_alloc_err;
}
ch_args.rc_control = 0;
/*
* Any SRP IU can result in a number of STMF data buffer transfers
* and those transfers themselves could span multiple initiator
* buffers. Therefore, the number of send WQEs actually required
* can vary. Here we assume that on average an I/O will require
* no more than SRPT_MAX_OUT_IO_PER_CMD send WQEs. In practice
* this will prevent send work queue overrun, but we will also
* inform STMF to throttle I/O should the work queue become full.
*
* If the HCA tells us the max outstanding WRs for a channel is
* lower than our default, use the HCA value (see the sizing
* sketch following this function).
*/
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_alloc, IBT channel alloc error (%d)",
status);
goto qp_alloc_err;
}
/*
* Create a pool of send WQE entries to map send WQE work IDs
* to various types (specifically needed in error cases where the OP
* is not known).
*/
KM_SLEEP);
SRPT_DPRINTF_L2("ch_alloc, SWQE alloc error");
goto qp_alloc_err;
}
}
return (ch);
return (NULL);
}
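/*
 * A minimal sketch (not part of the original source) of the send queue
 * sizing policy described above srpt_ch_alloc(): size the queue for
 * srpt_send_msg_depth commands at SRPT_MAX_OUT_IO_PER_CMD send WQEs each,
 * clamped to the HCA limit.  The helper name and the hca_max_chan_wr
 * parameter are hypothetical stand-ins for the HCA attribute the real
 * code consults.
 */
static uint32_t
srpt_ch_sq_size_sketch(uint32_t hca_max_chan_wr)
{
	/* Assume each I/O needs at most SRPT_MAX_OUT_IO_PER_CMD send WQEs. */
	uint32_t sq_size = srpt_send_msg_depth * SRPT_MAX_OUT_IO_PER_CMD;

	/* Never ask for more WRs than the HCA supports on one channel. */
	if (sq_size > hca_max_chan_wr)
		sq_size = hca_max_chan_wr;

	return (sq_size);
}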
/*
* srpt_ch_add_ref()
*/
void
{
SRPT_DPRINTF_L4("ch_add_ref, ch (%p), refcnt (%d)",
}
/*
* srpt_ch_release_ref()
*
* A non-zero value for wait causes the calling thread to block until all
* references to the channel are released.
*/
void
{
SRPT_DPRINTF_L4("ch_release_ref, ch (%p), refcnt (%d), wait (%d)",
if (wait) {
ch->ch_cv_waiters++;
}
ch->ch_cv_waiters--;
} else {
return;
}
}
/*
* Last thread out frees the IB resources, locks/conditions and memory
*/
if (ch->ch_cv_waiters > 0) {
/* we're not last, wake someone else up */
return;
}
SRPT_DPRINTF_L3("ch_release_ref - release resources");
if (ch->ch_chan_hdl) {
SRPT_DPRINTF_L3("ch_release_ref - free channel");
}
if (ch->ch_scq_hdl) {
}
if (ch->ch_rcq_hdl) {
}
/*
* There should be no IUs associated with this
* channel on the SCSI session.
*/
/*
* We currently have only one channel per session; we will
* need to release a reference here when support is added
* for multi-channel target login.
*/
}
}
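/*
 * A minimal sketch (not part of the original source) of the reference
 * counting pattern srpt_ch_add_ref()/srpt_ch_release_ref() implement:
 * waiters block until the count drains, and the last thread out performs
 * the resource teardown.  The structure and field names below are generic
 * stand-ins; the real fields live in srpt_channel_t (srpt_impl.h).
 */
typedef struct sketch_refobj {
	kmutex_t	ro_lock;
	kcondvar_t	ro_cv;
	uint_t		ro_refcnt;
	uint_t		ro_cv_waiters;
} sketch_refobj_t;

static void
sketch_release_ref(sketch_refobj_t *ro, uint_t wait)
{
	mutex_enter(&ro->ro_lock);
	ro->ro_refcnt--;
	if (ro->ro_refcnt != 0) {
		if (!wait) {
			mutex_exit(&ro->ro_lock);
			return;
		}
		/* Block until every reference has been dropped. */
		ro->ro_cv_waiters++;
		while (ro->ro_refcnt != 0)
			cv_wait(&ro->ro_cv, &ro->ro_lock);
		ro->ro_cv_waiters--;
	}
	if (ro->ro_cv_waiters > 0) {
		/* We're not last; wake another waiter to do the teardown. */
		cv_signal(&ro->ro_cv);
		mutex_exit(&ro->ro_lock);
		return;
	}
	mutex_exit(&ro->ro_lock);
	/* Last one out: free IB resources, destroy ro_lock/ro_cv, free ro. */
}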
/*
* srpt_ch_disconnect()
*/
void
{
SRPT_DPRINTF_L3("ch_disconnect, invoked for ch (%p)",
(void *)ch);
/*
* If we are already in the process of disconnecting, then
* nothing need be done; CM will call back into us when done.
*/
SRPT_DPRINTF_L2("ch_disconnect, called when"
" disconnect in progress");
return;
}
/*
* Initiate the sending of the CM DREQ message; the private data
* should be the SRP Target logout IU. We don't really care about
* the remote CM DREP message returned. We issue this in an
* asynchronous manner and will clean up when called back by CM.
*/
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_disconnect, close RC channel"
" err(%d)", status);
}
}
/*
* srpt_ch_cleanup()
*/
void
{
SRPT_DPRINTF_L3("ch_cleanup, invoked for ch(%p), state(%d)",
/* add a ref for the channel until we're done */
/*
* Make certain the channel is in the target port's list of
* known channels and remove it (releasing the target
* port's reference to the channel).
*/
break;
}
}
SRPT_DPRINTF_L2("ch_cleanup, target channel no"
"longer known to target");
srpt_ch_release_ref(ch, 0);
return;
}
/*
* Don't accept any further incoming requests, and clean
* up the receive queue. The send queue is left alone
* so tasks can finish and clean up (whether normally
* or via abort).
*/
if (ch->ch_rcq_hdl) {
IBT_SUCCESS) {
SRPT_DPRINTF_L4("ch_cleanup, recovering"
" outstanding RX iu(%p)", (void *)iu);
/*
* Channel reference has not yet been added for this
* IU, so do not decrement.
*/
}
}
/*
* Go through the list of outstanding IUs for the channel's SCSI
* session and for each either abort or complete an abort (the abort
* call is illustrated in the sketch following this function).
*/
"ch_cleanup, NULL stmf task");
ASSERT(0);
}
} else {
}
SRPT_DPRINTF_L4("ch_cleanup, aborting "
"task(%p)", (void *)iutask);
STMF_ABORTED, NULL);
}
}
}
srpt_ch_release_ref(ch, 0);
}
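/*
 * A minimal sketch (not part of the original source) of how one
 * outstanding task is aborted in the cleanup loop above.  stmf_abort()
 * with STMF_QUEUE_TASK_ABORT/STMF_ABORTED is the COMSTAR interface
 * involved; the task pointer would be taken from the session's IU list
 * walked above, and completion of the abort is reported back to the
 * port provider asynchronously.
 */
static void
sketch_abort_one_task(scsi_task_t *task)
{
	/* Queue an abort with STMF; STMF calls back when the task is done. */
	stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
}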
/*
* srpt_ch_rsp_comp()
*
* Process a completion for an IB SEND message. A SEND completion
* is for an SRP response packet sent back to the initiator. It
* will not have an STMF SCSI task associated with it if it was
* sent for a rejected IU or was a task management abort response.
*/
static void
{
/*
* Process the completion regardless of whether it's a failure or
* a success. At this point, we've processed as far as we can and
* just need to complete the associated task.
*/
if (wc_status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_rsp_comp, WC status err(%d)",
st = STMF_FAILURE;
if (wc_status != IBT_WC_WR_FLUSHED_ERR) {
}
}
/*
* If the IU response completion is not associated
* with a SCSI task, release the IU to return the resource
* and the reference to the channel it holds.
*/
srpt_ch_release_ref(ch, 0);
return;
}
/*
* We should not get a SEND completion where the task has already
* completed aborting and STMF has been informed.
*/
/*
* Let STMF know we are done.
*/
}
/*
* srpt_ch_data_comp()
*
* Process an IB completion for an RDMA operation. This completion
* should be associated with the last RDMA operation for any
* data buffer transfer.
*/
static void
{
/*
* If work completion indicates non-flush failure, then
* start a channel disconnect (asynchronous) and release
* the reference to the IU. The task will be cleaned
* up with STMF during channel shutdown processing.
*/
if (wc_status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_data_comp, WC status err(%d)",
if (wc_status != IBT_WC_WR_FLUSHED_ERR) {
}
return;
}
/*
* If STMF has requested this task be aborted, then if this is the
* last I/O operation outstanding, notify STMF the task has been
* aborted and ignore the completion.
*/
STMF_ABORTED, NULL);
return;
}
/*
* We should not get an RDMA completion where the task has already
* completed aborting and STMF has been informed.
*/
/*
* Good completion for the last RDMA op associated with a data buffer
* I/O; if status was requested, initiate it, otherwise let STMF know
* we are done.
*/
if (status == STMF_SUCCESS) {
return;
}
}
}
/*
* srpt_ch_scq_hdlr()
*/
static void
{
int i;
/* Reference channel for the duration of this call */
for (;;) {
&entries);
if (status != IBT_SUCCESS) {
if (status != IBT_CQ_EMPTY) {
/*
* This error should not happen. It indicates that
* something abnormal has gone wrong, implying
* either a hardware or a programming logic error.
*/
"ch_scq_hdlr, unexpected CQ err(%d)",
status);
}
/*
* If we have not rearmed the CQ, do so now and poll to
* eliminate the race; otherwise we are done (see the
* poll/rearm sketch following this function).
*/
if (cq_rearmed == 0) {
cq_rearmed = 1;
continue;
} else {
break;
}
}
/*
* A zero work ID indicates this CQE is associated
* with an intermediate post of an RDMA data transfer
* operation. Since intermediate data requests are
* unsignaled, we should only get these if there was
* an error. No action is required.
*/
continue;
}
case SRPT_SWQE_TYPE_RESP:
break;
case SRPT_SWQE_TYPE_DATA:
break;
default:
SRPT_DPRINTF_L2("ch_scq_hdlr, bad type(%d)",
ASSERT(0);
}
}
}
srpt_ch_release_ref(ch, 0);
}
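/*
 * A minimal sketch (not part of the original source) of the poll/rearm
 * sequence used by both CQ handlers in this file: drain the CQ, rearm it
 * once with ibt_enable_cq_notify(), then poll again so a completion that
 * raced the rearm is not lost.  The batch size and per-entry dispatch are
 * placeholders.
 */
static void
sketch_drain_cq(ibt_cq_hdl_t cq_hdl)
{
	ibt_wc_t	wc[16];	/* poll batch size chosen arbitrarily here */
	uint_t		entries;
	uint_t		i;
	uint_t		cq_rearmed = 0;
	ibt_status_t	status;

	for (;;) {
		status = ibt_poll_cq(cq_hdl, wc, 16, &entries);
		if (status != IBT_SUCCESS) {
			if (status != IBT_CQ_EMPTY) {
				/* Unexpected CQ error; stop processing. */
				break;
			}
			/*
			 * The CQ is empty.  Rearm it once, then poll again
			 * to close the race with a late completion.
			 */
			if (cq_rearmed == 0) {
				(void) ibt_enable_cq_notify(cq_hdl,
				    IBT_NEXT_COMPLETION);
				cq_rearmed = 1;
				continue;
			}
			break;
		}
		for (i = 0; i < entries; i++) {
			/* dispatch wc[i] based on its work ID and wc_status */
			;
		}
	}
}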
/*
* srpt_ch_rcq_hdlr()
*/
static void
{
int i;
/*
* The channel object will exist while the CQ handler callback
* is installed.
*/
/*
* If we know a channel disconnect has started, do nothing
* and let the channel cleanup code recover resources from the CQ.
* We are not concerned about races with the state transition
* since the code will do the correct thing either way. This
* is simply to avoid rearming the CQ; the handler will
* catch the state next time.
*/
SRPT_DPRINTF_L2("ch_rcq_hdlr, channel disconnecting");
srpt_ch_release_ref(ch, 0);
return;
}
for (;;) {
&entries);
if (status != IBT_SUCCESS) {
if (status != IBT_CQ_EMPTY) {
/*
* This error should not happen. It indicates that
* something abnormal has gone wrong, implying
* either a hardware or a programming logic error.
*/
"ch_rcq_hdlr, unexpected CQ err(%d)",
status);
break;
}
/*
* If we have not rearmed the CQ, do so now and poll to
* eliminate the race; otherwise we are done.
*/
if (cq_rearmed == 0) {
cq_rearmed = 1;
continue;
} else {
break;
}
}
/*
* Check wc_status before proceeding. If the
* status indicates a channel problem, stop processing.
*/
"ch_rcq, unexpected"
" wc_status err(%d)",
goto done;
} else {
/* skip IUs with errors */
"ch_rcq, ERROR comp(%d)",
/* XXX - verify not leaking IUs */
continue;
}
}
/*
* Process the IU.
*/
}
}
done:
srpt_ch_release_ref(ch, 0);
}
/*
* srpt_ch_srp_cmd()
*/
static int
{
int i;
/*
* The SRP specification and SAM require support for bi-directional
* data transfer, so we create a single buffer descriptor list in
* the IU buffer that covers both the data-in and data-out buffers.
* In practice we will just see unidirectional transfers with either
* data-in or data-out descriptors. If we were to take that as fact,
* we could reduce overhead slightly.
*/
/*
* The additional length field is a 6-bit count of 4-byte words, so
* multiply by 4 to get bytes.
*/
iu->iu_num_rdescs = 0;
/*
* Examine buffer description for Data In (i.e. data flows
* to the initiator).
*/
if (di_fmt == SRP_DATA_DESC_DIRECT) {
} else if (di_fmt == SRP_DATA_DESC_INDIRECT) {
sizeof (srp_direct_desc_t);
/*
* Some initiators like OFED occasionally use the wrong counts,
* so check total to allow for this. NOTE: we do not support
* reading of the descriptor table from the initiator, so if
* not all descriptors are in the IU we drop the task.
*/
SRPT_DPRINTF_L2("ch_srp_cmd, remote RDMA of"
" descriptors not supported");
SRPT_DPRINTF_L2("ch_srp_cmd, sizeof entry (%d),"
" i_di_cnt(%d), cr_dicnt(%d)",
(uint_t)sizeof (srp_direct_desc_t),
return (1);
}
sizeof (srp_direct_desc_t) * i_di_cnt);
}
/*
* Examine buffer description for Data Out (i.e. data flows
* from the initiator).
*/
if (do_fmt == SRP_DATA_DESC_DIRECT) {
if (di_fmt == SRP_DATA_DESC_DIRECT) {
sizeof (srp_direct_desc_t));
}
iu->iu_num_rdescs++;
} else if (do_fmt == SRP_DATA_DESC_INDIRECT) {
sizeof (srp_direct_desc_t);
/*
* Some initiators like OFED occasionally use the wrong counts,
* so check total to allow for this. NOTE: we do not support
* reading of the descriptor table from the initiator, so if
* not all descriptors are in the IU we drop the task.
*/
SRPT_DPRINTF_L2("ch_srp_cmd, remote RDMA of"
" descriptors not supported");
SRPT_DPRINTF_L2("ch_srp_cmd, sizeof entry (%d),"
" i_do_cnt(%d), cr_docnt(%d)",
(uint_t)sizeof (srp_direct_desc_t),
return (1);
}
sizeof (srp_direct_desc_t) * i_do_cnt);
}
iu->iu_tot_xfer_len = 0;
for (i = 0; i < iu->iu_num_rdescs; i++) {
}
#ifdef DEBUG
if (srpt_errlevel >= SRPT_LOG_L4) {
SRPT_DPRINTF_L4("ch_srp_cmd, iu->iu_tot_xfer_len (%d)",
for (i = 0; i < iu->iu_num_rdescs; i++) {
SRPT_DPRINTF_L4("ch_srp_cmd, rdescs[%d].dd_vaddr"
" (0x%08llx)",
SRPT_DPRINTF_L4("ch_srp_cmd, rdescs[%d].dd_hdl"
SRPT_DPRINTF_L4("ch_srp_cmd, rdescs[%d].dd_len (%d)",
}
SRPT_DPRINTF_L4("ch_srp_cmd, LUN (0x%08lx)",
}
#endif
/*
* The channel has begun disconnecting, so ignore the
* command, returning the IU resources.
*/
return (1);
}
/*
* Once a SCSI task is allocated and assigned to the IU, it
* owns those IU resources, which will be held until STMF
* is notified the task is done (from an lport perspective).
*/
/*
* Could not allocate a task; return status to the initiator
* indicating that we are temporarily unable to process
* commands. If unable to send, immediately return the IU
* resource.
*/
SRPT_DPRINTF_L2("ch_srp_cmd, SCSI task allocation failure");
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_srp_cmd, error(%d) posting error"
" response", status);
return (1);
} else {
return (0);
}
}
if (di_fmt != 0) {
}
if (do_fmt != 0) {
}
switch (cmd->cr_task_attr) {
break;
break;
break;
break;
default:
SRPT_DPRINTF_L2("ch_srp_cmd, reserved task attr (%d)",
cmd->cr_task_attr);
break;
}
if (addlen != 0) {
addlen * 4);
}
/*
* The IU will remain in the session's list until STMF is informed
* by SRP that it is done with the task.
*/
SRPT_DPRINTF_L3("ch_srp_cmd, new task (%p) posted",
(void *)iu->iu_stmf_task);
return (0);
}
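/*
 * A minimal sketch (not part of the original source) of the indirect
 * descriptor sanity check described in srpt_ch_srp_cmd() above: if the
 * indirect table claims more direct descriptors than were carried in the
 * IU, the remainder would have to be fetched from the initiator by RDMA,
 * which this target does not support, so the command is dropped.  The
 * parameter names are hypothetical; the real code works from the
 * srp_cmd_req_t fields.
 */
static int
sketch_check_indirect_desc(uint32_t table_len, uint8_t iu_desc_cnt)
{
	/* Number of direct descriptors the indirect table claims to hold. */
	uint_t claimed = table_len / sizeof (srp_direct_desc_t);

	/* All of the claimed descriptors must be present in the IU itself. */
	if (claimed > iu_desc_cnt)
		return (1);	/* would require remote RDMA read; drop */

	return (0);
}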
/*
* srpt_ch_task_mgmt_abort()
*
* Returns IBT_SUCCESS (0) on success, indicating we've sent a management
* response. Returns non-zero to indicate failure; the IU should be reposted.
*/
static ibt_status_t
{
/*
* Locate the associated task (tag_to_abort) in the
* session's active task list.
*/
break;
}
}
/*
* Take appropriate action based on state of task
* to be aborted:
* 1) No longer exists - do nothing.
* 2) Previously aborted or status queued - do nothing.
* 3) Otherwise - initiate abort.
*/
goto send_mgmt_resp;
}
SRPT_IU_ABORTED | SRPT_IU_RESP_SENT)) != 0) {
goto send_mgmt_resp;
}
/*
* Set aborting flag and notify STMF of abort request. No
* additional I/O will be queued for this IU.
*/
SRPT_DPRINTF_L3("ch_task_mgmt_abort, task found");
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_task_mgmt_abort, err(%d)"
" posting abort response", status);
}
return (status);
}
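/*
 * A minimal sketch (not part of the original source) of the tag lookup
 * performed at the top of srpt_ch_task_mgmt_abort(): walk the session's
 * active IU list looking for the task whose tag the initiator asked us
 * to abort.  The list argument and the iu_tag field name are hypothetical
 * stand-ins for the session fields used by the real code.
 */
static srpt_iu_t *
sketch_find_iu_by_tag(list_t *iu_list, uint64_t tag_to_abort)
{
	srpt_iu_t	*iu;

	for (iu = list_head(iu_list); iu != NULL;
	    iu = list_next(iu_list, iu)) {
		if (iu->iu_tag == tag_to_abort)	/* iu_tag: hypothetical */
			return (iu);
	}
	return (NULL);
}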
/*
* srpt_ch_srp_task_mgmt()
*/
static int
{
SRPT_DPRINTF_L3("ch_srp_task_mgmt, SRP TASK MGMT func(%d)",
tsk->tm_function);
/*
* Both the tag and lun fields have the same corresponding offsets
* in the srp_tsk_mgmt_t and srp_cmd_req_t structures. The
* casting will allow us to use the same DTrace translator.
*/
/*
* Task management aborts are processed directly by the SRP driver;
* all other task management requests are handed off to STMF.
*/
switch (tsk->tm_function) {
case SRP_TSK_MGMT_ABORT_TASK:
/*
* Initiate SCSI transport protocol specific task abort
* logic.
*/
if (status != IBT_SUCCESS) {
/* repost this IU */
return (1);
} else {
return (0);
}
break;
break;
case SRP_TSK_MGMT_LUN_RESET:
break;
case SRP_TSK_MGMT_CLEAR_ACA:
break;
default:
/*
* SRP does not support the requested task management
* function; return a not supported status in the response.
*/
SRPT_DPRINTF_L2("ch_srp_task_mgmt, SRP task mgmt fn(%d)"
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_srp_task_mgmt, err(%d) posting"
" response", status);
return (1);
}
return (0);
}
/*
* The channel has begun disconnecting, so ignore the
* command, returning the IU resources.
*/
return (1);
}
/*
* Once a SCSI mgmt task is allocated and assigned to the IU, it
* owns those IU resources, which will be held until we inform
* STMF that we are done with the task (from an lport's perspective).
*/
/*
* Could not allocate a task; return status to the initiator
* indicating that we are temporarily unable to process
* commands. If unable to send, immediately return the IU
* resource.
*/
SRPT_DPRINTF_L2("ch_srp_task_mgmt, SCSI task allocation"
" failure");
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_srp_task_mgmt, err(%d) posting"
"busy response", status);
/* repost the IU */
return (1);
}
return (0);
}
/*
* The IU will remain in the session's list until STMF is informed
* by SRP that it is done with the task.
*/
SRPT_DPRINTF_L3("ch_srp_task_mgmt, new mgmt task(%p) posted",
(void *)iu->iu_stmf_task);
return (0);
}
/*
* srpt_ch_process_iu()
*/
static void
{
/*
* The IU adds a reference to the channel which will represent
* a reference held by STMF. If for whatever reason the IU
* is not handed off to STMF, then this reference will be
* released. Otherwise, the reference will be released when
* SRP informs STMF that the associated SCSI task is done.
*/
/*
* Validate the login RC channel state. It is normally active; if
* it is not active, then we need to handle a possible race between
* the receipt of an implied RTU and CM calling back to notify us of
* the state transition.
*/
goto repost_iu;
}
case SRP_IU_CMD:
break;
case SRP_IU_TASK_MGMT:
return;
case SRP_IU_I_LOGOUT:
SRPT_DPRINTF_L3("ch_process_iu, SRP INITIATOR LOGOUT");
/*
* Initiators should log out by issuing a CM disconnect
* request (DREQ) with the logout IU in the private data;
* however, some initiators have been known to send the
* IU in-band; if this happens, just initiate the logout.
* Note that we do not return a response, as per the
* specification.
*/
break;
case SRP_IU_AER_RSP:
case SRP_IU_CRED_RSP:
default:
/*
* We don't send asynchronous events or ask for credit
* adjustments, so nothing need be done. Log that we got an
* unexpected IU, then just repost it to the SRQ.
*/
SRPT_DPRINTF_L2("ch_process_iu, invalid IU from initiator,"
break;
}
if (status == 0) {
return;
}
srpt_ch_release_ref(ch, 0);
}
/*
* srpt_ch_post_send()
*/
{
SRPT_DPRINTF_L2("ch_post_send, bad ch state (%d)",
return (IBT_FAILURE);
}
(void *)iu);
SRPT_DPRINTF_L2("ch_post_send, queue full");
return (IBT_FAILURE);
}
if (fence == SRPT_FENCE_SEND) {
}
SRPT_DPRINTF_L4("ch_post_send, posting SRP response to channel"
" ds.ds_va (0x%16llx), ds.ds_key (0x%08x), "
" ds.ds_len (%d)",
if (status != IBT_SUCCESS) {
SRPT_DPRINTF_L2("ch_post_send, post_send failed (%d)",
status);
return (status);
}
return (IBT_SUCCESS);
}
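/*
 * A minimal sketch (not part of the original source) of posting a single
 * signaled SEND of a response IU through the IBTF, including the fence
 * flag handling mentioned in srpt_ch_post_send().  The wrid and the
 * ds_va/ds_key/ds_len values are assumed to come from the send WQE pool
 * and the IU; the variable names here are hypothetical.
 */
static ibt_status_t
sketch_post_send(ibt_channel_hdl_t chan_hdl, ibt_wrid_t wrid,
    ibt_wr_ds_t *ds, uint_t fence)
{
	ibt_send_wr_t	wr;
	uint_t		posted;

	bzero(&wr, sizeof (wr));
	wr.wr_id	= wrid;
	wr.wr_trans	= IBT_RC_SRV;
	wr.wr_opcode	= IBT_WRC_SEND;
	wr.wr_flags	= IBT_WR_SEND_SIGNAL;
	wr.wr_nds	= 1;
	wr.wr_sgl	= ds;

	/* Order this send behind any preceding RDMA writes when requested. */
	if (fence)
		wr.wr_flags |= IBT_WR_SEND_FENCE;

	return (ibt_post_send(chan_hdl, &wr, 1, &posted));
}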
/*
* srpt_ch_alloc_swqe_wrid()
*/
{
return ((ibt_wrid_t)0);
}
ch->ch_swqe_posted++;
return (wrid);
}
/*
* srpt_ch_free_swqe_wrid()
*/
void
{
ch->ch_swqe_posted--;
}