/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* Hermon Queue Pair Processing Routines
*
* Implements all the routines necessary for allocating, freeing, and
* querying the Hermon queue pairs.
*/
#include <sys/sysmacros.h>
#include <sys/ib/adapters/hermon/hermon.h>

static int hermon_qp_create_qpn(hermon_state_t *state, hermon_qphdl_t qp,
    hermon_rsrc_t *qpc);
static int hermon_qpn_avl_compare(const void *q, const void *e);
/*
* hermon_qp_alloc()
* Context: Can be called only from user or kernel context.
*/
int
{
/*
* Extract the necessary info from the hermon_qp_info_t structure
*/
/*
* Verify correctness of alloc_flags.
*
* 1. FEXCH and RSS are only allocated via qp_range.
*/
return (IBT_INVALID_PARAM);
}
qp_is_umap = 0;
/* 2. Make sure only one of these flags is set. */
switch (alloc_flags &
case IBT_QP_USER_MAP:
qp_is_umap = 1;
break;
case IBT_QP_USES_RFCI:
if (type != IBT_UD_RQP)
return (IBT_INVALID_PARAM);
case 1:
break;
case 2:
break;
default:
return (IBT_INVALID_PARAM);
}
break;
case IBT_QP_USES_FCMD:
if (type != IBT_UD_RQP)
return (IBT_INVALID_PARAM);
break;
case 0:
break;
default:
return (IBT_INVALID_PARAM); /* conflicting flags set */
}
/*
* Determine whether QP is being allocated for userland access or
* whether it is being allocated for kernel access. If the QP is
* being allocated for userland access, then lookup the UAR
* page number for the current process. Note: If this is not found
* (e.g. if the process has not previously open()'d the Hermon driver),
* then an error is returned.
*/
if (qp_is_umap) {
if (status != DDI_SUCCESS) {
return (IBT_INVALID_PARAM);
}
} else {
}
/*
* Determine whether QP is being associated with an SRQ
*/
if (qp_srq_en) {
/*
* Check for valid SRQ handle pointers
*/
goto qpalloc_fail;
}
}
	/*
	 * Check for valid QP service type (only UD, RC, and UC supported)
	 */
(type != IBT_UC_RQP))) {
goto qpalloc_fail;
}
/*
* Check for valid PD handle pointer
*/
goto qpalloc_fail;
}
/*
* If on an SRQ, check to make sure the PD is the same
*/
goto qpalloc_fail;
}
/* Increment the reference count on the protection domain (PD) */
/*
* Check for valid CQ handle pointers
*
* FCMD QPs do not require a receive cq handle.
*/
goto qpalloc_fail1;
}
if ((alloc_flags & IBT_QP_USES_FCMD) == 0) {
goto qpalloc_fail1;
}
} else
/*
* Increment the reference count on the CQs. One or both of these
	 * could return error if we determine that the given CQ is already
	 * being used with a special QP (i.e. as a QP0 or QP1).
	 */
if (status != DDI_SUCCESS) {
goto qpalloc_fail1;
}
if (status != DDI_SUCCESS) {
goto qpalloc_fail2;
}
/*
	 * Allocate a QP context entry. This will be filled in with all
* the necessary parameters to define the Queue Pair. Unlike
* other Hermon hardware resources, ownership is not immediately
* given to hardware in the final step here. Instead, we must
* wait until the QP is later transitioned to the "Init" state before
* passing the QP to hardware. If we fail here, we must undo all
	 * the reference counts (CQ and PD).
*/
if (status != DDI_SUCCESS) {
goto qpalloc_fail3;
}
/*
* Allocate the software structure for tracking the queue pair
* (i.e. the Hermon Queue Pair handle). If we fail here, we must
* undo the reference counts and the previous resource allocation.
*/
if (status != DDI_SUCCESS) {
goto qpalloc_fail4;
}
/*
* Calculate the QP number from QPC index. This routine handles
* all of the operations necessary to keep track of used, unused,
* and released QP numbers.
*/
if (type == IBT_UD_RQP) {
} else {
if (status != DDI_SUCCESS) {
goto qpalloc_fail5;
}
}
/*
* If this will be a user-mappable QP, then allocate an entry for
* the "userland resources database". This will later be added to
* the database (after all further QP operations are successful).
* If we fail here, we must undo the reference counts and the
* previous resource allocation.
*/
if (qp_is_umap) {
goto qpalloc_fail6;
}
}
/*
	 * Allocate the doorbell record. Hermon just needs one for the RQ
	 * (and only if the QP is not associated with an SRQ); use uarpg
	 * (above) as the UAR index.
*/
if (!qp_srq_en) {
if (status != DDI_SUCCESS) {
goto qpalloc_fail6;
}
}
/*
	 * We verify that the requested number of SGL is valid (i.e.
	 * consistent with the device limits). If not, then obviously the
	 * same cleanup needs to be done.
*/
if (type == IBT_UD_RQP) {
} else {
}
goto qpalloc_fail7;
}
/*
* Determine this QP's WQE stride (for both the Send and Recv WQEs).
* This will depend on the requested number of SGLs. Note: this
* has the side-effect of also calculating the real number of SGLs
* (for the calculated WQE size).
*
* For QP's on an SRQ, we set these to 0.
*/
if (qp_srq_en) {
qp->qp_rq_log_wqesz = 0;
} else {
}
/* NOTE: currently policy in driver, later maybe IBTF interface */
qp->qp_no_prefetch = 0;
/*
* for prefetching, we need to add the number of wqes in
* the 2k area plus one to the number requested, but
* ONLY for send queue. If no_prefetch == 1 (prefetch off)
* it's exactly TWO wqes for the headroom
*/
if (qp->qp_no_prefetch)
else
/*
* hdrm wqes must be integral since both sq_wqe_size &
	 * HERMON_QP_OH_SIZE are powers of 2
*/
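
/*
 * Illustrative sketch only (not the driver's verbatim code): how the send
 * queue headroom, in WQEs, follows from the comment above. The 2KB value
 * stands in for HERMON_QP_OH_SIZE and the function name is made up for
 * this example; log_wqesz is log2 of the (power-of-2) send WQE stride, so
 * the shift below is an exact division.
 */
static uint_t
hermon_sq_headroom_sketch(uint_t log_wqesz, uint_t no_prefetch)
{
	const uint_t oh_size = 2048;	/* assumed 2KB prefetch area */

	if (no_prefetch)
		return (2);		/* prefetch off: exactly two WQEs */

	/* number of WQEs covering the 2KB area, plus one */
	return ((oh_size >> log_wqesz) + 1);
}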
/*
* Calculate the appropriate size for the work queues.
* For send queue, add in the headroom wqes to the calculation.
* Note: All Hermon QP work queues must be a power-of-2 in size. Also
* they may not be any smaller than HERMON_QP_MIN_SIZE. This step is
* to round the requested size up to the next highest power-of-2
*/
/* first, adjust to a minimum and tell the caller the change */
/*
* now, calculate the alloc size, taking into account
* the headroom for the sq
*/
/* if the total is a power of two, reduce it */
}
}
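
/*
 * Illustrative sketch of the sizing step described above (names and the
 * minimum value are stand-ins, not the driver's): clamp the requested
 * depth to a minimum, add the send queue headroom, then round up to the
 * next power of two. highbit() and ISP2() come from <sys/sysmacros.h>;
 * highbit(x) returns the 1-based index of the highest bit set, so the
 * adjustment below yields ceil(log2(total)).
 */
static uint_t
hermon_log_qsize_sketch(uint_t requested, uint_t hdrm_wqes)
{
	const uint_t min_size = 0x40;	/* stand-in for HERMON_QP_MIN_SIZE */
	uint_t total, log_size;

	/* first, adjust to the minimum */
	if (requested < min_size)
		requested = min_size;

	/* account for the send queue headroom */
	total = requested + hdrm_wqes;

	/* round up to the next power of two; reduce if already one */
	log_size = highbit(total);
	if (ISP2(total))
		log_size--;

	return (log_size);
}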
/*
	 * Next we verify that the rounded-up size is valid (i.e. consistent
	 * with the device limits). If not, then obviously we have a lot of
	 * cleanup to do before returning.
*
* NOTE: the first condition deals with the (test) case of cs_sq
* being just less than 2^32. In this case, the headroom addition
* to the requested cs_sq will pass the test when it should not.
* This test no longer lets that case slip through the check.
*/
(!qp_srq_en && (log_qp_rq_size >
goto qpalloc_fail7;
}
/*
* Allocate the memory for QP work queues. Since Hermon work queues
* are not allowed to cross a 32-bit (4GB) boundary, the alignment of
* the work queue memory is very important. We used to allocate
* work queues (the combined receive and send queues) so that they
* would be aligned on their combined size. That alignment guaranteed
* that they would never cross the 4GB boundary (Hermon work queues
* are on the order of MBs at maximum). Now we are able to relax
* this alignment constraint by ensuring that the IB address assigned
* to the queue memory (as a result of the hermon_mr_register() call)
* is offset from zero.
* Previously, we had wanted to use the ddi_dma_mem_alloc() routine to
* guarantee the alignment, but when attempting to use IOMMU bypass
* mode we found that we were not allowed to specify any alignment
* that was more restrictive than the system page size.
* So we avoided this constraint by passing two alignment values,
* one for the memory allocation itself and the other for the DMA
* handle (for later bind). This used to cause more memory than
* necessary to be allocated (in order to guarantee the more
	 * restrictive alignment constraint). But by guaranteeing the
* zero-based IB virtual address for the queue, we are able to
* conserve this memory.
*/
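
/*
 * Sketch of the invariant discussed above, for illustration only: a work
 * queue buffer whose IB virtual address is zero-based (modulo the offset
 * into the first page) and whose length is on the order of MBs can never
 * span a 4GB boundary. The helper below simply states the boundary test.
 */
static int
hermon_wq_crosses_4gb_sketch(uint64_t ib_vaddr, uint64_t len)
{
	/* nonzero if [ib_vaddr, ib_vaddr + len) spans a 32-bit boundary */
	return (len != 0 &&
	    (ib_vaddr >> 32) != ((ib_vaddr + len - 1) >> 32));
}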
/* QP on SRQ sets these to 0 */
if (qp_srq_en) {
rq_wqe_size = 0;
rq_size = 0;
} else {
}
if (qp_is_umap) {
} else {
}
if (status != DDI_SUCCESS) {
goto qpalloc_fail7;
}
/*
	 * Sort WQs in memory according to stride (*q_wqe_size), largest first.
* If they are equal, still put the SQ first
*/
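
/*
 * Illustrative restatement of the placement rule above (a sketch, not the
 * driver's code): the queue with the larger stride starts at offset zero
 * and the other follows it; on a tie the Send Queue still goes first.
 */
static int
hermon_sq_goes_first_sketch(uint_t sq_wqe_size, uint_t rq_wqe_size)
{
	return (sq_wqe_size >= rq_wqe_size);
}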
qp->qp_sq_baseaddr = 0;
qp->qp_rq_baseaddr = 0;
/* if this QP is on an SRQ, set the rq_buf to NULL */
if (qp_srq_en) {
} else {
}
} else {
}
if (qp_is_umap == 0) {
goto qpalloc_fail8;
}
if (qp_srq_en) {
} else {
goto qpalloc_fail8;
}
}
}
/*
* Register the memory for the QP work queues. The memory for the
* QP must be registered in the Hermon cMPT tables. This gives us the
* LKey to specify in the QP context later. Note: The memory for
* Hermon work queues (both Send and Recv) must be contiguous and
* registered as a single memory region. Note: If the QP memory is
* user-mappable, force DDI_DMA_CONSISTENT mapping. Also, in order to
* meet the alignment restriction, we pass the "mro_bind_override_addr"
* flag in the call to hermon_mr_register(). This guarantees that the
* resulting IB vaddr will be zero-based (modulo the offset into the
	 * first page). If we fail here, we still have a bunch of resource
* and reference count cleanup to do.
*/
if (qp_is_umap) {
} else {
/* HERMON_QUEUE_LOCATION_NORMAL */
}
&mr_op, HERMON_QP_CMPT);
if (status != DDI_SUCCESS) {
goto qpalloc_fail9;
}
/*
* Calculate the offset between the kernel virtual address space
* and the IB virtual address space. This will be used when
* posting work requests to properly initialize each WQE.
*/
/*
* Fill in all the return arguments (if necessary). This includes
* real work queue sizes (in wqes), real SGLs, and QP number
*/
/* if this QP is on an SRQ, set these to 0 */
if (qp_srq_en) {
} else {
}
}
}
/*
* Fill in the rest of the Hermon Queue Pair handle.
*/
qp->qp_is_special = 0;
0;
/* if this QP is on an SRQ, set rq_bufsz to 0 */
if (qp_srq_en) {
qp->qp_rq_bufsz = 0;
qp->qp_rq_logqsz = 0;
} else {
}
qp->qp_forward_sqd_event = 0;
qp->qp_sqd_still_draining = 0;
qp->qp_mcg_refcnt = 0;
/*
* If this QP is to be associated with an SRQ, set the SRQ handle
*/
if (qp_srq_en) {
} else {
}
/* Determine the QP service type */
if (type == IBT_RC_RQP) {
} else if (type == IBT_UD_RQP) {
if (alloc_flags & IBT_QP_USES_RFCI)
else if (alloc_flags & IBT_QP_USES_FCMD)
else
} else {
}
/*
* Initialize the RQ WQEs - unlike Arbel, no Rcv init is needed
*/
/*
* Initialize the SQ WQEs - all that needs to be done is every 64 bytes
* set the quadword to all F's - high-order bit is owner (init to one)
* and the rest for the headroom definition of prefetching
*
*/
if (qp_is_umap == 0) {
for (i = 0; i < sq_depth; i++) {
/*
* for each stride, go through and every 64 bytes
* write the init value - having set the address
* once, just keep incrementing it
*/
}
}
}
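
/*
 * Illustrative sketch of the initialization pattern described above (the
 * function and parameter names are invented for this example): for every
 * 64 bytes of each send WQE stride, set the leading quadword to all ones,
 * the high-order bit of which is the hardware ownership bit.
 */
static void
hermon_sq_wqe_init_sketch(uint64_t *sq_buf, uint_t sq_depth, uint_t wqe_size)
{
	uint64_t	*qword = sq_buf;
	uint_t		i, off;

	for (i = 0; i < sq_depth; i++) {
		/* every 64 bytes within the stride, mark as HW-owned */
		for (off = 0; off < wqe_size; off += 64, qword += 8)
			*qword = 0xFFFFFFFFFFFFFFFFull;
	}
}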
/* Zero out the QP context */
/*
* Put QP handle in Hermon QPNum-to-QPHdl list. Then fill in the
* "qphdl" and return success
*/
/*
* If this is a user-mappable QP, then we need to insert the previously
* allocated entry into the "userland resources database". This will
* allow for later lookup during devmap() (i.e. mmap()) calls.
*/
if (qp_is_umap) {
}
return (DDI_SUCCESS);
/*
* The following is cleanup for all possible failure cases in this routine
*/
if (qp->qp_sq_wqhdr)
if (qp->qp_rq_wqhdr)
if (qp_is_umap) {
}
if (!qp_srq_en) {
}
/*
* Releasing the QPN will also free up the QPC context. Update
* the QPC context pointer to indicate this.
*/
if (qp->qp_qpn_hdl) {
} else {
}
if (qpc) {
}
return (status);
}
/*
* hermon_special_qp_alloc()
* Context: Can be called only from user or kernel context.
*/
int
{
/*
* Extract the necessary info from the hermon_qp_info_t structure
*/
/*
* Check for valid special QP type (only SMI & GSI supported)
*/
goto spec_qpalloc_fail;
}
/*
* Check for valid port number
*/
goto spec_qpalloc_fail;
}
/*
* Check for valid PD handle pointer
*/
goto spec_qpalloc_fail;
}
/* Increment the reference count on the PD */
/*
* Check for valid CQ handle pointers
*/
goto spec_qpalloc_fail1;
}
/*
* Increment the reference count on the CQs. One or both of these
* could return error if we determine that the given CQ is already
* being used with a non-special QP (i.e. a normal QP).
*/
if (status != DDI_SUCCESS) {
goto spec_qpalloc_fail1;
}
if (status != DDI_SUCCESS) {
goto spec_qpalloc_fail2;
}
/*
* Allocate the special QP resources. Essentially, this allocation
	 * amounts to checking if the requested special QP has already been
	 * allocated. If successful, the QP context returned is an actual
* QP context that has been "aliased" to act as a special QP of the
* appropriate type (and for the appropriate port). Just as in
* hermon_qp_alloc() above, ownership for this QP context is not
* immediately given to hardware in the final step here. Instead, we
* wait until the QP is later transitioned to the "Init" state before
* passing the QP to hardware. If we fail here, we must undo all
	 * the reference counts (CQ and PD).
*/
if (status != DDI_SUCCESS) {
goto spec_qpalloc_fail3;
}
/*
* Allocate the software structure for tracking the special queue
* pair (i.e. the Hermon Queue Pair handle). If we fail here, we
* must undo the reference counts and the previous resource allocation.
*/
if (status != DDI_SUCCESS) {
goto spec_qpalloc_fail4;
}
/*
* Actual QP number is a combination of the index of the QPC and
* the port number. This is because the special QP contexts must
* be allocated two-at-a-time.
*/
/*
* Allocate the doorbell record. Hermon uses only one for the RQ so
* alloc a qp doorbell, using uarpg (above) as the uar index
*/
if (status != DDI_SUCCESS) {
goto spec_qpalloc_fail5;
}
/*
* Calculate the appropriate size for the work queues.
* Note: All Hermon QP work queues must be a power-of-2 in size. Also
* they may not be any smaller than HERMON_QP_MIN_SIZE. This step is
* to round the requested size up to the next highest power-of-2
*/
}
}
/*
	 * Next we verify that the rounded-up size is valid (i.e. consistent
	 * with the device limits). If not, then obviously we have a bit of
	 * cleanup to do before returning.
*/
goto spec_qpalloc_fail5a;
}
/*
	 * Next we verify that the requested number of SGL is valid (i.e.
	 * consistent with the device limits). If not, then obviously the
	 * same cleanup needs to be done.
*/
goto spec_qpalloc_fail5a;
}
/*
* Determine this QP's WQE stride (for both the Send and Recv WQEs).
* This will depend on the requested number of SGLs. Note: this
* has the side-effect of also calculating the real number of SGLs
* (for the calculated WQE size).
*/
if (type == IBT_SMI_SQP) {
} else {
}
/*
* Allocate the memory for QP work queues. Since Hermon work queues
* are not allowed to cross a 32-bit (4GB) boundary, the alignment of
* the work queue memory is very important. We used to allocate
* work queues (the combined receive and send queues) so that they
* would be aligned on their combined size. That alignment guaranteed
* that they would never cross the 4GB boundary (Hermon work queues
* are on the order of MBs at maximum). Now we are able to relax
* this alignment constraint by ensuring that the IB address assigned
* to the queue memory (as a result of the hermon_mr_register() call)
* is offset from zero.
* Previously, we had wanted to use the ddi_dma_mem_alloc() routine to
* guarantee the alignment, but when attempting to use IOMMU bypass
* mode we found that we were not allowed to specify any alignment
* that was more restrictive than the system page size.
* So we avoided this constraint by passing two alignment values,
* one for the memory allocation itself and the other for the DMA
* handle (for later bind). This used to cause more memory than
* necessary to be allocated (in order to guarantee the more
	 * restrictive alignment constraint). But by guaranteeing the
* zero-based IB virtual address for the queue, we are able to
* conserve this memory.
*/
goto spec_qpalloc_fail5a;
}
/*
* Sort WQs in memory according to depth, stride (*q_wqe_size),
* biggest first. If equal, the Send Queue still goes first
*/
qp->qp_sq_baseaddr = 0;
qp->qp_rq_baseaddr = 0;
} else {
}
goto spec_qpalloc_fail6;
}
goto spec_qpalloc_fail6;
}
/*
* Register the memory for the special QP work queues. The memory for
* the special QP must be registered in the Hermon cMPT tables. This
* gives us the LKey to specify in the QP context later. Note: The
* memory for Hermon work queues (both Send and Recv) must be contiguous
* and registered as a single memory region. Also, in order to meet the
* alignment restriction, we pass the "mro_bind_override_addr" flag in
* the call to hermon_mr_register(). This guarantees that the resulting
* IB vaddr will be zero-based (modulo the offset into the first page).
* If we fail here, we have a bunch of resource and reference count
* cleanup to do.
*/
if (status != DDI_SUCCESS) {
goto spec_qpalloc_fail6;
}
/*
* Calculate the offset between the kernel virtual address space
* and the IB virtual address space. This will be used when
* posting work requests to properly initialize each WQE.
*/
/* set the prefetch - initially, not prefetching */
if (qp->qp_no_prefetch)
else
/*
* hdrm wqes must be integral since both sq_wqe_size &
	 * HERMON_QP_OH_SIZE are powers of 2
*/
/*
* Fill in all the return arguments (if necessary). This includes
* real work queue sizes, real SGLs, and QP number (which will be
* either zero or one, depending on the special QP type)
*/
}
/*
* Fill in the rest of the Hermon Queue Pair handle. We can update
* the following fields for use in further operations on the QP.
*/
qp->qp_pkeyindx = 0;
qp->qp_forward_sqd_event = 0;
qp->qp_sqd_still_draining = 0;
qp->qp_mcg_refcnt = 0;
/* All special QPs are UD QP service type */
/*
* Initialize the RQ WQEs - unlike Arbel, no Rcv init is needed
*/
/*
* Initialize the SQ WQEs - all that needs to be done is every 64 bytes
* set the quadword to all F's - high-order bit is owner (init to one)
* and the rest for the headroom definition of prefetching
*
*/
for (i = 0; i < sq_depth; i++) {
/*
* for each stride, go through and every 64 bytes write the
* init value - having set the address once, just keep
* incrementing it
*/
}
}
/* Zero out the QP context */
/*
* Put QP handle in Hermon QPNum-to-QPHdl list. Then fill in the
* "qphdl" and return success
*/
return (DDI_SUCCESS);
/*
* The following is cleanup for all possible failure cases in this routine
*/
if (qp->qp_sq_wqhdr)
if (qp->qp_rq_wqhdr)
}
return (status);
}
/*
* hermon_qp_alloc_range()
* Context: Can be called only from user or kernel context.
*/
int
{
/*
* Extract the necessary info from the hermon_qp_info_t structure
*/
return (IBT_INSUFF_RESOURCE);
return (IBT_INSUFF_RESOURCE);
case 1:
break;
case 2:
break;
default:
return (IBT_INVALID_PARAM);
}
} else
return (IBT_INVALID_PARAM);
/*
* Determine whether QP is being allocated for userland access or
* whether it is being allocated for kernel access. If the QP is
* being allocated for userland access, fail (too complex for now).
*/
return (IBT_NOT_SUPPORTED);
} else {
}
/*
* Determine whether QP is being associated with an SRQ
*/
if (qp_srq_en) {
/*
* Check for valid SRQ handle pointers
*/
return (IBT_SRQ_HDL_INVALID);
}
}
/*
* Check for valid QP service type (only UD supported)
*/
if (type != IBT_UD_RQP) {
return (IBT_QP_SRV_TYPE_INVALID);
}
/*
* Check for valid PD handle pointer
*/
return (IBT_PD_HDL_INVALID);
}
/*
* If on an SRQ, check to make sure the PD is the same
*/
return (IBT_PD_HDL_INVALID);
}
/* set loop variable here, for freeing resources on error */
ii = 0;
/*
* Allocate 2^log2 contiguous/aligned QP context entries. This will
* be filled in with all the necessary parameters to define the
* Queue Pairs. Unlike other Hermon hardware resources, ownership
* is not immediately given to hardware in the final step here.
* Instead, we must wait until the QP is later transitioned to the
* "Init" state before passing the QP to hardware. If we fail here,
	 * we must undo all the reference counts (CQ and PD).
*/
&qpc);
if (status != DDI_SUCCESS) {
return (IBT_INSUFF_RESOURCE);
}
/*
* Need to init the MKEYs for the FEXCH QPs.
*
* For FEXCH QP subranges, we return the QPN base as
* "relative" to the full FEXCH QP range for the port.
*/
else
if (qp_range_p == NULL) {
goto qpalloc_fail0;
}
/* Increment the reference count on the protection domain (PD) */
/* if no send completions, just use rq_cq */
} else {
goto qpalloc_fail1;
}
}
/*
* Increment the reference count on the CQs. One or both of these
	 * could return error if we determine that the given CQ is already
	 * being used with a special QP (i.e. as a QP0 or QP1).
	 */
if (status != DDI_SUCCESS) {
goto qpalloc_fail1;
}
if (status != DDI_SUCCESS) {
goto qpalloc_fail2;
}
/*
* Allocate the software structure for tracking the queue pair
* (i.e. the Hermon Queue Pair handle). If we fail here, we must
* undo the reference counts and the previous resource allocation.
*/
if (status != DDI_SUCCESS) {
goto qpalloc_fail4;
}
/*
* Calculate the QP number from QPC index. This routine handles
* all of the operations necessary to keep track of used, unused,
* and released QP numbers.
*/
/*
	 * Allocate the doorbell record. Hermon just needs one for the RQ
	 * (and only if the QP is not associated with an SRQ); use uarpg
	 * (above) as the UAR index.
*/
if (!qp_srq_en) {
if (status != DDI_SUCCESS) {
goto qpalloc_fail6;
}
}
/*
	 * We verify that the requested number of SGL is valid (i.e.
	 * consistent with the device limits). If not, then obviously the
	 * same cleanup needs to be done.
*/
goto qpalloc_fail7;
}
/*
* Determine this QP's WQE stride (for both the Send and Recv WQEs).
* This will depend on the requested number of SGLs. Note: this
* has the side-effect of also calculating the real number of SGLs
* (for the calculated WQE size).
*
* For QP's on an SRQ, we set these to 0.
*/
if (qp_srq_en) {
qp->qp_rq_log_wqesz = 0;
} else {
}
/* NOTE: currently policy in driver, later maybe IBTF interface */
qp->qp_no_prefetch = 0;
/*
* for prefetching, we need to add the number of wqes in
* the 2k area plus one to the number requested, but
* ONLY for send queue. If no_prefetch == 1 (prefetch off)
* it's exactly TWO wqes for the headroom
*/
if (qp->qp_no_prefetch)
else
/*
* hdrm wqes must be integral since both sq_wqe_size &
	 * HERMON_QP_OH_SIZE are powers of 2
*/
/*
* Calculate the appropriate size for the work queues.
* For send queue, add in the headroom wqes to the calculation.
* Note: All Hermon QP work queues must be a power-of-2 in size. Also
* they may not be any smaller than HERMON_QP_MIN_SIZE. This step is
* to round the requested size up to the next highest power-of-2
*/
/* first, adjust to a minimum and tell the caller the change */
/*
* now, calculate the alloc size, taking into account
* the headroom for the sq
*/
/* if the total is a power of two, reduce it */
}
}
/*
	 * Next we verify that the rounded-up size is valid (i.e. consistent
	 * with the device limits). If not, then obviously we have a lot of
	 * cleanup to do before returning.
*
* NOTE: the first condition deals with the (test) case of cs_sq
* being just less than 2^32. In this case, the headroom addition
* to the requested cs_sq will pass the test when it should not.
* This test no longer lets that case slip through the check.
*/
(!qp_srq_en && (log_qp_rq_size >
goto qpalloc_fail7;
}
/*
* Allocate the memory for QP work queues. Since Hermon work queues
* are not allowed to cross a 32-bit (4GB) boundary, the alignment of
* the work queue memory is very important. We used to allocate
* work queues (the combined receive and send queues) so that they
* would be aligned on their combined size. That alignment guaranteed
* that they would never cross the 4GB boundary (Hermon work queues
* are on the order of MBs at maximum). Now we are able to relax
* this alignment constraint by ensuring that the IB address assigned
* to the queue memory (as a result of the hermon_mr_register() call)
* is offset from zero.
* Previously, we had wanted to use the ddi_dma_mem_alloc() routine to
* guarantee the alignment, but when attempting to use IOMMU bypass
* mode we found that we were not allowed to specify any alignment
* that was more restrictive than the system page size.
* So we avoided this constraint by passing two alignment values,
* one for the memory allocation itself and the other for the DMA
* handle (for later bind). This used to cause more memory than
* necessary to be allocated (in order to guarantee the more
	 * restrictive alignment constraint). But by guaranteeing the
* zero-based IB virtual address for the queue, we are able to
* conserve this memory.
*/
/* QP on SRQ sets these to 0 */
if (qp_srq_en) {
rq_wqe_size = 0;
rq_size = 0;
} else {
}
if (status != DDI_SUCCESS) {
goto qpalloc_fail7;
}
/*
	 * Sort WQs in memory according to stride (*q_wqe_size), largest first.
* If they are equal, still put the SQ first
*/
qp->qp_sq_baseaddr = 0;
qp->qp_rq_baseaddr = 0;
/* if this QP is on an SRQ, set the rq_buf to NULL */
if (qp_srq_en) {
} else {
}
} else {
}
goto qpalloc_fail8;
}
if (qp_srq_en) {
} else {
goto qpalloc_fail8;
}
}
/*
* Register the memory for the QP work queues. The memory for the
* QP must be registered in the Hermon cMPT tables. This gives us the
* LKey to specify in the QP context later. Note: The memory for
* Hermon work queues (both Send and Recv) must be contiguous and
* registered as a single memory region. Note: If the QP memory is
* user-mappable, force DDI_DMA_CONSISTENT mapping. Also, in order to
* meet the alignment restriction, we pass the "mro_bind_override_addr"
* flag in the call to hermon_mr_register(). This guarantees that the
* resulting IB vaddr will be zero-based (modulo the offset into the
	 * first page). If we fail here, we still have a bunch of resource
* and reference count cleanup to do.
*/
/* HERMON_QUEUE_LOCATION_NORMAL */
&mr_op, HERMON_QP_CMPT);
if (status != DDI_SUCCESS) {
goto qpalloc_fail9;
}
/*
* Calculate the offset between the kernel virtual address space
* and the IB virtual address space. This will be used when
* posting work requests to properly initialize each WQE.
*/
/*
* Fill in all the return arguments (if necessary). This includes
* real work queue sizes (in wqes), real SGLs, and QP number
*/
/* if this QP is on an SRQ, set these to 0 */
if (qp_srq_en) {
} else {
}
}
/*
* Fill in the rest of the Hermon Queue Pair handle.
*/
qp->qp_is_special = 0;
0;
/* if this QP is on an SRQ, set rq_bufsz to 0 */
if (qp_srq_en) {
qp->qp_rq_bufsz = 0;
qp->qp_rq_logqsz = 0;
} else {
}
qp->qp_forward_sqd_event = 0;
qp->qp_sqd_still_draining = 0;
qp->qp_mcg_refcnt = 0;
/*
* If this QP is to be associated with an SRQ, set the SRQ handle
*/
if (qp_srq_en) {
} else {
}
/*
* Initialize the RQ WQEs - unlike Arbel, no Rcv init is needed
*/
/*
* Initialize the SQ WQEs - all that needs to be done is every 64 bytes
* set the quadword to all F's - high-order bit is owner (init to one)
* and the rest for the headroom definition of prefetching.
*/
for (i = 0; i < sq_depth; i++) {
/*
* for each stride, go through and every 64 bytes
* write the init value - having set the address
* once, just keep incrementing it
*/
}
}
}
/* Zero out the QP context */
/*
* Put QP handle in Hermon QPNum-to-QPHdl list. Then fill in the
* "qphdl" and return success
*/
goto for_each_qp;
return (DDI_SUCCESS);
/*
* The following is cleanup for all possible failure cases in this routine
*/
if (qp->qp_sq_wqhdr)
if (qp->qp_rq_wqhdr)
if (!qp_srq_en) {
}
if (ii == 0) {
if (qp_range_p)
} else {
/* qp_range_p and qpc rsrc will be freed in hermon_qp_free */
while (--ii >= 0) {
int free_status;
if (free_status != DDI_SUCCESS)
"error status %x during free",
}
}
return (status);
}
/*
* hermon_qp_free()
* This function frees up the QP resources. Depending on the value
* of the "free_qp_flags", the QP number may not be released until
* a subsequent call to hermon_qp_release_qpn().
*
* Context: Can be called only from user or kernel context.
*/
/* ARGSUSED */
int
{
int status;
/*
* Pull all the necessary information from the Hermon Queue Pair
* handle. This is necessary here because the resource for the
* QP handle is going to be freed up as part of this operation.
*/
/*
* If the QP is part of an MCG, then we fail the qp_free
*/
if (qp->qp_mcg_refcnt != 0) {
status = ibc_get_ci_failure(0);
goto qpfree_fail;
}
/*
* If the QP is not already in "Reset" state, then transition to
* "Reset". This is necessary because software does not reclaim
* ownership of the QP context until the QP is in the "Reset" state.
* If the ownership transfer fails for any reason, then it is an
* indication that something (either in HW or SW) has gone seriously
* wrong. So we print a warning message and return.
*/
status = ibc_get_ci_failure(0);
goto qpfree_fail;
}
/*
* Do any additional handling necessary for the transition
* to the "Reset" state (e.g. update the WRID lists)
*/
status = ibc_get_ci_failure(0);
goto qpfree_fail;
}
}
/*
* If this was a user-mappable QP, then we need to remove its entry
* from the "userland resources database". If it is also currently
* mmap()'d out to a user process, then we need to call
* devmap_devmem_remap() to remap the QP memory to an invalid mapping.
* We also need to invalidate the QP tracking information for the
* user mapping.
*/
&umapdb);
if (status != DDI_SUCCESS) {
return (ibc_get_ci_failure(0));
}
if (status != DDI_SUCCESS) {
"devmap_devmem_remap()");
return (ibc_get_ci_failure(0));
}
}
}
/*
* Put NULL into the Hermon QPNum-to-QPHdl list. This will allow any
* in-progress events to detect that the QP corresponding to this
	 * number has been freed. Note: it does depend on whether we are
* freeing a special QP or not.
*/
} else if (qp->qp_is_special) {
} else {
}
/*
* Drop the QP lock
* At this point the lock is no longer necessary. We cannot
* protect from multiple simultaneous calls to free the same QP.
* In addition, since the QP lock is contained in the QP "software
* handle" resource, which we will free (see below), it is
* important that we have no further references to that memory.
*/
/*
* Free the QP resources
* Start by deregistering and freeing the memory for work queues.
* Next free any previously allocated context information
* (depending on QP type)
* Finally, decrement the necessary reference counts.
* If this fails for any reason, then it is an indication that
* something (either in HW or SW) has gone seriously wrong. So we
* print a warning message and return.
*/
if (status != DDI_SUCCESS) {
status = ibc_get_ci_failure(0);
goto qpfree_fail;
}
/* Free the memory for the QP */
if (qp->qp_sq_wqhdr)
if (qp->qp_rq_wqhdr)
/* Free the dbr */
if (!qp_srq_en) {
}
/*
* Free up the remainder of the QP resources. Note: we have a few
* different resources to free up depending on whether the QP is a
* special QP or not. As described above, if any of these fail for
* any reason it is an indication that something (either in HW or SW)
* has gone seriously wrong. So we print a warning message and
* return.
*/
if (qp->qp_is_special) {
/* Free up resources for the special QP */
if (status != DDI_SUCCESS) {
status = ibc_get_ci_failure(0);
goto qpfree_fail;
}
int refcnt;
if (refcnt == 0) {
}
} else {
/*
* Check the flags and determine whether to release the
* QPN or not, based on their value.
*/
if (free_qp_flags == IBC_FREE_QP_ONLY) {
} else {
}
}
/* Free the Hermon Queue Pair handle */
/* Decrement the reference counts on CQs, PD and SRQ (if needed) */
if (qp_srq_en == HERMON_QP_SRQ_ENABLED) {
}
/* Set the qphdl pointer to NULL and return success */
return (DDI_SUCCESS);
return (status);
}
/*
* hermon_qp_query()
* Context: Can be called from interrupt or base context.
*/
int
{
int status;
/*
* Grab the temporary QPC entry from QP software state
*/
/* Convert the current Hermon QP state to IBTF QP state */
	case HERMON_QP_RESET:
		qp_state = IBT_STATE_RESET;		/* "Reset" */
		break;
	case HERMON_QP_INIT:
		qp_state = IBT_STATE_INIT;		/* "Initialized" */
		break;
	case HERMON_QP_RTR:
		qp_state = IBT_STATE_RTR;		/* "Ready to Receive" */
		break;
	case HERMON_QP_RTS:
		qp_state = IBT_STATE_RTS;		/* "Ready to Send" */
		break;
	case HERMON_QP_SQERR:
		qp_state = IBT_STATE_SQE;		/* "Send Queue Error" */
		break;
	case HERMON_QP_SQD:
		if (qp->qp_sqd_still_draining) {
			qp_state = IBT_STATE_SQDRAIN;	/* "SQ Draining" */
		} else {
			qp_state = IBT_STATE_SQD;	/* "SQ Drained" */
		}
		break;
	case HERMON_QP_ERR:
		qp_state = IBT_STATE_ERROR;		/* "Error" */
		break;
default:
return (ibc_get_ci_failure(0));
}
/* SRQ Hook. */
/*
* The following QP information is always returned, regardless of
* the current QP state. Note: Some special handling is necessary
* for calculating the QP number on special QP (QP0 and QP1).
*/
if (qp->qp_is_special) {
} else {
}
/*
* If QP is currently in the "Reset" state, then only the above are
* returned
*/
if (qp_state == IBT_STATE_RESET) {
return (DDI_SUCCESS);
}
/*
* Post QUERY_QP command to firmware
*
* We do a HERMON_NOSLEEP here because we are holding the "qp_lock".
* Since we may be in the interrupt context (or subsequently raised
* to interrupt level by priority inversion), we do not want to block
* in this routine waiting for success.
*/
if (status != HERMON_CMD_SUCCESS) {
if (status == HERMON_CMD_INVALID_STATUS) {
}
return (ibc_get_ci_failure(0));
}
/*
* Fill in the additional QP info based on the QP's transport type.
*/
/* Fill in the UD-specific info */
/* port+1 for port 1/2 */
pmr->pmd_sync_required = 0;
pmr->pmd_phys_buf_list_sz = 0;
pmr->pmd_sync_required = 0;
(heart_beat == 0)) ? IBT_FEXCH_HEART_BEAT_OK :
}
/* Fill in the RC-specific info */
/* Grab the path migration state information */
} else {
}
/* Get the common primary address path fields */
/* Fill in the additional primary address path fields */
/* Get the common alternate address path fields */
/* Fill in the additional alternate address path fields */
/* Get the RNR retry time from primary path */
/* Fill in the UC-specific info */
/* Grab the path migration state information */
} else {
}
/* Get the common primary address path fields */
/* Fill in the additional primary address path fields */
/* Get the common alternate address path fields */
/* Fill in the additional alternate address path fields */
/*
* Set the enable flags based on RDMA enable bits (by
* definition UC doesn't support Atomic or RDMA Read)
*/
} else {
return (ibc_get_ci_failure(0));
}
/*
* Under certain circumstances it is possible for the Hermon hardware
* to transition to one of the error states without software directly
* knowing about it. The QueryQP() call is the one place where we
* have an opportunity to sample and update our view of the QP state.
*/
}
}
return (DDI_SUCCESS);
}
/*
* hermon_qp_create_qpn()
* Context: Can be called from interrupt or base context.
*/
static int
{
/*
* Build a query (for the AVL tree lookup) and attempt to find
* a previously added entry that has a matching QPC index. If
* no matching entry is found, then allocate, initialize, and
* add an entry to the AVL tree.
* If a matching entry is found, then increment its QPN counter
* and reference counter.
*/
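
/*
 * Sketch of the lookup-or-insert pattern described above, using the
 * standard illumos AVL interfaces avl_find() and avl_insert(). The tree
 * pointer is passed in here purely for illustration, and the qpn_indx
 * field name is an assumption following the qpn_* naming used elsewhere
 * in this file.
 */
static hermon_qpn_entry_t *
hermon_qpn_find_or_add_sketch(avl_tree_t *tree, uint_t qpc_indx)
{
	hermon_qpn_entry_t	query, *entry;
	avl_index_t		where;

	/* look for an existing entry with a matching QPC index */
	query.qpn_indx = qpc_indx;
	entry = (hermon_qpn_entry_t *)avl_find(tree, &query, &where);
	if (entry == NULL) {
		/* none found: allocate, initialize, and insert one */
		entry = kmem_zalloc(sizeof (hermon_qpn_entry_t), KM_NOSLEEP);
		if (entry == NULL)
			return (NULL);
		entry->qpn_indx = qpc_indx;
		entry->qpn_refcnt = 0;
		entry->qpn_counter = 0;
		avl_insert(tree, entry, where);
	}
	return (entry);
}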
/*
* Allocate and initialize a QPN entry, then insert
* it into the AVL tree.
*/
sizeof (hermon_qpn_entry_t), KM_NOSLEEP);
return (DDI_FAILURE);
}
entry->qpn_refcnt = 0;
entry->qpn_counter = 0;
}
/*
* Make the AVL tree entry point to the QP context resource that
* it will be responsible for tracking
*/
/*
* Setup the QP handle to point to the AVL tree entry. Then
* generate the new QP number from the entry's QPN counter value
* and the hardware's QP context table index.
*/
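
/*
 * Illustrative composition of the QP number from the entry's counter and
 * the hardware QPC table index, per the comment above. The counter fills
 * the bits above the (power-of-2 sized) QPC index space, and the result
 * is masked down to the 24 bits an IB QP number can carry. The helper
 * name and the log_num_qp parameter are assumptions for this sketch.
 */
static uint32_t
hermon_qpn_compose_sketch(uint32_t counter, uint32_t qpc_indx,
    uint_t log_num_qp)
{
	return (((counter << log_num_qp) | qpc_indx) & 0xFFFFFF);
}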
/*
* Increment the reference counter and QPN counter. The QPN
* counter always indicates the next available number for use.
*/
entry->qpn_counter++;
entry->qpn_refcnt++;
return (DDI_SUCCESS);
}
/*
* hermon_qp_release_qpn()
* Context: Can be called only from user or kernel context.
*/
void
int flags)
{
/*
* If we are releasing the QP number here, then we decrement the
* reference count and check for zero references. If there are
* zero references, then we free the QPC context (if it hadn't
* already been freed during a HERMON_QPN_FREE_ONLY free, i.e. for
* reuse with another similar QP number) and remove the tracking
* structure from the QP number AVL tree and free the structure.
* If we are not releasing the QP number here, then, as long as we
* have not exhausted the usefulness of the QPC context (that is,
* re-used it too many times without the reference count having
* gone to zero), we free up the QPC context for use by another
* thread (which will use it to construct a different QP number
* from the same QPC table index).
*/
if (flags == HERMON_QPN_RELEASE) {
entry->qpn_refcnt--;
/*
* If the reference count is zero, then we free the QPC
		 * context (if it hadn't already been freed in an earlier
		 * HERMON_QPN_FREE_ONLY free) and remove the
		 * tracking structure from the QP number AVL tree.
*/
if (entry->qpn_refcnt == 0) {
}
/*
			 * If the current entry has served its useful
* purpose (i.e. been reused the maximum allowable
* number of times), then remove it from QP number
* AVL tree and free it up.
*/
}
}
} else if (flags == HERMON_QPN_FREE_ONLY) {
/*
* Even if we are not freeing the QP number, that will not
* always prevent us from releasing the QPC context. In fact,
* since the QPC context only forms part of the whole QPN,
* we want to free it up for use by other consumers. But
* if the reference count is non-zero (which it will always
* be when we are doing HERMON_QPN_FREE_ONLY) and the counter
* has reached its maximum value, then we cannot reuse the
* QPC context until the reference count eventually reaches
* zero (in HERMON_QPN_RELEASE, above).
*/
}
}
}
/*
* hermon_qpn_avl_compare()
* Context: Can be called from user or kernel context.
*/
static int
hermon_qpn_avl_compare(const void *q, const void *e)
{
	hermon_qpn_entry_t	*entry, *query;

	entry = (hermon_qpn_entry_t *)e;
	query = (hermon_qpn_entry_t *)q;

	/* order entries by the QPC table index that each one tracks */
	if (query->qpn_indx < entry->qpn_indx) {
		return (-1);
	} else if (query->qpn_indx > entry->qpn_indx) {
		return (+1);
	} else {
		return (0);
	}
}
/*
* hermon_qpn_avl_init()
* Context: Only called from attach() path context
*/
void
{
/* Initialize the lock used for QP number (QPN) AVL tree access */
/* Initialize the AVL tree for the QP number (QPN) storage */
sizeof (hermon_qpn_entry_t),
}
/*
* hermon_qpn_avl_fini()
*/
void
{
void *cookie;
/*
* Empty all entries (if necessary) and destroy the AVL tree
* that was used for QP number (QPN) tracking.
*/
}
/* Destroy the lock used for QP number (QPN) AVL tree access */
}
/*
* hermon_qphdl_from_qpnum()
* Context: Can be called from interrupt or base context.
*
* This routine is important because changing the unconstrained
* portion of the QP number is critical to the detection of a
* potential race condition in the QP event handler code (i.e. the case
* where a QP is freed and alloc'd again before an event for the
* "old" QP can be handled).
*
* While this is not a perfect solution (not sure that one exists)
* it does help to mitigate the chance that this race condition will
* cause us to deliver a "stale" event to the new QP owner. Note:
* this solution does not scale well because the number of constrained
* bits increases (and, hence, the number of unconstrained bits
* decreases) as the number of supported QPs grows. For small and
* intermediate values, it should hopefully provide sufficient
* protection.
*/
{
/* Calculate the QP table index from the qpnum */
}
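
/*
 * Sketch of the calculation referred to above: only the low-order
 * (constrained) bits of a QP number select the entry in the QP handle
 * table, while the high-order (unconstrained) bits change each time the
 * same QPC entry is reused. num_qp is assumed to be a power of two.
 */
static uint_t
hermon_qpnum_to_tblindx_sketch(uint_t qpnum, uint_t num_qp)
{
	/* mask off the unconstrained high-order bits */
	return (qpnum & (num_qp - 1));
}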
/*
* hermon_special_qp_rsrc_alloc
* Context: Can be called from interrupt or base context.
*/
static int
{
int status;
if (type == IBT_SMI_SQP) {
/*
* Check here to see if the driver has been configured
* to instruct the Hermon firmware to handle all incoming
* SMP messages (i.e. messages sent to SMA). If so,
* then we will treat QP0 as if it has already been
* allocated (for internal use). Otherwise, if we allow
* the allocation to happen, it will cause unexpected
* behaviors (e.g. Hermon SMA becomes unresponsive).
*/
return (IBT_QP_IN_USE);
}
/*
* If this is the first QP0 allocation, then post
* a CONF_SPECIAL_QP firmware command
*/
if ((flags & HERMON_SPECIAL_QP0_RSRC_MASK) == 0) {
if (status != HERMON_CMD_SUCCESS) {
"command failed: %08x\n",
return (IBT_INSUFF_RESOURCE);
}
}
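
/*
 * Sketch of the bookkeeping described above (all names here are
 * illustrative, not the driver's): the CONF_SPECIAL_QP firmware command
 * only needs to be posted when no port has this special QP type allocated
 * yet, and the per-port flag bit both detects "already in use" and records
 * a successful claim.
 */
static int
hermon_special_qp_claim_sketch(uint_t *flags, uint_t type_rsrc_mask,
    uint_t port_bit, int *need_conf_cmd)
{
	/* post CONF_SPECIAL_QP only on the very first allocation */
	*need_conf_cmd = ((*flags & type_rsrc_mask) == 0);

	/* fail if this port's special QP has already been allocated */
	if (*flags & port_bit)
		return (-1);	/* caller would map this to IBT_QP_IN_USE */

	*flags |= port_bit;
	return (0);
}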
/*
* Now check (and, if necessary, modify) the flags to indicate
* whether the allocation was successful
*/
return (IBT_QP_IN_USE);
}
} else {
/*
* If this is the first QP1 allocation, then post
* a CONF_SPECIAL_QP firmware command
*/
if ((flags & HERMON_SPECIAL_QP1_RSRC_MASK) == 0) {
if (status != HERMON_CMD_SUCCESS) {
"command failed: %08x\n",
return (IBT_INSUFF_RESOURCE);
}
}
/*
* Now check (and, if necessary, modify) the flags to indicate
* whether the allocation was successful
*/
return (IBT_QP_IN_USE);
}
}
return (DDI_SUCCESS);
}
/*
* hermon_special_qp_rsrc_free
* Context: Can be called from interrupt or base context.
*/
static int
{
int status;
if (type == IBT_SMI_SQP) {
/*
		 * If this is the last QP0 free, then post a CONF_SPECIAL_QP
		 * now; if this is the last special QP free, the
		 * CONF_SPECIAL_QP firmware command will stop them all
*/
if (flags) {
if (status != HERMON_CMD_SUCCESS) {
"command failed: %08x\n",
if (status == HERMON_CMD_INVALID_STATUS) {
}
return (ibc_get_ci_failure(0));
}
}
} else {
/*
		 * If this is the last QP1 free, then post a CONF_SPECIAL_QP
		 * now; if this is the last special QP free, the
		 * CONF_SPECIAL_QP firmware command will stop them all
*/
if (flags) {
if (status != HERMON_CMD_SUCCESS) {
"command failed: %08x\n",
if (status == HERMON_CMD_INVALID_STATUS) {
}
return (ibc_get_ci_failure(0));
}
}
}
return (DDI_SUCCESS);
}
/*
* hermon_qp_sgl_to_logwqesz()
* Context: Can be called from interrupt or base context.
*/
static void
{
switch (wq_type) {
/*
* Use requested maximum SGL to calculate max descriptor size
* (while guaranteeing that the descriptor size is a
* power-of-2 cachelines).
*/
}
/* Make sure descriptor is at least the minimum size */
/* Calculate actual number of SGL (given WQE size) */
sizeof (hermon_hw_snd_wqe_ctrl_t)) >> 4;
break;
/*
* Use requested maximum SGL to calculate max descriptor size
* (while guaranteeing that the descriptor size is a
* power-of-2 cachelines).
*/
}
/* Make sure descriptor is at least the minimum size */
/* Calculate actual number of SGL (given WQE size) */
break;
case HERMON_QP_WQ_TYPE_RECVQ:
/*
* Same as above (except for Recv WQEs)
*/
}
/* Make sure descriptor is at least the minimum size */
/* Calculate actual number of SGL (given WQE size) */
break;
/*
* Same as above (except for MLX transport WQEs). For these
* WQEs we have to account for the space consumed by the
* "inline" packet headers. (This is smaller than for QP1
	 * below because QP0 is not allowed to send packets with a GRH.)
*/
}
/* Make sure descriptor is at least the minimum size */
/* Calculate actual number of SGL (given WQE size) */
break;
/*
* Same as above. For these WQEs we again have to account for
* the space consumed by the "inline" packet headers. (This
* is larger than for QP0 above because we have to account for
* the possibility of a GRH in each packet - and this
* introduces an alignment issue that causes us to consume
* an additional 8 bytes).
*/
}
/* Make sure descriptor is at least the minimum size */
/* Calculate actual number of SGL (given WQE size) */
break;
default:
break;
}
/* Fill in the return values */
}
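
/*
 * Illustrative sketch of the calculation that hermon_qp_sgl_to_logwqesz()
 * performs (the control segment size, minimum, and helper name here are
 * stand-ins): the descriptor holds a control segment plus 16 bytes per
 * SGL entry, rounded up to a power-of-2 number of 64-byte cachelines; the
 * real SGL count is then recomputed from the rounded-up size. MIN(),
 * highbit() and ISP2() come from <sys/sysmacros.h>.
 */
static void
hermon_sgl_to_logwqesz_sketch(uint_t num_sgl, uint_t real_max_sgl,
    uint_t *logwqesz, uint_t *max_sgl)
{
	const uint_t ctrl_sz = 16;	/* stand-in control segment size */
	const uint_t min_log = 6;	/* minimum: one 64B cacheline */
	uint_t desc_sz, log2;

	/* max descriptor size for the requested number of 16B SGL entries */
	desc_sz = ctrl_sz + (num_sgl << 4);

	/* round up to a power-of-2 size (i.e. power-of-2 cachelines) */
	log2 = highbit(desc_sz);
	if (ISP2(desc_sz))
		log2--;

	/* make sure the descriptor is at least the minimum size */
	if (log2 < min_log)
		log2 = min_log;

	/* actual number of SGL entries possible in a WQE of that size */
	*logwqesz = log2;
	*max_sgl = MIN(real_max_sgl, ((1U << log2) - ctrl_sz) >> 4);
}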