/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file implements the Work Queue Entry (WQE) management in IBMF.
*/
extern int ibmf_trace_level;
extern int ibmf_send_wqes_per_port, ibmf_recv_wqes_per_port;
}
}
int kmflags);
int kmflags);
/*
 * ibmf_send_wqe_cache_constructor():
 *	Constructor for the kmem cache used for send WQEs for special QPs
 *
 * NOTE(review): this block is an incomplete extraction — the function
 * signature, local declarations, vmem_xalloc() call, trace-macro call
 * sites, and the WQE-mgt list walk header are missing from this view.
 * The surviving lines are kept byte-identical; restore the full source
 * before attempting code changes.
 */
/* ARGSUSED */
static int
{
"ibmf_send_wqe_cache_constructor() enter, buf = %p, cdarg = %p\n",
/* initialize send WQE context */
/* failure path: the vmem allocation for the WQE address failed */
"Failed vmem allocation in send WQE cache constructor");
"ibmf_send_wqe_cache_constructor() exit\n");
return (-1);
}
/* Look for the WQE management struct that includes this address */
wqe_mgt->wqes_kmem_sz))) {
break;
}
}
/* failure path: no WQE mgt entry covers the allocated address */
"Address not found in WQE mgt list");
"ibmf_send_wqe_cache_constructor() exit\n");
return (-1);
}
/* success: start with a clean flags word on the constructed WQE */
send_wqe->send_wqe_flags = 0;
"ibmf_send_wqe_cache_constructor() exit\n");
return (0);
}
/*
 * ibmf_send_wqe_cache_destructor():
 *	Destructor for send WQE kmem cache for special QPs
 *
 * NOTE(review): incomplete extraction — the signature and the actual
 * vmem_free() call are missing from this view; only the trace strings
 * and comments survive.  Kept byte-identical.
 */
/* ARGSUSED */
static void
{
"ibmf_send_wqe_cache_destructor() enter, buf = %p, cdarg = %p\n",
/* Free the vmem allocated for the WQE */
"ibmf_send_wqe_cache_destructor() exit\n");
}
/*
 * ibmf_recv_wqe_cache_constructor():
 *	Constructor for receive WQE kmem cache for special QPs
 *
 * NOTE(review): incomplete extraction — signature, locals, the vmem
 * allocation call, and the WQE-mgt list walk header are missing.
 * Structure mirrors ibmf_send_wqe_cache_constructor(): -1 on vmem
 * failure or unmatched address, 0 on success.  Kept byte-identical.
 */
/* ARGSUSED */
static int
{
"ibmf_recv_wqe_cache_constructor() enter, buf = %p, cdarg = %p\n",
/* initialize recv WQE context */
/* failure path: vmem allocation for the WQE address failed */
"Failed vmem allocation in receive WQE cache constructor");
"ibmf_recv_wqe_cache_constructor() exit\n");
return (-1);
}
/* Look for the WQE management struct that includes this address */
wqe_mgt->wqes_kmem_sz))) {
break;
}
}
/* failure path: no WQE mgt entry covers the allocated address */
"Address not found in WQE mgt list");
"ibmf_recv_wqe_cache_constructor() exit\n");
return (-1);
}
/* success: clear the flags word on the constructed receive WQE */
recv_wqe->recv_wqe_flags = 0;
"ibmf_recv_wqe_cache_constructor() exit\n");
return (0);
}
/*
 * ibmf_recv_wqe_cache_destructor():
 *	Destructor for receive WQE kmem cache for special QPs
 *
 * NOTE(review): incomplete extraction — the signature and the
 * vmem_free() call are missing from this view.  Kept byte-identical.
 */
/* ARGSUSED */
static void
{
"ibmf_recv_wqe_cache_destructor() enter, buf = %p, cdarg = %p\n",
/* Free the vmem allocated for the WQE */
"ibmf_recv_wqe_cache_destructor() exit\n");
}
/*
 * ibmf_altqp_send_wqe_cache_constructor():
 *	Constructor for the kmem cache used for send WQEs for alternate QPs
 *
 * NOTE(review): incomplete extraction — signature, locals, allocation
 * call, and most trace-macro call sites are missing.  Same -1/0 return
 * contract as the special-QP constructor above.  Kept byte-identical.
 */
/* ARGSUSED */
int
{
"ibmf_altqp_send_wqe_cache_constructor() enter, buf = %p, "
/* initialize send WQE context */
/* failure path: vmem allocation failed */
"", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
"alternate QP send WQE cache constructor");
"", "ibmf_altqp_send_wqe_cache_constructor() exit\n");
return (-1);
}
/* Look for the WQE management struct that includes this address */
wqe_mgt->wqes_kmem_sz))) {
break;
}
}
/* failure path: address not covered by any WQE mgt entry */
"", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
IBMF_TNF_TRACE, "",
"ibmf_altqp_send_wqe_cache_constructor() exit\n");
return (-1);
}
/* success: clear the flags word on the constructed send WQE */
send_wqe->send_wqe_flags = 0;
"ibmf_altqp_send_wqe_cache_constructor() exit\n");
return (0);
}
/*
 * ibmf_altqp_send_wqe_cache_destructor():
 *	Destructor for send WQE kmem cache for alternate QPs
 *
 * NOTE(review): incomplete extraction — signature and the vmem_free()
 * call are missing from this view.  Kept byte-identical.
 */
/* ARGSUSED */
void
{
"ibmf_altqp_send_wqe_cache_destructor() enter, buf = %p, "
/* Free the vmem allocated for the WQE */
"ibmf_altqp_send_wqe_cache_destructor() exit\n");
}
/*
 * ibmf_altqp_recv_wqe_cache_constructor():
 *	Constructor for receive WQE kmem cache for alternate QPs
 *
 * FIX(review): the "address not found" exit trace below previously read
 * "ibmf_recv_wqe_cache_constructor() exit" — a copy/paste from the
 * special-QP constructor.  Corrected to name this function so trace
 * output attributes the exit to the right routine.  No other token
 * changed.
 *
 * NOTE(review): incomplete extraction — signature, locals, allocation
 * call, and most trace-macro call sites are missing from this view.
 */
/* ARGSUSED */
int
{
"ibmf_altqp_recv_wqe_cache_constructor() enter, buf = %p, "
/* initialize recv WQE context */
/* failure path: vmem allocation failed */
"", "ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
"Failed vmem allocation in recv WQE cache constructor");
"", "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
return (-1);
}
/* Look for the WQE management struct that includes this address */
wqe_mgt->wqes_kmem_sz))) {
break;
}
}
/* failure path: address not covered by any WQE mgt entry */
"ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
"ibmf_altqp_recv_wqe_cache_constructor() exit\n");
return (-1);
}
/* success: clear the flags word on the constructed receive WQE */
recv_wqe->recv_wqe_flags = 0;
"ibmf_altqp_recv_wqe_cache_constructor() exit\n");
return (0);
}
/*
 * ibmf_altqp_recv_wqe_cache_destructor():
 *	Destructor for receive WQE kmem cache for alternate QPs
 *
 * NOTE(review): incomplete extraction — signature and the vmem_free()
 * call are missing from this view.  Kept byte-identical.
 */
/* ARGSUSED */
void
{
"ibmf_altqp_recv_wqe_cache_destructor() enter, buf = %p, "
/* Free the vmem allocated for the WQE */
"ibmf_altqp_recv_wqe_cache_destructor() exit\n");
}
/*
 * ibmf_i_init_wqes():
 *	Create the kmem cache for send and receive WQEs
 *
 * NOTE(review): incomplete extraction — the signature, the kmem/vmem
 * allocation calls, the ibt_register_mr() call whose status is tested
 * below, and the kmem_cache_create() argument lists are all missing.
 * Visible contract: returns IBMF_NO_RESOURCES if memory registration
 * fails, IBMF_SUCCESS otherwise.  Kept byte-identical.
 */
int
{
/*
 * Allocate memory for the WQE management structure
 */
/*
 * Allocate memory for the WQEs to be used by the special QPs on this CI
 * There are two special QPs per CI port
 */
/* Register the allocated memory */
if (status != IBT_SUCCESS) {
/* presumably status comes from ibt_register_mr() above — confirm */
"ibmf_i_init_wqes() exit\n");
return (IBMF_NO_RESOURCES);
}
/* Store the memory registration information */
/* Create a vmem arena for the IB virtual address space */
/* create a kmem cache for the send WQEs */
sizeof (ibmf_send_wqe_t), 0, ibmf_send_wqe_cache_constructor,
/* create a kmem cache for the receive WQEs */
sizeof (ibmf_recv_wqe_t), 0, ibmf_recv_wqe_cache_constructor,
return (IBMF_SUCCESS);
}
/*
 * ibmf_i_fini_wqes():
 *	Destroy the kmem cache for send and receive WQEs
 *
 * NOTE(review): incomplete extraction — the signature, the loop header
 * that walks the WQE mgt list, and the actual deregister/free/destroy
 * calls are missing; only the step comments survive.  Kept
 * byte-identical.
 */
void
{
void *wqe_kmem;
/* Remove the WQE mgt struct from the list */
/* Deregister the memory allocated for the WQEs */
/* Free the kmem allocated for the WQEs */
/* Destroy the mutex */
/* Free the WQE management structure */
}
/* Destroy the kmem_cache for the send WQE */
/* Destroy the kmem_cache for the receive WQE */
/*
 * Destroy the vmem arena for the WQEs
 * This must be done after the kmem_cache_destroy() calls since
 * the cache destructors call vmem_free()
 */
}
/*
 * ibmf_i_init_altqp_wqes():
 *	Create the kmem cache for send and receive WQEs used by alternate QPs
 *
 * NOTE(review): incomplete extraction — signature, allocation calls,
 * the ibt_register_mr() call feeding &mem_desc, and the
 * kmem_cache_create() call heads are missing.  Visible contract:
 * IBMF_NO_RESOURCES on registration failure, IBMF_SUCCESS otherwise.
 * Kept byte-identical.
 */
int
{
/*
 * Allocate memory for the WQE management structure
 */
/*
 * Allocate memory for all the WQEs to be used by this alternate QP
 */
/* Register the allocated memory */
&mem_desc);
if (status != IBT_SUCCESS) {
"ibmf_i_init_altqp_wqes(): %s, status = %d\n",
"ibmf_i_init_altqp_wqes() exit\n");
return (IBMF_NO_RESOURCES);
}
/* Store the memory registration information */
/* Create a vmem arena for the IB virtual address space */
/*
 * CAUTION: Do not exceed 32 characters for the kmem cache name, else,
 * mdb does not exit (bug 4878751). There is some connection between
 * mdb walkers and kmem_caches with the limitation likely to be in the
 * mdb code.
 */
/* create a kmem cache for the send WQEs */
sizeof (ibmf_send_wqe_t), 0, ibmf_altqp_send_wqe_cache_constructor,
NULL, 0);
/* create a kmem cache for the receive WQEs */
sizeof (ibmf_recv_wqe_t), 0, ibmf_altqp_recv_wqe_cache_constructor,
NULL, 0);
return (IBMF_SUCCESS);
}
/*
 * ibmf_i_fini_altqp_wqes():
 *	Destroy the kmem cache for send and receive WQEs for alternate QPs
 *
 * NOTE(review): incomplete extraction — the signature, the WQE mgt
 * list-walk header, and most call statements are missing; only the
 * step comments and a truncated ibt_deregister_mr() call remain.
 * Kept byte-identical.
 */
void
{
void *wqe_kmem;
/* Remove the WQE mgt struct from the list */
/* Deregister the memory allocated for the WQEs */
(void) ibt_deregister_mr(
/* Free the kmem allocated for the WQEs */
/* Destroy the WQE mgt struct mutex */
/* Free the WQE management structure */
}
/* Destroy the kmem_cache for the send WQE */
/* Destroy the kmem_cache for the receive WQE */
/*
 * Destroy the vmem arena for the WQEs
 * This must be done after the kmem_cache_destroy() calls since
 * the cache destructors call vmem_free()
 */
}
/*
 * ibmf_i_init_send_wqe():
 *	Initialize a send WQE
 *
 * NOTE(review): this is the most heavily garbled block in the file —
 * the signature, the WR ID setup, the msgbuf-selection conditionals,
 * the RMPP header field assignments, and the trace-macro call heads
 * are all missing; dangling `} else {` fragments show where the
 * conditional structure used to be.  Do NOT modify this block until
 * the full source is restored; statement order here (header offset
 * computation, RMPP flag setup, class-header offset) is
 * order-sensitive.  Kept byte-identical.
 */
/* ARGSUSED */
void
{
"clientp = %p, msg = %p, sglp = %p , wqep = %p, qp_hdl = %p\n",
/* use send wqe pointer as the WR ID */
} else {
}
/*
 * We pick the correct msgbuf based on the nature of the transaction.
 * Where the send msgbuf is available, we pick it to provide the
 * context of the outgoing MAD. Note that if this is a termination
 * context, then the send buffer is invalid even if the sequenced
 * flags is set because the termination message only has a receive
 * buffer set up.
 */
} else {
} else {
}
}
/*
 * For unsolicited messages, we only have the sender's MAD at hand.
 * So, we must flip the response bit in the method for the outgoing MAD.
 */
}
/* payload starts after the MAD header; RMPP header overlays next */
offset = sizeof (ib_mad_hdr_t);
rmpp_hdr = (ibmf_rmpp_hdr_t *)
IBMF_TNF_TRACE, "",
"ibmf_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
" next_seg = %d, num_pkts = %d\n",
/*
 * Initialize the RMPP header
 */
/* first, last packet flags set only for type DATA */
rmpp_ctx->rmpp_flags |=
else
rmpp_ctx->rmpp_flags |=
} else {
data_sz = 0;
}
IBMF_TNF_TRACE, "",
"ibmf_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
" rmpp_flags = 0x%x, rmpp_segnum = %d, pyld_nwl = %d\n",
}
/* determine offset to start class header */
&cl_hdr_sz, &cl_hdr_off);
offset += cl_hdr_off;
}
data_sz);
}
/*
 * ibmf_i_init_recv_wqe():
 *	Initialize a receive WQE
 *
 * NOTE(review): incomplete extraction — the signature, the WR ID and
 * scatter-list field assignments, and both branch bodies of the
 * default-vs-alternate QP handle test are missing.  The visible sizing
 * fragment shows the receive buffer covers a GRH plus one MAD
 * (sizeof (ib_grh_t) + IBMF_MAD_SIZE).  Kept byte-identical.
 */
void
{
"qpp = %p, sglp = %p , wqep = %p, ud_dest = %p, qp_hdl = %p\n",
/*
 * we set a bit in the WR ID to be able to easily distinguish
 * between send completions and recv completions
 */
sizeof (ib_grh_t) + IBMF_MAD_SIZE);
if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
} else {
}
}
/*
 * ibmf_i_extend_wqe_cache():
 *	Extend the kmem WQE cache
 *
 * NOTE(review): incomplete extraction — the signature, the WQE mgt
 * struct allocation whose failure path is the first return, and the
 * head of the ibmf_i_extend_wqe_mem() call are missing.  Visible
 * contract: IBMF_NO_RESOURCES on either failure, IBMF_SUCCESS
 * otherwise.  Kept byte-identical.
 */
int
{
"ibmf_i_extend_wqe_cache() enter, cip = %p, qp_hdl = %p, "
/*
 * Allocate memory for the WQE management structure
 */
/* failure path: WQE mgt structure allocation failed */
"ibmf_i_extend_wqe_cache(): %s\n",
"ibmf_i_extend_wqe_cache() exit\n");
return (IBMF_NO_RESOURCES);
}
/* Allocate and register more WQE memory */
block) != IBMF_SUCCESS) {
"ibmf_i_extend_wqe_cache(): %s\n",
"ibmf_i_extend_wqe_cache() exit\n");
return (IBMF_NO_RESOURCES);
}
"ibmf_i_extend_wqe_cache() exit\n");
return (IBMF_SUCCESS);
}
/*
 * ibmf_i_extend_wqe_mem():
 *	Allocate and register more WQE memory, and expand the VMEM arena
 *
 * FIX(review): the enter trace string previously read
 * "ibmf_i_extend_wqe_cache() enter..." — a copy/paste from the caller —
 * and its first fragment ended without a separator, so concatenation
 * with the next fragment produced "qp_hdl = %pwqe_mgt = %p".  Corrected
 * to name this function and to add the missing ", " separator.  No
 * other token changed.
 *
 * NOTE(review): incomplete extraction — signature, allocation and
 * ibt_register_mr() call heads, the vmem_add() failure test, and the
 * list-append statements are missing.  Visible contract:
 * IBMF_NO_RESOURCES on any failure, IBMF_SUCCESS otherwise.
 */
static int
{
"ibmf_i_extend_wqe_mem() enter, cip = %p, qp_hdl = %p, "
"wqe_mgt = %p, block = %d\n",
/*
 * Allocate more memory for the WQEs to be used by the
 * specified QP
 */
/* failure path: WQE memory allocation failed */
"ibmf_i_extend_wqe_mem(): %s\n",
"ibmf_i_extend_wqe_mem() exit\n");
return (IBMF_NO_RESOURCES);
}
/* Register the allocated memory */
if (status != IBT_SUCCESS) {
"ibmf_i_extend_wqe_mem(): %s\n",
"ibmf_i_extend_wqe_mem() exit\n");
return (IBMF_NO_RESOURCES);
}
/* Store the memory registration information */
/* Get the VMEM arena based on the QP type */
if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
} else {
}
/* Add these addresses to the vmem arena */
"ibmf_i_extend_wqe_mem(): %s\n",
"ibmf_i_extend_wqe_mem() exit\n");
return (IBMF_NO_RESOURCES);
}
/* Get the WQE management pointers based on the QP type */
if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
/* Add the new wqe management struct to the end of the list */
} else {
/* Add the new wqe management struct to the end of the list */
}
return (IBMF_SUCCESS);
}
/*
 * ibmf_i_alloc_send_resources():
 *	Allocate send resources (the send WQE)
 *
 * NOTE(review): incomplete extraction — the signature, the cache
 * pointer selection bodies, the kmem_cache_alloc() calls (both the
 * initial attempt and the retry after extending the cache), and the
 * statistics increments on the alternate-QP branch are missing.
 * Visible contract: IBMF_NO_RESOURCES when the cache cannot be
 * extended or the retry allocation fails, IBMF_SUCCESS otherwise.
 * Kept byte-identical.
 */
int
{
"ibmf_i_alloc_send_resources() enter, cip = %p, msg = %p, "
/* Get the WQE kmem cache pointer based on the QP type */
if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
else {
}
/*
 * Allocate a send WQE from the send WQE kmem cache
 * Do not block here as we are holding the msgimpl mutex.
 */
/*
 * Attempt to extend the cache and then retry the
 * kmem_cache_alloc()
 * The block argument (third) is set to B_FALSE.
 */
/* failure path: cache extension failed */
"ibmf_i_alloc_send_resources(): %s\n",
"ibmf_i_alloc_send_resources() exit\n");
return (IBMF_NO_RESOURCES);
} else {
/* Allocation failed again. Give up here. */
1);
IBMF_TNF_ERROR, "",
"ibmf_i_alloc_send_resources(): %s\n",
IBMF_TNF_TRACE, "",
"ibmf_i_alloc_send_resources() exit\n");
return (IBMF_NO_RESOURCES);
}
}
}
/* bump the per-CI (default QP) or per-altqp allocation count */
if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
cip->ci_wqes_alloced++;
} else {
}
"ibmf_i_alloc_send_resources() exit\n");
return (IBMF_SUCCESS);
}
/*
 * ibmf_i_free_send_resources():
 *	Free send resources (just the send WQE)
 *
 * NOTE(review): incomplete extraction — the signature, the cache
 * pointer selection bodies, the kmem_cache_free() call, and the
 * bodies of the zero-count conditionals (presumably cv_broadcast or
 * similar wakeups — confirm against full source) are missing.
 * Kept byte-identical.
 */
/* ARGSUSED */
void
{
"ibmf_i_free_send_resources() enter, cip = %p, msg = %p, "
/* Get the WQE kmem cache pointer based on the QP type */
if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
else {
}
/* return the send WQE to the kmem cache */
if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
cip->ci_wqes_alloced--;
if (cip->ci_wqes_alloced == 0)
} else {
if (altqp->isq_wqes_alloced == 0)
}
"ibmf_i_free_send_resources() exit\n");
}