dapl_tavor_ibtf_util.c revision 1ed53a3f65abecaadc1b967e341970ad0f6b2aeb
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "dapl.h"
#include "dapl_adapter_util.h"
#include "dapl_evd_util.h"
#include "dapl_cr_util.h"
#include "dapl_lmr_util.h"
#include "dapl_rmr_util.h"
#include "dapl_cookie.h"
#include "dapl_ring_buffer_util.h"
#include "dapl_vendor.h"
#include "dapl_tavor_ibtf_impl.h"
/* Function prototypes */
/*
* The following declarations/functions are to be used by the base library;
* placeholder for now
*/
int g_loopback_connection = 0;
/*
* dapl_ib_cq_alloc
*
* Alloc a CQ
*
* Input:
* ia_handle IA handle
* evd_ptr pointer to EVD struct
* cno_ptr pointer to CNO struct
* cqlen minimum QLen
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int ia_fd;
int hca_fd;
int retval;
/* cq handle is created even for non-cq type events */
/* since cq handle is where the evd fd gets stored. */
dapl_os_alloc(sizeof (struct dapls_ib_cq_handle));
"cq_alloc: evd_ptr 0x%p, cq_handle == NULL\n",
evd_ptr);
return (DAT_INSUFFICIENT_RESOURCES);
}
/* get the hca information from ia_ptr */
}
}
"cq_alloc: evd 0x%p, flags 0x%x, cookie 0x%llx, hkey 0x%llx,\n"
" cno_hkey 0x%llx, cq_size %d\n", evd_ptr,
/* The next line is only needed for backward compatibility */
/* call into driver to allocate cq */
}
"cq_alloc: created, evd 0x%p, hkey 0x%016llx\n\n", evd_ptr,
/*
* allocate a hash table for wrid management, the key is
* required only for evd which have a CQ mapped to
* it.
*/
"cq_alloc: hash_create failed\n");
sizeof (struct dapls_ib_cq_handle));
return (DAT_INSUFFICIENT_RESOURCES |
}
/* In the case of Arbel or Hermon */
if (mcq->mcq_polldbr_mapoffset != 0 ||
mcq->mcq_polldbr_maplen != 0)
if (mcq->mcq_armdbr_mapoffset != 0 ||
mcq->mcq_armdbr_maplen != 0)
(void *)0, mcq->mcq_maplen,
mcq->mcq_mapoffset);
if (retval != 0) {
"cq_alloc: EVD_FREE err:%s\n",
}
"cq_alloc: DAPL_CQ_ALLOC failed\n");
/* free the hash table we created */
sizeof (struct dapls_ib_cq_handle));
return (DAT_INSUFFICIENT_RESOURCES);
}
/*
* cq_size is the actual depth of the CQ which is 1 more
* than what ibt_alloc_cq reports. However the application
* can only use (cq_size - 1) entries.
*/
}
return (DAT_SUCCESS);
}
/*
* dapl_ib_cq_resize
*
* Resize a CQ
*
* Input:
* evd_ptr pointer to EVD struct
* cqlen new length of the cq
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INVALID_HANDLE
* DAT_INTERNAL_ERROR
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
if (DAT_INSUFFICIENT_RESOURCES == dat_status) {
/* attempt to resize back to the current size */
if (DAT_SUCCESS != dat_status) {
/*
* XXX this is catastrophic need to post an event
* to the async evd
*/
return (DAT_INTERNAL_ERROR);
}
}
return (dat_status);
}
/*
* dapli_ib_cq_resize_internal
*
* An internal routine to resize a CQ.
*
* Input:
* evd_ptr pointer to EVD struct
* cqlen new length of the cq
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INVALID_HANDLE
* DAT_INSUFFICIENT_RESOURCES
*
*/
static DAT_RETURN
{
int ia_fd;
int hca_fd;
int retval;
"dapls_ib_cq_resize: evd 0x%p cq 0x%p "
"evd_hkey 0x%016llx cqlen %d\n",
/*
* Since CQs are created in powers of 2 with one non-usable slot,
* its possible that the previously allocated CQ has sufficient
* entries. If the current cq is big enough and it is mapped in
* we are done.
*/
return (DAT_SUCCESS);
}
/* unmap the CQ before resizing it */
if (hca_ptr->hermon_resize_cq == 0) {
cq_handle->cq_map_len) < 0)) {
"cq_resize: munmap(%p:0x%llx) failed(%d)\n",
return (DAT_INVALID_HANDLE);
}
/* cq_addr is unmapped and no longer valid */
}
/* The next line is only needed for backward compatibility */
"dapls_ib_cq_resize: evd 0x%p, err: %s\n",
return (DAT_INVALID_HANDLE);
} else { /* Need to retry resize with a smaller qlen */
return (DAT_INSUFFICIENT_RESOURCES);
}
}
/* In the case of Arbel or Hermon */
if (mcq->mcq_polldbr_mapoffset != 0 ||
mcq->mcq_polldbr_maplen != 0)
if (mcq->mcq_armdbr_mapoffset != 0 ||
mcq->mcq_armdbr_maplen != 0)
if (cq_addr == MAP_FAILED ||
if (hca_ptr->hermon_resize_cq == 0)
"cq_resize: mmap failed(%d)\n", errno);
/* Need to retry resize with a smaller qlen */
return (DAT_INSUFFICIENT_RESOURCES);
}
if (hca_ptr->hermon_resize_cq == 0) {
/*
* upon resize the old events are moved to the start of the CQ
* hence we need to reset the consumer index too
*/
cq_handle->cq_consindx = 0;
} else { /* Hermon */
}
return (DAT_SUCCESS);
}
/*
* dapl_ib_cq_free
*
* Free a CQ
*
* Input:
* ia_handle IA handle
* evd_ptr pointer to EVD struct
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INVALID_HANDLE
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int retval;
"dapls_ib_cq_free: evd 0x%p cq 0x%p hkey %016llx\n", evd_ptr,
/* If the cq was mmap'd unmap it before freeing it */
}
DAPL_EVD_FREE, &args);
if (retval != 0) {
"dapls_ib_cq_free: evd 0x%p, err: %s\n",
}
return (DAT_SUCCESS);
}
/*
* dapl_set_cq_notify
*
* Set up CQ completion notifications
*
* Input:
* ia_handle IA handle
* evd_ptr pointer to EVD struct
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INVALID_HANDLE
* DAT_INSUFFICIENT_RESOURCES
*
*/
/* ARGSUSED */
{
int retval;
"dapls_ib_cq_notify: evd 0x%p cq 0x%p\n", evd_ptr,
(void *)cq_handle);
return (retval);
}
/* ARGSUSED */
{
int retval;
"dapls_set_cqN_notify:evd %p cq %p num_events %d\n", evd_ptr,
(void *)cq_handle, num_events);
return (retval);
}
/*
* dapls_ib_cqd_create
*
* Set up CQ notification event thread
*
* Input:
* ia_handle IA handle
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INVALID_HANDLE
* DAT_INSUFFICIENT_RESOURCES
*
*/
/* ARGSUSED */
{
return (DAT_SUCCESS);
}
/*
* dapl_cqd_destroy
*
* Destroy CQ notification event thread
*
* Input:
* ia_handle IA handle
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INVALID_HANDLE
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int retval;
/* free up the dummy cq */
"dapls_ib_cqd_destroy: cq %p\n", (void *)cq_handle);
DAPL_EVD_FREE, &args);
if (retval != 0) {
"dapls_ib_cqd_destroy: EVD_FREE err:%d errno:%d\n",
}
}
return (DAT_SUCCESS);
}
/*
* dapl_ib_pd_alloc
*
* Alloc a PD
*
* Input:
* ia_handle IA handle
* PZ_ptr pointer to PZEVD struct
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
struct dapls_ib_pd_handle *pd_p;
int retval;
"pd_alloc: ia 0x%p, pz 0x%p, cannot allocate pd\n",
return (DAT_INSUFFICIENT_RESOURCES);
}
DAPL_PD_ALLOC, &args);
if (retval != 0) {
"pd_alloc: ia 0x%p, pz 0x%p, cannot create pd, "
}
"pd_alloc: successful, ia 0x%p, pz 0x%p, hkey %016llx\n",
return (DAT_SUCCESS);
}
/*
* dapl_ib_pd_free
*
* Free a PD
*
* Input:
* ia_handle IA handle
* PZ_ptr pointer to PZ struct
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
struct dapls_ib_pd_handle *pd_p;
int retval;
DAPL_PD_FREE, &args);
if (retval != 0) {
"pd_free: pz 0x%p, cannot free pd\n", pz);
}
return (DAT_SUCCESS);
}
/*
* dapl_ib_mr_register
*
* Register a virtual memory region
*
* Input:
* ia_handle IA handle
* lmr pointer to dapl_lmr struct
* virt_addr virtual address of beginning of mem region
* length length of memory region
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int ia_fd;
int retval;
"mr_register: lmr 0x%p, ia 0x%p, "
return (DAT_INSUFFICIENT_RESOURCES);
}
"mr_register: lmr 0x%p, ia 0x%p, "
return (DAT_INVALID_PARAMETER);
}
"mr_register: lmr 0x%p, pd_hkey 0x%016llx, vaddr 0x%016llx, "
/* call into driver to allocate MR resource */
if (retval != 0) {
"mr_register: lmr 0x%p, failed (%s)\n",
}
"mr_register: successful, lmr 0x%p, mr_hkey 0x%016llx, "
return (DAT_SUCCESS);
}
/*
* dapl_ib_mr_register_shared
*
* Register a shared virtual memory region
*
* Input:
* ia_handle IA handle
* lmr pointer to dapl_lmr struct
* virt_addr virtual address of beginning of mem region
* cookie shared memory identifier
* length length of memory region
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int ia_fd, i;
int retval;
"mr_register_shared: lmr 0x%p, ia 0x%p, "
return (DAT_INSUFFICIENT_RESOURCES);
}
"mr_register_shared: lmr 0x%p, ia 0x%p, "
return (DAT_INVALID_PARAMETER);
}
/*CONSTCOND*/
"mr_register_shared: lmr 0x%p, pd_hkey 0x%016llx, "
"vaddr 0x%016llx, len %llu, flags 0x%x\n",
"mr_register_shared: cookie \n0x");
for (i = 4; i >= 0; i--) {
}
/* call into driver to allocate MR resource */
if (retval != 0) {
"mr_register_shared: lmr 0x%p, failed (%s)\n",
}
"mr_register_shared: successful, lmr 0x%p, mr_hkey 0x%016llx, "
return (DAT_SUCCESS);
}
/*
* dapl_ib_mr_deregister
*
* Free a memory region
*
* Input:
* lmr pointer to dapl_lmr struct
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int retval;
"mr_deregister: lmr 0x%p, hkey 0x%016llx, lmr_ctx 0x%08x\n"
" vaddr 0x%016llx, len %llu, flags 0x%x\n",
/* call into driver to do MR deregister */
if (retval != 0) {
"mr_deregister: lmr 0x%p, failed (%s)\n",
}
"mr_deregister: successful\n\n");
return (DAT_SUCCESS);
}
/*
* dapl_ib_mr_register_lmr
*
* Register a memory region based on attributes of an existing one
*
* Input:
* ia_handle IA handle
* lmr pointer to dapl_lmr struct
* virt_addr virtual address of beginning of mem region
* length length of memory region
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
struct dapls_ib_mr_handle *orig_mr_handle;
int ia_fd;
int retval;
"mr_register_lmr: lmr 0x%p, ia 0x%p, "
return (DAT_INSUFFICIENT_RESOURCES);
}
"mr_register_lmr: lmr 0x%p, hkey 0x%016llx, lmr_ctx 0x%08x\n"
" vaddr 0x%016llx, len %llu, flags 0x%x\n",
/* call into driver to allocate MR resource */
if (retval != 0) {
"mr_register_lmr: failed (%s), orig_hkey (%016llx)\n",
}
"mr_registered_lmr: successful, lmr 0x%p, hkey 0x%016llx\n",
return (DAT_SUCCESS);
}
/*
* dapls_ib_mw_alloc
*
* Bind a protection domain to a memory window
*
* Input:
* rmr Initialized rmr to hold binding handles
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int ia_fd;
int retval;
ib_hca_handle))->ia_fd;
"mw_alloc: rmr 0x%p, cannot alloc mw_handle\n", rmr);
return (DAT_INSUFFICIENT_RESOURCES);
}
"mw_alloc: rmr 0x%p, pd_hkey 0x%016llx\n",
if (retval != 0) {
}
"mw_alloc: successful, rmr 0x%p, mw_hkey 0x%llx, "
return (DAT_SUCCESS);
}
/*
* dapls_ib_mw_free
*
* Release bindings of a protection domain to a memory window
*
* Input:
* rmr Initialized rmr to hold binding handles
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int ia_fd;
int retval;
ib_hca_handle))->ia_fd;
if (retval != 0) {
}
return (DAT_SUCCESS);
}
/*
* dapls_ib_mw_bind
*
* Bind a protection domain to a memory window
*
* Input:
* rmr Initialized rmr to hold binding handles
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
int retval;
if (length > 0) {
} else {
}
/*
* wre.wr_flags = (is_signaled) ? IBT_WR_SEND_SIGNAL :
* IBT_WR_NO_FLAGS;
* Till we fix the chan alloc flags do the following -
*/
/* Translate dapl flags */
completion_flags) ? IBT_WR_SEND_FENCE : 0;
/* suppress completions */
completion_flags) ? 0 : IBT_WR_SEND_SIGNAL;
"mw_bind: rmr 0x%p, wr_flags 0x%x, rkey 0x%x, bind_flags 0x%x\n"
" bind_va 0x%llx, bind_len 0x%llx, mem_priv 0x%x\n",
/* This flag is used to control notification of completions */
} else {
/*
* The evd waiter will use threshold to control wakeups
* Hence the event notification will be done via arming the
* CQ so we do not need special notification generation
* hence set suppression to true
*/
}
if (retval != 0) {
}
return (DAT_SUCCESS);
}
/*
* dapls_ib_mw_unbind
*
* Unbind a protection domain from a memory window
*
* Input:
* rmr Initialized rmr to hold binding handles
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INSUFFICIENT_RESOURCES
*
*/
{
"mw_unbind: rmr 0x%p, enter\n", rmr);
"mw_unbind: rmr 0x%p, exit\n\n", rmr);
return (retval);
}
/*
* Processes async events and calls appropriate callbacks so that events
* can be posted to the async evd.
*/
void
{
/*
* Walk the EPs to match this EP, then invoke the
* routine when we have the EP we need
*/
NULL);
if (ep_ptr ==
break;
}
}
(void *)ep_ptr);
break;
case IBT_ERROR_CQ:
/*
* Walk the EVDs to match this EVD, then invoke the
* routine when we have the EVD we need
*/
if (evd_ptr ==
break;
}
}
(void *)evd_ptr);
break;
case IBT_ERROR_PORT_DOWN:
(void *)async_evd);
break;
default:
/*
* We are not interested in the following events
* case IBT_EVENT_PATH_MIGRATED:
* case IBT_EVENT_COM_EST:
* case IBT_EVENT_SQD:
* case IBT_ERROR_PATH_MIGRATE_REQ:
* case IBT_EVENT_PORT_UP:
*/
"dapls_ib_async_callback: unhandled async code:%x\n",
break;
}
}
/*
* dapls_ib_setup_async_callback
* The reference implementation calls this to register callbacks,
* but since our model of polling for events is based on retrieving
* events by the waiting thread itself this is a NOOP for us.
*/
/* ARGSUSED */
IN unsigned int *callback_handle,
{
return (DAT_SUCCESS);
}
/*
* dapls_ib_query_hca
*
* Query the HCA attributes
*
* Input:
* hca_handle hca handle
* ep_attr attribute of the ep
*
* Output:
* none
*
* Returns:
* DAT_SUCCESS
* DAT_INVALID_PARAMETER
*/
/* these are just arbitrary values for now */
static DAT_RETURN
{
/* max_iov_segments_per_dto is for non-RDMA */
/* all instances of IA */
"\tadapter_name %s\n "
"\tvendor_name %s\n "
"\thardware_version_major 0x%08x\n"
"\tmax_eps %d\n"
"\tmax_dto_per_ep %d\n"
"\tmax_rdma_read_per_ep_in %d\n"
"\tmax_rdma_read_per_ep_out %d\n"
"\tmax_evds %d\n"
"\tmax_evd_qlen %d\n"
"\tmax_iov_segments_per_dto %d\n"
"\tmax_lmrs %d\n"
"\tmax_lmr_block_size 0x%016llx\n"
"\tmax_lmr_virtual_address 0x%016llx\n"
"\tmax_pzs %d\n"
"\tmax_mtu_size 0x%016llx\n"
"\tmax_rdma_size 0x%016llx\n"
"\tmax_rmrs %d\n"
"\tmax_rmr_target_address 0x%016llx\n"
"\tmax_iov_segments_per_rdma_read %d\n"
"\tmax_iov_segments_per_rdma_write %d\n"
"\tmax_rdma_read_in %d\n"
"\tmax_rdma_read_out %d\n"
"\tmax_srqs %d\n"
"\tmax_ep_per_srq %d\n"
"\tmax_recv_per_srq %d\n"
"\n",
return (DAT_SUCCESS);
}
/* ARGSUSED */
static DAT_RETURN
{
return (DAT_SUCCESS);
}
static void
{
}
/* ARGSUSED */
{
return (DAT_INVALID_PARAMETER);
}
if (retval != 0) {
}
}
}
}
return (DAT_INVALID_PARAMETER);
}
return (DAT_SUCCESS);
}
void
{
int head;
if (qp_ptr->qp_srq_enabled) {
/*
* For QPs with SRQ attached store the premature event in the
* SRQ's premature event list
*/
/*
* mark cqe as valid before storing it in the
* premature events list
*/
} else {
}
}
void
{
if (qp->qp_srq_enabled) {
} else {
}
qp->qp_num_premature_events = 0;
}
/*
* Return the premature events to the free list after processing it
* This function is called only for premature events on the SRQ
*/
void
IN int free_index)
{
int tail;
}
/*
* dapls_ib_get_async_event
*
* Translate an asynchronous event type to the DAT event.
* Note that different providers have different sets of errors.
*
* Input:
* cause_ptr provider event cause
*
* Output:
* async_event DAT mapping of error
*
* Returns:
* DAT_SUCCESS
* DAT_NOT_IMPLEMENTED Caller is not interested in this event
*/
{
switch (code) {
case IBT_ERROR_CQ:
break;
/* CATASTROPHIC errors */
case IBT_ERROR_PORT_DOWN:
break;
default:
/*
* Errors we are not interested in reporting:
* IBT_EVENT_PATH_MIGRATED
* IBT_ERROR_PATH_MIGRATE_REQ
* IBT_EVENT_COM_EST
* IBT_EVENT_SQD
* IBT_EVENT_PORT_UP
*/
}
return (dat_status);
}
OUT int *num_events)
{
int ia_fd;
int retval;
*num_events = 0;
if (evp_ptr) {
} else {
evp_msg.evp_num_ev = 0;
}
evp_msg.evp_num_polled = 0;
"event_poll: evd 0x%p, hkey 0x%llx, threshold %d,\n"
" timeout 0x%llx, evp_ptr 0x%p, num_ev %d\n",
/*
* Poll the EVD and if there are no events then we wait in
* the kernel.
*/
if (retval != 0) {
"event_poll: evd 0x%p, retval %d err: %s\n",
}
"dapls_ib_event_poll: evd %p nevents %d\n", evd_ptr,
return (DAT_SUCCESS);
}
{
int ia_fd;
int retval;
"event_wakeup: evd 0x%p, hkey 0x%llx\n",
/*
* Wakeup any thread waiting in the kernel on this EVD
*/
if (retval != 0) {
"event_wakeup: evd 0x%p, retval %d err: %s\n",
}
return (DAT_SUCCESS);
}
/*
* dapls_ib_cq_peek is used by dapl_cno_wait(). After the CQ has been
* inspected we arm the CQ if it was empty.
*
*/
void dapls_ib_cq_peek(
{
*num_cqe = 0;
/* No events found in CQ arm it now */
if (*num_cqe == 0) {
"dapls_ib_cq_peek: set_cq_notify\n");
}
}
}
/*
* Modifies the CNO associated to an EVD
*/
{
int ia_fd;
int retval;
if (cno_ptr) {
} else {
evmc_msg.evmc_cno_hkey = 0;
}
"modify_cno: evd 0x%p, hkey 0x%llx, cno 0x%p, cno_hkey 0x%llx\n",
/*
* modify CNO associated with the EVD
*/
if (retval != 0) {
"modify_cno: evd 0x%p, cno %p retval %d err: %s\n",
}
return (DAT_SUCCESS);
}
{
int retval;
if (timeout == DAT_TIMEOUT_INFINITE) {
} else {
}
"cno_wait: cno 0x%p, hkey 0x%016llx, timeout 0x%016llx\n",
if (retval != 0) {
"cno_wait: cno 0x%p ioctl err: %s\n",
}
"cno_wait: woken up, cno 0x%p, evd 0x%p\n\n",
return (DAT_SUCCESS);
}
{
int retval;
"cno_alloc: cno 0x%p, wait_agent != NULL\n", cno_ptr);
return (DAT_NOT_IMPLEMENTED);
}
DAPL_CNO_ALLOC, &args);
if (retval != 0) {
"cno_alloc: cno 0x%p ioctl err: %s\n",
}
"cno_alloc: cno 0x%p allocated, ia_ptr 0x%p, hkey 0x%016llx\n",
return (DAT_SUCCESS);
}
{
int retval;
"cno_free: cno 0x%p, hkey 0x%016llx\n",
if (retval != 0) {
"cno_free: cno 0x%p ioctl err: %s\n",
}
"cno_free: cno 0x%p freed\n", cno_ptr);
return (DAT_SUCCESS);
}
{
if (retval < 0) {
switch (errnum) {
case EINVAL:
return (DAT_INVALID_PARAMETER);
case ENOMEM:
return (DAT_INSUFFICIENT_RESOURCES);
case ETIME:
return (DAT_TIMEOUT_EXPIRED);
case EINTR:
return (DAT_INTERRUPTED_CALL);
case EFAULT:
return (DAT_INTERNAL_ERROR);
default:
return (DAT_INTERNAL_ERROR);
}
} else {
"ERROR: got IBTF error %d\n", retval);
switch (retval) {
/*
* Connecting to a non-existent conn qual gets
* us here
*/
return (DAT_ERROR(DAT_INVALID_PARAMETER,
case IBT_INSUFF_RESOURCE:
return (DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, 0));
case IBT_AR_NOT_REGISTERED:
/*
* forward ipaddr lookup failed
*/
return (DAT_ERROR(DAT_INVALID_ADDRESS, 0));
default:
return (DAT_INTERNAL_ERROR);
}
}
}
typedef struct dapls_ib_dbp_page_s {
struct dapls_ib_dbp_page_s *next;
int fd;
/* Function that returns a pointer to the specified doorbell entry */
{
/* Check to see if page already mapped for entry */
return ((uint32_t *)
}
/* If not, map a new page and prepend to pagelist */
return (MAP_FAILED);
}
return (MAP_FAILED);
}
}