/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* UDAPL kernel agent
*/
#include <sys/sysmacros.h>
#include <daplt_if.h>
#include <daplt.h>
/*
* The following variables support the debug log buffer scheme.
*/
#ifdef DEBUG
#else /* DEBUG */
#endif /* DEBUG */
static int daplka_dbginit = 0;
static void daplka_console(const char *, ...);
static void daplka_debug(const char *, ...);
#define DERR \
if (daplka_dbg & 0x100) \
#ifdef DEBUG
#define DINFO \
#define D1 \
if (daplka_dbg & 0x01) \
#define D2 \
if (daplka_dbg & 0x02) \
#define D3 \
if (daplka_dbg & 0x04) \
#define D4 \
if (daplka_dbg & 0x08) \
#else /* DEBUG */
#endif /* DEBUG */
/*
* driver entry points
*/
/*
* types of ioctls
*/
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
/*
* common ioctls and supporting functions
*/
static int daplka_ia_destroy(daplka_resource_t *);
/*
* EP ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
static int daplka_ep_destroy(daplka_resource_t *);
static void daplka_hash_ep_free(void *);
static void daplka_timer_info_free(daplka_timer_info_t *);
static void daplka_timer_handler(void *);
static void daplka_timer_dispatch(void *);
static void daplka_timer_thread(void *);
static int daplka_cancel_timer(daplka_ep_resource_t *);
static void daplka_hash_timer_free(void *);
/*
* EVD ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
static int daplka_evd_destroy(daplka_resource_t *);
static void daplka_cq_handler(ibt_cq_hdl_t, void *);
static void daplka_evd_wakeup(daplka_evd_resource_t *,
static void daplka_evd_event_enqueue(daplka_evd_event_list_t *,
static void daplka_hash_evd_free(void *);
/*
* SRQ ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
static int daplka_srq_destroy(daplka_resource_t *);
static void daplka_hash_srq_free(void *);
/*
* Miscellaneous ioctls
*/
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
/*
* PD ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
static int daplka_pd_destroy(daplka_resource_t *);
static void daplka_hash_pd_free(void *);
/*
* SP ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
static int daplka_sp_destroy(daplka_resource_t *);
static void daplka_hash_sp_free(void *);
static void daplka_hash_sp_unref(void *);
/*
* MR ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
static int daplka_mr_destroy(daplka_resource_t *);
static void daplka_hash_mr_free(void *);
static void daplka_shared_mr_free(daplka_mr_resource_t *);
/*
* MW ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
static int daplka_mw_destroy(daplka_resource_t *);
static void daplka_hash_mw_free(void *);
/*
* CNO ioctls and supporting functions
*/
cred_t *, int *);
cred_t *, int *);
cred_t *, int *);
static int daplka_cno_destroy(daplka_resource_t *);
static void daplka_hash_cno_free(void *);
/*
* CM handlers
*/
ibt_cm_return_args_t *, void *, ibt_priv_data_len_t);
ibt_cm_return_args_t *, void *, ibt_priv_data_len_t);
/*
* resource management routines
*/
static int daplka_resource_reserve(minor_t *);
static void daplka_resource_init(void);
static void daplka_resource_fini(void);
/*
* hash table routines
*/
static void daplka_hash_walk(daplka_hash_table_t *, int (*)(void *, void *),
void *, krw_t);
void (*)(void *), void (*)(void *));
static void daplka_hash_destroy(daplka_hash_table_t *);
static void daplka_hash_generic_lookup(void *);
static uint32_t daplka_timer_hkey_gen();
/*
* async event handlers
*/
/*
* IBTF wrappers and default limits used for resource accounting
*/
static ibt_status_t
static ibt_status_t
static ibt_status_t
static ibt_status_t
static ibt_status_t
static ibt_status_t
static ibt_status_t
static ibt_status_t
static ibt_status_t
static ibt_status_t
ibt_mr_desc_t *);
static ibt_status_t
static ibt_status_t
static ibt_status_t
/*
* macros for manipulating resource objects.
* these macros can be used on objects that begin with a
* daplka_resource_t header.
*/
}
} else { \
} \
}
MUTEX_DRIVER, NULL); \
}
}
}
}
/*
* depending on the timeout value does a cv_wait_sig or cv_timedwait_sig
*/
}
}
((hca)->hca_ref_cnt != 0 || \
(hca)->hca_qp_count != 0 || \
(hca)->hca_cq_count != 0 || \
(hca)->hca_pd_count != 0 || \
(hca)->hca_mw_count != 0 || \
(hca)->hca_mr_count != 0)
daplka_open, /* cb_open */
daplka_close, /* cb_close */
nodev, /* cb_strategy */
nodev, /* cb_print */
nodev, /* cb_dump */
nodev, /* cb_read */
nodev, /* cb_write */
daplka_ioctl, /* cb_ioctl */
nodev, /* cb_devmap */
nodev, /* cb_mmap */
nodev, /* cb_segmap */
nochpoll, /* cb_chpoll */
ddi_prop_op, /* cb_prop_op */
NULL, /* cb_stream */
CB_REV, /* rev */
nodev, /* int (*cb_aread)() */
nodev /* int (*cb_awrite)() */
};
DEVO_REV, /* devo_rev */
0, /* devo_refcnt */
daplka_info, /* devo_getinfo */
nulldev, /* devo_identify */
nulldev, /* devo_probe */
daplka_attach, /* devo_attach */
daplka_detach, /* devo_detach */
nodev, /* devo_reset */
&daplka_cb_ops, /* devo_cb_ops */
nulldev, /* power */
ddi_quiesce_not_needed, /* devo_quiesce */
};
/*
* Module linkage information for the kernel.
*/
"uDAPL Service Driver",
};
#ifdef _LP64
#else
#endif
};
/*
* daplka_dev holds global driver state and a list of HCAs
*/
/*
* global SP hash table
*/
/*
* timer_info hash table
*/
/*
* shared MR avl tree
*/
static int daplka_shared_mr_cmp(const void *, const void *);
/*
* default kmem flags used by this driver
*/
/*
* taskq used for handling background tasks
*/
/*
* daplka_cm_delay is the length of time the active
* side needs to wait before timing out on the REP message.
*/
/*
* modunload will fail if pending_close is non-zero
*/
NULL,
};
/*
* Module Installation
*/
int
_init(void)
{
int status;
if (status != 0) {
return (status);
}
daplka_dbgnext = 0;
daplka_dbginit = 1;
if (status != DDI_SUCCESS) {
/* undo inits done before mod_install */
}
return (status);
}
/*
* Module Removal
*/
int
_fini(void)
{
int status;
/*
* mod_remove causes detach to be called
*/
return (status);
}
return (status);
}
/*
* Return Module Info.
*/
int
{
}
static void
{
daplka_hca_t *h;
} else {
h = dp->daplka_hca_list_head;
h = h->hca_next;
}
}
static void
{
daplka_hca_t *h;
else {
h = dp->daplka_hca_list_head;
h = h->hca_next;
}
}
static int
{
int j;
/*
* open the HCA for use
*/
if (status != IBT_SUCCESS) {
if (status == IBT_HCA_IN_USE) {
DERR("ibt_open_hca() returned IBT_HCA_IN_USE\n");
} else {
}
return (status);
}
/*
* query HCA to get its info
*/
if (status != IBT_SUCCESS) {
DERR("ibt_query_hca returned %d (hca_guid 0x%llx)\n",
goto out;
}
/*
* query HCA to get info of all ports
*/
if (status != IBT_SUCCESS) {
DERR("ibt_query_all_ports returned %d "
"(hca_guid 0x%llx)\n", status,
goto out;
}
DERR("hca guid 0x%llx, nports %d\n",
for (j = 0; j < hca->hca_nports; j++) {
DERR("port %d: state %d prefix 0x%016llx "
"guid %016llx\n",
}
return (IBT_SUCCESS);
out:
return (status);
}
/*
* this function obtains the list of HCAs from IBTF.
* the HCAs are then opened and the returned handles
* and attributes are stored into the global daplka_dev
* structure.
*/
static int
{
int i;
/*
* get the num & list of HCAs present
*/
if (hca_count != 0) {
/*
* get the info for each available HCA
*/
for (i = 0; i < hca_count; i++)
}
return (IBT_SUCCESS);
else
return (IBT_FAILURE);
}
static int
{
if (status != IBT_SUCCESS) {
DERR("ibt_close_hca returned %d"
" (hca_guid 0x%llx)\n", status,
return (status);
}
}
return (IBT_SUCCESS);
}
/*
* closes all HCAs and frees up the HCA list
*/
static int
{
if (DAPLKA_HCA_BUSY(hca)) {
return (IBT_HCA_RESOURCES_NOT_FREED);
}
return (status);
}
DERR("dapl kernel agent unloaded\n");
return (IBT_SUCCESS);
}
/*
* Attach the device, create and fill in daplka_dev
*/
static int
{
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
/*
* Allocate soft data structure
*/
DERR("attach: bad state zalloc\n");
return (DDI_FAILURE);
}
DERR("attach: cannot get soft state\n");
return (DDI_FAILURE);
}
/*
* Stuff private info into dip.
*/
daplka_dev = dp;
/*
* Register driver with IBTF
*/
&dp->daplka_clnt_hdl);
if (retval != IBT_SUCCESS) {
goto error;
}
/* Register to receive SM events */
if (retval != IBT_SUCCESS) {
goto error;
}
/*
* this table is used by cr_handoff
*/
if (retval != 0) {
DERR("attach: cannot create sp hash table\n");
goto error;
}
/*
* this table stores per EP timer information.
* timer_info_t objects are inserted into this table whenever
* an EP timer is set. timers get removed when they expire
* or when they get cancelled.
*/
if (retval != 0) {
DERR("attach: cannot create timer hash table\n");
goto error;
}
/*
* this taskq is currently only used for processing timers.
* other processing may also use this taskq in the future.
*/
if (daplka_taskq == NULL) {
DERR("attach: cannot create daplka_taskq\n");
goto error;
}
/*
* daplka_shared_mr_tree holds daplka_shared_mr_t objects that
* gets retrieved or created when daplka_mr_register_shared is
* called.
*/
sizeof (daplka_shared_mr_t),
/*
* Create the filesystem device node.
*/
DERR("attach: bad create_minor_node\n");
goto error;
}
return (DDI_SUCCESS);
if (shared_mr_tree_allocated) {
}
if (daplka_taskq) {
daplka_taskq = NULL;
}
if (timer_htbl_allocated) {
}
if (sp_htbl_allocated) {
}
if (err != IBT_SUCCESS) {
}
/* unregister SM event notification */
if (err != IBT_SUCCESS) {
}
}
}
return (retval);
}
/*
* Detach - Free resources allocated in attach
*/
/* ARGSUSED */
static int
{
if (cmd != DDI_DETACH) {
return (DDI_FAILURE);
}
if (daplka_resource.daplka_rc_cnt > 0 ||
daplka_pending_close > 0) {
DERR("detach: driver in use\n");
return (DDI_FAILURE);
}
DERR("detach: cannot get soft state\n");
return (DDI_FAILURE);
}
if (err != IBT_SUCCESS) {
return (DDI_FAILURE);
}
/* unregister SM event notification */
if (err != IBT_SUCCESS) {
return (DDI_FAILURE);
}
}
}
daplka_dev = NULL;
/*
* by the time we get here, all clients of dapl should
* have exited and completed their cleanup properly.
* we can assert that all global data structures are now
* empty.
*/
return (DDI_SUCCESS);
}
/* ARGSUSED */
static int
{
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
if (daplka_dev != NULL) {
return (DDI_SUCCESS);
} else {
return (DDI_FAILURE);
}
case DDI_INFO_DEVT2INSTANCE:
*result = 0;
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*
* creates an EP resource.
* An EP resource contains an RC channel. An EP resource holds a
* reference to a send_evd (for the send CQ), recv_evd (for the
* recv CQ), a connection evd and a PD. These references ensure
* that the referenced resources are not freed until the EP itself
* gets freed.
*/
/* ARGSUSED */
static int
{
int retval;
D3("ep_create: enter\n");
mode);
if (retval != 0) {
return (EFAULT);
}
DERR("ep_create: cannot allocate ep_rp\n");
return (ENOMEM);
}
ep_rp->ep_timer_hkey = 0;
/*
* we don't have to use ep_get_state here because ep_rp is not in
* ep_htbl yet. refer to the description of daplka_ep_set_state
* for details about the EP state machine.
*/
/* get reference to send evd and get cq handle */
DERR("ep_create: ep_snd_evd %llx not found\n",
goto cleanup;
}
DERR("ep_create: ep_snd_evd cq invalid\n");
goto cleanup;
}
/* get reference to recv evd and get cq handle */
DERR("ep_create: ep_rcv_evd %llx not found\n",
goto cleanup;
}
DERR("ep_create: ep_rcv_evd cq invalid\n");
goto cleanup;
}
/* get reference to conn evd */
DERR("ep_create: ep_conn_evd %llx not found\n",
goto cleanup;
}
/* get reference to SRQ if needed */
if (args.ep_srq_attached) {
DERR("ep_create: ep_srq %llx not found\n",
goto cleanup;
}
} else {
}
/* get pd handle */
pd_rp = (daplka_pd_resource_t *)
DERR("ep_create: cannot find pd resource\n");
goto cleanup;
}
/*
* these checks ensure that the requested channel sizes
* are within the limits supported by the chosen HCA.
*/
DERR("ep_create: invalid cs_sq_sgl %d\n",
goto cleanup;
}
DERR("ep_create: invalid cs_rq_sgl %d\n",
goto cleanup;
}
DERR("ep_create: invalid cs_sq %d\n",
goto cleanup;
}
DERR("ep_create: invalid cs_rq %d\n",
goto cleanup;
}
if (args.ep_srq_attached) {
} else {
}
D3("ep_create: sq_sgl %d, rq_sgl %d, sq %d, rq %d, "
"sig_type 0x%x, control 0x%x, portnum %d, clone_chan 0x%p\n",
if (args.ep_srq_attached) {
} else {
}
/* create rc channel */
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/*
* store ep ptr with chan_hdl.
* this ep_ptr is used by the CM handlers (both active and
* passive)
* mutex is only needed for race of "destroy" and "async"
*/
/* Get HCA-specific data_out info */
if (status != IBT_SUCCESS) {
DERR("ep_create: ibt_ci_data_out error(%d)\n",
status);
retval = 0;
goto cleanup;
}
/* insert into ep hash table */
if (retval != 0) {
DERR("ep_create: cannot insert ep resource into ep_htbl\n");
goto cleanup;
}
/*
* at this point, the ep_rp can be looked up by other threads
* if they manage to guess the correct hkey. but they are not
* permitted to operate on ep_rp until we transition to the
* CLOSED state.
*/
/* return hkey to library */
mode);
if (retval != 0) {
goto cleanup;
}
D3("ep_create: exit\n");
return (0);
if (inserted) {
(void **)&free_rp);
/*
* this case is impossible because ep_free will
* wait until our state transition is complete.
*/
DERR("ep_create: cannot remove ep from hash table\n");
return (retval);
}
}
return (retval);
}
/*
* daplka_ep_get_state retrieves the current state of the EP and
* sets the state to TRANSITIONING. if the current state is already
* TRANSITIONING, this function will wait until the state becomes one
* of the other EP states. Most of the EP related ioctls follow the
* call sequence:
*
* new_state = old_state = daplka_ep_get_state(ep_rp);
* ...
* ...some code that affects the EP
* ...
* new_state = <NEW_STATE>;
* daplka_ep_set_state(ep_rp, old_state, new_state);
*
* this call sequence ensures that only one thread may access the EP
* during the time ep_state is in TRANSITIONING. daplka_ep_set_state
* transitions ep_state to new_state and wakes up any waiters blocking
* on ep_cv.
*
*/
static uint32_t
{
D2("get_state: wait for state transition to complete\n");
}
/*
* an ep that is in the FREED state cannot transition
* back to any of the regular states
*/
if (old_state != DAPLKA_EP_STATE_FREED) {
}
return (old_state);
}
/*
* EP state transition diagram
*
* CLOSED<-------------------
* | |
* | |
* ------------------------ |
* | | |
* | | |
* v v |
* CONNECTING ACCEPTING |
* | | | | | |
* | | | | | |
* | | | | | |
* | | |_______|_______| |
* | | | | | |
* | |___________| | | |
* | | | | |
* | v | |---->DISCONNECTED
* | CONNECTED | ^
* v | | |
* ABORTING |---------|--------------|
* | | | |
* | | v |
* | |-------->DISCONNECTING--|
* | |
* |---------------------------------|
*
* *not shown in this diagram:
* -loopback transitions
* -transitions to the FREED state
*/
static boolean_t
{
/*
* resetting to the same state is a no-op and is always
* permitted. transitioning to the FREED state indicates
* that the ep is about to be freed and no further operation
* is allowed on it. to support abrupt close, the ep is
* permitted to transition to the FREED state from any state.
*/
return (B_TRUE);
}
switch (old_state) {
case DAPLKA_EP_STATE_CLOSED:
/*
* this is the initial ep_state.
* a transition to CONNECTING or ACCEPTING may occur
* upon calling daplka_ep_connect or daplka_cr_accept,
* respectively.
*/
if (new_state == DAPLKA_EP_STATE_CONNECTING ||
}
break;
/*
* we transition to this state if daplka_ep_connect
* is successful. from this state, we can transition
* to CONNECTED if daplka_cm_rc_conn_est gets called;
* or to DISCONNECTED if daplka_cm_rc_conn_closed or
* daplka_cm_rc_event_failure gets called. If the
* client calls daplka_ep_disconnect, we transition
* to DISCONNECTING. If a timer was set at ep_connect
* time and if the timer expires prior to any of the
* CM callbacks, we transition to ABORTING and then
* to DISCONNECTED.
*/
if (new_state == DAPLKA_EP_STATE_CONNECTED ||
}
break;
/*
* we transition to this state if daplka_cr_accept
* is successful. from this state, we can transition
* to CONNECTED if daplka_cm_service_conn_est gets called;
* or to DISCONNECTED if daplka_cm_service_conn_closed or
* daplka_cm_service_event_failure gets called. If the
* client calls daplka_ep_disconnect, we transition to
* DISCONNECTING.
*/
if (new_state == DAPLKA_EP_STATE_CONNECTED ||
}
break;
/*
* we transition to this state if a active or passive
* connection gets established. if the client calls
* daplka_ep_disconnect, we transition to the
* DISCONNECTING state. subsequent CM callbacks will
* cause ep_state to be set to DISCONNECTED. If the
* remote peer terminates the connection before we do,
* it is possible for us to transition directly from
* CONNECTED to DISCONNECTED.
*/
if (new_state == DAPLKA_EP_STATE_DISCONNECTING ||
}
break;
/*
* we transition to this state if the client calls
* daplka_ep_disconnect.
*/
if (new_state == DAPLKA_EP_STATE_DISCONNECTED) {
}
break;
case DAPLKA_EP_STATE_ABORTING:
/*
* we transition to this state if the active side
* EP timer has expired. this is only a transient
* state that is set during timer processing. when
* timer processing completes, ep_state will become
* DISCONNECTED.
*/
if (new_state == DAPLKA_EP_STATE_DISCONNECTED) {
}
break;
/*
* we transition to this state if we get a closed
* or event_failure CM callback. an expired timer
* can also cause us to be in this state. this
* is the only state in which we permit the
* ep_reinit operation.
*/
if (new_state == DAPLKA_EP_STATE_CLOSED) {
}
break;
default:
break;
}
if (!valid) {
DERR("ep_transition: invalid state change %d -> %d\n",
}
return (valid);
}
/*
* first check if the transition is valid. then set ep_state
* to new_state and wake up all waiters.
*/
static void
{
if (valid) {
} else {
/*
* this case is impossible.
* we have a serious problem if we get here.
* instead of panicing, we reset the state to
* old_state. doing this would at least prevent
* threads from hanging due to ep_state being
* stuck in TRANSITIONING.
*/
}
}
}
/*
* modifies RC channel attributes.
* currently, only the rdma_in and rdma_out attributes may
* be modified. the channel must be in quiescent state when
* this function is called.
*/
/* ARGSUSED */
static int
{
int retval = 0;
mode);
if (retval != 0) {
return (EFAULT);
}
ep_rp = (daplka_ep_resource_t *)
DERR("ep_modify: cannot find ep resource\n");
return (EINVAL);
}
if (old_state != DAPLKA_EP_STATE_CLOSED &&
goto cleanup;
}
goto cleanup;
}
DERR("ep_modify: invalid epm_rdma_ra_out %d\n",
goto cleanup;
}
}
DERR("ep_modify: epm_rdma_ra_in %d\n",
goto cleanup;
}
}
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/*
* ep_modify does not change ep_state
*/
cleanup:;
return (retval);
}
/*
* Frees an EP resource.
* An EP may only be freed when it is in the CLOSED or
* DISCONNECTED state.
*/
/* ARGSUSED */
static int
{
int retval;
if (retval != 0) {
return (EFAULT);
}
ep_rp = (daplka_ep_resource_t *)
DERR("ep_free: cannot find ep resource\n");
return (EINVAL);
}
/*
* ep cannot be freed if it is in an invalid state.
*/
if (old_state != DAPLKA_EP_STATE_CLOSED &&
goto cleanup;
}
/*
* this is only possible if we have two threads
* calling ep_free in parallel.
*/
DERR("ep_free: cannot find ep resource\n");
goto cleanup;
}
/* there should not be any outstanding timers */
/* remove reference obtained by lookup */
/* UNREF calls the actual free function when refcnt is zero */
return (0);
cleanup:;
/* remove reference obtained by lookup */
return (retval);
}
/*
* The following routines supports the timeout feature of ep_connect.
* Refer to the description of ep_connect for details.
*/
/*
* this is the timer processing thread.
*/
static void
{
if (old_state != DAPLKA_EP_STATE_CONNECTING) {
/* unblock hash_ep_free */
ep_rp->ep_timer_hkey = 0;
/* reset state to original state */
/* this function will also unref ep_rp */
return;
}
ep_rp->ep_timer_hkey = 0;
/*
* we cannot keep ep_state in TRANSITIONING if we call
* ibt_close_rc_channel in blocking mode. this would cause
* a deadlock because the cm callbacks will be blocked and
* will not be able to wake us up.
*/
/*
* when we return from close_rc_channel, all callbacks should have
* completed. we can also be certain that these callbacks did not
* enqueue any events to conn_evd.
*/
if (status != IBT_SUCCESS) {
DERR("timer_thread: ibt_close_rc_channel returned %d\n",
status);
}
/*
* this is the only thread that can transition ep_state out
* of ABORTING. all other ep operations would fail when
* ep_state is in ABORTING.
*/
D2("timer_thread: enqueue event(%p) evdp(%p)\n",
/* this function will also unref ep_rp */
}
/*
* dispatches a thread to continue with timer processing.
*/
static void
{
/*
* keep rescheduling this function until
* taskq_dispatch succeeds.
*/
DERR("timer_dispatch: taskq_dispatch failed, retrying...\n");
}
}
/*
* this function is called by the kernel's callout thread.
* we first attempt to remove the timer object from the
* global timer table. if it is found, we dispatch a thread
* to continue processing the timer object. if it is not
* found, that means the timer has been cancelled by someone
* else.
*/
static void
{
(void) daplka_hash_remove(&daplka_timer_info_htbl,
timer_hkey, (void **)&timerp);
D2("timer_handler: timer already cancelled\n");
return;
}
daplka_timer_dispatch((void *)timerp);
}
/*
* allocates a timer_info object.
* a reference to a EP is held by this object. this ensures
* that the EP stays valid when a timer is outstanding.
*/
static daplka_timer_info_t *
{
DERR("timer_info_alloc: cannot allocate timer info\n");
return (NULL);
}
return (timerp);
}
/*
* Frees the timer_info object.
* we release the EP reference before freeing the object.
*/
static void
{
}
/*
* cancels the timer set by ep_connect.
* returns -1 if timer handling is in progress
* and 0 otherwise.
*/
static int
{
/*
* this function can only be called when ep_state
* is frozen.
*/
if (ep_rp->ep_timer_hkey != 0) {
(void) daplka_hash_remove(&daplka_timer_info_htbl,
/*
* this is possible if the timer_handler has
* removed the timerp but the taskq thread has
* not transitioned the ep_state to DISCONNECTED.
* we need to reset the ep_state to allow the
* taskq thread to continue with its work. the
* taskq thread will set the ep_timer_hkey to 0
* so we don't have to do it here.
*/
DERR("cancel_timer: timer is being processed\n");
return (-1);
}
/*
* we got the timer object. if the handler fires at
* this point, it will not be able to find the object
* and will return immediately. normally, ti_tmo_id gets
* cleared when the handler fires.
*/
/*
* note that untimeout can possibly call the handler.
* we are safe because the handler will be a no-op.
*/
ep_rp->ep_timer_hkey = 0;
}
return (0);
}
/*
* this function is called by daplka_hash_destroy for
* freeing timer_info objects
*/
static void
{
}
/* ARGSUSED */
static uint16_t
{
int i;
for (i = 0; i < sizeof (DAPL_PRIVATE); i++) {
}
return (cksum);
}
/*
* ep_connect is called by the client to initiate a connection to a
* remote service point. It is a non-blocking call. If a non-zero
* timeout is specified by the client, a timer will be set just before
* returning from ep_connect. Upon a successful return from ep_connect,
* the client will call evd_wait to wait for the connection to complete.
* If the connection is rejected or has failed due to an error, the
* client will be notified with an event containing the appropriate error
* code. If the connection is accepted, the client will be notified with
* the CONN_ESTABLISHED event. If the timer expires before either of the
* above events (error or established), a TIMED_OUT event will be delivered
* to the client.
*
* the complicated part of the timer logic is the handling of race
* conditions with CM callbacks. we need to ensure that either the CM or
* the timer thread gets to deliver an event, but not both. when the
* CM callback is about to deliver an event, it always tries to cancel
* the outstanding timer. if cancel_timer indicates that the timer is
* already being processed, the CM callback will simply return without
* delivering an event. when the timer thread executes, it tries to check
* if the EP is still in CONNECTING state (timers only work on the active
* side). if the EP is not in this state, the timer thread will return
* without delivering an event.
*/
/* ARGSUSED */
static int
{
void *priv_data;
int retval = 0;
D3("ep_connect: enter\n");
mode);
if (retval != 0) {
return (EFAULT);
}
ep_rp = (daplka_ep_resource_t *)
DERR("ep_connect: cannot find ep resource\n");
return (EINVAL);
}
if (old_state != DAPLKA_EP_STATE_CLOSED) {
goto cleanup;
}
DERR("ep_connect: private data len (%d) exceeded "
goto cleanup;
}
/*
* check for remote ipaddress to dgid resolution needs ATS
*/
#if defined(DAPLKA_DEBUG_FORCE_ATS)
#endif /* DAPLKA_DEBUG_FORCE_ATS */
/* check for unidentified dgid */
/*
* setup for ibt_query_ar()
*/
ar_query_s.ar_pkey = 0;
D3("daplka_ep_connect: SA[8] %d.%d.%d.%d\n",
D3("daplka_ep_connect: SA[12] %d.%d.%d.%d\n",
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/*
* dgid identified from SA record
*/
D2("daplka_ep_connect: ATS dgid=%llx:%llx\n",
}
/*
* don't set sid in path_attr saves 1 SA query
* Also makes server side not to write the service record
*/
/* save the connection ep - struct copy */
num_paths = 0;
/* enable APM on remote port but not on loopback case */
}
DERR("ep_connect: ibt_get_paths returned %d paths %d\n",
retval = 0;
goto cleanup;
}
/* fill in the sid directly to path_info */
/* fill in open channel args */
/*
* calculate checksum value of hello message and
* put hello message in networking byte order
*/
if (args.epc_timeout > 0) {
/*
* increment refcnt before passing reference to
* timer_info_alloc.
*/
DERR("ep_connect: cannot allocate timer\n");
/*
* we need to remove the reference if
* allocation failed.
*/
goto cleanup;
}
/*
* We generate our own hkeys so that timer_hkey can fit
* into a pointer and passed as an arg to timeout()
*/
&timer_hkey, (void *)timerp);
if (retval != 0) {
DERR("ep_connect: cannot insert timer info\n");
goto cleanup;
}
D2("ep_connect: timer_hkey = 0x%llx\n",
}
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/*
* if a cm callback gets called at this point, it'll have to wait until
* ep_state becomes connecting (or some other state if another thread
* manages to get ahead of the callback). this guarantees that the
* callback will not touch the timer until it gets set.
*/
/*
* We generate our own 32 bit timer_hkey so that it can fit
* into a pointer
*/
ASSERT(timer_hkey != 0);
}
cleanup:;
/*
* if ibt_open_rc_channel failed, the timerp must still
* be in daplka_timer_info_htbl because neither the cm
* callback nor the timer_handler will be called.
*/
if (timer_inserted) {
ASSERT(timer_hkey != 0);
(void) daplka_hash_remove(&daplka_timer_info_htbl,
timer_hkey, (void **)&new_timerp);
ep_rp->ep_timer_hkey = 0;
}
}
D3("ep_connect: exit\n");
return (retval);
}
/*
* ep_disconnect closes a connection with a remote peer.
* if a connection has not been established, ep_disconnect
* will instead flush all recv bufs posted to this channel.
* if the EP state is CONNECTED, CONNECTING or ACCEPTING upon
* entry to ep_disconnect, the EP state will transition to
* DISCONNECTING upon exit. the CM callbacks triggered by
* ibt_close_rc_channel will cause EP state to become
* DISCONNECTED. This function is a no-op if EP state is
* DISCONNECTED.
*/
/* ARGSUSED */
static int
{
int retval = 0;
mode);
if (retval != 0) {
return (EFAULT);
}
ep_rp = (daplka_ep_resource_t *)
DERR("ep_disconnect: cannot find ep resource\n");
return (EINVAL);
}
if (old_state != DAPLKA_EP_STATE_CONNECTED &&
goto cleanup;
}
if ((old_state == DAPLKA_EP_STATE_DISCONNECTED) ||
D2("ep_disconnect: ep already disconnected\n");
retval = 0;
/* we leave the state as DISCONNECTED */
goto cleanup;
}
if (old_state == DAPLKA_EP_STATE_CONNECTING ||
}
/*
* according to the udapl spec, ep_disconnect should
* flush the channel if the channel is not CONNECTED.
*/
if (old_state == DAPLKA_EP_STATE_CLOSED) {
if (status != IBT_SUCCESS) {
DERR("ep_disconnect: ibt_flush_channel failed %d\n",
status);
}
retval = 0;
/* we leave the state as CLOSED */
goto cleanup;
}
if (status == IBT_SUCCESS) {
return (retval);
} else {
DERR("ep_disconnect: ibt_close_rc_channel returned %d\n",
status);
retval = 0;
}
cleanup:;
return (retval);
}
/*
* this function resets the EP to a usable state (ie. from
* DISCONNECTED to CLOSED). this function is best implemented using
* the ibt_recycle_channel interface. until that is available, we will
* instead clone and tear down the existing channel and replace the
* existing channel with the cloned one.
*/
/* ARGSUSED */
static int
{
int retval = 0;
mode);
if (retval != 0) {
return (EFAULT);
}
ep_rp = (daplka_ep_resource_t *)
DERR("reinit: cannot find ep resource\n");
return (EINVAL);
}
if ((old_state != DAPLKA_EP_STATE_CLOSED) &&
goto cleanup;
}
if (status != IBT_SUCCESS) {
DERR("reinit: unable to clone channel\n");
retval = 0;
goto cleanup;
}
cleanup:;
return (retval);
}
/*
* destroys a EP resource.
* called when refcnt drops to zero.
*/
static int
{
/*
* by the time we get here, we can be sure that
* there is no outstanding timer.
*/
D3("ep_destroy: entering, ep_rp 0x%p, rnum %d\n",
/*
* free rc channel
*/
if (status != IBT_SUCCESS) {
DERR("ep_free: ibt_free_channel returned %d\n",
status);
}
}
/*
* release all references
*/
}
}
}
}
}
return (0);
}
/*
* this function is called by daplka_hash_destroy for
* freeing EP resource objects
*/
static void
{
int retval;
if (retval != 0) {
D2("hash_ep_free: ep_rp 0x%p "
"timer is still being processed\n", ep_rp);
if (ep_rp->ep_timer_hkey != 0) {
D2("hash_ep_free: ep_rp 0x%p "
"waiting for timer_hkey to be 0\n", ep_rp);
}
}
/* call ibt_close_rc_channel regardless of what state we are in */
if (status != IBT_SUCCESS) {
if (old_state == DAPLKA_EP_STATE_CONNECTED ||
DERR("hash_ep_free: ep_rp 0x%p state %d "
"unexpected error %d from close_rc_channel\n",
}
}
}
/*
* creates an EVD resource.
* an EVD is used by the client to wait for events from one
* or more sources.
*/
/* ARGSUSED */
static int
{
int retval = 0;
mode);
if (retval != 0) {
return (EFAULT);
}
~(DAT_EVD_DEFAULT_FLAG | DAT_EVD_SOFTWARE_FLAG)) != 0) {
return (EINVAL);
}
/*
* if the client specified a non-zero cno_hkey, we
* lookup the cno and save the reference for later use.
*/
if (args.evd_cno_hkey > 0) {
cno_rp = (daplka_cno_resource_t *)
DERR("evd_create: cannot find cno resource\n");
goto cleanup;
}
}
(DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG)) != 0) {
DERR("evd_create: invalid cq size %d",
goto cleanup;
}
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/*
* store evd ptr with cq_hdl
* mutex is only needed for race of "destroy" and "async"
*/
/* Get HCA-specific data_out info */
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
(void *)evd_rp);
}
if (retval != 0) {
goto cleanup;
}
/*
* If this evd handles async events need to add to the IA resource
* async evd list
*/
/* add the evd to the head of the list */
}
if (retval != 0) {
goto cleanup;
}
return (0);
cleanup:;
if (inserted) {
(void **)&free_rp);
DERR("evd_create: cannot remove evd\n");
/*
* we can only get here if another thread
* has completed the cleanup in evd_free
*/
return (retval);
}
}
return (retval);
}
/*
* resizes CQ and returns new mapping info to library.
*/
/*
 * NOTE(review): extract is missing the name line, locals, and the
 * ibt_resize_cq/copyin calls.  Debug strings suggest daplka_cq_resize —
 * TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
mode);
if (retval != 0) {
return (EFAULT);
}
/* get evd resource */
evd_rp = (daplka_evd_resource_t *)
DERR("cq_resize: cannot find evd resource\n");
return (EINVAL);
}
goto cleanup;
}
/*
* If ibt_resize_cq fails that it is primarily due to resource
* shortage. Per IB spec resize will never loose events and
* a resize error leaves the CQ intact. Therefore even if the
* resize request fails we proceed and get the mapping data
* from the CQ so that the library can mmap it.
*/
if (status != IBT_SUCCESS) {
/* we return the size of the old CQ if resize fails */
} else {
}
D2("cq_resize(%d): done new_sz(%u) real_sz(%u)\n",
/* Get HCA-specific data_out info */
if (status != IBT_SUCCESS) {
/* return ibt_ci_data_out status */
retval = 0;
goto cleanup;
}
mode);
if (retval != 0) {
goto cleanup;
}
cleanup:;
}
return (retval);
}
/*
* Routine to copyin the event poll message so that 32 bit libraries
* can be safely supported
*/
/*
 * NOTE(review): name line missing — per the preceding comment this is the
 * 32/64-bit copyin helper for the event-poll ioctl args (likely
 * daplka_event_poll_copyin) — TODO confirm.  Code kept byte-identical.
 */
int
{
int retval;
#ifdef _MULTI_DATAMODEL
sizeof (dapl_event_poll32_t), mode);
if (retval != 0) {
return (EFAULT);
}
return (0);
}
#endif
mode);
if (retval != 0) {
return (EFAULT);
}
return (0);
}
/*
* Routine to copyout the event poll message so that 32 bit libraries
* can be safely supported
*/
/*
 * NOTE(review): name line missing — per the preceding comment this is the
 * 32/64-bit copyout helper for the event-poll ioctl args (likely
 * daplka_event_poll_copyout) — TODO confirm.  Code kept byte-identical.
 */
int
{
int retval;
#ifdef _MULTI_DATAMODEL
sizeof (dapl_event_poll32_t), mode);
if (retval != 0) {
return (EFAULT);
}
return (0);
}
#endif
sizeof (dapl_event_poll_t), mode);
if (retval != 0) {
return (EFAULT);
}
return (0);
}
/*
* fucntion to handle CM REQ RCV private data from Solaris or third parties
*/
/*
 * NOTE(review): the name line, locals, and most assignments are missing from
 * this extract.  Debug strings name it daplka_crevent_privdata_post — handles
 * CM REQ RCV private data from Solaris or third-party peers.  Code kept
 * byte-identical.
 */
/* ARGSUSED */
static void
{
/*
* get private data and len
*/
#if defined(DAPLKA_DEBUG_FORCE_ATS)
/* skip the DAPL_PRIVATE chekcsum check */
#else
/* for remote connects */
/* look up hello message in the CM private data area */
if (clen >= sizeof (DAPL_PRIVATE) &&
D2("daplka_crevent_privdata_post: Solaris msg\n");
return;
}
}
#endif /* DAPLKA_DEBUG_FORCE_ATS */
D2("daplka_crevent_privdata_post: 3rd party msg\n");
/* transpose CM private data into hello message */
if (clen) {
if (clen > DAPL_CONSUMER_MAX_PRIVATE_DATA_SIZE) {
}
} else {
}
/*
* fill in hello message
*/
/* assign sgid and dgid */
/* reverse ip address lookup through ATS */
if (status == IBT_SUCCESS) {
/* determine the address families */
if (ipaddr_ord == 0) {
} else {
}
D3("daplka_privdata_post: family=%d :SA[8] %d.%d.%d.%d\n",
D3("daplka_privdata_post: SA[12] %d.%d.%d.%d\n",
} else {
/* non-conformed third parties */
}
}
/*
* this function is called by evd_wait and evd_dequeue to wait for
* connection events and CQ notifications. typically this function
* is called when the userland CQ is empty and the client has
* specified a non-zero timeout to evd_wait. if the client is
* interested in CQ events, the CQ must be armed in userland prior
* to calling this function.
*/
/*
 * NOTE(review): extract is missing the name line, locals (evd_rp, head,
 * num_events, max_events, timeout, evp arrays), the copyin/cv_reltimedwait
 * calls and the maxevent_reached label.  Debug strings name it
 * daplka_event_poll — TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int threshold;
void *pd;
int retval = 0;
int rc;
if (retval != 0) {
return (EFAULT);
}
DERR("event_poll: evp_ep cannot be NULL if num_wc=%d",
return (EINVAL);
}
/*
* Note: dequeue requests have a threshold = 0, timeout = 0
*/
/* ensure library is passing sensible values */
if (max_events < threshold) {
DERR("event_poll: max_events(%d) < threshold(%d)\n",
return (EINVAL);
}
/* Do a sanity check to avoid excessive memory allocation */
if (max_events > DAPL_EVD_MAX_EVENTS) {
DERR("event_poll: max_events(%d) > %d",
return (EINVAL);
}
D4("event_poll: threshold(%d) timeout(0x%llx) max_events(%d)\n",
/* get evd resource */
evd_rp = (daplka_evd_resource_t *)
DERR("event_poll: cannot find evd resource\n");
return (EINVAL);
}
/*
* Use event array on the stack if possible
*/
if (max_events <= NUM_EVENTS_PER_POLL) {
} else {
DERR("event_poll: kmem_zalloc failed, evp_size %d",
evp_size);
goto cleanup;
}
}
/*
* The Event poll algorithm is as follows -
* The library passes a buffer big enough to hold "max_events"
* events. max_events is >= threshold. If at any stage we get
* max_events no. of events we bail. The events are polled in
* the following order -
* 1) Check for CR events in the evd_cr_events list
* 2) Check for Connection events in the evd_connection_events list
*
* If after the above 2 steps we don't have enough(>= threshold) events
* we block for CQ notification and sleep. Upon being woken up we start
* at step 1 again.
*/
/*
* Note: this could be 0 or INFINITE or anyother value in microsec
*/
if (args.evp_timeout > 0) {
} else {
/*
* use the max value if we wrapped around
*/
}
}
} else {
timeout = 0;
}
for (;;) {
/*
* If this evd is waiting for CM events check that now.
*/
/* dequeue events from evd_cr_events list */
while (head = daplka_evd_event_dequeue(
&evd_rp->evd_cr_events)) {
/*
* populate the evp array
*/
if (++num_events == max_events) {
goto maxevent_reached;
}
}
}
/* dequeue events from evd_connection_events list */
while ((head = daplka_evd_event_dequeue
(&evd_rp->evd_conn_events))) {
/*
* populate the evp array -
*
*/
} else {
}
n = head->
ibe_ce.ibce_priv_data_ptr, n);
ibce_priv_data_size = n;
}
if (++num_events == max_events) {
goto maxevent_reached;
}
}
}
/* dequeue events from evd_async_events list */
while (head = daplka_evd_event_dequeue(
&evd_rp->evd_async_events)) {
/*
* populate the evp array
*/
if (++num_events == max_events) {
break;
}
}
}
/*
* We have sufficient events for this call so no need to wait
*/
break;
}
evd_rp->evd_waiters++;
/*
* There are no new events and a timeout was specified.
* Note: for CQ events threshold is 0 but timeout is
* not necessarily 0.
*/
timeout) {
if (retval == 0) {
break;
} else if (retval == -1) {
break;
} else {
retval = 0;
continue;
}
}
evd_rp->evd_waiters--;
/*
* If we got woken up by the CQ handler due to events
* in the CQ. Need to go to userland to check for
* CQ events. Or if we were woken up due to S/W events
*/
/* check for userland events only */
if (!(evd_rp->evd_newevents &
break;
}
/*
* Clear newevents since we are going to loopback
* back and check for both CM and CQ events
*/
} else { /* error */
break;
}
}
/*
* At this point retval might have a value that we want to return
* back to the user. So the copyouts shouldn't tamper retval.
*/
sizeof (dapl_ib_event_t), mode);
if (rc != 0) { /* XXX: we are losing events here */
goto cleanup;
}
if (rc != 0) { /* XXX: we are losing events here */
goto cleanup;
}
}
cleanup:;
}
}
return (retval);
}
/*
 * NOTE(review): name line and the copyin/wakeup calls are missing.  Debug
 * string names it event_wakeup (likely daplka_event_wakeup) — TODO confirm.
 * Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
mode);
if (retval != 0) {
return (EFAULT);
}
/* get evd resource */
evd_rp = (daplka_evd_resource_t *)
DERR("event_wakeup: cannot find evd resource\n");
return (EINVAL);
}
return (retval);
}
/*
 * NOTE(review): name line, locals and the CNO swap statements are missing.
 * Debug strings name it evd_modify_cno (likely daplka_evd_modify_cno) —
 * TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
mode);
if (retval != 0) {
return (EFAULT);
}
/* get evd resource */
evd_rp = (daplka_evd_resource_t *)
DERR("evd_modify_cno: cannot find evd resource\n");
goto cleanup;
}
if (args.evmc_cno_hkey > 0) {
/* get cno resource corresponding to the new CNO */
cno_rp = (daplka_cno_resource_t *)
DERR("evd_modify_cno: cannot find CNO resource\n");
goto cleanup;
}
} else {
}
/*
* drop the refcnt on the old CNO, the refcnt on the new CNO is
* retained since the evd holds a reference to it.
*/
if (old_cno_rp) {
}
if (evd_rp) {
}
return (retval);
}
/*
* Frees the EVD and associated resources.
* If there are other threads still using this EVD, the destruction
* will defer until the EVD's refcnt drops to zero.
*/
/*
 * NOTE(review): name line, copyin, hash-remove call and the async-evd list
 * walk are missing.  Debug string names it evd_free (likely daplka_evd_free)
 * — TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
if (retval != 0) {
return (EFAULT);
}
(void **)&evd_rp);
DERR("evd_free: cannot find evd resource\n");
return (EINVAL);
}
/* If this is an async evd remove it from the IA's async evd list */
/* unlink curr from the list */
/*
* if first element in the list update
* the list head
*/
} else {
}
break;
}
}
/* free the curr entry */
}
/* UNREF calls the actual free function when refcnt is zero */
return (0);
}
/*
* destroys EVD resource.
* called when refcnt drops to zero.
*/
/*
 * NOTE(review): name line, the ibt_free_cq call and the event-dequeue loops
 * are missing.  Debug strings name it evd_destroy (likely daplka_evd_destroy,
 * run when the EVD refcnt drops to zero) — TODO confirm.  Code kept
 * byte-identical.
 */
static int
{
D3("evd_destroy: entering, evd_rp 0x%p, rnum %d\n",
/*
* free CQ
*/
if (evd_rp->evd_cq_hdl) {
if (status != IBT_SUCCESS) {
}
}
/*
* release reference on CNO
*/
evd_rp->evd_cookie) {
}
}
/*
* discard all remaining events
*/
D2("evd_destroy: discarding CR event: %d\n",
if (len > 0) {
}
}
D2("evd_destroy: discarding CONN event: %d\n",
if (len > 0) {
}
}
DERR("evd_destroy: discarding ASYNC event: %d\n",
}
return (0);
}
/*
 * NOTE(review): name line missing — presumably the hash-destroy callback for
 * EVD resources (daplka_hash_evd_free); body content absent in this extract.
 */
static void
{
}
/*
* this handler fires when new completions arrive.
*/
/*
 * NOTE(review): name line and the evd_newevents update statements are
 * missing; per the preceding comment this is the CQ completion handler
 * (likely daplka_cq_handler) — TODO confirm.
 */
/* ARGSUSED */
static void
{
D3("cq_handler: fired setting evd_newevents\n");
}
/*
* this routine wakes up a client from evd_wait. if evtq and evt
* are non-null, the event evt will be enqueued prior to waking
* up the client. if the evd is associated with a CNO and if there
* are no waiters on the evd, the CNO will be notified.
*/
/*
 * NOTE(review): name line, enqueue and cv_broadcast/CNO-notify statements are
 * missing; per the preceding comment this is the EVD wakeup routine (likely
 * daplka_evd_wakeup) — TODO confirm.  Code kept byte-identical.
 */
static void
{
} else {
}
/*
* only wakeup the CNO if there are no waiters on this evd.
*/
}
}
/*
* daplka_evd_event_enqueue adds elem to the end of the event list
* The caller is expected to acquire appropriate locks before
* calling enqueue
*/
/*
 * NOTE(review): name line and the tail-append statements are missing; the
 * preceding comment identifies this as daplka_evd_event_enqueue (caller must
 * hold the appropriate lock).  Code kept byte-identical.
 */
static void
{
} else {
/* list is empty */
}
}
/*
* daplka_evd_event_dequeue removes and returns the first element of event
* list. NULL is returned if the list is empty. The caller is expected to
* acquire appropriate locks before calling enqueue.
*/
/*
 * NOTE(review): name line and the head-unlink statements are missing; the
 * preceding comment identifies this as daplka_evd_event_dequeue (returns NULL
 * on an empty list; caller must hold the lock).  Code kept byte-identical.
 */
static daplka_evd_event_t *
{
return (NULL);
}
/* if it was the last element update the tail pointer too */
}
return (head);
}
/*
* A CNO allows the client to wait for notifications from multiple EVDs.
* To use a CNO, the client needs to follow the procedure below:
* 1. allocate a CNO. this returns a cno_hkey that identifies the CNO.
* 2. create one or more EVDs using the returned cno_hkey.
* 3. call cno_wait. when one of the associated EVDs get notified, the
* CNO will also get notified. cno_wait will then return with a
* evd_cookie identifying the EVD that triggered the event.
*
* A note about cno_wait:
* -unlike a EVD, a CNO does not maintain a queue of notifications. For
* example, suppose multiple EVDs triggered a CNO before the client calls
* cno_wait; when the client calls cno_wait, it will return with the
* evd_cookie that identifies the *last* EVD that triggered the CNO. It
* is the responsibility of the client, upon returning from cno_wait, to
* check on all EVDs that can potentially trigger the CNO. the returned
* evd_cookie is only meant to be a hint. there is no guarantee that the
* EVD identified by the evd_cookie still contains an event or still
* exists by the time cno_wait returns.
*/
/*
* allocates a CNO.
* the returned cno_hkey may subsequently be used in evd_create.
*/
/*
 * NOTE(review): name line, allocation and hash-insert calls are missing.
 * Debug strings name it cno_alloc (likely daplka_cno_alloc) — TODO confirm.
 * Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
DERR("cno_alloc: cannot allocate cno resource\n");
return (ENOMEM);
}
cno_rp->cno_evd_cookie = 0;
/* insert into cno hash table */
if (retval != 0) {
DERR("cno_alloc: cannot insert cno resource\n");
goto cleanup;
}
/* return hkey to library */
mode);
if (retval != 0) {
goto cleanup;
}
return (0);
cleanup:;
if (inserted) {
(void **)&free_rp);
DERR("cno_alloc: cannot remove cno\n");
/*
* we can only get here if another thread
* has completed the cleanup in cno_free
*/
return (retval);
}
}
return (retval);
}
/*
* destroys a CNO.
* this gets called when a CNO resource's refcnt drops to zero.
*/
/*
 * NOTE(review): name line and teardown statements missing; debug string names
 * it cno_destroy (likely daplka_cno_destroy, runs at refcnt zero) — TODO
 * confirm.
 */
static int
{
D2("cno_destroy: entering, cno_rp %p, rnum %d\n",
return (0);
}
/*
 * NOTE(review): name line missing — presumably the hash-destroy callback for
 * CNO resources (daplka_hash_cno_free); body content absent in this extract.
 */
static void
{
}
/*
* removes the CNO from the cno hash table and frees the CNO
* if there are no references to it. if there are references to
* it, the CNO will be destroyed when the last of the references
* is released. once the CNO is removed from the cno hash table,
* the client will no longer be able to call cno_wait on the CNO.
*/
/*
 * NOTE(review): name line, copyin and hash-remove calls are missing.  Debug
 * string names it cno_free (likely daplka_cno_free) — TODO confirm.  Code
 * kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
if (retval != 0) {
return (EINVAL);
}
DERR("cno_free: cannot find cno resource\n");
return (EINVAL);
}
/* UNREF calls the actual free function when refcnt is zero */
return (0);
}
/*
* wait for a notification from one of the associated EVDs.
*/
/*
 * NOTE(review): name line, locals and the cv_timedwait call are missing.
 * Debug strings name it cno_wait (likely daplka_cno_wait) — TODO confirm.
 * Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
if (retval != 0) {
return (EINVAL);
}
/* get cno resource */
cno_rp = (daplka_cno_resource_t *)
DERR("cno_wait: cannot find cno resource\n");
return (EINVAL);
}
curr_time = ddi_get_lbolt();
/*
* use the max value if we wrapped around
*/
/*
* clock_t (size long) changes between 32 and 64-bit kernels
*/
}
while (cno_rp->cno_evd_cookie == 0) {
int rval = 0;
if (rval == 0) {
DERR("cno_wait: interrupted\n");
goto cleanup;
} else if (rval == -1) {
DERR("cno_wait: timed out\n");
goto cleanup;
}
}
cno_rp->cno_evd_cookie = 0;
ASSERT(evd_cookie != 0);
D2("cno_wait: returning evd_cookie 0x%p\n",
(void *)(uintptr_t)evd_cookie);
sizeof (dapl_cno_wait_t), mode);
if (retval != 0) {
goto cleanup;
}
cleanup:;
}
return (retval);
}
/*
* this function is called by the client when it decides to
* accept a connection request. a connection request is generated
* when the active side generates REQ MAD to a service point on
* the destination node. this causes the CM service handler
* (daplka_cm_service_req) on the passive side to be callee. This
* handler will then enqueue this connection request to the backlog
* array of the service point. A connection event containing the
* backlog array index and connection request private data is passed
* to the client's service point EVD (sp_evd_res). once the event
* is passed up to the userland, the client may examine the request
* to decide whether to call daplka_cr_accept or dapka_cr_reject.
*/
/*
 * NOTE(review): name line, locals (sp_rp, ep_rp, conn, sid) and the
 * ibt_cm_proceed / state-transition calls are missing.  Debug strings name
 * it cr_accept (likely daplka_cr_accept) — TODO confirm.  Code kept
 * byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
mode);
if (retval != 0) {
return (EFAULT);
}
DERR("cr_accept: private data len (%d) exceeded "
return (EINVAL);
}
/* get sp resource */
DERR("cr_accept: cannot find sp resource\n");
return (EINVAL);
}
/* get ep resource */
DERR("cr_accept: cannot find ep resource\n");
goto cleanup;
}
/*
* accept is only allowed if ep_state is CLOSED.
* note that after this point, the ep_state is frozen
* (i.e. TRANSITIONING) until we transition ep_state
* to ACCEPTING or back to CLOSED if we get an error.
*/
if (old_state != DAPLKA_EP_STATE_CLOSED) {
goto cleanup;
}
/*
* make sure the backlog index is not bogus.
*/
DERR("cr_accept: invalid backlog index 0x%llx %d\n",
goto cleanup;
}
/*
* make sure the backlog index indeed refers
* to a pending connection.
*/
DERR("cr_accept: invalid conn state %d\n",
conn->spcp_state);
goto cleanup;
}
DERR("cr_accept: sid == NULL\n");
goto cleanup;
}
/*
* a ep_rp with a NULL chan_hdl is impossible.
*/
DERR("cr_accept: ep_chan_hdl == NULL\n");
goto cleanup;
}
/*
* this clears our slot in the backlog array.
* this slot may now be used by other pending connections.
*/
conn->spcp_req_len = 0;
/*
* Set the unique cookie corresponding to the CR to this EP
* so that is can be used in passive side CM callbacks
*/
if (status != IBT_SUCCESS) {
retval = 0;
}
/*
* note that the CM handler may actually be called at this
* point. but since ep_state is still in TRANSITIONING, the
* handler will wait until we transition to ACCEPTING. this
* prevents the case where we set ep_state to ACCEPTING after
* daplka_service_conn_est sets ep_state to CONNECTED.
*/
cleanup:;
}
}
return (retval);
}
/*
* this function is called by the client to reject a
* connection request.
*/
/*
 * NOTE(review): name line, locals, switch-case labels and the ibt_cm_proceed
 * call site are missing.  Debug strings name it cr_reject (likely
 * daplka_cr_reject) — TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
void *sid;
mode);
if (retval != 0) {
return (EFAULT);
}
/* get sp resource */
DERR("cr_reject: cannot find sp resource\n");
return (EINVAL);
}
/*
* make sure the backlog index is not bogus.
*/
DERR("cr_reject: invalid backlog index 0x%llx %d\n",
goto cleanup;
}
/*
* make sure the backlog index indeed refers
* to a pending connection.
*/
DERR("cr_reject: invalid conn state %d\n",
conn->spcp_state);
goto cleanup;
}
DERR("cr_reject: sid == NULL\n");
goto cleanup;
}
/*
* this clears our slot in the backlog array.
* this slot may now be used by other pending connections.
*/
conn->spcp_req_len = 0;
switch (args.crr_reason) {
/* results in IBT_CM_CONSUMER as the reason for reject */
break;
/*FALLTHRU*/
/* results in IBT_CM_NO_RESC as the reason for reject */
break;
default:
/* unexpect reason code */
ASSERT(!"unexpected reject reason code");
break;
}
&proc_reply, NULL, 0);
if (status != IBT_SUCCESS) {
retval = 0;
}
cleanup:;
}
return (retval);
}
/*
* daplka_sp_match is used by daplka_hash_walk for finding SPs
*/
typedef struct daplka_sp_match_s {
/*
 * NOTE(review): name/parameter line missing; per the preceding comment and
 * debug string this is daplka_sp_match, the daplka_hash_walk predicate that
 * matches an SP by connection qualifier (returns 1 on match, 0 otherwise).
 * Code kept byte-identical.
 */
static int
{
if (sp_rp->sp_conn_qual ==
D2("daplka_sp_match: found sp, conn_qual %016llu\n",
return (1);
}
return (0);
}
/*
* cr_handoff allows the client to handoff a connection request from
* one service point to another.
*/
/*
 * NOTE(review): name line, locals (new_sp_rp, conn, sid, priv_len,
 * fake_event, cm_status) and several call sites are missing.  Debug strings
 * name it cr_handoff (likely daplka_cr_handoff) — TODO confirm.  Code kept
 * byte-identical.
 */
/* ARGSUSED */
static int
{
D3("cr_handoff: entering\n");
mode);
if (retval != 0) {
return (EFAULT);
}
/* get sp resource */
DERR("cr_handoff: cannot find sp resource\n");
return (EINVAL);
}
/*
* find the destination service point.
*/
/*
* return if we cannot find the service point
*/
DERR("cr_handoff: new sp not found, conn qual = %llu\n",
goto cleanup;
}
/*
* the spec does not discuss the security implications of this
* function. to be safe, we currently only allow processes
* owned by the same user to handoff connection requests
* to each other.
*/
DERR("cr_handoff: permission denied\n");
goto cleanup;
}
/*
* make sure the backlog index is not bogus.
*/
DERR("cr_handoff: invalid backlog index 0x%llx %d\n",
goto cleanup;
}
/*
* make sure the backlog index indeed refers
* to a pending connection.
*/
DERR("cr_handoff: invalid conn state %d\n",
conn->spcp_state);
goto cleanup;
}
DERR("cr_handoff: sid == NULL\n");
goto cleanup;
}
if (priv_len > 0) {
goto cleanup;
}
}
/*
* this clears our slot in the backlog array.
* this slot may now be used by other pending connections.
*/
conn->spcp_req_len = 0;
/* fill fake_event and call service_req handler */
if (cm_status != IBT_CM_DEFER) {
/*
* if for some reason cm_service_req failed, we
* reject the connection.
*/
if (status != IBT_SUCCESS) {
DERR("cr_handoff: ibt_cm_proceed returned %d\n",
status);
}
retval = 0;
}
cleanup:;
}
}
}
D3("cr_handoff: exiting\n");
return (retval);
}
/*
* returns a list of hca attributes
*/
/*
 * NOTE(review): name line and the attribute-translation statements are
 * missing; per the preceding comment this returns HCA attributes to the
 * library (function name unknown from this extract — TODO confirm).
 * Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
/*
* Take the ibt_hca_attr_t and stuff them into dapl_hca_attr_t
*/
mode);
if (retval != 0) {
return (EFAULT);
}
return (0);
}
/*
* This routine is passed to hash walk in the daplka_pre_mr_cleanup_callback,
* it frees the mw embedded in the mw resource object.
*/
/*
 * NOTE(review): name line and the ibt_free_mw call are missing.  Debug
 * string names it mr_cb_freemw — the hash-walk callback (used by the
 * pre-MR-cleanup path) that frees the MW embedded in an MW resource.
 * Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
/*
* we set mw_hdl to NULL so it won't get freed again
*/
if (status != IBT_SUCCESS) {
}
D3("mr_cb_freemw: mw freed\n");
}
return (0);
}
/*
* This routine is called from HCA driver's umem lock undo callback
* when the memory associated with an MR is being unmapped. In this callback
* we free all the MW associated with the IA and post an unaffiliated
* async event to tell the app that there was a catastrophic event.
* This allows the HCA to deregister the MR in its callback processing.
*/
/*
 * NOTE(review): name line, locals, the opening #if of the #endif below, the
 * switch statement head and the hash-walk call are all missing from this
 * extract.  Debug strings name it daplka_mr_unlock_callback — the HCA umem
 * unlock-undo callback that freezes/frees the IA's MWs.  Code kept
 * byte-identical.
 */
static void
{
#endif
DERR("daplka_mr_unlock_callback: resource not found, rnum %d\n",
rnum);
return;
}
/*
* MW is being alloced OR MW freeze has already begun. In
* both these cases we wait for that to complete before
* continuing.
*/
}
case DAPLKA_IA_INIT:
break;
case DAPLKA_IA_MW_FROZEN:
/* the mw on this ia have been freed */
D2("daplka_mr_unlock_callback: ia_state %d nothing to do\n",
goto cleanup;
default:
ASSERT(!"daplka_mr_unlock_callback: IA state invalid");
DERR("daplka_mr_unlock_callback: invalid ia_state %d\n",
goto cleanup;
}
/*
* Walk the mw hash table and free the mws. Acquire a writer
* lock since we don't want anyone else traversing this tree
* while we are freeing the MW.
*/
/*
* Currently commented out because Oracle skgxp is incapable
* of handling async events correctly.
*/
/*
* Enqueue an unaffiliated async error event to indicate this
* IA has encountered a problem that caused the MW to freed up
*/
/* Create a fake event, only relevant field is the hca_guid */
ia_rp);
#endif /* _THROW_ASYNC_EVENT_FROM_MRUNLOCKCB */
cleanup:;
}
/*
* registers a memory region.
* memory locking will be done by the HCA driver.
*/
/*
 * NOTE(review): name line, locals, and the copyin/ibt_register_mr/hash-insert
 * call sites are missing.  Debug strings name it mr_register (likely
 * daplka_mr_register) — TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
mode);
if (retval != 0) {
return (EINVAL);
}
DERR("mr_register: cannot allocate mr resource\n");
return (ENOMEM);
}
/* get pd handle */
pd_rp = (daplka_pd_resource_t *)
DERR("mr_register: cannot find pd resource\n");
goto cleanup;
}
D3("mr_register: mr_vaddr %p, mr_len %llu, mr_flags 0x%x\n",
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/* Pass the service driver mr cleanup handler to the hca driver */
&mr_cb_data_in, sizeof (mr_cb_data_in));
if (status != IBT_SUCCESS) {
DERR("mr_register: ibt_ci_data_in error(%d) ver(%d)",
retval = 0;
goto cleanup;
}
/* insert into mr hash table */
if (retval != 0) {
DERR("mr_register: cannot insert mr resource into mr_htbl\n");
goto cleanup;
}
sizeof (dapl_mr_register_t), mode);
if (retval != 0) {
goto cleanup;
}
return (0);
cleanup:;
if (inserted) {
(void **)&free_rp);
DERR("mr_register: cannot remove mr from hash table\n");
/*
* we can only get here if another thread
* has completed the cleanup in mr_deregister
*/
return (retval);
}
}
return (retval);
}
/*
* registers a shared memory region.
* the client calls this function with the intention to share the memory
* region with other clients. it is assumed that, prior to calling this
* function, the client(s) are already sharing parts of their address
* space using a mechanism such as SYSV shared memory. the first client
* that calls this function will create and insert a daplka_shared_mr_t
* object into the global daplka_shared_mr_tree. this shared mr object
* will be identified by a unique 40-byte key and will maintain a list
* of mr resources. every time this function gets called with the same
* 40-byte key, a new mr resource (containing a new mr handle generated
* by ibt_register_mr or ibt_register_shared_mr) is created and inserted
* into this list. similarly, every time a shared mr gets deregistered
* or invalidated by a callback, the mr resource gets removed from this
* list. the shared mr object has a reference count. when it drops to
* zero, the shared mr object will be removed from the global avl tree
* and be freed.
*/
/*
 * NOTE(review): name line, locals (smrp, mr_rp, pd_rp, free_rp, inserted),
 * the avl lookup/insert, cv_wait loop and both ibt_register_* call sites are
 * missing.  Debug strings name it mr_register_shared (likely
 * daplka_mr_register_shared) — TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
sizeof (dapl_mr_register_shared_t), mode);
if (retval != 0) {
return (EINVAL);
}
/*
* find smrp from the global avl tree.
* the 40-byte key is used as the lookup key.
*/
smrp = (daplka_shared_mr_t *)
D2("mr_register_shared: smrp 0x%p, found cookie:\n"
"0x%016llx%016llx%016llx%016llx%016llx\n", smrp,
/*
* if the smrp exists, other threads could still be
* accessing it. we wait until they are done before
* we continue.
*/
smrp->smr_refcnt++;
D2("mr_register_shared: smrp 0x%p, "
"waiting in transitioning state, refcnt %d\n",
}
D2("mr_register_shared: smrp 0x%p, refcnt %d, ready\n",
/*
* we set smr_state to TRANSITIONING to temporarily
* prevent other threads from trying to access smrp.
*/
} else {
D2("mr_register_shared: cannot find cookie:\n"
"0x%016llx%016llx%016llx%016llx%016llx\n",
/*
* if we cannot find smrp, we need to create and
* insert one into daplka_shared_mr_tree
*/
goto cleanup;
}
}
DERR("mr_register_shared: cannot allocate mr resource\n");
goto cleanup;
}
/* get pd handle */
pd_rp = (daplka_pd_resource_t *)
DERR("mr_register_shared: cannot find pd resource\n");
goto cleanup;
}
D2("mr_register_shared: mr_vaddr 0x%p, mr_len %llu, "
"mr_flags 0x%x, mr_as 0x%p, mr_exists %d, smrp 0x%p\n",
/*
* since we are in TRANSITIONING state, we are guaranteed
* that we have exclusive access to smr_mr_list.
*/
/*
* a non-null smr_mr_list indicates that someone
* else has already inserted an mr_resource into
* smr_mr_list. we use the mr_handle from the first
* element as an arg to ibt_register_shared_mr.
*/
D2("mr_register_shared: mem_sattr vaddr 0x%p flags 0x%x\n",
if (status != IBT_SUCCESS) {
DERR("mr_register_shared: "
"ibt_register_shared_mr error %d\n", status);
retval = 0;
goto cleanup;
}
} else {
/*
* an mr does not exist yet. we need to create one
* using ibt_register_mr.
*/
if (status != IBT_SUCCESS) {
DERR("mr_register_shared: "
"ibt_register_mr error %d\n", status);
retval = 0;
goto cleanup;
}
}
/* Pass the service driver mr cleanup handler to the hca driver */
&mr_cb_data_in, sizeof (mr_cb_data_in));
if (status != IBT_SUCCESS) {
DERR("mr_register_shared: ibt_ci_data_in error(%d) ver(%d)",
retval = 0;
goto cleanup;
}
/*
* we bump reference of mr_rp and enqueue it onto smrp.
*/
/* insert into mr hash table */
if (retval != 0) {
DERR("mr_register_shared: cannot insert mr resource\n");
goto cleanup;
}
/*
* at this point, there are two references to our mr resource.
* one is kept in ia_mr_htbl. the other is kept in the list
* within this shared mr object (smrp). when we deregister this
* mr or when a callback invalidates this mr, the reference kept
* by this shared mr object will be removed.
*/
sizeof (dapl_mr_register_shared_t), mode);
if (retval != 0) {
goto cleanup;
}
/*
* set the state to READY to allow others to continue
*/
return (0);
cleanup:;
if (inserted) {
(void **)&free_rp);
DERR("mr_register_shared: "
"cannot remove mr from hash table\n");
/*
* we can only get here if another thread
* has completed the cleanup in mr_deregister
*/
return (retval);
}
}
smrp->smr_refcnt--;
if (smrp->smr_refcnt == 0) {
/*
* the refcnt is 0. if there is anything
* left on the list, it must be ours.
*/
}
} else {
DERR("mr_register_shared: resetting smr_state "
"smrp 0x%p, %d waiters remain\n", smrp,
smrp->smr_refcnt);
/*
* search and remove mr_rp from smr_mr_list
*/
smrp);
break;
}
}
}
/*
* note that smr_state == READY does not necessarily
* mean that smr_mr_list is non empty. for this case,
* we are doing cleanup because of a failure. we set
* the state to READY to allow other threads to
* continue.
*/
}
}
}
return (retval);
}
/*
* registers a memory region using the attributes of an
* existing region.
*/
/*
 * NOTE(review): name line, locals (orig_mr_rp lookup call, mr_rp alloc) and
 * the ibt_register_shared_mr call site are missing.  Debug strings name it
 * mr_register_lmr (likely daplka_mr_register_lmr) — TODO confirm.  Code kept
 * byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
sizeof (dapl_mr_register_lmr_t), mode);
if (retval != 0) {
return (EINVAL);
}
if (orig_mr_rp == NULL) {
DERR("mr_register_lmr: cannot find mr resource\n");
return (EINVAL);
}
DERR("mr_register_lmr: cannot allocate mr resource\n");
goto cleanup;
}
/* Pass the IO addr that was returned while allocating the orig MR */
if (status != IBT_SUCCESS) {
DERR("mr_register_lmr: ibt_register_shared_mr error %d\n",
status);
retval = 0;
goto cleanup;
}
/* Pass the service driver mr cleanup handler to the hca driver */
&mr_cb_data_in, sizeof (mr_cb_data_in));
if (status != IBT_SUCCESS) {
DERR("mr_register_lmr: ibt_ci_data_in error(%d) ver(%d)",
retval = 0;
goto cleanup;
}
/* insert into mr hash table */
(void *)mr_rp);
if (retval != 0) {
DERR("mr_register: cannot insert mr resource into mr_htbl\n");
goto cleanup;
}
sizeof (dapl_mr_register_lmr_t), mode);
if (retval != 0) {
goto cleanup;
}
if (orig_mr_rp != NULL) {
}
return (0);
cleanup:;
if (inserted) {
(void **)&free_rp);
DERR("mr_register: cannot remove mr from hash table\n");
/*
* we can only get here if another thread
* has completed the cleanup in mr_deregister
*/
return (retval);
}
}
if (orig_mr_rp != NULL) {
}
}
return (retval);
}
/*
* this function is called by mr_deregister and mr_cleanup_callback to
* remove a mr resource from the shared mr object mr_rp->mr_shared_mr.
* if mr_shared_mr is already NULL, that means the region being
* deregistered or invalidated is not a shared mr region and we can
* return immediately.
*/
/*
 * NOTE(review): name line, the mutex acquire/release, the NULL check on
 * mr_shared_mr and the list-removal statements are missing; per the
 * preceding comment this is the shared-MR detach helper (likely
 * daplka_shared_mr_free).  Code kept byte-identical.
 */
static void
{
/*
* we need a lock because mr_callback also checks this field.
* for the rare case that mr_deregister and mr_cleanup_callback
* gets called simultaneously, we are guaranteed that smrp won't
* be dereferenced twice because either function will find
* mr_shared_mr to be NULL.
*/
}
smrp->smr_refcnt--;
/*
* search and remove mr_rp from smr_mr_list.
* also UNREF mr_rp because it is no longer
* on the list.
*/
break;
}
}
/*
* since mr_clean_callback may not touch smr_mr_list
* at this time (due to smr_state), we can be sure
* that we can find and remove mr_rp from smr_mr_list
*/
if (smrp->smr_refcnt == 0) {
} else {
D3("shared_mr_free: smrp 0x%p, refcnt %d\n",
}
}
}
/*
* deregisters a memory region.
* if mr is shared, remove reference from global shared mr object.
* release the initial reference to the mr. if the mr's refcnt is
* zero, call mr_destroy to free mr.
*/
/*
 * NOTE(review): name line, copyin, hash-remove, shared_mr_free and UNREF
 * call sites are missing.  Debug string names it mr_deregister (likely
 * daplka_mr_deregister) — TODO confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
mode);
if (retval != 0) {
return (EINVAL);
}
DERR("mr_deregister: cannot find mr resource\n");
return (EINVAL);
}
return (0);
}
/*
* sync local memory regions on RDMA read or write.
*/
/*
 * NOTE(review): name line, copyin, the per-segment lookup inside the first
 * loop, and the ibt_sync_mr call are missing.  Debug strings name it mr_sync
 * (likely daplka_mr_sync) — TODO confirm.  Code kept byte-identical; the
 * visible unwind loop correctly UNREFs segments 0..i-1 on lookup failure.
 */
/* ARGSUSED */
static int
{
int i, j;
int retval;
if (retval != 0) {
return (EFAULT);
}
/* number of segments bound check */
DERR("mr_sync: number of segments too large\n");
return (EINVAL);
}
/* translate MR sync direction flag */
} else {
DERR("mr_sync: unknown flags\n");
return (EINVAL);
}
/*
* all the segments are going to be sync'd by ibtl together
*/
for (i = 0; i < args.mrs_numseg; i++) {
for (j = 0; j < i; j++) {
DAPLKA_RS_UNREF(mr_rp[j]);
}
DERR("mr_sync: lookup error\n");
return (EINVAL);
}
}
if (status != IBT_SUCCESS) {
}
for (i = 0; i < args.mrs_numseg; i++) {
DAPLKA_RS_UNREF(mr_rp[i]);
}
return (0);
}
/*
* destroys a memory region.
* called when refcnt drops to zero.
*/
/*
 * NOTE(review): name line and the ibt_deregister_mr call site are missing.
 * Debug strings name it mr_destroy (likely daplka_mr_destroy, runs at refcnt
 * zero) — TODO confirm.  Code kept byte-identical.
 */
static int
{
D3("mr_destroy: entering, mr_rp 0x%p, rnum %d\n",
/*
* deregister mr
*/
if (status != IBT_SUCCESS) {
DERR("mr_destroy: ibt_deregister_mr returned %d\n",
status);
}
D3("mr_destroy: mr deregistered\n");
}
/*
* release reference on PD
*/
}
return (0);
}
/*
* this function is called by daplka_hash_destroy for
* freeing MR resource objects
*/
/*
 * NOTE(review): name line missing — per the preceding comment this is the
 * hash-destroy callback for MR resources (daplka_hash_mr_free); body content
 * absent in this extract.
 */
static void
{
}
/*
* comparison function used for finding a shared mr object
* from the global shared mr avl tree.
*/
/*
 * NOTE(review): name/parameter line and the per-word key comparisons are
 * missing; per the preceding comment this is the AVL comparator for the
 * global shared-MR tree (returns -1/0/1).  Code kept byte-identical.
 */
static int
{
int i;
for (i = 4; i >= 0; i--) {
return (-1);
}
return (1);
}
}
return (0);
}
/*
* allocates a protection domain.
*/
/*
 * NOTE(review): name line, the ibt_alloc_pd and hash-insert call sites are
 * missing.  Debug strings name it pd_alloc (likely daplka_pd_alloc) — TODO
 * confirm.  Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
DERR("pd_alloc: cannot allocate pd resource\n");
return (ENOMEM);
}
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/* insert into pd hash table */
if (retval != 0) {
DERR("pd_alloc: cannot insert pd resource into pd_htbl\n");
goto cleanup;
}
/* return hkey to library */
mode);
if (retval != 0) {
goto cleanup;
}
return (0);
cleanup:;
if (inserted) {
(void **)&free_rp);
DERR("pd_alloc: cannot remove pd from hash table\n");
/*
* we can only get here if another thread
* has completed the cleanup in pd_free
*/
return (retval);
}
}
return (retval);
}
/*
* destroys a protection domain.
* called when refcnt drops to zero.
*/
/*
 * NOTE(review): name line and the ibt_free_pd call site are missing.  Debug
 * string names it pd_destroy (likely daplka_pd_destroy, runs at refcnt zero)
 * — TODO confirm.  Code kept byte-identical.
 */
static int
{
D3("pd_destroy: entering, pd_rp %p, rnum %d\n",
if (status != IBT_SUCCESS) {
}
}
return (0);
}
/*
 * NOTE(review): name line missing — presumably the hash-destroy callback for
 * PD resources (daplka_hash_pd_free); body content absent in this extract.
 */
static void
{
}
/*
* removes the pd reference from ia_pd_htbl and releases the
* initial reference to the pd. also destroys the pd if the refcnt
* is zero.
*/
/*
 * NOTE(review): name line, copyin and the hash-remove call are missing.
 * Debug string names it pd_free (likely daplka_pd_free) — TODO confirm.
 * Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval;
if (retval != 0) {
return (EINVAL);
}
DERR("pd_free: cannot find pd resource\n");
return (EINVAL);
}
/* UNREF calls the actual free function when refcnt is zero */
return (0);
}
/*
* allocates a memory window
*/
/*
 * NOTE(review): name line, locals, the ibt_alloc_mw call, the switch head on
 * ia_state and the mutex/cv statements are missing.  Debug strings name it
 * mw_alloc (likely daplka_mw_alloc) — TODO confirm.  Code kept
 * byte-identical; the ia_mw_alloccnt counting against the MW-freeze states
 * is visible but its locking is not, so treat synchronization details as
 * unverified here.
 */
/* ARGSUSED */
static int
{
int retval;
if (retval != 0) {
return (EFAULT);
}
/*
* Allocate and initialize a MW resource
*/
DERR("mw_alloc: cannot allocate mw resource\n");
return (ENOMEM);
}
/* get pd handle */
pd_rp = (daplka_pd_resource_t *)
DERR("mw_alloc: cannot find pd resource\n");
goto cleanup;
}
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
case DAPLKA_IA_INIT:
ia_rp->ia_mw_alloccnt++;
retval = 0;
break;
/* another mw_alloc is already in progress increase cnt */
ia_rp->ia_mw_alloccnt++;
retval = 0;
break;
/* FALLTHRU */
case DAPLKA_IA_MW_FROZEN:
/*
* IA is being or already frozen don't allow more MWs to be
* allocated.
*/
DERR("mw_alloc: IA is freezing MWs (state=%d)\n",
break;
default:
ASSERT(!"Invalid IA state in mw_alloc");
break;
}
/* retval is 0 when ia_mw_alloccnt is incremented */
if (retval != 0) {
goto cleanup;
}
/* insert into mw hash table */
mw_hkey = 0;
(void *)mw_rp);
if (retval != 0) {
DERR("mw_alloc: cannot insert mw resource into mw_htbl\n");
ia_rp->ia_mw_alloccnt--;
if (ia_rp->ia_mw_alloccnt == 0) {
}
goto cleanup;
}
D3("mw_alloc: ibt_alloc_mw mw_hdl(%p) mw_rkey(0x%llx)\n",
/*
* We are done with mw_alloc if this was the last mw_alloc
* change state back to DAPLKA_IA_INIT and wake up waiters
* specifically the unlock callback.
*/
ia_rp->ia_mw_alloccnt--;
if (ia_rp->ia_mw_alloccnt == 0) {
}
mode);
if (retval != 0) {
goto cleanup;
}
return (0);
cleanup:;
if (inserted) {
(void **)&free_rp);
DERR("mw_alloc: cannot remove mw from hash table\n");
/*
* we can only get here if another thread
* has completed the cleanup in mw_free
*/
return (retval);
}
}
return (retval);
}
/*
* removes the mw reference from ia_mw_htbl and releases the
* initial reference to the mw. also destroys the mw if the refcnt
* is zero.
*/
/*
 * NOTE(review): name line, copyin and the hash-remove call head are missing.
 * Debug string names it mw_free (likely daplka_mw_free) — TODO confirm.
 * Code kept byte-identical.
 */
/* ARGSUSED */
static int
{
int retval = 0;
if (retval != 0) {
return (EFAULT);
}
(void **)&mw_rp);
DERR("mw_free: cannot find mw resrc (0x%llx)\n",
return (EINVAL);
}
/* UNREF calls the actual free function when refcnt is zero */
return (retval);
}
/*
* destroys the memory window.
* called when refcnt drops to zero.
*/
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_mw_destroy --
 * refcnt-zero destructor: ibt_free_mw the window, then release the PD
 * reference. Missing lines include the ibt_free_mw call itself; verify
 * against complete daplt.c.
 */
{
D3("mw_destroy: entering, mw_rp 0x%p, rnum %d\n",
/*
* free memory window
*/
if (status != IBT_SUCCESS) {
}
D3("mw_destroy: mw freed\n");
}
/*
* release reference on PD
*/
}
return (0);
}
static void
/*
 * NOTE(review): body entirely lost to extraction. By position (after
 * mw_destroy) this is presumably the hash-table free callback for MW
 * resources (cf. daplka_hash_ep_free declared in the file header);
 * verify against complete daplt.c.
 */
{
}
/*
* SRQ ioctls and supporting functions
*/
/* ARGSUSED */
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_srq_create --
 * copyin args, allocate an SRQ resource, look up the PD, validate
 * requested sizes against HCA limits, ibt_alloc_srq, fetch CI data_out
 * for library mmap, insert into srq_htbl, copyout the hkey. Signature
 * and most calls are missing; verify against complete daplt.c.
 */
{
int retval;
D3("srq_create: enter\n");
mode);
if (retval != 0) {
return (EFAULT);
}
DERR("srq_create: cannot allocate ep_rp\n");
return (ENOMEM);
}
/* get pd handle */
pd_rp = (daplka_pd_resource_t *)
DERR("srq_create: cannot find pd resource\n");
goto cleanup;
}
/*
* these checks ensure that the requested SRQ sizes
* are within the limits supported by the chosen HCA.
*/
DERR("srq_create: invalid srqs_sz %d\n",
goto cleanup;
}
DERR("srq_create: invalid srqs_sgl %d\n",
goto cleanup;
}
D3("srq_create: srq_sgl %d, srq_sz %d\n",
/* create srq */
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/* Get HCA-specific data_out info */
if (status != IBT_SUCCESS) {
retval = 0;
goto cleanup;
}
/* preparing to copyout map_data back to the library */
/* insert into srq hash table */
if (retval != 0) {
DERR("srq_create: cannot insert srq resource into srq_htbl\n");
goto cleanup;
}
/* return hkey to library */
mode);
if (retval != 0) {
goto cleanup;
}
D3(" sz(%d) sgl(%d)\n",
D3("srq_create: exit\n");
return (0);
if (inserted) {
(void **)&free_rp);
/*
* this case is impossible because ep_free will
* wait until our state transition is complete.
*/
DERR("srq_create: cannot remove srq from hash table\n");
return (retval);
}
}
return (retval);
}
/*
* Resize an existing SRQ
*/
/* ARGSUSED */
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_srq_resize --
 * copyin args, look up the SRQ, ibt_modify_srq (best-effort, see comment
 * below), re-fetch CI data_out for the library mmap, copyout results.
 * Signature and several statements are missing; verify against complete
 * daplt.c.
 */
{
int retval = 0;
mode);
if (retval != 0) {
return (EFAULT);
}
/* get srq resource */
srq_rp = (daplka_srq_resource_t *)
DERR("srq_resize: cannot find srq resource\n");
return (EINVAL);
}
goto cleanup;
}
/*
* If ibt_resize_srq fails that it is primarily due to resource
* shortage. Per IB spec resize will never lose events and
* a resize error leaves the SRQ intact. Therefore even if the
* resize request fails we proceed and get the mapping data
* from the SRQ so that the library can mmap it.
*/
if (status != IBT_SUCCESS) {
/* we return the size of the old CQ if resize fails */
} else {
}
D2("srq_resize(%d): done new_sz(%u) real_sz(%u)\n",
/* Get HCA-specific data_out info */
if (status != IBT_SUCCESS) {
/* return ibt_ci_data_out status */
retval = 0;
goto cleanup;
}
mode);
if (retval != 0) {
goto cleanup;
}
cleanup:;
}
return (retval);
}
/*
* Frees an SRQ resource.
*/
/* ARGSUSED */
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_srq_free --
 * copyin args, remove the SRQ from the hash table, then UNREF (frees at
 * refcnt zero). Verify against complete daplt.c.
 */
{
int retval;
if (retval != 0) {
return (EFAULT);
}
/*
* this is only possible if we have two threads
* calling ep_free in parallel.
*/
DERR("srq_free: cannot find resource retval(%d) 0x%llx\n",
return (EINVAL);
}
/* UNREF calls the actual free function when refcnt is zero */
return (0);
}
/*
* destroys a SRQ resource.
* called when refcnt drops to zero.
*/
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_srq_destroy --
 * refcnt-zero destructor: ibt_free_srq, then release held references
 * (presumably the PD). Verify against complete daplt.c.
 */
{
D3("srq_destroy: entering, srq_rp 0x%p, rnum %d\n",
/*
* destroy the srq
*/
if (status != IBT_SUCCESS) {
DERR("srq_destroy: ibt_free_srq returned %d\n",
status);
}
}
/*
* release all references
*/
}
return (0);
}
static void
/*
 * NOTE(review): body entirely lost to extraction. By position this is
 * presumably the hash-table free callback for SRQ resources; verify
 * against complete daplt.c.
 */
{
}
/*
* This function tells the CM to start listening on a service id.
* It must be called by the passive side client before the client
* can receive connection requests from remote endpoints. If the
* client specifies a non-zero service id (connection qualifier in
* dapl terms), this function will attempt to bind to this service
* id and return an error if the id is already in use. If the client
* specifies zero as the service id, this function will try to find
* the next available service id and return it back to the client.
* To support the cr_handoff function, this function will, in addition
* to creating and inserting an SP resource into the per-IA SP hash
* table, insert the SP resource into a global SP table. This table
* maintains all active service points created by all dapl clients.
* CR handoff locates the target SP by iterating through this global
* table.
*/
/* ARGSUSED */
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_service_register
 * (see the long header comment above this function in the file) --
 * allocate an SP resource, look up the EVD, set up the connection-pending
 * backlog, ibt_register_service + ibt_bind_service, insert into both the
 * global SP table (for cr_handoff) and the per-IA SP table, copyout the
 * hkey. Cleanup path unwinds both table insertions. Signature and many
 * statements are missing; verify against complete daplt.c.
 */
{
int backlog_size;
int retval = 0;
sizeof (dapl_service_register_t), mode);
if (retval != 0) {
return (EINVAL);
}
DERR("service_register: cannot allocate sp resource\n");
return (ENOMEM);
}
/* check if evd exists */
evd_rp = (daplka_evd_resource_t *)
DERR("service_register: evd resource not found\n");
goto cleanup;
}
/*
* initialize backlog size
*/
} else {
}
/* save the userland sp ptr */
sizeof (daplka_sp_conn_pend_t), daplka_km_flags);
/* save evd resource pointer */
/*
* save ruid here so that we can do a comparison later
* when someone does cr_handoff. the check will prevent
* a malicious app from passing a CR to us.
*/
/* fill in args for register_service */
if (status != IBT_SUCCESS) {
DERR("service_register: ibt_register_service returned %d\n",
status);
retval = 0;
goto cleanup;
}
/* save returned sid */
/* fill in args for bind_service */
D2("service_register: bind(0x%llx:0x%llx)\n",
if (status != IBT_SUCCESS) {
DERR("service_register: ibt_bind_service returned %d\n",
status);
retval = 0;
goto cleanup;
}
/*
* need to bump refcnt because the global hash table will
* have a reference to sp_rp
*/
/* insert into global sp hash table */
sp_rp->sp_global_hkey = 0;
if (retval != 0) {
DERR("service_register: cannot insert sp resource\n");
goto cleanup;
}
/* insert into per-IA sp hash table */
if (retval != 0) {
DERR("service_register: cannot insert sp resource\n");
goto cleanup;
}
/* pass index to application */
sizeof (dapl_service_register_t), mode);
if (retval != 0) {
goto cleanup;
}
return (0);
cleanup:;
/* remove from ia table */
if (sp_hkey != 0) {
DERR("service_register: cannot remove sp\n");
/*
* we can only get here if another thread
* has completed the cleanup in svc_deregister
*/
return (retval);
}
}
/* remove from global table */
if (sp_rp->sp_global_hkey != 0) {
/*
* we get here if either the hash_insert into
* ia_sp_htbl failed or the ddi_copyout failed.
* hash_insert failure implies that we are the
* only thread with a reference to sp. ddi_copyout
* failure implies that svc_deregister could have
* picked up the sp and destroyed it. but since
* we got to this point, we must have removed
* the sp ourselves in hash_remove above and
* that the sp can be destroyed by us.
*/
(void) daplka_hash_remove(&daplka_global_sp_htbl,
DERR("service_register: cannot remove sp\n");
/*
* this case is impossible. see explanation above.
*/
return (retval);
}
sp_rp->sp_global_hkey = 0;
}
/* unreference sp */
if (bumped) {
}
/* destroy sp resource */
return (retval);
}
/*
* deregisters the service and removes SP from the global table.
*/
/* ARGSUSED */
static int
/*
 * NOTE(review): truncated fragment; appears to be
 * daplka_service_deregister -- copyin args, remove the SP from the
 * per-IA and global hash tables, drop the global reference. Verify
 * against complete daplt.c.
 */
{
int retval;
sizeof (dapl_service_deregister_t), mode);
if (retval != 0) {
return (EINVAL);
}
DERR("service_deregister: cannot find sp resource\n");
return (EINVAL);
}
DERR("service_deregister: cannot find sp resource\n");
}
/* remove the global reference */
}
return (0);
}
/*
* destroys a service point.
* called when the refcnt drops to zero.
*/
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_sp_destroy --
 * refcnt-zero destructor for a service point: reject any pending
 * connections left in the backlog, ibt_unbind_service and
 * ibt_deregister_service, free the backlog array, release the EVD
 * reference. Verify against complete daplt.c.
 */
{
D3("sp_destroy: entering, sp_rp %p, rnum %d\n",
/*
* it is possible for pending connections to remain
* on an SP. We need to clean them up here.
*/
int i, cnt = 0;
void *spcp_sidp;
for (i = 0; i < sp_rp->sp_backlog_size; i++) {
cnt++;
DERR("sp_destroy: "
"spcp_sid == NULL!\n");
continue;
}
if (status != IBT_SUCCESS) {
DERR("sp_destroy: proceed failed %d\n",
status);
}
}
}
if (cnt > 0) {
DERR("sp_destroy: found %d pending "
"connections\n", cnt);
}
}
sp_rp->sp_bind_hdl);
if (status != IBT_SUCCESS) {
DERR("sp_destroy: ibt_unbind_service "
"failed: %d\n", status);
}
}
sp_rp->sp_srv_hdl);
if (status != IBT_SUCCESS) {
DERR("sp_destroy: ibt_deregister_service "
"failed: %d\n", status);
}
}
sp_rp->sp_backlog_size = 0;
}
/*
* release reference to evd
*/
}
return (0);
}
/*
* this function is called by daplka_hash_destroy for
* freeing SP resource objects
*/
static void
/*
 * NOTE(review): truncated fragment; per the comment above it in the file,
 * this is the daplka_hash_destroy callback that frees SP resource
 * objects (removing them from the global SP table and unreferencing).
 * Most of the body is missing; verify against complete daplt.c.
 */
{
int retval;
DERR("sp_free: cannot find sp resource\n");
}
}
}
static void
/*
 * NOTE(review): body entirely lost to extraction. By position (second
 * SP hash callback) this is presumably a simple unref callback for the
 * global SP table; verify against complete daplt.c.
 */
{
}
/*
* Passive side CM handlers
*/
/*
* processes the REQ_RCV event
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; appears to be the passive-side
 * REQ_RCV handler (daplka_cm_service_req) -- take a backlog slot, stash
 * the request data for cr_handoff, build a CR event with the requestor
 * gid and private data, ibt_cm_delay the active side, enqueue the event
 * on the SP's EVD and return IBT_CM_DEFER; cleanup path frees the event
 * and releases the backlog slot. Verify against complete daplt.c.
 */
{
/*
* acquire a slot in the connection backlog of this service point
*/
break;
}
}
/*
* too many pending connections
*/
DERR("service_req: connection pending exceeded %d limit\n",
return (IBT_CM_NO_RESOURCE);
}
/*
* save data for cr_handoff
*/
if (trunc_len > DAPL_MAX_PRIVATE_DATA_SIZE) {
DERR("service_req: private data truncated\n");
}
} else {
conn->spcp_req_len = 0;
}
/*
* create a CR event
*/
DERR("service_req: could not alloc cr_ev\n");
goto cleanup;
}
/*
* save the requestor gid
* daplka_event_poll needs this if this is a third party REQ_RCV
*/
/*
* set event type
*/
if (pr_len == 0) {
} else {
DERR("service_req: could not alloc priv\n");
goto cleanup;
}
}
/*
* tell the active side to expect the processing time to be
* at most equal to daplka_cm_delay
*/
daplka_cm_delay, NULL, 0);
if (status != IBT_SUCCESS) {
goto cleanup;
}
/*
* enqueue cr_ev onto the cr_events list of the EVD
* corresponding to the SP
*/
D2("service_req: enqueue event(%p) evdp(%p) priv_data(%p) "
return (IBT_CM_DEFER);
cleanup:;
/*
* free the cr event
*/
}
}
/*
* release our slot in the backlog array
*/
conn->spcp_req_len = 0;
}
return (cm_status);
}
/*
* processes the CONN_CLOSED event
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; passive-side CONN_CLOSED handler --
 * look up the EP from the channel, require CONNECTED/DISCONNECTING
 * state, build a DAPL_IB_CME_DISCONNECTED event, move the EP to
 * DISCONNECTED and enqueue on its conn_evd. Verify against complete
 * daplt.c.
 */
{
ep_rp = (daplka_ep_resource_t *)
DERR("service_conn_closed: ep_rp == NULL\n");
return (IBT_CM_ACCEPT);
}
/*
* verify that the ep_state is either CONNECTED or
* DISCONNECTING. if it is not in either states return
* without generating an event.
*/
if (old_state != DAPLKA_EP_STATE_CONNECTED &&
/*
* we can get here if the connection is being aborted
*/
D2("service_conn_closed: conn aborted, state = %d, "
return (IBT_CM_ACCEPT);
}
/*
* create a DAPL_IB_CME_DISCONNECTED event
*/
DERR("service_conn_closed: cannot alloc disc_ev\n");
return (IBT_CM_ACCEPT);
}
D2("service_conn_closed: enqueue event(%p) evdp(%p) psep(0x%llx)\n",
/*
* transition ep_state to DISCONNECTED
*/
/*
* enqueue event onto the conn_evd owned by ep_rp
*/
return (IBT_CM_ACCEPT);
}
/*
* processes the CONN_EST event
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; passive-side CONN_EST handler --
 * look up the EP, require ACCEPTING state, build a
 * DAPL_IB_CME_CONNECTED event (copying private data when present),
 * move the EP to CONNECTED and enqueue on its conn_evd. Verify against
 * complete daplt.c.
 */
{
ep_rp = (daplka_ep_resource_t *)
DERR("service_conn_est: ep_rp == NULL\n");
return (IBT_CM_ACCEPT);
}
/*
* verify that ep_state is ACCEPTING. if it is not in this
* state, return without generating an event.
*/
if (old_state != DAPLKA_EP_STATE_ACCEPTING) {
/*
* we can get here if the connection is being aborted
*/
DERR("service_conn_est: conn aborted, state = %d\n",
return (IBT_CM_ACCEPT);
}
/*
* create a DAPL_IB_CME_CONNECTED event
*/
DERR("service_conn_est: conn_ev alloc failed\n");
return (IBT_CM_ACCEPT);
}
/*
* copy private data into event
*/
if (pr_len > 0) {
DERR("service_conn_est: pr_data alloc failed\n");
return (IBT_CM_ACCEPT);
}
}
D2("service_conn_est: enqueue event(%p) evdp(%p)\n",
/*
* transition ep_state to CONNECTED
*/
/*
* enqueue event onto the conn_evd owned by ep_rp
*/
return (IBT_CM_ACCEPT);
}
/*
* processes the FAILURE event
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; passive-side FAILURE handler --
 * validate cm_channel, look up the EP, require ACCEPTING/DISCONNECTING
 * state, force the QP to ERROR on abort, build a disconnect event whose
 * type depends on cf_code, move the EP to DISCONNECTED and enqueue on
 * its conn_evd. The "serice_event_failure" misspelling below is in a
 * runtime message string and is left untouched. Verify against complete
 * daplt.c.
 */
{
/*
* check that we still have a valid cm_channel before continuing
*/
DERR("serice_event_failure: event->cm_channel == NULL\n");
return (IBT_CM_ACCEPT);
}
ep_rp = (daplka_ep_resource_t *)
DERR("service_event_failure: ep_rp == NULL\n");
return (IBT_CM_ACCEPT);
}
/*
* verify that ep_state is ACCEPTING or DISCONNECTING. if it
* is not in either state, return without generating an event.
*/
if (old_state != DAPLKA_EP_STATE_ACCEPTING &&
/*
* we can get here if the connection is being aborted
*/
DERR("service_event_failure: conn aborted, state = %d, "
"cf_code = %d, cf_msg = %d, cf_reason = %d\n", old_state,
return (IBT_CM_ACCEPT);
}
if ((status == IBT_SUCCESS) &&
DERR("service_event_failure: conn abort qpn %d state %d\n",
/* explicit transition the QP to ERROR state */
}
/*
* create an event
*/
DERR("service_event_failure: cannot alloc disc_ev\n");
return (IBT_CM_ACCEPT);
}
/*
* fill in the appropriate event type
*/
case IBT_CM_INVALID_CID:
break;
default:
break;
}
} else {
}
D2("service_event_failure: enqueue event(%p) evdp(%p) cf_code(%d) "
"cf_msg(%d) cf_reason(%d) psep(0x%llx)\n", disc_ev,
/*
* transition ep_state to DISCONNECTED
*/
/*
* enqueue event onto the conn_evd owned by ep_rp
*/
return (IBT_CM_ACCEPT);
}
/*
* this is the passive side CM handler. it gets registered
* when an SP resource is created in daplka_service_register.
*/
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; the passive-side CM dispatcher
 * registered in daplka_service_register (per the comment above it) --
 * switches on the CM event type and forwards to the service_* handlers
 * above. The switch header and call expressions are missing; verify
 * against complete daplt.c.
 */
{
DERR("service_handler: sp_rp == NULL\n");
return (IBT_CM_NO_RESOURCE);
}
/*
* default is not to return priv data
*/
ret_args->cm_ret_len = 0;
}
case IBT_CM_EVENT_REQ_RCV:
D2("service_handler: IBT_CM_EVENT_REQ_RCV\n");
case IBT_CM_EVENT_REP_RCV:
/* passive side should not receive this event */
D2("service_handler: IBT_CM_EVENT_REP_RCV\n");
return (IBT_CM_DEFAULT);
case IBT_CM_EVENT_CONN_CLOSED:
D2("service_handler: IBT_CM_EVENT_CONN_CLOSED %d\n",
case IBT_CM_EVENT_MRA_RCV:
/* passive side does default processing MRA event */
D2("service_handler: IBT_CM_EVENT_MRA_RCV\n");
return (IBT_CM_DEFAULT);
case IBT_CM_EVENT_CONN_EST:
D2("service_handler: IBT_CM_EVENT_CONN_EST\n");
case IBT_CM_EVENT_FAILURE:
D2("service_handler: IBT_CM_EVENT_FAILURE\n");
case IBT_CM_EVENT_LAP_RCV:
/* active side had initiated a path migration operation */
D2("service_handler: IBT_CM_EVENT_LAP_RCV\n");
return (IBT_CM_ACCEPT);
default:
break;
}
return (IBT_CM_DEFAULT);
}
/*
* Active side CM handlers
*/
/*
* Processes the REP_RCV event. When the passive side accepts the
* connection, this handler is called. We make a copy of the private
* data into the ep so that it can be passed back to userland in when
* the CONN_EST event occurs.
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; active-side REP_RCV handler (per the
 * comment above it) -- require CONNECTING state, save the passive side's
 * private data in the EP for later delivery at CONN_EST; state is left
 * unchanged. Verify against complete daplt.c.
 */
{
(int)pr_len);
if (old_state != DAPLKA_EP_STATE_CONNECTING) {
/*
* we can get here if the connection is being aborted
*/
return (IBT_CM_NO_CHANNEL);
}
/*
* we do not cancel the timer here because the connection
* handshake is still in progress.
*/
/*
* save the private data. it will be passed up when
* the connection is established.
*/
if (pr_len > 0) {
}
/*
* we do not actually transition to a different state.
* the state will change when we get a conn_est, failure,
* closed, or timeout event.
*/
return (IBT_CM_ACCEPT);
}
/*
* Processes the CONN_CLOSED event. This gets called when either
* the active or passive side closes the rc channel.
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; active-side CONN_CLOSED handler --
 * require CONNECTED/DISCONNECTING, cancel the connect timer (bail if it
 * is mid-processing), build a DAPL_IB_CME_DISCONNECTED event, move the
 * EP to DISCONNECTED and enqueue on its conn_evd. Verify against
 * complete daplt.c.
 */
{
if (old_state != DAPLKA_EP_STATE_CONNECTED &&
/*
* we can get here if the connection is being aborted
*/
D2("rc_conn_closed: conn aborted, state = %d, "
return (IBT_CM_ACCEPT);
}
/*
* it's ok for the timer to fire at this point. the
* taskq thread that processes the timer will just wait
* until we are done with our state transition.
*/
if (daplka_cancel_timer(ep_rp) != 0) {
/*
* daplka_cancel_timer returns -1 if the timer is
* being processed and 0 for all other cases.
* we need to reset ep_state to allow timer processing
* to continue.
*/
DERR("rc_conn_closed: timer is being processed\n");
return (IBT_CM_ACCEPT);
}
/*
* create a DAPL_IB_CME_DISCONNECTED event
*/
DERR("rc_conn_closed: could not alloc ev\n");
return (IBT_CM_ACCEPT);
}
D2("rc_conn_closed: enqueue event(%p) evdp(%p) closed(%d)\n",
/*
* transition ep_state to DISCONNECTED
*/
/*
* enqueue event onto the conn_evd owned by ep_rp
*/
return (IBT_CM_ACCEPT);
}
/*
* processes the CONN_EST event
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; active-side CONN_EST handler --
 * require CONNECTING, cancel the connect timer, build a
 * DAPL_IB_CME_CONNECTED event carrying the private data saved by
 * rc_rep_rcv, move the EP to CONNECTED and enqueue on its conn_evd.
 * Verify against complete daplt.c.
 */
{
if (old_state != DAPLKA_EP_STATE_CONNECTING) {
/*
* we can get here if the connection is being aborted
*/
return (IBT_CM_ACCEPT);
}
/*
* it's ok for the timer to fire at this point. the
* taskq thread that processes the timer will just wait
* until we are done with our state transition.
*/
if (daplka_cancel_timer(ep_rp) != 0) {
/*
* daplka_cancel_timer returns -1 if the timer is
* being processed and 0 for all other cases.
* we need to reset ep_state to allow timer processing
* to continue.
*/
DERR("rc_conn_est: timer is being processed\n");
return (IBT_CM_ACCEPT);
}
/*
* create a DAPL_IB_CME_CONNECTED event
*/
DERR("rc_conn_est: could not alloc ev\n");
return (IBT_CM_ACCEPT);
}
/*
* The private data passed back in the connection established
* event is what was recvd in the daplka_cm_rc_rep_rcv handler and
* saved in ep resource structure.
*/
if (ep_rp->ep_priv_len > 0) {
DERR("rc_conn_est: could not alloc pr_data\n");
return (IBT_CM_ACCEPT);
}
ep_rp->ep_priv_len);
}
D2("rc_conn_est: enqueue event(%p) evdp(%p) pr_data(0x%p), "
/*
* transition ep_state to CONNECTED
*/
/*
* enqueue event onto the conn_evd owned by ep_rp
*/
return (IBT_CM_ACCEPT);
}
/*
* processes the FAILURE event
*/
/* ARGSUSED */
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; active-side FAILURE handler --
 * require CONNECTING/DISCONNECTING, cancel the connect timer, force the
 * QP to ERROR on abort, build a disconnect event typed by cf_code
 * (IBT_CM_CONSUMER / NO_CHAN / NO_RESC cases visible), move the EP to
 * DISCONNECTED and enqueue on its conn_evd. Verify against complete
 * daplt.c.
 */
{
if (old_state != DAPLKA_EP_STATE_CONNECTING &&
/*
* we can get here if the connection is being aborted
*/
DERR("rc_event_failure: conn aborted, state = %d, "
"cf_code = %d, cf_msg = %d, cf_reason = %d\n", old_state,
return (IBT_CM_ACCEPT);
}
/*
* it's ok for the timer to fire at this point. the
* taskq thread that processes the timer will just wait
* until we are done with our state transition.
*/
if (daplka_cancel_timer(ep_rp) != 0) {
/*
* daplka_cancel_timer returns -1 if the timer is
* being processed and 0 for all other cases.
* we need to reset ep_state to allow timer processing
* to continue.
*/
DERR("rc_event_failure: timer is being processed\n");
return (IBT_CM_ACCEPT);
}
if ((status == IBT_SUCCESS) &&
DERR("rc_event_failure: conn abort qpn %d state %d\n",
/* explicit transition the QP to ERROR state */
}
/*
* create an event
*/
DERR("rc_event_failure: cannot alloc disc_ev\n");
return (IBT_CM_ACCEPT);
}
/*
* copy private data into event
*/
if (pr_len > 0) {
DERR("rc_event_failure: cannot alloc pr data\n");
return (IBT_CM_ACCEPT);
}
}
/*
* fill in the appropriate event type
*/
case IBT_CM_CONSUMER:
break;
case IBT_CM_NO_CHAN:
case IBT_CM_NO_RESC:
break;
default:
break;
}
} else {
/* others we'll mark as local failure */
}
D2("rc_event_failure: enqueue event(%p) evdp(%p) cf_code(%d) "
/*
* transition ep_state to DISCONNECTED
*/
/*
* enqueue event onto the conn_evd owned by ep_rp
*/
return (IBT_CM_ACCEPT);
}
/*
* This is the active side CM handler. It gets registered when
* ibt_open_rc_channel is called.
*/
static ibt_cm_status_t
/*
 * NOTE(review): truncated fragment; the active-side CM dispatcher
 * registered via ibt_open_rc_channel (per the comment above it) --
 * switches on the CM event type and forwards to the rc_* handlers
 * above. Switch header and call expressions are missing; verify against
 * complete daplt.c.
 */
{
DERR("rc_handler: ep_rp == NULL\n");
return (IBT_CM_NO_CHANNEL);
}
/*
* default is not to return priv data
*/
ret_args->cm_ret_len = 0;
}
case IBT_CM_EVENT_REQ_RCV:
/* active side should not receive this event */
D2("rc_handler: IBT_CM_EVENT_REQ_RCV\n");
break;
case IBT_CM_EVENT_REP_RCV:
/* connection accepted by passive side */
D2("rc_handler: IBT_CM_EVENT_REP_RCV\n");
case IBT_CM_EVENT_CONN_CLOSED:
D2("rc_handler: IBT_CM_EVENT_CONN_CLOSED %d\n",
case IBT_CM_EVENT_MRA_RCV:
/* passive side does default processing MRA event */
D2("rc_handler: IBT_CM_EVENT_MRA_RCV\n");
return (IBT_CM_DEFAULT);
case IBT_CM_EVENT_CONN_EST:
D2("rc_handler: IBT_CM_EVENT_CONN_EST\n");
case IBT_CM_EVENT_FAILURE:
D2("rc_handler: IBT_CM_EVENT_FAILURE\n");
default:
break;
}
return (IBT_CM_DEFAULT);
}
/*
* creates an IA resource and inserts it into the global resource table.
*/
/* ARGSUSED */
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_ia_create --
 * copyin and version-check args, find the HCA by guid, validate the
 * port, register an Address Record, create the per-IA hash tables
 * (ep/mr/mw/pd/evd/cno/sp/srq), insert the IA into the global resource
 * table and copyout results; goto-cleanup unwinds on failure. Signature
 * and most calls are missing; verify against complete daplt.c.
 */
{
int retval;
mode);
if (retval != 0) {
return (EFAULT);
}
DERR("ia_create: invalid version %d, expected version %d\n",
return (EINVAL);
}
/*
* find the hca with the matching guid
*/
break;
}
}
DERR("ia_create: guid 0x%016llx not found\n",
return (EINVAL);
}
/*
* check whether port number is valid and whether it is up
*/
return (EINVAL);
}
DERR("ia_create: hca_hdl == NULL\n");
return (EINVAL);
}
if (status != IBT_SUCCESS) {
return (0);
}
/* register Address Record */
D3("daplka_ia_create: SA[8] %d.%d.%d.%d\n",
D3("daplka_ia_create: SA[12] %d.%d.%d.%d\n",
if (retval != IBT_SUCCESS) {
DERR("ia_create: failed to register Address Record.\n");
goto cleanup;
}
/*
* create hash tables for all object types
*/
if (retval != 0) {
DERR("ia_create: cannot create ep hash table\n");
goto cleanup;
}
if (retval != 0) {
DERR("ia_create: cannot create mr hash table\n");
goto cleanup;
}
if (retval != 0) {
DERR("ia_create: cannot create mw hash table\n");
goto cleanup;
}
if (retval != 0) {
DERR("ia_create: cannot create pd hash table\n");
goto cleanup;
}
if (retval != 0) {
DERR("ia_create: cannot create evd hash table\n");
goto cleanup;
}
if (retval != 0) {
DERR("ia_create: cannot create cno hash table\n");
goto cleanup;
}
if (retval != 0) {
DERR("ia_create: cannot create sp hash table\n");
goto cleanup;
}
if (retval != 0) {
DERR("ia_create: cannot create srq hash table\n");
goto cleanup;
}
/*
* insert ia_rp into the global resource table
*/
if (retval != 0) {
DERR("ia_create: cannot insert resource\n");
goto cleanup;
}
if (retval != 0) {
goto cleanup;
}
return (0);
cleanup:;
if (inserted) {
/*
* we can return here because another thread must
* have freed up the resource
*/
DERR("ia_create: cannot remove resource\n");
return (retval);
}
}
return (retval);
}
/*
* destroys an IA resource
*/
static int
/*
 * NOTE(review): truncated fragment; appears to be daplka_ia_destroy
 * (declared in the file header) -- deregister the Address Record,
 * destroy the per-IA hash tables in dependency order, and drain/free
 * the async EVD event list. Verify against complete daplt.c.
 */
{
int cnt;
/* deregister Address Record */
if (ia_rp->ia_ar_registered) {
}
/*
* destroy hash tables. make sure resources are
* destroyed in the correct order.
*/
/*
* free the async evd list
*/
cnt = 0;
cnt++;
}
if (cnt > 0) {
}
return (0);
}
static void
/*
 * NOTE(review): truncated fragment; judging by the message strings this
 * enqueues async events -- allocate an event (not zeroed; all fields
 * are initialized), look up the IA's async EVD, enqueue and wake
 * waiters, then drop the EVD refcnt; loops (continue) over something
 * not visible here. Verify against complete daplt.c.
 */
{
/*
* Note: this allocation does not zero out the buffer
* since we init all the fields.
*/
DERR("async_event_enqueue: event alloc failed"
continue;
}
/*
* Lookup the async evd corresponding to this ia and enqueue
* evp and wakeup any waiter.
*/
DERR("async_event_enqueue: ia_rp(%p) asycn_evd %llx "
continue;
}
/* decrement refcnt on async_evd */
}
}
/*
* This routine is called in kernel context
*/
/* ARGSUSED */
static void
/*
 * NOTE(review): truncated fragment; daplka_rc_async_handler (named in
 * its own messages) -- RC-channel async callback: validate the channel
 * handle and its private data, take an EP reference, look up the
 * owning IA by resource number, and chain an async event to the IA's
 * async EVD. Verify against complete daplt.c.
 */
{
DERR("daplka_rc_async_handler: ev_chan_hdl is NULL\n");
return;
}
DERR("daplka_rc_async_handler: chan_private is NULL\n");
return;
}
/* grab a reference to this ep */
/*
* The endpoint resource has the resource number corresponding to
* the IA resource. Use that to lookup the ia resource entry
*/
D2("daplka_rc_async_handler: resource (%d) not found\n",
ia_rnum);
return;
}
/*
* Create an async event and chain it to the async evd
*/
}
/*
* This routine is called in kernel context
*/
/* ARGSUSED */
static void
/*
 * NOTE(review): truncated fragment; daplka_cq_async_handler (named in
 * its own messages) -- CQ async callback: fetch the CQ private data,
 * take an EVD reference, look up the owning IA by resource number, and
 * chain an async event to the IA's async EVD, releasing the refs taken.
 * Verify against complete daplt.c.
 */
{
return;
DERR("daplka_cq_async_handler: get cq private(%p) failed\n",
return;
}
/* grab a reference to this evd resource */
/*
* The endpoint resource has the resource number corresponding to
* the IA resource. Use that to lookup the ia resource entry
*/
DERR("daplka_cq_async_handler: resource (%d) not found\n",
ia_rnum);
return;
}
/*
* Create an async event and chain it to the async evd
*/
/* release all the refcount that were acquired */
}
/*
* This routine is called in kernel context, handles unaffiliated async errors
*/
/* ARGSUSED */
static void
/*
 * NOTE(review): truncated fragment; per the comment above it this
 * handles unaffiliated async errors -- walks the global resource table
 * for IAs matching the hca_hdl and (apparently) walks each IA's ep hash
 * table under a reader lock; a NULL dgid means local port up. The inner
 * conditionals are missing; verify against complete daplt.c.
 */
{
int i, j;
/*
* Walk the resource table looking for an ia that matches the
* hca_hdl.
*/
for (i = 0; i < daplka_resource.daplka_rc_len; i++) {
continue;
for (j = 0; j < DAPLKA_RC_BLKSZ; j++) {
continue;
}
/*
* rp is an IA resource check if it belongs
*/
/*
* walk the ep hash table. Acquire a
* reader lock. NULL dgid indicates
* local port up event.
*/
}
}
}
}
static int
/*
 * NOTE(review): truncated fragment; returns IBT_HCA_RESOURCES_NOT_FREED
 * when the matching HCA is busy and IBT_FAILURE otherwise -- presumably
 * daplka_handle_hca_detach_event (called from daplka_async_handler
 * below). The HCA-list iteration and success path are missing; verify
 * against complete daplt.c.
 */
{
/*
* find the hca with the matching guid
*/
if (DAPLKA_HCA_BUSY(hca)) {
return (IBT_HCA_RESOURCES_NOT_FREED);
}
break;
}
}
return (IBT_FAILURE);
}
/*
* This routine is called in kernel context
*/
static void
/*
 * NOTE(review): truncated fragment; the driver's top-level IBTF async
 * handler -- dispatches on the async code: RC-channel errors, CQ
 * errors, port down/up (APM failback when daplka_apm is set), HCA
 * attach/detach, and path-migrated. Signature and several case labels
 * are missing; verify against complete daplt.c.
 */
{
switch (code) {
/* These events are affiliated with a the RC channel */
break;
case IBT_ERROR_CQ:
/* This event is affiliated with a the CQ */
D2("daplka_async_handler(): IBT_ERROR_CQ\n");
break;
case IBT_ERROR_PORT_DOWN:
D2("daplka_async_handler(): IBT_PORT_DOWN\n");
break;
case IBT_EVENT_PORT_UP:
D2("daplka_async_handler(): IBT_PORT_UP\n");
if (daplka_apm) {
event);
}
break;
case IBT_HCA_ATTACH_EVENT:
/*
* NOTE: In some error recovery paths, it is possible to
* receive IBT_HCA_ATTACH_EVENTs on already known HCAs.
*/
D2("daplka_async_handler(): IBT_HCA_ATTACH\n");
break;
case IBT_HCA_DETACH_EVENT:
D2("daplka_async_handler(): IBT_HCA_DETACH\n");
/* Free all hca resources and close the HCA. */
(void) daplka_handle_hca_detach_event(event);
break;
case IBT_EVENT_PATH_MIGRATED:
/* This event is affiliated with APM */
D2("daplka_async_handler(): IBT_PATH_MIGRATED.\n");
break;
default:
break;
}
}
/*
* This routine is called in kernel context related to Subnet events
*/
/*ARGSUSED*/
static void
/*
 * NOTE(review): truncated fragment; subnet-manager notice handler --
 * on GID_AVAIL (remote port up) it appears to trigger APM failback
 * when daplka_apm is set (the call between the if and the return is
 * missing); GID_UNAVAIL and unknown codes are logged and ignored.
 * Verify against complete daplt.c.
 */
{
switch (code) {
case IBT_SM_EVENT_GID_AVAIL:
/* This event is affiliated with remote port up */
D2("daplka_sm_notice_handler(): IBT_SM_EVENT_GID_AVAIL\n");
if (daplka_apm)
return;
case IBT_SM_EVENT_GID_UNAVAIL:
/* This event is affiliated with remote port down */
D2("daplka_sm_notice_handler(): IBT_SM_EVENT_GID_UNAVAIL\n");
return;
default:
D2("daplka_sm_notice_handler(): unhandled IBT_SM_EVENT_[%d]\n",
code);
return;
}
}
/*
* This routine is called in kernel context, handles Subnet GID avail events
* which correspond to remote port up. Setting up alternate path or path
* migration (failback) has to be initiated from the active side of the
* original connect.
*/
static void
/*
 * NOTE(review): truncated fragment; per the comment above it this
 * handles GID-avail (remote port up) -- walks the global resource
 * table for IAs whose gid matches the sgid and walks each matching
 * IA's ep hash table (presumably calling daplka_ep_failback) under a
 * reader lock. Inner conditionals are missing; verify against complete
 * daplt.c.
 */
{
int i, j;
D2("daplka_sm_gid_avail: sgid=%llx:%llx dgid=%llx:%llx\n",
/*
* Walk the resource table looking for an ia that matches the sgid
*/
for (i = 0; i < daplka_resource.daplka_rc_len; i++) {
continue;
for (j = 0; j < DAPLKA_RC_BLKSZ; j++) {
continue;
}
/*
* rp is an IA resource check if its gid
* matches with the calling sgid
*/
if ((sgid->gid_prefix ==
/*
* walk the ep hash table. Acquire a
* reader lock.
*/
}
}
}
}
/*
* This routine is called in kernel context to get and set an alternate path
*/
static int
/*
 * NOTE(review): truncated fragment; daplka_ep_altpath (named in its
 * messages) -- ibt_get_alt_path followed by ibt_set_alt_path; returns
 * 1 on failure of either, 0 on success. The actual call expressions
 * are missing; verify against complete daplt.c.
 */
{
D2("daplka_ep_altpath : ibt_get_alt_path()\n");
}
if (status != IBT_SUCCESS) {
DERR("daplka_ep_altpath : ibt_get_alt_path failed %d\n",
status);
return (1);
}
D2("daplka_ep_altpath : ibt_set_alt_path()\n");
if ((status != IBT_SUCCESS) ||
DERR("daplka_ep_altpath : ibt_set_alt_path failed "
return (1);
}
return (0);
}
/*
* This routine is called in kernel context to failback to the original path
*/
static int
/*
 * NOTE(review): truncated fragment; daplka_ep_failback -- hash-walk
 * callback (caller holds the ep table read lock) that skips
 * unconnected, loopback, and unrelated endpoints, sets the alternate
 * path via daplka_ep_altpath, polls ibt_query_rc_channel up to
 * daplka_query_aft_setaltpath times for the ARMed migration state, and
 * then ibt_migrate_path back to the original path. Always returns 0 so
 * the walk continues. Many conditionals/calls are missing; verify
 * against complete daplt.c.
 */
{
int i;
D2("daplka_ep_failback ep : sgid=%llx:%llx dgid=%llx:%llx\n",
/*
* daplka_ep_failback is called from daplka_hash_walk
* which holds the read lock on hash table to protect
* the endpoint resource from removal
*/
/* check for unconnected endpoints */
/* first check for ep state */
D2("daplka_ep_failback : endpoints not connected\n");
return (0);
}
/* second check for gids */
D2("daplka_ep_failback : skip unconnected endpoints\n");
return (0);
}
/*
* matching destination ep
* when dgid is NULL, the async event is a local port up.
* dgid becomes wild card, i.e. all endpoints match
*/
/* ignore loopback ep */
D2("daplka_ep_failback : skip loopback endpoints\n");
return (0);
}
} else {
/* matching remote ep */
D2("daplka_ep_failback : unrelated endpoints\n");
return (0);
}
}
/* call get and set altpath with original dgid used in ep_connect */
return (0);
}
/*
* wait for migration state to be ARMed
* e.g. a post_send msg will transit mig_state from REARM to ARM
*/
for (i = 0; i < daplka_query_aft_setaltpath; i++) {
if (status != IBT_SUCCESS) {
DERR("daplka_ep_altpath : ibt_query_rc_channel err\n");
return (0);
}
break;
}
D2("daplka_ep_altpath : query[%d] mig_st=%d\n",
i, chan_attrs.rc_mig_state);
D2("daplka_ep_altpath : P sgid=%llx:%llx dgid=%llx:%llx\n",
D2("daplka_ep_altpath : A sgid=%llx:%llx dgid=%llx:%llx\n",
/* skip failback on ARMed state not reached or env override */
if ((i >= daplka_query_aft_setaltpath) || (daplka_failback == 0)) {
DERR("daplka_ep_altpath : ARMed state not reached\n");
return (0);
}
D2("daplka_ep_failback : ibt_migrate_path() to original ep\n");
if (status != IBT_SUCCESS) {
DERR("daplka_ep_failback : migration failed "
"status %d\n", status);
return (0);
}
/* call get and altpath with NULL dgid to indicate unspecified dgid */
return (0);
}
/*
* IBTF wrappers used for resource accounting
*/
static ibt_status_t
/*
 * NOTE(review): truncated fragment; resource-accounting wrapper around
 * ibt_alloc_rc_channel (see the "IBTF wrappers" comment above) --
 * enforces the daplka_max_qp_percent limit before allocating. The
 * limit computation and the ibt call are missing; verify against
 * complete daplt.c.
 */
{
if (acct_enabled) {
if (daplka_max_qp_percent != 0 &&
DERR("ibt_alloc_rc_channel: resource limit exceeded "
"(limit %d, count %d)\n", max_qps,
return (IBT_INSUFF_RESOURCE);
}
}
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_free_channel -- on success, refunds the accounting charge held
 * by the ep resource. Verify against complete daplt.c.
 */
{
if (status != IBT_SUCCESS) {
return (status);
}
if (DAPLKA_RS_ACCT_CHARGED(ep_rp) > 0) {
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_alloc_cq -- enforces the daplka_max_cq_percent limit before
 * allocating. Verify against complete daplt.c.
 */
{
if (acct_enabled) {
if (daplka_max_cq_percent != 0 &&
DERR("ibt_alloc_cq: resource limit exceeded "
"(limit %d, count %d)\n", max_cqs,
return (IBT_INSUFF_RESOURCE);
}
}
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_free_cq -- on success, refunds the accounting charge held by the
 * evd resource. Verify against complete daplt.c.
 */
{
if (status != IBT_SUCCESS) {
return (status);
}
if (DAPLKA_RS_ACCT_CHARGED(evd_rp) > 0) {
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_alloc_pd -- enforces the daplka_max_pd_percent limit before
 * allocating. Verify against complete daplt.c.
 */
{
if (acct_enabled) {
if (daplka_max_pd_percent != 0 &&
DERR("ibt_alloc_pd: resource limit exceeded "
"(limit %d, count %d)\n", max_pds,
return (IBT_INSUFF_RESOURCE);
}
}
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_free_pd -- on success, refunds the accounting charge held by the
 * pd resource. Verify against complete daplt.c.
 */
{
if (status != IBT_SUCCESS) {
return (status);
}
if (DAPLKA_RS_ACCT_CHARGED(pd_rp) > 0) {
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_alloc_mw -- enforces the daplka_max_mw_percent limit before
 * allocating. Verify against complete daplt.c.
 */
{
if (acct_enabled) {
if (daplka_max_mw_percent != 0 &&
DERR("ibt_alloc_mw: resource limit exceeded "
"(limit %d, count %d)\n", max_mws,
return (IBT_INSUFF_RESOURCE);
}
}
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_free_mw -- on success, refunds the accounting charge held by the
 * mw resource. Verify against complete daplt.c.
 */
{
if (status != IBT_SUCCESS) {
return (status);
}
if (DAPLKA_RS_ACCT_CHARGED(mw_rp) > 0) {
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_register_mr -- enforces the daplka_max_mr_percent limit before
 * registering. Verify against complete daplt.c.
 */
{
if (acct_enabled) {
if (daplka_max_mr_percent != 0 &&
DERR("ibt_register_mr: resource limit exceeded "
"(limit %d, count %d)\n", max_mrs,
return (IBT_INSUFF_RESOURCE);
}
}
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_register_shared_mr -- enforces the same daplka_max_mr_percent
 * limit as the plain register path. Verify against complete daplt.c.
 */
{
if (acct_enabled) {
if (daplka_max_mr_percent != 0 &&
DERR("ibt_register_shared_mr: resource limit exceeded "
"(limit %d, count %d)\n", max_mrs,
return (IBT_INSUFF_RESOURCE);
}
}
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_deregister_mr -- on success, refunds the accounting charge held
 * by the mr resource. Verify against complete daplt.c.
 */
{
if (status != IBT_SUCCESS) {
return (status);
}
if (DAPLKA_RS_ACCT_CHARGED(mr_rp) > 0) {
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_alloc_srq -- enforces the daplka_max_srq_percent limit before
 * allocating. Verify against complete daplt.c.
 */
{
if (acct_enabled) {
if (daplka_max_srq_percent != 0 &&
DERR("ibt_alloc_srq: resource limit exceeded "
"(limit %d, count %d)\n", max_srqs,
return (IBT_INSUFF_RESOURCE);
}
}
}
return (status);
}
static ibt_status_t
/*
 * NOTE(review): truncated fragment; accounting wrapper around
 * ibt_free_srq -- on success, refunds the accounting charge held by
 * the srq resource. Verify against complete daplt.c.
 */
{
if (status != IBT_SUCCESS) {
return (status);
}
if (DAPLKA_RS_ACCT_CHARGED(srq_rp) > 0) {
}
return (status);
}
static int
/*
 * NOTE(review): truncated fragment; common-ioctl dispatcher -- routes
 * DAPL_IA_CREATE (the case's call is missing from this extraction).
 * Verify against complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_IA_CREATE:
break;
/* can potentially add other commands here */
default:
DERR("daplka_common_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
static int
/*
 * NOTE(review): truncated fragment; EVD ioctl dispatcher -- the per-case
 * handler calls are missing from this extraction. Verify against
 * complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_EVD_CREATE:
break;
case DAPL_CQ_RESIZE:
break;
case DAPL_EVENT_POLL:
break;
case DAPL_EVENT_WAKEUP:
break;
case DAPL_EVD_MODIFY_CNO:
break;
case DAPL_EVD_FREE:
break;
default:
DERR("daplka_evd_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
static int
/*
 * NOTE(review): truncated fragment; EP ioctl dispatcher -- the per-case
 * handler calls are missing from this extraction. Verify against
 * complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_EP_MODIFY:
break;
case DAPL_EP_FREE:
break;
case DAPL_EP_CONNECT:
break;
case DAPL_EP_DISCONNECT:
break;
case DAPL_EP_REINIT:
break;
case DAPL_EP_CREATE:
break;
default:
DERR("daplka_ep_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
static int
/*
 * NOTE(review): truncated fragment; MR ioctl dispatcher -- the per-case
 * handler calls are missing from this extraction. Verify against
 * complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_MR_REGISTER:
break;
case DAPL_MR_REGISTER_LMR:
break;
case DAPL_MR_REGISTER_SHARED:
break;
case DAPL_MR_DEREGISTER:
break;
case DAPL_MR_SYNC:
break;
default:
DERR("daplka_mr_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
static int
/*
 * NOTE(review): truncated fragment; MW ioctl dispatcher -- the per-case
 * handler calls are missing from this extraction. Verify against
 * complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_MW_ALLOC:
break;
case DAPL_MW_FREE:
break;
default:
DERR("daplka_mw_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
static int
/*
 * NOTE(review): truncated fragment; CNO ioctl dispatcher -- the per-case
 * handler calls are missing from this extraction. Verify against
 * complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_CNO_ALLOC:
break;
case DAPL_CNO_FREE:
break;
case DAPL_CNO_WAIT:
break;
default:
DERR("daplka_cno_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
static int
/*
 * NOTE(review): truncated fragment; PD ioctl dispatcher -- the per-case
 * handler calls are missing from this extraction. Verify against
 * complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_PD_ALLOC:
break;
case DAPL_PD_FREE:
break;
default:
DERR("daplka_pd_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
static int
/*
 * NOTE(review): truncated fragment; SP ioctl dispatcher -- the per-case
 * handler calls are missing from this extraction. Verify against
 * complete daplt.c.
 */
{
int error;
switch (cmd) {
case DAPL_SERVICE_REGISTER:
break;
case DAPL_SERVICE_DEREGISTER:
break;
default:
DERR("daplka_sp_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
/*
 * SRQ (shared receive queue) ioctl dispatcher — presumably
 * daplka_srq_ioctl; the signature line is elided in this extract.
 * Handles create, resize and free; unknown commands return DDI_FAILURE.
 *
 * NOTE(review): unlike every sibling dispatcher in this file, the
 * default arm emits no DERR("... cmd not supported") message — confirm
 * against the full source whether this is intentional.
 */
static int
{
int error;
switch (cmd) {
case DAPL_SRQ_CREATE:
break;
case DAPL_SRQ_RESIZE:
break;
case DAPL_SRQ_FREE:
break;
default:
error = DDI_FAILURE;
break;
}
return (error);
}
/*
 * daplka_misc_ioctl: dispatches miscellaneous ioctl commands —
 * connection request accept/reject/handoff and IA query. Unknown
 * commands return DDI_FAILURE. (Signature and per-case handler calls
 * elided in this extract.)
 */
static int
{
int error;
switch (cmd) {
case DAPL_CR_ACCEPT:
break;
case DAPL_CR_REJECT:
break;
case DAPL_IA_QUERY:
break;
case DAPL_CR_HANDOFF:
break;
default:
DERR("daplka_misc_ioctl: cmd not supported\n");
error = DDI_FAILURE;
}
return (error);
}
/*ARGSUSED*/
/*
 * Top-level ioctl(9E) entry point for the driver (signature and many
 * statements elided in this extract). Visible logic: look up the IA
 * resource for the caller (ENXIO if the lookup fails), allow "reserved"
 * slots through early, verify the calling process owns the IA (the
 * "ia_pid != pid" DERR path), then demultiplex on the type bits of the
 * command (cmd & DAPL_TYPE_MASK) to the per-type dispatchers above.
 */
static int
int *rvalp)
{
int error = 0;
/* no resource associated with this minor — nothing to operate on */
return (ENXIO);
}
/* slot is only reserved (open() done, IA not yet created) */
if (DAPLKA_RS_RESERVED(ia_rp)) {
return (error);
}
goto cleanup;
}
/* caller is not the process that created this IA — reject */
DERR("ioctl: ia_pid %d != pid %d\n",
goto cleanup;
}
/* route to the per-resource-type ioctl dispatcher */
switch (cmd & DAPL_TYPE_MASK) {
case DAPL_TYPE_EVD:
break;
case DAPL_TYPE_EP:
break;
case DAPL_TYPE_MR:
break;
case DAPL_TYPE_MW:
break;
case DAPL_TYPE_PD:
break;
case DAPL_TYPE_SP:
break;
case DAPL_TYPE_CNO:
break;
case DAPL_TYPE_MISC:
break;
case DAPL_TYPE_SRQ:
break;
default:
error = DDI_FAILURE;
}
cleanup:;
return (error);
}
/* ARGSUSED */
/*
 * open(9E) entry point (signature elided in this extract). Rejects
 * non-character opens (EINVAL) and opens of any minor other than zero
 * (ENODEV) — clone minors are handed out here instead: a slot is
 * reserved in the global resource table and devp is updated to the new
 * clone device. Fails with ENOMEM if no slot can be reserved.
 */
static int
{
/*
* Char only
*/
return (EINVAL);
}
/*
* Only zero can be opened, clones are used for resources.
*/
return (ENODEV);
}
/*
* - allocate new minor number
* - update devp argument to new device
*/
if (daplka_resource_reserve(&rnum) == 0) {
} else {
return (ENOMEM);
}
return (DDI_SUCCESS);
}
/* ARGSUSED */
/*
 * close(9E) entry point (signature elided in this extract). Rejects
 * non-character closes (EINVAL), then removes the clone's entry from
 * the global resource table and drops the initial reference taken at
 * create time.
 */
static int
{
/*
* Char only
*/
return (EINVAL);
}
/*
* remove from resource table.
*/
/*
* remove the initial reference
*/
}
return (DDI_SUCCESS);
}
/*
* Resource management routines
*
* We start with no resource array. Each time we run out of slots, we
* reallocate a new larger array and copy the pointer to the new array and
* a new resource blk is allocated and added to the hash table.
*
* The resource control block contains:
* root - array of pointer of resource blks
* sz - current size of array.
* len - last valid entry in array.
*
* A search operation based on a resource number is as follows:
* index = rnum / RESOURCE_BLKSZ;
* ASSERT(index < resource_block.len);
* ASSERT(index < resource_block.sz);
* offset = rnum % RESOURCE_BLKSZ;
* ASSERT(offset >= resource_block.root[index]->base);
* ASSERT(offset < resource_block.root[index]->base + RESOURCE_BLKSZ);
* return resource_block.root[index]->blks[offset];
*
* A resource blk is freed when its used count reaches zero.
*/
/*
* initializes the global resource table
*/
/*
 * Initializes the global resource table (body statements elided in this
 * extract — presumably sets up daplka_resource locks/fields; confirm
 * against the full source).
 */
static void
daplka_resource_init(void)
{
}
/*
* destroys the global resource table
*/
/*
 * Tears down the global resource table: walks every allocated resource
 * blk, warns (DERR) about any slot still occupied, frees each blk, and
 * finally frees the root pointer array. (Several statements, including
 * the kmem_free calls this sizeof expression belongs to, are elided in
 * this extract.)
 */
static void
daplka_resource_fini(void)
{
int i;
for (i = 0; i < daplka_resource.daplka_rc_len; i++) {
int j;
/* empty root slot — nothing to free here */
continue;
}
for (j = 0; j < DAPLKA_RC_BLKSZ; j++) {
/* leaked resource: slot should be NULL by now */
DERR("resource_fini: non-null slot %d, %p\n",
j, blk->daplka_rcblk_blks[j]);
}
}
}
/* size operand of the (elided) free of the root pointer array */
sizeof (daplka_resource_blk_t *);
}
}
/*
* reserves a slot in the global resource table.
* this is called by the open() syscall. it is needed because
* at open() time, we do not have sufficient information to
* create an IA resource. the library needs to subsequently
* call daplka_ia_create to insert an IA resource into this
* reserved slot.
*/
/*
 * Reserves an empty slot in the global resource table and returns its
 * resource number through the (elided) rnum argument. Strategy, as far
 * as visible here: scan existing blks for a free slot; if found, mark
 * it (DAPLKA_RC_RESERVED, per resource_remove/insert below) and return
 * 0. Otherwise grow the root array if full, allocate a fresh blk and
 * take a slot from it. (Signature, locking and many statements are
 * elided in this extract.)
 */
static int
{
/*
* Try to find an empty slot
*/
for (i = 0; i < daplka_resource.daplka_rc_len; i++) {
D3("resource_alloc: available blks %d\n",
/*
* found an empty slot in this blk
*/
for (j = 0; j < DAPLKA_RC_BLKSZ; j++) {
/* rnum = block index * blk size + slot offset */
(j + (i * DAPLKA_RC_BLKSZ));
blk->daplka_rcblk_blks[j] =
return (0);
}
}
/*
* remember first empty slot
*/
empty = i;
}
}
/*
* Couldn't find anything, allocate a new blk
* Do we need to reallocate the root array
*/
if (empty < 0) {
if (daplka_resource.daplka_rc_len ==
/*
* Allocate new array and copy current stuff into it
*/
D3("resource_alloc: increasing no. of buckets to %d\n",
newsz);
if (daplka_resource.daplka_rc_root) {
(int)sizeof (*p));
/*
* Copy old data into new space and
* free old stuff
*/
oldsz);
}
}
D3("resource_alloc: daplka_rc_len %d\n",
}
/*
* Allocate a new blk
*/
/*
* Allocate slot
*/
return (0);
}
/*
* removes resource from global resource table
*/
/*
 * Removes the resource identified by rnum from the global resource
 * table and returns it; returns NULL if rnum is out of range, the blk
 * or slot is empty, or the slot only held a reservation
 * (DAPLKA_RC_RESERVED) rather than a real resource. Frees the blk when
 * its last used slot is released. (Signature, locking and several
 * statements are elided in this extract.)
 */
static daplka_resource_t *
{
int i, j;
/* map rnum to (blk index, slot offset) within the blk */
i = (int)(rnum / DAPLKA_RC_BLKSZ);
j = (int)(rnum % DAPLKA_RC_BLKSZ);
if (i >= daplka_resource.daplka_rc_len) {
return (NULL);
}
return (NULL);
}
DERR("resource_remove: blk->daplka_rcblk_blks[j] == NULL\n");
return (NULL);
}
p = blk->daplka_rcblk_blks[j];
/*
* free this blk
*/
}
/* slot held only a reservation, not an actual resource */
if ((intptr_t)p == DAPLKA_RC_RESERVED) {
return (NULL);
} else {
return (p);
}
}
/*
* inserts resource into the slot designated by rnum
*/
/*
 * Inserts a resource into the table slot previously reserved under
 * rnum. Succeeds (error = 0) only when the slot currently holds the
 * reservation marker; emits a DERR and fails otherwise. Returns -1 if
 * rnum is beyond the current table length. (Signature, locking and the
 * actual store are elided in this extract.)
 */
static int
{
/*
* Find resource and lock it in WRITER mode
* search for available resource slot
*/
i = (int)(rnum / DAPLKA_RC_BLKSZ);
j = (int)(rnum % DAPLKA_RC_BLKSZ);
if (i >= daplka_resource.daplka_rc_len) {
return (-1);
}
error = 0;
} else {
/* slot was never reserved via daplka_resource_reserve */
DERR("resource_insert: %d not reserved, blk = %p\n",
}
} else {
}
return (error);
}
/*
* finds resource using minor device number
*/
/*
 * Looks up a resource by minor device number (rnum) and returns it, or
 * NULL when rnum is out of range or the slot is empty. Per the inline
 * comment, the table is searched under a READER lock; the lock
 * enter/exit statements are elided in this extract.
 */
static daplka_resource_t *
{
int i, j;
/*
* Find resource and lock it in READER mode
* search for available resource slot
*/
i = (int)(rnum / DAPLKA_RC_BLKSZ);
j = (int)(rnum % DAPLKA_RC_BLKSZ);
if (i >= daplka_resource.daplka_rc_len) {
return (NULL);
}
D3("resource_lookup: %d not found, blk = %p\n",
} else {
}
} else {
}
return (rp);
}
/*
* generic hash table implementation
*/
/*
* daplka_hash_create:
* initializes a hash table with the specified parameters
*
* input:
* htblp pointer to hash table
*
* nbuckets number of buckets (must be power of 2)
*
* free_func this function is called on each hash
* table element when daplka_hash_destroy
* is called
*
* lookup_func if daplka_hash_lookup is able to find
* the desired object, this function is
* applied on the object before
* daplka_hash_lookup returns
* output:
* none
*
* return value(s):
* EINVAL nbuckets is not a power of 2
* ENOMEM cannot allocate buckets
* 0 success
*/
/*
 * See the contract documented in the block comment above: validates
 * that nbuckets is a power of 2 (EINVAL otherwise), allocates the
 * bucket array (ENOMEM on failure) and initializes each bucket.
 * (Parts of the signature and several statements are elided in this
 * extract.)
 */
static int
void (*free_func)(void *), void (*lookup_func)(void *))
{
int i;
DERR("hash_create: nbuckets not power of 2\n");
return (EINVAL);
}
htblp->ht_buckets =
DERR("hash_create: cannot allocate buckets\n");
return (ENOMEM);
}
/* per-bucket initialization (statements elided in this extract) */
for (i = 0; i < nbuckets; i++) {
}
return (0);
}
/*
* daplka_hash_insert:
* inserts an object into a hash table
*
* input:
* htblp pointer to hash table
*
* hkeyp pointer to hash key.
* *hkeyp being non-zero means that the caller
* has generated its own hkey. if *hkeyp is zero,
* this function will generate an hkey for the
* caller. it is recommended that the caller
* leave the hkey generation to this function
* because the hkey is more likely to be evenly
* distributed.
*
* objp pointer to object to be inserted into
* hash table
*
* output:
* hkeyp the generated hkey is returned via this pointer
*
* return value(s):
* EINVAL invalid parameter
* ENOMEM cannot allocate hash entry
* 0 successful
*/
/*
 * See the contract documented in the block comment above: inserts objp
 * under *hkeyp, generating a key when *hkeyp == 0, and rejects NULL
 * hkeyp, allocation failure, and duplicate keys. (Signature and a
 * number of statements are elided in this extract.)
 */
static int
{
DERR("hash_insert: hkeyp == NULL\n");
return (EINVAL);
}
DERR("hash_insert: cannot alloc hash_entry\n");
return (ENOMEM);
}
if (*hkeyp == 0) {
/* generate a new key */
if (hkey == 0) {
}
} else {
/* use user generated key */
}
/* only works if ht_nbuckets is a power of 2 */
/* look for duplicate entries */
break;
}
}
DERR("hash_insert: found duplicate hash entry: "
"bucket %d, hkey 0x%016llx\n",
return (EINVAL);
}
/* return the generated key to the caller */
if (*hkeyp == 0) {
}
D3("hash_insert: htblp 0x%p, hkey = 0x%016llx, bucket = %d\n",
return (0);
}
/*
* daplka_hash_remove:
* removes object identified by hkey from hash table
*
* input:
* htblp pointer to hash table
*
* hkey hkey that identifies the object to be removed
*
* output:
* objpp pointer to pointer to object.
* if remove is successful, the removed object
* will be returned via *objpp.
*
* return value(s):
* EINVAL cannot find hash entry
* 0 successful
*/
/*
 * See the contract documented in the block comment above: finds the
 * entry for hkey in its bucket chain, unlinks it, and returns the
 * removed object via objpp; EINVAL when no entry matches. (Signature
 * and unlink statements are elided in this extract.)
 */
static int
{
break;
}
}
DERR("hash_remove: cannot find hash entry: "
return (EINVAL);
} else {
}
}
D3("hash_remove: removed entry, hkey 0x%016llx, bucket %d, "
"hb_count %d, hb_count %d\n",
return (0);
}
/*
* daplka_hash_walk:
* walks through the entire hash table. applying func on each of
* the inserted objects. stops walking if func returns non-zero.
*
* input:
* htblp pointer to hash table
*
* func function to be applied on each object
*
* farg second argument to func
*
* lockmode can be RW_WRITER or RW_READER. this
* allows the caller to choose what type
* of lock to acquire before walking the
* table.
*
* output:
* none
*
* return value(s):
* none
*/
/*
 * See the contract documented in the block comment above: applies func
 * to every object in the table, stopping early when func returns
 * non-zero. (Signature, lock acquisition and the bucket/entry loops
 * are elided in this extract.)
 */
static void
{
/* needed for warlock */
} else {
}
/* func requested early termination of the walk */
if (retval != 0) {
break;
}
}
}
}
/*
* daplka_hash_lookup:
* finds object from hkey
*
* input:
* htblp pointer to hash table
*
* hkey hkey that identifies the object to be looked up
*
* output:
* none
*
* return value(s):
* NULL if not found
* object pointer if found
*/
/*
 * See the contract documented in the block comment above: scans the
 * bucket chain for hkey and returns the stored object, applying the
 * table's lookup_func to it first (per daplka_hash_create); NULL when
 * not found. (Signature and loop header elided in this extract.)
 */
static void *
{
void *objp;
break;
}
}
DERR("hash_lookup: cannot find hash entry: "
return (NULL);
}
}
return (objp);
}
/*
* daplka_hash_destroy:
* destroys hash table. applies free_func on all inserted objects.
*
* input:
* htblp pointer to hash table
*
* output:
* none
*
* return value(s):
* none
*/
/*
 * See the contract documented in the block comment above. Two-phase
 * teardown: first collect all entries while holding the table lock,
 * then apply free_func to each object without the lock held (avoids
 * calling consumer callbacks under the lock), finally release the
 * bucket array and locks. (Signature and several statements are elided
 * in this extract.)
 */
static void
{
if (!htblp->ht_initialized) {
DERR("hash_destroy: not initialized\n");
return;
}
/* free all elements from hash table */
/* build list of elements to be freed */
cnt = 0;
cnt++;
}
}
D3("hash_destroy: htblp 0x%p, nbuckets %d, freed %d hash entries\n",
/* free all objects, now without holding the hash table lock */
cnt = 0;
cnt++;
}
}
/* free hash buckets and destroy locks */
htblp->ht_nbuckets = 0;
}
/*
* daplka_hash_getsize:
* return the number of objects in hash table
*
* input:
* htblp pointer to hash table
*
* output:
* none
*
* return value(s):
* number of objects in hash table
*/
/*
 * See the contract documented in the block comment above: returns the
 * current number of objects in the hash table. (Signature and the
 * computation of sz are elided in this extract.)
 */
static uint32_t
{
return (sz);
}
/*
* this function is used as ht_lookup_func above when lookup is called.
* other types of objs may use a more elaborate lookup_func.
*/
/*
 * Generic ht_lookup_func used by daplka_hash_lookup (per the comment
 * above). Its signature and body are elided in this extract —
 * presumably it takes a reference/hold on the object; confirm against
 * the full source.
 */
static void
{
}
/*
* Generates a non-zero 32 bit hash key used for the timer hash table.
*/
/*
 * Generates a non-zero 32-bit key for the timer hash table (per the
 * comment above): loops until the candidate key is non-zero. The key
 * generation expression itself is elided in this extract.
 */
static uint32_t
{
do {
} while (new_hkey == 0);
return (new_hkey);
}
/*
* The DAPL KA debug logging routines
*/
/*
* Add the string str to the end of the debug log, followed by a newline.
*/
/*
 * Appends a string plus newline to the circular debug log buffer
 * daplka_dbgbuf (see the block comment above). Lazily initializes the
 * log on first use and wraps daplka_dbgnext back to 0 when the write
 * would run past the end of the buffer. (Signature and the copy
 * statements are elided in this extract.)
 */
static void
{
/*
* If this is the first time we've written to the log, initialize it.
*/
if (!daplka_dbginit) {
return;
}
/*
* Note the log is circular; if this string would run over the end,
* we copy the first piece to the end and then the last piece to
* the beginning of the log.
*/
if (remlen)
daplka_dbgnext = 0;
}
/* advance the write cursor, wrapping at the end of the buffer */
daplka_dbgnext += length;
if (daplka_dbgnext >= sizeof (daplka_dbgbuf))
daplka_dbgnext = 0;
}
/*
* Add a printf-style message to whichever debug logs we're currently using.
*/
/*
 * printf-style debug logger (see comment above): formats the message,
 * prefixed with the thread id and a truncated high-resolution
 * timestamp in units of 10 microseconds (wraps every 10000 seconds),
 * and adds it to the active debug log(s). Formatting statements are
 * elided in this extract.
 */
static void
{
/*
* The system prepends the thread id and high resolution time
* (nanoseconds are dropped and so are the upper digits)
* to the specified string.
* The unit for timestamp is 10 microseconds.
* It wraps around every 10000 seconds.
* Ex: gethrtime() = X ns = X/1000 us = X/10000 10 micro sec.
*/
}
/*
 * daplka_console (per the forward declaration in the file header):
 * printf-style output routine — body elided in this extract,
 * presumably emitting to the system console via cmn_err; confirm
 * against the full source.
 */
static void
{
}