ibtl_handlers.c revision 76c04273c82e93b83f826e73f096a3ece549a8f9
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
*/
/*
* What's in this file?
*
* This file started as an implementation of Asynchronous Event/Error
* handling and Completion Queue handling. As the implementation
* evolved, code has been added for other ibc_* interfaces (resume,
* predetach, etc.) that use the same mechanisms as used for asyncs.
*
* Async and CQ handling at interrupt level.
*
* CQ handling is normally done at interrupt level using the CQ callback
* handler to call the appropriate IBT Client (owner of the CQ). For
* clients that would prefer a fully flexible non-interrupt context to
* do their CQ handling, a CQ can be created so that its handler is
* called from a non-interrupt thread. CQ handling is done frequently
* whereas Async handling is expected to occur very infrequently.
*
* Async handling is done by marking (or'ing in of an async_code of) the
* pertinent IBTL data structure, and then notifying the async_thread(s)
* that the data structure has async work to be done. The notification
* occurs by linking the data structure through its async_link onto a
* list of like data structures and waking up an async_thread. This
* list append is not done if there is already async work pending on
* this data structure (IBTL_ASYNC_PENDING).
*
* Async Mutex and CQ Mutex
*
* The global ibtl_async_mutex is "the" mutex used to control access
* to all the data needed by ibc_async_handler. All the threads that
* use this mutex are written so that the mutex is held for very short
* periods of time, and never held while making calls to functions
* that may block.
*
* The global ibtl_cq_mutex is used similarly by ibc_cq_handler and
* the ibtl_cq_thread(s).
*
* Mutex hierarchy
*
* The ibtl_clnt_list_mutex is above the ibtl_async_mutex.
* ibtl_clnt_list_mutex protects all of the various lists.
* The ibtl_async_mutex is below this in the hierarchy.
*
* The ibtl_cq_mutex is independent of the above mutexes.
*
* Threads
*
* There are "ibtl_cq_threads" number of threads created for handling
* Completion Queues in threads. If this feature really gets used,
* then we will want to do some suitable tuning. Similarly, we may
* want to tune the number of "ibtl_async_thread_init".
*
* The function ibtl_cq_thread is the main loop for handling a CQ in a
* thread. There can be multiple threads executing this same code.
* The code sleeps when there is no work to be done (list is empty),
* otherwise it pulls the first CQ structure off the list and performs
* the CQ handler callback to the client. After that returns, a check
* is made, and if another ibc_cq_handler call was made for this CQ,
* the client is called again.
*
* The function ibtl_async_thread is the main loop for handling async
* events/errors in a thread.
* The code sleeps when there is no work to be done (lists are empty),
* otherwise it pulls the first structure off one of the lists and
* performs the async callback(s) to the client(s). Note that HCA
* async handling is done by calling each of the clients using the HCA.
* When the async handling completes, the data structure having the async
* is either freed (if marked for free) or made available for more asyncs.
*
* Taskq
*
* The async_taskq is used here for allowing async handler callbacks to
* occur simultaneously to multiple clients of an HCA. This taskq could
* be used for other purposes, e.g., if all the async_threads are in
* use, but this is deemed as overkill since asyncs should occur rarely.
*/
/* Globals */
/*
 * NOTE(review): this chunk appears to be a truncated extraction of the
 * original file; several declarations referenced by the comments in this
 * region are missing from view.  Comments below describe only what is
 * visible here.
 */
static char ibtf_handlers[] = "ibtl_handlers"; /* name string, presumably for debug/log output — TODO confirm */
/* priority for IBTL threads (async, cq, and taskq) */
/* taskq used for HCA asyncs */
#define ibtl_async_taskq system_taskq
/* data for async handling by threads */
static int ibtl_async_thread_exit = 0; /* set if/when thread(s) should exit */
/* async lists for various structures */
/* data for CQ completion handling by threads */
static kcondvar_t ibtl_cq_cv; /* presumably signaled when CQ work is queued — TODO confirm */
static int ibtl_cq_threads = 0; /* total # of cq threads */
static int ibtl_cqs_using_threads = 0; /* total # of cqs using threads */
static int ibtl_cq_thread_exit = 0; /* set if/when thread(s) should exit */
/* value used to tell IBTL threads to exit */
/* Cisco Topspin Vendor ID for Rereg hack */
#define IBT_VENDOR_CISCO 0x05ad
int ibtl_eec_not_supported = 1; /* nonzero => EEC async events are ignored (see ibc_async_handler) */
char *ibtl_last_client_name; /* may help debugging */
/* NOTE(review): dangling fragment of a function-pointer declaration (body lost) */
ibt_node_info_t *);
/* NOTE(review): truncated function definition; signature and body lost in extraction */
void
ib_lid_t, ibt_node_info_t *))
{
}
/*
* ibc_async_handler()
*
*
* This is the function called by HCA drivers to post various async
* events and errors mentioned in the IB architecture spec. See
* ibtl_types.h for additional details of this.
*
* This function marks the pertinent IBTF object with the async_code,
* and queues the object for handling by an ibtl_async_thread. If
* the object is NOT already marked for async processing, it is added
* to the associated list for that type of object, and an
* ibtl_async_thread is signaled to finish the async work.
*/
/*
 * NOTE(review): per the comment block above, this is ibc_async_handler().
 * The signature and many statements have been lost in extraction; only
 * fragments remain.  The visible structure is a switch on the async
 * `code`, with one arm per object type (QP, CQ, SRQ, EEC, HCA/port).
 */
void
{
switch (code) {
case IBT_EVENT_SQD:
case IBT_EVENT_COM_EST_QP:
case IBT_EVENT_EMPTY_QP:
/* QP events: validate handle (error path below), then queue on the QP async list. */
"bad qp handle");
break;
}
switch (code) {
}
/* Append to the QP async list; *_list_end presumably tracks the tail — TODO confirm */
if (ibtl_async_qp_list_end == NULL)
else
}
break;
case IBT_ERROR_CQ:
/* CQ error: validate handle, then queue on the CQ async list. */
"bad cq handle");
break;
}
if (ibtl_async_cq_list_end == NULL)
else
}
break;
/* NOTE(review): SRQ case label(s) lost; same validate-then-queue pattern. */
"bad srq handle");
break;
}
if (ibtl_async_srq_list_end == NULL)
else
}
break;
case IBT_EVENT_COM_EST_EEC:
/* EEC events are optional; drop them when EECs are not supported. */
if (ibtl_eec_not_supported) {
"EEC events are disabled.");
break;
}
"bad eec handle");
break;
}
switch (code) {
}
if (ibtl_async_eec_list_end == NULL)
else
}
break;
/* FALLTHROUGH */
case IBT_EVENT_PORT_UP:
case IBT_PORT_CHANGE_EVENT:
case IBT_CLNT_REREG_EVENT:
case IBT_ERROR_PORT_DOWN:
/* HCA/port-level events: validate the port number, coalesce per-port state, then queue the HCA. */
if ((code & IBT_PORT_EVENTS) != 0) {
"ibc_async_handler: bad port #: %d",
break;
}
if (code == IBT_EVENT_PORT_UP) {
/*
 * The port is just coming UP; we can't have any
 * valid older events.
 */
} else if (code == IBT_ERROR_PORT_DOWN) {
/*
 * The port is going DOWN; older events don't
 * count.
 */
} else if (code == IBT_PORT_CHANGE_EVENT) {
/*
 * For port UP and DOWN events only the latest
 * event counts. If we get a UP after DOWN it
 * is sufficient to send just UP and vice versa.
 * In the case of port CHANGE event it is valid
 * only when the port is UP already but if we
 * receive it after UP but before UP is
 * delivered we still need to deliver CHANGE
 * after we deliver UP event.
 *
 * We will not get a CHANGE event when the port
 * is down or DOWN event is pending.
 */
} else if (code == IBT_CLNT_REREG_EVENT) {
/*
 * SM has requested a re-register of
 * subscription to SM events notification.
 */
}
}
/* Append the HCA devinfo to the HCA async list. */
if (ibtl_async_hca_list_end == NULL)
else
}
break;
default:
"invalid code (0x%x)", code);
}
}
/* Finally, make the async call to the client. */
/*
 * NOTE(review): signature lost in extraction.  Visible behavior: record
 * the client name for debugging, then invoke the client's async handler
 * if one is registered, else log a message.
 */
static void
{
void *client_private;
char *client_name;
/* Record who is being called (just a debugging aid) */
"calling CM for COM_EST");
} else {
"calling client '%s'", client_name);
}
if (async_handler != NULL)
else
"client '%s' has no async handler", client_name);
}
/*
* Inform CM or DM about HCA events.
*
* We use taskqs to allow simultaneous notification, with sleeping.
* Since taskqs only allow one argument, we define a structure
* because we need to pass in more than one argument.
*/
/*
 * Argument bundle for manager (CM/DM) taskq dispatches; taskqs take a
 * single void* argument, so multiple values are packed into this struct.
 * NOTE(review): other members lost in extraction.
 */
struct ibtl_mgr_s {
void *mgr_clnt_private;
};
/*
* Asyncs of HCA level events for CM and DM. Call CM or DM and tell them
* about the HCA for the event recorded in the ibtl_hca_devinfo_t.
*/
static void
ibtl_do_mgr_async_task(void *arg)
{
/* Last manager task done wakes the waiter (cv_signal presumably lost in extraction). */
if (--hca_devp->hd_async_task_cnt == 0)
}
/* NOTE(review): signature lost; appears to be another taskq callback that checks an ibt_status. */
static void
{
if (ibt_status == IBT_SUCCESS) {
}
}
if (--hca_devp->hd_async_task_cnt == 0)
}
/* NOTE(review): signature lost; dispatches a manager notification via the async taskq. */
static void
{
struct ibtl_mgr_s *mgrp;
if (async_handler == NULL)
return;
(void) taskq_dispatch(ibtl_async_taskq,
#ifndef lint
#endif
}
/* NOTE(review): signature partially lost; same dispatch pattern with TQ_SLEEP (may block). */
static void
void *clnt_private)
{
struct ibtl_mgr_s *mgrp;
if (async_handler == NULL)
return;
TQ_SLEEP);
#ifndef lint
#endif
}
/*
* Per client-device asyncs for HCA level events. Call each client that is
* using the HCA for the event recorded in the ibtl_hca_devinfo_t.
*/
/*
 * Taskq callback delivering one HCA-level async to one client.
 * NOTE(review): body largely lost; the visible remainder decrements the
 * per-HCA-handle, per-devinfo, and per-client async counters, presumably
 * waking waiters when each reaches zero — TODO confirm.
 */
static void
ibtl_hca_client_async_task(void *arg)
{
if (--ibt_hca->ha_async_cnt == 0 &&
} else
if (--hca_devp->hd_async_task_cnt == 0)
if (--clntp->clnt_async_cnt == 0)
}
/*
* Asyncs for HCA level events.
*
* The function continues to run until there are no more async
* events/errors for this HCA. Each chosen event is delivered
* to all clients of this HCA. This thread dispatches them via
* the ibtl_async_taskq, then sleeps until all tasks are done.
*
* This thread records the async_code and async_event in the
* ibtl_hca_devinfo_t for all client taskq threads to reference.
*
* This is called from an async or taskq thread with ibtl_async_mutex held.
*/
/*
 * NOTE(review): signature lost; per the comment above, this delivers
 * HCA-level asyncs to all clients, one event code at a time, dispatching
 * per-client tasks on ibtl_async_taskq and waiting for completion.
 * Called with ibtl_async_mutex held.
 */
static void
{
/* Only one thread at a time may deliver asyncs for this HCA. */
while (hca_devp->hd_async_busy)
for (;;) {
/* Pick the highest-priority pending code; order below defines priority. */
if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
} else if (code & IBT_ERROR_PORT_DOWN) {
} else if (code & IBT_EVENT_PORT_UP) {
} else if (code & IBT_PORT_CHANGE_EVENT) {
} else if (code & IBT_CLNT_REREG_EVENT) {
} else {
hca_devp->hd_async_codes = 0;
code = 0;
}
if (code == 0) {
/* nothing (more) to deliver */
break;
}
/* PORT_UP, PORT_CHANGE, PORT_DOWN or ASYNC_REREG */
if ((code & IBT_PORT_EVENTS) != 0) {
/* scan the ports for one with this event pending */
port_minus1++) {
/*
* Matching event in this port, let's go handle
* it.
*/
break;
}
if (port_minus1 >= nports) {
/* we checked again, but found nothing */
continue;
}
/* mark it to check for other ports after we're done */
/*
* Copy the event information into hca_devp and clear
* event information from the per port data.
*/
if (temp == IBTL_HCA_PORT_CHG) {
}
}
/*
* Make sure to inform CM, DM, and IBMA if we know of them.
* Also, make sure not to inform them a second time, which
* would occur if they have the HCA open.
*/
/* wait for all tasks to complete */
while (hca_devp->hd_async_task_cnt != 0)
/*
* Hack Alert:
* The ibmf handler would have updated the Master SM LID if it
* was SM LID change event. Now let's check if the new Master SM
* is an Embedded Cisco Topspin SM.
*/
if ((code == IBT_PORT_CHANGE_EVENT) &&
/* wait for node info task to complete */
while (hca_devp->hd_async_task_cnt != 0)
/* wait for all tasks to complete */
while (hca_devp->hd_async_task_cnt != 0)
/* Managers are handled above */
continue;
continue;
continue;
/* dispatch a per-client task for this event */
ibt_hca->ha_async_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
}
/* wait for all tasks to complete */
while (hca_devp->hd_async_task_cnt != 0)
}
/* done: clear state and release the busy flag */
hca_devp->hd_async_code = 0;
hca_devp->hd_async_busy = 0;
}
/*
* Asyncs for QP objects.
*
* The function continues to run until there are no more async
* events/errors for this QP.
*/
/*
 * NOTE(review): signature lost; per the comment above, this loops
 * delivering pending async codes for one QP, translating each bit in
 * qp_async_codes into an IBT event and calling the client's handler.
 */
static void
{
async_event.ev_fma_ena = 0;
/* object marked for free: stop delivering and release it below */
code = 0; /* fallthrough to "kmem_free" */
else if (code & IBT_ERROR_CATASTROPHIC_QP) {
} else if (code & IBT_ERROR_INVALID_REQUEST_QP) {
} else if (code & IBT_ERROR_ACCESS_VIOLATION_QP) {
} else if (code & IBT_ERROR_PATH_MIGRATE_REQ_QP) {
} else if (code & IBT_EVENT_PATH_MIGRATED_QP)
else if (code & IBT_EVENT_SQD)
else if (code & IBT_EVENT_COM_EST_QP)
else if (code & IBT_EVENT_EMPTY_QP)
else {
"async: unexpected QP async code 0x%x", code);
ibtl_qp->qp_async_codes = 0;
code = 0;
}
if (code) {
/* deliver the translated event to the client's async handler */
code, &async_event);
}
sizeof (ibtl_channel_t));
return;
}
}
}
/*
* Asyncs for SRQ objects.
*
* The function continues to run until there are no more async
* events/errors for this SRQ.
*/
/*
 * NOTE(review): signature lost; same pattern as the QP async loop above,
 * for SRQ objects (srq_async_codes bits translated to IBT events).
 */
static void
{
/* object marked for free: stop delivering and release it below */
code = 0; /* fallthrough to "kmem_free" */
else if (code & IBT_ERROR_CATASTROPHIC_SRQ)
else if (code & IBT_EVENT_LIMIT_REACHED_SRQ)
else {
"async: unexpected SRQ async code 0x%x", code);
ibtl_srq->srq_async_codes = 0;
code = 0;
}
if (code) {
/* deliver the translated event to the client's async handler */
code, &async_event);
}
return;
}
}
}
/*
* Asyncs for CQ objects.
*
* The function continues to run until there are no more async
* events/errors for this CQ.
*/
/*
 * NOTE(review): signature lost; same pattern as the QP/SRQ async loops
 * above, for CQ objects (only IBT_ERROR_CQ is a valid code here).
 */
static void
{
/* object marked for free: stop delivering and release it below */
code = 0; /* fallthrough to "kmem_free" */
else if (code & IBT_ERROR_CQ)
code = IBT_ERROR_CQ;
else {
"async: unexpected CQ async code 0x%x", code);
ibtl_cq->cq_async_codes = 0;
code = 0;
}
if (code) {
/* deliver the event to the client's async handler */
code, &async_event);
}
return;
}
}
}
/*
* Asyncs for EEC objects.
*
* The function continues to run until there are no more async
* events/errors for this EEC.
*/
/*
 * NOTE(review): signature lost; same pattern as the other async loops
 * above, for EEC objects (eec_async_codes bits translated to IBT events).
 */
static void
{
async_event.ev_fma_ena = 0;
/* object marked for free: stop delivering and release it below */
code = 0; /* fallthrough to "kmem_free" */
else if (code & IBT_ERROR_CATASTROPHIC_EEC) {
} else if (code & IBT_ERROR_PATH_MIGRATE_REQ_EEC) {
} else if (code & IBT_EVENT_PATH_MIGRATED_EEC)
else if (code & IBT_EVENT_COM_EST_EEC)
else {
"async: unexpected code 0x%x", code);
ibtl_eec->eec_async_codes = 0;
code = 0;
}
if (code) {
/* deliver the translated event to the client's async handler */
code, &async_event);
}
return;
}
}
}
#ifdef __lock_lint
#endif
/*
* Loop forever, calling async_handlers until all of the async lists
* are empty.
*/
/*
 * Main loop of an async worker thread.  Repeatedly removes the first
 * entry from one of the pending-async lists (HCA, QP, SRQ, EEC, CQ —
 * checked in that priority order) and handles it; exits the loop when
 * all lists are empty (sleep/exit logic lost in extraction).
 */
static void
ibtl_async_thread(void)
{
#ifndef __lock_lint
#endif
"ibtl_async_thread");
#ifndef lint
#endif
for (;;) {
if (ibtl_async_hca_list_start) {
/* remove first entry from list */
if (ibtl_async_hca_list_start == NULL)
} else if (ibtl_async_qp_list_start) {
/* remove from list */
if (ibtl_async_qp_list_start == NULL)
} else if (ibtl_async_srq_list_start) {
/* remove from list */
if (ibtl_async_srq_list_start == NULL)
} else if (ibtl_async_eec_list_start) {
/* remove from list */
if (ibtl_async_eec_list_start == NULL)
} else if (ibtl_async_cq_list_start) {
/* remove from list */
if (ibtl_async_cq_list_start == NULL)
} else {
/* all lists empty — NOTE(review): sleep/exit handling lost in extraction */
break;
}
}
#ifndef __lock_lint
#endif
}
/*
 * ibtl_free_*_async_check helpers: called when an object (QP, CQ, SRQ,
 * EEC, HCA handle) is being freed.  If async work is still outstanding
 * on the object, it is only marked for free (the async thread frees it
 * later); otherwise it is freed immediately.
 * NOTE(review): signatures and most statements lost in extraction.
 */
void
{
/*
* If there is an active async, mark this object to be freed
* by the async_thread when it's done.
*/
} else { /* free the object now */
}
}
void
{
/* if there is an active async, mark this object to be freed */
} else { /* free the object now */
}
}
void
{
ibtl_srq);
/* if there is an active async, mark this object to be freed */
} else { /* free the object now */
}
}
void
{
ibtl_eec);
/* if there is an active async, mark this object to be freed */
} else { /* free the object now */
}
}
/*
* This function differs from above in that we assume this is called
* from non-interrupt context, and never called from the async_thread.
*/
void
{
ibt_hca);
/* if there is an active async, mark this object to be freed */
if (ibt_hca->ha_async_cnt > 0) {
} else { /* free the object now */
}
}
/*
* Completion Queue Handling.
*
* A completion queue can be handled through a simple callback
* at interrupt level, or it may be queued for an ibtl_cq_thread
* to handle. The latter is chosen during ibt_alloc_cq when the
* IBTF_CQ_HANDLER_IN_THREAD is specified.
*/
/*
 * NOTE(review): signature lost; invokes the CQ's registered completion
 * handler if one is set, else logs that no handler exists.
 */
static void
{
void *arg;
if (cq_handler != NULL)
else
"no cq_handler for cq %p", ibtl_cq);
}
/*
* Before ibt_free_cq can continue, we need to ensure no more cq_handler
* callbacks can occur. When we get the mutex, we know there are no
* outstanding cq_handler callbacks. We set the cq_handler to NULL to
* prohibit future callbacks.
*/
void
{
if (ibtl_cq->cq_in_thread) {
}
}
}
/*
* Loop forever, calling cq_handlers until the cq list
* is empty.
*/
/*
 * Main loop of a CQ worker thread: pull the first CQ off the pending
 * list, call its handler, and repeat; when the list is empty, either
 * sleep (logic lost in extraction) or exit if told to.
 */
static void
ibtl_cq_thread(void)
{
#ifndef __lock_lint
#endif
"ibtl_cq_thread");
#ifndef lint
#endif
for (;;) {
if (ibtl_cq_list_start) {
/* removing the tail empties the list */
if (ibtl_cq == ibtl_cq_list_end)
}
} else {
if (ibtl_cq_thread_exit == IBTL_THREAD_EXIT)
break;
}
}
#ifndef __lock_lint
#endif
}
/*
* ibc_cq_handler()
*
* Completion Queue Notification Handler.
*
*/
/*ARGSUSED*/
/*
 * NOTE(review): per the comment above, this is ibc_cq_handler(); its
 * signature is lost.  For a CQ configured for in-thread handling it is
 * appended to the CQ list (tail tracked by ibtl_cq_list_end) for an
 * ibtl_cq_thread; otherwise the handler is called directly.
 */
void
{
if (ibtl_cq->cq_in_thread) {
if (ibtl_cq_list_end == NULL)
else
}
return;
} else
}
/*
* ibt_enable_cq_notify()
* Enable Notification requests on the specified CQ.
*
* ibt_cq The CQ handle.
*
* notify_type Enable notifications for all (IBT_NEXT_COMPLETION)
* completions, or the next Solicited completion
* (IBT_NEXT_SOLICITED) only.
*
* Completion notifications are disabled by setting the completion
* handler to NULL by calling ibt_set_cq_handler().
*/
/* NOTE(review): body of ibt_enable_cq_notify (per comment above); statements lost. */
{
}
/*
* ibt_set_cq_handler()
* Register a work request completion handler with the IBTF.
*
* ibt_cq The CQ handle.
*
* completion_handler The completion handler.
*
* arg The IBTF client private argument to be passed
* back to the client when calling the CQ
* completion handler.
*
* Completion notifications are disabled by setting the completion
* handler to NULL. When setting the handler to NULL, no additional
* calls to the previous CQ handler will be initiated, but there may
* be one in progress.
*
* This function does not otherwise change the state of previous
* calls to ibt_enable_cq_notify().
*/
/* NOTE(review): signature partially lost; body statements lost. */
void
void *arg)
{
}
/*
* Inform IBT clients about New HCAs.
*
* We use taskqs to allow simultaneous notification, with sleeping.
* Since taskqs only allow one argument, we define a structure
* because we need to pass in two arguments.
*/
/*
 * Argument bundle for new-HCA notification taskq dispatches.
 * NOTE(review): members lost in extraction.
 */
struct ibtl_new_hca_s {
};
/*
 * Taskq callback telling one client about a new HCA.
 * NOTE(review): name/signature and most of the body lost; the remainder
 * decrements the devinfo and client async counters.
 */
static void
{
#ifdef __lock_lint
{
}
#endif
if (--hca_devp->hd_async_task_cnt == 0)
if (--clntp->clnt_async_cnt == 0)
}
/*
* ibtl_announce_new_hca:
*
* o First attach these clients in the given order
* IBMA
* IBCM
*
* o Next attach all other clients in parallel.
*
* NOTE: Use the taskq to simultaneously notify all clients of the new HCA.
* Retval from clients is ignored.
*/
/*
 * NOTE(review): per the comment above, this is ibtl_announce_new_hca();
 * signature lost.  Visible structure: notify IBMA first (waiting for it
 * to finish), then IBCM, then all remaining clients in parallel via the
 * async taskq; finally wait for all tasks and clear hd_async_busy.
 */
void
{
struct ibtl_new_hca_s *new_hcap;
"ibtl_announce_new_hca: calling IBMF");
KM_SLEEP);
#ifndef lint
#endif
clntp->clnt_async_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
TQ_SLEEP);
}
break;
}
}
/* wait for the previous client's notification to complete */
while (clntp->clnt_async_cnt > 0)
KM_SLEEP);
#ifndef lint
#endif
clntp->clnt_async_cnt++;
/* NOTE(review): this client is called directly, not via taskq */
(void) ibtl_tell_client_about_new_hca(
new_hcap);
}
break;
}
}
KM_SLEEP);
#ifndef lint
#endif
clntp->clnt_async_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
TQ_SLEEP);
}
break;
}
}
while (clntp->clnt_async_cnt > 0)
"ibtl_announce_new_hca: Calling %s ",
KM_SLEEP);
#ifndef lint
#endif
clntp->clnt_async_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
TQ_SLEEP);
}
}
}
/* wait for all tasks to complete */
while (hca_devp->hd_async_task_cnt != 0)
/* wakeup thread that may be waiting to send an HCA async */
hca_devp->hd_async_busy = 0;
}
/*
* ibtl_detach_all_clients:
*
* Return value - 0 for Success, 1 for Failure
*
* o First detach general clients.
*
* o Next detach these clients
* IBCM
* IBDM
*
* o Finally, detach this client
* IBMA
*/
/*
 * NOTE(review): per the comment above, this is ibtl_detach_all_clients();
 * signature lost.  Returns 0 on success, 1 on failure (some client still
 * holds the HCA open).  Detach order: general clients, then IBDM/IBCM,
 * then IBMA, verifying after each stage that the HCA was closed.
 */
int
{
int retval;
hcaguid);
/* serialize with any in-progress async delivery for this HCA */
while (hca_devp->hd_async_busy)
/* First inform general clients asynchronously */
if (IBTL_GENERIC_CLIENT(clntp)) {
ibt_hca->ha_async_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
}
}
/* wait for all clients to complete */
while (hca_devp->hd_async_task_cnt != 0) {
}
/* Go thru the clients and check if any have not closed this HCA. */
retval = 0;
if (IBTL_GENERIC_CLIENT(clntp)) {
"ibtl_detach_all_clients: "
"client '%s' failed to close the HCA.",
retval = 1;
}
}
if (retval == 1)
goto bailout;
/* Next inform IBDM asynchronously */
ibt_hca->ha_async_cnt++;
break;
}
}
/*
* Next inform IBCM.
* As IBCM doesn't perform ibt_open_hca(), IBCM will not be
* accessible via hca_devp->hd_clnt_list.
* ibtl_cm_async_handler will NOT be NULL, if IBCM is registered.
*/
if (ibtl_cm_async_handler) {
/* wait for all tasks to complete */
while (hca_devp->hd_async_task_cnt != 0)
}
/* Go thru the clients and check if any have not closed this HCA. */
retval = 0;
"ibtl_detach_all_clients: "
"client '%s' failed to close the HCA.",
retval = 1;
}
}
if (retval == 1)
goto bailout;
/* Finally, inform IBMA */
ibt_hca->ha_async_cnt++;
(void) taskq_dispatch(ibtl_async_taskq,
} else
"ibtl_detach_all_clients: "
"client '%s' is unexpectedly on the client list",
}
/* wait for IBMA to complete */
while (hca_devp->hd_async_task_cnt != 0) {
}
/* Check if this HCA's client list is empty. */
"ibtl_detach_all_clients: "
"client '%s' failed to close the HCA.",
retval = 1;
} else
retval = 0;
if (retval) {
} else {
hca_devp->hd_async_busy = 0;
}
return (retval);
}
/*
 * NOTE(review): signature lost; waits until all asyncs outstanding for a
 * client have completed (clnt_async_cnt reaches zero).
 */
void
{
/* wait for all asyncs based on "ibtl_clnt_list" to complete */
while (clntp->clnt_async_cnt != 0) {
}
}
/* NOTE(review): signature lost; decrements the client async count, presumably waking waiters at zero. */
static void
{
if (--clntp->clnt_async_cnt == 0) {
}
}
/* NOTE(review): signature lost; increments the client async count. */
static void
{
++clntp->clnt_async_cnt;
}
/*
* Functions and data structures to inform clients that a notification
* has occurred about Multicast Groups that might interest them.
*/
/*
 * Argument bundle for SM-notice taskq dispatches.
 * NOTE(review): members lost in extraction.
 */
struct ibtl_sm_notice {
};
/* Taskq callback: invoke the client's SM-notice handler if registered. */
static void
ibtl_sm_notice_task(void *arg)
{
if (sm_notice_handler != NULL)
}
/*
* Inform the client that MCG notices are not working at this time.
*/
/*
 * NOTE(review): per the comment above, tells the client that MCG notices
 * are not working; dispatches one ibtl_sm_notice_task per SGID in the
 * failure record.  Signature lost.
 */
void
{
struct ibtl_sm_notice *noticep;
int i;
for (i = 0; i < ifail->smf_num_sgids; i++) {
#ifndef lint
#endif
(void) taskq_dispatch(ibtl_async_taskq,
}
}
/*
* Inform all clients of the event.
*/
/* NOTE(review): signature lost; dispatches a notice task per client with a trap handler. */
void
{
struct ibtl_sm_notice *noticep;
if (clntp->clnt_sm_trap_handler) {
++clntp->clnt_async_cnt;
(void) taskq_dispatch(ibtl_async_taskq,
}
}
#ifndef lint
#endif
}
/*
* Record the handler for this client.
*/
/* NOTE(review): signature and assignment statements lost. */
void
{
#ifndef lint
#endif
}
/*
* ibtl_another_cq_handler_in_thread()
*
* Conditionally increase the number of cq_threads.
* The number of threads grows, based on the number of cqs using threads.
*
* The table below controls the number of threads as follows:
*
* Number of CQs Number of cq_threads
* 0 0
* 1 1
* 2-3 2
* 4-5 3
* 6-9 4
* 10-15 5
* 16-23 6
* 24-31 7
* 32+ 8
*/
#define IBTL_CQ_MAXTHREADS 8
/* NOTE(review): truncated array of CQ-count thresholds (see the table in the comment above). */
1, 2, 4, 6, 10, 16, 24, 32
};
/*
 * NOTE(review): per the comment above, this is
 * ibtl_another_cq_handler_in_thread(); it grows the pool of CQ worker
 * threads (up to IBTL_CQ_MAXTHREADS) based on how many CQs use threads.
 */
void
{
kthread_t *t;
int my_idx;
/* already at the thread cap (or other condition lost in extraction) */
if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
return;
}
my_idx = ibtl_cq_threads++;
/* spawn one more ibtl_cq_thread at priority ibtl_pri - 1 */
ibtl_pri - 1);
}
/* Early thread-subsystem init.  NOTE(review): body statements lost in extraction. */
void
ibtl_thread_init(void)
{
}
/*
 * Second-stage init: runs once (guarded by `initted`), allocating the
 * thread-id array and spawning the async and CQ worker threads.
 */
void
ibtl_thread_init2(void)
{
int i;
static int initted = 0;
kthread_t *t;
if (initted == 1) {
return;
}
initted = 1;
KM_SLEEP);
for (i = 0; i < ibtl_async_thread_init; i++) {
}
for (i = 0; i < ibtl_cq_threads; i++) {
}
}
/*
 * Tear down the worker threads: join each CQ thread, then the async
 * threads, and free the thread-id array.
 */
void
ibtl_thread_fini(void)
{
int i;
/* undo the work done by ibtl_thread_init() */
for (i = 0; i < ibtl_cq_threads; i++)
thread_join(ibtl_cq_did[i]);
if (ibtl_async_did) {
for (i = 0; i < ibtl_async_thread_init; i++)
ibtl_async_thread_init * sizeof (kt_did_t));
}
}
/* ARGSUSED */
/* NOTE(review): truncated stub; signature lost.  Always reports success. */
{
return (IBT_SUCCESS);
}