/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright 2012 Milan Jurik. All rights reserved.
*/
/*
 * This module provides for the management of interconnect adapters,
* inter-node connections (aka paths), and IPC. Adapter descriptors are
* maintained on a linked list; one list per adapter devname. Each
* adapter descriptor heads a linked list of path descriptors. There is
* also a linked list of ipc_info descriptors; one for each node. Each
* ipc_info descriptor heads a circular list of ipc tokens (the tokens are
* embedded within a path descriptor). The tokens are used in round robin
* fashion.
*
*
* The exported interface consists of the following functions:
* - rsmka_add_adapter
* - rsmka_remove_adapter
*
* [add_path and remove_path only called for current adapters]
* - rsmka_add_path
* - rsmka_remove_path [a path down request is implicit]
*
* - rsmka_path_up [called at clock ipl for Sun Cluster]
* - rsmka_path_down [called at clock ipl for Sun Cluster]
* - rsmka_disconnect_node [called at clock ipl for Sun Cluster;
* treat like path-down for all node paths;
* can be before node_alive; always before
* node_died.]
*
* [node_alive and node_died are always paired]
* - rsmka_node_alive called after the first cluster path is up
* for this node
* - rsmka_node_died
*
* [set the local node id]
* - rsmka_set_my_nodeid called to set the variable my_nodeid to the
* local node id
*
* Processing for these functions is setup as a state machine supported
* by the data structures described above.
*
* For Sun Cluster these are called from the Path-Manager/Kernel-Agent
* Interface (rsmka_pm_interface.cc).
*
 * The functions rsmka_path_up, rsmka_path_down, and rsmka_disconnect_node
 * are called at clock interrupt level from the Path-Manager/Kernel-Agent
 * Interface, which precludes sleeping; so these functions may (optionally)
 * defer processing to an independent thread running at normal ipl.
*
*
* lock definitions:
*
* (mutex) work_queue.work_mutex
 *		protects the linked list of work tokens and is used
 *		with the work queue condition variable.
* No other locks acquired when held.
*
* (mutex) adapter_listhead_base.listlock
* protects linked list of adapter listheads
* Always acquired before listhead->mutex
*
*
* (mutex) ipc_info_lock
* protects ipc_info list and sendq token lists
* Always acquired before listhead->mutex
*
* (mutex) listhead->mutex
* protects adapter listhead, linked list of
* adapters, and linked list of paths.
*
* (mutex) path->mutex
* protects the path descriptor.
* work_queue.work_mutex may be acquired when holding
* this lock.
*
* (mutex) adapter->mutex
 *		protects adapter descriptor contents; used
 *		mainly for ref_cnt updates.
*/
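/*
 * Illustrative sketch (not part of the original source, disabled from
 * compilation): the descriptor relationships described above. Structure
 * and field names here are assumptions for illustration only.
 */
#if 0
struct sketch_adapter_listhead {	/* one per adapter devname */
	struct sketch_adapter_listhead	*next_listhead;
	struct sketch_adapter		*adapter_list;	/* adapters */
};

struct sketch_adapter {
	struct sketch_adapter	*next;		/* same-devname list */
	struct sketch_path	*path_list;	/* paths on this adapter */
};

struct sketch_ipc_info {	/* one per remote node */
	struct sketch_ipc_info		*next;
	struct sketch_sendq_token	*token_list;	/* circular list */
};
#endif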
#include <sys/ddi_impldefs.h>
#include <rsm_in.h>
extern void _cplpl_init();
extern void _cplpl_fini();
extern pri_t maxclsyspri;
extern int rsm_hash_size;
extern rsm_node_id_t my_nodeid;
extern rsmhash_table_t rsm_import_segs;
extern void rsmseg_unload(rsmseg_t *);
extern void rsmka_path_monitor_initialize();
extern void rsmka_path_monitor_terminate();
extern adapter_t loopback_adapter;
/*
* Lint errors and warnings are displayed; informational messages
* are suppressed.
*/
/* lint -w2 */
/*
* macros SQ_TOKEN_TO_PATH and WORK_TOKEN_TO_PATH use a null pointer
* for computational purposes. Ignore the lint warning.
*/
/* lint -save -e413 */
/* FUNCTION PROTOTYPES */
adapter_t *rsmka_lookup_adapter(char *, int);
static void path_importer_disconnect(path_t *);
static void do_path_down(path_t *, int);
static void enqueue_work(work_token_t *);
static void destroy_path(path_t *);
static void do_deferred_work(caddr_t);
static int create_ipc_sendq(path_t *);
static void destroy_ipc_info(ipc_info_t *);
void rsmka_pathmanager_cleanup();
void rsmka_release_adapter(adapter_t *);
int rsmka_terminate_workthread_loop = 0;
/* protect ipc_info descriptor manipulation */
/* for synchronization with rsmipc_send() in rsm.c */
/*
* RSMKA PATHMANAGER INITIALIZATION AND CLEANUP ROUTINES
*
*/
/*
* Called from the rsm module (rsm.c) _init() routine
*/
void
{
"rsmka_pathmanager_init enter\n"));
/* initialization for locks and condition variables */
"rsmka_pathmanager_init done\n"));
}
void
{
"rsmka_pathmanager_cleanup enter\n"));
/*
* In processing the remove path callbacks from the path monitor
* object, all deferred work will have been completed. So
* awaken the deferred work thread to give it a chance to exit
* the loop.
*/
/*
* Wait for the deferred work thread to exit before
* destroying the locks and cleaning up other data
* structures.
*/
if (rsm_thread_id)
/*
* Destroy locks & condition variables
*/
while (ipc_info_head)
"rsmka_pathmanager_cleanup done\n"));
}
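/*
 * Sketch of the shutdown handshake described above (disabled from
 * compilation): set the terminate flag, wake the deferred work thread,
 * then wait for it to exit. The cv name and the join mechanism are
 * assumptions for illustration.
 */
#if 0
	mutex_enter(&work_queue.work_mutex);
	rsmka_terminate_workthread_loop = 1;
	cv_signal(&work_queue.work_cv);		/* assumed cv name */
	mutex_exit(&work_queue.work_mutex);

	if (rsm_thread_id)
		thread_join(rsm_thread_id);	/* wait for thread exit */
#endif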
void
{
"rsm: node %d \n", my_nodeid));
}
/*
* DEFERRED WORK THREAD AND WORK QUEUE SUPPORT ROUTINES
*
*/
/*
* This function is the executable code of the thread which handles
* deferred work. Work is deferred when a function is called at
* clock ipl and processing may require blocking.
*
*
* The thread is created by a call to taskq_create in rsmka_pathmanager_init.
* After creation, a call to taskq_dispatch causes this function to
* execute. It loops forever - blocked until work is enqueued from
* rsmka_do_path_active, do_path_down, or rsmka_disconnect_node.
* rsmka_pathmanager_cleanup (called from _fini) will
* set rsmka_terminate_workthread_loop and the task processing will
* terminate.
*/
static void
{
int work_opcode;
int error;
"rsm_deferred_work");
for (;;) {
goto exit;
}
/* When there is no work to do, block here */
/* Since no work to do, Safe to CPR */
goto exit;
}
}
/*
* Remove a work token and begin work
*/
switch (work_opcode) {
case RSMKA_IPC_UP:
"do_deferred_work:up, path = %lx\n", path));
/*
* path state has changed, if sendq was created,
* destroy it and return. Don't need to worry
* about sendq ref_cnt since no one starts
* using the sendq till path state becomes
* active
*/
if (error == RSM_SUCCESS) {
NULL;
if (sendq_handle != NULL) {
}
}
/* free up work token */
work_token->opcode = 0;
/*
* decrement reference count for the path
* descriptor and signal for synchronization
* with rsmka_remove_path. PATH_HOLD_NOLOCK was
* done by rsmka_path_up.
*/
break;
}
if (error == RSM_SUCCESS) {
"do_deferred_work:success on up\n"));
/* clear flag since sendq_create succeeded */
/*
* now that path is active we send the
* RSMIPC_MSG_SQREADY to the remote endpoint
*/
path->procmsg_cnt = 0;
/* Calculate local incarnation number */
gethrestime(&tv);
/*
 * if send fails here it's due to some
 * non-transient error because QUEUE_FULL is
 * not possible here since we are the first
 * message on this sendq. The error will cause
 * the path to go down anyway, so ignore
 * the return value.
*/
(void) rsmipc_send_controlmsg(path,
/* wait for SQREADY_ACK message */
} else {
/*
 * sendq create failed, possibly because
 * the remote end is not yet ready, e.g. its
 * handler is not registered; set a flag
* so that when there is an indication
* that the remote end is ready
* rsmka_do_path_active will be retried.
*/
}
/* free up work token */
work_token->opcode = 0;
/*
* decrement reference count for the path
* descriptor and signal for synchronization with
* rsmka_remove_path. PATH_HOLD_NOLOCK was done
* by rsmka_path_up.
*/
break;
case RSMKA_IPC_DOWN:
"do_deferred_work:down, path = %lx\n", path));
/*
* Unlike the processing of path_down in the case
* where the RSMKA_NO_SLEEP flag is not set, here,
* the state of the path is changed directly to
* RSMKA_PATH_DOWN. This is because in this case
* where the RSMKA_NO_SLEEP flag is set, any other
 * calls referring to this path will just queue up
* and will be processed only after the path
* down processing has completed.
*/
/*
* clear the WAIT_FOR_SQACK flag since path is down.
*/
/*
* this wakes up any thread waiting to receive credits
* in rsmipc_send to tell it that the path is down
* thus releasing the sendq.
*/
/* drain the messages from the receive msgbuf */
/*
* The path_importer_disconnect function has to
* be called after releasing the mutex on the path
* in order to avoid any recursive mutex enter panics
*/
"do_deferred_work: success on down\n"));
/*
* decrement reference count for the path
* descriptor and signal for synchronization with
* rsmka_remove_path. PATH_HOLD_NOLOCK was done
* by rsmka_path_down.
*/
#ifdef DEBUG
/*
* Some IPC messages left in the recv_buf,
* they'll be dropped
*/
if (path->msgbuf_cnt != 0)
"path=%lx msgbuf_cnt != 0\n",
#endif
/*
* Don't want to destroy a send queue when a token
* has been acquired; so wait 'til the token is
* no longer referenced (with a cv_wait).
*/
/* destroy the send queue and release the handle */
if (sendq_handle != NULL) {
}
work_token->opcode = 0;
break;
default:
"do_deferred_work: bad work token opcode\n"));
break;
}
}
exit:
/*
* CALLB_CPR_EXIT does a mutex_exit for
* the work_queue.work_mutex
*/
}
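/*
 * Sketch of the CPR-safe blocking idiom referred to above ("Safe to
 * CPR"), using the standard callb(9F) pattern; the queue and cv field
 * names are assumptions for illustration.
 */
#if 0
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, &work_queue.work_mutex,
	    callb_generic_cpr, "rsm_deferred_work");

	mutex_enter(&work_queue.work_mutex);
	while (work_queue.head == NULL) {
		/* mark the thread safe to suspend while it sleeps */
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(&work_queue.work_cv, &work_queue.work_mutex);
		CALLB_CPR_SAFE_END(&cprinfo, &work_queue.work_mutex);
	}
	/* dequeue and process work tokens here */

	/* CALLB_CPR_EXIT drops work_queue.work_mutex */
	CALLB_CPR_EXIT(&cprinfo);
#endif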
/*
* Work is inserted at the tail of the list and processed from the
* head of the list.
*/
static void
{
} else {
}
/* wake up deferred work thread */
}
/*
* If the work_token is found on the work queue, the work is cancelled
* by removing the token from the work queue.
*
* Return true if a work_token was found and cancelled, otherwise return false
*
* enqueue_work increments the path refcnt to make sure that the path doesn't
* go away, callers of cancel_work need to decrement the refcnt of the path to
* which this work_token belongs if a work_token is found in the work_queue
 * and cancelled, i.e. when the return value is B_TRUE.
*/
static boolean_t
{
while (current_token != NULL) {
if (current_token == work_token) {
else
current_token->opcode = 0;
/* found and cancelled work */
"cancelled_work = 0x%p\n", work_token));
break;
}
}
return (cancelled);
}
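/*
 * Sketch of the queue discipline documented above (tail insert, head
 * removal), disabled from compilation; list field names are
 * assumptions for illustration. Note that the real enqueue_work also
 * holds a reference on the path that owns the token.
 */
#if 0
static void
sketch_enqueue_work(work_token_t *token)
{
	/* caller holds work_queue.work_mutex */
	token->next = NULL;
	if (work_queue.head == NULL)
		work_queue.head = token;
	else
		work_queue.tail->next = token;
	work_queue.tail = token;

	/* wake up deferred work thread */
	cv_signal(&work_queue.work_cv);
}
#endif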
/*
* EXTERNAL INTERFACES
*
 * For Galileo Clustering, these routines are called from the
 * Path-Manager/Kernel-Agent Interface (rsmka_pm_interface.cc).
*/
/*
*
* If the adapter is supported by rsmpi then initialize an adapter descriptor
* and link it to the list of adapters. The adapter attributes are obtained
* from rsmpi and stored in the descriptor. Finally, a service handler
* for incoming ipc on this adapter is registered with rsmpi.
* A pointer for the adapter descriptor is returned as a cookie to the
 * caller. The cookie may be used with subsequent calls to save the time of
* adapter descriptor lookup.
*
 * The adapter descriptor maintains a reference count which is initialized
* to 1 and incremented on lookups; when a cookie is used in place of
* a lookup, an explicit ADAPTER_HOLD is required.
*/
void *
{
int adapter_is_supported;
int result;
"rsmka_add_adapter: name = %s instance = %d hwaddr = %llx \n",
/* verify name length */
"rsmka_add_adapter done: name too long\n"));
return (NULL);
}
/* Check if rsmpi supports this adapter type */
if (adapter_is_supported != RSM_SUCCESS) {
"rsmka_add_adapter done: adapter not supported\n"));
return (NULL);
}
/* Get adapter attributes */
if (result != RSM_SUCCESS) {
"rsm: get_controller_attr(%d) Failed %x\n",
return (NULL);
}
"rsmka_add_adapter: register service offset = %d\n", hwaddr));
/*
* create a srv_handler_arg_t object, initialize it and register
 * it along with rsm_srv_func. This gets passed as the
* rsm_intr_hand_arg_t when the handler gets invoked.
*/
/* Have rsmpi register the ipc receive handler for this adapter */
/*
* Currently, we need to pass in a separate service identifier for
* each adapter. In order to obtain a unique service identifier
* value for an adapter, we add the hardware address of the
 * adapter to the base service identifier (RSM_SERVICE, which is
* defined as RSM_INTR_T_KA as per the RSMPI specification).
* NOTE: This may result in using some of the service identifier
 * values defined for RSM_INTR_T_XPORT (the Sun Cluster Transport).
*/
if (result != RSM_SUCCESS) {
"rsmka_add_adapter done: rsm_register_handler"
"failed %d\n",
instance));
return (NULL);
}
/* Initialize an adapter descriptor and add it to the adapter list */
/* Copy over the attributes from the pointer returned to us */
/*
* With the addition of the topology obtainment interface, applications
* now get the local nodeid from the topology data structure.
*
* adapter->rsm_attr.attr_node_id = my_nodeid;
*/
"rsmka_add_adapter: adapter = %lx\n", adapter));
/* return adapter pointer as a cookie for later fast access */
return ((void *)adapter);
}
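/*
 * Sketch of the service-identifier computation described above
 * (disabled from compilation): the adapter hwaddr is added to the base
 * identifier RSM_SERVICE (RSM_INTR_T_KA), so each adapter registers a
 * distinct ipc service. The variable and type names are assumptions.
 */
#if 0
	uint_t service = (uint_t)(RSM_SERVICE + hwaddr);
	/* service may fall in the RSM_INTR_T_XPORT range; see above */
#endif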
/*
* Unlink the adapter descriptor and call rsmka_release_adapter which
 * will decrement the reference count and possibly free the descriptor.
*/
{
"rsmka_remove_adapter enter\n"));
"rsmka_remove_adapter: cookie = %lx\n", cookie));
if (flags & RSMKA_USE_COOKIE) {
} else {
/*
* rsmka_lookup_adapter increments the ref_cnt; need
* to decrement here to get true count
*/
}
/* find the adapter in the list and remove it */
break;
} else {
}
}
else
/*
* unregister the handler
*/
"rsmka_remove_adapter done\n"));
return (B_TRUE);
}
/*
* An adapter descriptor will exist from an earlier add_adapter. This
* function does:
* initialize the path descriptor
* initialize the ipc descriptor (it may already exist)
* initialize and link a sendq token for this path
*/
void *
{
/* allocate new path descriptor */
if (flags & RSMKA_USE_COOKIE) {
} else {
}
"rsmka_add_path: adapter = %lx\n", adapter));
/*
* initialize path descriptor
* don't need to increment adapter reference count because
* it can't be removed if paths exist for it.
*/
/* taskq is for sendq on adapter with remote_hwaddr on remote_node */
remote_node, (unsigned long long) remote_hwaddr);
/* allocate the message buffer array */
/*
* init cond variables for synch with rsmipc_send()
* and rsmka_remove_path
*/
/* link path descriptor on adapter path list */
/* link the path sendq token on the ipc_info token list */
/* ADAPTER_HOLD done above by rsmka_lookup_adapter */
return ((void *)path);
}
/*
* Wait for the path descriptor reference count to become zero then
* directly call path down processing. Finally, unlink the sendq token and
* free the path descriptor memory.
*
* Note: lookup_path locks the path and increments the path hold count
*/
void
{
if (flags & RSMKA_USE_COOKIE) {
} else {
/*
* remember, lookup_path increments the reference
* count - so decrement now so we can get to zero
*/
}
"rsmka_remove_path: path = %lx\n", path));
/* attempt to cancel any possibly pending work */
}
}
/*
* The path descriptor ref cnt was set to 1 initially when
* the path was added. So we need to do a decrement here to
* balance that.
*/
case RSMKA_PATH_UP:
/* clear the flag */
break;
case RSMKA_PATH_DOWN:
break;
case RSMKA_PATH_ACTIVE:
/*
* rsmka_remove_path should not call do_path_down
* with the RSMKA_NO_SLEEP flag set since for
* this code path, the deferred work would
* incorrectly do a PATH_RELE_NOLOCK.
*/
do_path_down(path, 0);
break;
default:
"rsm_remove_path: invalid path state %d\n",
return;
}
/*
* wait for all references to the path to be released. If a thread
* was waiting to receive credits do_path_down should wake it up
* since the path is going down and that will cause the sleeping
* thread to release its hold on the path.
*/
}
/*
* remove from ipc token list
* NOTE: use the remote_node value from the path structure
* since for RSMKA_USE_COOKIE being set, the remote_node
* value passed into rsmka_remove_path is 0.
*/
/* unlink from adapter path list and free path descriptor */
}
/*
*
* LOCKING:
* lookup_path locks the path and increments the path hold count. If the remote
* node is not in the alive state, do_path_up will release the lock and
* decrement the hold count. Otherwise rsmka_do_path_active will release the
* lock prior to waking up the work thread.
*
* REF_CNT:
* The path descriptor ref_cnt is incremented here; it will be decremented
* when path up processing is completed in do_path_up or by the work thread
* if the path up is deferred.
*
*/
void *path_cookie, int flags)
{
if (flags & RSMKA_USE_COOKIE) {
} else {
}
return (rval);
}
/*
*
* LOCKING:
 * lookup_path locks the path and increments the path hold count. If the
 * current state is ACTIVE, the path lock is released prior to waking up
 * the work thread in do_path_down. The work thread will decrement the hold
 * count when the work for this is finished.
*
*
* REF_CNT:
* The path descriptor ref_cnt is incremented here; it will be decremented
* when path down processing is completed in do_path_down or by the work thread
* if the path down is deferred.
*
*/
{
if (flags & RSMKA_USE_COOKIE) {
} else {
}
"rsmka_path_down: path = %lx\n", path));
case RSMKA_PATH_UP:
/* clear the flag */
/*
* release path->mutex since enqueued tasks acquire it.
* Drain all the enqueued tasks.
*/
break;
case RSMKA_PATH_DOWN:
break;
case RSMKA_PATH_ACTIVE:
/*
 * Need to release the path refcnt. This is done in do_path_down
 * or do_deferred_work when RSMKA_NO_SLEEP is set; it has to be
 * done here when RSMKA_NO_SLEEP is not set.
*/
if (!(flags & RSMKA_NO_SLEEP))
break;
default:
}
return (rval);
}
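/*
 * Sketch of the hold/release discipline referenced throughout
 * (PATH_HOLD_NOLOCK / PATH_RELE_NOLOCK), disabled from compilation: a
 * hold pins the path descriptor; the final release wakes a thread in
 * rsmka_remove_path waiting for the ref_cnt to drain. Field and cv
 * names here are assumptions for illustration.
 */
#if 0
#define	SKETCH_PATH_HOLD_NOLOCK(p)	((p)->ref_cnt++)
#define	SKETCH_PATH_RELE_NOLOCK(p)			\
	{						\
		if (--(p)->ref_cnt == 0)		\
			cv_signal(&(p)->hold_cv);	\
	}
#endif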
/*
* Paths cannot become active until node_is_alive is marked true
* in the ipc_info descriptor for the node
*
* In the event this is called before any paths have been added,
 * init_ipc_info is called here.
*
*/
{
"rsmka_node_alive: remote_node = %x\n", remote_node));
"rsmka_node_alive: new ipc_info = %lx\n", ipc_info));
} else {
}
/* rsmipc_send() may be waiting for a sendq_token */
return (B_TRUE);
}
/*
* Paths cannot become active when node_is_alive is marked false
* in the ipc_info descriptor for the node
*/
{
"rsmka_node_died: remote_node = %x\n", remote_node));
return (B_FALSE);
/* rsmipc_send() may be waiting for a sendq_token */
return (B_TRUE);
}
/*
* Treat like path_down for all paths for the specified remote node.
* Always invoked before node died.
*
 * NOTE: This routine is not called from the cluster path interface;
* rsmka_path_down is called directly for each path.
*/
void
{
"rsmka_disconnect_node enter\n"));
"rsmka_disconnect_node: node = %d\n", remote_node));
if (flags & RSMKA_NO_SLEEP) {
while (sendq_token != NULL) {
/* if an up token is enqueued, remove it */
/*
* If the path is active and down work hasn't
 * already been set up then down work is needed.
* else
* if up work wasn't canceled because it was
* already being processed then down work is needed
*/
if (down_token->opcode == 0)
} else
}
if (cancelled) {
}
}
/*
* Now that all the work is enqueued, wakeup the work
* thread.
*/
} else {
/* get locked ipc_info descriptor */
while (sendq_token != NULL) {
"rsmka_disconnect_node: path_down"
"for path = %x\n",
path));
(void) rsmka_path_down(0, 0, 0, 0,
break;
}
}
"rsmka_disconnect_node done\n"));
}
/*
* Called from rsm_node_alive - if a path to a remote node is in
* state RSMKA_PATH_UP, transition the state to RSMKA_PATH_ACTIVE with a
* call to rsmka_do_path_active.
*
* REF_CNT:
* The path descriptor ref_cnt is incremented here; it will be decremented
* when path up processing is completed in rsmka_do_path_active or by the work
* thread if the path up is deferred.
*/
static void
{
"pathup_to_pathactive enter\n"));
(void) rsmka_do_path_active(path, 0);
}
break;
}
"pathup_to_pathactive done\n"));
}
/*
* Called from pathup_to_pathactive and do_path_up. The objective is to
* create an ipc send queue and transition to state RSMKA_PATH_ACTIVE.
* For the no sleep case we may need to defer the work using a token.
*
*/
{
int error;
"rsmka_do_path_active enter\n"));
if (flags & RSMKA_NO_SLEEP) {
/* if a down token is enqueued, remove it */
if (cancel_work(down_token)) {
}
/*
* If the path is not active and up work hasn't
 * already been set up then up work is needed.
* else
* if down work wasn't canceled because it was
* already being processed then up work is needed
*/
} else
}
else
"rsmka_do_path_active done\n"));
return (B_TRUE);
} else {
/*
* Drop the path lock before calling create_ipc_sendq, shouldn't
* hold locks across calls to RSMPI routines.
*/
/*
* path state has changed, if sendq was created,
* destroy it and return
*/
if (error == RSM_SUCCESS) {
sqhdl);
}
}
"rsmka_do_path_active done: path=%lx not UP\n",
}
if (error == RSM_SUCCESS) {
/* clear flag since sendq_create succeeded */
/*
* now that path is active we send the
* RSMIPC_MSG_SQREADY to the remote endpoint
*/
path->procmsg_cnt = 0;
/* Calculate local incarnation number */
gethrestime(&tv);
/*
 * if send fails here it's due to some non-transient
 * error because QUEUE_FULL is not possible here since
 * we are the first message on this sendq. The error
 * will cause the path to go down anyway, so ignore
 * the return value.
*/
/* wait for SQREADY_ACK message */
"rsmka_do_path_active success\n"));
} else {
/*
 * sendq create failed, possibly because
 * the remote end is not yet ready, e.g. its
 * handler is not registered; set a flag
* so that when there is an indication
* that the remote end is ready rsmka_do_path_active
* will be retried.
*/
}
"rsmka_do_path_active done\n"));
}
}
/*
* Called from rsm_path_up.
* If the remote node state is "alive" then call rsmka_do_path_active
* otherwise just transition path state to RSMKA_PATH_UP.
*/
static boolean_t
{
/* path moved to ACTIVE by rsm_sqcreateop_callback - just return */
"do_path_up done: already ACTIVE\n"));
return (B_TRUE);
}
/* initialize the receive msgbuf counters */
path->msgbuf_head = 0;
path->msgbuf_cnt = 0;
path->procmsg_cnt = 0;
/*
* rsmka_check_node_alive acquires ipc_info_lock, in order to maintain
* correct lock ordering drop the path lock before calling it.
*/
if (node_alive == B_TRUE)
else {
}
return (rval);
}
/*
* Called from rsm_remove_path, rsm_path_down, deferred_work.
* Destroy the send queue on this path.
* Disconnect segments being imported from the remote node
* Disconnect segments being imported by the remote node
*
*/
static void
{
if (flags & RSMKA_NO_SLEEP) {
"do_path_down: after work_mutex\n"));
/* if an up token is enqueued, remove it */
if (cancel_work(up_token)) {
}
/*
* If the path is active and down work hasn't
 * already been set up then down work is needed.
* else
* if up work wasn't canceled because it was
* already being processed then down work is needed
*/
if (down_token->opcode == 0)
} else
} else
} else {
/*
* Change state of the path to RSMKA_PATH_GOING_DOWN and
 * release the path mutex. Any other thread referring
 * to this path will cv_wait while the state of the path
 * remains RSMKA_PATH_GOING_DOWN.
 * On completing the path down processing, change the
 * state to RSMKA_PATH_DOWN, indicating that the path
* is indeed down.
*/
/*
* clear the WAIT_FOR_SQACK flag since path is going down.
*/
/*
* this wakes up any thread waiting to receive credits
* in rsmipc_send to tell it that the path is going down
*/
/*
* drain the messages from the receive msgbuf, the
* tasks in the taskq_thread acquire the path->mutex
* so we drop the path mutex before taskq_wait.
*/
/*
* Disconnect segments being imported from the remote node
* The path_importer_disconnect function needs to be called
* only after releasing the mutex on the path. This is to
* avoid a recursive mutex enter when doing the
* rsmka_get_sendq_token.
*/
/*
* Get the path mutex, change the state of the path to
* RSMKA_PATH_DOWN since the path down processing has
* completed and cv_signal anyone who was waiting since
* the state was RSMKA_PATH_GOING_DOWN.
* NOTE: Do not do a mutex_exit here. We entered this
* routine with the path lock held by the caller. The
* caller eventually releases the path lock by doing a
* mutex_exit.
*/
#ifdef DEBUG
/*
* Some IPC messages left in the recv_buf,
* they'll be dropped
*/
if (path->msgbuf_cnt != 0)
#endif
/* release the rsmpi handle */
}
}
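/*
 * Sketch of the RSMKA_PATH_GOING_DOWN handshake described above
 * (disabled from compilation); the cv names are assumptions for
 * illustration.
 */
#if 0
	/* mark the transition so other threads cv_wait instead of racing */
	path->state = RSMKA_PATH_GOING_DOWN;
	cv_broadcast(&path->sendq_cv);	/* release credit waiters */
	mutex_exit(&path->mutex);

	/* must not hold path->mutex across this call; see above */
	path_importer_disconnect(path);

	mutex_enter(&path->mutex);
	path->state = RSMKA_PATH_DOWN;	/* down processing complete */
	cv_signal(&path->hold_cv);	/* wake state waiters */
	/* the caller, not this routine, eventually drops path->mutex */
#endif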
/*
* Search through the list of imported segments for segments using this path
* and unload the memory mappings for each one. The application will
* get an error return when a barrier close is invoked.
* NOTE: This function has to be called only after releasing the mutex on
* the path. This is to avoid any recursive mutex panics on the path mutex
* since the path_importer_disconnect function would end up calling
* rsmka_get_sendq_token which requires the path mutex.
*/
static void
{
int i;
rsmresource_t *p = NULL;
"path_importer_disconnect enter\n"));
for (i = 0; i < rsm_hash_size; i++) {
p = rsm_import_segs.bucket[i];
for (; p; p = p->rsmrc_next) {
if ((p->rsmrc_node == remote_node) &&
(p->rsmrc_adapter == adapter)) {
/*
* In order to make rsmseg_unload and
* path_importer_disconnect thread safe, acquire the
* segment lock here. rsmseg_unload is responsible for
* releasing the lock. rsmseg_unload releases the lock
* just before a call to rsmipc_send or in case of an
* early exit which occurs if the segment was in the
* state RSM_STATE_CONNECTING or RSM_STATE_NEW.
*/
}
}
}
}
"path_importer_disconnect done\n"));
}
/*
*
* ADAPTER UTILITY FUNCTIONS
*
*/
/*
* Allocate new adapter list head structure and add it to the beginning of
* the list of adapter list heads. There is one list for each adapter
* device name (or type).
*/
static adapter_listhead_t *
{
/* allocation and initialization */
/* link into list of listheads */
} else {
}
return (listhead);
}
/*
* Search the list of adapter list heads for a match on name.
*
*/
static adapter_listhead_t *
{
"lookup_adapter_listhead enter\n"));
break;
}
"lookup_adapter_listhead done\n"));
return (listhead);
}
/*
* Get the adapter list head corresponding to devname and search for
* an adapter descriptor with a match on the instance number. If
* successful, increment the descriptor reference count and return
* the descriptor pointer to the caller.
*
*/
{
"rsmka_lookup_adapter enter\n"));
break;
} else
}
}
"rsmka_lookup_adapter done\n"));
return (current);
}
/*
* Called from rsmka_remove_adapter or rsmseg_free.
* rsm_bind() and rsm_connect() store the adapter pointer returned
* from rsmka_getadapter. The pointer is kept in the segment descriptor.
* When the segment is freed, this routine is called by rsmseg_free to decrement
* the adapter descriptor reference count and possibly free the
* descriptor.
*/
void
{
"rsmka_release_adapter enter\n"));
if (adapter == &loopback_adapter) {
"rsmka_release_adapter done\n"));
return;
}
/* decrement reference count */
/*
* if the adapter descriptor reference count is equal to the
* initialization value of one, then the descriptor has been
* unlinked and can now be freed.
*/
}
else
"rsmka_release_adapter done\n"));
}
/*
* Singly linked list. Add to the front.
*/
static void
{
}
/*
* Return adapter descriptor
*
 * lookup_adapter_listhead returns with the list of adapter listheads
* locked. After adding the adapter descriptor, the adapter listhead list
* lock is dropped.
*/
static adapter_t *
{
}
return (adapter);
}
/*
*
* PATH UTILITY FUNCTIONS
*
*/
/*
* Search the per adapter path list for a match on remote node and
* hwaddr. The path ref_cnt must be greater than zero or the path
* is in the process of being removed.
*
* Acquire the path lock and increment the path hold count.
*/
static path_t *
{
/* start at the list head */
break;
else
}
}
return (current);
}
/*
* This interface is similar to lookup_path but takes only the local
 * adapter name, instance, and remote adapter's hwaddr to identify the
* path. This is used in the interrupt handler routines where nodeid
* is not always available.
*/
path_t *
{
"rsm_find_path:adapter=%s:%d,rem=%llx\n",
/*
 * it's possible that we are here due to an interrupt but the adapter
* has been removed after we received the callback.
*/
return (NULL);
/* start at the list head */
break;
else
}
}
return (current);
}
/*
* Add the path to the head of the (per adapter) list of paths
*/
static void
{
}
/*
* Search the per-adapter list of paths for the specified path, beginning
* at the head of the list. Unlink the path and free the descriptor
* memory.
*/
static void
{
/* start at the list head */
break;
else {
}
}
else
RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t));
}
void
{
"rsmka_enqueue_msgbuf enter\n"));
/* increment the count and advance the tail */
path->msgbuf_cnt++;
path->msgbuf_tail = 0;
} else {
path->msgbuf_tail++;
}
sizeof (rsmipc_request_t));
"rsmka_enqueue_msgbuf done\n"));
}
/*
 * Get the head of the queue using rsmka_gethead_msgbuf, then call
* rsmka_dequeue_msgbuf to remove it.
*/
void
{
"rsmka_dequeue_msgbuf enter\n"));
if (path->msgbuf_cnt == 0)
return;
path->msgbuf_cnt--;
path->msgbuf_head = 0;
} else {
path->msgbuf_head++;
}
"rsmka_dequeue_msgbuf done\n"));
}
{
if (path->msgbuf_cnt == 0)
return (NULL);
return (head);
}
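/*
 * Sketch of the circular receive-buffer arithmetic used by the three
 * msgbuf routines above (disabled from compilation); msgbuf_head,
 * msgbuf_tail and msgbuf_cnt are the fields used in this file, with
 * the wraparound test written out for illustration.
 */
#if 0
	/* enqueue: bump the count and advance the tail with wraparound */
	path->msgbuf_cnt++;
	if (path->msgbuf_tail == RSMIPC_MAX_MESSAGES - 1)
		path->msgbuf_tail = 0;
	else
		path->msgbuf_tail++;

	/* dequeue: drop the count and advance the head with wraparound */
	path->msgbuf_cnt--;
	if (path->msgbuf_head == RSMIPC_MAX_MESSAGES - 1)
		path->msgbuf_head = 0;
	else
		path->msgbuf_head++;
#endif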
/*
 * Called by rsm_connect which needs the node id of the remote node.
 * A search is done through the paths for the local adapter for a
 * match on the specified remote hwaddr.
*/
{
break;
}
}
return (remote_node);
}
/*
* Called by rsm_connect which needs the hardware address of the
* remote adapter. A search is done through the paths for the local
* adapter for a match on the specified remote node.
*/
{
break;
}
}
remote_hwaddr = -1;
return (remote_hwaddr);
}
/*
* IPC UTILITY FUNCTIONS
*/
/*
* If an entry exists, return with the ipc_info_lock held
*/
static ipc_info_t *
{
"lookup_ipc_info done: ipc_info is NULL\n"));
return (NULL);
}
"lookup_ipc_info: ipc_info not found\n"));
break;
}
}
return (ipc_info);
}
/*
* Create an ipc_info descriptor and return with ipc_info_lock held
*/
static ipc_info_t *
{
/*
* allocate an ipc_info descriptor and add it to a
* singly linked list
*/
if (ipc_info_head == NULL) {
"init_ipc_info:ipc_info_head = %lx\n", ipc_info));
} else {
}
return (ipc_info);
}
static void
{
}
if (current != ipc_info_head)
else
}
/*
* Sendq tokens are kept on a circular list. If tokens A, B, C, & D are
* on the list headed by ipc_info, then ipc_info points to A, A points to
* D, D to C, C to B, and B to A.
*/
static void
{
"link_sendq_token: new ipc_info = %lx\n", ipc_info));
}
else
"link_sendq_token: ipc_info = %lx\n", ipc_info));
"link_sendq_token: current = %lx\n", token));
} else {
"link_sendq_token: token = %lx\n", token));
}
}
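/*
 * Sketch of the insertion that produces the shape described above
 * (ipc_info -> A, A -> D, D -> C, C -> B, B -> A), disabled from
 * compilation: each new token is spliced in directly after the first
 * token. Field names are assumptions for illustration.
 */
#if 0
	if (ipc_info->token_list == NULL) {
		token->next = token;		/* single-element circle */
		ipc_info->token_list = token;
	} else {
		/* splice the new token in right after the list head */
		token->next = ipc_info->token_list->next;
		ipc_info->token_list->next = token;
	}
#endif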
static void
{
"unlink_sendq_token enter\n"));
"ipc_info for %d not found\n", remote_node));
return;
}
for (;;) {
"found token, removed it\n"));
} else {
/* list will be empty */
"removed token, list empty\n"));
}
break;
}
"unlink_sendq_token: token not found\n"));
break;
}
}
}
void
{
}
/*
* A valid ipc token can only be returned if the remote node is alive.
* Tokens are on a circular list. Starting with the current token
 * search for a token with an endpoint in state RSMKA_PATH_ACTIVE.
* rsmipc_send which calls rsmka_get_sendq_token expects that if there are
* multiple paths available between a node-pair then consecutive calls from
* a particular invocation of rsmipc_send will return a sendq that is
* different from the one that was used in the previous iteration. When
 * prev_used is NULL it indicates that this is the first iteration in a
* specific rsmipc_send invocation.
*
* Updating the current token provides round robin selection and this
 * is done only in the first iteration, i.e. when prev_used is NULL.
*/
{
"rsmka_get_sendq_token enter\n"));
"rsmka_get_sendq_token done: ipc_info is NULL\n"));
return (NULL);
}
"rsmka_get_sendq_token done: token=NULL\n"));
return (NULL);
}
for (;;) {
"path %lx\n", path));
} else {
/* found a new token */
break;
}
}
if (token == first_token) {
/*
 * we didn't find a new token; reuse prev_used
* if the corresponding path is still up
*/
if (prev_used) {
"path %lx\n", path));
} else {
break;
}
}
"rsmka_get_sendq_token: token=NULL\n"));
return (NULL);
}
}
/* change current_token only the first time */
}
"rsmka_get_sendq_token done\n"));
return (token);
} else {
"rsmka_get_sendq_token done\n"));
return (NULL);
}
}
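/*
 * Sketch of the round-robin selection described above (disabled from
 * compilation): walk the circular token list from the current token
 * until a token whose path is RSMKA_PATH_ACTIVE is found, skipping
 * prev_used; the current token advances only on the first iteration so
 * successive sends rotate across paths. Field names are assumptions
 * for illustration, and the reuse-prev_used fallback is omitted.
 */
#if 0
	sendq_token_t *first = ipc_info->current_token;
	sendq_token_t *token = first;
	path_t *path;

	for (;;) {
		token = token->next;	/* circular list */
		path = SQ_TOKEN_TO_PATH(token);
		if (token != prev_used &&
		    path->state == RSMKA_PATH_ACTIVE)
			break;			/* found a usable endpoint */
		if (token == first)
			return (NULL);		/* wrapped, nothing active */
	}
	if (prev_used == NULL)
		ipc_info->current_token = token; /* rotate for next send */
#endif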
/*
*/
static int
{
int rval;
path));
"create_ipc_sendq: srvc_offset = %lld\n",
srvc_offset));
if (rval == RSM_SUCCESS) {
/* rsmipc_send() may be waiting for a sendq_token */
}
rval));
return (rval);
}
{
"rsmka_check_node_alive done: ipc_info NULL\n"));
return (B_FALSE);
}
"rsmka_check_node_alive done: node is alive\n"));
return (B_TRUE);
} else {
"rsmka_check_node_alive done: node is not alive\n"));
return (B_FALSE);
}
}
/*
* TOPOLOGY IOCTL SUPPORT
*/
static uint32_t
{
int pointer_area_size;
int total_num_of_paths;
/*
* Find the total number of adapters and paths by adding up the
* individual adapter and path counts from all the listheads
*/
total_num_of_paths = 0;
}
#ifdef _MULTI_DATAMODEL
/*
 * Add an extra 4 bytes to make sure the connections header
* is double-word aligned
*/
sizeof (caddr32_t);
else
#else /* _MULTI_DATAMODEL */
#endif /* _MULTI_DATAMODEL */
topology_size = sizeof (rsmka_topology_hdr_t) +
(total_num_of_adapters * sizeof (rsmka_connections_hdr_t)) +
(total_num_of_paths * sizeof (rsmka_remote_cntlr_t));
return (topology_size);
}
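/*
 * Sketch of the size computation above (disabled from compilation),
 * including the ILP32 caller case where 4 extra bytes may be needed to
 * keep the connections header double-word aligned. Variable names
 * follow the surrounding fragments; the exact padding test is an
 * assumption.
 */
#if 0
#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		pointer_area_size =
		    total_num_of_adapters * sizeof (caddr32_t);
		/* pad so the connections header starts 8-byte aligned */
		if (pointer_area_size % sizeof (caddr_t))
			pointer_area_size += sizeof (caddr32_t);
	} else {
		pointer_area_size =
		    total_num_of_adapters * sizeof (caddr_t);
	}
#else
	pointer_area_size = total_num_of_adapters * sizeof (caddr_t);
#endif

	topology_size = sizeof (rsmka_topology_hdr_t) + pointer_area_size +
	    (total_num_of_adapters * sizeof (rsmka_connections_hdr_t)) +
	    (total_num_of_paths * sizeof (rsmka_remote_cntlr_t));
#endif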
static void
{
int cntlr = 0;
#ifdef _MULTI_DATAMODEL
#else
#endif /* _MULTI_DATAMODEL */
/*
* Find the total number of adapters by adding up the
* individual adapter counts from all the listheads
*/
}
/* fill topology header and adjust bufp */
/* leave room for connection pointer area */
#ifdef _MULTI_DATAMODEL
/* make sure bufp is double-word aligned */
sizeof (caddr32_t);
else
#else /* _MULTI_DATAMODEL */
#endif /* _MULTI_DATAMODEL */
/* fill topology from the adapter and path data */
/* fill in user based connection pointer */
#ifdef _MULTI_DATAMODEL
0xffffffff);
} else {
}
#else /* _MULTI_DATAMODEL */
#endif /* _MULTI_DATAMODEL */
MAXNAMELEN, "%s%d",
bufp += sizeof (rsmka_connections_hdr_t);
MAXNAMELEN, "%s%d",
++rem_cntlr;
}
}
}
}
/*
* Called from rsm_ioctl() in rsm.c
 * Make sure there is no possibility of blocking while holding
 * adapter_listhead_base.listlock
*/
int
{
char *bufp;
"rsmka_topology_ioctl enter\n"));
switch (cmd) {
case RSM_IOCTL_TOPOLOGY_SIZE:
break;
case RSM_IOCTL_TOPOLOGY_DATA:
/*
* The size of the buffer which the caller has allocated
* is passed in. If the size needed for the topology data
* is not sufficient, E2BIG is returned
*/
"rsmka_topology_ioctl done: BAD_ADDR\n"));
return (RSMERR_BAD_ADDR);
}
/* calculate the max size of the topology structure */
max_toposize = sizeof (rsmka_topology_hdr_t) +
RSM_MAX_CTRL * (sizeof (caddr_t) +
sizeof (rsmka_connections_hdr_t)) +
RSM_MAX_NODE * sizeof (rsmka_remote_cntlr_t);
"rsmka_topology_ioctl done: size too large\n"));
return (EINVAL);
}
if (request_size < topology_size) {
"rsmka_topology_ioctl done: E2BIG\n"));
return (E2BIG);
}
/* get the topology data and copyout to the caller */
break;
default:
"rsmka_topology_ioctl: cmd not supported\n"));
error = DDI_FAILURE;
}
"rsmka_topology_ioctl done: %d\n", error));
return (error);
}