/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Static function declarations
*/
static void eibnx_gw_is_alive(eibnx_gw_info_t *);
static int eibnx_replace_portinfo(eibnx_thr_info_t *, ibt_hca_portinfo_t *,
    uint_t);		/* identifier and leading parameters reconstructed */
static void eibnx_handle_hca_attach(ib_guid_t);
static void eibnx_handle_hca_detach(ib_guid_t);
/*
* NDI event handle we need
*/
extern ndi_event_hdl_t enx_ndi_event_hdl;
/*
* SM's init type reply flags
*/
/*
 * (Only ENX_PORT_PRES_NOT_PRESERVED() is referenced later in this file;
 * the names of the other two macros are reconstructions.)
 */
#define	ENX_PORT_ATTRIBS_LOADED(itr)	\
	(((itr) & SM_INIT_TYPE_REPLY_NO_LOAD_REPLY) == 0)
#define	ENX_PORT_CONTENT_NOT_PRESERVED(itr)	\
	(((itr) & SM_INIT_TYPE_PRESERVE_CONTENT_REPLY) == 0)
#define	ENX_PORT_PRES_NOT_PRESERVED(itr)	\
	(((itr) & SM_INIT_TYPE_PRESERVE_PRESENCE_REPLY) == 0)
/*
* Port monitor progress flags (all flag values should be non-zero)
*/
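/*
 * The flag definitions themselves do not appear here; the set below is an
 * illustrative sketch only (names and values are assumptions, not the
 * driver's own).  The idea is that each setup step in the port monitor
 * ORs a distinct non-zero bit into info->ti_progress when it succeeds, so
 * the exit path can tear down exactly the steps that were completed.
 */
#define	ENX_MON_LINKSTATE_UP		0x01	/* port went active */
#define	ENX_MON_FOUND_MCGS		0x02	/* solicit/advertise mcgs located */
#define	ENX_MON_SETUP_CQ		0x04	/* shared CQ created */
#define	ENX_MON_SETUP_UD_CHANNEL	0x08	/* UD channel allocated */
#define	ENX_MON_SETUP_BUFS		0x10	/* tx/rx buffers initialized */
#define	ENX_MON_SETUP_CQ_HDLR		0x20	/* completion handler attached */
#define	ENX_MON_JOINED_MCGS		0x40	/* EoIB mcgs joined */
#define	ENX_MON_MULTICAST_SLCT		0x80	/* multicast solicit sent */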
/*
* Per-port thread to solicit, monitor and discover EoIB gateways
* and create the corresponding EoIB driver instances on the host.
*/
void
eibnx_port_monitor(eibnx_thr_info_t *info)
{
info->ti_progress = 0;
/*
* If the port is not active yet, wait for a port up event. The
* async handler, when it sees a port-up event, is expected to
* update the port_monitor's portinfo structure's p_linkstate
* and wake us up with ENX_EVENT_LINK_UP.
*/
(ENX_EVENT_LINK_UP | ENX_EVENT_DIE)) == 0) {
}
goto port_monitor_exit;
}
}
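	/*
	 * Illustrative expansion of the wait described above (the same
	 * cv_wait pattern applies to the ENX_EVENT_MCGS_AVAILABLE wait
	 * below).  The ti_event_lock/ti_event_cv/ti_event member names
	 * are assumptions; ENX_EVENT_LINK_UP, ENX_EVENT_DIE and the CPR
	 * callback "ci" appear elsewhere in this function.
	 */
	mutex_enter(&info->ti_event_lock);
	while ((info->ti_event &
	    (ENX_EVENT_LINK_UP | ENX_EVENT_DIE)) == 0) {
		CALLB_CPR_SAFE_BEGIN(&ci);
		cv_wait(&info->ti_event_cv, &info->ti_event_lock);
		CALLB_CPR_SAFE_END(&ci, &info->ti_event_lock);
	}
	if (info->ti_event & ENX_EVENT_DIE) {
		mutex_exit(&info->ti_event_lock);
		goto port_monitor_exit;
	}
	info->ti_event &= ~ENX_EVENT_LINK_UP;
	mutex_exit(&info->ti_event_lock);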
/*
* Locate the multicast groups for sending solicit requests
* to the GW and receiving advertisements from the GW. If
 * either of the mcgs is not present, wait for them to be
* created by the GW.
*/
(ENX_EVENT_MCGS_AVAILABLE | ENX_EVENT_DIE)) == 0) {
}
goto port_monitor_exit;
}
}
/*
* Setup a shared CQ
*/
ENX_DPRINTF_ERR("eibnx_setup_cq() failed, terminating "
"port monitor for (hca_guid=0x%llx, port_num=0x%x)",
goto port_monitor_exit;
}
/*
* Setup UD channel
*/
ENX_DPRINTF_ERR("eibnx_setup_ud_channel() failed, terminating "
"port monitor for (hca_guid=0x%llx, port_num=0x%x)",
goto port_monitor_exit;
}
/*
* Allocate/initialize any tx/rx buffers
*/
ENX_DPRINTF_ERR("eibnx_setup_bufs() failed, terminating "
"port monitor for (hca_guid=0x%llx, port_num=0x%x)",
goto port_monitor_exit;
}
/*
* Setup completion handler
*/
ENX_DPRINTF_ERR("eibnx_setup_cq_handler() failed, terminating "
"port monitor for (hca_guid=0x%llx, port_num=0x%x)",
goto port_monitor_exit;
}
/*
* Join EoIB multicast groups
*/
ENX_DPRINTF_ERR("eibnx_join_mcgs() failed, terminating ",
"port monitor for (hca_guid=0x%llx, port_num=0x%x)",
goto port_monitor_exit;
}
/*
* Send SOLICIT pkt to the EoIB multicast group
*/
ENX_DPRINTF_ERR("eibnx_fip_solicit_mcast() failed, terminating "
"port monitor for (hca_guid=0x%llx, port_num=0x%x)",
goto port_monitor_exit;
}
deadline) == -1) {
}
}
goto port_monitor_exit;
}
&solicit_period_ticks) != ENX_E_SUCCESS) {
ENX_DPRINTF_WARN("failed to send solicit ucast to "
"gateways (hca_guid=0x%llx, port_num=0x%x)",
}
}
goto periodic_solicit;
}
}
}
}
}
}
}
CALLB_CPR_EXIT(&ci);
}
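/*
 * Illustrative sketch (not part of the original source) of the
 * periodic_solicit cycle at the tail of eibnx_port_monitor(): after the
 * initial multicast SOLICIT, the thread sleeps for one solicitation
 * period (or until it is asked to die) and then unicasts SOLICIT
 * messages to the gateways discovered so far, repeating until told to
 * exit.  The function name, the ti_event* members and the exact
 * signature of the unicast solicit routine are assumptions.
 */
static void
eibnx_solicit_cycle_sketch(eibnx_thr_info_t *info,
    clock_t solicit_period_ticks)
{
	clock_t deadline;

	mutex_enter(&info->ti_event_lock);
	for (;;) {
		deadline = ddi_get_lbolt() + solicit_period_ticks;

		/* Sleep until the next solicitation is due or we must die */
		while ((info->ti_event & ENX_EVENT_DIE) == 0) {
			if (cv_timedwait(&info->ti_event_cv,
			    &info->ti_event_lock, deadline) == -1)
				break;
		}
		if (info->ti_event & ENX_EVENT_DIE)
			break;
		mutex_exit(&info->ti_event_lock);

		/* Re-solicit every known gateway via unicast */
		if (eibnx_fip_solicit_ucast(info,
		    &solicit_period_ticks) != ENX_E_SUCCESS) {
			ENX_DPRINTF_WARN("failed to send solicit ucast "
			    "to gateways");
		}

		mutex_enter(&info->ti_event_lock);
	}
	mutex_exit(&info->ti_event_lock);
}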
/*
* Async subnet notices handler registered with IBTF
*/
/*ARGSUSED*/
void
{
switch (sn_evcode) {
case IBT_SM_EVENT_MCG_CREATED:
}
}
break;
case IBT_SM_EVENT_MCG_DELETED:
break;
default:
break;
}
}
/*
* Async event handler registered with IBTF
*/
/*ARGSUSED*/
void
{
switch (code) {
case IBT_ERROR_CQ:
ENX_DPRINTF_ERR("ibt ERROR event 0x%x received "
break;
case IBT_ERROR_PORT_DOWN:
ENX_DPRINTF_WARN("ibt PORT_DOWN event received "
"(hca_guid=0x%llx, port_num=0x%x)",
break;
case IBT_EVENT_PORT_UP:
ENX_DPRINTF_WARN("ibt PORT_UP event received "
"(hca_guid=0x%llx, port_num=0x%x)",
break;
case IBT_PORT_CHANGE_EVENT:
ENX_DPRINTF_WARN("ibt PORT_CHANGE event received "
"(hca_guid=0x%llx, port_num=0x%x)",
break;
case IBT_CLNT_REREG_EVENT:
ENX_DPRINTF_WARN("ibt CLNT_REREG event received "
"(hca_guid=0x%llx, port_num=0x%x)",
break;
case IBT_HCA_ATTACH_EVENT:
ENX_DPRINTF_VERBOSE("ibt HCA_ATTACH event received "
break;
case IBT_HCA_DETACH_EVENT:
ENX_DPRINTF_VERBOSE("ibt HCA_DETACH event received "
break;
default:
ENX_DPRINTF_VERBOSE("ibt UNSUPPORTED event 0x%x received "
break;
}
}
{
cur_lbolt = ddi_get_lbolt64();
return (B_TRUE);
}
return (B_FALSE);
}
static void
{
/*
* We've just received a multicast advertisement from this
* gateway. Multicast or unicast, this means that the gateway
* is alive. Record this timestamp (in ticks).
*/
}
}
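/*
 * Illustrative sketch (not part of the original source) of the liveness
 * bookkeeping used by the two routines above: when any advertisement is
 * received, the current lbolt is recorded for the gateway, and a gateway
 * is considered dead once more than a timeout's worth of ticks have
 * elapsed since that last advertisement.  The gw_adv_lock,
 * gw_adv_last_lbolt and gw_adv_timeout_ticks member names are
 * assumptions.
 */
static void
eibnx_gw_mark_alive_sketch(eibnx_gw_info_t *gwi)
{
	mutex_enter(&gwi->gw_adv_lock);
	gwi->gw_adv_last_lbolt = ddi_get_lbolt64();
	mutex_exit(&gwi->gw_adv_lock);
}

static boolean_t
eibnx_gw_is_dead_sketch(eibnx_gw_info_t *gwi)
{
	int64_t cur_lbolt = ddi_get_lbolt64();
	boolean_t dead = B_FALSE;

	mutex_enter(&gwi->gw_adv_lock);
	if ((cur_lbolt - gwi->gw_adv_last_lbolt) > gwi->gw_adv_timeout_ticks)
		dead = B_TRUE;
	mutex_exit(&gwi->gw_adv_lock);

	return (dead);
}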
static void
{
/*
* We're here when we receive a unicast advertisement from a
* gateway. If this gateway was discovered earlier but was in
* a dead state, this means it has come back alive and become
* aware of us. We may need to inform any EoIB children
* waiting for notification. Note that if this gateway is
* being discovered for the first time now, we wouldn't have
* created the binding eoib node for it (we will do that when
* we return from this routine), so the "rebirth" and "gw info
* update" event postings will be NOPs.
*/
}
/*
* If we have a gateway information update event, we post that
* first, so any rebirth event processed later will have the
* correct gateway information.
*/
if (gwi_changed) {
}
if (post_rebirth_event) {
}
}
/*
* Thread to create eoib nodes and online instances
*/
void
eibnx_create_eoib_node(void)
{
}
/*
* If this is not really a work item, but a request for us to
* die, throwaway all pending work requests and just die.
*/
if (ss->nx_nodeq_thr_die) {
}
CALLB_CPR_EXIT(&ci);
return;
}
/*
* Grab the first node entry from the queue
*/
goto wait_for_node_to_create;
/*NOTREACHED*/
}
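/*
 * Illustrative sketch (not part of the original source) of the work
 * queue consumer loop that eibnx_create_eoib_node() implements: sleep
 * on the node queue until there is work or a request to die, drain and
 * discard everything on a die request, otherwise pop the head entry,
 * create/online the corresponding eoib node and go back to waiting.
 * Apart from ss->nx_nodeq_thr_die (used above), the softstate type
 * eibnx_t, the nx_nodeq* members and the eibnx_nodeq_t work item type
 * are assumptions.
 */
static void
eibnx_nodeq_consumer_sketch(eibnx_t *ss)
{
	eibnx_nodeq_t *node;

	mutex_enter(&ss->nx_nodeq_lock);
	for (;;) {
		/* Wait for a work item or a request to die */
		while (ss->nx_nodeq == NULL && !ss->nx_nodeq_thr_die)
			cv_wait(&ss->nx_nodeq_cv, &ss->nx_nodeq_lock);

		if (ss->nx_nodeq_thr_die) {
			/* Throw away all pending work requests and quit */
			while ((node = ss->nx_nodeq) != NULL) {
				ss->nx_nodeq = node->nc_next;
				kmem_free(node, sizeof (eibnx_nodeq_t));
			}
			mutex_exit(&ss->nx_nodeq_lock);
			return;
		}

		/* Grab the first node entry from the queue */
		node = ss->nx_nodeq;
		ss->nx_nodeq = node->nc_next;
		mutex_exit(&ss->nx_nodeq_lock);

		/*
		 * Create the eoib node and online the instance for this
		 * entry (elided), then release the work item.
		 */
		kmem_free(node, sizeof (eibnx_nodeq_t));

		mutex_enter(&ss->nx_nodeq_lock);
	}
}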
/*
* Tx and Rx completion interrupt handler. Guaranteed to be single
* threaded and nonreentrant for this CQ.
*/
void
{
ENX_DPRINTF_DEBUG("eibnx_comp_intr: "
"cq_hdl(0x%llx) != info->ti_cq_hdl(0x%llx), "
return;
}
}
/*
* Send and Receive completion handler functions for EoIB nexus
*/
/*ARGSUSED*/
{
int i;
/*
	 * Make sure the port monitor isn't killed while we're in the completion
* handler. If the port monitor thread is already being killed, we'll
* stop processing completions.
*/
return ((uint_t)ENX_E_SUCCESS);
}
/*
* Re-arm the notification callback before we start polling
* the completion queue. There's nothing much we can do if the
* enable_cq_notify fails - we issue a warning and move on.
*/
if (ret != IBT_SUCCESS) {
ENX_DPRINTF_WARN("ibt_enable_cq_notify(cq_hdl=0x%llx) "
}
/*
* Handle tx and rx completions
*/
&polled)) == IBT_SUCCESS) {
} else {
}
}
}
/*
* On the way out, make sure we wake up any pending death requestor
* for the port-monitor thread. Note that we need to do a cv_broadcast()
* here since there could be multiple threads sleeping on the event cv
* and we want to make sure all waiters get a chance to see if it's
* their turn.
*/
return (DDI_INTR_CLAIMED);
}
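/*
 * Illustrative sketch (not part of the original source) of the re-arm
 * and drain sequence used by the completion handler above: notification
 * is re-enabled before polling, so completions that arrive while we
 * drain the CQ still trigger a fresh callback.  Only ti_cq_hdl is a
 * member seen elsewhere in this file; the poll batch size and the
 * dispatch placeholders are assumptions.
 */
static void
eibnx_poll_cq_sketch(eibnx_thr_info_t *info)
{
	ibt_wc_t wc[8];		/* arbitrary batch size for this sketch */
	uint_t polled;
	uint_t i;

	/* Re-arm first; a failure here only costs us a warning */
	if (ibt_enable_cq_notify(info->ti_cq_hdl,
	    IBT_NEXT_COMPLETION) != IBT_SUCCESS) {
		ENX_DPRINTF_WARN("ibt_enable_cq_notify() failed");
	}

	/* Drain whatever is currently queued on the CQ */
	while (ibt_poll_cq(info->ti_cq_hdl, wc, 8, &polled) == IBT_SUCCESS) {
		for (i = 0; i < polled; i++) {
			if (wc[i].wc_status != IBT_WC_SUCCESS) {
				/* report the work completion error (elided) */
				continue;
			}
			/* reclaim tx wqe or process rx packet (elided) */
		}
	}
}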
/*
* Rx processing code
*/
static void
{
/*
* We'll simply drop any packet (including broadcast advertisements
* from gws) we receive before we've done our solicitation broadcast.
*/
if (info->ti_mcast_done == 0) {
return;
}
/*
* Skip the GRH and parse the message in the packet
*/
return;
}
/*
* If it was a login ack for one of our children, we need to pass
* it on to the child
*/
return;
}
/*
* Other than that, we only handle gateway advertisements
*/
return;
}
/*
* State machine to create eoib instances. Whether this advertisement
* is from a new gateway or an old gateway that we already know about,
* if this was a unicast response to our earlier solicitation and it's
* the first time we're receiving it from this gateway, we're ready to
	 * log in, so we create the EoIB instance for it.
*/
if (gwi->gw_flag_available == 0) {
} else if (gwi->gw_flag_ucast_advt == 0) {
} else {
}
}
} else {
if (gwi->gw_flag_available == 0) {
} else if (gwi->gw_flag_ucast_advt == 0) {
if (orig_gw_state == ENX_GW_STATE_UNAVAILABLE) {
} else {
}
} else {
}
}
}
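/*
 * Illustrative sketch (not part of the original source) of the decision
 * described in the state machine comment above, for an advertisement
 * from a gateway we already track: nothing is done until the gateway
 * has flagged itself available, the first unicast advertisement either
 * revives a gateway previously marked unavailable or queues eoib node
 * creation for a brand-new one, and anything later is only a liveness
 * refresh.  gw_flag_available, gw_flag_ucast_advt and
 * ENX_GW_STATE_UNAVAILABLE appear in the code above; the ucast flag and
 * the orig_gw_state parameter type are assumptions.
 */
static void
eibnx_advt_action_sketch(eibnx_gw_info_t *gwi, boolean_t ucast,
    uint_t orig_gw_state)
{
	if (gwi->gw_flag_available == 0) {
		/* Not declared available yet; keep soliciting */
		return;
	}

	if (gwi->gw_flag_ucast_advt == 0 && ucast) {
		/* First unicast response to our earlier solicitation */
		gwi->gw_flag_ucast_advt = 1;
		if (orig_gw_state == ENX_GW_STATE_UNAVAILABLE) {
			/* Known gateway is back: post rebirth (elided) */
		} else {
			/* New gateway: queue node creation (elided) */
		}
		return;
	}

	/* Subsequent advertisements only refresh the liveness timestamp */
}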
/*ARGSUSED*/
static void
{
/*
* Currently, all we do is report
*/
switch (wcerr) {
case IBT_WC_WR_FLUSHED_ERR:
ENX_DPRINTF_VERBOSE("IBT_WC_WR_FLUSHED_ERR seen "
"(hca_guid=0x%llx, port_num=0x%x, wqe_type=0x%x)",
break;
case IBT_WC_LOCAL_CHAN_OP_ERR:
ENX_DPRINTF_ERR("IBT_WC_LOCAL_CHAN_OP_ERR seen "
"(hca_guid=0x%llx, port_num=0x%x, wqe_type=0x%x)",
break;
case IBT_WC_LOCAL_PROTECT_ERR:
ENX_DPRINTF_ERR("IBT_WC_LOCAL_PROTECT_ERR seen "
"(hca_guid=0x%llx, port_num=0x%x, wqe_type=0x%x)",
break;
}
}
static void
{
int ret;
/*
* When we get login acknowledgements, we simply invoke the
* appropriate EoIB driver callback to process it on behalf
* of the driver instance. We will let the callback do error
* checks.
*/
ENX_DPRINTF_DEBUG("no eoib child with instance 0x%x found "
"for (hca_guid=0x%llx, port_num=0x%x)", inst,
return;
}
if (ret != NDI_SUCCESS) {
ENX_DPRINTF_WARN("no login-ack cookie for (hca_guid=0x%llx, "
return;
}
}
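/*
 * Illustrative sketch (not part of the original source) of the NDI
 * event delivery used by the login-ack path above: the cookie for the
 * event is looked up against the child's dev_info via the
 * enx_ndi_event_hdl declared at the top of this file, and the packet is
 * handed over as the event's impl data.  The event name string, the
 * function name and the dip parameters are assumptions.
 */
static void
eibnx_post_login_ack_sketch(dev_info_t *nexus_dip, dev_info_t *child_dip,
    uint8_t *pkt)
{
	ddi_eventcookie_t cookie;

	if (ndi_event_retrieve_cookie(enx_ndi_event_hdl, child_dip,
	    "SUNW,eoib-login-ack",	/* assumed event name */
	    &cookie, NDI_EVENT_NOPASS) != NDI_SUCCESS) {
		ENX_DPRINTF_WARN("no login-ack cookie for child");
		return;
	}

	(void) ndi_post_event(nexus_dip, child_dip, cookie, (void *)pkt);
}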
static void
{
int ret;
ENX_DPRINTF_WARN("no eoib child bound to gw portid 0x%x "
"found for (hca_guid=0x%llx, port_num=0x%x)",
return;
}
if (ret != NDI_SUCCESS) {
ENX_DPRINTF_WARN("no gw-available cookie for (hca_guid=0x%llx, "
return;
}
}
static void
void *new_gw_info)
{
int ret;
ENX_DPRINTF_WARN("no eoib child bound to gw portid 0x%x "
"found for (hca_guid=0x%llx, port_num=0x%x)",
return;
}
if (ret != NDI_SUCCESS) {
ENX_DPRINTF_WARN("no gw-info-update cookie for "
"(hca_guid=0x%llx, port_num=0x%x, gw_portid=0x%x), "
return;
}
}
static int
{
break;
}
ENX_DPRINTF_WARN("hca hdl (0x%llx) not found in hca list",
return (ENX_E_FAILURE);
}
break;
}
}
ENX_DPRINTF_WARN("portinfo (0x%llx) not found in hca list",
return (ENX_E_FAILURE);
}
return (ENX_E_SUCCESS);
}
static void
{
/*
* Find the port monitor thread that matches the event hca and
* portnum
*/
break;
}
}
return;
/*
 * See if we need to rejoin the mcgs for this port, and do so if needed
*/
if (ret != IBT_SUCCESS) {
return;
ENX_DPRINTF_WARN("ibt_query_hca_ports(port_num=%d) failed, "
pi->p_linkstate);
return;
}
/*
* If our port's base lid has changed, we need to replace
* the saved portinfo in our lists with the new one before
* going further.
*/
size_pi = 0;
}
}
}
/*
* If the port monitor was stuck waiting for the link to come up,
* let it know that it is up now.
*/
}
if (ENX_PORT_PRES_NOT_PRESERVED(itr)) {
(void) eibnx_rejoin_mcgs(ti);
}
}
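/*
 * Illustrative sketch (not part of the original source) of the tail of
 * the port-event handling above: once fresh portinfo has been obtained
 * (and swapped in if the base lid changed), a port monitor stuck
 * waiting for link-up is woken, and the mcgs are rejoined if the SM's
 * init type reply says our multicast membership was not preserved.
 * The ti_event* member names and the itr parameter type are
 * assumptions; ENX_EVENT_LINK_UP, ENX_PORT_PRES_NOT_PRESERVED() and
 * eibnx_rejoin_mcgs() appear elsewhere in this file.
 */
static void
eibnx_port_event_tail_sketch(eibnx_thr_info_t *ti, uint8_t itr)
{
	/* Wake the port monitor if it's waiting for the link to come up */
	mutex_enter(&ti->ti_event_lock);
	ti->ti_event |= ENX_EVENT_LINK_UP;
	cv_broadcast(&ti->ti_event_cv);
	mutex_exit(&ti->ti_event_lock);

	/* Rejoin the mcgs if the SM didn't preserve our membership */
	if (ENX_PORT_PRES_NOT_PRESERVED(itr)) {
		(void) eibnx_rejoin_mcgs(ti);
	}
}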
static void
{
/*
* All we need to do is to start a port monitor for all the ports
* on the new HCA. To do this, go through our current port monitors
* and see if we already have a monitor for this HCA - if so, print
* a warning and return.
*/
ENX_DPRINTF_VERBOSE("hca (guid=0x%llx) already "
"attached", new_hca_guid);
return;
}
}
/*
* If we don't have it in our list, process the HCA and start the
* port monitors
*/
}
}
}
static void
{
/*
* We need to locate all monitor threads for this HCA and stop them
*/
} else {
/*
* Take it out from the good list
*/
if (ti_prev)
else
/*
* And put it in the to-stop list
*/
ti_stop_list = ti;
}
}
/*
* Ask all the port_monitor threads to die.
*/
}
/*
* Now, locate the HCA in our list and release all HCA related
* resources.
*/
} else {
if (hca_prev) {
} else {
}
break;
}
}
if (hca) {
(void) eibnx_cleanup_hca(hca);
}
}
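/*
 * Illustrative sketch (not part of the original source) of the list
 * manipulation described in the HCA detach handling above: monitors
 * whose HCA guid matches the detaching HCA are unlinked from the active
 * list and collected on a local to-stop list, which the caller then
 * walks to ask each port monitor to die before releasing the HCA's
 * resources with eibnx_cleanup_hca().  The softstate type eibnx_t and
 * the nx_lock, nx_thr_info, ti_next and ti_hca_guid members are
 * assumptions.
 */
static eibnx_thr_info_t *
eibnx_unlink_monitors_sketch(eibnx_t *ss, ib_guid_t hca_guid)
{
	eibnx_thr_info_t *ti, *ti_next, *ti_prev = NULL;
	eibnx_thr_info_t *ti_stop_list = NULL;

	mutex_enter(&ss->nx_lock);
	for (ti = ss->nx_thr_info; ti != NULL; ti = ti_next) {
		ti_next = ti->ti_next;

		if (ti->ti_hca_guid != hca_guid) {
			ti_prev = ti;
			continue;
		}

		/* Take it out of the good list ... */
		if (ti_prev)
			ti_prev->ti_next = ti_next;
		else
			ss->nx_thr_info = ti_next;

		/* ... and put it on the to-stop list */
		ti->ti_next = ti_stop_list;
		ti_stop_list = ti;
	}
	mutex_exit(&ss->nx_lock);

	return (ti_stop_list);
}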