ibd.c revision bd2ee4f4d736b3a98de7cb84206a8cd8d65ccdda
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* An implementation of the IPoIB standard based on PSARC 2001/289.
*/
#include <sys/mac_provider.h>
#include <sys/multidata.h>
/*
* Per-interface tunables
*
* ibd_tx_copy_thresh
* This sets the threshold at which ibd will attempt to do a bcopy of the
* outgoing data into a pre-mapped buffer. The IPoIB driver's send behavior
 * is restricted by various parameters, so this value should be changed
 * only after careful consideration. For instance, IB HCAs currently
* impose a relatively small limit (when compared to ethernet NICs) on the
* length of the SGL for transmit. On the other hand, the ip stack could
* send down mp chains that are quite long when LSO is enabled.
*
* ibd_num_swqe
* Number of "send WQE" elements that will be allocated and used by ibd.
* When tuning this parameter, the size of pre-allocated, pre-mapped copy
* buffer in each of these send wqes must be taken into account. This
* copy buffer size is determined by the value of IBD_TX_BUF_SZ (this is
* currently set to the same value of ibd_tx_copy_thresh, but may be
* changed independently if needed).
*
* ibd_num_rwqe
* Number of "receive WQE" elements that will be allocated and used by
* ibd. This parameter is limited by the maximum channel size of the HCA.
* Each buffer in the receive wqe will be of MTU size.
*
* ibd_num_lso_bufs
* Number of "larger-than-MTU" copy buffers to use for cases when the
* outgoing mblk chain is too fragmented to be used with ibt_map_mem_iov()
* and too large to be used with regular MTU-sized copy buffers. It is
 * not recommended to tune this variable without understanding the
 * application environment and/or memory resources. The size of each of
 * these lso buffers is determined by the value of IBD_LSO_BUFSZ.
*
* ibd_num_ah
* Number of AH cache entries to allocate
*
* ibd_hash_size
* Hash table size for the active AH list
*
* ibd_separate_cqs
* ibd_txcomp_poll
* These boolean variables (1 or 0) may be used to tune the behavior of
* ibd in managing the send and receive completion queues and in deciding
* whether or not transmit completions should be polled or interrupt
* driven (when the completion queues are separate). If both the completion
* queues are interrupt driven, it may not be possible for the handlers to
* be invoked concurrently, depending on how the interrupts are tied on
* the PCI intr line. Note that some combination of these two parameters
* may not be meaningful (and therefore not allowed).
*
* ibd_tx_softintr
* ibd_rx_softintr
* The softintr mechanism allows ibd to avoid event queue overflows if
 * the receive/completion handlers are expected to be expensive. These
 * are enabled by default.
*
* ibd_log_sz
* This specifies the size of the ibd log buffer in bytes. The buffer is
* allocated and logging is enabled only when IBD_LOGGING is defined.
*
*/
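/*
 * Tunable variable definitions. The default values below are
 * representative assumptions for this sketch, not verified against
 * this exact revision.
 */
uint_t ibd_tx_copy_thresh = 0x1000;
uint_t ibd_num_swqe = 4000;
uint_t ibd_num_rwqe = 4000;
uint_t ibd_num_lso_bufs = 0x400;
uint_t ibd_num_ah = 16;
uint_t ibd_hash_size = 16;
uint_t ibd_separate_cqs = 1;
uint_t ibd_rx_softintr = 1;
uint_t ibd_tx_softintr = 1;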
uint_t ibd_txcomp_poll = 0;
#ifdef IBD_LOGGING
uint_t ibd_log_sz = 0x20000;
#endif
#define IBD_TX_COPY_THRESH ibd_tx_copy_thresh
#define IBD_TX_BUF_SZ ibd_tx_copy_thresh
#define IBD_NUM_SWQE ibd_num_swqe
#define IBD_NUM_RWQE ibd_num_rwqe
#define IBD_NUM_LSO_BUFS ibd_num_lso_bufs
#define IBD_NUM_AH ibd_num_ah
#define IBD_HASH_SIZE ibd_hash_size
#ifdef IBD_LOGGING
#define IBD_LOG_SZ ibd_log_sz
#endif
/*
 * Receive CQ moderation parameters: NOT tunables
 */
#define	IBD_RXCOMP_COUNT	4
#define	IBD_RXCOMP_USEC		10
/*
* Send CQ moderation parameters: NOT tunables
*/
#define IBD_TXCOMP_COUNT 10
#define IBD_TXCOMP_USEC 300
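/*
 * A minimal sketch of how the moderation values above would be applied
 * to a CQ through the standard IBTF call ibt_modify_cq(); the handler
 * id argument of 0 is an assumption, not taken from this file.
 */
static void
ibd_cq_moderate_sketch(ibt_cq_hdl_t cq_hdl)
{
	(void) ibt_modify_cq(cq_hdl, IBD_TXCOMP_COUNT, IBD_TXCOMP_USEC, 0);
}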
/*
* Thresholds
*
* When waiting for resources (swqes or lso buffers) to become available,
* the first two thresholds below determine how long to wait before informing
* the network layer to start sending packets again. The IBD_TX_POLL_THRESH
* determines how low the available swqes should go before we start polling
* the completion queue.
*/
#define IBD_FREE_LSOS_THRESH 8
#define IBD_FREE_SWQES_THRESH 20
#define IBD_TX_POLL_THRESH 80
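/*
 * Sketch of the resume check these thresholds feed (field and function
 * names here are assumptions; the actual ibd_resume_transmission() is
 * declared further below):
 */
static void
ibd_resume_sketch(ibd_state_t *state)
{
	/* tell the nw layer to restart sends once enough swqes are free */
	if (state->id_tx_list.dl_cnt > IBD_FREE_SWQES_THRESH)
		mac_tx_update(state->id_mh);
}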
/*
* When doing multiple-send-wr or multiple-recv-wr posts, this value
* determines how many to do at a time (in a single ibt_post_send/recv).
*/
#define IBD_MAX_POST_MULTIPLE 4
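/*
 * A sketch (not the driver's actual Tx path) of batching prepared work
 * requests through the standard IBTF entry point ibt_post_send(), at
 * most IBD_MAX_POST_MULTIPLE at a time; the function and parameter
 * names here are illustrative only.
 */
static int
ibd_post_send_batch_sketch(ibt_channel_hdl_t chan, ibt_send_wr_t *wrs,
    uint_t n_wrs)
{
	uint_t done = 0;

	while (done < n_wrs) {
		uint_t num = n_wrs - done;
		uint_t posted;

		if (num > IBD_MAX_POST_MULTIPLE)
			num = IBD_MAX_POST_MULTIPLE;
		if (ibt_post_send(chan, &wrs[done], num, &posted) !=
		    IBT_SUCCESS)
			return (DDI_FAILURE);
		done += posted;
	}
	return (DDI_SUCCESS);
}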
/*
* Maximum length for returning chained mps back to crossbow
*/
#define IBD_MAX_RX_MP_LEN 16
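/*
 * That is, at most 16 mblks are linked via b_next into a single chain
 * before being handed up to the mac layer in one call (assumed usage).
 */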
/*
* LSO parameters
*/
#define IBD_LSO_MAXLEN 65536
#define IBD_LSO_BUFSZ 8192
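/*
 * With the values above, a maximal LSO burst spans
 * IBD_LSO_MAXLEN / IBD_LSO_BUFSZ == 65536 / 8192 == 8 copy buffers.
 */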
#define IBD_PROP_LSO_POLICY "lso-policy"
/*
* Completion queue polling control
*/
#define IBD_RX_CQ_POLLING 0x1
#define IBD_TX_CQ_POLLING 0x2
#define IBD_REDO_RX_CQ_POLLING 0x4
#define IBD_REDO_TX_CQ_POLLING 0x8
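/*
 * Intended flag protocol (a sketch of the usage; the poller itself is
 * not shown in this extract): a handler that finds the relevant
 * *_CQ_POLLING bit already set just records the matching
 * IBD_REDO_*_CQ_POLLING bit and returns, while the thread owning the
 * POLLING bit re-polls as long as a REDO bit keeps appearing.
 */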
/*
* Flag bits for resources to reap
*/
#define IBD_RSRC_SWQE 0x1
#define IBD_RSRC_LSOBUF 0x2
/*
* Async operation types
*/
#define IBD_ASYNC_GETAH 1
#define IBD_ASYNC_JOIN 2
#define IBD_ASYNC_LEAVE 3
#define IBD_ASYNC_PROMON 4
#define IBD_ASYNC_PROMOFF 5
#define IBD_ASYNC_REAP 6
#define IBD_ASYNC_TRAP 7
#define IBD_ASYNC_SCHED 8
#define IBD_ASYNC_LINK 9
#define IBD_ASYNC_EXIT 10
/*
* Async operation states
*/
#define IBD_OP_NOTSTARTED 0
#define IBD_OP_ONGOING 1
#define IBD_OP_COMPLETED 2
#define IBD_OP_ERRORED 3
#define IBD_OP_ROUTERED 4
/*
 * State of IBD driver initialization during attach/m_start
 */
#define IBD_DRV_STATE_INITIALIZED 0x00001
#define IBD_DRV_RXINTR_ADDED 0x00002
#define IBD_DRV_TXINTR_ADDED 0x00004
#define IBD_DRV_IBTL_ATTACH_DONE 0x00008
#define IBD_DRV_HCA_OPENED 0x00010
#define IBD_DRV_PD_ALLOCD 0x00020
#define IBD_DRV_MAC_REGISTERED 0x00040
#define IBD_DRV_PORT_DETAILS_OBTAINED 0x00080
#define IBD_DRV_BCAST_GROUP_FOUND 0x00100
#define IBD_DRV_ACACHE_INITIALIZED 0x00200
#define IBD_DRV_CQS_ALLOCD 0x00400
#define IBD_DRV_UD_CHANNEL_SETUP 0x00800
#define IBD_DRV_TXLIST_ALLOCD 0x01000
#define IBD_DRV_SCQ_NOTIFY_ENABLED 0x02000
#define IBD_DRV_RXLIST_ALLOCD 0x04000
#define IBD_DRV_BCAST_GROUP_JOINED 0x08000
#define IBD_DRV_ASYNC_THR_CREATED 0x10000
#define IBD_DRV_RCQ_NOTIFY_ENABLED 0x20000
#define IBD_DRV_SM_NOTICES_REGISTERED 0x40000
#define IBD_DRV_STARTED 0x80000
/*
* Miscellaneous constants
*/
#define IBD_SEND 0
#define IBD_RECV 1
#define IB_MGID_IPV4_LOWGRP_MASK 0xFFFFFFFF
#define IBD_DEF_MAX_SDU 2044
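/* 2044 == the default 2048-byte IB MTU minus the 4-byte IPoIB header */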
#define IBD_DEFAULT_QKEY 0xB1B
#ifdef IBD_LOGGING
#define IBD_DMAX_LINE 100
#endif
/*
* Enumerations for link states
*/
typedef enum {
	IBD_LINK_DOWN,
	IBD_LINK_UP,
	IBD_LINK_UP_ABSENT
} ibd_link_op_t;
/*
 * Driver State Pointer
 */
void *ibd_list;
/*
* Logging
*/
#ifdef IBD_LOGGING
static kmutex_t ibd_lbuf_lock;
static uint8_t *ibd_lbuf;
static uint32_t ibd_lbuf_ndx;
#endif
/*
 * Required system entry points
 */
static int ibd_attach(dev_info_t *, ddi_attach_cmd_t);
static int ibd_detach(dev_info_t *, ddi_detach_cmd_t);
/*
* Required driver entry points for GLDv3
*/
static int ibd_m_stat(void *, uint_t, uint64_t *);
static int ibd_m_start(void *);
static void ibd_m_stop(void *);
static int ibd_m_promisc(void *, boolean_t);
static int ibd_m_multicst(void *, boolean_t, const uint8_t *);
static int ibd_m_unicst(void *, const uint8_t *);
static mblk_t *ibd_m_tx(void *, mblk_t *);
static boolean_t ibd_m_getcapab(void *, mac_capab_t, void *);
/*
* Private driver entry points for GLDv3
*/
/*
* Initialization
*/
static int ibd_init_txlist(ibd_state_t *);
static int ibd_init_rxlist(ibd_state_t *);
static int ibd_acache_init(ibd_state_t *);
#ifdef IBD_LOGGING
static void ibd_log_init(void);
#endif
/*
 * Termination/cleanup
 */
static void ibd_state_fini(ibd_state_t *);
static void ibd_fini_txlist(ibd_state_t *);
static void ibd_fini_rxlist(ibd_state_t *);
static void ibd_acache_fini(ibd_state_t *);
#ifdef IBD_LOGGING
static void ibd_log_fini(void);
#endif
/*
* Allocation/acquire/map routines
*/
static int ibd_alloc_tx_copybufs(ibd_state_t *);
static int ibd_alloc_tx_lsobufs(ibd_state_t *);
static int ibd_acquire_lsobufs(ibd_state_t *, uint_t, ibt_wr_ds_t *,
    uint32_t *);
/*
 * Free/release/unmap routines
 */
static void ibd_free_tx_copybufs(ibd_state_t *);
static void ibd_free_tx_lsobufs(ibd_state_t *);
/*
 * Handlers/callbacks
 */
static void ibd_async_handler(void *, ibt_hca_hdl_t,
    ibt_async_code_t, ibt_async_event_t *);
static uint_t ibd_tx_recycle(char *);
static void ibd_rcq_handler(ibt_cq_hdl_t, void *);
static void ibd_scq_handler(ibt_cq_hdl_t, void *);
static void ibd_freemsg_cb(char *);
static void ibd_snet_notices_handler(void *, ib_gid_t,
    ibt_subnet_event_code_t, ibt_subnet_event_t *);
/*
* Threads
*/
static void ibd_async_work(ibd_state_t *);
/*
* Async tasks
*/
static void ibd_async_setprom(ibd_state_t *);
static void ibd_async_unsetprom(ibd_state_t *);
static void ibd_async_txsched(ibd_state_t *);
/*
* Async task helpers
*/
static boolean_t ibd_get_allroutergroup(ibd_state_t *,
    ipoib_mac_t *, ipoib_mac_t *);
static void ibd_async_done(ibd_state_t *);
/*
 * Helpers for attach/start routines
 */
static int ibd_get_port_details(ibd_state_t *);
static int ibd_alloc_cqs(ibd_state_t *);
static int ibd_setup_ud_channel(ibd_state_t *);
static int ibd_undo_m_start(ibd_state_t *);
/*
* Miscellaneous helpers
*/
static int ibd_sched_poll(ibd_state_t *, int, int);
static int ibd_resume_transmission(ibd_state_t *);
static void *list_get_head(list_t *);
static void ibd_print_warn(ibd_state_t *, char *, ...);
#ifdef IBD_LOGGING
static void ibd_log(const char *, ...);
#endif
/* Module Driver Info */
static struct modldrv ibd_modldrv = {
&mod_driverops, /* This one is a driver */
"InfiniBand GLDv3 Driver", /* short description */
&ibd_dev_ops /* driver specific ops */
};
/* Module Linkage */
static struct modlinkage ibd_modlinkage = {
	MODREV_1, (void *)&ibd_modldrv, NULL
};
/*
* Module (static) info passed to IBTL during ibt_attach
*/
static struct ibt_clnt_modinfo_s ibd_clnt_modinfo = {
	IBTI_V_CURR,
	IBT_NETWORK,
	ibd_async_handler,
	NULL,
	"IPIB"
};
/*
* GLDv3 entry points
*/
#define IBD_M_CALLBACK_FLAGS (MC_GETCAPAB)
static mac_callbacks_t ibd_m_callbacks = {
	IBD_M_CALLBACK_FLAGS, ibd_m_stat, ibd_m_start, ibd_m_stop,
	ibd_m_promisc, ibd_m_multicst, ibd_m_unicst, ibd_m_tx,
	NULL,			/* mc_ioctl */
	ibd_m_getcapab
};
/*
 * Scope/pkey manipulation in the IPoIB multicast address
 */
#define	IBD_FILL_SCOPE_PKEY(maddr, scope, pkey) \
{ \
}
#define	IBD_CLEAR_SCOPE_PKEY(maddr) \
{ \
}
/*
* Rudimentary debugging support
*/
#ifdef DEBUG
int ibd_debuglevel = 100;
static void
debug_print(int l, char *fmt, ...)
{
	va_list ap;

	if (l < ibd_debuglevel)
		return;
	va_start(ap, fmt);
	vcmn_err(CE_CONT, fmt, ap);
	va_end(ap);
}
#define DPRINT debug_print
#else
#define DPRINT
#endif
/*
* Common routine to print warning messages; adds in hca guid, port number
* and pkey to be able to identify the IBA interface.
*/
static void
ibd_print_warn(ibd_state_t *state, char *fmt, ...)
{
	ib_guid_t hca_guid;
	char ibd_print_buf[256];
	int len;
	va_list ap;

	hca_guid = ddi_prop_get_int64(DDI_DEV_T_ANY, state->id_dip,
	    0, "hca-guid", 0);
	len = snprintf(ibd_print_buf, sizeof (ibd_print_buf),
	    "%s%d: HCA GUID %016llx port %d PKEY %02x ",
	    ddi_driver_name(state->id_dip), ddi_get_instance(state->id_dip),
	    (u_longlong_t)hca_guid, state->id_port, state->id_pkey);
	va_start(ap, fmt);
	(void) vsnprintf(ibd_print_buf + len,
	    sizeof (ibd_print_buf) - len, fmt, ap);
	cmn_err(CE_NOTE, "!%s", ibd_print_buf);
	va_end(ap);
}
/*
* Warlock directives
*/
/*
* id_lso_lock
*
* state->id_lso->bkt_nfree may be accessed without a lock to
* determine the threshold at which we have to ask the nw layer
* to resume transmission (see ibd_resume_transmission()).
*/
/*
* id_cq_poll_lock
*/
/*
* id_txpost_lock
*/
/*
* id_rxpost_lock
*/
/*
* id_acache_req_lock
*/
/*
* id_ac_mutex
*
* This mutex is actually supposed to protect id_ah_op as well,
* but this path of the code isn't clean (see update of id_ah_op
* in ibd_async_acache(), immediately after the call to
* ibd_async_mcache()). For now, we'll skip this check by
* declaring that id_ah_op is protected by some internal scheme
* that warlock isn't aware of.
*/
/*
* id_mc_mutex
*/
/*
* id_trap_lock
*/
/*
* id_prom_op
*/
/*
* id_sched_lock
*/
/*
* id_link_mutex
*/
/*
* id_tx_list.dl_mutex
*/
/*
* id_rx_list.dl_mutex
*/
/*
* Items protected by atomic updates
*/
/*
* Non-mutex protection schemes for data elements. Almost all of
* these are non-shared items.
*/
int
_init()
{
int status;
	/*
	 * Sanity check some parameter settings. Tx completion polling
	 * only makes sense with separate CQs for Tx and Rx.
	 */
	if ((ibd_txcomp_poll == 1) && (ibd_separate_cqs == 0)) {
		cmn_err(CE_NOTE, "!ibd: %s",
		    "Setting ibd_txcomp_poll = 0 for combined CQ");
		ibd_txcomp_poll = 0;
	}

	status = ddi_soft_state_init(&ibd_list, sizeof (ibd_state_t), 0);
	if (status != 0) {
		return (status);
	}

	mac_init_ops(&ibd_dev_ops, "ibd");
	status = mod_install(&ibd_modlinkage);
	if (status != 0) {
		mac_fini_ops(&ibd_dev_ops);
		ddi_soft_state_fini(&ibd_list);
		return (status);
	}
#ifdef IBD_LOGGING
ibd_log_init();
#endif
return (0);
}
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ibd_modlinkage, modinfop));
}
int
_fini()
{
	int status;

	status = mod_remove(&ibd_modlinkage);
	if (status != 0)
		return (status);

	mac_fini_ops(&ibd_dev_ops);
	ddi_soft_state_fini(&ibd_list);
#ifdef IBD_LOGGING
ibd_log_fini();
#endif
return (0);
}
/*
* Convert the GID part of the mac address from network byte order
* to host order.
*/
static void
{
}
/*
* Create the IPoIB address in network byte order from host order inputs.
*/
static void
{
}
/*
* Send to the appropriate all-routers group when the IBA multicast group
* does not exist, based on whether the target group is v4 or v6.
*/
static boolean_t
ibd_get_allroutergroup(ibd_state_t *state, ipoib_mac_t *mcmac,
    ipoib_mac_t *rmac)
{
	boolean_t retval = B_TRUE;
/*
* Copy the first 4 bytes in without assuming any alignment of
* input mac address; this will have IPoIB signature, flags and
* scope bits.
*/
	/*
	 * Generate proper address for IPv4/v6, adding in the Pkey properly.
	 */
else
/*
* Does not have proper bits in the mgid address.
*/
return (retval);
}
/*
 * Padding for nd6 Neighbor Solicitation and Advertisement needs to be at
 * front of optional src/tgt link layer address. Right now Solaris inserts
 * padding by default at the end. The routine which does this is nce_xmit()
* in ip_ndp.c. It copies the nd_lla_addr after the nd_opt_hdr_t. So when
* the packet comes down from IP layer to the IBD driver, it is in the
* following format: [IPoIB_PTXHDR_T][INET6 packet][ICMP6][OPT_ND_HDR_T]
* This size is 2 bytes followed by [22 bytes of ipoib_machdr]. As a result
 * machdr is not 4 byte aligned and has 2 bytes of padding at the end.
*
* The send routine at IBD driver changes this packet as follows:
* [IPoIB_HDR_T][INET6 packet][ICMP6][OPT_ND_HDR_T + 2 bytes of padding]
* followed by [22 bytes of ipoib_machdr] resulting in machdr 4 byte
* aligned.
*
* At the receiving side again ibd_process_rx takes the above packet and
* removes the two bytes of front padding and inserts it at the end. This
 * is because the IP layer does not understand padding at the front.
*/
#define	IBD_PAD_NSNA(ip6h, len, type) {					\
	uchar_t 	*nd_lla_ptr;					\
	icmp6_t 	*icmp6;						\
	nd_opt_hdr_t	*opt;						\
	int 		i;						\
									\
	icmp6 = (icmp6_t *)&ip6h[1];					\
	len -= sizeof (nd_neighbor_advert_t);				\
	if (((icmp6->icmp6_type == ND_NEIGHBOR_SOLICIT) ||		\
	    (icmp6->icmp6_type == ND_NEIGHBOR_ADVERT)) &&		\
	    (len != 0)) {						\
		opt = (nd_opt_hdr_t *)((uint8_t *)ip6h			\
		    + IPV6_HDR_LEN + sizeof (nd_neighbor_advert_t));	\
		ASSERT(opt != NULL);					\
		nd_lla_ptr = (uchar_t *)&opt[1];			\
		if (type == IBD_SEND) {					\
			for (i = IPOIB_ADDRL; i > 0; i--)		\
				*(nd_lla_ptr + i + 1) =			\
				    *(nd_lla_ptr + i - 1);		\
		} else {						\
			for (i = 0; i < IPOIB_ADDRL; i++)		\
				*(nd_lla_ptr + i) =			\
				    *(nd_lla_ptr + i + 2);		\
		}							\
		*(nd_lla_ptr + i) = 0;					\
		*(nd_lla_ptr + i + 1) = 0;				\
	}								\
}
/*
* Address handle entries maintained by the driver are kept in the
* free and active lists. Each entry starts out in the free list;
* it migrates to the active list when primed using ibt_get_paths()
* and ibt_modify_ud_dest() for transmission to a specific destination.
* In the active list, the entry has a reference count indicating the
* number of ongoing/uncompleted transmits that reference it. The
* entry is left in the active list even after the reference count
* goes to 0, since successive transmits can find it there and do
* not need to set up another entry (ie the path information is
* cached using the active list). Entries on the active list are
* also hashed using the destination link address as a key for faster
* lookups during transmits.
*
* For any destination address (unicast or multicast, whatever the
* join states), there will be at most one entry in the active list.
* Entries with a 0 reference count on the active list can be reused
* for a transmit to a new destination, if the free list is empty.
*
 * The AH free list insertions/deletions are protected with the id_ac_mutex,
 * since the async thread and Tx callback handlers insert/delete. The
 * active list does not need a lock (all operations are done by the
* async thread) but updates to the reference count are atomically
* done (increments done by Tx path, decrements by the Tx callback handler).
*/
#define	IBD_ACACHE_GET_FREE(state) \
	list_get_head(&state->id_ah_free)
#define	IBD_ACACHE_INSERT_ACTIVE(state, ce) {			\
	int _ret_;						\
								\
	list_insert_head(&state->id_ah_active, ce);		\
	_ret_ = mod_hash_insert(state->id_ah_active_hash,	\
	    (mod_hash_key_t)&ce->ac_mac, (mod_hash_val_t)ce);	\
	ASSERT(_ret_ == 0);					\
}
#define	IBD_ACACHE_PULLOUT_ACTIVE(state, ce) {			\
	mod_hash_val_t _val_;					\
								\
	list_remove(&state->id_ah_active, ce);			\
	(void) mod_hash_remove(state->id_ah_active_hash,	\
	    (mod_hash_key_t)&ce->ac_mac, &_val_);		\
}
#define	IBD_ACACHE_GET_ACTIVE(state) \
	list_get_head(&state->id_ah_active)
/*
* Membership states for different mcg's are tracked by two lists:
* the "non" list is used for promiscuous mode, when all mcg traffic
* needs to be inspected. This type of membership is never used for
* transmission, so there can not be an AH in the active list
* corresponding to a member in this list. This list does not need
* any protection, since all operations are performed by the async
* thread.
*
* "Full" and "SendOnly" membership is tracked using a single list,
* the "full" list. This is because this single list can then be
* searched during transmit to a multicast group (if an AH for the
* mcg is not found in the active list), since at least one type
* of membership must be present before initiating the transmit.
* This list is also emptied during driver detach, since sendonly
 * membership acquired during transmit is dropped at detach time
 * along with ipv4 broadcast full membership. Insert/deletes to
* this list are done only by the async thread, but it is also
* searched in program context (see multicast disable case), thus
* the id_mc_mutex protects the list. The driver detach path also
* deconstructs the "full" list, but it ensures that the async
* thread will not be accessing the list (by blocking out mcg
* trap handling and making sure no more Tx reaping will happen).
*
* Currently, an IBA attach is done in the SendOnly case too,
* although this is not required.
*/
/*
* AH and MCE active list manipulation:
*
* Multicast disable requests and MCG delete traps are two cases
* where the active AH entry for the mcg (if any unreferenced one exists)
* will be moved to the free list (to force the next Tx to the mcg to
* join the MCG in SendOnly mode). Port up handling will also move AHs
* from active to free list.
*
* In the case when some transmits are still pending on an entry
* for an mcg, but a multicast disable has already been issued on the
* mcg, there are some options to consider to preserve the join state
* to ensure the emitted packet is properly routed on the IBA fabric.
* For the AH, we can
* 1. take out of active list at multicast disable time.
* 2. take out of active list only when last pending Tx completes.
* For the MCE, we can
* 3. take out of active list at multicast disable time.
* 4. take out of active list only when last pending Tx completes.
* 5. move from active list to stale list at multicast disable time.
* We choose to use 2,4. We use option 4 so that if a multicast enable
* is tried before the pending Tx completes, the enable code finds the
* mce in the active list and just has to make sure it will not be reaped
* (ie the mcg leave done) when the pending Tx does complete. Alternatively,
* a stale list (#5) that would be checked in the enable code would need
* to be implemented. Option 2 is used, because otherwise, a Tx attempt
* after the multicast disable would try to put an AH in the active list,
* and associate the mce it finds in the active list to this new AH,
* whereas the mce is already associated with the previous AH (taken off
* the active list), and will be removed once the pending Tx's complete
* (unless a reference count on mce's is implemented). One implication of
* using 2,4 is that new Tx's posted before the pending Tx's complete will
* grab new references on the AH, further delaying the leave.
*
* In the case of mcg delete (or create) trap when the port is sendonly
* joined, the AH and MCE handling is different: the AH and MCE has to be
* immediately taken off the active lists (forcing a join and path lookup
* at the next Tx is the only guaranteed means of ensuring a proper Tx
* to an mcg as it is repeatedly created and deleted and goes thru
* reincarnations).
*
* When a port is already sendonly joined, and a multicast enable is
* attempted, the same mce structure is promoted; this ensures only a
* single mce on the active list tracks the most powerful join state.
*
* In the case of port up event handling, the MCE for sendonly membership
* is freed up, and the ACE is put into the free list as soon as possible
* (depending on whether posted Tx's have completed). For fullmembership
* MCE's though, the ACE is similarly handled; but the MCE is kept around
* (a re-JOIN is attempted) only if the DLPI leave has not already been
* done; else the mce is deconstructed (mc_fullreap case).
*
* MCG creation and deletion trap handling:
*
* These traps are unreliable (meaning sometimes the trap might never
* be delivered to the subscribed nodes) and may arrive out-of-order
* since they use UD transport. An alternative to relying on these
* unreliable traps is to poll for mcg presence every so often, but
* instead of doing that, we try to be as conservative as possible
* while handling the traps, and hope that the traps do arrive at
 * the subscribed nodes soon. Note that if a node is fullmember
 * joined to an mcg, it can not possibly receive a mcg create/delete
* trap for that mcg (by fullmember definition); if it does, it is
* an old trap from a previous incarnation of the mcg.
*
* Whenever a trap is received, the driver cleans up its sendonly
* membership to the group; we choose to do a sendonly leave even
* on a creation trap to handle the case of a prior deletion of the mcg
* having gone unnoticed. Consider an example scenario:
* T1: MCG M is deleted, and fires off deletion trap D1.
* T2: MCG M is recreated, fires off creation trap C1, which is lost.
* T3: Node N tries to transmit to M, joining in sendonly mode.
* T4: MCG M is deleted, and fires off deletion trap D2.
* T5: N receives a deletion trap, but can not distinguish D1 from D2.
* If the trap is D2, then a LEAVE is not required, since the mcg
* is already deleted; but if it is D1, a LEAVE is required. A safe
* approach is to always LEAVE, but the SM may be confused if it
* receives a LEAVE without a prior JOIN.
*
* Management of the non-membership to an mcg is similar to the above,
* except that if the interface is in promiscuous mode, it is required
* to attempt to re-join the mcg after receiving a trap. Unfortunately,
* if the re-join attempt fails (in which case a warning message needs
 * to be printed), it is not clear whether it failed due to the mcg not
 * existing, or some fabric/hca issues, due to the delayed chain of
 * events. Querying the SA to establish presence/absence of the
 * mcg is also racy at best. Thus, the driver just prints a warning
* message when it can not rejoin after receiving a create trap, although
 * this might be (on rare occasions) a mis-warning if the create trap is
* received after the mcg was deleted.
*/
/*
* Implementation of atomic "recycle" bits and reference count
* on address handles. This utilizes the fact that max reference
* count on any handle is limited by number of send wqes, thus
* high bits in the ac_ref field can be used as the recycle bits,
* and only the low bits hold the number of pending Tx requests.
* This atomic AH reference counting allows the Tx completion
* handler not to acquire the id_ac_mutex to process every completion,
* thus reducing lock contention problems between completion and
* the Tx path.
*/
#define	CYCLEVAL		0x80000
#define	CLEAR_REFCYCLE(ace)	((ace)->ac_ref = 0)
#define	CYCLE_SET(ace)		(((ace)->ac_ref & CYCLEVAL) == CYCLEVAL)
#define	GET_REF(ace)		((ace)->ac_ref)
#define	GET_REF_CYCLE(ace) (				\
	/*						\
	 * Make sure "cycle" bit is set.		\
	 */						\
	ASSERT(CYCLE_SET(ace)),				\
	((ace)->ac_ref & ~(CYCLEVAL))			\
)
#define	INC_REF(ace, num) {				\
	atomic_add_32(&(ace)->ac_ref, num);		\
}
#define	SET_CYCLE_IF_REF(ace) (				\
	CYCLE_SET(ace) ? B_TRUE :			\
	atomic_add_32_nv(&(ace)->ac_ref, CYCLEVAL) ==	\
	    CYCLEVAL ?					\
		/*					\
		 * Clear the "cycle" bit we just set;	\
		 * ref count known to be 0 from above.	\
		 */					\
		(CLEAR_REFCYCLE(ace), B_FALSE) :	\
		/*					\
		 * We set "cycle" bit; let caller know.	\
		 */					\
		B_TRUE					\
)
#define	DEC_REF_DO_CYCLE(ace) (				\
	atomic_add_32_nv(&(ace)->ac_ref, -1) ==		\
	    CYCLEVAL ?					\
		/*					\
		 * Ref count known to be 0 from above.	\
		 */					\
		B_TRUE :				\
		B_FALSE					\
)
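/*
 * Illustrative use of the macros above (a sketch, not the driver's
 * actual completion code): the Tx completion path drops its reference
 * and, if the count hit zero with the recycle bit set, moves the AH
 * from the active list to the free list under id_ac_mutex.
 */
static void
ibd_ace_deref_sketch(ibd_state_t *state, ibd_ace_t *ace)
{
	if (DEC_REF_DO_CYCLE(ace)) {
		mutex_enter(&state->id_ac_mutex);
		IBD_ACACHE_PULLOUT_ACTIVE(state, ace);
		list_insert_head(&state->id_ah_free, ace);
		mutex_exit(&state->id_ac_mutex);
	}
}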
static void *
list_get_head(list_t *list)
{
	void *lhead = list_head(list);

	if (lhead != NULL)
		list_remove(list, lhead);
	return (lhead);
}
/*
* This is always guaranteed to be able to queue the work.
*/
static void
ibd_queue_work_slot(ibd_state_t *state, ibd_req_t *ptr, int op)
{
/* Initialize request */
/*
* Queue provided slot onto request pool.
*/
/* Go, fetch, async thread */
}
/*
* Main body of the per interface async thread.
*/
static void
ibd_async_work(ibd_state_t *state)
{
	ibd_req_t *req;
	callb_cpr_t cprinfo;

	mutex_enter(&state->id_acache_req_lock);
	CALLB_CPR_INIT(&cprinfo, &state->id_acache_req_lock,
	    callb_generic_cpr, "ibd_async_work");
for (;;) {
/*
* Once we have done the operation, there is no
* guarantee the request slot is going to be valid,
* it might be freed up (as in IBD_ASYNC_LEAVE, REAP,
* TRAP).
*
* Perform the request.
*/
case IBD_ASYNC_GETAH:
break;
case IBD_ASYNC_JOIN:
case IBD_ASYNC_LEAVE:
break;
case IBD_ASYNC_PROMON:
break;
case IBD_ASYNC_PROMOFF:
break;
case IBD_ASYNC_REAP:
/*
			 * The req buf is contained in the mce
			 * structure, so we do not need
* to free it here.
*/
break;
case IBD_ASYNC_TRAP:
break;
case IBD_ASYNC_SCHED:
break;
case IBD_ASYNC_LINK:
break;
case IBD_ASYNC_EXIT:
#ifndef __lock_lint
#else
#endif
return;
}
} else {
#ifndef __lock_lint
/*
* Nothing to do: wait till new request arrives.
*/
#endif
}
}
/*NOTREACHED*/
}
/*
* Return when it is safe to queue requests to the async daemon; primarily
* for subnet trap and async event handling. Disallow requests before the
 * daemon is created, and when interface deinitialization starts.
*/
static boolean_t
ibd_async_safe(ibd_state_t *state)
{
	mutex_enter(&state->id_trap_lock);
	if (state->id_trap_stop) {
		mutex_exit(&state->id_trap_lock);
		return (B_FALSE);
	}
	state->id_trap_inprog++;
	mutex_exit(&state->id_trap_lock);
	return (B_TRUE);
}
/*
* Wake up ibd_m_stop() if the unplumb code is waiting for pending subnet
 * trap or event handling to complete, so that it can kill the async thread
 * and deconstruct the mcg/ace list.
*/
static void
ibd_async_done(ibd_state_t *state)
{
	mutex_enter(&state->id_trap_lock);
	if (--state->id_trap_inprog == 0)
		cv_signal(&state->id_trap_cv);
	mutex_exit(&state->id_trap_lock);
}
/*
* Hash functions:
* ibd_hash_by_id: Returns the qpn as the hash entry into bucket.
* ibd_hash_key_cmp: Compares two keys, return 0 on success or else 1.
* These operate on mac addresses input into ibd_send, but there is no
* guarantee on the alignment of the ipoib_mac_t structure.
*/
/*ARGSUSED*/
static uint_t
ibd_hash_by_id(void *hash_data, mod_hash_key_t key)
{
	ulong_t ptraddr;
	uint_t hval;

	/*
	 * If the input address is 4 byte aligned, we can just dereference
	 * it. This is most common, since IP will send in a 4 byte aligned
	 * IP header, which implies the 24 byte IPoIB pseudo header will be
	 * 4 byte aligned too.
	 */
	ptraddr = (ulong_t)key;
	if ((ptraddr & 3) == 0)
		return ((uint_t)(((ipoib_mac_t *)key)->ipoib_qpn));

	bcopy(&(((ipoib_mac_t *)key)->ipoib_qpn), &hval, sizeof (uint_t));
	return (hval);
}
static int
ibd_hash_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
{
	if (bcmp((char *)key1, (char *)key2, IPOIB_ADDRL) == 0)
		return (0);
	else
		return (1);
}
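/*
 * Sketch of how the two functions above would be wired into the active
 * AH hash with the standard mod_hash_create_extended() API (the table
 * name string here is an assumption):
 */
static void
ibd_acache_hash_sketch(ibd_state_t *state)
{
	state->id_ah_active_hash = mod_hash_create_extended(
	    "ibd_active_ah_hash", IBD_HASH_SIZE,
	    mod_hash_null_keydtor, mod_hash_null_valdtor,
	    ibd_hash_by_id, NULL, ibd_hash_key_cmp, KM_SLEEP);
}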
/*
* Initialize all the per interface caches and lists; AH cache,
* MCG list etc.
*/
static int
ibd_acache_init(ibd_state_t *state)
{
	ibd_ace_t *ce;
	int i;
for (i = 0; i < IBD_NUM_AH; i++, ce++) {
return (DDI_FAILURE);
} else {
}
}
return (DDI_SUCCESS);
}
static void
ibd_acache_fini(ibd_state_t *state)
{
}
}
}
/*
* Search AH active hash list for a cached path to input destination.
* If we are "just looking", hold == F. When we are in the Tx path,
* we set hold == T to grab a reference on the AH so that it can not
* be recycled to a new destination while the Tx request is posted.
*/
static ibd_ace_t *
{
/*
* Do hash search.
*/
if (hold)
return (ptr);
}
return (NULL);
}
/*
* This is called by the tx side; if an initialized AH is found in
* the active list, it is locked down and can be used; if no entry
* is found, an async request is queued to do path resolution.
*/
static ibd_ace_t *
ibd_acache_lookup(ibd_state_t *state, ipoib_mac_t *mac, int *err, int numwqe)
{
/*
* Only attempt to print when we can; in the mdt pattr case, the
* address is not aligned properly.
*/
DPRINT(4,
"ibd_acache_lookup : lookup for %08X:%08X:%08X:%08X:%08X",
}
return (ptr);
}
/*
* Implementation of a single outstanding async request; if
* the operation is not started yet, queue a request and move
* to ongoing state. Remember in id_ah_addr for which address
* we are queueing the request, in case we need to flag an error;
* Any further requests, for the same or different address, until
* the operation completes, is sent back to GLDv3 to be retried.
* The async thread will update id_ah_op with an error indication
* or will set it to indicate the next look up can start; either
* way, it will mac_tx_update() so that all blocked requests come
* back here.
*/
/*
* We did not even find the entry; queue a request
* for it.
*/
}
/*
* Check the status of the pathrecord lookup request
* we had queued before.
*/
state->id_ah_error++;
} else {
/*
* IBD_OP_ROUTERED case: We need to send to the
* all-router MCG. If we can find the AH for
* the mcg, the Tx will be attempted. If we
* do not find the AH, we return NORESOURCES
* to retry.
*/
numwqe);
}
/*
* This case can happen when we get a higher band
* packet. The easiest way is to reset the state machine
* to accommodate the higher priority packet.
*/
}
return (ptr);
}
/*
* Grab a not-currently-in-use AH/PathRecord from the active
* list to recycle to a new destination. Only the async thread
* executes this code.
*/
static ibd_ace_t *
{
/*
* Do plain linear search.
*/
/*
* Note that it is possible that the "cycle" bit
* is set on the AH w/o any reference count. The
* mcg must have been deleted, and the tx cleanup
* just decremented the reference count to 0, but
* hasn't gotten around to grabbing the id_ac_mutex
* to move the AH into the free list.
*/
break;
}
}
return (ptr);
}
/*
* Invoked to clean up AH from active list in case of multicast
* disable and to handle sendonly memberships during mcg traps.
* And for port up processing for multicast and unicast AHs.
* Normally, the AH is taken off the active list, and put into
* the free list to be recycled for a new destination. In case
* Tx requests on the AH have not completed yet, the AH is marked
* for reaping (which will put the AH on the free list) once the Tx's
* complete; in this case, depending on the "force" input, we take
* out the AH from the active list right now, or leave it also for
* the reap operation. Returns TRUE if the AH is taken off the active
* list (and either put into the free list right now, or arranged for
* later), FALSE otherwise.
*/
static boolean_t
{
/*
* Note that the AH might already have the cycle bit set
* on it; this might happen if sequences of multicast
* enables and disables are coming so fast, that posted
* Tx's to the mcg have not completed yet, and the cycle
* bit is set successively by each multicast disable.
*/
if (SET_CYCLE_IF_REF(acactive)) {
if (!force) {
/*
* The ace is kept on the active list, further
* Tx's can still grab a reference on it; the
* ace is reaped when all pending Tx's
* referencing the AH complete.
*/
} else {
/*
* In the mcg trap case, we always pull the
* AH from the active list. And also the port
*/
}
} else {
/*
* Determined the ref count is 0, thus reclaim
* immediately after pulling out the ace from
* the active list.
*/
}
}
return (ret);
}
/*
* Helper function for async path record lookup. If we are trying to
* Tx to a MCG, check our membership, possibly trying to join the
* group if required. If that fails, try to send the packet to the
* all router group (indicated by the redirect output), pointing
* the input mac address to the router mcg address.
*/
static ibd_mce_t *
ibd_async_mcache(ibd_state_t *state, ipoib_mac_t *mac, boolean_t *redirect)
{
/*
* Check the FullMember+SendOnlyNonMember list.
* Since we are the only one who manipulates the
* id_mc_full list, no locks are needed.
*/
return (mce);
}
/*
* Not found; try to join(SendOnlyNonMember) and attach.
*/
NULL) {
return (mce);
}
/*
* MCGroup not present; try to join the all-router group. If
* any of the following steps succeed, we will be redirecting
* to the all router group.
*/
return (NULL);
/*
* Are we already joined to the router group?
*/
"group\n");
return (mce);
}
/*
* Can we join(SendOnlyNonMember) the router group?
*/
NULL) {
return (mce);
}
return (NULL);
}
/*
* Async path record lookup code.
*/
static void
ibd_async_acache(ibd_state_t *state, ipoib_mac_t *mac)
{
char ret = IBD_OP_NOTSTARTED;
/*
* Check whether we are trying to transmit to a MCG.
* In that case, we need to make sure we are a member of
* the MCG.
*/
/*
* If we can not find or join the group or even
* redirect, error out.
*/
NULL) {
return;
}
/*
* If we got redirected, we need to determine whether
* the AH for the new mcg is in the cache already, and
* not pull it in then; otherwise proceed to get the
* path for the new mcg. There is no guarantee that
* if the AH is currently in the cache, it will still be
* there when we look in ibd_acache_lookup(), but that's
* okay, we will come back here.
*/
if (redirected) {
"%08X:%08X:%08X:%08X:%08X",
return;
}
}
}
/*
* Get an AH from the free list.
*/
/*
* No free ones; try to grab an unreferenced active
* one. Maybe we need to make the active list LRU,
* but that will create more work for Tx callbacks.
* Is there a way of not having to pull out the
* entry from the active list, but just indicate it
* is being recycled? Yes, but that creates one more
* check in the fast lookup path.
*/
/*
* Pretty serious shortage now.
*/
"slot\n");
return;
}
/*
* We could check whether ac_mce points to a SendOnly
* member and drop that membership now. Or do it lazily
* at detach time.
*/
}
/*
* Update the entry.
*/
goto error;
}
goto error;
}
/*
* mce is set whenever an AH is being associated with a
* MCG; this will come in handy when we leave the MCG. The
* lock protects Tx fastpath from scanning the active list.
*/
return;
/*
* We might want to drop SendOnly membership here if we
* joined above. The lock protects Tx callbacks inserting
* into the free list.
*/
}
/*
* While restoring port's presence on the subnet on a port up, it is possible
* that the port goes down again.
*/
static void
ibd_async_link(ibd_state_t *state, ibd_req_t *req)
{
	/*
	 * On a link up, revalidate the link speed/width. No point doing
	 * this on a link down, since we will be unable to do SA operations,
	 * defaulting to the lowest speed. Also notice that we update our
	 * notion of speed before calling mac_link_update(), which will do
	 * necessary higher level notifications for speed changes.
*/
}
/*
* Do all the work required to establish our presence on
* the subnet.
*/
if (opcode == IBD_LINK_UP_ABSENT) {
/*
* If in promiscuous mode ...
*/
/*
* Drop all nonmembership.
*/
/*
* Then, try to regain nonmembership to all mcg's.
*/
}
/*
* Drop all sendonly membership (which also gets rid of the
* AHs); try to reacquire all full membership.
*/
else
}
/*
* Recycle all active AHs to free list (and if there are
* pending posts, make sure they will go into the free list
* once the Tx's complete). Grab the lock to prevent
* concurrent Tx's as well as Tx cleanups.
*/
B_TRUE);
/*
* If this is for an mcg, it must be for a fullmember,
* since we got rid of send-only members above when
* processing the mce list.
*/
/*
* Check if the fullmember mce needs to be torn down,
* ie whether the DLPI disable has already been done.
* If so, do some of the work of tx_cleanup, namely
* causing leave (which will fail), detach and
* mce-freeing. tx_cleanup will put the AH into free
* list. The reason to duplicate some of this
* tx_cleanup work is because we want to delete the
* AH right now instead of waiting for tx_cleanup, to
* force subsequent Tx's to reacquire an AH.
*/
}
}
/*
* mac handle is guaranteed to exist since driver does ibt_close_hca()
* (which stops further events from being delivered) before
* mac_unregister(). At this point, it is guaranteed that mac_register
* has already been done.
*/
}
/*
* Check the pkey table to see if we can find the pkey we're looking for.
* Set the pkey index in 'pkix' if found. Return 0 on success and -1 on
* failure.
*/
static int
ibd_locate_pkey(ibt_hca_portinfo_t *port_infop, uint16_t *pkix, ib_pkey_t pkey)
{
	uint16_t i;

	for (i = 0; i < port_infop->p_pkey_tbl_sz; i++) {
		if (port_infop->p_pkey_tbl[i] == pkey) {
			*pkix = i;
			return (0);
		}
	}
	return (-1);
}
/*
* When the link is notified up, we need to do a few things, based
* on the port's current p_init_type_reply claiming a reinit has been
* done or not. The reinit steps are:
* 1. If in InitTypeReply, NoLoadReply == PreserveContentReply == 0, verify
* the old Pkey and GID0 are correct.
* 2. Register for mcg traps (already done by ibmf).
* 3. If PreservePresenceReply indicates the SM has restored port's presence
* in subnet, nothing more to do. Else go to next steps (on async daemon).
* 4. Give up all sendonly memberships.
* 5. Acquire all full memberships.
* 6. In promiscuous mode, acquire all non memberships.
* 7. Recycle all AHs to free list.
*/
static void
{
/*
* Do not send a request to the async daemon if it has not
* yet been created or is being destroyed. If the async
* daemon has not yet been created, we still need to track
* last known state of the link. If this code races with the
* detach path, then we are assured that the detach path has
* not yet done the ibt_close_hca (which waits for all async
* events to complete). If the code races with the attach path,
* the initialization path has already set these up and created
* IBTF resources based on the values.
*/
/*
	 * If the init code in ibd_m_start hasn't yet set up the
	 * pkey/gid, nothing to do; that code will set the link state.
*/
return;
}
/*
* If this routine was called in response to a port down event,
* we just need to see if this should be informed.
*/
if (code == IBT_ERROR_PORT_DOWN) {
goto update_link_state;
}
/*
* If it's not a port down event we've received, try to get the port
* attributes first. If we fail here, the port is as good as down.
	 * Otherwise, if the link went down by the time the handler gets
	 * here, the port attributes that were just fetched
* are not valid and this is as bad as a port down anyway.
*/
goto update_link_state;
}
/*
* Check the SM InitTypeReply flags. If both NoLoadReply and
* PreserveContentReply are 0, we don't know anything about the
* data loaded into the port attributes, so we need to verify
* if gid0 and pkey are still valid.
*/
if (((itreply & SM_INIT_TYPE_REPLY_NO_LOAD_REPLY) == 0) &&
((itreply & SM_INIT_TYPE_PRESERVE_CONTENT_REPLY) == 0)) {
/*
* Check to see if the subnet part of GID0 has changed. If
* not, check the simple case first to see if the pkey
* index is the same as before; finally check to see if the
* pkey has been relocated to a different index in the table.
*/
"restart, ret=%d", ibt_status);
}
return;
} else {
}
}
if (port_infop) {
}
/*
* If the old state is the same as the new state, nothing to do
*/
return;
}
/*
* Ok, so there was a link state change; see if it's safe to ask
* the async thread to do the work
*/
if (!ibd_async_safe(state)) {
return;
}
/*
* If we're reporting a link up, check InitTypeReply to see if
* the SM has ensured that the port's presence in mcg, traps,
* etc. is intact.
*/
if (new_link_state == LINK_STATE_DOWN) {
} else {
if ((itreply & SM_INIT_TYPE_PRESERVE_PRESENCE_REPLY) ==
} else {
}
}
/*
* Queue up a request for ibd_async_link() to handle this link
* state change event
*/
}
/*
 * For the port up/down events, IBTL guarantees there will not be concurrent
 * invocations of the handler. IBTL might coalesce link transition events,
 * and not invoke the handler for every transition, so we always
 * invoke the handler with the last known state.
*/
static void
ibd_async_handler(void *clnt_private, ibt_hca_hdl_t hca_hdl,
    ibt_async_code_t code, ibt_async_event_t *event)
{
switch (code) {
break;
case IBT_ERROR_CQ:
break;
case IBT_PORT_CHANGE_EVENT:
/*
* Events will be delivered to all instances that have
* done ibt_open_hca() but not yet done ibt_close_hca().
* Only need to do work for our port; IBTF will deliver
* events for other ports on the hca we have ibt_open_hca'ed
* too. Note that id_port is initialized in ibd_attach()
* before we do an ibt_open_hca() in ibd_attach().
*/
break;
}
break;
case IBT_ERROR_PORT_DOWN:
case IBT_CLNT_REREG_EVENT:
case IBT_EVENT_PORT_UP:
/*
* Events will be delivered to all instances that have
* done ibt_open_hca() but not yet done ibt_close_hca().
* Only need to do work for our port; IBTF will deliver
* events for other ports on the hca we have ibt_open_hca'ed
* too. Note that id_port is initialized in ibd_attach()
* before we do an ibt_open_hca() in ibd_attach().
*/
break;
break;
case IBT_HCA_ATTACH_EVENT:
case IBT_HCA_DETACH_EVENT:
/*
* When a new card is plugged to the system, attach_event is
* invoked. Additionally, a cfgadm needs to be run to make the
* card known to the system, and an ifconfig needs to be run to
* plumb up any ibd interfaces on the card. In the case of card
* unplug, a cfgadm is run that will trigger any RCM scripts to
* unplumb the ibd interfaces on the card; when the card is
* actually unplugged, the detach_event is invoked;
* additionally, if any ibd instances are still active on the
* card (eg there were no associated RCM scripts), driver's
* detach routine is invoked.
*/
break;
default:
break;
}
}
static int
ibd_register_mac(ibd_state_t *state, dev_info_t *dip)
{
int ret;
return (DDI_FAILURE);
}
/*
* Note that when we register with mac during attach, we don't
* have the id_macaddr yet, so we'll simply be registering a
* zero macaddr that we'll overwrite later during plumb (in
* ibd_m_start()). Similar is the case with id_mtu - we'll
* update the mac layer with the correct mtu during plumb.
*/
/*
* Register ourselves with the GLDv3 interface
*/
DPRINT(10,
"ibd_register_mac: mac_register() failed, ret=%d", ret);
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
ibd_record_capab(ibd_state_t *state, dev_info_t *dip)
{
/*
* Query the HCA and fetch its attributes
*/
/*
* 1. Set the Hardware Checksum capability. Currently we only consider
* full checksum offload.
*/
}
/*
* 2. Set LSO policy, capability and maximum length
*/
} else {
}
if (hca_attrs.hca_max_lso_size > 0) {
else
} else {
state->id_lso_maxlen = 0;
}
/*
* 3. Set Reserved L_Key capability
*/
}
/*
* 4. Set maximum sqseg value after checking to see if extended sgl
* size information is provided by the hca
*/
} else {
}
}
/*
* 5. Set number of recv and send wqes after checking hca maximum
* channel size
*/
} else {
}
} else {
}
return (DDI_SUCCESS);
}
static int
ibd_unattach(ibd_state_t *state, dev_info_t *dip)
{
	uint32_t progress = state->id_mac_state;
	ibt_status_t ret;
	int instance;
if (progress & IBD_DRV_MAC_REGISTERED) {
}
if (progress & IBD_DRV_PD_ALLOCD) {
"protection domain, ret=%d", ret);
}
}
if (progress & IBD_DRV_HCA_OPENED) {
IBT_SUCCESS) {
"HCA device, ret=%d", ret);
}
}
if (progress & IBD_DRV_IBTL_ATTACH_DONE) {
"ibt_detach() failed, ret=%d", ret);
}
}
if (progress & IBD_DRV_TXINTR_ADDED) {
}
if (progress & IBD_DRV_RXINTR_ADDED) {
}
if (progress & IBD_DRV_STATE_INITIALIZED) {
}
return (DDI_SUCCESS);
}
/*
* Attach device to the IO framework.
*/
static int
ibd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
int instance;
int rv;
	/*
	 * IBD doesn't support suspend/resume
	 */
if (cmd != DDI_ATTACH)
return (DDI_FAILURE);
/*
* Allocate softstate structure
*/
return (DDI_FAILURE);
/*
* Initialize mutexes and condition variables
*/
goto attach_fail;
}
/*
* Allocate rx,tx softintr
*/
if (ibd_rx_softintr == 1) {
"ddi_add_softintr(id_rx), ret=%d", rv);
goto attach_fail;
}
}
if (ibd_tx_softintr == 1) {
"ddi_add_softintr(id_tx), ret=%d", rv);
goto attach_fail;
}
}
/*
* Obtain IBA P_Key, port number and HCA guid and validate
* them (for P_Key, only full members are allowed as per
* IPoIB specification; neither port number nor HCA guid
* can be zero)
*/
goto attach_fail;
}
"port-number", 0)) == 0) {
goto attach_fail;
}
"hca-guid", 0)) == 0) {
hca_guid);
goto attach_fail;
}
/*
* Attach to IBTL
*/
goto attach_fail;
}
/*
* Open the HCA
*/
goto attach_fail;
}
/*
* Record capabilities
*/
/*
* Allocate a protection domain on the HCA
*/
goto attach_fail;
}
/*
* Register ibd interfaces with the Nemo framework
*/
goto attach_fail;
}
/*
* We're done with everything we could to make the attach
* succeed. All the buffer allocations and IPoIB broadcast
* group joins are deferred to when the interface instance
* is actually plumbed to avoid wasting memory.
*/
return (DDI_SUCCESS);
attach_fail:
	(void) ibd_unattach(state, dip);
	return (DDI_FAILURE);
}
/*
* Detach device from the IO framework.
*/
static int
ibd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
int instance;
	/*
	 * IBD doesn't support suspend/resume
	 */
if (cmd != DDI_DETACH)
return (DDI_FAILURE);
/*
* Get the instance softstate
*/
/*
* Release all resources we're holding still. Note that if we'd
* done ibd_attach(), ibd_m_start() and ibd_m_stop() correctly
* so far, we should find all the flags we need in id_mac_state.
*/
return (DDI_SUCCESS);
}
/*
* Pre ibt_attach() driver initialization
*/
static int
ibd_state_init(ibd_state_t *state, dev_info_t *dip)
{
	char buf[64];
state->id_trap_inprog = 0;
state->id_tx_busy = 0;
return (DDI_SUCCESS);
}
/*
* Post ibt_detach() driver deconstruction
*/
static void
ibd_state_fini(ibd_state_t *state)
{
}
/*
* Fetch link speed from SA for snmp ifspeed reporting.
*/
static uint64_t
ibd_get_portspeed(ibd_state_t *state)
{
	uint64_t ifspeed;
	ibt_path_info_t *paths = NULL;
	uint8_t num_paths = 0;
	int ret;
/*
* Due to serdes 8b10b encoding on the wire, 2.5 Gbps on wire
* translates to 2 Gbps data rate. Thus, 1X single data rate is
* 2000000000. Start with that as default.
*/
ifspeed = 2000000000;
/*
* Get the port speed from Loopback path information.
*/
goto earlydone;
if (num_paths < 1)
goto earlydone;
/*
* In case SA does not return an expected value, report the default
* speed as 1X.
*/
	ret = 1;
	switch (paths->pi_prim_cep_path.cep_adds_vect.av_srate) {
	case IBT_SRATE_2: /* 1X SDR i.e 2.5 Gbps */
ret = 1;
break;
case IBT_SRATE_10: /* 4X SDR or 1X QDR i.e 10 Gbps */
ret = 4;
break;
case IBT_SRATE_30: /* 12X SDR i.e 30 Gbps */
ret = 12;
break;
case IBT_SRATE_5: /* 1X DDR i.e 5 Gbps */
ret = 2;
break;
case IBT_SRATE_20: /* 4X DDR or 8X SDR i.e 20 Gbps */
ret = 8;
break;
case IBT_SRATE_40: /* 8X DDR or 4X QDR i.e 40 Gbps */
ret = 16;
break;
case IBT_SRATE_60: /* 12X DDR i.e 60 Gbps */
ret = 24;
break;
case IBT_SRATE_80: /* 8X QDR i.e 80 Gbps */
ret = 32;
break;
case IBT_SRATE_120: /* 12X QDR i.e 120 Gbps */
ret = 48;
break;
}
	ifspeed *= ret;

earlydone:
	return (ifspeed);
}
/*
* Search input mcg list (id_mc_full or id_mc_non) for an entry
* representing the input mcg mgid.
*/
static ibd_mce_t *
ibd_mcache_find(ib_gid_t mgid, list_t *mlist)
{
/*
* Do plain linear search.
*/
sizeof (ib_gid_t)) == 0)
return (ptr);
}
return (NULL);
}
/*
* Execute IBA JOIN.
*/
static ibt_status_t
{
}
/*
 * This code JOINs the port in the proper way (depending on the join
 * state) so that IBA fabric will forward mcg packets to/from the port.
* It also attaches the QPN to the mcg so it can receive those mcg
* packets. This code makes sure not to attach the mcg to the QP if
* that has been previously done due to the mcg being joined with a
* different join state, even though this is not required by SWG_0216,
* refid 3610.
*/
static ibd_mce_t *
ibd_join_group(ibd_state_t *state, ib_gid_t mgid, uint8_t jstate)
{
/*
* For enable_multicast Full member joins, we need to do some
* extra work. If there is already an mce on the list that
* indicates full membership, that means the membership has
* not yet been dropped (since the disable_multicast was issued)
* because there are pending Tx's to the mcg; in that case, just
* mark the mce not to be reaped when the Tx completion queues
* an async reap operation.
*
* If there is already an mce on the list indicating sendonly
* membership, try to promote to full membership. Be careful
* not to deallocate the old mce, since there might be an AH
* pointing to it; instead, update the old mce with new data
* that tracks the full membership.
*/
return (omce);
} else {
}
}
/*
* Allocate the ibd_mce_t to track this JOIN.
*/
return (NULL);
}
/*
* Is an IBA attach required? Not if the interface is already joined
* to the mcg in a different appropriate join state.
*/
if (jstate == IB_MC_JSTATE_NON) {
} else if (jstate == IB_MC_JSTATE_FULL) {
} else { /* jstate == IB_MC_JSTATE_SEND_ONLY_NON */
}
if (do_attach) {
/*
* Do the IBA attach.
*/
"%d\n", ibt_status);
/*
* NOTE that we should probably preserve the join info
* in the list and later try to leave again at detach
* time.
*/
return (NULL);
}
}
/*
* Insert the ibd_mce_t in the proper list.
*/
if (jstate == IB_MC_JSTATE_NON) {
} else {
/*
* Set up the mc_req fields used for reaping the
* mcg in case of delayed tx completion (see
* ibd_tx_cleanup()). Also done for sendonly join in
* case we are promoted to fullmembership later and
* keep using the same mce.
*/
/*
* Check whether this is the case of trying to join
* full member, and we were already joined send only.
* We try to drop our SendOnly membership, but it is
* possible that the mcg does not exist anymore (and
* the subnet trap never reached us), so the leave
* operation might fail.
*/
sizeof (ibt_mcg_info_t));
return (omce);
}
}
return (mce);
}
/*
* Called during port up event handling to attempt to reacquire full
* membership to an mcg. Stripped down version of ibd_join_group().
* Note that it is possible that the mcg might have gone away, and
* gets recreated at this point.
*/
static void
{
/*
	 * If the mc_fullreap flag is set, or this join fails, a subsequent
	 * reap/leave is going to try to leave the group. We could prevent
* that by adding a boolean flag into ibd_mce_t, if required.
*/
if (mce->mc_fullreap)
return;
"multicast gid %016llx:%016llx",
}
/*
* This code handles delayed Tx completion cleanups for mcg's to which
* disable_multicast has been issued, regular mcg related cleanups during
 * disable_multicast, disable_promiscuous and mcg traps, as well as
* cleanups during driver detach time. Depending on the join state,
 * it deletes the mce from the appropriate list and issues the IBA
 * leave/detach; except in the disable_multicast case when the mce
* is left on the active list for a subsequent Tx completion cleanup.
*/
static void
{
/*
* Before detaching, we must check whether the other list
* contains the mcg; if we detach blindly, the consumer
* who set up the other list will also stop receiving
* traffic.
*/
if (jstate == IB_MC_JSTATE_FULL) {
/*
* The following check is only relevant while coming
* from the Tx completion path in the reap case.
*/
if (!mce->mc_fullreap)
return;
} else if (jstate == IB_MC_JSTATE_NON) {
} else { /* jstate == IB_MC_JSTATE_SEND_ONLY_NON */
}
/*
* If we are reacting to a mcg trap and leaving our sendonly or
* non membership, the mcg is possibly already gone, so attempting
* to leave might fail. On the other hand, we must try to leave
* anyway, since this might be a trap from long ago, and we could
* have potentially sendonly joined to a recent incarnation of
 * the mcg and are about to lose track of this information.
*/
if (do_detach) {
}
}
/*
* Async code executed due to multicast and promiscuous disable requests
* and mcg trap handling; also executed during driver detach. Mostly, a
* leave and detach is done; except for the fullmember case when Tx
* requests are pending, whence arrangements are made for subsequent
* cleanup on Tx completion.
*/
static void
{
if (jstate == IB_MC_JSTATE_NON) {
/*
* In case we are handling a mcg trap, we might not find
* the mcg in the non list.
*/
return;
}
} else {
/*
* In case we are handling a mcg trap, make sure the trap
* is not arriving late; if we have an mce that indicates
* that we are already a fullmember, that would be a clear
* indication that the trap arrived late (ie, is for a
* previous incarnation of the mcg).
*/
if (jstate == IB_MC_JSTATE_SEND_ONLY_NON) {
return;
}
} else {
/*
* If join group failed, mce will be NULL here.
* This is because in GLDv3 driver, set multicast
* will always return success.
*/
return;
}
}
/*
* If no pending Tx's remain that reference the AH
* for the mcg, recycle it from active to free list.
* Else in the IB_MC_JSTATE_FULL case, just mark the AH,
* so the last completing Tx will cause an async reap
* operation to be invoked, at which time we will drop our
* membership to the mcg so that the pending Tx's complete
* successfully. Refer to comments on "AH and MCE active
* list manipulation" at top of this file. The lock protects
* against Tx fast path and Tx cleanup code.
*/
}
if (recycled) {
}
}
/*
* Find the broadcast address as defined by IPoIB; implicitly
* determines the IBA scope, mtu, tclass etc of the link the
* interface is going to be a member of.
*/
static ibt_status_t
ibd_find_bgroup(ibd_state_t *state)
{
	boolean_t found = B_FALSE;
	int i, mcgmtu;
int ret;
/*
* Look for the IPoIB broadcast group.
	 */
query_bcast_grp:
break;
}
}
if (!found) {
if (ibd_create_broadcast_group) {
/*
* If we created the broadcast group, but failed to
* find it, we can't do anything except leave the
* one we created and return failure.
*/
if (state->id_bgroup_created) {
"absent. Unable to query after create.");
goto find_bgroup_fail;
}
/*
* Create the ipoib broadcast group if it didn't exist
*/
"absent, create failed: ret = %d\n", ret);
return (IBT_FAILURE);
}
goto query_bcast_grp;
} else {
return (IBT_FAILURE);
}
}
/*
* Assert that the mcg mtu <= id_mtu. Fill in updated id_mtu.
*/
"greater than port's maximum MTU %d", mcgmtu,
goto find_bgroup_fail;
}
return (IBT_SUCCESS);
find_bgroup_fail:
	if (state->id_bgroup_created) {
}
return (IBT_FAILURE);
}
static int
ibd_alloc_tx_copybufs(ibd_state_t *state)
{
/*
* Allocate one big chunk for all regular tx copy bufs
*/
}
/*
* Do one memory registration on the entire txbuf area
*/
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
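/*
 * Sketch of the single registration described above, using the standard
 * ibt_register_mr() call (the buffer/handle field names here are
 * assumptions; error handling abbreviated):
 */
static int
ibd_reg_txbufs_sketch(ibd_state_t *state, uint8_t *bufaddr, size_t len)
{
	ibt_mr_attr_t mem_attr;

	mem_attr.mr_vaddr = (uint64_t)(uintptr_t)bufaddr;
	mem_attr.mr_len = len;
	mem_attr.mr_as = NULL;
	mem_attr.mr_flags = IBT_MR_SLEEP;
	if (ibt_register_mr(state->id_hca_hdl, state->id_pd_hdl, &mem_attr,
	    &state->id_tx_mr_hdl, &state->id_tx_mr_desc) != IBT_SUCCESS)
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}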
static int
ibd_alloc_tx_lsobufs(ibd_state_t *state)
{
	ibd_lsobuf_t *lbufp;
	uint8_t *memp;
	int i;
/*
* Allocate the lso bucket
*/
/*
* Allocate the entire lso memory and register it
*/
return (DDI_FAILURE);
}
/*
* Now allocate the buflist. Note that the elements in the buflist and
* the buffers in the lso memory have a permanent 1-1 relation, so we
* can always derive the address of a buflist entry from the address of
* an lso buffer.
*/
KM_SLEEP);
/*
* Set up the lso buf chain
*/
for (i = 0; i < IBD_NUM_LSO_BUFS; i++) {
memp += IBD_LSO_BUFSZ;
lbufp++;
}
/*
* Set up the LSO buffer information in ibd state
*/
return (DDI_SUCCESS);
}
/*
* Statically allocate Tx buffer list(s).
*/
static int
ibd_init_txlist(ibd_state_t *state)
{
	int i;
return (DDI_FAILURE);
}
/*
* Allocate and setup the swqe list
*/
for (i = 0; i < state->id_num_swqe; i++) {
return (DDI_FAILURE);
}
/* add to list */
} else {
}
}
return (DDI_SUCCESS);
}
static int
ibd_acquire_lsobufs(ibd_state_t *state, uint_t req_sz, ibt_wr_ds_t *sgl_p,
    uint32_t *nds_p)
{
	uint_t frag_sz;
	uint_t num_needed;
	int i;
/*
* Determine how many bufs we'd need for the size requested
*/
	num_needed = req_sz / IBD_LSO_BUFSZ;
	if ((frag_sz = req_sz % IBD_LSO_BUFSZ) != 0)
		num_needed++;
/*
* If we don't have enough lso bufs, return failure
*/
return (-1);
}
/*
* Pick the first 'num_needed' bufs from the free list
*/
for (i = 0; i < num_needed; i++) {
}
/*
* If the requested size is not a multiple of IBD_LSO_BUFSZ, we need
	 * to adjust the last sgl entry's length. Since we know we need at
	 * least one, the i-1 use below is ok.
* one, the i-1 use below is ok.
*/
	if (frag_sz) {
		sgl_p[i - 1].ds_len = frag_sz;
	}
/*
* Update nfree count and return
*/
*nds_p = num_needed;
return (0);
}
static void
ibd_release_lsobufs(ibd_state_t *state, ibt_wr_ds_t *sgl_p, uint32_t nds)
{
	int i;
for (i = 0; i < nds; i++) {
/*
* Figure out the buflist element this sgl buffer corresponds
* to and put it back at the head
*/
}
}
static void
ibd_free_tx_copybufs(ibd_state_t *state)
{
/*
* Unregister txbuf mr
*/
}
/*
* Free txbuf memory
*/
}
static void
ibd_free_tx_lsobufs(ibd_state_t *state)
{
return;
}
/*
* First, free the buflist
*/
/*
* Unregister the LSO memory and free it
*/
DPRINT(10,
"ibd_free_lsobufs: ibt_deregister_mr failed");
}
/*
* Finally free the bucket
*/
}
/*
* Free the statically allocated Tx buffer list.
*/
static void
ibd_fini_txlist(ibd_state_t *state)
{
/*
* Free the allocated swqes
*/
}
}
/*
* Allocate a single send wqe and register it so it is almost
* ready to be posted to the hardware.
*/
static int
{
/* These are set in send */
return (DDI_SUCCESS);
}
/*
* Free an allocated send wqe.
*/
/*ARGSUSED*/
static void
{
}
/*
* Post a rwqe to the hardware and add it to the Rx list. The
* "recycle" parameter indicates whether an old rwqe is being
* recycled, or this is a new one.
*/
static int
{
} else {
}
}
if (state->id_rx_busy) {
if (state->id_rx_head)
else
} else {
do {
/*
* Here we should add dl_cnt before post recv, because
* we would have to make sure dl_cnt is updated before
* the corresponding ibd_process_rx() is called.
*/
if (ibt_status != IBT_SUCCESS) {
(void) atomic_add_32_nv(
"posting failed, ret=%d", ibt_status);
return (DDI_FAILURE);
}
if (rwqe) {
state->id_rx_head =
}
} while (rwqe);
state->id_rx_busy = 0;
}
return (DDI_SUCCESS);
}
/*
* Allocate the statically allocated Rx buffer list.
*/
static int
ibd_init_rxlist(ibd_state_t *state)
{
	int i;
for (i = 0; i < state->id_num_rwqe; i++) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS);
}
/*
* Free the statically allocated Rx buffer list.
*
*/
static void
ibd_fini_rxlist(ibd_state_t *state)
{
}
}
/*
* Allocate a single recv wqe and register it so it is almost
* ready to be posted to the hardware.
*/
static int
{
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
NULL) {
return (DDI_FAILURE);
}
IBT_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
* Free an allocated recv wqe.
*/
static void
{
return;
}
/*
	 * Indicate to the callback function that this rwqe/packet
	 * should not be recycled. The freemsg() will invoke
* ibd_freemsg_cb().
*/
}
}
/*
* Delete the rwqe being freed from the rx list.
*/
static void
{
else
else
}
/*
 * IBA Rx/Tx completion queue handler. Guaranteed to be single
 * threaded and nonreentrant for this CQ. When using combined CQ,
* this handles Tx and Rx completions. With separate CQs, this handles
* only Rx completions.
*/
/* ARGSUSED */
static void
ibd_rcq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
{
	ibd_state_t *state = (ibd_state_t *)arg;
if (ibd_rx_softintr == 1)
else
}
/*
* Separate CQ handler for Tx completions, when the Tx CQ is in
* interrupt driven mode.
*/
/* ARGSUSED */
static void
ibd_scq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
{
	ibd_state_t *state = (ibd_state_t *)arg;
if (ibd_tx_softintr == 1)
else
(void) ibd_tx_recycle((char *)state);
}
/*
 * Multicast group create/delete trap handler. These will be delivered
 * on a kernel thread (handling can thus block) and can be invoked
* concurrently. The handler can be invoked anytime after it is
* registered and before ibt_detach().
*/
/* ARGSUSED */
static void
ibd_snet_notices_handler(void *arg, ib_gid_t gid,
    ibt_subnet_event_code_t code, ibt_subnet_event_t *event)
{
	ibd_state_t *state = (ibd_state_t *)arg;
/*
* The trap handler will get invoked once for every event for
	 * every port. The input "gid" is the GID0 of the port the
* trap came in on; we just need to act on traps that came
* to our port, meaning the port on which the ipoib interface
* resides. Since ipoib uses GID0 of the port, we just match
* the gids to check whether we need to handle the trap.
*/
return;
switch (code) {
case IBT_SM_EVENT_UNAVAILABLE:
/*
* If we are in promiscuous mode or have
* sendnonmembers, we need to print a warning
* message right now. Else, just store the
* information, print when we enter promiscuous
* mode or attempt nonmember send. We might
* also want to stop caching sendnonmember.
*/
"degraded due to unavailability of multicast "
"traps");
break;
case IBT_SM_EVENT_AVAILABLE:
/*
* If we printed a warning message above or
* while trying to nonmember send or get into
* promiscuous mode, print an okay message.
*/
"restored due to availability of multicast "
"traps");
break;
case IBT_SM_EVENT_MCG_CREATED:
case IBT_SM_EVENT_MCG_DELETED:
/*
* First check if the instance is being
* [de]initialized; back off then, without doing
* anything more, since we are not sure if the
* async thread is around, or whether we might
* be racing with the detach code in ibd_m_stop()
* that scans the mcg list.
*/
if (!ibd_async_safe(state))
return;
break;
}
}
static void
ibd_async_trap(ibd_state_t *state, ibd_req_t *req)
{
/*
* Atomically search the nonmember and sendonlymember lists and
* delete.
*/
/*
	 * If in promiscuous mode, try to join/attach to the new
	 * mcg. Given the unreliable out-of-order mode of trap
* delivery, we can never be sure whether it is a problem
* if the join fails. Thus, we warn the admin of a failure
* if this was a creation trap. Note that the trap might
* actually be reporting a long past event, and the mcg
* might already have been deleted, thus we might be warning
* in vain.
*/
"new multicast gid %016llx:%016llx",
}
/*
* Free the request slot allocated by the subnet event thread.
*/
}
/*
* GLDv3 entry point to get capabilities.
*/
static boolean_t
ibd_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	ibd_state_t *state = arg;
switch (cap) {
case MAC_CAPAB_HCKSUM: {
/*
* We either do full checksum or not do it at all
*/
else
return (B_FALSE);
break;
}
case MAC_CAPAB_LSO: {
/*
* In addition to the capability and policy, since LSO
* relies on hw checksum, we'll not enable LSO if we
* don't have hw checksum. Of course, if the HCA doesn't
* provide the reserved lkey capability, enabling LSO will
* actually affect performance adversely, so we'll disable
* LSO even for that case.
*/
return (B_FALSE);
return (B_FALSE);
if (state->id_hca_res_lkey_capab == 0) {
"disabling LSO");
return (B_FALSE);
}
break;
}
default:
return (B_FALSE);
}
return (B_TRUE);
}
static int
ibd_get_port_details(ibd_state_t *state)
{
	ibt_status_t ret;
/*
* Query for port information
*/
"failed, ret=%d", ret);
return (DDI_FAILURE);
}
/*
* If the link already went down by the time we get here,
* give up
*/
return (DDI_FAILURE);
}
/*
* If the link is active, verify the pkey
*/
"failed, ret=%d", ret);
return (DDI_FAILURE);
}
/*
* Now that the port is active, record the port speed
*/
return (DDI_SUCCESS);
}
static int
ibd_alloc_cqs(ibd_state_t *state)
{
	ibt_status_t ret;
/*
* Theoretically, there is no point in having more than #rwqe
* plus #swqe cqe's, except that the CQ will be signalled for
* overflow when the last wqe completes, if none of the previous
* cqe's have been polled. Thus, we allocate just a few less wqe's
* to make sure such overflow does not occur.
*/
if (ibd_separate_cqs == 1) {
/*
* Allocate Receive CQ.
*/
} else {
}
"failed, ret=%d\n", ret);
return (DDI_FAILURE);
}
"moderation failed, ret=%d\n", ret);
}
/*
* Allocate Send CQ.
*/
} else {
}
"failed, ret=%d\n", ret);
return (DDI_FAILURE);
}
"moderation failed, ret=%d\n", ret);
}
} else {
		/*
		 * Allocate combined CQ for Tx and Rx.
		 */
} else {
state->id_num_swqe);
}
"failed, ret=%d\n", ret);
return (DDI_FAILURE);
}
}
/*
* Print message in case we could not allocate as many wqe's
* as was requested.
*/
}
}
return (DDI_SUCCESS);
}
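/*
 * EXAMPLE (illustrative only, not part of the driver): the CQ-sizing
 * policy described at the top of the function above, as a standalone
 * helper. The CQ is sized for all outstanding wqes, and we then hand
 * out slightly fewer wqes than the CQ can hold so that completions
 * can never overflow it. The names and the slack value below are
 * hypothetical.
 */
#ifdef IBD_EXAMPLE_SKETCHES
#define	IBD_EXAMPLE_CQ_SLACK	2	/* hypothetical safety margin */

static uint_t
ibd_example_num_wqes_for_cq(uint_t real_cq_size)
{
	/* Never post more wqes than (CQ entries - slack) */
	if (real_cq_size <= IBD_EXAMPLE_CQ_SLACK)
		return (1);
	return (real_cq_size - IBD_EXAMPLE_CQ_SLACK);
}
#endif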
static int
ibd_setup_ud_channel(ibd_state_t *state)
{
if (state->id_hca_res_lkey_capab)
"failed, ret=%d\n", ret);
return (DDI_FAILURE);
}
&ud_chan_attr)) != IBT_SUCCESS) {
"failed, ret=%d\n", ret);
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
ibd_undo_m_start(ibd_state_t *state)
{
/*
* Before undoing the work done in ibd_m_start(), we need to
* mark the link state as unknown to prevent the nw
* layer from using this instance for any new transfers.
*/
if (progress & IBD_DRV_PORT_DETAILS_OBTAINED) {
}
if (progress & IBD_DRV_STARTED) {
}
/*
* First, stop receive interrupts; this stops the driver from
* handing up buffers to higher layers. Wait for receive buffers
* to be returned and give up after 5 seconds.
*/
if (progress & IBD_DRV_RCQ_NOTIFY_ENABLED) {
attempts = 50;
if (--attempts == 0) {
/*
* There are pending bufs with the network
* layer and we have no choice but to wait
* for them to be done with. Reap all the
* Tx/Rx completions posted since
* we turned off the notification and
* return failure.
*/
"reclaiming failed");
return (DDI_FAILURE);
}
}
}
if (progress & IBD_DRV_SM_NOTICES_REGISTERED) {
while (state->id_trap_inprog > 0)
	cv_wait(&state->id_trap_cv, &state->id_trap_lock);
}
if (progress & IBD_DRV_SCQ_NOTIFY_ENABLED) {
/*
* Flushing the channel ensures that all pending WQE's
* are marked with flush_error and handed to the CQ. It
* does not guarantee the invocation of the CQ handler.
* This call is guaranteed to return successfully for
* UD QPNs.
*/
IBT_SUCCESS) {
"failed, ret=%d", ret);
}
/*
* Turn off Tx interrupts and poll. By the time the polling
* returns an empty indicator, we are sure we have seen all
* pending Tx callbacks. Note that after the call to
* ibt_set_cq_handler() returns, the old handler is
* guaranteed not to be invoked anymore.
*/
if (ibd_separate_cqs == 1) {
}
}
if (progress & IBD_DRV_ASYNC_THR_CREATED) {
/*
* No new async requests will be posted since the device
* link state has been marked as unknown; completion handlers
* have been turned off, so Tx handler will not cause any
* more IBD_ASYNC_REAP requests.
*
* Queue a request for the async thread to exit, which will
* be serviced after any pending ones. This can take a while,
* especially if the SM is unreachable, since IBMF will slowly
* timeout each SM request issued by the async thread. Reap
* the thread before continuing on, we do not want it to be
* lingering in modunloaded code (or we could move the reap
* to ibd_detach(), provided we keep track of the current
* id_async_thrid somewhere safe).
*/
}
if (progress & IBD_DRV_BCAST_GROUP_JOINED) {
/*
* Drop all residual full/non membership. This includes full
* membership to the broadcast group, and any nonmembership
* acquired during transmits. We do this after the Tx completion
* handlers are done, since those might result in some late
* leaves; this also eliminates a potential race with that
* path wrt the mc full list insert/delete. Trap handling
* has also been suppressed at this point. Thus, no locks
* are required while traversing the mc full list.
*/
}
}
if (progress & IBD_DRV_RXLIST_ALLOCD) {
}
if (progress & IBD_DRV_TXLIST_ALLOCD) {
}
if (progress & IBD_DRV_UD_CHANNEL_SETUP) {
IBT_SUCCESS) {
"failed, ret=%d", ret);
}
}
if (progress & IBD_DRV_CQS_ALLOCD) {
if (ibd_separate_cqs == 1) {
IBT_SUCCESS) {
"failed, ret=%d", ret);
}
}
"ret=%d", ret);
}
}
if (progress & IBD_DRV_ACACHE_INITIALIZED) {
}
if (progress & IBD_DRV_BCAST_GROUP_FOUND) {
/*
* If we'd created the ipoib broadcast group and had
* successfully joined it, leave it now
*/
if (state->id_bgroup_created) {
}
}
return (DDI_SUCCESS);
}
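/*
 * EXAMPLE (illustrative only, not part of the driver): the bounded
 * wait used above when draining receive buffers still held by the
 * network layer: poll a counter at fixed intervals and give up after
 * about 5 seconds. The counter and the 100ms tick are hypothetical;
 * delay() and drv_usectohz() are standard DDI routines.
 */
#ifdef IBD_EXAMPLE_SKETCHES
static int
ibd_example_wait_rx_drain(volatile uint32_t *held_cnt)
{
	int attempts = 50;	/* 50 * 100ms = 5 seconds */

	while (*held_cnt != 0) {
		if (--attempts == 0)
			return (-1);	/* caller reports DDI_FAILURE */
		delay(drv_usectohz(100000));	/* 100ms per attempt */
	}
	return (0);
}
#endif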
/*
* GLDv3 entry point to start hardware.
*/
/*ARGSUSED*/
static int
ibd_m_start(void *arg)
{
int err;
if (state->id_mac_state & IBD_DRV_STARTED)
	return (DDI_SUCCESS);
/*
* Get port details; if we fail here, very likely the port
* state is inactive or the pkey can't be verified
*/
return (EAGAIN);
}
/*
* Find the IPoIB broadcast group
*/
goto m_start_fail;
}
/*
* Initialize per-interface caches and lists; if we fail here,
* it is most likely due to a lack of resources
*/
goto m_start_fail;
}
/*
* Allocate send and receive completion queues
*/
goto m_start_fail;
}
/*
* Setup a UD channel
*/
goto m_start_fail;
}
/*
* Allocate and initialize the tx buffer list
*/
goto m_start_fail;
}
/*
* If we have separate cqs, create the send cq handler here
*/
IBT_NEXT_COMPLETION)) != IBT_SUCCESS) {
"failed, ret=%d", ret);
goto m_start_fail;
}
}
/*
* Allocate and initialize the rx buffer list
*/
goto m_start_fail;
}
/*
* Join IPoIB broadcast group
*/
goto m_start_fail;
}
/*
* Create the async thread; thread_create never fails.
*/
/*
* When we did mac_register() in ibd_attach(), we didn't register
* the real macaddr and we didn't have the true port mtu. Now that
* we're almost ready, set the local mac address and broadcast
* addresses and update gldv3 about the real values of these
* parameters.
*/
/*
* Setup the receive cq handler
*/
IBT_NEXT_COMPLETION)) != IBT_SUCCESS) {
"failed, ret=%d", ret);
goto m_start_fail;
}
/*
* Setup the subnet notices handler after we've initialized the acache/
* mcache and started the async thread, both of which are required for
* the trap handler to function properly.
*
* Now that the async thread has been started (and we've already done
* a mac_register() during attach so mac_tx_update() can be called
* if necessary without any problem), we can enable the trap handler
* to queue requests to the async thread.
*/
/*
* Indicate link status to GLDv3 and higher layers. By default,
* we assume we are in up state (which must have been true at
* least at the time the broadcast mcg's were probed); if there
* were any up/down transitions till the time we come here, the
* async handler will have updated last known state, which we
* use to tell GLDv3. The async handler will not send any
* notifications to GLDv3 till we reach here in the initialization
* sequence.
*/
return (DDI_SUCCESS);
m_start_fail:
/*
* If we ran into a problem during ibd_m_start() and ran into
* some other problem during undoing our partial work, we can't
* do anything about it. Ignore any errors we might get from
* ibd_undo_m_start() and just return the original error we got.
*/
(void) ibd_undo_m_start(state);
return (err);
}
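/*
 * EXAMPLE (illustrative only, not part of the driver): the staged
 * initialization pattern ibd_m_start() uses, in skeleton form:
 * attempt each step, record it in a progress mask on success, and on
 * any failure fall through to a single cleanup point that undoes only
 * what was recorded. Every name here is hypothetical.
 */
#ifdef IBD_EXAMPLE_SKETCHES
#define	EX_STAGE_ONE	0x01
#define	EX_STAGE_TWO	0x02

static int ex_stage_one(void) { return (0); }	/* hypothetical step */
static int ex_stage_two(void) { return (0); }	/* hypothetical step */

static int
ibd_example_m_start(void)
{
	uint_t progress = 0;
	int err;

	if ((err = ex_stage_one()) != 0)
		goto example_fail;
	progress |= EX_STAGE_ONE;

	if ((err = ex_stage_two()) != 0)
		goto example_fail;
	progress |= EX_STAGE_TWO;

	return (0);

example_fail:
	/*
	 * Undo whatever 'progress' records, in reverse order; ignore
	 * errors in the undo path and report the original failure.
	 */
	if (progress & EX_STAGE_TWO) {
		/* undo stage two here */
	}
	if (progress & EX_STAGE_ONE) {
		/* undo stage one here */
	}
	return (err);
}
#endif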
/*
* GLDv3 entry point to stop hardware from receiving packets.
*/
/*ARGSUSED*/
static void
ibd_m_stop(void *arg)
{
/*
* Since ibd_m_stop() doesn't expect any return, we cannot
* fail even if we run into some problem with ibd_undo_m_start().
* The best we can do is to leave it in a good state, so
* perhaps a future unplumb will succeed.
*/
(void) ibd_undo_m_start(state);
}
/*
* GLDv3 entry point to modify device's mac address. We do not
* allow address modifications.
*/
static int
ibd_m_unicst(void *arg, const uint8_t *macaddr)
{
	ibd_state_t *state = (ibd_state_t *)arg;

/*
* Don't bother even comparing the macaddr if we haven't
* completed ibd_m_start().
*/
if ((state->id_mac_state & IBD_DRV_STARTED) == 0)
	return (0);

if (bcmp(macaddr, &state->id_macaddr, IPOIB_ADDRL) == 0)
	return (0);
else
	return (EINVAL);
}
/*
* The blocking part of the IBA multicast join/leave operations are done
* out of here on the async thread.
*/
static void
ibd_async_multicast(ibd_state_t *state, ib_gid_t mgid, int op)
{
if (op == IBD_ASYNC_JOIN) {
}
} else {
/*
* Here, we must search for the proper mcg_info and
* use that to leave the group.
*/
}
}
/*
* This function queues the operation to the async thread and
* returns success for a valid multicast address.
*/
static int
ibd_m_multicst(void *arg, boolean_t add, const uint8_t *mcmac)
{
	ibd_state_t *state = (ibd_state_t *)arg;

/*
* If we haven't completed ibd_m_start(), async thread wouldn't
* have been started and id_bcaddr wouldn't be set, so there's
* no point in continuing.
*/
if ((state->id_mac_state & IBD_DRV_STARTED) == 0)
	return (0);
/*
* The incoming multicast address might not be aligned properly
* on a 4 byte boundary to be considered an ipoib_mac_t. We force
* it to look like one though, to get the offsets of the mc gid,
* since we know we are not going to dereference any values with
* the ipoib_mac_t pointer.
*/
/*
* Check validity of MCG address. We could additionally check
* that an enable/disable is not being issued on the "broadcast"
* mcg, but since this operation is only invokable by privileged
* programs anyway, we allow the flexibility to those dlpi apps.
* Note that we do not validate the "scope" of the IBA mcg.
*/
return (EINVAL);
/*
* fill in multicast pkey and scope
*/
/*
* If someone is trying to JOIN/LEAVE the broadcast group, we do
* nothing (i.e. we stay JOINed to the broadcast group done in
* ibd_m_start()), to mimic ethernet behavior. IPv4 specifically
* requires to be joined to broadcast groups at all times.
* ibd_join_group() has an ASSERT(omce->mc_fullreap) that also
* depends on this.
*/
return (0);
return (ENOMEM);
if (add) {
} else {
}
return (0);
}
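/*
 * EXAMPLE (illustrative only, not part of the driver): the alignment
 * fix-up described above. An incoming multicast address may not be
 * 4-byte aligned, so instead of dereferencing it through an
 * ipoib_mac_t pointer directly, a safe variant copies it into a
 * properly aligned local first. The helper name is hypothetical.
 */
#ifdef IBD_EXAMPLE_SKETCHES
static void
ibd_example_aligned_mac(const uint8_t *mcmac, ipoib_mac_t *out)
{
	/* bcopy() places no alignment requirements on its arguments */
	bcopy(mcmac, out, sizeof (ipoib_mac_t));
}
#endif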
/*
* The blocking part of the IBA promiscuous operations are done
* out of here on the async thread. The dlpireq parameter indicates
* whether this invocation is due to a dlpi request or due to
* a port up/down event.
*/
static void
ibd_async_unsetprom(ibd_state_t *state)
{
}
}
/*
* The blocking part of the IBA promiscuous operations are done
* out of here on the async thread. The dlpireq parameter indicates
* whether this invocation is due to a dlpi request or due to
* a port up/down event.
*/
static void
ibd_async_setprom(ibd_state_t *state)
{
int i;
char ret = IBD_OP_COMPLETED;
/*
* Obtain all active MC groups on the IB fabric with
* specified criteria (scope + Pkey + Qkey + mtu).
*/
IBT_SUCCESS) {
"groups");
goto done;
}
/*
* Iterate over the returned mcg's and join as NonMember
* to the IP mcg's.
*/
for (i = 0; i < numg; i++) {
/*
* Do a NonMember JOIN on the MC group.
*/
"multicast gid %016llx:%016llx",
}
done:
}
/*
* GLDv3 assumes phys state receives more packets than multi state,
* which is not true for IPoIB. Thus, treat the multi and phys
* promiscuous states the same way to work with GLDv3's assumption.
*/
static int
ibd_m_promisc(void *arg, boolean_t on)
{
	ibd_state_t *state = (ibd_state_t *)arg;

/*
* Async thread wouldn't have been started if we haven't
* passed ibd_m_start()
*/
if ((state->id_mac_state & IBD_DRV_STARTED) == 0)
	return (0);
return (ENOMEM);
if (on) {
} else {
}
return (0);
}
/*
* GLDv3 entry point for gathering statistics.
*/
static int
ibd_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	ibd_state_t *state = (ibd_state_t *)arg;

switch (stat) {
case MAC_STAT_IFSPEED:
break;
case MAC_STAT_MULTIRCV:
break;
case MAC_STAT_BRDCSTRCV:
break;
case MAC_STAT_MULTIXMT:
break;
case MAC_STAT_BRDCSTXMT:
break;
case MAC_STAT_RBYTES:
break;
case MAC_STAT_IPACKETS:
break;
case MAC_STAT_OBYTES:
break;
case MAC_STAT_OPACKETS:
break;
case MAC_STAT_OERRORS:
break;
case MAC_STAT_IERRORS:
*val = 0;
break;
case MAC_STAT_NOXMTBUF:
break;
case MAC_STAT_NORCVBUF:
default:
return (ENOTSUP);
}
return (0);
}
static void
ibd_async_txsched(ibd_state_t *state)
{
int ret;
if (ibd_txcomp_poll)
if (ret && ibd_txcomp_poll) {
else {
"no memory, can't schedule work slot");
}
}
}
static int
ibd_resume_transmission(ibd_state_t *state)
{
int flag;
int met_thresh = 0;
int ret = -1;
}
if (met_thresh) {
ret = 0;
}
if (ret == 0)
return (ret);
}
/*
* Release the send wqe back into free list.
*/
static void
ibd_release_swqe(ibd_state_t *state, ibd_swqe_t *swqe)
{
/*
* Add back on Tx list for reuse.
*/
}
} else {
}
}
/*
* Acquire a send wqe from free list.
* Returns error number and send wqe pointer.
*/
static int
ibd_acquire_swqe(ibd_state_t *state, ibd_swqe_t **swqe)
{
int rc = 0;
/*
* Check and reclaim some of the completed Tx requests.
* If someone else is already in this code and pulling Tx
* completions, no need to poll, since the current lock holder
* will do the work anyway. Normally, we poll for completions
* every few Tx attempts, but if we are short on Tx descriptors,
* we always try to poll.
*/
if ((ibd_txcomp_poll == 1) &&
}
/*
* Grab required transmit wqes.
*/
} else {
/*
* If we did not find the number we were looking for, flag
* no resource. Adjust list appropriately in either case.
*/
}
return (rc);
}
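/*
 * EXAMPLE (illustrative only, not part of the driver): the essence of
 * the swqe-acquire path above, reduced to a mutex-protected free-list
 * pop. The real code first reclaims completed Tx requests (omitted
 * here); a NULL return means the caller must flag "no resource" and
 * reschedule. Types and names are hypothetical stand-ins.
 */
#ifdef IBD_EXAMPLE_SKETCHES
typedef struct ex_wqe {
	struct ex_wqe *w_next;
} ex_wqe_t;

static ex_wqe_t *
ibd_example_acquire_wqe(kmutex_t *lock, ex_wqe_t **freelist)
{
	ex_wqe_t *w;

	mutex_enter(lock);
	if ((w = *freelist) != NULL)
		*freelist = w->w_next;	/* pop from the free list */
	mutex_exit(lock);

	return (w);
}
#endif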
static int
ibd_setup_lso(ibd_swqe_t *node, mblk_t *mp, uint32_t mss,
    ibt_ud_dest_hdl_t ud_dest)
{
/*
* The code in ibd_send would've set 'wr.ud.udwr_dest' by default;
* we need to adjust it here for lso.
*/
/*
* Calculate the LSO header size and set it in the UD LSO structure.
* Note that the only assumption we make is that each of the IPoIB,
* IP and TCP headers will be contained in a single mblk fragment;
* together, the headers may span multiple mblk fragments.
*/
}
}
/*
* If the lso header fits entirely within a single mblk fragment,
* we'll avoid an additional copy of the lso header here and just
* pass the b_rptr of the mblk directly.
*
* If this isn't true, we'd have to allocate for it explicitly.
*/
} else {
/* On work completion, remember to free this allocated hdr */
lso->lso_hdr_sz = 0;
return (-1);
}
}
/*
* Copy in the lso header only if we need to
*/
} else {
break;
}
}
}
return (0);
}
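/*
 * EXAMPLE (illustrative only, not part of the driver): the "does the
 * LSO header fit in one fragment?" test discussed above. If the first
 * fragment holds the whole IPoIB+IP+TCP header, the work request can
 * point at b_rptr directly; otherwise the header must be copied into
 * a separately allocated buffer. MBLKL() is assumed available from
 * <sys/strsun.h>; hdr_sz is computed by the caller.
 */
#ifdef IBD_EXAMPLE_SKETCHES
static boolean_t
ibd_example_lso_hdr_in_one_frag(mblk_t *mp, uint_t hdr_sz)
{
	return ((uint_t)MBLKL(mp) >= hdr_sz ? B_TRUE : B_FALSE);
}
#endif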
static void
ibd_free_lsohdr(ibd_swqe_t *node, mblk_t *mp)
{
return;
/*
* Free any header space that we might've allocated if we
* did an LSO
*/
lso->lso_hdr_sz = 0;
}
}
}
static void
ibd_post_send(ibd_state_t *state, ibd_swqe_t *node)
{
uint_t i;
/*
* Enqueue the new node in chain of wqes to send
*/
if (state->id_tx_head) {
} else {
}
/*
* If someone else is helping out with the sends,
* just go back
*/
if (state->id_tx_busy) {
return;
}
/*
* Otherwise, mark the flag to indicate that we'll be
* doing the dispatch of what's there in the wqe chain
*/
while (state->id_tx_head) {
/*
* Collect pending requests, IBD_MAX_POST_MULTIPLE wrs
* at a time if possible, and keep posting them.
*/
}
/*
* Release the txpost lock before posting the
* send request to the hca; if the posting fails
* for some reason, we'll never receive completion
* intimation, so we'll need to cleanup.
*/
/*
* If posting fails for some reason, we'll never receive
* completion intimation, so we'll need to cleanup. But
* we need to make sure we don't clean up nodes whose
* wrs have been successfully posted. We assume that the
* hca driver returns on the first failure to post and
* therefore the first 'num_posted' entries don't need
* cleanup here.
*/
num_posted = 0;
if (ibt_status != IBT_SUCCESS) {
"posting multiple wrs failed: "
"requested=%d, done=%d, ret=%d",
for (i = num_posted; i < n_wrs; i++)
}
/*
* Grab the mutex before we go and check the tx Q again
*/
}
state->id_tx_busy = 0;
}
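/*
 * EXAMPLE (illustrative only, not part of the driver): the
 * single-dispatcher pattern used by the function above. Senders
 * enqueue under a lock; whoever finds the busy flag clear becomes the
 * dispatcher and drains the queue, dropping the lock while touching
 * the hardware. All types and names are hypothetical, and the LIFO
 * enqueue is a simplification.
 */
#ifdef IBD_EXAMPLE_SKETCHES
typedef struct ex_txnode {
	struct ex_txnode *t_next;
} ex_txnode_t;

typedef struct ex_txq {
	kmutex_t	txq_lock;
	ex_txnode_t	*txq_head;
	int		txq_busy;
} ex_txq_t;

static void
ibd_example_post(ex_txq_t *q, ex_txnode_t *t)
{
	ex_txnode_t *batch;

	mutex_enter(&q->txq_lock);
	t->t_next = q->txq_head;	/* enqueue the new node */
	q->txq_head = t;

	if (q->txq_busy) {
		/* another thread is dispatching; it will see our node */
		mutex_exit(&q->txq_lock);
		return;
	}
	q->txq_busy = 1;

	while (q->txq_head != NULL) {
		batch = q->txq_head;
		q->txq_head = NULL;
		mutex_exit(&q->txq_lock);	/* don't hold across hw */
		/* ... post 'batch' to the HCA here ... */
		(void) batch;
		mutex_enter(&q->txq_lock);
	}
	q->txq_busy = 0;
	mutex_exit(&q->txq_lock);
}
#endif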
static int
ibd_prepare_sgl(ibd_state_t *state, mblk_t *mp, ibd_swqe_t *node,
    uint_t lsohdr_sz)
{
int nmblks;
int i;
/*
* Let's skip ahead to the data if this is LSO
*/
pending_hdr = 0;
if (lsohdr_sz) {
if (frag_len > pending_hdr)
break;
pending_hdr -= frag_len;
}
}
/*
* Calculate the size of message data and number of msg blocks
*/
pktsize = 0;
}
pktsize -= pending_hdr;
/*
* Translating the virtual address regions into physical regions
* for using the Reserved LKey feature results in a wr sgl that
* is a little longer. Since failing ibt_map_mem_iov() is costly,
* we'll fix a high-water mark (65%) for when we should stop.
*/
/*
* We only do ibt_map_mem_iov() if the pktsize is above the
* "copy-threshold", and if the number of mp fragments is less than
* the maximum acceptable.
*/
if ((state->id_hca_res_lkey_capab) &&
(pktsize > IBD_TX_COPY_THRESH) &&
if (i == 0) {
}
}
if (ibt_status != IBT_SUCCESS) {
goto ibd_copy_path;
}
return (0);
}
/*
* Even though this is the copy path for transfers less than
* id_tx_buf_sz, it could still be an LSO packet. If so, it
* is possible the first data mblk fragment (data_mp) still
* contains part of the LSO header that we need to skip.
*/
pending_hdr = 0;
}
return (0);
}
/*
* Copy path for transfers greater than id_tx_buf_sz
*/
return (-1);
}
/*
* Copy the larger-than-id_tx_buf_sz packet into a set of
* fixed-sized, pre-mapped LSO buffers. Note that we might
* need to skip part of the LSO header in the first fragment
* as before.
*/
skip = pending_hdr;
avail = 0;
} else {
skip = 0;
}
}
}
return (0);
}
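/*
 * EXAMPLE (illustrative only, not part of the driver): the three-way
 * transmit decision described above, reduced to a predicate. Small
 * packets are bcopied into the swqe's pre-mapped buffer; larger ones
 * are dynamically mapped with ibt_map_mem_iov() if the fragment count
 * permits; heavily fragmented oversize packets fall back to the large
 * pre-mapped LSO buffers. Parameter names are hypothetical.
 */
#ifdef IBD_EXAMPLE_SKETCHES
typedef enum {
	EX_TX_BCOPY,	/* copy into the swqe's pre-mapped buffer */
	EX_TX_MAP_IOV,	/* dynamically map the mblk fragments */
	EX_TX_LSO_BUF	/* copy into a large pre-mapped LSO buffer */
} ex_tx_path_t;

static ex_tx_path_t
ibd_example_pick_tx_path(uint_t pktsize, uint_t nfrags,
    uint_t copy_thresh, uint_t max_frags, boolean_t res_lkey)
{
	if (pktsize <= copy_thresh)
		return (EX_TX_BCOPY);
	if (res_lkey && nfrags <= max_frags)
		return (EX_TX_MAP_IOV);
	return (EX_TX_LSO_BUF);
}
#endif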
/*
* Schedule a completion queue polling to reap the resource we're
* short on. If we implement the change to reap tx completions
* in a separate thread, we'll need to wake up that thread here.
*/
static int
ibd_sched_poll(ibd_state_t *state, int resource_type, int q_flag)
{
/*
* If we are asked to queue a work entry, we need to do it
*/
if (q_flag) {
return (-1);
}
return (0);
}
/*
* The passed in packet has this format:
* IPOIB_ADDRL b dest addr :: 2b sap :: 2b 0's :: data
*/
static boolean_t
ibd_send(ibd_state_t *state, mblk_t *mp)
{
/*
* If we aren't done with the device initialization and start,
* we shouldn't be here.
*/
return (B_FALSE);
/*
* If we don't have an swqe available, schedule a transmit
* completion queue cleanup and hold off on sending
* more packets until we have some free swqes
*/
return (B_FALSE);
/*
* If a poll cannot be scheduled, we have no choice but
* to drop this packet
*/
return (B_TRUE);
}
/*
* Initialize the commonly used fields in swqe to NULL to protect
* against ibd_tx_cleanup accidentally misinterpreting these on a
* failure.
*/
/*
* Obtain an address handle for the destination.
*/
} else {
DPRINT(5,
"ibd_send: acache lookup %s for %08X:%08X:%08X:%08X:%08X",
/*
* In poll mode, there are probably cqes pending in the
* cq, so ibd has to poll the cq here; otherwise the
* acache entries may never be recycled.
*/
if (ibd_txcomp_poll == 1)
/*
* Here if ibd_acache_lookup() returns EFAULT, it means ibd
* cannot find a path for the specific dest address. We
* should get rid of this kind of packet. We also should get
* rid of the packet if we cannot schedule a poll via the
* async thread. For the normal case, ibd will return the
* packet to the upper layer and wait for AH creation.
*
* Note that we always queue a work slot entry for the async
* thread when we fail AH lookup (even in intr mode); this is
* due to the convoluted way the code currently looks for AH.
*/
} else {
}
goto ibd_send_fail;
}
/*
* For ND6 packets, padding is at the front of the source lladdr.
* Insert the padding at front.
*/
sizeof (ib_header_info_t))) {
goto ibd_send_fail;
}
}
sizeof (ib_header_info_t));
"failure ");
goto ibd_send_fail;
}
sizeof (ib_header_info_t));
}
/* LINTED: E_CONSTANT_CONDITION */
}
}
/*
* Do LSO and checksum related work here. For LSO send, adjust the
* ud destination, the opcode and the LSO header information to the
* work request.
*/
lsohdr_sz = 0;
} else {
/*
* The routine can only fail if there's no memory; we
* can only drop the packet if this happens
*/
"ibd_send: no memory, lso posting failed");
goto ibd_send_fail;
}
}
else
/*
* Prepare the sgl for posting; the routine can only fail if there's
* no lso buf available for posting. If this is the case, we should
* probably resched for lso bufs to become available and then try again.
*/
} else {
}
goto ibd_send_fail;
}
/*
* Queue the wqe to hardware; since we can now simply queue a
* post instead of doing it serially, we cannot assume anything
* about the 'node' after ibd_post_send() returns.
*/
return (B_TRUE);
if (dofree)
return (rc);
}
/*
* GLDv3 entry point for transmitting datagram.
*/
static mblk_t *
ibd_m_tx(void *arg, mblk_t *mp)
{
	ibd_state_t *state = (ibd_state_t *)arg;

}
/* Send fail */
break;
}
}
return (mp);
}
/*
* This handles Tx and Rx completions. With separate CQs, this handles
* only Rx completions.
*/
static uint_t
ibd_intr(char *arg)
{
return (DDI_INTR_CLAIMED);
}
/*
* Poll and drain the cq
*/
static uint_t
ibd_drain_cq(ibd_state_t *state, ibt_cq_hdl_t cq_hdl, ibt_wc_t *wcs,
    uint_t numwcs)
{
uint_t total_polled = 0;
int i;
/*
* Channel being torn down.
*/
/*
* Only invoke the Tx handler to
* release possibly held resources
* like AH refcount etc. Can not
* invoke Rx handler because it might
* try adding buffers to the Rx pool
* when we are trying to deinitialize.
*/
continue;
} else {
}
}
}
} else {
}
}
}
return (total_polled);
}
/*
* Common code for interrupt handling as well as for polling
* for all completed wqe's while detaching.
*/
static void
ibd_poll_compq(ibd_state_t *state, ibt_cq_hdl_t cq_hdl)
{
int redo = 1;
uint_t num_polled = 0;
if (ibd_separate_cqs == 1) {
} else {
}
} else {
}
return;
}
/*
* In some cases (eg detaching), this code can be invoked on
* any cpu after disabling cq notification (thus no concurrency
* exists). Apart from that, the following applies normally:
* The receive completion handling is always on the Rx interrupt
* cpu. Transmit completion handling could be from any cpu if
* Tx CQ is poll driven, but always on Tx interrupt cpu if Tx CQ
* is interrupt driven. Combined completion handling is always
* on the interrupt cpu. Thus, lock accordingly and use the
* proper completion array.
*/
if (ibd_separate_cqs == 1) {
} else {
}
} else {
}
/*
* Poll and drain the CQ
*/
/*
* Enable CQ notifications and redrain the cq to catch any
* completions we might have missed after the ibd_drain_cq()
* above and before the ibt_enable_cq_notify() that follows.
* Finally, service any new requests to poll the cq that
* could've come in after the ibt_enable_cq_notify().
*/
do {
IBT_SUCCESS) {
}
else {
redo = 0;
}
} while (redo);
/*
* If we polled the receive cq and found anything, we need to flush
* it out to the nw layer here.
*/
}
}
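/*
 * EXAMPLE (illustrative only, not part of the driver): the
 * drain / enable-notify / redrain sequence described above. A
 * completion can slip in between the final poll and re-arming the
 * CQ, so after arming we poll once more and stop only when that
 * poll comes back empty. The drain callback is a hypothetical
 * stand-in for ibd_drain_cq().
 */
#ifdef IBD_EXAMPLE_SKETCHES
static void
ibd_example_drain_then_arm(ibt_cq_hdl_t cq,
    uint_t (*drain)(ibt_cq_hdl_t))
{
	int redo = 1;

	(void) drain(cq);	/* reap what has already completed */
	do {
		if (ibt_enable_cq_notify(cq, IBT_NEXT_COMPLETION) ==
		    IBT_SUCCESS) {
			/* catch completions that raced with arming */
			if (drain(cq) == 0)
				redo = 0;
		} else {
			redo = 0;
		}
	} while (redo);
}
#endif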
/*
* Unmap the memory area associated with a given swqe.
*/
static void
ibd_unmap_mem(ibd_state_t *state, ibd_swqe_t *swqe)
{
DPRINT(10,
"failed in ibt_unmap_mem_iov, ret=%d\n", stat);
}
}
}
/*
* Common code that deals with clean ups after a successful or
* erroneous transmission attempt.
*/
static void
ibd_tx_cleanup(ibd_state_t *state, ibd_swqe_t *swqe)
{
/*
* If this was a dynamic mapping in ibd_send(), we need to
* unmap here. If this was an lso buffer we'd used for sending,
* we need to release the lso buf to the pool, since the resource
* is scarce. However, if this was simply a normal send using
* the copybuf (present in each swqe), we don't need to release it.
*/
}
}
/*
* Drop the reference count on the AH; it can be reused
* now for a different destination if there are no more
* posted sends that will use it. This can be eliminated
* if we can always associate each Tx buffer with an AH.
* The ace can be null if we are cleaning up from the
* ibd_send() error path.
*/
/*
* The recycling logic can be eliminated from here
* and put into the async thread if we create another
* list to hold ACE's for unjoined mcg's.
*/
if (DEC_REF_DO_CYCLE(ace)) {
/*
* Check with the lock taken: we decremented
* reference count without the lock, and some
* transmitter might already have bumped the
* reference count (possible in case of multicast
* disable when we leave the AH on the active
* list). If not still 0, get out, leaving the
* recycle bit intact.
*
* Atomically transition the AH from active
* to free list, and queue a work request to
* leave the group and destroy the mce. No
* transmitter can be looking at the AH or
* the MCE in between, since we have the
* ac_mutex lock. In the SendOnly reap case,
* it is not necessary to hold the ac_mutex
* and recheck the ref count (since the AH was
* taken off the active list), we just do it
* to have uniform processing with the Full
* reap case.
*/
if (GET_REF_CYCLE(ace) == 0) {
/*
* Identify the case of fullmember reap as
* opposed to mcg trap reap. Also, port up
* might set ac_mce to NULL to indicate Tx
* cleanup should do no more than put the
* AH in the free list (see ibd_async_link).
*/
/*
* mc_req was initialized at mce
* creation time.
*/
}
}
}
}
/*
* Release the send wqe for reuse.
*/
}
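/*
 * EXAMPLE (illustrative only, not part of the driver): the
 * decrement-then-recheck-under-lock pattern used for the AH
 * reference count above. The count is dropped without the lock for
 * speed; only when it may have reached zero do we take the lock and
 * recheck, since a racing transmitter may have re-bumped it in the
 * window. The struct and the plain (cycle-bit-free) encoding are
 * hypothetical; atomic_dec_32_nv() is from <sys/atomic.h>.
 */
#ifdef IBD_EXAMPLE_SKETCHES
typedef struct ex_ace {
	kmutex_t	ac_lock;
	volatile uint32_t ac_ref;
} ex_ace_t;

static boolean_t
ibd_example_dec_ref(ex_ace_t *ace)
{
	boolean_t recycled = B_FALSE;

	if (atomic_dec_32_nv(&ace->ac_ref) == 0) {
		mutex_enter(&ace->ac_lock);
		/* recheck: a racing reference may have revived it */
		if (ace->ac_ref == 0) {
			/* ... move the entry to the free list here ... */
			recycled = B_TRUE;
		}
		mutex_exit(&ace->ac_lock);
	}
	return (recycled);
}
#endif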
/*
* Hand off the processed rx mp chain to mac_rx()
*/
static void
ibd_flush_rx(ibd_state_t *state, mblk_t *mpc)
{
state->id_rx_mp_len = 0;
}
if (mpc) {
}
}
/*
* Processing to be done after receipt of a packet; hand off to GLD
* in the format expected by GLD. The received packet has this
* format: 2b sap :: 00 :: data.
*/
static void
ibd_process_rx(ibd_state_t *state, ibd_rwqe_t *rwqe, ibt_wc_t *wc)
{
/*
* Track number handed to upper layer, and number still
* available to receive packets.
*/
/*
* Adjust write pointer depending on how much data came in.
*/
/*
* Make sure this is NULL or we're in trouble.
*/
"ibd_process_rx: got duplicate mp from rcq?");
}
/*
* When the incoming datagram was sent with global routing
* information, the IB link will deliver one of the IB link layer
* headers, called the Global Routing Header (GRH). The
* ibd driver uses the information in the GRH to build the
* Header_info structure and pass it with the datagram up
* to GLDv3.
* If the GRH is not valid, indicate to GLDv3 by setting
* the VerTcFlow field to 0.
*/
/* if it is a loopback packet, just drop it. */
IPOIB_ADDRL) == 0) {
return;
}
sizeof (ipoib_mac_t));
} else {
}
} else {
/*
* It cannot be an IBA multicast packet. It must have been
* unicast to us. Just copy the interface address to dst.
*/
sizeof (ipoib_mac_t));
}
/*
* For ND6 packets, padding is at the front of the source/target
* lladdr. However the inet6 layer is not aware of it, hence remove
* the padding from such packets.
*/
sizeof (ipoib_hdr_t))) {
return;
}
sizeof (ipoib_pgrh_t));
}
IPV6_HDR_LEN + len) {
IPV6_HDR_LEN + len)) {
" failed");
return;
}
sizeof (ipoib_pgrh_t) +
sizeof (ipoib_hdr_t));
}
/* LINTED: E_CONSTANT_CONDITION */
}
}
/*
* Update statistics
*/
/*
* Set receive checksum status in mp
* Hardware checksumming can be considered valid only if:
* 1. CQE.IP_OK bit is set
* 2. CQE.CKSUM = 0xffff
* 3. IPv6 routing header is not present in the packet
* 4. There are no IP_OPTIONS in the IP header
*/
(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
    HCK_FULLCKSUM | HCK_FULLCKSUM_OK, 0);
}
/*
* Add this mp to the list of processed mp's to send to
* the nw layer
*/
} else {
}
state->id_rx_mp_len++;
state->id_rx_mp_len = 0;
}
if (mpc) {
}
}
/*
* Callback code invoked from STREAMs when the receive data buffer is
* free for recycling.
*/
static void
ibd_freemsg_cb(char *arg)
{
/*
* If the wqe is being destructed, do not attempt recycling.
*/
return;
} else {
/*
* Upper layer has released held mblk, so we have
* no more use for keeping the old pointer in
* our rwqe.
*/
}
return;
}
return;
}
}
static uint_t
ibd_tx_recycle(char *arg)
{
/*
* Poll for completed entries
*/
/*
* Resume any blocked transmissions if possible
*/
(void) ibd_resume_transmission(state);
return (DDI_INTR_CLAIMED);
}
#ifdef IBD_LOGGING
static void
ibd_log_init(void)
{
ibd_lbuf_ndx = 0;
}
static void
ibd_log_fini(void)
{
if (ibd_lbuf)
ibd_lbuf_ndx = 0;
}
static void
ibd_log(const char *fmt, ...)
{
char tmpbuf[IBD_DMAX_LINE];
return;
if (msglen >= IBD_DMAX_LINE)
ibd_lbuf_ndx = 0;
}
#endif
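/*
 * EXAMPLE (illustrative only, not part of the driver): the
 * wrap-around log buffer that ibd_log() above implements. A formatted
 * line is appended at the current index; when a line would run past
 * the end of the buffer, the index wraps back to the start. Sizes and
 * names are hypothetical; vsnprintf() is the kernel's, and proper
 * mutex_init()/destroy of the lock is omitted for brevity.
 */
#ifdef IBD_EXAMPLE_SKETCHES
#define	EX_LOG_SZ	8192
#define	EX_MAX_LINE	128

static char ex_lbuf[EX_LOG_SZ];
static uint32_t ex_lbuf_ndx;
static kmutex_t ex_lbuf_lock;

static void
ibd_example_log(const char *fmt, ...)
{
	va_list ap;
	uint32_t msglen;
	char tmpbuf[EX_MAX_LINE];

	va_start(ap, fmt);
	msglen = (uint32_t)vsnprintf(tmpbuf, EX_MAX_LINE, fmt, ap);
	va_end(ap);
	if (msglen >= EX_MAX_LINE)
		msglen = EX_MAX_LINE - 1;

	mutex_enter(&ex_lbuf_lock);
	/* wrap to the start if this line would overflow the buffer */
	if (ex_lbuf_ndx + msglen + 1 > EX_LOG_SZ)
		ex_lbuf_ndx = 0;
	bcopy(tmpbuf, ex_lbuf + ex_lbuf_ndx, msglen);
	ex_lbuf_ndx += msglen;
	ex_lbuf[ex_lbuf_ndx++] = '\n';
	mutex_exit(&ex_lbuf_lock);
}
#endif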