/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_IB_ADAPTERS_TAVOR_WR_H
#define _SYS_IB_ADAPTERS_TAVOR_WR_H
/*
* Contains all of the prototypes, #defines, and structures necessary
* for the Tavor Work Request Processing Routines
 * Specifically, it contains #defines, macros, and prototypes for building
 * each of the various types of WQE and for managing the WRID tracking
 * mechanisms.
*/
#ifdef __cplusplus
extern "C" {
#endif
/*
* The following macro is used to convert WQE address and size into the
* "wqeaddrsz" value needed in the tavor_wrid_entry_t (see below).
*/
((size) & TAVOR_WQE_NDS_MASK))
/*
* The following macros are used to calculate pointers to the Send or Receive
* (or SRQ) WQEs on a given QP, respectively
*/
/*
* The following macro is used to calculate the 'wqe_index' field during SRQ
* operation. This returns the index based on the WQE size, that can be used
* to reference WQEs in an SRQ.
*/
/*
* The following macros are used to access specific fields in Directed Route
* MAD packets. We can extract the MgmtClass, "hop pointer", and "hop count".
* We can also update the "hop pointer" as appropriate. Note: Again, because
* of the limited amount of direct handling the Tavor hardware does on special
 * QP requests (specifically on Directed Route MADs), the driver needs to
* update (as necessary) the "hop pointer" value depending on whether a MAD
* is outbound or inbound (i.e. depending on the relationship between "hop
* pointer" and "hop count" in the given MAD)
*/
}
}
}
if ((mgmtclass) == 0x81) { \
} \
}
/*
* The tavor_wrid_entry_s structure is used internally by the Tavor
* driver to contain all the information necessary for tracking WRIDs.
* Specifically, this structure contains the 64-bit WRID, the 32-bit quantity
* called "wr_wqeaddrsz" (which can also be found in every CQE), and the
* "wr_signaled_dbd" information which indicates whether a given entry was
* signaled or not and whether a doorbell was subsequently rung for this
* particular work request. Note: the latter piece of information is
* particularly useful during completion processing on errored CQEs.
*/
struct tavor_wrid_entry_s {
	/*
	 * NOTE(review): the members of this structure were missing from this
	 * copy of the file (an empty struct is also invalid ISO C).  They
	 * have been restored from the description in the block comment
	 * above: the 64-bit WRID, the 32-bit "wqeaddrsz" quantity (as also
	 * found in every CQE), and the signaled/doorbelled indication used
	 * during completion processing on errored CQEs.  Confirm field
	 * names/layout against the original source before relying on them.
	 */
	uint64_t		wr_wrid;		/* 64-bit work request ID */
	uint32_t		wr_wqeaddrsz;		/* WQE address & size (see macro above) */
	uint32_t		wr_signaled_dbd;	/* signaled / doorbell-rung flags */
};
/*
* The tavor_sw_wqe_dbinfo_t structure is used internally by the Tavor
* driver to return information (from the tavor_wqe_mlx_build_nextctl() and
* tavor_wqe_send_build_nextctl() routines) regarding the type of Tavor
* doorbell necessary.
*/
typedef struct tavor_sw_wqe_dbinfo_s {
/*
* The Work Queue Lock (WQL) structure. Each WQHDR (tavor_workq_hdr_t defined
* below) must lock access to the wridlist during any wridlist manipulation.
* Also, any Shared Receive Queue (SRQ) must also be able to lock the wridlist
 * since it maintains wridlists differently than normal QPs do. This
* 'tavor_wq_lock_t' structure is shared and accessible through the WQ or the
 * SRQ, and a refcnt is maintained. The last entity to release its use of
 * the lock also frees the memory.
*/
struct tavor_wq_lock_s {
	/*
	 * NOTE(review): this struct body appears to have been stripped from
	 * this copy of the file.  Per the comment above, it should contain
	 * (at least) the lock protecting wridlist manipulation and the
	 * reference count ("refcnt") shared between WQHDRs and SRQs --
	 * TODO: restore the members from the original source.
	 */
};
/*
* The tavor_wrid_list_hdr_s structure is used internally by the Tavor driver
* to track all the information necessary to manage a queue of WRID entries
* (the tavor_wrid_entry_s struct above).
* It contains some information regarding the status of a given WRID list
* (e.g. head index, tail index, queue full condition, etc.). Note: Although
* some of this information is also kept by the tavor_workq_hdr_s below, what
* is kept here may, in fact, represent the state of an old WRID list. It
* could be different from what is kept in the tavor_workq_hdr_s because this
* WRID list may no longer be the active WRID list. If it is an active list,
* however, then both sets of information should be up-to-date and consistent.
* Several of these structures are chained together on each work queue header
* to form a linked list (using the "wl_next" and "wl_prev"). These structs,
* in turn, each have a pointer to a queue of WRID entries. They also each
* have a pointer to the next "reapable" entry ("wl_reap_next") which is only
* used when a WRID list has been retired and is ready to be freed up.
* Lastly, it has a backpointer to the work queue header to which the WRID
* list belongs (this is for proper handling on removal).
*/
struct tavor_wrid_list_hdr_s {
	/*
	 * NOTE(review): most of the members described in the comment above
	 * ("wl_next"/"wl_prev" links, "wl_reap_next", the pointer to the
	 * queue of WRID entries, head/tail/full state, and the backpointer
	 * to the owning work queue header) are missing from this copy of
	 * the file -- TODO: restore them from the original source.
	 */
	/* For SRQ */
	/* Presumably the index of the next free WRID-list entry when this */
	/* list backs an SRQ -- confirm against the SRQ posting code. */
	int wl_free_list_indx;
};
/*
* The tavor_workq_hdr_s structure is used internally by the Tavor driver to
* track all the information necessary to manage the work queues associated
* with a given completion queue. It contains much of the information
* regarding the status of a given work queue (e.g. head index, tail index,
* queue full condition, etc.). Note: This information is kept here (i.e.
* associated with a completion queue) rather than as part of the QP because
* the queue pair may potentially be destroyed while outstanding CQEs still
* remain on the CQ.
* Several of these structures are chained together on each CQ to form a
* linked list (using the "wq_next" and "wq_prev"). These headers, in turn,
* link to the containers for the individual WRID entries (managed with the
* tavor_wrid_list_hdr_s structs above). Note: We keep a list of these
* tavor_wrid_list_hdr_s because a given QP may be used, destroyed (or
* transition to "Reset"), and then reused. The list helps us track where
* to put new WRID entries and where to pull old entries from.
* The "wq_qpn" (QP number) and "wq_send_or_recv" (TAVOR_WR_SEND or
* TAVOR_WR_RECV) are used to uniquely identify the given work queue.
* Lookups into the work queue list (to find a given work queue) will use
* these two fields as identifiers.
*/
struct tavor_workq_hdr_s {
	/*
	 * NOTE(review): this struct body appears to have been stripped from
	 * this copy of the file.  Per the comment above, it should contain
	 * (at least) the "wq_next"/"wq_prev" links chaining WQHDRs on a CQ,
	 * the "wq_qpn" and "wq_send_or_recv" identifiers used for lookups,
	 * work queue state (head/tail indexes, full condition), and the
	 * link to the WRID list containers -- TODO: restore the members
	 * from the original source.
	 */
};
typedef struct tavor_workq_compare_s {
/* For Work Request posting */
/* For WRID handling */
#ifdef __cplusplus
}
#endif
#endif /* _SYS_IB_ADAPTERS_TAVOR_WR_H */