/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Copyright (c) 2002-2006 Neterion, Inc.
*/
#ifdef XGE_DEBUG_FP
#include "xgehal-fifo.h"
#endif
{
return txdl_priv;
}
{
#ifdef XGE_DEBUG_ASSERT
/* make sure Xena overwrites the (illegal) t_code value on completion */
#endif
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
/* sync the TxDL to device */
#endif
/* write the pointer first */
&hw_pair->txdl_pointer);
/* spec: 0x00 = 1 TxD in the list */
}
/*
* according to the XENA spec:
*
* It is important to note that pointers and list control words are
* always written in pairs: in the first write, the host must write a
* pointer, and in the second write, it must write the list control
* word. Any other access will result in an error. Also, all 16 bytes
* of the pointer/control structure must be written, including any
* reserved bytes.
*/
xge_os_wmb();
/*
* we want to touch work_arr in order, with the ownership bit set to HW
*/
(unsigned long long)ctrl,
#ifdef XGE_HAL_FIFO_DUMP_TXD
#endif
}
{
if (invalid_frags){
"freeing corrupt dtrh %p, fragments %d list size %d",
xge_assert(invalid_frags == 0);
}
while(txdp){
"freeing linked dtrh %p, fragments %d list size %d",
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
current_txdl_priv->allocated = 0;
#endif
if (next_txdl_priv) {
}
else {
"freed linked dtrh fragments %d list size %d",
break;
}
}
xge_assert(frags == 0)
}
{
int i = txdl_count;
do{
xge_assert(i);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
current_txdl_priv->allocated = 0;
#endif
"dtrh %p restored at offset %d", txdp, i);
} while(current_txdl_priv);
}
/**
* xge_hal_fifo_dtr_private - Retrieve per-descriptor private data.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
* Retrieve per-descriptor private data.
* Note that ULD requests per-descriptor space via
* xge_hal_channel_open().
*
* Returns: private ULD data associated with the descriptor.
* Usage: See ex_xmit{} and ex_tx_compl{}.
*/
{
sizeof(xge_hal_fifo_txdl_priv_t);
}
/**
* xge_hal_fifo_dtr_buffer_cnt - Get number of buffers carried by the
* descriptor.
* @dtrh: Descriptor handle.
*
* Returns: Number of buffers stored in the given descriptor. Can be used
* _after_ the descriptor is set up for posting (see
* xge_hal_fifo_dtr_post()) and _before_ it is deallocated (see
* xge_hal_fifo_dtr_free()).
*
*/
{
}
/**
* xge_hal_fifo_dtr_reserve_many- Reserve fifo descriptors which span more
* than single txdl.
* @channelh: Channel handle.
* @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
* with a valid handle.
* @frags: minimum number of fragments to be reserved.
*
* Reserve TxDL(s) (that is, fifo descriptor)
* for the subsequent filling-in by the upper-layer driver (ULD)
* and posting on the corresponding channel (@channelh)
* via xge_hal_fifo_dtr_post().
*
* Returns: XGE_HAL_OK - success;
* XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
*
* See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
* xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
* Usage: See ex_xmit{}.
*/
{
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
unsigned long flags=0;
#endif
frags);
#if defined(XGE_HAL_TX_MULTI_RESERVE)
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
#endif
while(alloc_frags < frags) {
(xge_hal_dtr_h *)(void*)&next_txdp);
if (status != XGE_HAL_OK){
"failed to allocate linked fragments rc %d",
status);
if (*dtrh) {
}
if (dang_dtrh) {
}
break;
}
next_txdl_priv->dang_frags = 0;
#if defined(XGE_OS_MEMORY_CHECK)
#endif
if (!curr_txdp || !curr_txdl_priv) {
continue;
}
if (curr_txdl_priv->memblock ==
"linking dtrh %p, with %p",
xge_assert (next_txdp ==
alloc_frags += max_frags;
}
else {
xge_assert(*dtrh);
"dangling dtrh %p, linked with dtrh %p",
}
}
#if defined(XGE_HAL_TX_MULTI_RESERVE)
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
#endif
if (status == XGE_HAL_OK) {
/* reset the TxDL's private */
txdl_priv->align_dma_offset = 0;
txdl_priv->align_used_frags = 0;
txdl_priv->bytes_sent = 0;
/* reset TxD0 */
#if defined(XGE_OS_MEMORY_CHECK)
#endif
/* update statistics */
if (txdl_priv->dang_frags){
}
}
return status;
}
/**
* xge_hal_fifo_dtr_reserve - Reserve fifo descriptor.
* @channelh: Channel handle.
* @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
* with a valid handle.
*
* Reserve a single TxDL (that is, fifo descriptor)
* for the subsequent filling-in by the upper-layer driver (ULD)
* and posting on the corresponding channel (@channelh)
* via xge_hal_fifo_dtr_post().
*
* Note: it is the responsibility of ULD to reserve multiple descriptors
* for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
* carries up to configured number (fifo.max_frags) of contiguous buffers.
*
* Returns: XGE_HAL_OK - success;
* XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
*
* See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
* xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
* Usage: See ex_xmit{}.
*/
{
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
unsigned long flags=0;
#endif
#if defined(XGE_HAL_TX_MULTI_RESERVE)
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
flags);
#endif
#if defined(XGE_HAL_TX_MULTI_RESERVE)
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
flags);
#endif
if (status == XGE_HAL_OK) {
/* reset the TxDL's private */
txdl_priv->align_dma_offset = 0;
txdl_priv->align_used_frags = 0;
txdl_priv->dang_frags = 0;
txdl_priv->bytes_sent = 0;
/* reset TxD0 */
#if defined(XGE_OS_MEMORY_CHECK)
#endif
}
return status;
}
/**
* xge_hal_fifo_dtr_reserve_sp - Reserve fifo descriptor and store it in
* the ULD-provided "scratch" memory.
* @channelh: Channel handle.
* @dtr_sp_size: Size of the %dtr_sp "scratch pad" that HAL can use for TxDL.
* @dtr_sp: "Scratch pad" supplied by upper-layer driver (ULD).
*
* Reserve TxDL and fill-in ULD supplied "scratch pad". The difference
* between this API and xge_hal_fifo_dtr_reserve() is (possibly) -
* performance.
*
* If the upper layer uses ULP-defined commands, and if those commands have
* enough room, it can pack all the per-command information into one
* command, which is typically one contiguous block.
*
* Note: Unlike xge_hal_fifo_dtr_reserve(), this function can be used to
* allocate a single descriptor for transmit operation.
*
* See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_free(),
* xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
*/
{
/* FIXME: implement */
return XGE_HAL_OK;
}
/**
* xge_hal_fifo_dtr_post - Post descriptor on the fifo channel.
* @channelh: Channel handle.
* @dtrh: Descriptor obtained via xge_hal_fifo_dtr_reserve() or
* xge_hal_fifo_dtr_reserve_sp()
* @frags: Number of contiguous buffers that are part of a single
* transmit operation.
*
* Post descriptor on the 'fifo' type channel for transmission.
* Prior to posting, the descriptor should be filled in accordance with
* the Host/Xframe interface specification.
*
* See also: xge_hal_fifo_dtr_post_many(), xge_hal_ring_dtr_post().
* Usage: See ex_xmit{}.
*/
{
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
unsigned long flags = 0;
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
#endif
}
/**
* xge_hal_fifo_dtr_post_many - Post multiple descriptors on fifo
* channel.
* @channelh: Channel to post descriptor.
* @num: Number of descriptors (i.e., fifo TxDLs) in the %dtrs[].
* @dtrs: Descriptors obtained via xge_hal_fifo_dtr_reserve().
* @frags_arr: Number of fragments carried @dtrs descriptors.
* Note that frag_arr[i] corresponds to descriptor dtrs[i].
*
* Post multi-descriptor on the fifo channel. The operation is atomic:
* all descriptors are posted on the channel "back-to-back" without
* letting other posts (possibly driven by multiple transmitting threads)
* to interleave.
*
* See also: xge_hal_fifo_dtr_post(), xge_hal_ring_dtr_post().
*/
{
int i;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
unsigned long flags = 0;
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
flags);
#endif
for (i=0; i<num; i++) {
val64 = 0;
if (i == 0) {
} else if (i == num -1) {
}
}
#if defined(XGE_HAL_TX_MULTI_POST)
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
flags);
#endif
}
/**
* xge_hal_fifo_dtr_next_completed - Retrieve next completed descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle. Returned by HAL.
* @t_code: Transfer code, as per Xframe User Guide,
* Transmit Descriptor Format.
* Returned by HAL.
*
* Retrieve the _next_ completed descriptor.
* HAL uses channel callback (*xge_hal_channel_callback_f) to notify
* upper-layer driver (ULD) of new completed descriptors. After that
* the ULD can use xge_hal_fifo_dtr_next_completed to retrieve the rest
* completions (the very first completion is passed by HAL via
* xge_hal_channel_callback_f).
*
* Implementation-wise, the upper-layer driver is free to call
* xge_hal_fifo_dtr_next_completed either immediately from inside the
* channel callback, or in a deferred fashion and separate (from HAL)
* context.
*
* Non-zero @t_code means failure to process the descriptor.
* The failure could happen, for instance, when the link is
* down, in which case Xframe completes the descriptor because it
* is not able to send the data out.
*
* For details please refer to Xframe User Guide.
*
* Returns: XGE_HAL_OK - success.
* XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
* are currently available for processing.
*
* See also: xge_hal_channel_callback_f{},
* xge_hal_ring_dtr_next_completed().
* Usage: See ex_tx_compl{}.
*/
{
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
#endif
}
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
/* sync TxDL to read the ownership
*
* Note: 16 bytes means Control_1 & Control_2 */
16,
#endif
/* check whether host owns it */
/* see XGE_HAL_SET_TXD_T_CODE() above.. */
return XGE_HAL_OK;
}
/* no more completions */
*dtrh = 0;
}
/**
* xge_hal_fifo_dtr_free - Free descriptor.
* @channelh: Channel handle.
* @dtr: Descriptor handle.
*
* Free the reserved descriptor. This operation is "symmetrical" to
* xge_hal_fifo_dtr_reserve or xge_hal_fifo_dtr_reserve_sp.
* The "free-ing" completes the descriptor's lifecycle.
*
* After free-ing (see xge_hal_fifo_dtr_free()) the descriptor again can
* be:
*
* - reserved (xge_hal_fifo_dtr_reserve);
*
* - posted (xge_hal_fifo_dtr_post);
*
* - completed (xge_hal_fifo_dtr_next_completed);
*
* - and recycled again (xge_hal_fifo_dtr_free).
*
* For alternative state transitions and more details please refer to
* the design doc.
*
* See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
* Usage: See ex_tx_compl{}.
*/
{
#if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
unsigned long flags = 0;
#endif
(xge_hal_fifo_txd_t *)dtr);
#if defined(XGE_HAL_TX_MULTI_FREE)
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
flags);
#endif
txdl_priv->dang_frags = 0;
txdl_priv->alloc_frags = 0;
/* dtrh must have a linked list of dtrh */
/* free any dangling dtrh first */
if (dang_txdp) {
"freeing dangled dtrh %p for %d fragments",
}
/* now free the reserved dtrh list */
"freeing dtrh %p list of %d fragments", dtr,
}
else
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
#endif
#if defined(XGE_HAL_TX_MULTI_FREE)
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
flags);
#endif
}
/**
* xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
* in fifo descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @frag_idx: Index of the data buffer in the caller's scatter-gather list
* (of buffers).
* @vaddr: Virtual address of the data buffer.
* @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
* @size: Size of the data buffer (in bytes).
* @misaligned_size: Size (in bytes) of the misaligned portion of the
* data buffer, as determined by the ULD based on platform/OS/other
* specific criteria, which is outside of HAL's domain. See notes below.
*
* This API is part of the transmit descriptor preparation for posting
* (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
* xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
* All three APIs fill in the fields of the fifo descriptor,
* in accordance with the Xframe specification.
* On the PCI-X based systems aligning transmit data typically provides better
* transmit performance. The typical alignment granularity: L2 cacheline size.
* However, HAL does not make assumptions in terms of the alignment granularity;
* this is specified via additional @misaligned_size parameter described above.
* Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(), the ULD is
* expected to check the buffer's alignment; for this purpose HAL
* provides a separate xge_hal_check_alignment() API sufficient to cover
* most (but not all) possible alignment criteria.
* If the buffer appears to be aligned, the ULD calls
* xge_hal_fifo_dtr_buffer_set().
* Otherwise, ULD calls xge_hal_fifo_dtr_buffer_set_aligned().
*
* Note; This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
* addition to filling in the specified descriptor it aligns transmit data on
* the specified boundary.
* Note: Decision on whether to align or not to align a given contiguous
* transmit buffer is outside of HAL's domain. To this end ULD can use any
* programmable criteria, which can help to boost transmit performance.
*
* See also: xge_hal_fifo_dtr_buffer_set(),
* xge_hal_check_alignment().
*
* See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
* xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
*/
{
int remaining_size;
if (frag_idx != 0) {
}
/* On some systems buffer size could be zero.
* It is the responsibility of ULD and *not HAL* to
* detect it and skip it. */
xge_assert(size > 0);
xge_assert(misaligned_size != 0 &&
xge_assert(remaining_size >= 0);
return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
}
/* setup new buffer */
txdl_priv->align_dma_offset = 0;
#if defined(XGE_OS_DMA_REQUIRES_SYNC)
/* sync new buffer */
0,
#endif
if (remaining_size) {
txdp++;
}
return XGE_HAL_OK;
}
/**
* xge_hal_fifo_dtr_buffer_append - Append the contents of virtually
* contiguous data buffer to a single physically contiguous buffer.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @vaddr: Virtual address of the data buffer.
* @size: Size of the data buffer (in bytes).
*
* This API is part of the transmit descriptor preparation for posting
* (via xge_hal_fifo_dtr_post()).
* The main difference of this API wrt to the APIs
* xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
* contents of virtually contiguous data buffers received from
* upper layer into a single physically contiguous data buffer and the
* device will do a DMA from this buffer.
*
* See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
* xge_hal_fifo_dtr_buffer_set_aligned().
*/
{
xge_assert(size > 0);
return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
return XGE_HAL_OK;
}
/**
* xge_hal_fifo_dtr_buffer_finalize - Prepares a descriptor that contains the
* single physically contiguous buffer.
*
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @frag_idx: Index of the data buffer in the Txdl list.
*
* This API in conjunction with xge_hal_fifo_dtr_buffer_append() prepares
* a descriptor that consists of a single physically contiguous buffer
* which inturn contains the contents of one or more virtually contiguous
* buffers received from the upper layer.
*
* See Also: xge_hal_fifo_dtr_buffer_append().
*/
int frag_idx)
{
if (frag_idx != 0) {
}
#if defined(XGE_OS_DMA_REQUIRES_SYNC)
/* sync pre-mapped buffer */
0,
#endif
/* increment vaddr_start for the next buffer_append() iteration */
txdl_priv->align_dma_offset = 0;
}
/**
* xge_hal_fifo_dtr_buffer_set - Set transmit buffer pointer in the
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @frag_idx: Index of the data buffer in the caller's scatter-gather list
* (of buffers).
* @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
* @size: Size of the data buffer (in bytes).
*
* This API is part of the preparation of the transmit descriptor for posting
* (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
* xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
* All three APIs fill in the fields of the fifo descriptor,
* in accordance with the Xframe specification.
*
* See also: xge_hal_fifo_dtr_buffer_set_aligned(),
* xge_hal_check_alignment().
*
* See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
* xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
* Prepare transmit descriptor for transmission (via
* xge_hal_fifo_dtr_post()).
* See also: xge_hal_fifo_dtr_vlan_set().
* Note: Compare with xge_hal_fifo_dtr_buffer_set_aligned().
*
* Usage: See ex_xmit{}.
*/
{
if (frag_idx != 0) {
}
/* Note:
* it is the responsibility of upper layers and not HAL
* to detect and skip zero-size fragments
*/
xge_assert(size > 0);
}
/**
* xge_hal_fifo_dtr_mss_set - Set MSS.
* @dtrh: Descriptor handle.
* @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
* ULD, which in turn inserts the MSS into the @dtrh.
*
* This API is part of the preparation of the transmit descriptor for posting
* (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
* xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
* and xge_hal_fifo_dtr_cksum_set_bits().
* All these APIs fill in the fields of the fifo descriptor,
* in accordance with the Xframe specification.
*
* See also: xge_hal_fifo_dtr_reserve(),
* xge_hal_fifo_dtr_post(), xge_hal_fifo_dtr_vlan_set().
* Usage: See ex_xmit{}.
*/
{
}
/**
* xge_hal_fifo_dtr_cksum_set_bits - Offload checksum.
* @dtrh: Descriptor handle.
* @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
* TCP and/or UDP.
*
* Ask Xframe to calculate IPv4 & transport checksums for _this_ transmit
* descriptor.
* This API is part of the preparation of the transmit descriptor for posting
* (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
* xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
* and xge_hal_fifo_dtr_buffer_set().
* All these APIs fill in the fields of the fifo descriptor,
* in accordance with the Xframe specification.
*
* See also: xge_hal_fifo_dtr_reserve(),
* xge_hal_fifo_dtr_post(), XGE_HAL_TXD_TX_CKO_IPV4_EN,
* XGE_HAL_TXD_TX_CKO_TCP_EN.
* Usage: See ex_xmit{}.
*/
{
}
/**
* xge_hal_fifo_dtr_vlan_set - Set VLAN tag.
* @dtrh: Descriptor handle.
* @vlan_tag: 16bit VLAN tag.
*
* Insert VLAN tag into specified transmit descriptor.
* The actual insertion of the tag into outgoing frame is done by the hardware.
* See also: xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_mss_set().
*/
{
}
/**
* xge_hal_fifo_is_next_dtr_completed - Checks if the next dtr is completed
* @channelh: Channel handle.
*/
{
}
/* check whether host owns it */
return XGE_HAL_OK;
}
/* no more completions */
}