/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2014 QLogic Corporation
* The contents of this file are subject to the terms of the
* QLogic End User License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the License at
* See the License for the specific language governing permissions
* and limitations under the License.
*/
/*
*/
#include "bnxe.h"
{
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo */
0xffffffffffffffff, /* dma_attr_addr_hi */
0xffffffffffffffff, /* dma_attr_count_max */
BNXE_DMA_ALIGNMENT, /* dma_attr_align */
0xffffffff, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0xffffffffffffffff, /* dma_attr_maxxfer */
0xffffffffffffffff, /* dma_attr_seg */
BNXE_MAX_DMA_SGLLEN, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0, /* dma_attr_flags */
};
{
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo */
0xffffffffffffffff, /* dma_attr_addr_hi */
0xffffffffffffffff, /* dma_attr_count_max */
BNXE_DMA_ALIGNMENT, /* dma_attr_align */
0xffffffff, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0xffffffffffffffff, /* dma_attr_maxxfer */
0xffffffffffffffff, /* dma_attr_seg */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0, /* dma_attr_flags */
};
{
int i;
for (i = 0; i < pTxPkt->num_handles; i++)
{
}
pTxPkt->num_handles = 0;
}
{
int i;
if (pTxPkt->num_handles > 0)
{
}
{
}
for (i = 0; i < BNXE_MAX_DMA_HANDLES_PER_PKT; i++)
{
}
pTxPkt->num_handles = 0;
}
{
while (!s_list_is_empty(pPktList))
{
}
}
/*
* Free the mblk and all frag mappings used by each packet in the list
* and then put the entire list on the free queue for immediate use.
*/
int idx,
{
if (s_list_entry_cnt(pPktList) == 0)
{
return;
}
{
if (pTxPkt->num_handles > 0)
{
}
{
}
}
}
/* Must be called with TX lock held!!! */
int idx)
{
int rc;
{
{
return BNXE_TX_DEFERPKT;
}
if (pUM->fmCapabilities &&
{
}
if (rc != LM_STATUS_SUCCESS)
{
/*
* Send failed (probably not enough BDs available)...
* Put the packet back at the head of the wait queue.
*/
return BNXE_TX_DEFERPKT;
}
}
return BNXE_TX_GOODXMIT;
}
int idx)
{
int rc;
if (pUM->fmCapabilities &&
{
}
{
}
else
{
}
if (s_list_entry_cnt(&tmpList))
{
}
if (pTxQ->noTxCredits == 0)
{
/* no need to notify the stack */
return;
}
{
if ((rc == BNXE_TX_GOODXMIT) &&
{
}
}
{
}
if (pTxQ->noTxCredits == 0)
{
{
}
else
{
/* notify the stack that tx resources are now available */
#else
#endif
}
}
}
{
int rc, i;
{
return BNXE_TX_RESOURCES_NO_OS_DMA_RES;
}
{
return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
}
NULL,
NULL,
&cookie,
&count)) != DDI_DMA_MAPPED)
{
return BNXE_TX_RESOURCES_NO_OS_DMA_RES;
}
/*
* ddi_dma_addr_bind_handle() correctly returns an error if the physical
* fragment count exceeds the maximum fragment count specified in the
* ddi_dma_attrib structure for the current pMblk. However, a packet can
* span multiple mblk's. The purpose of the check below is to make sure we
* do not overflow our fragment count limit based on what has already been
* mapped from this packet.
*/
if (partial)
{
/*
* Going to try a partial dma so (re)set count to the remaining number
* of dma fragments that are available leaving one fragment at the end.
*/
if (count == 0)
{
/*
* No more dma fragments are available. This fragment was not
* mapped and will be copied into the copy buffer along with the
* rest of the packet data.
*/
return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
}
}
/* map "count" dma fragments */
bindLen = 0;
for (i = 0; i < (count - 1); i++)
{
pFrag++;
}
pTxPkt->num_handles++;
if (partial)
{
/*
* Move the mblk's read pointer past the data that was bound to a DMA
* fragment. Any remaining data will get copied into the copy buffer.
*/
return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
}
return 0;
}
{
int rc;
/* Walk the chain to get the total pkt length... */
{
}
/*
* If the packet length is under the tx copy threshold then copy
 * all of the data into the copy buffer.
*/
{
{
{
continue;
}
}
/* Done! */
}
/* Try to DMA map all the blocks... */
{
{
continue;
}
if (tryMap)
{
{
/*
* The fragment was successfully mapped now move on to the
* next one. Here we set pCopyFrag to NULL which represents
* a break of continuous data in the copy buffer. If the
* packet header was copied the first fragment points to the
* beginning of the copy buffer. Since this block was mapped
* any future blocks that have to be copied must be handled by
* a new fragment even though the fragment is pointed to the
* copied data in the copy buffer.
*/
continue;
}
else
{
/*
 * The fragment was not mapped or was partially mapped. In
* either case we will no longer try to map the remaining
* blocks. All remaining packet data is copied.
*/
}
}
#if 0
{
/* remaining packet is too large (length more than copy buffer) */
return -1;
}
#else
#endif
/*
* If pCopyFrag is already specified then simply update the copy size.
* If not then set pCopyFrag to the next available fragment.
*/
if (pCopyFrag)
{
}
else
{
}
/* update count of bytes in the copy buffer needed for DMA sync */
}
if (copySize > 0)
{
/* DMA sync the copy buffer before sending */
if (pUM->fmCapabilities &&
{
}
if (rc != DDI_SUCCESS)
{
}
}
if (pTxPkt->num_handles == 0)
{
}
return 0;
}
/* this code is derived from that shown in RFC 1071 Section 4.1 */
{
{
/* the inner loop */
}
/* add left-over byte, if any */
if (len)
{
}
/* fold 32-bit sum to 16 bits */
while (sum >> 16)
{
}
}
/*
* Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP checksums
* and does not know anything about the UDP header and where the checksum field
* is located. It only knows about TCP. Therefore we "lie" to the hardware for
* outgoing UDP packets w/ checksum offload. Since the checksum field offset
* for TCP is 16 bytes and for UDP it is 6 bytes we pass a pointer to the
* hardware that is 10 bytes less than the start of the UDP header. This allows
* the hardware to write the checksum in the correct spot. But the hardware
* will compute a checksum which includes the last 10 bytes of the IP header.
* To correct this we tweak the stack computed pseudo checksum by folding in the
* calculation of the inverse checksum for those final 10 bytes of the IP
* header. This allows the correct checksum to be computed by the hardware.
*/
{
/* calc cksum on last UDP_TCP_CS_OFFSET_DIFF bytes of ip header */
 /* subtract the calculated cksum from the udp pseudo cksum */
/* fold 32-bit sum to 16 bits */
while (sum32 >> 16)
{
}
}
{
}
{
/* At least the MAC header... */
#if 0
if (msgSize < sizeof(struct ether_header))
{
return -1;
}
#else
#endif
/* get the Ethernet header */
/* grab the destination mac addr */
if (lso)
{
}
else if (!csFlags)
{
/* no offload requested, just check for VLAN */
{
}
return 0;
}
{
l2HdrLen = sizeof(struct ether_vlan_header);
}
else
{
l2HdrLen = sizeof(struct ether_header);
}
if (csFlags & HCK_IPV4_HDRCKSUM)
{
}
if (csFlags & HCK_PARTIALCKSUM)
{
/*
* For TCP, here we ignore the urgent pointer and size of the
* options. We'll get that info later.
*/
}
else if (lso)
{
 /* Solaris doesn't do LSO if there are options in the IP header. */
}
else
{
return 0;
}
{
/* the header is in the first block */
}
else
{
{
/* the header is in the second block */
}
else
{
/* do a pullup to make sure headers are in the first block */
pUM->txMsgPullUp++;
{
return -1;
}
}
}
/* must be IPv4 or IPv6 */
{
}
{
/* get the TCP header */
if (lso)
{
}
}
else
{
/* get the UDP header */
}
return 0;
}
int idx,
{
int rc;
{
}
/* try to recycle if no more packet available */
{
if (pUM->fmCapabilities &&
{
}
if (!numPkts)
{
return BNXE_TX_HDWRFULL;
}
/* steal the first packet from the list before reclaiming */
if (pTxPkt->num_handles)
{
}
{
}
}
#if 0
"-> FCoE L2 TX ->" : "-> L2 TX ->",
#endif
{
if (flags & PRV_TX_VLAN_TAG)
{
}
}
{
goto BnxeTxSendMblk_fail;
}
{
goto BnxeTxSendMblk_fail;
}
/* Now try to send the packet... */
/* Try to reclaim sent packets if available BDs is lower than threshold */
{
if (pUM->fmCapabilities &&
{
}
if (numPkts)
{
}
}
/*
* If there are no packets currently waiting to be sent and there are enough
* BDs available to satisfy this packet then send it now.
*/
{
if (pUM->fmCapabilities &&
{
}
if (pUM->fmCapabilities &&
{
}
if (rc == LM_STATUS_SUCCESS)
{
/* send completely successfully */
return BNXE_TX_GOODXMIT;
}
/*
* Send failed (probably not enough BDs available)...
* Continue on with putting this packet on the wait queue.
*/
}
#if 0
#endif
/*
* If we got here then there are other packets waiting to be sent or there
* aren't enough BDs available. In either case put this packet at the end
* of the waiting queue.
*/
/*
* If there appears to be a sufficient number of BDs available then make a
* quick attempt to send as many waiting packets as possible.
*/
{
return BNXE_TX_GOODXMIT;
}
/* Couldn't send anything! */
return BNXE_TX_DEFERPKT;
pTxQ->txDiscards++;
{
}
/*
* Yes GOODXMIT since mblk was free'd here and this triggers caller to
* try and send the next packet in its chain.
*/
return BNXE_TX_GOODXMIT;
}
int idx)
{
}
int cliIdx)
{
int idx;
switch (cliIdx)
{
case LM_CLI_IDX_FCOE:
break;
case LM_CLI_IDX_NDIS:
{
}
break;
default:
break;
}
}
{
int rc, j;
{
return NULL;
}
NULL,
{
return NULL;
}
size,
NULL,
&length,
{
return NULL;
}
NULL,
NULL,
&cookie,
&count)) != DDI_DMA_MAPPED)
{
return NULL;
}
for (j = 0; j < BNXE_MAX_DMA_HANDLES_PER_PKT; j++)
{
NULL,
&pTxPkt->dmaHandles[j])) !=
{
j, rc);
for(--j; j >= 0; j--) /* unwind */
{
}
return NULL;
}
}
return pTxPkt;
}
int idx)
{
int i;
pTxQ->txDiscards = 0;
{
{
sizeof(struct ether_vlan_header)));
{
/* free existing in freeTxDescQ... */
break;
}
}
}
{
{
sizeof(struct ether_vlan_header)));
{
/* free existing in freeTxDescQ... */
return -1;
}
}
}
return 0;
}
int cliIdx)
{
switch (cliIdx)
{
case LM_CLI_IDX_FCOE:
break;
case LM_CLI_IDX_NDIS:
{
{
break;
}
}
break;
default:
rc = -1;
break;
}
return rc;
}
int idx)
{
/* there could be more than originally allocated but less is bad */
if (s_list_entry_cnt(&tmpList) <
{
}
}
int cliIdx)
{
int idx;
switch (cliIdx)
{
case LM_CLI_IDX_FCOE:
break;
case LM_CLI_IDX_NDIS:
{
}
break;
default:
break;
}
}