#include "lm5710.h"
#include "command.h"
#include "bd_chain.h"
#include "ecore_common.h"
#include "mm.h"
{
//the hw_con_idx_ptr of the rcq_chain points directly to the Rx index in the USTORM part of the non-default status block
if (rcq_chain->hw_con_idx_ptr &&
{
}
return result;
}
/*******************************************************************************
* Description:
* set the rcq, rx bd and rx sge (if valid) producers
* Return:
******************************************************************************/
u16_t const iro_prod_offset,
{
u32_t const ustorm_bar_offset = IS_CHANNEL_VFDEV(pdev) ? VF_BAR0_USDM_QUEUES_OFFSET : BAR_USTRORM_INTMEM;
{
// Ugly FW solution OOO FW wants the
}
//notify the fw of the prod of the RCQ. No need to do that for the Rx bd chain.
if( rx_chain_sge )
{
}
else
{
}
}
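/* A minimal illustrative sketch (not the driver's actual implementation) of the
 * producer-update pattern the function above performs: the Rx BD and SGE producers
 * are posted before the RCQ producer, so the FW never sees a completion-queue
 * producer that runs ahead of the buffers backing it. The register pointers, helper
 * name and the volatile-write model are assumptions for illustration only. */
static void lm_rx_set_prods_sketch(volatile u16_t *bd_prod_reg,
                                   volatile u16_t *sge_prod_reg, /* NULL when the queue has no SGE chain */
                                   volatile u16_t *rcq_prod_reg,
                                   u16_t bd_prod, u16_t sge_prod, u16_t rcq_prod)
{
    *bd_prod_reg = bd_prod;       /* post the buffer producers first */
    if (sge_prod_reg)
    {
        *sge_prod_reg = sge_prod;
    }
    /* on a weakly ordered platform a write barrier belongs here */
    *rcq_prod_reg = rcq_prod;     /* publish the completion-queue producer last */
}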
/*******************************************************************************
* Description:
* rx_chain_bd is always valid; rx_chain_sge is valid only when LAH is enabled on this queue
* all if() checks are always done on rx_chain_bd, since it is always valid and the sge chain must stay consistent with it
* we verify this consistency whenever the sge chain is valid
* all bd_xxx operations will be done on both
* Return:
******************************************************************************/
{
// Verify that the BD chains are consistent
{
/* the assumption is that the number of cqes is less than or equal to the corresponding rx bds,
therefore if there are no cqes left, break */
}
else
{
rx_chain_sge = NULL;
// In TPA we don't add to the RCQ when posting buffers
}
/* Make sure we have a bd left for posting a receive buffer. */
if(packet)
{
// Insert given packet.
{
}
}
else if(!lm_bd_chain_is_empty(bd_chain_to_check))
{
}
// In TPA we won't increment rcq_prod_bseq
while(packet)
{
{
//take care of the RCQ related prod stuff.
//update the prod of the RCQ only AFTER the Rx bd!
/* These were actually produced before by fw, but we only produce them now to make sure they're synced with the rx-chain */
}
#if L2_RX_BUF_SIG
/* make sure signatures exist before and after the buffer */
DbgBreakIfFastPath(SIG(packet->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
DbgBreakIfFastPath(END_SIG(packet->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
#endif /* L2_RX_BUF_SIG */
if( cur_sge )
{
}
pkt_queued++;
{
}
else
{
// Active descriptor must sit in the same entry
}
{
break;
}
/* Make sure we have a bd left for posting a receive buffer. */
}
//update the prod of the RCQ only AFTER the Rx bd!
// This code seems unnecessary; maybe it should be deleted.
// In TPA we won't increment rcq_prod_bseq
if(pkt_queued)
{
//notify the fw of the prod
{
lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge, chain_idx);
}
else
{
lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, &LM_RXQ_CHAIN_BD(pdev, chain_idx), &LM_TPA_CHAIN_BD(pdev, chain_idx), chain_idx);
}
}
return pkt_queued;
} /* lm_post_buffers */
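/* An illustrative sketch, under the assumptions spelled out in the comments above,
 * of what posting a single receive buffer amounts to: write the buffer's DMA address
 * into the producer BD, advance the BD producer, and grow the byte-sequence counter
 * (skipped in TPA mode, where rcq_prod_bseq is not incremented). rx_bd_sketch_t, the
 * helper name and the u64_t physical-address type are hypothetical stand-ins for the
 * driver's real definitions. */
typedef struct _rx_bd_sketch_t
{
    u32_t addr_lo;
    u32_t addr_hi;
} rx_bd_sketch_t;

static void lm_post_one_buffer_sketch(rx_bd_sketch_t *prod_bd, u64_t phys_addr,
                                      u16_t *bd_prod, u32_t *prod_bseq,
                                      u32_t buf_size, u8_t b_tpa)
{
    prod_bd->addr_lo = (u32_t)(phys_addr & 0xffffffff); /* endianness conversion omitted */
    prod_bd->addr_hi = (u32_t)(phys_addr >> 32);
    (*bd_prod)++;               /* advance the Rx BD producer */
    if (!b_tpa)
    {
        *prod_bseq += buf_size; /* in TPA we don't increment rcq_prod_bseq */
    }
}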
/**
* @description
* Updates tpa_chain->last_max_cons_sge if there is a new max.
* The basic assumption is that the BD prod is always ahead of the BD
* cons.
* The subtraction tells us which index is closer to the BD prod.
* @param pdev
* @param chain_idx
* @param new_index
*
* @return STATIC void
*/
{
{
}
/*
A cyclic comparison would have been a nicer solution, but it adds a limitation on the bd ring size: (2^15) instead of 2^16.
This limitation should be enforced when allocating the TPA BD chain:
DbgBreakIf(LM_TPA_CHAIN_BD_NUM_ELEM(_pdev, chain_idx) < (2^15) );
if (CYCLIC_GT_16(sge_index, sge_tpa_chain->last_max_con))
sge_tpa_chain->last_max_con = sge_index;
*/
}
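/* A sketch of the cyclic 16-bit comparison the commented-out alternative above
 * refers to. It is only correct while producer and consumer are less than 2^15
 * apart, which is exactly the ring-size limitation the comment mentions; the helper
 * name is illustrative. */
static u8_t cyclic_gt_16_sketch(u16_t a, u16_t b)
{
    u16_t diff = (u16_t)(a - b);            /* wraps modulo 2^16 */
    return (diff != 0) && (diff < 0x8000);  /* a is ahead of b by less than half the ring */
}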
/**
* @description
*  The TPA sge consumer is incremented in 64-bit (mask element)
*  resolution.
* @param pdev
* @param chain_idx
*
* @return STATIC u32_t
*/
{
u16_t i = 0;
// Make sure bds_per_page_mask is a power of 2 that is higher than 64
{
// Just closed a page; the page-end entries must be accounted for
/* clear page-end entries */
{
}
}
else
{
// Same page
}
}
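/* An illustrative sketch of the "64-bit resolution" consumer update described in the
 * header comment above: each u64_t word of a mask array tracks 64 SGE entries, and
 * the consumer only advances past a word once every entry in it has been returned
 * (i.e. the word reads 0). All names here are hypothetical. */
static u32_t tpa_advance_cons_sketch(u64_t const *mask_array, u32_t num_words,
                                     u32_t cons_word)
{
    u32_t i;

    for (i = 0; i < num_words; i++)
    {
        if (mask_array[cons_word] != 0)
        {
            break; /* this 64-entry word still has outstanding SGEs */
        }
        cons_word = (cons_word + 1) % num_words; /* fully returned: skip 64 entries ahead */
    }
    return cons_word;
}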
/**
* @description
* Handle TPA stop code.
* @param pdev
* @param rcvd_list -Global receive list
* @param cqe
* @param chain_idx
* @param pkt_cnt
* @param queue_index
*
* @return STATIC u32_t pkt_cnt - the number of packets. The count is
*         received as an input parameter, and packets added to the
*         global list are accumulated onto it.
*/
{
lm_packet_t* pkt = tpa_chain->start_coales_bd[queue_index].packet; // Reads the TPA start coalesce array (PD_R)
u16_t i = 0;
// Total packet size given in end aggregation must be larger than the size given in start aggregation.
// The only case in which the two sizes are equal is when the stop aggregation doesn't contain data.
// Indicate to upper layer this is a TPA packet
// Updates the TPA only fields from the CQE
/* make sure packet size is larger than header size */
// Adds this packet descriptor to the global receive list (rcvd_list that is later indicated to miniport).
pkt_cnt++;
// If the TPA stop doesn't contain any new BDs.
if(0 == sge_num_elem )
{
// Total packet size given in end aggregation must be equal to the size given in start aggregation
// if the stop aggregation doesn't contain data.
return pkt_cnt;
}
{
active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[fw_sge_index]));
#if (DBG)
/************ start TPA debug code ******************************/
/************ end TPA debug code ******************************/
#endif //(DBG)
// For last SGE
pkt_cnt++;
}
#if defined(_NTDDK_)
// PREfast 28182: reviewed and suppressed; this situation shouldn't occur.
#endif // _NTDDK_
/* Here we assume that the last SGE index is the biggest */
#if defined(_NTDDK_)
#endif // _NTDDK_
// Find the first consumer entry that is a candidate to be freed, and the last one.
/* If the ring is full, enter anyway */
{
}
/* Now update the cons */
for (i = first_max_set;((i != last_max_set) || (TRUE == b_force_first_enter)); i = LM_TPA_MASK_NEXT_ELEM(pdev, chain_idx, i))
{
if (sge_tpa_chain->mask_array[i])
{
break;
}
i);
loop_cnt_dbg++;
}
return pkt_cnt;
}
/**
* @description
* Handle TPA start code.
* @param pdev
* @param pkt
* @param chain_idx
* @param queue_index
*
* @return STATIC void
*/
{
}
/**
* @description
* Set TPA start known flags.
* This is only an optimization, to avoid if's whose outcome is already known
* @param pdev
*
* @return STATIC void
*/
{
// TPA always (and only) runs above IPV4 or IPV6.
DbgBreakIf(FALSE ==
{
// In IPV4 there is always a checksum
// TPA ip cksum is always valid
}
else
{
// In IPV6 there is no checksum
}
// If there was fragmentation, it will be delivered in a regular BD (the TPA aggregation is stopped).
/* check if TCP segment */
// TPA is always above TCP.
// TCP was checked before. TCP checksum must be done by FW in TPA.
// TCP checksum must be valid in a successful TPA aggregation.
/* IN TPA tcp cksum is always validated */
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT)) << SHIFT_IS_GOOD ) );
}
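/* The SET_FLAGS line above derives the "checksum good" flag from the "is TCP" flag
 * with a single shift instead of a branch. A self-contained sketch of the same
 * trick, with hypothetical flag values standing in for the real LM_RX_FLAG_* bits
 * and SHIFT_IS_GOOD: */
#define SKETCH_FLAG_IS_TCP        (1u << 0)
#define SKETCH_FLAG_TCP_CSUM_GOOD (1u << 3)
#define SKETCH_SHIFT_IS_GOOD      3 /* bit distance from IS_TCP to TCP_CSUM_GOOD */

static void mark_tcp_csum_good_sketch(u32_t *flags)
{
    /* sets TCP_CSUM_GOOD iff IS_TCP is already set; a no-op otherwise */
    *flags |= (*flags & SKETCH_FLAG_IS_TCP) << SKETCH_SHIFT_IS_GOOD;
}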
/**
* @description
* Set regular flags.
* This is only an optimization
* @param pdev
*
* @return STATIC void
*/
STATIC void
{
/* check if IP datagram (either IPv4 or IPv6) */
{
{
/* ip cksum validated */
{
/* invalid ip cksum */
}
else
{
/* valid ip cksum */
}
}
}
// TCP or UDP segment.
{
/* check if TCP segment */
{
}
/* check if UDP segment */
{
}
}
if( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
{
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
}
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
}
else
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
}
}
else
{
/* Packets with invalid TCP options are reported with L4_XSUM_NO_VALIDATION due to a HW limitation. In this case we assume
that their checksum is OK. */
if(GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
{
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
}
else
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
}
}
}
}
{
// changed, as we don't have the fhdr infrastructure
}
{
// len_in_bytes - the length in bytes of the header
// sum - initial checksum
while (len_in_bytes > 1)
{
sum += *hdr; /* assumption: hdr points to 16-bit words; each iteration consumes 2 bytes */
len_in_bytes -= 2;
hdr++;
}
/* add left-over byte, if any */
if (len_in_bytes)
{
}
return sum;
}
{
// len - the length in words of the header
// returns true iff the checksum (already written in the header) is valid
// fold 32-bit sum to 16 bits
while (sum >> 16)
{
sum = (sum & 0xffff) + (sum >> 16); /* fold the carry back into the low 16 bits */
}
}
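/* A self-contained sketch of the standard (RFC 1071) one's-complement checksum that
 * the two partial helpers above implement: accumulate 16-bit words into a 32-bit
 * sum, fold the carries back in, and a received header is valid iff the folded sum
 * is 0xffff. Byte-order handling is deliberately omitted; the names are
 * illustrative. */
static u32_t cksum_accumulate_sketch(u16_t const *hdr, u32_t len_in_bytes, u32_t sum)
{
    while (len_in_bytes > 1)
    {
        sum += *hdr++;
        len_in_bytes -= 2;
    }
    if (len_in_bytes)              /* left-over odd byte */
    {
        sum += *(u8_t const *)hdr;
    }
    return sum;
}

static u8_t cksum_is_valid_sketch(u32_t sum)
{
    while (sum >> 16)
    {
        sum = (sum & 0xffff) + (sum >> 16); /* fold 32-bit sum into 16 bits */
    }
    return (u16_t)sum == 0xffff; /* sum taken over the header including its checksum field */
}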
{
// returns the ip header length in bytes
{
// ipv4, the lower 4 bit of the 1st byte of ip header
// contains the ip header length in unit of dword(32-bit)
}
return ip_hdr_len;
}
INLINE void
{
// encapsulated packet:
// outer mac | outer ip | gre | inner mac | inner ip | tcp
// minimum encapsulated packet size is:
// two mac headers + gre header size + tcp header size + two ipv4 headers
if (pkt->l2pkt_rx_info->total_packet_size < (2*ETHERNET_PACKET_HEADER_SIZE + 2*20 + ETHERNET_GRE_SIZE + 20))
{
return;
}
// set hdr to the outer ip header
{
}
// in case this is not a standard ETH packet (e.g. management, or in general non-ipv4/ipv6), it is certainly
// not gre so we can stop here
// if the outer header is ipv4, the protocol field is at byte offset 9
// if the outer header is ipv6, the next-header field is at byte offset 6
{
// this is not an encapsulated packet, no gre tunneling
// on ipv6 we don't support extension headers
return;
}
// get the length of the outer ip header and set hdr to the gre header
/* GRE header
| Bits 0-4 | 5-7 | 8-12 | 13-15 | 16-31 |
| C|0|K|S | Recur | Flags | Version | Protocol Type |
| Checksum (optional) | Reserved |
| Key (optional) |
| Sequence Number (optional) | */
// check that:
// checksum present bit is set to 0
// key present bit is set to 1
// sequence number present bit is set to 0
// protocol type should be always equal to 0x6558 (for encapsulating ethernet packets in GRE)
{
return;
}
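/* A sketch of the flag/protocol validation just described, assuming the bit layout
 * in the GRE diagram above (C at bit 0, K at bit 2, S at bit 3, counting from the
 * MSB of the first 16 bits) and 0x6558 as the transparent-Ethernet-bridging
 * protocol type. The helper name and raw-byte parsing are illustrative. */
static u8_t gre_hdr_ok_sketch(u8_t const *gre)
{
    u16_t flags_ver = (u16_t)(((u16_t)gre[0] << 8) | gre[1]);
    u16_t proto     = (u16_t)(((u16_t)gre[2] << 8) | gre[3]);

    if (flags_ver & 0x8000)
    {
        return 0; /* C bit set: checksum present */
    }
    if (0 == (flags_ver & 0x2000))
    {
        return 0; /* K bit clear: key not present */
    }
    if (flags_ver & 0x1000)
    {
        return 0; /* S bit set: sequence number present */
    }
    return (u8_t)(0x6558 == proto); /* protocol type: Ethernet encapsulated in GRE */
}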
// set hdr to the inner mac header
hdr += ETHERNET_GRE_SIZE;
// The first two octets of the tag are the Tag Protocol Identifier (TPID) value of 0x8100.
{
}
// set hdr to the inner ip header
// get the length of the inner ip header
{
// inner ip header is ipv4
// if the ip header checksum of the outer header is ok, then validate the ip checksum of the inner header
{
// validate the checksum
{
}
}
// check if protocol field is tcp
{
// create the pseudo header
/* | Bit offset | 0-7 | 8-15 | 16-31 |
| 0 | Source address |
| 32 | Destination address |
| 64 | Zeros | Protocol | TCP length | */
// adding 1 byte of zeros + protocol to the sum
// and adding source and destination address
// calculate the tcp length
// the TCP length field is the length of the TCP header and data (measured in octets).
}
else
{
// no tcp over ip
return;
}
}
{
// inner ip header is ipv6
// check if next header field is tcp
{
// tcp over ipv6
// create the pseudo header
/* | Bit offset | 0-7 | 8-15 | 16-23 | 24-31 |
| 0 | Source address |
| 32 | |
| 64 | |
| 96 | |
| 128 | Destination address |
| 160 | |
| 192 | |
| 224 | |
| 256 | TCP length |
| 288 | Zeros | Next header | */
// adding 3 bytes of zeros + protocol to the sum
// and adding source and destination address
// calculate the tcp length
// in the ip header: the size of the payload in octets, including any extension headers
// reduce the length of the extension headers
}
else
{
// no tcp over ip
return;
}
}
else
{
// no ipv4 or ipv6
return;
}
// set hdr to the tcp header
hdr += inner_ip_hdr_len;
// calculate the checksum of the rest of the packet
// validate the checksum
{
}
else
{
}
}
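/* A sketch of the IPv4 TCP pseudo-header accumulation the comments above walk
 * through, built on the cksum_accumulate_sketch() helper defined earlier in this
 * file. Offsets follow the standard IPv4 header layout (source address at byte 12,
 * destination at byte 16); byte-order handling is again omitted and the helper name
 * is illustrative. */
static u32_t tcp_pseudo_hdr_sum_sketch(u8_t const *ip_hdr, u16_t ip_total_len,
                                       u16_t ip_hdr_len)
{
    u32_t sum     = 0;
    u16_t tcp_len = (u16_t)(ip_total_len - ip_hdr_len); /* TCP header + data, in octets */

    /* source and destination addresses: 8 bytes starting at offset 12 */
    sum = cksum_accumulate_sketch((u16_t const *)(ip_hdr + 12), 8, sum);
    sum += 6;       /* one zero byte + the protocol number (TCP = 6) */
    sum += tcp_len; /* the pseudo header's TCP length field */
    return sum;
}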
/*******************************************************************************
* Description:
* Here the RCQ chain is the chain coordinated with the status block, that is,
* the index in the status block describes the RCQ and NOT the rx_bd chain as in
* the case of Teton. We run on the delta between the new consumer index of the RCQ
* which we get from the sb and the old consumer index of the RCQ.
* In cases of both slow and fast path, the consumer of the RCQ is always incremented.
*
* The assumption which we must stick to all the way is: RCQ and Rx bd chain
* have the same size at all times! Otherwise, so help us Alan Bertkey!
*
* Return:
******************************************************************************/
struct _sp_cqes_info *sp_cqes)
{
lm_rx_chain_t* rxq_chain = &LM_RXQ(pdev, chain_idx); //get a hold of the matching Rx bd chain according to index
lm_rcq_chain_t* rcq_chain = &LM_RCQ(pdev, chain_idx); //get a hold of the matching RCQ chain according to index
/* make sure to zeroize the sp_cqes... */
/* Get the new consumer idx. The bd's between rcq_new_idx and rcq_old_idx
* are bd's containing receive packets.
*/
/* The consumer index of the RCQ (and only the RCQ) may stop at the end of a page boundary. In
 * this case, we need to advance it to the start of the next page.
* In here we do not increase the cons_bd as well! this is since we're dealing here
* with the new cons index and not with the actual old one for which, as we progress, we
* need to maintain the bd_cons as well.
*/
if((cq_new_idx & lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain)) == lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain))
{
}
//there is no change in the RCQ consumer index so exit!
if (cq_old_idx == cq_new_idx)
{
return pkt_cnt;
}
while(cq_old_idx != cq_new_idx)
{
//get hold of the cqe, and find out what its type corresponds to
//update the cons of the RCQ and the bd_prod pointer of the RCQ as well!
//this holds both for slow and fast path!
cqe_type = GET_FLAGS_WITH_OFFSET(cqe->ramrod_cqe.ramrod_type, COMMON_RAMROD_ETH_RX_CQE_TYPE, COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT);
//the cqe is a ramrod, so do the ramrod and recycle the cqe.
//TODO: replace this with the #defines: 1- eth ramrod, 2- toe init ofld ramrod
switch(cqe_type)
{
{
/* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
* ignore toe completions on L2 ring - initiate offload */
{
{
DbgBreakMsgFastPath("too many spe completed\n");
/* we shouldn't get here - there is something very wrong if we did... in this case we will risk
* completing the ramrods - even though we're holding a lock!!! */
/* bugbug... */
return pkt_cnt;
}
}
//update the prod of the RCQ - by this, we recycled the CQE.
#if 0
//in case of ramrod, pop out the Rx bd and push it to the free descriptors list
#endif
break;
}
case RX_ETH_CQE_TYPE_ETH_START_AGG: //Fall through case
{ //enter here in case the cqe is a fast path type (data)
#if DBG
{
return 0;
}
#endif // DBG
#if L2_RX_BUF_SIG
/* make sure signatures exist before and after the buffer */
DbgBreakIfFastPath(END_SIG(pkt->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
#endif /* L2_RX_BUF_SIG */
if( rx_chain_sge )
{
}
#if defined(_NTDDK_)
// PREfast 28182: reviewed and suppressed; this situation shouldn't occur.
#endif // _NTDDK_
/* Advance the rx_old_idx to the start bd_idx of the next packet. */
//cq_old_idx = pkt->u1.rx.next_bd_idx;
{
// total_packet_size is only known in stop_TPA
pkt,
&(cqe->fast_path_cqe),
pkt,
}
else
{
lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len), chain_idx);
// In regular mode pkt->l2pkt_rx_info->size == pkt->l2pkt_rx_info->total_packet_size
// We need total_packet_size for Dynamic HC so we don't have to check there whether we are in RSC or regular flow.
/* make sure packet size is larger than header size and smaller than max packet size of the specific L2 client */
DbgBreakIfFastPath((pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE) || (pkt->l2pkt_rx_info->total_packet_size > MAX_CLI_PACKET_SIZE(pdev, chain_idx)));
// ShayH: packet->size isn't used anymore by Windows; we put the data directly in l2pkt_rx_info->size and l2pkt_rx_info->total_packet_size.
{
DbgBreakIfFastPath( ETH_FP_CQE_RAW != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL ) >>
//optimized
/* make sure packet size is larger than header size and smaller than max packet size of the specific L2 client */
// TODO_OOO - check with flag
ASSERT_STATIC( sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) == sizeof(cqe->fast_path_cqe.sgl_or_raw_data.raw_data) );
mm_memcpy( pkt->u1.rx.sgl_or_raw_data.raw_data, cqe->fast_path_cqe.sgl_or_raw_data.raw_data, sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) );
}
else
{
DbgBreakIfFastPath( ETH_FP_CQE_REGULAR != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL )>>
}
&(cqe->fast_path_cqe),
pkt,
{
// SW rx checksum for gre encapsulated packets
}
pkt_cnt++;
}
{
}
{
/* fw always sets ETH_FAST_PATH_RX_CQE_VLAN_TAG_FLG and passes the vlan tag when
a packet with vlan arrives, but it removes the vlan from the packet only when
it is configured to remove vlan using params.vlan_removal_enable
*/
{
}
}
#if defined(_NTDDK_)
#endif // _NTDDK_
#if DBG
{
}
{
}
{
}
#endif // DBG
/* We used to assert that if we got the PHY_DECODE_ERROR it was always a result of DROP_MAC_ERR. Since we don't configure
 * DROP_MAC_ERR anymore, we don't expect this flag to ever be on. */
DbgBreakIfFastPath( GET_FLAGS(cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG) );
break;
}
{ // TPA stop
&(cqe->end_agg_cqe),
//update the prod of the RCQ - by this, we recycled the CQE.
break;
}
case MAX_ETH_RX_CQE_TYPE:
default:
{
DbgBreakMsg("CQE type not supported");
}
}
}
// TODO: Move index update to a more suitable place
if( rx_chain_sge )
{
}
//notify the fw of the prod
lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge ,chain_idx);
return pkt_cnt;
} /* lm_get_packets_rcvd */
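/* A sketch of the page-boundary skip applied to the RCQ consumer near the top of
 * the function above, assuming (as that condition implies) that
 * usable_bds_per_page is of the form 2^n - 1, so the AND detects an index that
 * landed on the page-end entries; those next-page-pointer BDs must be hopped over.
 * Names are illustrative. */
static u16_t rcq_skip_page_end_sketch(u16_t idx, u16_t usable_bds_per_page,
                                      u16_t page_end_bds)
{
    if ((idx & usable_bds_per_page) == usable_bds_per_page)
    {
        idx = (u16_t)(idx + page_end_bds); /* jump past the next-page-pointer entries */
    }
    return idx;
}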
struct _lm_device_t *pdev,
struct _sp_cqes_info *sp_cqes)
{
}
return LM_STATUS_SUCCESS;
}
/* called by the um whenever packets are returned by the client;
the rxq lock is taken by the caller */
void
u32_t const returned_bytes)
{
/* aggregate updates over PCI */
/* HC_RET_BYTES_TH = min(l2_hc_threshold0 / 2 , 16KB) */
#define HC_RET_BYTES_TH(pdev) (((pdev)->params.hc_threshold0[SM_RX_ID] < 32768) ? ((pdev)->params.hc_threshold0[SM_RX_ID] >> 1) : 16384)
/* TODO: Future: Add #updatesTH = 20 */
/* time to update fw ? */
{
/*
!!DP
The test below is to disable dynamic HC for the iSCSI chains
*/
// TODO: VF dhc
if (qidx < LM_MAX_RSS_CHAINS(pdev) && IS_PFDEV(pdev)) /* should be fine, if not, you can go for less robust case of != LM_CLI_RX_CHAIN_IDX(pdev, LM_CLI_IDX_ISCSI) */
{
/* There are HC_USTORM_SB_NUM_INDICES (4) index values for each SB to set and we're using the corresponding U indexes from the microcode consts */
}
}
}
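/* A sketch of the byte-return coalescing idea behind HC_RET_BYTES_TH above:
 * returned bytes accumulate locally, and the status-block index is written to the
 * FW only once the threshold is crossed, aggregating updates over PCI. The
 * structure and helper are hypothetical. */
typedef struct _ret_bytes_sketch_t
{
    u32_t ret_bytes;     /* bytes returned by the client so far */
    u32_t last_reported; /* value last written to the FW */
} ret_bytes_sketch_t;

static u8_t hc_should_update_fw_sketch(ret_bytes_sketch_t *s, u32_t returned_bytes,
                                       u32_t threshold)
{
    s->ret_bytes += returned_bytes;
    if ((u32_t)(s->ret_bytes - s->last_reported) >= threshold)
    {
        s->last_reported = s->ret_bytes;
        return 1; /* caller writes the updated count to the FW now */
    }
    return 0;
}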