/*******************************************************************************
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Copyright 2014 QLogic Corporation
* The contents of this file are subject to the terms of the
* QLogic End User License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the License at
* See the License for the specific language governing permissions
* and limitations under the License.
*
*
* Module Description:
* This file contains the implementation of slow-path operations
* for L2 + Common. It uses ecore_sp_verbs in most cases.
*
******************************************************************************/
#include "lm5710.h"
// disable warning C4127 (conditional expression is constant)
// for this file (relevant when compiling with W4 warning level)
#ifndef __LINUX
#pragma warning( disable : 4127 )
#endif /* __LINUX */
#include "mm.h"
#include "context.h"
#include "command.h"
#include "bd_chain.h"
#include "ecore_common.h"
#include "ecore_sp_verbs.h"
#include "debug.h"
{
// Prepare ramrod data
// Send Empty ramrod.
cid,
*(u64_t *)&ramrod_data );
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
/* curr_state may be NULL in case a wait isn't required */
if (curr_state != NULL)
{
{
DbgBreakMsg("lm_empty_ramrod_eth: lm_wait_state_change failed");
}
}
return lm_status;
} /* lm_empty_ramrod_eth */
{
switch (ecore_status)
{
case ECORE_SUCCESS:
break;
case ECORE_TIMEOUT:
break;
case ECORE_INVAL:
break;
case ECORE_BUSY:
break;
case ECORE_NOMEM:
break;
case ECORE_PENDING:
break;
case ECORE_EXISTS:
break;
case ECORE_IO:
break;
default:
DbgBreakMsg("Unknwon ecore_status_t");
break;
}
return lm_status;
}
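/* A sketch of the mapping the gutted switch above performs. The specific
 * lm_status_t counterparts below are assumptions based on the conventional
 * lm/ecore status pairs, not taken from this file: */
#if 0 /* illustrative only */
static lm_status_t lm_ecore_status_to_lm_status_sketch(ecore_status_t ecore_status)
{
    switch (ecore_status)
    {
    case ECORE_SUCCESS: return LM_STATUS_SUCCESS;
    case ECORE_TIMEOUT: return LM_STATUS_TIMEOUT;
    case ECORE_INVAL:   return LM_STATUS_INVALID_PARAMETER;
    case ECORE_BUSY:    return LM_STATUS_BUSY;     /* assumed counterpart */
    case ECORE_NOMEM:   return LM_STATUS_RESOURCE; /* assumed counterpart */
    case ECORE_PENDING: return LM_STATUS_PENDING;
    case ECORE_EXISTS:  return LM_STATUS_EXISTING_OBJECT;
    case ECORE_IO:      return LM_STATUS_FAILURE;
    default:            return LM_STATUS_FAILURE;
    }
}
#endif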
{
DbgBreakIf(!pdev);
{
return FALSE;
}
if ( eq_chain->hw_con_idx_ptr && (mm_le16_to_cpu(*eq_chain->hw_con_idx_ptr) != lm_bd_chain_cons_idx(&eq_chain->bd_chain)))
{
}
return result;
}
{
{
DbgBreakMsg("lm_eth_init_client_init_general_data failed ");
return LM_STATUS_FAILURE;
}
/* General Structure */
general->statistics_counter_id = (general->statistics_en_flg)? stats_cnt_id : DISABLE_STATISTIC_COUNTER_ID_VALUE;
// Don't-care data for non-CoS clients
{
// FW requires a valid COS number
}
else
{
}
* does this need to be done even in lm_vf.c's lm_vf_pf_acquire_msg
* function? Also, how do we handle the check in lm_pf_vf_check_compatibility?
*/
else
general->fp_hsi_ver = ETH_FP_HSI_VER_1; // default is v1; HSI is v2 only when the conditions above hold
return lm_status;
}
STATIC void
{
// TPA is enabled at run time (TPA is disabled at init time)
rx->max_tpa_queues = 0;
rx->cache_line_alignment_log_size = (u8_t)LOG2(CACHE_LINE_SIZE/* TODO mm_get_cache_line_alignment()*/);
{
}
else
{
if(IS_MF_AFEX_MODE(pdev))
{
// In NIV we must remove default VLAN.
}
}
rx->cqe_page_base.lo = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->rx_info.rcq_chain[cid].bd_chain, 0).as_u32.low);
rx->cqe_page_base.hi = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->rx_info.rcq_chain[cid].bd_chain, 0).as_u32.high);
{
/* TODO: for now... doesn't have to be leading cid, anyone can get the approx mcast... */
}
if(rx_chain_sge)
{
/* override bd_buff_size if we are in LAH enabled mode */
rx->sge_buff_size = mm_cpu_to_le16(MAX_L2_CLI_BUFFER_SIZE(pdev, cid) - (u16_t)pdev->params.l2_cli_con_params[cid].lah_size - (u16_t)pdev->params.rcv_buffer_offset - CACHE_LINE_SIZE);
}
else
{
rx->max_bytes_on_bd = mm_cpu_to_le16(MAX_L2_CLI_BUFFER_SIZE(pdev, cid) - (u16_t)pdev->params.rcv_buffer_offset - CACHE_LINE_SIZE);
rx->max_sges_for_packet = 0;
rx->sge_buff_size = 0;
}
{
}
{
}
{
}
{
}
else
{
DbgBreakIf(1);
}
// Avoiding ring-threshold verification is aimed at eVBD,
// which receives its buffers and SGEs only after client
// setup is completed.
/* FC */
{
low_thresh = mm_cpu_to_le16(min(desired_cqe_bd_low_thresh, (u16_t)((LM_RXQ(pdev, cid).common.desc_cnt)/4)));
high_thresh = mm_cpu_to_le16(min(desired_cqe_bd_high_thresh, (u16_t)((LM_RXQ(pdev, cid).common.desc_cnt)/2)));
rx->sge_pause_thr_low = 0;
rx->sge_pause_thr_high = 0;
}
}
STATIC void
{
/* Status block index init we do for Rx + Tx together so that we ask which cid we are only once */
{
}
{
// The OOO CID doesn't really have a TX client; this is
// don't-care data for the FW.
}
{
}
{
if (IS_MF_AFEX_MODE(pdev))
{
}
}
{
// This isn't really the cid, it is the chain index
}
else
{
DbgBreakIf(1);
}
/* TX Data (remaining , sb index above...) */
/* ooo cid doesn't have a tx chain... */
{
tx->tx_bd_page_base.hi = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->tx_info.chain[cid].bd_chain, 0).as_u32.high);
tx->tx_bd_page_base.lo = mm_cpu_to_le32(lm_bd_chain_phys_addr(&pdev->tx_info.chain[cid].bd_chain, 0).as_u32.low);
}
/* Tx Switching... */
{
}
else
{
}
tx->refuse_outband_vlan_flg = 0;
// For encapsulated packets:
// the hw ip header is the inner ip header; the hw will increment the inner ip id.
// the fw ip header is the outer ip header; this means that if the outer ip header is ipv4, its ip id will not be incremented.
// In case of non-LSO encapsulated packets with L4 checksum offload, the pseudo checksum location is on the BD.
// In case of non-LSO encapsulated packets with outer L3 ip checksum offload, the pseudo checksum location is on the BD.
}
{
/* If MP is enabled, we need to take care of tx-only connections, which use the
* regular connection client-idx... the rest are split into regular eth
* and vfs... */
if (MM_DCB_MP_L2_IS_ENABLE(pdev))
{
{
return client_info_idx;
}
}
#ifdef VF_INVOLVED
{
}
else
#endif
{
}
return client_info_idx;
}
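/* Note on the MP split above: tx-only connections deliberately reuse the
 * client-info index of the regular eth connection they extend; see
 * lm_eth_init_tx_queue_data below, which relies on this to avoid allocating
 * a separate client_info for tx-only connections. */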
{
/* If MP is enabled, we need to take care of tx-only connections, which use the
* regular connection client-idx... the rest are split into regular eth
* and vfs... */
if (MM_DCB_MP_L2_IS_ENABLE(pdev))
{
{
}
}
#ifdef VF_INVOLVED
{
}
else
#endif
{
}
return fw_client_id;
}
{
{
DbgBreakMsg("lm_eth_init_tx_queue_data: the chain isn't TX only " );
return LM_STATUS_FAILURE;
}
/* a bit redundant, but just so we're clear on terminology... */
/* Since ramrods are sent sequentially for tx only clients, and then regular client, and
* we won't have a case of these being sent in parallel, we can safely use the client_init_data_virt
* of the regular eth connection for the tx only connection.
* This way, we don't need to allocate client_info for tx only connections.
*/
{
return LM_STATUS_FAILURE;
}
/* General Structure */
chain_id);
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
/* Tx Data */
sb_id);
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_FAILURE;
}
{
return LM_STATUS_FAILURE;
}
/* General Structure */
cid);
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
/* Rx Data */
&(client_init_data_virt->rx),
cid,
sb_id);
/* Tx Data */
&(client_init_data_virt->tx),
cid,
sb_id);
return LM_STATUS_SUCCESS;
}
/**
* @assumptions: STRONG ASSUMPTION: This function is not
* called for SRIOV / MP connections...
*/
)
{
struct client_update_ramrod_data * client_update_data_virt = pdev->client_info[client_idx].update.data_virt;
{
return LM_STATUS_FAILURE;
}
// We send a client update ramrod in any case; we don't optimize this flow.
// Client setup may have already taken the correct NIV value, but the ramrod is sent anyway.
if((LM_CON_STATE_OPEN != con_state) &&
{
// Client is not in a state where it can receive the ramrod
return LM_STATUS_ABORTED;
}
/* We don't expect this function to be called for non eth regular connections.
* If we hit this assert it means we need support for SRIOV + AFEX
*/
{
return LM_STATUS_FAILURE;
}
client_update_data_virt->func_id = FUNC_ID(pdev); /* FIXME: VFID needs to be given here for VFs... */
cid,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
lm_status = lm_wait_state_change(pdev, &pdev->client_info[client_idx].update.state, LM_CLI_UPDATE_RECV);
return lm_status;
}
lm_status_t lm_establish_eth_con(struct _lm_device_t *pdev, u8_t const chain_idx, u8_t sb_id, u8_t attributes_bitmap)
{
{
}
{
/* TODO: VF??? */
{
// Regular client or OOO CID
}
else
{
// TX only client or FWD
}
if(LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg("lm_establish_eth_con: lm_eth_init_client_init_data or lm_eth_init_tx_queue_data failed \n ");
{
}
return lm_status;
}
}
/* When we set up the RCQ ring we should advance the CQ cons by MAX_NUM_RAMRODS; the FWD CID is the only connection without an RCQ,
 * therefore we skip this operation for forward */
{
{
}
}
else
{
{
}
{
}
else
{
DbgBreakMsg(" lm_establish_eth_con: cmd_id not set ");
{
}
return LM_STATUS_FAILURE;
}
}
// Moving to the ramrod-sent state must be done before the ramrod is actually sent
{
cid,
type,
}
#ifdef VF_INVOLVED
else
{
}
#endif
if (lm_status != LM_STATUS_SUCCESS)
{
{
}
return lm_status;
}
{
}
return lm_status;
} /* lm_establish_eth_con */
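/* Flow summary, reconstructed from the fragments above: prepare the client
 * init data (or the tx-queue data for tx-only / FWD chains), advance the RCQ
 * consumer for connections that own an RCQ, pick the setup cmd_id, move the
 * connection to the ramrod-sent state *before* posting (the completion may
 * race us), post the ramrod, and finally wait for the state change if
 * required. */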
/**
* @description
* Send all the ramrods and wait for their return.
* @param pdev
* @param chain_idx_base
*
* @return lm_status_t
* status success is returned if all the ramrods were received.
* Status failure is returned if not all the ramrods were
* received.
*/
{
if(LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg(" Ramrod send failed ");
return lm_status;
}
return lm_status;
}
/**
* @description
* Update the ramrod IPVX according to the current and required
* state.
* @param pdev
* @param chain_idx
* @param vbd_rsc_ipvx_bit - The VBD TPA ipvX bit.
*
* @return STATIC u8_t - The HSI IPVX eth_tpa_update_command
*/
{
// Add ramrod send code
{
}
{
}
else
{
}
return ramrod_ipvx;
}
/**
* @description
* Fill and send TPA ramrod.
* @param pdev
* @param chain_idx
*/
{
// Add ramrod send code
{
DbgBreakMsg("lm_tpa_send_ramrod : invalid paramters");
return LM_STATUS_FAILURE;
}
/* TPA mode to use (LRO or GRO) */
/* maximal TPA queues allowed for this client */
/* The maximal number of SGEs that can be used for one packet. Depends on MTU and SGE size. Must be 0 if SGEs are disabled. */
tpa_chain->ramrod_data_virt->max_sges_for_packet = DIV_ROUND_UP_BITS(pdev->params.l2_cli_con_params[chain_idx].mtu, LM_TPA_PAGE_BITS);
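/* Worked example, assuming DIV_ROUND_UP_BITS(x, bits) == (x + (1 << bits) - 1) >> bits
 * and 4KB SGE pages (LM_TPA_PAGE_BITS == 12), both assumptions: a 9014-byte
 * MTU gives (9014 + 4095) >> 12 = 3 SGEs per aggregated packet. */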
// Avoiding ring-threshold verification is aimed at eVBD,
// which receives its buffers and SGEs only after client
// setup is completed.
/* Size of the buffers pointed by SGEs */
/* maximal size for the aggregated TPA packets, reported by the host */
ASSERT_STATIC((LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE) < MAX_VARIABLE_VALUE(tpa_chain->ramrod_data_virt->max_agg_size));
//u32_t sge_page_base_lo /* The address to fetch the next sges from (low) */;
tpa_chain->ramrod_data_virt->sge_page_base_lo = mm_cpu_to_le32(tpa_chain_bd->bd_chain_phy.as_u32.low);
//u32_t sge_page_base_hi /* The address to fetch the next sges from (high) */;
tpa_chain->ramrod_data_virt->sge_page_base_hi = mm_cpu_to_le32(tpa_chain_bd->bd_chain_phy.as_u32.high);
//u16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
//u16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
return lm_status;
}
/**
* @description
* Run on all RSS chains and send the ramrod on each one.
* @param pdev
* @param chain_idx_base
*/
{
// Number of ramrods expected to be received
#ifdef VF_INVOLVED
{
if (lm_status == LM_STATUS_SUCCESS) {
{
if (tpa_info->update_cookie)
{
}
}
}
}
else
#endif
{
{
if(LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg(" Ramrod send failed ");
break;
}
}
}
return lm_status;
}
/**
* @description
* Fill and send function_update_data ramrod.
* @param pdev
*/
{
// check that we are not in the middle of handling another encapsulated packets offload set request (1 pending)
{
return lm_status;
}
// remember this for mm_set_done call (called on completion of the ramrod)
// mm_set_done will free memory of query_set_info
// GRE config for the function will be updated according to the gre_tunnel_rss and nvgre_clss_en fields
{
data->tunn_clss_en = 0;
}
else
{
}
0, //Don't care
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return LM_STATUS_PENDING;
}
/**
* This function is a general EQ ramrod function that waits
* synchronously for its completion.
*
* @param pdev
* cmd_id -The ramrod command ID
* data -ramrod data
* curr_state - what to poll on (the current state)
* new_state - what we're waiting for.
* @return lm_status_t SUCCESS / TIMEOUT on waiting for
* completion
*/
{
0, //Don't care
data );
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return lm_status;
} /* lm_eq_ramrod_post_sync */
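/* The post-then-poll idiom this wrapper implements, sketched (the lm_sq_post
 * parameter order is assumed from the call fragments in this file):
 *
 *   *p_curr_state = ramrod_posted_state;    // must precede the post
 *   lm_status = lm_sq_post(pdev, 0, cmd_id, CMD_PRIORITY_NORMAL,
 *                          NONE_CONNECTION_TYPE, data);
 *   if (lm_status == LM_STATUS_SUCCESS)
 *       lm_status = lm_wait_state_change(pdev, p_curr_state, new_state);
 */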
static lm_status_t
const u8_t send_ramrod)
{
DbgMessage(pdev, WARN/*INFORMi|INFORMl2sp*/, "#lm_halt_eth_con cid=%d fw_client_idx=%d client_info=%d(%d)\n",cid, fw_client_idx,
{
return LM_STATUS_FAILURE;
}
{
}
if(FALSE == send_ramrod)
{
{
}
return LM_STATUS_SUCCESS;
}
// Send ramrod
/* convert halt_ramrod_data to a big-endian friendly format */
cid,
{
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return lm_status;
} /* lm_halt_eth_con */
{
{
DbgBreak();
return LM_STATUS_FAILURE;
}
{
return LM_STATUS_SUCCESS; /* Not supported for VFs */
}
cid,
0);
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return lm_status;
}
{
/* VIA PF!!!!!!*/
{
DbgBreak();
return LM_STATUS_FAILURE;
}
cid,
0);
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return lm_status;
} /* lm_cfc_del_eth_con */
{
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
return LM_STATUS_SUCCESS;
}
{
/* halt and terminate ramrods (lm_{halt,terminate}_eth_con) are not sent for the forward channel connection;
   therefore we just change the state from OPEN to TERMINATE, and send the cfc del ramrod */
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
return LM_STATUS_SUCCESS;
}
const u8_t send_halt_ramrod)
{
if (lm_fl_reset_is_inprogress(pdev)) {
return LM_STATUS_SUCCESS;
}
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev)) {
return lm_status;
}
#endif
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (MM_DCB_MP_L2_IS_ENABLE(pdev))
{
}
else
{
}
if (cid < max_eth_cid) {
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
} /* lm_eth_wait_state_change */
/**lm_func_update_post_command Post a func_update ramrod and
* wait for its completion.
* Must be called from a work item.
*
* @param pdev the device
* @param command the ramrod cmd_id (NONE_CONNECTION_TYPE is
* assumed)
* @param data the ramrod data
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* failure code on failure.
*/
{
DbgBreakIf(pdev->slowpath_info.l2mp_func_update_ramrod_state != L2MP_FUNC_UPDATE_RAMROD_NOT_POSTED);
lm_status = lm_eq_ramrod_post_sync(pdev,RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, data_phys.as_u64,CMD_PRIORITY_NORMAL,&pdev->slowpath_info.l2mp_func_update_ramrod_state, L2MP_FUNC_UPDATE_RAMROD_POSTED, L2MP_FUNC_UPDATE_RAMROD_COMPLETED);
if (LM_STATUS_SUCCESS != lm_status)
{
goto _exit;
}
return lm_status;
}
/*********************** NIV **************************************/
{
(NIV_RAMROD_NOT_POSTED == curr_state));
lm_status = lm_eq_ramrod_post_sync(pdev,command,data,CMD_PRIORITY_NORMAL,&pdev->slowpath_info.niv_ramrod_state, niv_ramrod_state, NIV_RAMROD_COMPLETED);
if (LM_STATUS_SUCCESS != lm_status)
{
goto _exit;
}
return lm_status;
}
{
data->lb_mode_en = FALSE; //if a VIF update was received it means we're connected to a switch, so we're not in LB mode.
lm_status = lm_niv_post_command(pdev,RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, data_phys.as_u64, NIV_RAMROD_VIF_UPDATE_POSTED);
return lm_status;
}
{
lm_status = lm_niv_post_command(pdev,RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, *((u64_t*)(&data)), NIV_RAMROD_VIF_LISTS_POSTED);
return lm_status;
}
/****************** CLASSIFICATION ********************************/
/**
*
* @param pdev
* @param mac_addr - array of size ETHERNET_ADDRESS_SIZE
* containing a valid mac addresses
* @param vlan_tag - vlan tag to be set with mac address
* @param chain_idx - which chain to set the mac on. Chain_idx
* will be transformed to a l2 client-id
* @param cookie - will be returned to MM layer on completion
* @param set - set or remove mac address
* @param is_encap_inner_mac_filter - set if we filter according
* to inner mac (VMQ offload of
* encapsulated packets)
*
* @return lm_status_t SUCCESS on synchronous success, PENDING
* if completion will be called later, FAILURE o/w
*/
void* cookie,
{
{
DbgBreakMsg("lm_set_mac_addr: invalid params\n");
return LM_STATUS_INVALID_PARAMETER;
}
if (lm_reset_is_inprogress(pdev))
{
return LM_STATUS_SUCCESS;
}
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev))
{
lm_status = lm_vf_pf_set_q_filters(pdev, LM_CLI_IDX_NDIS, cookie, Q_FILTER_MAC, mac_addr, ETHERNET_ADDRESS_SIZE,vlan_tag, b_set);
return lm_status;
}
#endif
DbgMessage(pdev, WARN/*INFORMl2sp*/, "lm_set_mac_addr: b_set=%d chain_idx=%d!!!\n", b_set, chain_idx);
/* Prepare ramrod params to be sent to ecore layer... */
if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER)
{
mm_memcpy( ramrod_param.user_req.u.vlan_mac.mac, mac_addr, sizeof(ramrod_param.user_req.u.vlan_mac.mac));
}
else
{
}
/* Set the cookie BEFORE sending the ramrod!!!! ramrod may complete in the mean time... */
switch (lm_cli_idx)
{
case LM_CLI_IDX_NDIS:
break;
case LM_CLI_IDX_ISCSI:
break;
default:
/* Nothing... */
break;
}
if( LM_STATUS_PENDING != lm_status )
{
}
return lm_status;
}
/**
* Setting a vlan differs from setting a mac address and is
* therefore implemented in a separate function. It requires
* deleting a previous vlan tag if one was set, and changing
* rx-filtering rules. The change in rx-filtering rules has
* to do with "any-vlan". If no vlan is set we want
* "any-vlan", otherwise we want to remove the any-vlan; this
* requires another ramrod.
* The way this is implemented is as follows:
* 1. Post the vlan commands without
*    executing them (sp-verbs feature: don't send EXEC)
* 2. If need to set rx-mask, turn on a flag that will
*    be checked on completion of rx-mask; in
*    lm_eq_handle_rx_filter.., we look at this flag and
*    if it's on, execute the vlan pending command
*    (sp-verbs CONT feature).
*
* @param pdev
* @param vlan_tag - vlan tag to be set
* @param chain_idx - which chain to set the vlan on. Chain_idx
* will be transformed to a l2 client-id
* @param cookie - will be returned to MM layer on completion
* @param set - set or remove vlan
*
* @return lm_status_t SUCCESS on syncrounous success, PENDING
* if completion will be called later, FAILURE o/w
*/
void* cookie,
{
if (lm_reset_is_inprogress(pdev))
{
return LM_STATUS_SUCCESS;
}
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev))
{
/* 9/22/11 Michals: should we support this for VFs??? */
return LM_STATUS_FAILURE;
}
#endif
/* Prepare ramrod params to be sent to ecore layer... */
if (CHIP_IS_E1x(pdev))
{
return LM_STATUS_FAILURE;
}
{
return LM_STATUS_EXISTING_OBJECT;
}
/* Set the cookie BEFORE sending the ramrod!!!! ramrod may complete in the mean time... */
if (b_set)
{
/* If we're just setting vlan, check if we need to delete the old one first... */
{
/* don't really care about the status... */
}
/* Prepare for the setting... */
}
else
{
}
/* Determine if rx-mask needs to be changed as a result of this update. */
/* If we don't need to change the mask we need to execute commands now, otherwise they'll
be executed from rx filter completion */
if (!b_set_rx_mask )
{
}
if( (LM_STATUS_PENDING != lm_status) )
{
return lm_status;
}
/* see function description to understand this better... */
if (b_set_rx_mask)
{
}
return lm_status;
}
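/* The two-step flow described in the function header, sketched. RAMROD_EXEC
 * and RAMROD_CONT are the ecore_sp_verbs ramrod_flags assumed here:
 *   1. post the vlan-mac command(s) without RAMROD_EXEC, so they are queued
 *      but not executed;
 *   2. if the rx-mask must change, send the rx-mode ramrod; its completion
 *      handler (lm_eq_handle_rx_filter...) sees the pending flag and issues
 *      the queued vlan command with RAMROD_CONT. */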
/**
* Move a filter from one chain idx to another atomically
*
* @param pdev
*
* @param mac_addr - array of size ETHERNET_ADDRESS_SIZE
* containing a valid mac addresses
* @param vlan_tag - vlan tag to be set with mac address
* @param src_chain_idx - which chain to remove the mac from
* @param dest_chain_idx - which chain to set the mac on
* @param cookie - will be returned to MM layer on completion
*
* @return lm_status_t
*/
{
{
DbgBreakMsg("lm_move_mac_addr: invalid params\n");
return LM_STATUS_INVALID_PARAMETER;
}
if (lm_reset_is_inprogress(pdev))
{
return LM_STATUS_SUCCESS;
}
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev))
{
DbgBreakMsg("lm_move_mac_addr: Move not expected on VF\n");
return lm_status;
}
#endif
DbgMessage(pdev, INFORMl2sp, "lm_move_mac_addr: [%d]:[%d]:[%d]:[%d]:[%d]:[%d] set=%d chain_idx=%d!!!\n",
/* Prepare ramrod params to be sent to ecore layer... */
if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER)
{
mm_memcpy( ramrod_param.user_req.u.vlan_mac.mac, mac_addr, sizeof(ramrod_param.user_req.u.vlan_mac.mac));
}
else
{
}
/* Set the cookie BEFORE sending the ramrod!!!! ramrod may complete in the mean time... */
if ( LM_STATUS_PENDING == lm_status )
{
/* FIXME: VF MACS in NIG stay??*/
}
else
{
}
return lm_status;
}
/**
* @Description
* Waits for the last set-mac called to complete, could be
* set-mac or set-mac-vlan...
* @param pdev
* @param chain_idx - the same chain-idx that the set-mac was
* called on
*
* @return lm_status_t SUCCESS or TIMEOUT
*/
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (!CHIP_IS_E1(pdev))
{
}
return lm_status;
}
/**
* @Description
* Waits for the last set-vlan called to complete
* @param pdev
* @param chain_idx - the same chain-idx that the set-vlan was
* called on
*
* @return lm_status_t SUCCESS or TIMEOUT
*/
{
if (!CHIP_IS_E1x(pdev))
{
}
return lm_status;
}
/**
* Description
* Clears all the mac addresses that are set on a certain cid...
* @param pdev
* @param chain_idx - which chain_idx to clear macs on...
*
* @assumptions: Called in PASSIVE_LEVEL!! function sleeps...
* @return lm_status_t
*/
{
{
/* mac_vlan_obj only relevant for chips that are not E1... */
{
break;
}
{
{
}
ecore_status = vlan_mac_obj->delete_all( pdev, ramrod_params.vlan_mac_obj, &ramrod_params.user_req.vlan_mac_flags, &ramrod_params.ramrod_flags );
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
}
return lm_status;
}
/**
* Description
* Restores all the mac addresses that are set on a certain
* cid (after sleep / hibernate...)
* @param pdev
* @param chain_idx - which chain_idx to clear macs on...
*
* @assumptions: Called in PASSIVE_LEVEL!! function sleeps...
* @return lm_status_t
*/
{
do
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
/* Take care of the pairs and vlans as well... */
if (!CHIP_IS_E1(pdev))
{
do
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
if (!CHIP_IS_E1x(pdev))
{
do
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
return lm_status;
}
/************************ RX FILTERING ***************************************/
/**
* @Description
* Configures the rx filtering mask for RX + TX; since tx
* switching is enabled, the FW needs to know the
* configuration for tx filtering as well. The
* configuration is almost symmetric for rx / tx except for
* the case of promiscuous, in which case rx is in
* accept_unmatched and Tx is in accept_all (meaning all
* traffic is sent to the loopback channel)
*
* @Assumptions
* - An inter client lock is taken by the caller
* @Return
* - Success / Pending or Failure
*/
{
unsigned long rx_accept_flags = 0;
unsigned long tx_accept_flags = 0;
if (lm_reset_is_inprogress(pdev))
{
return LM_STATUS_SUCCESS;
}
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev))
{
return lm_vf_pf_set_q_filters(pdev, chain_idx, FALSE, Q_FILTER_RX_MASK, (u8_t*)&rx_mask, sizeof(lm_rx_mask_t), LM_SET_CAM_NO_VLAN_FILTER, FALSE);
}
#endif
{
/* No need to send a filter that has already been set...
return immediately */
return LM_STATUS_SUCCESS;
}
/* initialize accept flags in ECORE language */
{
}
else
{
}
/* find the desired filtering configuration */
{
/* In NPAR + vm_switch_enable mode, we need to turn on ACCEPT_ALL_UNICAST for TX to make
 * sure all traffic passes on the loopback channel, to enable non-enlightened vms to communicate (vms whose
 * MAC we don't have set).
 * We turn it on once we're in promiscuous mode, which signals that there are probably vms up that need
 * this feature. */
{
}
}
{
/* accept matched ucast */
}
{
/* accept matched mcast */
}
{
/* accept all mcast */
}
{
/* accept matched bcast */
}
{
/* TBD: there is no usage in Miniport for this flag */
}
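/* Sketch of the translation above. The ECORE_ACCEPT_* flag names are assumed
 * from ecore_sp_verbs, and the LM_RX_MASK_* bit names are illustrative:
 *
 *   if (rx_mask & LM_RX_MASK_ACCEPT_UNICAST)
 *       ECORE_SET_BIT(ECORE_ACCEPT_UNICAST, &rx_accept_flags);
 *   if (rx_mask & LM_RX_MASK_PROMISCUOUS_MODE) {
 *       ECORE_SET_BIT(ECORE_ACCEPT_UNMATCHED, &rx_accept_flags);   // rx side
 *       ECORE_SET_BIT(ECORE_ACCEPT_ALL_UNICAST, &tx_accept_flags); // tx side
 *   }
 */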
/* Prepare ramrod parameters */
// We set lm_cli_idx always to 0 (LM_CLI_IDX_NDIS) for E1x, and to lm_cli_idx for e2.
// LM_CLI_IDX_NDIS is an arbitrary choice and could be any of the LM_CLI_IDX values.
//
// * rx_mode_rdata PER INDEX is problematic because:
//   each client runs over the bits of the previous client
//
// * rx_mode_rdata NOT PER INDEX is problematic because:
//   in e2.0, when we send a ramrod, the rdata is the same memory for all
//   clients, and therefore in case of a parallel run of rx_mask of clients,
//   one of the ramrods actually won't be sent with the correct data
//
// * Conclusion: we have a conflict here; it needs a resolution under which both E1.0/E1.5 and E2 work without issues.
//   This issue should be resolved in a proper way which should be discussed.
//
// This note is related to the following two CQ's:
// CQ53609 - eVBD:57712: evbda!lm_sq_complete+7ca; Assert is seen while running ACPI S1 S3 sleep stress test
// CQ53444 - OIS Certs: iSCSI Ping Test Fails
if(LM_CLI_IDX_MAX <= lm_cli_idx)
{
DbgBreakMsg(" lm_cli_idx has an invalid value");
return LM_STATUS_FAILURE;
}
/* Must be set before the ramrod... */
if (lm_status == LM_STATUS_SUCCESS)
{
}
else if (lm_status == LM_STATUS_REQUEST_NOT_ACCEPTED)
{
/* Sq is blocked... meaning we're in error recovery, this is our one outstanding oid.
* mark ecore as done, return PENDING to UM, don't clear cookie. This means miniport
* will eventually get a completion as part of the re-initialization of the chip... */
}
return lm_status;
} /* lm_set_rx_mask */
/* Waits for the set-rx-mode to complete */
{
return lm_status;
}
/************************* MULTICAST *****************************************/
struct ecore_mcast_ramrod_params *p)
{
if (!mc_addrs) {
return LM_STATUS_INVALID_PARAMETER;
}
d_list_clear(&p->mcast_list);
{
DbgMessage(pdev, INFORMl2sp, "mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]:mc_addrs[%d]\n",
/* move on to next mc addr */
mc_mac++;
}
p->mcast_list_len = mc_count;
return LM_STATUS_SUCCESS;
}
struct ecore_mcast_ramrod_params *p)
{
if (mc_mac)
{
/* note that p->mcast_list_len is now set to 0 after processing */
}
}
/**
* @Description
* Function configures a list of multicast addresses. Or
* resets the list previously configured
*
* @param pdev
* @param mc_addrs - array of multicast addresses. NULL if unset is required
* @param buf_len - length of the buffer - 0 if unset is required
* @param cookie - will be returned on completion
* @param lm_cli_idx - which lm client to send request on
*
* @return lm_status_t - SUCCESS on synchronous completion
*                       PENDING on asynchronous completion
*                       FAILURE o/w
*/
{
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev)) {
return lm_vf_pf_set_q_filters(pdev, lm_cli_idx, cookie, Q_FILTER_MC, mc_addrs, buf_len, LM_SET_CAM_NO_VLAN_FILTER, FALSE);
}
#endif
{
DbgBreakMsg("size must be greater than zero for a valid client\n");
return LM_STATUS_FAILURE;
}
/* Initialize params sent to ecore layer */
/* Need to split into groups of 16 for E2... due to an HSI constraint */
if (mc_addrs)
{
}
/* Cookie must be set before sending the ramrod, since the completion could arrive before
 * we return, and the cookie must be in place */
ecore_status = ecore_config_mcast(pdev, &rparam, (mc_addrs != NULL)? ECORE_MCAST_CMD_ADD : ECORE_MCAST_CMD_DEL);
if (lm_status == LM_STATUS_SUCCESS)
{
}
if (mc_addrs)
{
}
return lm_status;
} /* lm_set_mc */
void * cookie,
{
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev))
{
}
#endif
/* Cookie must be set before sending the ramrod, since the completion could arrive before
 * we return, and the cookie must be in place */
if (lm_status == LM_STATUS_SUCCESS)
{
}
return lm_status;
}
/**
* Description
* This routine is called to wait for the multicast set
* completion. It must be called in passive level since it
* may sleep
* @param pdev
* @param lm_cli_idx the cli-idx that the multicast was sent on.
*
* @return lm_status SUCCESS on done, TIMEOUT o/w
*/
{
return lm_status;
}
/************************* RSS ***********************************************/
/**
* Description: update RSS key in slowpath
* Assumptions:
* - given key_size is promised to be either 40 or 16 (promised by NDIS)
* Return:
*/
/**
* @Description: Update RSS key in driver rss_hash_key array and
* check if it has changed from previous key.
*
* @param pdev
* @param hash_key - hash_key received from NDIS
* @param key_size
*
* @return u8_t TRUE if changed, FALSE o/w
*/
{
u32_t i = 0;
/* check params */
{
DbgBreak();
return LM_STATUS_INVALID_PARAMETER;
}
/* Note: MSB (that is hash_key[0]) should be placed in MSB of register KEYRSS9, regardless the key size */
/* GilR 4/4/2007 - assert on key_size==16/40? */
{
val = 0;
if (i < key_size)
{
"KEYRSS[%d:%d]=0x%x, written to RSS_REG=%d\n",
i += 4;
}
else
{
"OUT OF KEY size, writing 0x%x to RSS_REG=%d\n",
}
{ /* key changed */
key_changed = TRUE;
}
}
if (key_changed)
{
}
return key_changed;
}
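/* Worked example of the loop above: the key is written 4 bytes per RSS
 * register, MSB (hash_key[0]) first, so a 40-byte key fills all ten
 * registers while a 16-byte key fills the first four and the remaining
 * registers are written as 0 (the "OUT OF KEY size" branch). */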
/**
* @Description
* Enable RSS for Eth with given indirection table also updates the rss key
* in searcher (for previous chips...- done by sp-verbs)
*
* @Assumptions
* - given table_size is promised to be power of 2 (promised by NDIS),
* or 1 in case of RSS disabling
* - the indices in the given chain_indirection_table are chain
* indices converted by UM layer...
* - given key_size is promised to be either 40 or 16 (promised by NDIS)
*
* @param pdev
* @param chain_indirection_table - array of size @table_size containing chain numbers
* @param table_size - size of @indirection_table
* @param hash_key - new hash_key to be configured. 0 means no key
* @param key_size
* @param hash_type
* @param sync_with_toe - This field indicates that the completion to the mm layer
* should take into account the fact that toe rss update will
* be sent as well. A counter will be increased in lm for this purpose
* @param cookie - will be returned on completion
*
* @return lm_status_t - SUCCESS on synchronous completion
*                       PENDING on asynchronous completion
*                       FAILURE o/w
*/
{
u8_t i = 0;
/* check params */
{
DbgBreak();
return LM_STATUS_INVALID_PARAMETER;
}
if (hash_type &
{
return LM_STATUS_INVALID_PARAMETER;
}
/* RSS mode */
/* Fixme --> anything else ?*/
/* Translate the hash type to "ecore" */
{
}
{
}
{
}
{
}
{
reconfigure = TRUE;
}
/* set rss result mask according to table size
(table_size is promised to be power of 2) */
{
/* Hash bits */
reconfigure = TRUE;
}
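/* e.g. table_size == 128 yields rss_result_mask = 128 - 1 = 0x7f, so the
 * low 7 bits of the RSS hash select the indirection-table entry. */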
for (i = 0; i < table_size; i++)
{
{
DbgMessage(pdev, INFORMl2sp, "RssIndTable[%02d]=0x%x (Changed from 0x%x)\n", i, value, pdev->slowpath_info.last_set_indirection_table[i]);
reconfigure = TRUE;
}
}
mm_memcpy(params.ind_table, pdev->slowpath_info.last_set_indirection_table, sizeof(params.ind_table));
if (hash_key)
{
if (key_changed)
{
reconfigure = TRUE;
}
}
/* It is not expected that toe will update and ETH won't, but just to make sure: if sync_with_toe
* is true it means toe reconfigured... so eth must take care of the sync... */
if (reconfigure || sync_with_toe)
{
/* If we're not syncing with toe, it means that these counters have not
* been increased by toe, and need to be increased here. */
if (!sync_with_toe)
{
}
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev))
{
lm_status = lm_vf_pf_update_rss(pdev, NULL, params.rss_flags, params.rss_result_mask, params.ind_table, params.rss_key);
if (lm_status == LM_STATUS_SUCCESS)
{
}
}
else
#endif
{
}
if (lm_status == LM_STATUS_SUCCESS)
{
}
}
return lm_status;
}
/**
* @Description
* This routine disables rss functionality by sending a
* ramrod to FW.
*
* @param pdev
* @param cookie - will be returned on completion
* @param sync_with_toe - true means this call is synced with
* toe, and completion will be called only
* when both toe + eth complete. Eth needs
* to know this (reason in code) *
*
* @return lm_status_t - SUCCESS on synchronous completion
*                       PENDING on asynchronous completion
*                       FAILURE o/w
*/
{
u8_t i = 0;
/* RSS mode */
/* If we're not syncing with toe, it means that these counters have not
* been increased by toe, and need to be increased here. */
if (!sync_with_toe)
{
}
{
}
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev))
{
lm_status = lm_vf_pf_update_rss(pdev, NULL, params.rss_flags, params.rss_result_mask, params.ind_table, params.rss_key);
if (lm_status == LM_STATUS_SUCCESS)
{
}
}
else
#endif
{
}
if (lm_status == LM_STATUS_SUCCESS)
{
}
return lm_status;
} /* lm_disable_rss */
/**
* @Description
* Waits for the RSS update to complete
*
* @param pdev
*
* @return lm_status_t lm_status_t SUCCESS or TIMEOUT
*/
{
return lm_status;
}
#ifdef VF_INVOLVED
{
return lm_status;
}
#endif
/************************** EQ HANDLING *******************************************/
static INLINE void lm_eq_handle_function_start_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
NONE_CONNECTION_TYPE, 0);
}
static INLINE void lm_eq_handle_function_stop_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
NONE_CONNECTION_TYPE, 0);
}
static INLINE void lm_eq_handle_cfc_del_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
{ /* cfc del completion for eth cid */
DbgMessage(pdev, WARNeq, "lm_service_eq_intr: EVENT_RING_OPCODE_CFC_DEL_WB - calling lm_extract_ramrod_req!\n");
}
else
{ /* cfc del completion for toe cid */
if (error) {
{
DbgBreakIfAll(1);
}
{
DbgMessage(pdev, WARNl4sp, "lm_eth_comp_cb: RAMROD_CMD_ID_ETH_CFC_DEL(0x%x) - %d resending!\n", cid,
cid,
0 );
}
else
{
DbgBreakIfAll(1);
}
}
else
{
}
}
(elem->message.opcode == EVENT_RING_OPCODE_CFC_DEL)? RAMROD_CMD_ID_COMMON_CFC_DEL : RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
}
static INLINE void lm_eq_handle_fwd_setup_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
}
{
if(LM_CLI_IDX_MAX <= lm_cli_idx)
{
DbgBreakMsg(" lm_eq_handle_mcast_eqe lm_cli_idx is invalid ");
return;
}
/* Clear pending state for the last command */
/* If there are pending mcast commands - send them */
{
if (lm_status == LM_STATUS_PENDING)
{
}
else if (lm_status != LM_STATUS_SUCCESS)
{
DbgBreakMsg("Unexpected pending mcast command failed\n");
}
}
if (indicate_done)
{
{
}
}
if (CHIP_IS_E1(pdev))
{
}
else
{
}
}
static INLINE void lm_eq_handle_classification_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
unsigned long ramrod_flags = 0;
int i;
/* Relevant to 57710, mcast is implemented as "set-macs"*/
if (type == ECORE_FILTER_MCAST_PENDING)
{
return;
}
switch (type)
{
case ECORE_FILTER_MAC_PENDING:
break;
break;
break;
default:
/* unknown ER handling*/
/* Special handling for the case where the type is unknown (error recovery flow):
 * check which object is pending, and clear the relevant one. */
{
}
{
}
break;
}
// We expect only these 2 statuses here (CQ61418)
{
"lm_eq_handle_classification_eqe: commands' length is above CLASSIFY_RULES_COUNT (the maximum length of commands' list for one execution), ecore_status = %d", ecore_status);
}
// verify that mac_local, mac_add1 & mac_add2 are contiguous
ASSERT_STATIC( OFFSETOF( eth_stats_info_t, mac_local )+ sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_local) == OFFSETOF( eth_stats_info_t, mac_add1 ) );
ASSERT_STATIC( OFFSETOF( eth_stats_info_t, mac_add1 ) + sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_add1) == OFFSETOF( eth_stats_info_t, mac_add2 ) );
{
{
}
else
{
// We want to keep only the eth mac; this is needed for E3 only, but we keep it anyway for E2 as well...
for (i = 0; i < 3; i++)
mm_mem_zero(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_local + i, sizeof(u8_t));
p_ecore_vlan_mac_obj->get_n_elements(pdev, p_ecore_vlan_mac_obj ,3, pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats.mac_local + MAC_PAD, MAC_PAD, ETH_ALEN);
}
}
{
}
if (CHIP_IS_E1x(pdev))
{
}
else
{
}
}
{
/* Order is important!!!
 * Stats use a predefined ramrod. We need to make sure that we first complete the ramrod, which will
 * take it out of the sq-completed list, and only after that mark the ramrod as completed, so that a new
 * ramrod can be sent!
 */
mm_write_barrier(); /* barrier to make sure command before this line completes before executing the next line! */
}
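/* i.e. the hazard avoided above: if the "completed" flag were set first, a
 * new stats ramrod could be posted and completed while the old one is still
 * on the sq-completed list. The write barrier keeps the two stores ordered. */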
static INLINE void lm_eq_handle_filter_rules_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
struct ecore_vlan_mac_ramrod_params p;
// FIXME: pdev->client_info[cid].mac_obj.raw.clear_pending(&pdev->client_info[cid].mac_obj.raw);
{
}
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_CMD_ID_ETH_FILTER_RULES, ETH_CONNECTION_TYPE, cid);
{
p.ramrod_flags = 0;
ecore_config_vlan_mac(pdev, &p);
}
}
static INLINE void lm_eq_handle_rss_update_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
#ifdef VF_INVOLVED
#endif
#ifdef VF_INVOLVED
{
}
else
#endif
{
{
{
}
}
}
}
/**lm_eq_handle_function_update_eqe
* handle a NIV function update completion.
*
* @param pdev the device
* @param elem the CQE
*/
static INLINE void lm_eq_handle_function_update_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
{
{
}
{
}
break;
break;
{
}
break;
// In case of a link update, indicate the link status to the miniport; otherwise it is just
// an svid update, which doesn't need any more processing.
{
}
{
DbgBreak();
}
break;
default:
DbgBreakMsg("lm_eq_handle_function_update_eqe unknown source");
break;
}
NONE_CONNECTION_TYPE, 0);
}
/**lm_eq_handle_niv_vif_lists_eqe
* handle a NIV lists update completion.
*
* @param pdev the device
* @param elem the CQE
*/
static INLINE void lm_eq_handle_niv_vif_lists_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
(!lm_reset_is_inprogress(pdev)));
{
}
if(!lm_reset_is_inprogress(pdev))
{
}
NONE_CONNECTION_TYPE, 0);
}
#ifdef VF_INVOLVED
static INLINE void lm_eq_handle_vf_flr_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
if (!vf_info) {
DbgBreakMsg("lm_eq_handle_vf_flr_eqe: vf_info is not found\n");
return;
}
if ((vf_info->vf_stats.vf_stats_state != VF_STATS_NONE) && (vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_IN_PROCESSING)) {
}
}
static INLINE void lm_eq_handle_malicious_vf_eqe(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
if (vf_info) {
}
}
#endif
static INLINE lm_status_t lm_service_eq_elem(struct _lm_device_t * pdev, union event_ring_elem * elem)
{
/* handle eq element */
{
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
break;
#ifdef VF_INVOLVED
case EVENT_RING_OPCODE_VF_FLR:
break;
break;
#endif
default:
DbgBreakMsg("Unknown elem type received on eq\n");
return LM_STATUS_FAILURE;
}
return LM_STATUS_SUCCESS;
}
/**
* @Description
* handle cqes of the event-ring, should be called from dpc if index in status block was changed
* @param pdev
*
* @return lm_status_t SUCCESS or FAILURE (if unknown completion)
*/
{
{
}
/* there is no change in the EQ consumer index so exit! */
if (cq_old_idx == cq_new_idx)
{
return LM_STATUS_SUCCESS;
} else {
DbgMessage(pdev, INFORMeq , "EQ consumer index: cq_old_idx=0x%x, cq_new_idx=0x%x!\n",cq_old_idx,cq_new_idx);
}
while(cq_old_idx != cq_new_idx)
{
/* get hold of the cqe, and find out what it's type corresponds to */
{
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
#ifdef __LINUX
#endif //__LINUX
/* Recycle the cqe */
} /* while */
/* update producer */
return LM_STATUS_SUCCESS;
} /* lm_service_eq_intr */
/**
* @Description
* This function completes eq completions immediately
* (without fw completion).
*
* @param pdev
* @param spe
*/
{
/* We need to build the "elem" based on the spe */
if ((pending->type & SPE_HDR_T_CONN_TYPE) == ETH_CONNECTION_TYPE) /* Some Ethernets complete on Eq. */
{
switch (cmd)
{
break;
break;
break;
break;
break;
break;
default:
DbgBreakMsg("Unknown elem type received on eq\n");
}
}
{
switch (cmd)
{
break;
break;
break;
break;
break;
break;
break;
break;
break;
default:
DbgBreakMsg("Unknown elem type received on eq\n");
}
}
}
/*********************** SQ RELATED FUNCTIONS ***************************/
/* TODO: move more functions from command.h to here. */
{
{
DbgBreakIf(!pdev);
DbgBreakIf(!cb) ;
return;
}
}
{
{
DbgBreakIf(!pdev);
return;
}
}
{
}
/**
* @Description
* function completes pending slow path requests instead of
* FW. Used in error recovery flow.
*
* @Assumptions:
* interrupts at this point are disabled and dpcs are
* flushed, thus no one else can complete these...
*
* @param pdev
*/
{
/* unexpected if not under error recovery */
do
{
/* Find the first entry that hasn't been handled yet. */
/* We just peek and don't pop, since completion of this pending request should include removing
* it from the completion list. However, that may not happen immediately */
/* Look for the first entry that is "pending" but not completion_called yet. */
{
}
/* Mark pending completion as "handled" so that we don't handle it again... */
if (pending)
{
}
if (pending)
{
{
}
else
{
DbgBreakMsg("unsupported pending sq: Not implemented yet\n");
}
}
/*
* lm_sq_post_pending can only cause (via lm_sq_flush) the
* lm_sq_complete_pending_requests DPC to be scheduled if
* pdev->sq_info.sq_comp_scheduled==FALSE. Such scheduling
* is accompanied by sq_comp_scheduled being set to TRUE.
*
* If we avoid setting pdev->sq_info.sq_comp_scheduled to FALSE,
* we are guaranteed that lm_sq_complete_pending_requests will not be
* re-scheduled here.
*/
/*
* We are done completing pending requests in pending_list. However, any
* new sp requests created by callbacks need service.
*
* As we are outside the SPQ lock, this DPC may be preempted,
* lm_sq_flush may have been called somewhere before this point.
*/
/*
* check if there is more to be flushed (new SPQ that entered after
* the "while".)
*/
if ((pdev->sq_info.sq_state == SQ_STATE_PENDING) && !d_list_is_empty(&pdev->sq_info.pending_complete))
{
}
else
{
}
}
{
{
schedule_wi = TRUE;
}
if (schedule_wi)
{
/* Alternative: WorkItem...
lm_status = MM_REGISTER_LPME(pdev, lm_sq_complete_pending_requests, FALSE, FALSE);
if (lm_status == LM_STATUS_SUCCESS)
{
return LM_STATUS_PENDING;
}
*/
if (lm_status == LM_STATUS_SUCCESS)
{
}
}
return lm_status;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
return LM_STATUS_SUCCESS;
}
{
{
}
return empty;
}
/**
* @Description
* Posts from the normal + high priority lists as much as it
* can towards the FW.
*
* @Assumptions
* called under SQ_LOCK!!!
*
* @param pdev
*
* @return lm_status_t PENDING: if indeed requests were posted,
* SUCCESS o/w
*/
{
{
if(!pending)
break;
DbgMessage(pdev, INFORM, "lm_sq_post: priority=%d, command=%d, type=%d, cid=%d num_pending_normal=%d\n",
}
/* post high priority sp */
{
if(!pending)
break;
DbgMessage(pdev, INFORM, "lm_sq_post: priority=%d, command=%d, type=%d, cid=%d num_pending_normal=%d\n",
}
return lm_status;
}
/**
* Description
* Add the entry to the pending SP list.
* Try to add entries from the list to the sq_chain if possible (when there are fewer than 8 ramrod commands pending).
*
* @param pdev
* @param pending - The pending list entry.
* @param priority - (high or low) which list to insert the pending list entry into.
*
* @return lm_status_t: LM_STATUS_SUCCESS on success or
* LM_STATUS_REQUEST_NOT_ACCEPTED if slowpath queue is
* in blocked state.
*/
struct sq_pending_command * pending,
{
DbgBreakIf(! pdev);
{
// This state is valid in case a hw failure such as a fan failure has happened,
// so we removed the assert that was here before and now only trace (CQ62337).
return LM_STATUS_REQUEST_NOT_ACCEPTED;
}
/* We shouldn't be posting any entries if the function-stop has already been posted... */
if (((mm_le32_to_cpu(pending->command.hdr.conn_and_cmd_data) & SPE_HDR_T_CMD_ID)>>SPE_HDR_T_CMD_ID_SHIFT) != RAMROD_CMD_ID_COMMON_FUNCTION_STOP)
{
DbgBreakIf((pdev->eq_info.function_state == FUNCTION_STOP_POSTED) || (pdev->eq_info.function_state == FUNCTION_STOP_COMPLETED));
}
switch( priority )
{
case CMD_PRIORITY_NORMAL:
/* add the request to the list tail*/
break;
case CMD_PRIORITY_MEDIUM:
/* add the request to the list head*/
break;
case CMD_PRIORITY_HIGH:
/* add the request to the list head*/
break;
default:
DbgBreakIf( 1 ) ;
// TODO_ROLLBACK - free sq_pending_command
return LM_STATUS_INVALID_PARAMETER ;
}
{
}
if (lm_status == LM_STATUS_PENDING)
{
/* New slowpath was posted in pending state... make sure to flush sq
* after this... */
{
}
}
if (sq_flush)
{
}
return lm_status ;
}
/*
post a ramrod to the sq
takes the sq pending list spinlock and adds the request
will not block
but the actual posting to the sq might be deferred until there is room
MUST only have one request pending per CID (this is up to the caller to enforce)
*/
{
DbgBreakIf(! pdev);
/* allocate a new command struct and fill it */
if( !pending )
{
DbgBreakIf(1);
return LM_STATUS_FAILURE ;
}
return lm_status ;
}
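/* Typical call shape, as a sketch (the parameter order is assumed from the
 * call fragments in this file; halt_data_phys is a hypothetical ramrod data
 * address):
 *
 *   lm_status = lm_sq_post(pdev, cid, RAMROD_CMD_ID_ETH_HALT,
 *                          CMD_PRIORITY_NORMAL, ETH_CONNECTION_TYPE,
 *                          halt_data_phys.as_u64);
 */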
/*
inform the sq mechanism of completed ramrods
because the completions arrive on the fast-path rings
the fast-path needs to inform the sq that the ramrod has been serviced
will not block
does not take any locks
*/
{
DbgMessage(pdev, INFORM, "lm_sq_complete: priority=%d, command=%d, type=%d, cid=%d num_pending_normal=%d\n",
switch( priority )
{
case CMD_PRIORITY_NORMAL:
case CMD_PRIORITY_MEDIUM:
break;
case CMD_PRIORITY_HIGH:
break;
default:
DbgBreakIf( 1 ) ;
break;
}
/* update sq consumer */
/* Search for the completion in the pending_complete list*/
/* Currently only supported if error recovery is supported */
{
}
{
while (pending)
{
{
/* got it... remove from list and free it */
{
}
break;
}
}
}
else
{
/* TODO_ER: on no validation, just take the head... Workaround for mc-diag */
{
DbgBreakMsg("lm_sq_complete pending is NULL");
}
else
{
{
}
}
}
}
/**
* @description
* do any deferred posting pending on the sq; will take the list spinlock.
* Will not block. Checks the sq state: if it's pending (meaning no hw...) calls flush
* at the end, which will take care of completing these completions internally.
* @param pdev
*
* @return lm_status_t SUCCESS if no pending requests were sent, PENDING
*         if a pending request was sent.
*/
{
{
DbgBreakIf(!pdev);
return LM_STATUS_INVALID_PARAMETER;
}
if (lm_status == LM_STATUS_PENDING)
{
/* New slowpath was posted in pending state... make sure to flush sq
* after this... */
{
}
}
if (sq_flush)
{
}
return lm_status;
}
/*********************** ETH SLOWPATH RELATED FUNCTIONS ***************************/
{
#ifdef VF_INVOLVED
#endif
DbgBreakIf(!pdev);
switch (command)
{
"lm_eth_comp_cb: RAMROD ETH SETUP completed for cid=%d, - calling lm_extract_ramrod_req!\n", cid);
break;
"lm_eth_comp_cb: RAMROD ETH SETUP completed for cid=%d, - calling lm_extract_ramrod_req!\n", cid);
break;
"lm_eth_comp_cb: RAMROD ETH Update completed for cid=%d, - calling lm_extract_ramrod_req!\n", cid);
break;
case RAMROD_CMD_ID_ETH_HALT:
DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_HALT- calling lm_extract_ramrod_req!\n");
break;
case RAMROD_CMD_ID_ETH_EMPTY:
DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_EMPTY- calling lm_extract_ramrod_req!\n");
break;
DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_TPA_UPDATE- calling lm_extract_ramrod_req!\n");
#ifdef VF_INVOLVED
if (MM_DCB_MP_L2_IS_ENABLE(pdev))
{
}
else
{
}
{
}
else
#endif
{
{
}
{
if (tpa_info->update_cookie)
{
}
}
}
{
return; /* To prevent lm_sq_completion processing for a non-existing (not submitted) pending item */
}
break;
DbgMessage(pdev, WARNl2sp, "lm_eth_comp_cb:RAMROD_CMD_ID_ETH_TERMINATE- calling lm_extract_ramrod_req!\n");
break;
default:
break;
}
#ifdef __LINUX
#endif //__LINUX
}
/**
* @Description
* Function is the callback function for completing eth
* completions when no chip access exists. Part of
* "complete-pending-sq" flow
* @param pdev
* @param spe
*/
{
/* The idea is to prepare a cqe and call: common_ramrod_eth_rx_cqe */
{
/* Ramrods that complete on the EQ */
break;
/* Ramrods that complete on the RCQ */
case RAMROD_CMD_ID_ETH_HALT:
case RAMROD_CMD_ID_ETH_EMPTY:
break;
default:
DbgBreakMsg("Unknown cmd");
}
}
u8_t lm_check_mac_addr_exist(struct _lm_device_t *pdev, u8_t chain_idx, u8_t *mac_addr, u16_t vlan_tag, u8_t is_encap_inner_mac_filter)
{
classification_ramrod_data = {{{0}}};
{
DbgBreakMsg("lm_move_mac_addr: invalid params\n");
return LM_STATUS_INVALID_PARAMETER;
}
#if 0
if (lm_reset_is_inprogress(pdev))
{
return LM_STATUS_SUCCESS;
}
#endif
if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER)
{
mm_memcpy(classification_ramrod_data.vlan_mac.mac, mac_addr, sizeof(classification_ramrod_data.vlan_mac.mac));
}
else
{
mm_memcpy(classification_ramrod_data.mac.mac, mac_addr, sizeof(classification_ramrod_data.mac.mac) );
}
if (ecore_status == ECORE_EXISTS)
{
}
else if (ecore_status == ECORE_SUCCESS)
{
}
else
{
DbgBreak();
}
return is_exist;
}
{
struct client_update_ramrod_data * client_update_data_virt = pdev->client_info[client_idx].update.data_virt;
{
return LM_STATUS_FAILURE;
}
// We send a client update ramrod in any case; we don't optimize this flow.
// Client setup may have already taken the correct NIV value, but the ramrod is sent anyway.
if((LM_CON_STATE_OPEN != con_state) &&
{
// Client is not in a state where it can receive the ramrod
return LM_STATUS_ABORTED;
}
/* We don't expect this function to be called for non eth regular connections.
* If we hit this assert it means we need support for SRIOV + AFEX
*/
{
return LM_STATUS_FAILURE;
}
client_update_data_virt->func_id = FUNC_ID(pdev); /* FIXME: VFID needs to be given here for VFs... */
cid,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
lm_status = lm_wait_state_change(pdev, &pdev->client_info[client_idx].update.state, LM_CLI_UPDATE_RECV);
return lm_status;
}