/*******************************************************************************
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Copyright 2014 QLogic Corporation
* The contents of this file are subject to the terms of the
* QLogic End User License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the License at
* See the License for the specific language governing permissions
* and limitations under the License.
*
*
* Module Description:
* This file contains functions that deal with resource allocation and setup
*
******************************************************************************/
#include "lm5710.h"
#include "bd_chain.h"
#include "command.h"
#include "ecore_common.h"
#include "577xx_int_offsets.h"
#include "bcmtype.h"
// Should behave the same as ceil() — math.h ceil() doesn't support u64_t.
)
{
/* NOTE(review): this fragment appears truncated — the function signature and
 * the argument-validation / lock-handling expressions that belong in the
 * empty braces below are missing from this view.  Presumably this is
 * lm_clear_chain_sb_cons_idx (see its caller) — TODO confirm.  Code is left
 * byte-identical. */
{
return LM_STATUS_INVALID_PARAMETER;
}
{
return LM_STATUS_SUCCESS;
}
if (sb_lock_id == DEF_STATUS_BLOCK_INDEX)
{
}
/* Make sure that the sb is not being processed while we
 * clear the pointer. */
*hw_con_idx_ptr = NULL;
if (lm_reset_is_inprogress(pdev))
{
/* Chip reset in progress — skip the hardware zeroing below. */
return LM_STATUS_SUCCESS;
}
switch (hc_sb_info->hc_sb) {
case STATUS_BLOCK_SP_SL_TYPE:
/* Slow-path status block: zero the sync line and the index value in
 * CSTORM internal memory; each write is read back to verify it reached
 * the chip and was not delayed in the PCI. */
LM_INTMEM_WRITE16(pdev, CSTORM_SP_HC_SYNC_LINE_INDEX_OFFSET(hc_sb_info->hc_index_value,func), 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_READ16(pdev, CSTORM_SP_HC_SYNC_LINE_INDEX_OFFSET(hc_sb_info->hc_index_value,func), &rd_val, BAR_CSTRORM_INTMEM);
DbgBreakIfAll(rd_val != 0);
LM_INTMEM_WRITE16(pdev, (CSTORM_SP_STATUS_BLOCK_OFFSET(func) + OFFSETOF(struct hc_sp_status_block, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_READ16 (pdev, (CSTORM_SP_STATUS_BLOCK_OFFSET(func) + OFFSETOF(struct hc_sp_status_block, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), &rd_val, BAR_CSTRORM_INTMEM);
DbgBreakIfAll(rd_val != 0);
break;
/* NOTE(review): a case label (presumably the dynamic-host-coalescing
 * status-block type) is missing above this brace — TODO confirm against
 * the original source. */
{
return LM_STATUS_INVALID_PARAMETER;
}
LM_INTMEM_WRITE32(PFDEV(pdev), CSTORM_HC_SYNC_LINE_DHC_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_READ32(PFDEV(pdev), CSTORM_HC_SYNC_LINE_DHC_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), &rd_val_32, BAR_CSTRORM_INTMEM);
DbgBreakIfAll(rd_val_32 != 0);
/* Intentional fall-through: continue to zero the index value as well. */
case STATUS_BLOCK_NORMAL_TYPE:
LM_INTMEM_WRITE16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_READ16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), &rd_val, BAR_CSTRORM_INTMEM);
} else {
LM_INTMEM_WRITE16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_READ16(PFDEV(pdev), CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_sb_info->hc_index_value, fw_sb_id), &rd_val, BAR_CSTRORM_INTMEM);
}
DbgBreakIfAll(rd_val != 0);
if (CHIP_IS_E1x(pdev)) {
LM_INTMEM_WRITE16(PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e1x, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_READ16 (PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e1x, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), &rd_val, BAR_CSTRORM_INTMEM);
} else {
LM_INTMEM_WRITE16(PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e2, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_READ16 (PFDEV(pdev), (CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id) + OFFSETOF(struct hc_status_block_e2, index_values) + (hc_sb_info->hc_index_value * sizeof(u16_t))), &rd_val, BAR_CSTRORM_INTMEM);
}
break;
default:
DbgBreakIf(1);
}
/* We read from the same memory and verify that it's 0 to make sure that the value was written to the grc and was not delayed in the pci */
DbgBreakIfAll(rd_val != 0);
return LM_STATUS_SUCCESS;
}
/*
* reset txq, rxq, rcq counters for L2 client connection
*
* assumption: the cid equals the chain idx
*/
/**
* @Description:
* Allocate the given number of coalesce buffers and queue them in the txq
* chain. One buffer is allocated large enough for LSO packets, and the rest
* are allocated with MTU size.
* @Return:
* lm_status
*/
static lm_status_t
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
"#lm_allocate_coalesce_buffers, coalesce_buf_cnt=%d\n",
if(coalesce_buf_cnt == 0)
{
return LM_STATUS_SUCCESS;
}
{
return LM_STATUS_RESOURCE;
}
/* Create a list of frame buffer descriptors. */
{
&coalesce_buf->link);
coalesce_buf++;
}
/* Have at least one coalesce buffer large enough to copy
* an LSO frame. */
&txq->coalesce_buf_list);
/* Determine the total memory for the coalesce buffers. */
mem_left = 0;
&txq->coalesce_buf_list);
while(coalesce_buf)
{
&coalesce_buf->link);
}
mem_size = 0;
/* Initialize all the descriptors to point to a buffer. */
&txq->coalesce_buf_list);
while(coalesce_buf)
{
/* Allocate a small block of memory at a time. */
if(mem_size == 0)
{
while(coalesce_buf)
{
{
break;
}
&coalesce_buf->link);
}
{
return LM_STATUS_RESOURCE;
}
}
/* Go to the next packet buffer. */
&coalesce_buf->link);
}
{
DbgBreakMsg("Memory allocation out of sync\n");
return LM_STATUS_FAILURE;
}
return LM_STATUS_SUCCESS;
} /* lm_allocate_coalesce_buffers */
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
/* alloc the chain */
{
return LM_STATUS_RESOURCE;
}
return lm_allocate_coalesce_buffers(
pdev,
cid);
} /* lm_alloc_txq */
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
/* alloc the chain(s) */
rx_chain_idx_max = LM_RXQ_IS_CHAIN_SGE_VALID( pdev, cid ) ? LM_RXQ_CHAIN_IDX_SGE : LM_RXQ_CHAIN_IDX_BD;
{
bd_chain->bd_chain_virt = mm_alloc_phys_mem( pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
{
return LM_STATUS_RESOURCE;
}
}
return LM_STATUS_SUCCESS;
} /* lm_alloc_rxq */
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
"#lm_alloc_rcq, idx=%d, page_cnt=%d\n",
/* alloc the chain */
{
return LM_STATUS_RESOURCE;
}
return LM_STATUS_SUCCESS;
} /* lm_alloc_rcq */
/**
* @description
* Allocate the TPA chain.
* @param pdev
* @param cid - chain index.
* @param page_cnt - number of BD pages.
* @param desc_cnt - number of descriptors.
* @param bds_per_page - number of BDs per page.
*
* @return lm_status_t
*/
{
/* NOTE(review): fragment appears truncated — the signature and the
 * condition expressions inside the empty braces are missing from this
 * view.  Code is left byte-identical. */
/* check arguments */
{
return LM_STATUS_FAILURE;
}
/************ Allocate BD chain *******************************/
bd_chain->bd_chain_virt = mm_alloc_phys_mem( pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
{
return LM_STATUS_RESOURCE;
}
// The number of SGE bd entries
/************ Allocate active descriptor array ****************/
mem_size *= sizeof(lm_packet_t *);
{
return LM_STATUS_RESOURCE;
}
/************ Allocate mask_array descriptor array ************/
{
return LM_STATUS_RESOURCE;
}
/************ Allocate TPA ramrod data ************************/
mem_size = sizeof(struct tpa_update_ramrod_data);
tpa_chain->ramrod_data_virt = mm_alloc_phys_mem(pdev, mem_size, &tpa_chain->ramrod_data_phys, 0, mm_cli_idx);
{
return LM_STATUS_RESOURCE ;
}
return LM_STATUS_SUCCESS;
} /* lm_alloc_tpa */
{
{
}
{
}
{
}
{
}
{
}
else
{
}
return resource;
}
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy, bd_chain->page_cnt, sizeof(struct eth_tx_bd), /* is full? */0, TRUE);
cid,
{
/* iro_dhc_offste not initialized on purpose --> not expected for FWD channel */
}
{
/* iro_dhc_offste not initialized on purpose --> not expected for FWD channel */
}
{
/* iro_dhc_offste not initialized on purpose --> not expected for FWD channel */
}
{
DbgBreakMsg("OOO doesn't have a txq");
return LM_STATUS_FAILURE;
}
else
{
const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
// Assign the TX chain consumer pointer to the consumer index in the status block. TBD: rename HC_INDEX_C_ETH_TX_CQ_CONS as its inappropriate
{
return LM_STATUS_FAILURE ;
}
// This isn't realy cid it is the chain index
sb_indexes[tx_sb_index_number] = 0;
{
LM_TXQ(pdev, cid).hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, tx_sb_index_number);
}
else
{
}
}
return LM_STATUS_SUCCESS;
} /* lm_setup_txq */
{
/* NOTE(review): fragment appears truncated — the signature, the
 * argument-check condition and the per-chain loop header are missing
 * from this view.  Code is left byte-identical. */
static u8_t const eth_rx_size_arr[LM_RXQ_CHAIN_IDX_MAX] = {sizeof(struct eth_rx_bd), sizeof(struct eth_rx_sge)};
const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
/* check arguments */
{
return LM_STATUS_FAILURE;
}
/* Set up the SGE chain as well only when it is valid for this cid. */
rx_chain_idx_max = LM_RXQ_IS_CHAIN_SGE_VALID( pdev, cid ) ? LM_RXQ_CHAIN_IDX_SGE : LM_RXQ_CHAIN_IDX_BD;
{
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy,bd_chain->page_cnt, eth_rx_size_arr[rx_chain_idx_cur], /* is full? */0, TRUE);
}
/* We initialize the hc_sb_info here for completeness. The fw updates are actually done by rcq-chain, but the dynamic-host-coalescing based on rx-chain */
{
rxq_chain->hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, HC_INDEX_ETH_RX_CQ_CONS);
}
else
{
/* VF path: compute the DHC offset from the queue-zone layout. */
rxq_chain->hc_sb_info.iro_dhc_offset = sizeof(struct cstorm_queue_zone_data) * LM_FW_DHC_QZONE_ID(pdev, sb_id)
+ sizeof(u32_t) * HC_INDEX_ETH_RX_CQ_CONS;
DbgMessage(pdev, WARN, "Dhc offset is 0x%x for VF Q Zone %d\n",rxq_chain->hc_sb_info.iro_dhc_offset,LM_FW_DHC_QZONE_ID(pdev, sb_id));
}
return LM_STATUS_SUCCESS;
} /* lm_setup_rxq */
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
if (CHIP_IS_E1x(pdev))
{
}
else
{
{
}
}
//if(pdev->params.l2_rx_desc_cnt[0]) /* if removed. was not required */
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy,bd_chain->page_cnt, sizeof(union eth_rx_cqe), /* is full? */0, TRUE);
//number of Bds left in the RCQ must be at least the same with its corresponding Rx chain.
DbgBreakIf(lm_bd_chain_avail_bds(&rxq_chain->chain_arr[LM_RXQ_CHAIN_IDX_BD]) <= lm_bd_chain_avail_bds(&rcq_chain->bd_chain));
{
DbgBreakIf( !lm_bd_chains_are_consistent( &rxq_chain->chain_arr[LM_RXQ_CHAIN_IDX_BD], &rxq_chain->chain_arr[LM_RXQ_CHAIN_IDX_SGE]) );
}
// Assign the RCQ chain consumer pointer to the consumer index in the status block.
{
if (CHIP_IS_E2E3(pdev)) {
}
}
{
if (CHIP_IS_E2E3(pdev)) {
}
}
{
// Any SB that isn't RSS share the same SB.
// basically we will want the ISCSI OOO to work on the same SB that ISCSI works.(This does happen see the line above)
// Even if we want to count on ISCSI and make sure we will work on the same SB:
// 1.There is no promise on the order the ISCSI nminiport will call
// ISCSI_KWQE_OPCODE_INIT1 (lm_sc_init inits pdev->iscsi_info.l5_eq_base_chain_idx) or
// 2.OOO is general code that doesn't depend on a protocol (ISCSI).
//TODO_OOO Ask Michal regarding E2 if we need LM_FW_SB_ID
if (CHIP_IS_E2E3(pdev)) {
}
}
else /* NDIS */
{
const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
}
{
return LM_STATUS_FAILURE ;
}
{
rcq_chain->hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, HC_INDEX_ETH_RX_CQ_CONS);
}
else
{
}
}
return LM_STATUS_SUCCESS;
} /* lm_setup_rcq */
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
DbgBreakMsg(" invalid chain ");
return LM_STATUS_INVALID_PARAMETER;
}
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
return LM_STATUS_SUCCESS;
}
/*
* reset txq, rxq, rcq counters for L2 client connection
*
* assumption: the cid equals the chain idx
*/
{
{
return LM_STATUS_INVALID_PARAMETER;
}
if (MM_DCB_MP_L2_IS_ENABLE(pdev))
{
}
else
{
}
if (cid >= max_eth_cid)
{
return LM_STATUS_INVALID_PARAMETER;
}
/* for this connection, next time we'll load it */
// Regardless the attributes we "clean' the TX status block
{
{
DbgBreakMsg(" Invalid TX chain index ");
return LM_STATUS_INVALID_PARAMETER;
}
/* first set the hw consumer index pointers to null, and only then clear the pkt_idx value
* to avoid a race when servicing interrupt at the same time */
lm_clear_chain_sb_cons_idx(pdev, sb_id, &LM_TXQ(pdev, cid).hc_sb_info, &LM_TXQ(pdev, cid).hw_con_idx_ptr);
}
{
{
DbgBreakMsg(" Invalid RX chain index ");
return LM_STATUS_INVALID_PARAMETER;
}
lm_clear_chain_sb_cons_idx(pdev, sb_id, &LM_RCQ(pdev, cid).hc_sb_info, &LM_RCQ(pdev, cid).hw_con_idx_ptr);
}
//s_list_init(&LM_RXQ(pdev, cid).active_descq, NULL, NULL, 0);
//s_list_init(&LM_RXQ(pdev, cid).free_descq, NULL, NULL, 0);
return LM_STATUS_SUCCESS;
}
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
DbgBreakMsg(" invalid chain ");
return LM_STATUS_INVALID_PARAMETER;
}
{
cid,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
{
cid,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
cid,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
{
(!(POWER_OF_2(bds_per_page))))
{
DbgBreakMsg(" Illegal TPA params");
return LM_STATUS_FAILURE;
}
sizeof(l2_tpa_bd_page_cnt) * BITS_PER_BYTE);
cid,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
return LM_STATUS_SUCCESS;
}
{
CHK_NULL(cli_params) ||
{
return LM_STATUS_INVALID_PARAMETER;
}
mm_memcpy(&pdev->params.l2_cli_con_params[chain_idx], cli_params, sizeof(struct _lm_client_con_params_t));
{
// update rxq_chain strucutre
}
return LM_STATUS_SUCCESS;
}
{
(LM_CLI_IDX_MAX <= lm_cli_idx))
{
DbgBreakMsg(" lm_init_client_con lm_cli_idx has an invalid value");
return LM_STATUS_INVALID_PARAMETER;
}
if (b_alloc)
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
{
/* On allocation, init the clients objects... do this only on allocation, on setup, we'll need
* the info to reconfigure... */
if (!CHIP_IS_E1(pdev))
{
}
if (!CHIP_IS_E1x(pdev))
{
}
}
}
return lm_status;
}
{
0,
{
return LM_STATUS_RESOURCE ;
}
return LM_STATUS_SUCCESS;
}
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
/* alloc the chain */
{
return LM_STATUS_RESOURCE;
}
return LM_STATUS_SUCCESS;
}
{
u8_t i = 0;
{
//Init data
client_init_data_virt = mm_alloc_phys_mem(pdev, mem_size_init, &pdev->client_info[i].client_init_data_phys, 0, LM_RESOURCE_COMMON);
{
return LM_STATUS_RESOURCE ;
}
//update data
client_update_data_virt = mm_alloc_phys_mem(pdev, mem_size_update, &pdev->client_info[i].update.data_phys, 0, LM_RESOURCE_COMMON);
{
return LM_STATUS_RESOURCE ;
}
}
return LM_STATUS_SUCCESS;
}
{
u8_t i = 0;
{
//Init
{
return LM_STATUS_FAILURE ;
}
//update
{
return LM_STATUS_FAILURE ;
}
}
return LM_STATUS_SUCCESS;
}
/**
* @description
* The next-page entries are static and won't be used by the active
* descriptor array or the mask array.
* @param pdev
* @param chain_idx
*
* @return STATIC void
*/
{
u16_t i = 0;
u16_t j = 0;
{
/* clear page-end entries */
for(j = 0; j < lm_bd_chain_bds_skip_eop(bd_chain); j++ )
{
bd_entry++;
}
}
}
/**
* @description
* Clear TPA parameters. TPA can be disabled between NDIS bind and
* unbind, but the RX chain will stay in use.
* @param pdev
* @param cid
*/
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
/***************** SGE chain setup *************************************/
return LM_STATUS_SUCCESS;
}
/**
* @description
*
* @param pdev
* @param cid
*
* @return lm_status_t
*/
{
u16_t i = 0;
/* check arguments */
{
return LM_STATUS_FAILURE;
}
/***************** TPA chain setup ************************************/
{
}
/***************** SGE common setup ************************************/
/***************** SGE chain setup *************************************/
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy,bd_chain->page_cnt, LM_TPA_BD_ELEN_SIZE, /* is full? */0, TRUE);
{
}
/***************** Mask entry prepare *************************************/
{
}
cid);
return LM_STATUS_SUCCESS;
} /* lm_setup_tpa */
{
/* The spq dont have next bd */
pdev->sq_info.sq_chain.bd_left = USABLE_BDS_PER_PAGE(sizeof(struct slow_path_element), TRUE); /* prod == cons means empty chain */
return LM_STATUS_SUCCESS;
}
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt, bd_chain->bd_chain_phy, bd_chain->page_cnt, sizeof(union event_ring_elem), /* is full? */TRUE, TRUE);
return LM_STATUS_SUCCESS;
}
{
ecore_init_mac_credit_pool(pdev, &pdev->slowpath_info.macs_pool, FUNC_ID(pdev), CHIP_IS_E1x(pdev)? VNICS_PER_PORT(pdev) : VNICS_PER_PATH(pdev));
ecore_init_vlan_credit_pool(pdev, &pdev->slowpath_info.vlans_pool, FUNC_ID(pdev), CHIP_IS_E1x(pdev)? VNICS_PER_PORT(pdev) : VNICS_PER_PATH(pdev));
{
}
return LM_STATUS_SUCCESS;
}
/**
* Description:
* allocate slowpath resources
*/
static lm_status_t
{
u8_t i = 0;
{
if (b_alloc)
{
slowpath_data->mac_rdata[i] =
sizeof(*slowpath_data->mac_rdata[i]),
&slowpath_data->mac_rdata_phys[i],
0,
slowpath_data->rx_mode_rdata[i] =
sizeof(*slowpath_data->rx_mode_rdata[i]),
0,
slowpath_data->mcast_rdata[i] =
sizeof(*slowpath_data->mcast_rdata[i]),
0,
}
{
return LM_STATUS_RESOURCE ;
}
}
if (b_alloc)
{
slowpath_data->rss_rdata = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->rss_rdata), &slowpath_data->rss_rdata_phys, 0, LM_RESOURCE_COMMON);
}
{
return LM_STATUS_RESOURCE ;
}
if (b_alloc)
{
slowpath_data->func_start_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->func_start_data), &slowpath_data->func_start_data_phys, 0, LM_RESOURCE_COMMON);
}
{
return LM_STATUS_RESOURCE ;
}
if (b_alloc)
{
slowpath_data->niv_function_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->niv_function_update_data), &slowpath_data->niv_function_update_data_phys, 0, LM_RESOURCE_COMMON);
}
{
return LM_STATUS_RESOURCE ;
}
mm_mem_zero(slowpath_data->niv_function_update_data, sizeof(*slowpath_data->niv_function_update_data));
if (b_alloc)
{
slowpath_data->l2mp_func_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->l2mp_func_update_data), &slowpath_data->l2mp_func_update_data_phys, 0, LM_RESOURCE_COMMON);
}
{
return LM_STATUS_RESOURCE ;
}
if (b_alloc)
{
slowpath_data->encap_function_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->encap_function_update_data), &slowpath_data->encap_function_update_data_phys, 0, LM_RESOURCE_COMMON);
}
{
return LM_STATUS_RESOURCE ;
}
mm_mem_zero(slowpath_data->encap_function_update_data, sizeof(*slowpath_data->encap_function_update_data));
if (b_alloc)
{
slowpath_data->ufp_function_update_data = mm_alloc_phys_mem(pdev, sizeof(*slowpath_data->ufp_function_update_data), &slowpath_data->ufp_function_update_data_phys, 0, LM_RESOURCE_COMMON);
}
{
return LM_STATUS_RESOURCE ;
}
mm_mem_zero(slowpath_data->ufp_function_update_data, sizeof(*slowpath_data->ufp_function_update_data));
return LM_STATUS_SUCCESS ;
}
{
if (!CHIP_IS_E1(pdev))
{
0,
cli_idx);
}
else
{
0,
cli_idx);
}
return ilt_client_page_virt_address;
}
/* Description:
*/
{
// lm_sq_info_t* sq_info = NULL ;
u32_t i = 0 ;
{
return LM_STATUS_INVALID_PARAMETER ;
}
DbgMessage(pdev, INFORMi , "### lm_common_setup_alloc_resc b_is_alloc=%s\n", b_is_alloc ? "TRUE" : "FALSE" );
// Status blocks allocation. We allocate mem both for the default and non-default status blocks
// there is 1 def sb and 16 non-def sb per port.
// non-default sb: index 0-15, default sb: index 16.
if (CHIP_IS_E1x(pdev))
{
}
else
{
}
{
if( b_is_alloc )
{
vars->status_blocks_arr[sb_id].host_hc_status_block.e1x_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
if (CHIP_IS_E1x(pdev))
{
vars->status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
vars->status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
}
else
{
vars->status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
vars->status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
}
}
{
return LM_STATUS_RESOURCE ;
}
}
if( b_is_alloc )
{
0,
}
{
return LM_STATUS_RESOURCE ;
}
/* Now reset the status-block ack values back to zero. */
/* Register common and ethernet connection types completion callback. */
/* SlowPath Info */
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
/* Client Info */
if( b_is_alloc )
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
// Context (roundup ( MAX_CONN / CONN_PER_PAGE) We may configure the CDU to have more than max_func_connections, specifically, we will
// configure the CDU to have max_port_connections since it is a per-port register and not per-func, but it is OK to allocate
// less for the cdu, and allocate only what will be used in practice - which is what is configured in max_func_connectinos.
alloc_num = vars->context_cdu_num_pages = (params->max_func_connections / params->num_context_in_page) +
//TODO: optimize the roundup
//TODO: assert that we did not go over the limit
// allocate buffer pointers
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if ( b_is_alloc )
{
}
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if ( b_is_alloc )
{
}
/* TBD: for starters, we'll just allocate each page seperatly, to save space in the future, we may want */
for( i = 0 ;i < alloc_num; i++)
{
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
}
// Searcher T1 (roundup to log2 of 64*MAX_CONN), T2 is 1/4 of T1. The searcher has a 'per-function' register we configure
// with the number of max connections, therefore, we use the max_func_connections. It can be different per function and independent
alloc_num = vars->searcher_t1_num_pages = max((alloc_size / params->ilt_client_page_size),(u32_t)1);
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if ( b_is_alloc )
{
}
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if ( b_is_alloc )
{
}
for( i = 0 ; i < alloc_num; i++ )
{
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
}
// allocate searcher T2 table
// T2 does not entered into the ILT)
if ( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if (b_is_alloc)
{
}
if (b_is_alloc)
{
}
{
return LM_STATUS_RESOURCE ;
}
for( i = 0 ; i < alloc_num; i++)
{
if (b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
}
// Timer block array (MAX_CONN*8) phys uncached. Timer block has a per-port register that defines it's size, and the amount of
// memory we allocate MUST match this number, therefore we have to allocate the amount of max_port_connections.
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if ( b_is_alloc )
{
}
if ( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if ( b_is_alloc )
{
}
for( i = 0 ;i < alloc_num; i++)
{
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
}
// QM queues (128*MAX_CONN) QM has a per-port register that defines it's size, and the amount of
// memory we allocate MUST match this number, therefore we have to allocate the amount of max_port_connections.
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if (b_is_alloc)
{
}
if( b_is_alloc )
{
}
{
return LM_STATUS_RESOURCE ;
}
else if (b_is_alloc)
{
}
for( i=0 ;i < alloc_num; i++)
{
if (b_is_alloc)
{
}
{
return LM_STATUS_RESOURCE ;
}
}
// common scratchpad buffer for dmae copies of less than 4 bytes
if( b_is_alloc )
{
8,
0,
{
return LM_STATUS_RESOURCE ;
}
}
return LM_STATUS_SUCCESS ;
}
{
pdev->ecore_info.gunzip_buf = mm_alloc_phys_mem(pdev, FW_BUF_SIZE, &pdev->ecore_info.gunzip_phys, PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON);
{
return LM_STATUS_RESOURCE ;
}
return LM_STATUS_SUCCESS;
}
/**lm_dmae_resc_alloc
* Allocate and initialize the TOE and default DMAE contexts.
* The statistics DMAE context is set-up in lm_stats_alloc_resc.
*
* @param pdev the device to use.
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* failure code on failure.
*/
{
//allocate and initialize the default DMAE context (used for init, WB access etc...)
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
{
return LM_STATUS_FAILURE;
}
//allocate and initialize the TOE DMAE context
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
TRUE);
{
return LM_STATUS_FAILURE;
}
return lm_status;
}
/* Description:
* This routine is called during driver initialization. It is responsible
* for allocating memory resources needed by the driver for common init.
* This routine calls the following mm routines:
* mm_alloc_mem, mm_alloc_phys_mem, and mm_init_packet_desc. */
{
{
return LM_STATUS_INVALID_PARAMETER ;
}
#ifdef VF_INVOLVED
if (LM_STATUS_SUCCESS != lm_status)
return lm_status;
}
#endif
// Cleaning after driver unload
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
/* alloc forward chain */
{
/* Allocate Event-Queue: only the pf has an event queue */
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
}
{
}
#ifdef VF_INVOLVED
else
{
}
#endif
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
// Init context allocation system
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
// CAM mirror?
/* alloc for ecore */
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
}
else if (IS_CHANNEL_VFDEV(pdev))
{
// Init context allocation system
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
}
/* FIXME: (MichalS : should be called by um, but this requires lm-um api, so should rethink...) */
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
return lm_setup_resc(pdev);
}
/* Description:
* This routine is called during driver initialization. It is responsible
* for initializing memory resources needed by the driver for common init.
* This routine calls the following mm routines:
* mm_alloc_mem, mm_alloc_phys_mem, and mm_init_packet_desc. */
{
u32_t i = 0 ;
u32_t j = 0 ;
{
return LM_STATUS_INVALID_PARAMETER ;
}
mm_mem_zero(rx_info->appr_mc.mcast_add_hash_bit_array, sizeof(rx_info->appr_mc.mcast_add_hash_bit_array));
// adjust the FWD Tx ring consumer - default sb
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
}
/* setup mac flitering to drop all for all clients */
// lm_status = lm_setup_tstorm_mac_filter(pdev); FIXME - necessary??
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
}
}
#ifdef VF_INVOLVED
else {
}
#endif
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
/* Only pfdev has an event-queue */
{
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
}
// Initialize T1
for( i = 0 ; i < vars->searcher_t1_num_pages ; i ++)
{
}
// Initialize T2 first we make each next filed point to its address +1 then we fixup the edges
for(i=0 ; i < vars->searcher_t2_num_pages ; i ++)
{
{
*(u64_t*)((char*)vars->searcher_t2_virt_addr_table[i]+j+56) = vars->searcher_t2_phys_addr_table[i].as_u64+j+64; //64bit pointer
}
// now fix up the last line in the block to point to the next block
{
// this is not the last block
*(u64_t*)((char*)vars->searcher_t2_virt_addr_table[i]+j) = vars->searcher_t2_phys_addr_table[i+1].as_u64; //64bit pointer
}
}
for( i=0 ;i < vars->timers_linear_num_pages; i++)
{
}
#if defined(EMULATION_DOORBELL_FULL_WORKAROUND)
#endif
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
// init_context
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
else if (IS_CHANNEL_VFDEV(pdev))
{
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
return lm_status;
}
/**
* @description
* Indicate packets from the free descriptor list and the given list.
* @param pdev
* @param packet_list - a list of packets to indicate.
* @param idx - chain index.
* @param is_stat_handle - whether a statistics update is needed.
*/
STATIC void
{
for(; ;)
{
// Run on all the free list
{
break;
}
if(is_stat_handle)
{
}
#endif
}
if (!s_list_is_empty(packet_list))
{
#endif
}
}
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
const lm_abort_op_t abort_op,
{
u16_t i = 0;
switch(abort_op)
{
case ABORT_OP_RX_CHAIN:
{
// Verify BD's consistent
/* indicate packets from the active descriptor list */
for(; ;)
{
{
break;
}
if( rx_chain_sge )
{
}
// if in shutdown flow or not if in d3 flow ?
if (abort_op == ABORT_OP_INDICATE_RX_CHAIN)
{
#endif
}
else
{
}
}
if ( ABORT_OP_INDICATE_RX_CHAIN == abort_op )
{
/* indicate packets from the free descriptor list */
idx,
TRUE);
}
} // ABORT_OP_INDICATE_RX_CHAIN
// Fall Through
case ABORT_OP_TPA_CHAIN:
{
/* indicate packets from the active descriptor list */
{
// Run on all the valid active descriptor
// Valid active descriptors can only be beteen the consumer to the producers
{
{
DbgBreakMsg(" Packet is null suppose to be null");
continue;
}
// if in shutdown flow or not if in d3 flow ?
if ((abort_op == ABORT_OP_INDICATE_TPA_CHAIN) ||
{
#if (DBG)
/************start TPA debbug code******************************/
/************end TPA debbug code********************************/
#endif //DBG
#endif
}
else
{
}
}
}
if ((abort_op == ABORT_OP_INDICATE_TPA_CHAIN) ||
{
#if (DBG)
/************start TPA debbug code******************************/
// Total packet aborted
tpa_chain->dbg_params.pck_ret_abort += s_list_entry_cnt(&packet_list) + s_list_entry_cnt(&rx_common->free_descq);
{
DbgBreakMsg("VBD didn't return all packets this chain ");
}
/************end TPA debbug code******************************/
#endif //DBG
/* indicate packets from the free descriptor list */
idx,
FALSE);
#if (DBG)
/************start TPA debbug code******************************/
// make sure all packets were abort
{
DbgBreakMsg("VBD didn't return all packets this chain ");
}
/************end TPA debbug code******************************/
#endif //DBG
}
break;
} // ABORT_OP_INDICATE_TPA_CHAIN
{
for(; ;)
{
{
break;
}
/* return coalesce buffer to the chain's pool */
}
}
if (!s_list_is_empty(&packet_list))
{
}
// changed from pdev->params.l2_tx_bd_page_cnt[idx] to pdev->params.l2_tx_bd_page_cnt[0]
break;
} // ABORT_OP_INDICATE_TX_CHAIN
default:
{
DbgBreakMsg("unknown abort operation.\n");
break;
}
} //switch
} /* lm_abort */
#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"
{
switch(chip_num)
{
case CHIP_NUM_5710:
break;
case CHIP_NUM_5711:
case CHIP_NUM_5711E:
break;
case CHIP_NUM_5712:
case CHIP_NUM_5713:
case CHIP_NUM_5712E:
case CHIP_NUM_5713E:
case CHIP_NUM_57800:
case CHIP_NUM_57810:
case CHIP_NUM_57840_4_10:
case CHIP_NUM_57840_2_20:
case CHIP_NUM_57840_OBSOLETE:
case CHIP_NUM_57811:
break;
default:
return -1; // for now not supported, can't have all three...
}
return 0;
}