/* lm_vf.c revision d14abf155341d55053c76eeec58b787a456b753b */
#ifdef VF_INVOLVED
#include "lm5710.h"
#include "command.h"
#include "igu_def.h"
/* NOTE(review): this extract is truncated — the signature for this body is not
 * visible. From the code itself: returns a boolean that is true when the VF's
 * leading RSS connection (looked up through the parent PF's connection table
 * via VF_TO_PF_CID) is in any state other than LM_CON_STATE_CLOSE. */
{
res = (PFDEV(pdev)->vars.connections[VF_TO_PF_CID(pdev,LM_SW_LEADING_RSS_CID(pdev))].con_state != LM_CON_STATE_CLOSE);
if (res) {
/* NOTE(review): branch body is missing from this view — truncated extract. */
}
return res;
}
/* Body of lm_vf_get_free_sbs (per the debug strings below): asks the PF's
 * free-SB pool for an unused status-block index for a VF, searching from
 * params.base_fw_ndsb. 0xff is the "none available" sentinel.
 * NOTE(review): signature line missing from this extract. */
{
free_sb = lm_vf_get_free_resource(pf_dev->pf_resources.free_sbs, pf_dev->params.base_fw_ndsb, max_num, num_rss);
if (free_sb != 0xff) {
DbgMessage3(pf_dev,FATAL,"lm_vf_get_free_sbs(%d-%d): %d\n",pf_dev->params.base_fw_ndsb, max_num, free_sb);
} else {
DbgMessage2(pf_dev,FATAL,"lm_vf_get_free_sbs(%d-%d): No more free SBs\n",pf_dev->params.base_fw_ndsb, max_num);
}
return free_sb;
}
/* Body of lm_vf_get_free_clients (per the debug strings below): allocates an
 * unused firmware client id for a VF from the PF's free-client pool, searching
 * from params.base_fw_client_id. 0xff means no client id is available.
 * NOTE(review): signature line missing from this extract. */
{
free_cli = lm_vf_get_free_resource(pf_dev->pf_resources.free_clients, pf_dev->params.base_fw_client_id, max_num, num_rss);
if (free_cli != 0xff) {
DbgMessage3(pf_dev,FATAL,"lm_vf_get_free_clients(%d-%d): %d\n",pf_dev->params.base_fw_client_id, max_num, free_cli);
} else {
DbgMessage2(pf_dev,FATAL,"lm_vf_get_free_clients(%d-%d): No more free clients\n",pf_dev->params.base_fw_client_id, max_num);
}
return free_cli;
}
/* Body of lm_vf_get_free_stats (per the debug strings below): computes the
 * [min_num, max_num) window of statistics-counter ids that belongs to this
 * vnic by evenly dividing the counters left over after the per-port vnics
 * (MAX_NUM_OF_STATS - vnics_per_port) among the vnics.
 * NOTE(review): signature missing, and the line that assigns free_st_id is
 * not visible in this extract — only the result check and return survive. */
{
u8_t min_num = pf_dev->params.vnics_per_port + VNIC_ID(pf_dev) * ((MAX_NUM_OF_STATS - pf_dev->params.vnics_per_port) / pf_dev->params.vnics_per_port);
u8_t max_num = min_num + (MAX_NUM_OF_STATS - pf_dev->params.vnics_per_port) / pf_dev->params.vnics_per_port;
if (free_st_id != 0xff) {
} else {
DbgMessage3(pf_dev,FATAL,"lm_vf_get_free_stats: No more free stats counters(%d,%d)\n",min_num,max_num);
DbgMessage1(pf_dev,FATAL,"lm_vf_get_free_stats: vnic_per_port is %d)\n",pf_dev->params.vnics_per_port);
}
return free_st_id;
}
/* NOTE(review): truncated body — signature and the assignment of
 * free_cam_offset are not visible. Follows the same shape as the other
 * lm_vf_get_free_* helpers: 0xff is the "none available" sentinel. */
{
if (free_cam_offset != 0xff) {
} else {
}
return free_cam_offset;
}
/* Placeholder body: no VF-specific preparation is done yet (see TODO);
 * unconditionally reports success. Signature missing from this extract. */
{
/* TODO: anything else to prepare for VF? */
return LM_STATUS_SUCCESS;
}
/* NOTE(review): truncated body — only a debug print of a BAR address/size pair
 * and the success return remain; the signature and any real work are missing. */
{
DbgMessage2(pdev, FATAL, "A: 0x%x, S: 0x%x\n", bar_addr->as_u32.low, pdev->hw_info.bar_size[bar_num]);
return LM_STATUS_SUCCESS;
}
/* NOTE(review): badly truncated fragment. The inner body validates the ME
 * register's VF-valid bit, but as shown BOTH paths return LM_STATUS_FAILURE,
 * making the vf_num_in_pf computation below unreachable — almost certainly
 * lines were lost between L58 and L60 (the success path and a second
 * function's opening). Left byte-identical; do not trust control flow here. */
{
if (!(val & ME_REG_VF_VALID)) {
return LM_STATUS_FAILURE;
}
return LM_STATUS_FAILURE;
}
/* Derives the VF's index within its PF from its path-wide index and the PF's
 * first-VF offset (SR-IOV capability info). */
pdev->params.vf_num_in_pf = pdev->params.vf_num_in_path - PFDEV(pdev)->hw_info.sriov_info.first_vf_in_pf;
DbgMessage2(pdev, FATAL, "vf_num_in_path=%d vf_num_in_pf=%d\n", pdev->params.vf_num_in_path, pdev->params.vf_num_in_pf);
return LM_STATUS_SUCCESS;
}
/* Unimplemented placeholder: always succeeds. Signature missing from extract. */
{
// TODO
return LM_STATUS_SUCCESS;
}
/* NOTE(review): truncated PF-side init fragment (brace count is unbalanced in
 * this view). On success it marks the PF's own leading SB and client as "in
 * use" in the free-resource bitmaps so they can never be handed out to a VF.
 * The code that sets lm_status and the per-rss loop header are missing. */
{
/* TODO: what HW needs to be initialized at this stage */
/* TODO: VF Database for FLR needs? */
#ifndef _VBD_CMD_
#endif
if (lm_status == LM_STATUS_SUCCESS) {
lm_vf_acquire_resource(pf_dev->pf_resources.free_sbs, LM_FW_SB_ID(pf_dev, RSS_ID_TO_SB_ID(rss_id)), 1);
DbgMessage2(pf_dev, FATAL, "SB%d is allocated for PF[%d] itself\n", LM_FW_SB_ID(pf_dev, RSS_ID_TO_SB_ID(rss_id)), FUNC_ID(pf_dev));
lm_vf_acquire_resource(pf_dev->pf_resources.free_clients, LM_FW_CLI_ID(pf_dev, RSS_ID_TO_CID(rss_id)), 1);
DbgMessage2(pf_dev, FATAL, "Client%d is allocated for PF[%d] itself\n", LM_FW_CLI_ID(pf_dev, RSS_ID_TO_CID(rss_id)), FUNC_ID(pf_dev));
}
DbgMessage2(pf_dev, FATAL, "Stats%d is allocated for PF[%d] itself\n", LM_STATS_CNT_ID(pf_dev), FUNC_ID(pf_dev));
}
return lm_status;
}
/* NOTE(review): truncated VF-disable fragment (unbalanced braces in this
 * view). Per the debug string it clears VF connections in [start_cid,
 * end_cid); after an FLR it returns early with success. The comments about
 * pretending to MCP and the clear-on-write PGLUE_B register survive, but the
 * actual register writes are missing or commented out. */
{
/* TODO: Clean VF Database for FLR needs? */
DbgMessage2(pf_dev, FATAL, "vf disable: clear VFs connections from %d till %d\n",start_cid, end_cid);
}
#ifndef _VBD_CMD_
#endif
if (lm_is_function_after_flr(pf_dev)) {
return LM_STATUS_SUCCESS;
}
/* if MCP does not exist for each vf in pf, need to pretend to it and disable igu vf_msix and internal vfid enable bit */
}
/* This is a clear-on-write register, therefore we actually write 1 to the bit we want to reset */
//REG_WR(pf_dev, PGLUE_B_REG_DISABLE_FLR_SRIOV_DISABLED, PGLUE_B_DISABLE_FLR_SRIOV_DISABLED_REG_DISABLE_SRIOV_DISABLED_REQUEST);*/
}
return lm_status;
}
/* Description:
*/
/* Body of a VF resource alloc/setup routine (the debug string names it
 * lm_common_setup_alloc_resc). When b_is_alloc is set it allocates physical
 * memory for each E2 status block and records its DMA address in the per-SB
 * hc_status_block_data; otherwise it only re-initializes existing resources.
 * NOTE(review): truncated — the signature, the parameter-validation condition
 * guarding the INVALID_PARAMETER return, the per-SB loop header, the
 * NULL-check guarding the RESOURCE return, and the calls that set lm_status
 * are all missing from this extract. The bare `{ ... }` blocks below lost
 * their `if`/`for` headers. */
{
u32_t alloc_size = 0 ;
u8_t mm_cli_idx = 0 ;
/* NOTE(review): missing condition — originally a parameter sanity check. */
{
return LM_STATUS_INVALID_PARAMETER ;
}
DbgMessage1(pdev, FATAL , "### VF lm_common_setup_alloc_resc b_is_alloc=%s\n", b_is_alloc ? "TRUE" : "FALSE" );
// Status blocks allocation. We allocate mem both for the default and non-default status blocks
// there is 1 def sb and 16 non-def sb per port.
// non-default sb: index 0-15, default sb: index 16.
/* NOTE(review): missing loop header — iterates sb_id over the VF's SBs. */
{
if( b_is_alloc )
{
/* Allocate the E2 status block and publish its DMA address to the
 * host_sb_addr field firmware reads. */
pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
}
/* NOTE(review): missing condition — originally an allocation NULL check. */
{
return LM_STATUS_RESOURCE ;
}
}
/* SlowPath Info */
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (b_is_alloc)
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return LM_STATUS_SUCCESS;
}
/* NOTE(review): truncated fragment. The expression on the second code line
 * computes this connection's doorbell address in BAR 0 (index * DQ cell size
 * plus the VF doorbell offset) but, as shown, it is a statement with no
 * effect — the lvalue it was assigned to was lost in extraction. */
{
if (lm_status == LM_STATUS_SUCCESS) {
(volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_0] + index*LM_DQ_CID_SIZE + VF_BAR0_DB_OFFSET);
}
}
return lm_status;
}
//static vf_info_t tmp_vf_info;
/* Body of lm_vf_allocate_resc_in_pf (per the debug string): reserves the
 * VF's firmware resources — sb_cnt status blocks, sb_cnt client ids and one
 * CAM offset — in the parent PF's free-resource bitmaps, starting at the
 * base ids previously chosen for this VF.
 * NOTE(review): the signature, the failure condition that pairs with the
 * `else`, and the final DbgMessage call whose argument list survives at the
 * end are all truncated in this extract. */
{
DbgMessage2(pdev,FATAL,"lm_vf_allocate_resc_in_pf: VF %d requests resources from PF %d\n",ABS_VFID(pdev),FUNC_ID(pdev));
} else {
lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.free_sbs, pdev->params.base_fw_ndsb, pdev->params.sb_cnt);
lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.free_clients, pdev->params.base_fw_client_id, pdev->params.sb_cnt);
lm_vf_acquire_resource(PFDEV(pdev)->pf_resources.free_cam_offsets, pdev->params.base_cam_offset, 1);
}
/* For now, qzone_id == sb_id, but this is not a requirement */
}
pdev->params.base_fw_client_id, pdev->params.base_fw_ndsb, pdev->params.base_cam_offset, pdev->params.base_fw_stats_id);
return lm_status;
}
/* VF chip-init fragment: programs the VF's slow-path queue (SPQ) in XSTORM
 * fast memory via GRC — zeroes the per-VF SPQ data area (firmware no longer
 * does this for VFs), writes the SPQ page base (low then high dword) and the
 * producer index — then registers the VF's parent PF id in all four STORMs
 * to enable the function.
 * NOTE(review): signature missing; the call that sets lm_status before the
 * error check is also not visible in this extract. */
{
u8_t i;
// Init SPQ
/* Driver should zero the slow path queue data before enabling the function in XSTORM.
Until now firmware was doing this but it cannot scale for VFs, so this zeroing was removed from firmware.
The driver should write zeros to XSTORM_SPQ_DATA_OFFSET(function).
The size of this structure is given in XSTORM_SPQ_DATA_SIZE.
For VFs, the XSTORM_VF_SPQ_DATA_OFFSET(vfid) should be used. To do it via GRC is preferrable */
for (i = 0; i < XSTORM_SPQ_DATA_SIZE/sizeof(u32_t); i++) {
REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + XSTORM_VF_SPQ_DATA_OFFSET(ABS_VFID(pdev)) + i*sizeof(u32_t),0);
}
REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PAGE_BASE_OFFSET(ABS_VFID(pdev))),pdev->sq_info.sq_chain.bd_chain_phy.as_u32.low);
REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PAGE_BASE_OFFSET(ABS_VFID(pdev)) + 4),pdev->sq_info.sq_chain.bd_chain_phy.as_u32.high);
REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PROD_OFFSET(ABS_VFID(pdev))),pdev->sq_info.sq_chain.prod_idx);
if(LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
/*
Enable the function in STORMs
*/
LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_XSTRORM_INTMEM);
LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_USTRORM_INTMEM);
return LM_STATUS_SUCCESS;
}
/* Body of lm_vf_chip_reset (per the debug string): when a global reset is in
 * progress but the VF itself is not in FLR, forces the VF's leading RSS
 * connection (in the PF's connection table) to CLOSE so its resources can be
 * recycled, then returns early. The STORM-disable path below the early
 * return is truncated in this extract. */
{
if (lm_reset_is_inprogress(pdev)) {
if (!lm_vf_fl_reset_is_inprogress(pdev)) {
PFDEV(pdev)->vars.connections[VF_TO_PF_CID(pdev,LM_SW_LEADING_RSS_CID(pdev))].con_state = LM_CON_STATE_CLOSE;
DbgMessage1(pdev,FATAL,"lm_vf_chip_reset: recycle resources (including connection) for VF(%d)\n",ABS_VFID(pdev));
}
return lm_status;
}
/*
Disable the function in STORMs
*/
return lm_status;
}
/* Mirror of the allocation path: returns this VF's status blocks, client ids
 * and CAM offset to the parent PF's free-resource bitmaps.
 * NOTE(review): signature and the condition enclosing these releases are
 * missing from this extract (note the unmatched closing brace). */
{
lm_vf_release_resource(PFDEV(pdev)->pf_resources.free_sbs, pdev->params.base_fw_ndsb, pdev->params.sb_cnt);
lm_vf_release_resource(PFDEV(pdev)->pf_resources.free_clients, pdev->params.base_fw_client_id, pdev->params.sb_cnt);
lm_vf_release_resource(PFDEV(pdev)->pf_resources.free_cam_offsets, pdev->params.base_cam_offset, 1);
}
return lm_status;
}
/* NOTE(review): truncated fragment that clears a PGLUE_B "was_error"
 * indication for this function. The switch selects the per-group was_error
 * register from was_err_num, but the case bodies that assign was_err_reg
 * were lost in extraction — only the default (error) arm survives. The
 * register is clear-on-write: writing 1 to the function's bit resets it. */
{
/* NOTE(review): opening comment lines of the original block — the pretend
 * mechanism routes GRC accesses as the VF. */
* Need to use Pretend in order to do this. Note: once we do pretend
* all accesses to SPLIT-68 will be done as if-vf...
* Bits. Bits [13:10] - Reserved. Bits [9:4] - VFID. Bits [3] - VF valid. Bits [2:0] - PFID.
*/
if (lm_status == LM_STATUS_SUCCESS) {
switch (was_err_num) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
break;
default:
was_err_reg = 0;
DbgBreak();
}
if (was_err_reg) {
REG_WR(PFDEV(pdev), was_err_reg, was_err_value); /* PglueB - Clear the was_error indication of the relevant function*/
}
/* IGU Initializations */
}
} else {
}
return lm_status;
}
/* NOTE(review): truncated IGU non-default SB init fragment. Computes the
 * producer base for this SB — per-SB segment count depends on whether the
 * IGU runs in backward-compatible mode (IGU_BC_NDSB_NUM_SEGS) or normal mode
 * (IGU_NORM_NDSB_NUM_SEGS) — then loops over the segments; the per-segment
 * writes and the consumer-update writes were lost in extraction. */
{
u8_t i;
/* Need to use pretend for VF */
}
/* set Parent PF */
num_segs = (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC)? IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
prod_idx = (IGU_BASE_NDSB(pdev) + sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
for (i = 0; i < num_segs;i++) {
}
/* Give Consumer updates with value '0' */
}
return status;
}
/* NOTE(review): truncated IGU VF-configuration disable fragment. The
 * surviving RESET_FLAGS clears the VF's MSI/MSI-X enable, single-ISR,
 * function-enable and parent-PF bits in the IGU VF config value; the read
 * and write-back of that register were lost in extraction (the early
 * success return above it belongs to a different, missing condition). */
{
/* Need to use pretend for VF */
return LM_STATUS_SUCCESS;
}
/* disable both bits, for INTA, MSI and MSI-X. */
RESET_FLAGS(val, (IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK));
}
/* NOTE(review): two fully-emptied bodies (contents lost in extraction)
 * followed by a fragment that returns a VF-FLR-in-progress flag; the code
 * that computes vf_flr_inprogess and the if-body are also missing. */
{
}
{
}
{
if (vf_flr_inprogess) {
}
return vf_flr_inprogess;
}
/* Three unimplemented stubs (signatures lost in extraction): each traps in
 * debug builds via DbgBreak and returns 0. */
{
DbgBreak();
return 0;
}
{
DbgBreak();
return 0;
}
{
DbgBreak();
return 0;
}
/* lm_vf_pf_set_q_filters
 * VF->PF channel request to program queue filters (filter_type selects the
 * kind; pbuf/buf_len carry the filter data, vlan_tag and set_mac qualify it).
 * Not implemented in this build: traps in debug builds and always reports
 * LM_STATUS_FAILURE. Parameters are otherwise unused. */
lm_status_t lm_vf_pf_set_q_filters(struct _lm_device_t * pdev, u8 vf_qid, u8_t to_indicate, q_filter_type filter_type, u8_t * pbuf, u32_t buf_len, u16_t vlan_tag, u8_t set_mac)
{
DbgBreak();
return LM_STATUS_FAILURE;
}
/* lm_vf_pf_set_q_filters_list
 * List variant of lm_vf_pf_set_q_filters (filters supplied as a d_list_t).
 * Not implemented in this build: traps in debug builds and always reports
 * LM_STATUS_FAILURE. Parameters are otherwise unused. */
lm_status_t lm_vf_pf_set_q_filters_list(struct _lm_device_t * pdev, u8 vf_qid, u8_t to_indicate, q_filter_type filter_type, d_list_t * pbuf, u16_t vlan_tag, u8_t set_mac)
{
DbgBreak();
return LM_STATUS_FAILURE;
}
/* Three more unimplemented stubs (signatures lost in extraction): each traps
 * in debug builds via DbgBreak and reports LM_STATUS_FAILURE. */
{
DbgBreak();
return LM_STATUS_FAILURE;
}
{
DbgBreak();
return LM_STATUS_FAILURE;
}
{
DbgBreak();
return LM_STATUS_FAILURE;
}
#endif