lm_hw_init_reset.c revision d14abf155341d55053c76eeec58b787a456b753b
/*******************************************************************************
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Copyright 2014 QLogic Corporation
* The contents of this file are subject to the terms of the
* QLogic End User License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the License at
* See the License for the specific language governing permissions
* and limitations under the License.
*
*
* Module Description:
* This file contains functions that handle chip init and reset
*
******************************************************************************/
#include "lm5710.h"
#include "command.h"
#include "bd_chain.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"
// the phys address is shifted right 12 bits and has a 1 (=valid) bit added as the 53rd bit
// then since this is a wide register(TM) we split it into two 32 bit writes
do { \
if (CHIP_IS_E1(pdev)) { \
REG_WR(pdev,(PORT_ID(pdev) ? PXP2_REG_PSWRQ_##blk##1_L2P: PXP2_REG_PSWRQ_##blk##0_L2P),((last)<<10 | (first))); \
} else { \
} \
} while(0)
/* offset valid
e1,e1h,e2,e3 save / restore */
typedef enum {
LM_RESET_NIG_OP_SAVE = 0,
typedef struct _lm_nig_save_restore_data_t
{
struct {
} reg_valid; /* 1 if valid for chip 0 o/`w */
{
}
{
}
{
}
/**
* @Description
* This function checks if there is optionally an attention
* pending that is recoverable. If it is, then we won't
* assert in the locations that call reset_is_inprogress,
* because there's a high probability we'll overcome the
* error with recovery
* @param pdev
*
* @return u8_t
*/
{
{
return FALSE;
}
}
{
return reset_in_progress;
}
/*
*------------------------------------------------------------------------
* FLR in progress handling -
*-------------------------------------------------------------------------
*/
{
{
}
else
{
}
return;
}
{
return;
}
{
}
{
if (is_after_flr)
{
{
}
else
{
}
}
return is_after_flr;
}
{
u16_t pretend_value = 0;
u32_t cleanup_complete = 0;
u32_t pcie_caps_offset = 0;
#endif
struct sdm_op_gen final_cleanup;
// TODO - use here pdev->vars.clk_factor
if (CHIP_REV_IS_EMUL(pdev))
{
}
else if (CHIP_REV_IS_FPGA(pdev))
{
}
else
{
factor = 1;
}
{
/* Re-enable target PF read access */
/*Poll on CFC per-pf usage-counter until its 0*/
pdev->flr_stats.cfc_usage_counter = REG_WAIT_VERIFY_VAL(pdev, CFC_REG_NUM_LCIDS_INSIDE_PF, 0, wait_ms);
DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed CFC per pf usage counter\n",pdev->flr_stats.cfc_usage_counter,DEFAULT_WAIT_INTERVAL_MICSEC);
//return LM_STATUS_FAILURE;
/* Poll on DQ per-pf usage-counter (until full dq-cleanup is implemented) until its 0*/
DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed DQ per pf usage counter\n", pdev->flr_stats.dq_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
/* Poll on QM per-pf usage-counter until its 0*/
pdev->flr_stats.qm_usage_counter = REG_WAIT_VERIFY_VAL(pdev, QM_REG_PF_USG_CNT_0 + 4*FUNC_ID(pdev),0, wait_ms);
DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed QM per pf usage counter\n", pdev->flr_stats.qm_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
/* Poll on TM per-pf-usage-counter until its 0 */
pdev->flr_stats.tm_vnic_usage_counter = REG_WAIT_VERIFY_VAL(pdev, TM_REG_LIN0_VNIC_UC + 4*PORT_ID(pdev),0, wait_ms);
pdev->flr_stats.tm_num_scans_usage_counter = REG_WAIT_VERIFY_VAL(pdev, TM_REG_LIN0_NUM_SCANS + 4*PORT_ID(pdev),0, wait_ms);
pdev->flr_stats.dmae_cx = REG_WAIT_VERIFY_VAL(pdev, lm_dmae_idx_to_go_cmd(DMAE_WB_ACCESS_FUNCTION_CMD(FUNC_ID(pdev))), 0, wait_ms);
pdev->flr_stats.tm_num_scans_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC, DMAE_WB_ACCESS_FUNCTION_CMD(FUNC_ID(pdev)));
}
else
{
/*
VF FLR only part
a. Wait until there are no pending ramrods for this VFid in the PF DB. - No pending VF's pending ramrod. It's based on "FLR not during driver load/unload".
What about set MAC?
b. Send the new "L2 connection terminate" ramrod for each L2 CID that was used by the VF,
including sending the doorbell with the "terminate" flag. - Will be implemented in FW later
c. Send CFC delete ramrod on all L2 connections of that VF (set the CDU-validation field to "invalid"). - part of FW cleanup. VF_TO_PF_CID must initialized in
PF CID array*/
/* 3. Poll on the DQ per-function usage-counter until it's 0. */
if (lm_status == LM_STATUS_SUCCESS)
{
pdev->flr_stats.dq_usage_counter = REG_WAIT_VERIFY_VAL(PFDEV(pdev), DORQ_REG_VF_USAGE_CNT, 0, wait_ms);
DbgMessage(pdev, FATAL, "%d*%dms waiting for DQ per vf usage counter\n", pdev->flr_stats.dq_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
}
else
{
return lm_status;
}
}
/* 4. Activate the FW cleanup process by activating AggInt in the FW with GRC. Set the bit of the relevant function in the AggInt bitmask,
to indicate to the FW which function is being cleaned. Wait for the per-function completion indication in the Cstorm RAM
*/
cleanup_complete = 0xFFFFFFFF;
LM_INTMEM_READ32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),&cleanup_complete, BAR_CSTRORM_INTMEM);
if (cleanup_complete)
{
DbgBreak();
}
final_cleanup.command = (XSTORM_AGG_INT_FINAL_CLEANUP_INDEX << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM;
final_cleanup.command |= (XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE;
final_cleanup.command |= (function_for_clean_up << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX;
pdev->flr_stats.final_cleanup_complete = REG_WAIT_VERIFY_VAL(PFDEV(pdev), BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up), 1, wait_ms);
DbgMessage(pdev, FATAL, "%d*%dms waiting for final cleanup compete\n", pdev->flr_stats.final_cleanup_complete, DEFAULT_WAIT_INTERVAL_MICSEC);
/* Lets cleanup for next FLR final-cleanup... */
LM_INTMEM_WRITE32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),0, BAR_CSTRORM_INTMEM);
/* 5. ATC cleanup. This process will include the following steps (note that ATC will not be available for phase2 of the
integration and the following should be added only in phase3):
a. Optionally, wait 2 ms. This is not a must. The driver can start polling (next steps) immediately,
but take into account that it may take time till the done indications will be set.
b. Wait until INVALIDATION_DONE[function] = 1
c. Write-clear INVALIDATION_DONE[function] */
/* 6. Verify PBF cleanup. Do the following for all PBF queues (queues 0,1,4, that will be indicated below with N):
a. Make sure PBF command-queue is flushed: Read pN_tq_occupancy. Let's say that the value is X.
This number indicates the number of occupied transmission-queue lines.
Poll on pN_tq_occupancy and pN_tq_lines_freed_cnt until one of the following:
i. pN_tq_occupancy is 0 (queue is empty). OR
ii. pN_tq_lines_freed_cnt equals has advanced (cyclically) by X (all lines that were in the queue were processed). */
{
switch (idx)
{
case 0:
pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT;
break;
case 1:
pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT;
break;
case 2:
pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY;
pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT;
break;
}
DbgMessage(pdev, FATAL, "TQ_LINES_FREED_CNT[%d]: s:%x\n", (idx == 2) ? 4 : idx, tq_freed_cnt_start);
{
{
}
else
{
DbgBreak();
break;
}
}
}
/* b. Make sure PBF transmission buffer is flushed: read pN_init_crd once and keep it in variable Y.
Read pN_credit and keep it in X. Poll on pN_credit and pN_internal_crd_freed until one of the following:
i. (Y - pN_credit) is 0 (transmission buffer is empty). OR
ii. pN_internal_crd_freed_cnt has advanced (cyclically) by Y-X (all transmission buffer lines that were occupied were freed).*/
{
u32_t pbf_reg_pN_init_crd = 0;
u32_t pbf_reg_pN_credit = 0;
switch (idx)
{
case 0:
pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT;
break;
case 1:
pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT;
break;
case 2:
pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT;
break;
}
inernal_freed_crd_last = inernal_freed_crd_start = REG_RD(PFDEV(pdev), pbf_reg_pN_internal_crd_freed);
DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: s:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_start);
while ((credit_last != init_crd)
{
{
}
else
{
DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: c:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_last);
DbgBreak();
break;
}
}
}
/* 7. Wait for 100ms in order to make sure that the chip is clean, including all PCI related paths
(in Emulation the driver can wait for 10ms*EmulationFactor, i.e.: 20s). This is especially required if FW doesn't implement
the flows in Optional Operations (future enhancements).) */
/* 8. Verify that the transaction-pending bit of each of the function in the Device Status Register in the PCIe is cleared. */
{
{
DbgBreak();
}
}
#else
DbgBreak();
#endif
/* 9. Initialize the function as usual this should include also re-enabling the function in all the HW blocks and Storms that
were disabled by the MCP and cleaning relevant per-function information in the chip (internal RAM related information, IGU memory etc.).
a. In case of VF, PF resources that were allocated for previous VF can be re-used by the new VF. If there are resources
that are not needed by the new VF then they should be cleared.
b. Note that as long as slow-path prod/cons update to Xstorm is not atomic, they must be cleared by the driver before setting
the function to "enable" in the Xstorm.
c. Don't forget to enable the VF in the PXP or the DMA operation for PF in the PXP. */
{
}
{
#ifdef VF_INVOLVED
//lm_vf_enable_vf(pdev);
#endif
}
return lm_status;
}
{
}
{
/* TODO : Implement... */
DbgBreakMsg("ECORE_GUNZIP NOT IMPLEMENTED\n");
return FALSE;
}
{
}
{
}
//The bug is that the RBC doesn't get out of reset after we reset the RBC.
{
#if defined(_VBD_CMD_) //This function is not needed in vbd_cmd env.
return;
#endif
if (CHIP_IS_E1x(pdev))
{
//a.Wait 60 microseconds only for verifying the ~64 cycles have passed.
if(0 == (val & MISC_REGISTERS_RESET_REG_1_RST_RBCP))
{
//If bit 28 is '0' - This means RBCP block is in reset.(one out of reset)
// Take RBC out of reset.
}
}
}
{
}
{
}
{
}
* includeing or excluding the nig (b_with_nig)
*/
{
// save values of registers
if( b_with_nig )
{
// Ugly patch - we need to prevent nig reset - to be fixed SOON (TODO T7.2?)
// save values + write zeros
{
}
// wait 200 msec before we reset the nig so all packets will pass through
// 200000 and not 50*4000 since we want this wait to be "only" 200000ms
// when we used 50*4000 method, the actual sleep time was much higher (more than 16 seconds...!)
// this caused hw lock timeout (16sec) in lm_reset_device_if_undi_active() function.
do
{
val = 0;
// first 200000ms we always wait...
// check values of FTQ and verify they are all one
// if not wait 200000ms up to 5 times...(1 second)
{
}
// Debug break only if MCP is detected (NVM is not empty)
if (lm_is_mcp_detected(pdev))
{
}
}
/* reset device */
if (CHIP_IS_E3(pdev))
{
// New blocks that need to be taken out of reset
// Mstat0 - bit 24 of RESET_REG_2
// Mstat1 - bit 25 of RESET_REG_2
}
if( b_with_nig )
{
/* take the NIG out of reset */
// restore....
{
}
}
// rbc_reset_workaround() should be called AFTER nig is out of reset
// otherwise the probability that nig will be accessed by bootcode while
// it is in reset is very high (this will cause GRC_TIMEOUT)
// TODO - we still need to deal with CQ45947 (calling rbc_reset_workaround before nig is out of reset will
// cause the grc_timeout to happen)
DbgMessage(pdev, WARN, "lm_reset_path:%sreset rbcp wait [begin]\n", b_with_nig ? " (with NIG) ": " ");
DbgMessage(pdev, WARN, "lm_reset_path:%sreset rbcp wait [end]\n", b_with_nig ? " (with NIG) ": " ");
}
/*
* quote from bnx2x:
*
* "previous driver DMAE transaction may have occurred when pre-boot stage ended
* and boot began, or when kdump kernel was loaded. Either case would invalidate
* the addresses of the transaction, resulting in was-error bit set in the pci
* causing all hw-to-host pcie transactions to timeout. If this happened we want
* to clear the interrupt which detected this from the pglueb and the was done
* bit"
*/
{
if ( CHIP_IS_E1x(pdev) )
{
// case accessed, so we do nothing in case chip is earlier than E2 (CQ63388, CQ63302).
return;
}
{
DbgMessage(pdev, WARNi, "lm_reset_prev_interrupted_dmae: was error bit was found to be set in pglueb upon startup. Clearing");
}
}
// return TRUE if function is hidden
const u8_t port_factor,
const lm_chip_port_mode_t port_mode )
{
u8_t func_config_id = 0;
#define E2_4P_PF_NUM(path, port, pf) (((pf) << 2) | ((port) << 1) | (path)) /* pf: 0..1 ==> pf_num: 0..7 */
#define E2_PF_NUM(path, port, pf) ((port_mode == LM_CHIP_PORT_MODE_4) ? E2_4P_PF_NUM(path, port, pf) : E2_2P_PF_NUM(path, port, pf))
if( CHIP_IS_E1_PARAM(chip_id) )
{
DbgBreakMsg("We should not reach this line\n");
return b_hidden;
}
if( CHIP_IS_E1x_PARAM(chip_id) )
{
}
else
{
}
if( mf_config & FUNC_MF_CFG_FUNC_HIDE )
{
}
return b_hidden;
}
{
lm_loader_response resp = 0;
u32_t rst_dorq_val = 0;
u8_t last_valid_vnic = 0;
static const lm_loader_opcode opcode_arr[] = {LM_LOADER_OPCODE_LOAD, LM_LOADER_OPCODE_UNLOAD_WOL_DIS} ;
u8_t port_factor = 0;
u8_t vnics_per_port = 0;
/*
* Clear possible previously interrupted DMAE which may have left PCI inaccessible.
*/
/*
* Check if device is active and was previously initialized by
* UNDI driver. UNDI driver initializes CID offset for normal bell
* to 0x7.
*/
{
// dorq is out of reset
{
}
if( UNDI_ACTIVE_INDICATION_VAL == val )
{
}
else
{
// We call here with FALSE since there might be a race (only here)
// that lm_hw_clear_all_locks() will clear the lock although it is acquired
// and then we get an ASSERT in checked builds.
// so this FALSE here is only to prevent ASSERT on checked builds when ER enabled (CQ60944).
// undi is not active, nothing to do.
return;
}
}
else
{
// lock is already taken by other func we have nothing to do though this is NOT acceptable we get here...
return;
}
{
/* TBD: E1H - when MCP is not present, determine if possible to get here */
DbgBreakMsg("lm_reset_device_if_undi_active: reading from shmem when MCP is not present\n");
}
switch( port_mode )
{
case LM_CHIP_PORT_MODE_NONE: // E1.0/E1.5: we enter this if() one time - for one of the functions, and and mailbox func numbers are 0 and 1
case LM_CHIP_PORT_MODE_4: // E2
vnics_per_port = (LM_CHIP_PORT_MODE_4 == port_mode )? 2 : pdev->params.vnics_per_port; // for 4-port it is always 2. for others its upon param
break;
case LM_CHIP_PORT_MODE_2:
port_max = 1; // E2: we enter this if() maximum twice - once for each path, and mailbox func number is 0 for both times
port_factor = 2;
break;
default:
DbgBreakMsg("we should not reach this line!");
break;
}
// We do here two opcode iterations, each one of them for all ports...
// 1. first iteration(s) will "tell" the mcp that all ports are loaded (MCP accepts LOAD requests for ports that are already loaded.)
// This way we can assure that the driver is the "owner" of the hardware (includes NIG)
// So we can reset the nig.
//
// 2. second iteration(s) will "tell" the mcp that all ports are unloaded so we can "come clean" for regular driver load flow
{
{
// Check what is the last valid vnic (non hidden one)
{
if( CHIP_IS_E1(pdev) )
{
// we don't have func_mf_config in E1. To prevent invalid access to shmem - break.
last_valid_vnic = 0;
break;
}
port,
vnic,
port_mode );
if( !b_hidden )
{
// this is the reason we make this loop twice (here and below)
}
}
{
// NOTE: it seems that these two lines are redundant after we have the new FUNC_MAILBOX_ID macro
// keep it for now
if( !CHIP_IS_E1(pdev) )
{
port,
vnic,
port_mode );
if( b_hidden )
{
continue;
}
}
// get fw_wr_seq for the func
if( LM_LOADER_RESPONSE_UNLOAD_COMMON == resp )
{
}
{
// INTR_BLK_TYPE is not valid since we don't have this information at this phase yet.
if ( CHIP_IS_E1x(pdev) )
{
if( b_first_non_hidden_iter ) // This might be redundent but since before BCV change this code was running once per port we keep it as it is
{
}
}
if( b_first_non_hidden_iter ) // per port no need to run more than once
{
// mask AEU signal
}
if( last_valid_vnic == vnic )
{
// TODO: Reset should take mstat into account - dealt with better in the main branch where the reset chip issue is tidier,
// leaving this for integrate...
// save nig swap register before NIG reset
// reset the chip with nig
// restore nig swap register
}// nig reset
}
} // vnic loop
} // port loop
} // opcode loop
// We expect that the last response will be LM_LOADER_RESPONSE_UNLOAD_COMMON
if( LM_LOADER_RESPONSE_UNLOAD_COMMON != resp )
{
}
// restore original function number
// after the unlock the chip/path is in reset for sure, then second port won't see 7 in the DORQ_REG_NORM_CID_OFST
} // lm_reset_device_if_undi_active
/**lm_disable_function_in_nig
* Configure the NIG LLH so that packets targeting the given PF
* are marked as "classification failed".
* This function must be called before sending the FUNCTION_STOP
* ramrod.
*
* @param pdev the PF to disable.
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* failure value on failure.
*/
{
u32_t nig_entry_idx = 0;
const u32_t nig_mem_enable_base_offset = (PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : NIG_REG_LLH0_FUNC_MEM_ENABLE);
const u32_t nig_mem2_enable_base_offset = (PORT_ID(pdev) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE : NIG_REG_P0_LLH_FUNC_MEM2_ENABLE);
if (!IS_MULTI_VNIC(pdev))
{
return LM_STATUS_SUCCESS;
}
if (IS_MF_SD_MODE(pdev))
{
/* for SD mode, clear NIG_REG_LLH1_FUNC_EN */
lm_set_func_en(pdev, FALSE); /* if function should be enabled it will be set when wol is configured */
}
{
/*for NPAR/NPAR-SD mode, clear every NIG LLH entry by clearing NIG_REG_LLH1_FUNC_MEM_ENABLE for every entry in both
NIG mem1 and mem2.*/
{
}
{
}
}
else
{
DbgBreakMsg("Invalid MF mode.");
}
return lm_status;
}
/**
* This function sends the function-stop ramrod and waits
* synchronously for its completion
*
* @param pdev
*
* @return lm_status_t SUCCESS / TIMEOUT on waiting for
* completion
*/
{
0,
0 );
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return lm_status;
} /* lm_function_stop */
{
#ifdef VF_INVOLVED
{
return lm_status;
}
#endif
{
return LM_STATUS_SUCCESS;
}
{
}
{
}
{
DbgBreak();
}
/* Function stop has been sent, we should now block slowpath commands */
return lm_status;
}
/* This function clears the pf enable bit in the pglue-b and cfc, to make sure that if any requests
* are made on this function they will be dropped before they can cause any fatal errors. */
{
//REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
}
{
{
return;
}
/* clean ILT table
* disabled function are not going to access the table anymore:
* - TM: already disabled in "reset function part"
* - SRC: In order to make sure SRC request is not initiated:
* - in MF mode, we clean the ILT table in the per func phase, after LLH was already disabled
* - in SF mode, we clean the ILT table in the per port phase, after port link was already reset */
for (k=0;k<ILT_NUM_PAGE_ENTRIES_PER_FUNC;temp++,k++)
{
}
/* Timers workaround bug for E2 phase3: if this is vnic-3, we need to set the entire ilt range for this timers. */
{
}
else
{
}
}
/**
* Function takes care of resetting everything related to the
* function stage
*
* @param pdev
* @param cleanup - this indicates whether we are in the last
* "Reset" function to be called, if so we need
* to do some cleanups here, otherwise they'll be
* done in later stages
*
* @return lm_status_t
*/
{
/*It assumed that all protocols are down all unload ramrod already completed*/
{
// disconnect from NIG attention
{
}
else
{
}
}
/* Configure IGU */
{
}
/* Timer stop scan.*/
{
if (!val)
{
break;
}
// in case reset in progress
// we won't get completion so no need to wait
{
break;
}
}
/*timeout*/
/* shutdown bug - in case of shutdown it's quite possible that the timer block hangs and the scan never ends */
if (!lm_reset_is_inprogress(pdev))
{
}
// reset the fw statistics (so next time client is up data will be correct)
// if we don't call it here - we'll see in statistics 4GB+real
/* Timers workaround bug: before cleaning the ilt we need to disable the pf-enable bit in the pglc + cfc */
if (cleanup)
{ /* pdev->params.multi_vnics_mode, function that gets response "port/common" does this in the lm_reset_port_part */
if (!CHIP_IS_E1x(pdev))
{
}
}
/* Disable the function and status blocks in the STORMs unless under FLR (don't want to intefere
* with FW flow) */
if (!lm_reset_is_inprogress(pdev))
{
{
}
}
return LM_STATUS_SUCCESS;
}
{
/*It assumed that all protocols are down all unload ramrod already completed*/
/* TODO Configure ACPI pattern if required. */
/* TODO Close the NIG port (also include congestion management toward XCM).*/
// disable attention from nig
// Do not rcv packets to BRB
// Do not direct rcv packets that are not for MCP to the brb
// If DCBX is enabled we always want to go back to ETS disabled.
// NIG is not reset
if(IS_DCB_ENABLED(pdev))
{
}
// reset external phy to cause link partner to see link down
/* Configure AEU.*/
/* shutdown bug - in case of shutdown don't bother with clearing the BRB or the ILT */
if (!lm_reset_is_inprogress(pdev))
{
/* Wait a timeout (100msec).*/
/* Check for BRB port occupancy. If BRB is not empty driver starts the ChipErrorRecovery routine.*/
/* brb1 not empty */
if (val)
{
return LM_STATUS_TIMEOUT;
}
if (!CHIP_IS_E1x(pdev))
{
}
/* link is closed and BRB is empty, can safely delete SRC ILT table: */
}
return LM_STATUS_SUCCESS;
}
/**
* @Description
* This function checks whether a certain data entry
* (register in NIG) is valid for current phase and chip.
* @param pdev
* @param data: A register in the nig with data on when it is
* valid
*
* @return INLINE u8_t TRUE: if entry is valid FALSE o/w
*/
const lm_nig_save_restore_data_t * data,
{
{
return FALSE;
}
{
return FALSE;
}
if (CHIP_IS_E1(pdev))
{
}
else if (CHIP_IS_E1H(pdev))
{
}
else if (CHIP_IS_E2(pdev))
{
}
else
{
}
}
// This function should be called only if we are on MCP lock
// This function should be called only on E1.5 or on E2 (width of PXP2_REG_PGL_PRETEND_FUNC_xx reg is 16bit)
{
if (CHIP_IS_E1(pdev))
{
return LM_STATUS_FAILURE;
}
{
return LM_STATUS_INVALID_PARAMETER;
}
switch (ABS_FUNC_ID(pdev))
{
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
break;
case 4:
break;
case 5:
break;
case 6:
break;
case 7:
break;
default:
break;
}
if( 0 == offset )
{
return LM_STATUS_INVALID_PARAMETER;
}
if(offset)
{
}
return LM_STATUS_SUCCESS;
}
/**
* @Description
* This function is called between saving the nig registers
* and restoring them. Its purpose is to do any special
* handling that requires knowing what the registers that
* were read are and before restoring them. It can change
* the values of other registers based on knowledge
* obtained by values of different registers.
*
* Current processing rules:
* NIG_REG_LLHX_FUNC_EN should be set to '1' if
* lm_get_func_en is valid. otherwise it
* will remain '0'. Only under sd mode.
*
* @param pdev
* @param reg_offsets_port
* @param reg_port_arr
* @param reg_port_arr_size
*/
{
/* Current processing only has to do with SD multi function mode. this if should be removed
* if the case changes... */
if (!IS_MF_SD_MODE(pdev))
{
return;
}
/* We loop on all the registers to make sure we access the correct offset: incase someone moves it. */
{
{
{
}
}
}
}
{
switch(save_or_restore)
{
case LM_RESET_NIG_OP_SAVE:
break;
case LM_RESET_NIG_OP_RESTORE:
break;
case LM_RESET_NIG_OP_PROCESS:
return; /* Return on purpose: processing is done in a separate function */
default:
break;
}
if( pretend_func_id != abs_func_id )
{
}
{
{
if( b_save )
{
}
else
{
}
}
}
{
if( b_save)
{
}
else
{
}
}
if( pretend_func_id != abs_func_id )
{
}
}
/*
1. save known essential NIG values (port swap, WOL nwuf for all funcs)
2. Pretend to relevant func - for split register as well
3. Resets the device and the NIG.
4. Restore known essential NIG values (port swap and WOL nwuf).
*/
void
{
u8_t abs_func_vector = 0;
static const u32_t offset_base_wb[PORT_MAX] = { NIG_REG_LLH0_ACPI_BE_MEM_DATA, NIG_REG_LLH1_ACPI_BE_MEM_DATA };
// List of registers that are split-4 (different addresses per port, but same per function)
/* List of registers that are "global" for all functions in path             offset valid
e1,e1h,e2,e3 save / restore */
const lm_nig_save_restore_data_t non_split_offsets[] = { { NIG_REG_PORT_SWAP, {1, 1, 0, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
static u64_t reg_nig_port_restore_wb[MAX_FUNC_NUM][NIG_REG_LLH0_ACPI_BE_MEM_DATA_SIZE/2] = {{0}} ; // the nwuf data
// Note:
// Due to kernel stack limitation we use reg_nig_port_restore(_wb) as static variables.
// At first glance, it doesn't look good BUT avoiding multiple access to the values is assured:
// mcp locking mechanism LOAD_COMMON etc
// Currently we work with max 8 PF, in case of a change - need to verify code is still valid
// verify enum values
for( lm_reset_nig_op = LM_RESET_NIG_OP_SAVE; lm_reset_nig_op < LM_RESET_NIG_OP_MAX; lm_reset_nig_op++ )
{
{
// we skip non-marked functions
{
continue;
}
// choose the correct idx_port
// save for 1st iteration
// restore for 2nd iteration
idx,
} // for func iterations
// This code section should be done once and anyway!
if ( LM_RESET_NIG_OP_SAVE == lm_reset_nig_op)
{
{
{
}
}
//reset chip with NIG!!
// save nig swap register and global acpi enable before NIG reset
{
{
}
}
} // save iteartion only code
} // lm_reset_device_with_nig
void
{
/* Reset the HW blocks that are listed in section 4.13.18.*/
{
/* In case of shutdown we reset the NIG as well */
}
else
{
}
/* According to E1/E1H/E2 Recovery flow spec, as long as MCP does not support process kill, "close the gates"
* should be disabled while no drivers are loaded. The last driver that unloads should disable "close the gates"
*/
}
{
lm_loader_opcode opcode = 0 ;
lm_loader_response resp = 0 ;
#ifdef VF_INVOLVED
{
if (lm_status != LM_STATUS_SUCCESS)
{
}
return;
}
#endif
// depends on reason, send relevant message to MCP
switch( reason )
{
case LM_REASON_WOL_SUSPEND:
break ;
case LM_REASON_NO_WOL_SUSPEND:
break ;
case LM_REASON_DRIVER_UNLOAD:
// in case we do support wol_cap, we ignore OS configuration and
// we decide upon nvm settings (CQ49516 - S5 WOL functionality to always look at NVRAM WOL Setting)
{
// enabled_wols so the mac address will be written by lm_set_d3_mpkt()
}
else
{
}
break;
default:
break;
}
if ( !CHIP_IS_E1(pdev) )
{
{
}
else
{
}
// We do expect that register value will be consistent with multi_vnics_mode.
if (!lm_fl_reset_is_inprogress(pdev))
{
}
}
{
{
}
return;
}
// magic packet should be programmed before unload request send to MCP
if (!IS_ASSIGNED_TO_VM_PFDEV(pdev))
{
}
// nwuf is programmed before chip reset since if we reset the NIG we restore all functions anyway
switch (resp)
{
break;
break;
//Check if there is dbus work
break;
default:
DbgBreakIfAll(1);
}
// unset pmf flag needed for D3 state
if (resp != LM_LOADER_RESPONSE_UNLOAD_DONE )
{
DbgBreakIfAll(1);
}
}
/**
* This function sends the "function-start" ramrod and waits
* synchronously for its completion. Called from the
* chip-start flow.
*
* @param pdev
*
* @return lm_status_t SUCCESS / TIMEOUT on waiting for
* completion
*/
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
}
else
{
}
/* NIV_TODO: func_start_data->vif_id = mm_cpu_to_le16(??) */
/* TODO: For Modifying Ether type of Outer VLAN to SVLAN:
To use, first set these registers to the SVLAN Ethertype (0x88a8)
PRS_REG_VLAN_TYPE_0
PBF_REG_VLAN_TYPE_0
NIG_REG_LLH_OUTER_VLAN_TYPE_1
*/
{
}
else
// Function start is sent when the first miniport clients binds. (Can be also FCOE or iSCSI)
// The requirement for NW multiple priority is only known to eVBD when the NDIS miniport binds.
{
// Multiple priority enabled (only from D3 flow)
}
else
{
}
// encapsulated packets offload is disabled by default
// in case of an error, restore last fw state.
{
func_start_data->tunn_clss_en = 0;
}
else
{
}
{
}
{
/* modify sd_vlan_force_pri_val through registry */
}
0,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return lm_status;
} /* lm_function_start */
{
{
return LM_STATUS_SUCCESS; //lm_vf_chip_start(pdev);
}
if ( max_bw != 0 )
{
//we assume that if one of the BW registry parameters is not 0, then so is the other one.
DbgBreakIf(min_bw == 0);
if (LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
}
/* Chip is initialized. We are now about to send first ramrod we can open slow-path-queue */
if ( LM_STATUS_SUCCESS != lm_status )
{
return lm_status;
}
// start timer scan after leading connection ramrod.
if ( LM_STATUS_SUCCESS != lm_status )
{
goto on_err ;
}
if( LM_STATUS_SUCCESS != lm_status )
{
}
return lm_status;
}
/*
*Function Name:lm_read_fw_stats_ptr
*
*Parameters:
*
*Description: read stats_ptr ( port and func) from shmem
*
*Assumption: stats scratch pad address from MCP can not change on run time (bc upgrade is not valid)
* in case bc upgraded - need to stop statistics and read addresses again
*Returns:
*
*/
void lm_setup_read_mgmt_stats_ptr( struct _lm_device_t *pdev, IN const u32_t mailbox_num, OUT u32_t* OPTIONAL fw_port_stats_ptr, OUT u32_t* OPTIONAL fw_func_stats_ptr )
{
/* Reads the per-port and per-function statistics scratch-pad pointers from
 * shmem (see block comment above). Either output pointer may be NULL, in
 * which case that value is not read.
 *
 * NOTE(review): the guard condition that selects the no-MCP workaround path
 * appears to be missing in this revision (orphan brace below) -- presumably
 * a "no MCP detected" check; confirm against the upstream source. */
{
// E2 TODO: move this to lm_main and get info at get_shmem_info...
// Hard-coded scratch-pad addresses used when no MCP (bootcode) is present.
#define NO_MCP_WA_FW_FUNC_STATS_PTR (0xAF900)
#define NO_MCP_WA_FW_PORT_STATS_PTR (0xAFA00)
if ( 0 != fw_func_stats_ptr)
{
// NOTE(review): assignment of the no-MCP func-stats address looks stripped here.
}
if ( 0 != fw_port_stats_ptr)
{
// NOTE(review): assignment of the no-MCP port-stats address looks stripped here.
}
// No shmem to consult in the no-MCP case -- done.
return;
}
if ( NULL != fw_func_stats_ptr )
{
// read func_stats address
// Backward compatibility adjustments for Bootcode v4.0.8 and below
// 0xf80a0000 is a known bogus value returned by old bootcode; treat as NULL.
if( 0xf80a0000 == *fw_func_stats_ptr )
{
DbgMessage(pdev, FATAL , "lm_read_fw_stats_ptr: boot code earlier than v4.0.8 fw_mb=%p-->NULL\n", *fw_func_stats_ptr );
*fw_func_stats_ptr = 0;//NULL
}
DbgMessage(pdev, WARN , "lm_read_fw_stats_ptr: pdev->vars.fw_func_stats_ptr=%p\n", *fw_func_stats_ptr );
}
if ( NULL != fw_port_stats_ptr )
{
// read port_stats address
DbgMessage(pdev, WARN, "lm_read_fw_stats_ptr: pdev->vars.fw_port_stats_ptr=%p\n", *fw_port_stats_ptr );
}
}
/**lm_init_get_modes_bitmap
* Get the representation of the device's configuration as
* inittool init-modes flags.
*
* @param pdev the device to use
*
* @return u32_t a bitmap with the appropriate INIT_MODE_XXX
* flags set.
*/
static u32_t
{
if (CHIP_REV_IS_ASIC(pdev))
{
}
else if (CHIP_REV_IS_FPGA(pdev))
{
}
else if (CHIP_REV_IS_EMUL(pdev))
{
}
else
{
}
{
}
else if ((CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_2)||(CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_NONE))
{
}
else
{
}
if (CHIP_IS_E2(pdev))
{
}
else if (CHIP_IS_E3(pdev))
{
if (CHIP_REV_IS_ASIC(pdev))
{
}
else
{
}
if ((chip_rev == CHIP_REV_Ax))
{
}
else if (chip_rev == CHIP_REV_Bx)
{
/* Multiple cos mode is relevant to E3 B0 only... */
{
case LM_COS_MODE_COS3:
break;
case LM_COS_MODE_COS6:
break;
default:
DbgBreakMsg("Unknown Cos Mode");
}
}
else
{
}
}
else
{
}
{
{
case MULTI_FUNCTION_SD:
break;
case MULTI_FUNCTION_SI:
break;
case MULTI_FUNCTION_AFEX:
break;
default:
}
}
else
{
}
#if defined(LITTLE_ENDIAN)
#else
#endif
//validation
return flags;
}
/**lm_ncsi_get_shmem_address
* @brief get ncsi shmem address
* @param lm_device
*
* @return ncsi_oem_shmem address or 0 if doesn't exists
*/
static u32_t
{
u32_t shmem2_size = 0;
u32_t ncsi_oem_data_addr = 0;
if ( shmem2_size > offset )
{
}
return ncsi_oem_data_addr;
}
/**
* @brief: Writes product version to shmem (for NCSI)
*
* No endian conversion is needed if data type is u32. Although, MCP is big endian, basic storage unit is u32.
* Unless you access individual byte, writing a 32-bit word in shmem from host DOES NOT need any endian conversion.
* In other words, if the host driver writes 0x12345678 to a 4-byte location in shmem, MCP will read it correctly. eVBD doesn't need to do mm_cpu_to_be32.
*
* @param[in] lm_device
*
* @return LM_STATUS_SUCCESS if written, other if not.
*/
static lm_status_t
{
if ( 0 == ncsi_oem_data_addr )
{
return LM_STATUS_FAILURE;
}
return LM_STATUS_SUCCESS;
}
{
u32_t ver_num_prev = 0;
u32_t i = 0;
u8_t num_dwords = 0;
/* inbox will only load with bootcode 7.4 and above, in which this field exists
* for sure. So if it's zero, we're not an inbox driver.
*/
if ( 0 == ncsi_oem_data_addr )
{
return FALSE;
}
/* First figure out if we're reading a string or a number, T7.0 and inbox used
* strings, whereas T7.2 and above use just the product ver as a u32_t. We do
* this by reading the unused fields
*/
if (0 == val)
{
/* Previous version is not inbox... we're ok... */
return FALSE;
}
/* Now read the version string -> as if we are inbox. This will read the values
* from the unused fields as well. */
for (i = 0; i < num_dwords; i++)
{
}
/* Now we just need to figure out if the engineering number is != 0,
* and version is more than 7.0.35.94 (inbox version) that'll mean we're inbox...
* the string looks like this: vXX.XX.XX.XX, X are digits.
*/
p = ver_str;
if (*p != 'v')
{
/* Not inbox... */
return FALSE;
}
p++; // we took away the v, now it looks like this: XX.XX.XX.XX
for (i = 0; i < 4; i++)
{
mult = 1;
while ((*p != '.') && /* Between separator */
(IS_DIGIT(*p)) && /* Is a digit */
(p < ver_str_end)) /* Doesn't overrun array */
{
p++;
}
p++;
}
/* Save for debugging */
(ver_num[0] << 24) |
ver_num[3] ;
/* Check inbox: 7.0.35.xx make sure xx != 0*/
{
return TRUE;
}
return FALSE;
}
/**
* @brief Writes FCoE capabilites to shmem (for NCSI)
* No endian conversion is needed if data type is u32. Although, MCP is big endian, basic storage unit is u32.
* Unless you access individual byte, writing a 32-bit word in shmem from host DOES NOT need any endian conversion.
* In other words, if the host driver writes 0x12345678 to a 4-byte location in shmem, MCP will read it correctly. eVBD doesn't need to do mm_cpu_to_be32.
*
* @param lm_device
*
* @return LM_STATUS_SUCCESS if written, FAILED if not
*/
{
u8_t i = 0;
u32_t* buf32 = (u32_t*)(&pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_shmem.fcoe_capabilities);
static const u8_t idx_max = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_shmem.fcoe_capabilities)/sizeof(u32_t);
if ( 0 == ncsi_oem_data_addr )
{
return LM_STATUS_FAILURE;
}
if ( bc_rev < bc_rev_min )
{
// not supported before this bootcode.
return LM_STATUS_INVALID_PARAMETER;
}
// populate fcoe_features
// no endian conversion is needed if data type is u32. Although, MCP is big endian, basic storage unit is u32.
// Unless you access individual byte, writing a 32-bit word in shmem from host DOES NOT need any endian conversion.
// In other words, if the host driver writes 0x12345678 to a 4-byte location in shmem, MCP will read it correctly. eVBD doesn't need to do mm_cpu_to_be32.
for (i = 0; i < idx_max; i++)
{
buf32[i]);
}
return LM_STATUS_SUCCESS;
}
{
/* Take Chip Blocks out of Reset */
if (CHIP_IS_E3(pdev))
{
// New blocks that need to be taken out of reset
// Mstat0 - bit 24 of RESET_REG_2
// Mstat1 - bit 25 of RESET_REG_2
}
// BMAC is not out of reset
{
// init multifunction_mode reg. For E3 - this is done in the port-phase, and can differ between ports...
{
}
// TBD: E1H, consider disabling grc timeout enable
}
/* Chip is out of reset */
/* Timers bug workaround. The chip has just been taken out of reset. We need to make sure that all the functions (except this one)
* are marked as disabled in the PGLC + CFC to avoid timer bug to occur */
if (!CHIP_IS_E1x(pdev))
{
/* 4-port mode or 2-port mode we need to turn of master-enable for everyone, after that, turn it back on for self.
* so, we disregard multi-function or not, and always disable for all functions on the given path, this means 0,2,4,6 for
* path 0 and 1,3,5,7 for path 1 */
{
{
continue;
}
}
/* Error recovery: we may have caused a BSOD during last error recovery attempt leaving some locks taken and attentions on,
* code below sort of "recovers" from a failed recovery.
*/
{
/* Clear the general attention used to notify second engine: just in case it was left turned on... */
}
}
}
{
{
return;
}
// init aeu_mask_attn_func_0/1:
// - SF mode: bits 3-7 are masked. only bits 0-2 are in use
// - MF mode: bit 3 is masked. bits 0-2 are in use as in SF.
// bits 4-7 are used for "per vnic group attention"
if(!CHIP_IS_E1(pdev))
{
// For DCBX we need to enable group 4 even in SF.
val |= 0x10;
}
// If SPIO5 is set to generate interrupts, enable it for this port
if (val & MISC_SPIO_SPIO5)
{
// fan failure handling
// add SPIO5 to group
}
{
/* Under error recovery we use general attention 20 (bit 18) therefore
* we need to enable it*/
}
}
{
{
return;
}
{
// enable hw interrupt from PXP on usdm overflow bit 16 on INT_MASK_0
}
}
{
u32_t i = 0;
{
return;
}
// static init
// runtime init
#ifdef __BIG_ENDIAN
#endif
// on E 1.5 fpga set number of max pcie tag number to 5
{
}
// verify PXP init finished (we want to use the DMAE)
/* Timers bug workaround E2 only. We need to set the entire ILT to have entries with value "0" and valid bit on.
* This needs to be done by the first PF that is loaded in a path (i.e. common phase)
*/
if (!CHIP_IS_E1x(pdev))
{
/* Step 1: set zeroes to all ilt page entries with valid bit on */
for (i=0; i < ILT_NUM_PAGE_ENTRIES; i++)
{
}
/* Step 2: set the timers first/last ilt entry to point to the entire range to prevent ILT range error */
{
}
/* set E2 HW for 64B cache line alignment */
/* TODO: align according to runtime cache line size */
}
}
{
}
{
if (!CHIP_IS_E1x(pdev))
{
}
}
{
#define PXP2_NUM_TABLES 4
for (k=0;k<PXP2_NUM_TABLES;k++)
{
// j is the first table entry line for this block temp is the number of the last written entry (each entry is 8 octets long)
j=temp;
{
}
first_ilt[k] = j;
}
if (!CHIP_IS_E1x(pdev))
{
/* Timers workaround bug: function init part. Need to wait 20msec after initializing ILT,
* needed to make sure there are no requests in one of the PXP internal queues with "old" ILT addresses */
}
}
{
{
return;
}
// write arbitrary buffer to DMAE, hw memory setup phase
}
{
u8_t i = 0;
{
return;
}
/* nullify PTRTBL */
for (i=0; i<64; i++)
{
}
/* nullify extended PTRTBL (E1H only) */
if (CHIP_IS_E1H(pdev))
{
for (i=0; i<64; i++)
{
}
}
/* soft-reset pulse */
/* We initialize the QM with max_common_conns, this way, the value is identical for all queues and it saves
* the driver the need for knowing the mapping of the physical queues to functions.
* Since we assume writing the same value to all queue entries, we can do this in the common phase and just initialize
* all queues the same */
/* physical queues mapping :
* E1 queues:
* - q[0-63].
* - initialized via QM_REG_BASEADDR and QM_REG_PTRTBL REG
* - port0 uses q[0-15], port1 uses q[32-47], q[16-31,48-63] are not used
*
* E1.5 queues:
* - _ON TOP OF_ E1 queues !
* - q[64-127]
**/
/* Initialize QM Queues */
#define QM_QUEUES_PER_FUNC 16
/* To eliminate the need of the driver knowing the exact function --> queue mapping, we simply initialize all queues, even for E1
* we initialize all 64 queues (as if we had 4 functions). For E1H we initialize the extension as well. */
{
for (i = 0; i < QM_QUEUES_PER_FUNC; i++)
{
}
}
if (CHIP_IS_E1H(pdev))
{
{
for (i=0; i<QM_QUEUES_PER_FUNC; i++)
{
REG_WR(pdev,QM_REG_BASEADDR_EXT_A +4*(func*QM_QUEUES_PER_FUNC+i) , pdev->hw_info.max_common_conns * 4*i);
}
}
}
}
{
if (!CHIP_IS_E1x(pdev))
{
/* Array of PF Enable bits, each pf needs to set its own,
* is set to 'zero' by MCP on PF FLR */
}
}
{
{
return;
}
/* The same for all functions on port, therefore we use the max_port_connections */
REG_WR(pdev, (PORT_ID(pdev) ? QM_REG_CONNNUM_1 : QM_REG_CONNNUM_0), pdev->hw_info.max_common_conns/16 -1);
}
{
{
return;
}
/* when more than 64K connections per _port_ are supported, we need to change the init value for LIN0/1_SCAN_TIME */
/* The same for all functions on port, therefore we need to use the max_port_connections */
REG_WR(pdev,(PORT_ID(pdev) ? TM_REG_LIN1_MAX_ACTIVE_CID : TM_REG_LIN0_MAX_ACTIVE_CID), (pdev->hw_info.max_port_conns/32)-1);
}
{
{
return;
}
// TBD: consider setting to the OS page size
if (CHIP_REV_IS_ASIC(pdev))
{
// enable hw interrupt from doorbell Q
}
}
{
#ifdef VF_INVOLVED
if (!CHIP_IS_E1x(pdev) && (IS_BASIC_VIRT_MODE_MASTER_PFDEV(pdev) || IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)))
{
//that is used by all HW blocks and FW
PF connections in the beginning (L2 connections),
then VF connections, and then the rest of PF connections */
REG_WR(pdev, DORQ_REG_VF_NORM_CID_WND_SIZE, LM_VF_CID_WND_SIZE(pdev)); /* should reflect the maximal number of connections in a VF.
0 for single connection */
#if 0
REG_WR(pdev, DORQ_REG_VF_NORM_CID_OFST, LM_DQ_CID_BITS - 3); /*means the number of bits in a VF doorbell.
For 8B doorbells it should be 0, 128B should be 4 */
#endif
/*In addition, in order to configure the way that the DQ builds the CID,
the driver should also configure the DQ security checks for the VFs,
thresholds for VF-doorbells, VF CID range. In the first step it's possible
to configure all these checks in a way that disables validation checks:
DQ security checks for VFs - configure single rule (out of 16) with mask = 0x1 and value = 0x0.
CID range - 0 to 0x1ffff
VF doorbell thresholds - according to the DQ size. */
}
#endif
}
{
}
{
if (!CHIP_IS_E1x(pdev))
{
if (IS_MF_AFEX_MODE(pdev))
{
}
else
{
/* Ovlan exists only if we are in path multi-function + switch-dependent mode, in switch-independent there is no ovlan headers */
REG_WR(pdev, PBF_REG_HDRS_AFTER_BASIC, (pdev->params.path_has_ovlan ? 7 : 6)); //Bit-map indicating which L2 hdrs may appear after the basic Ethernet header.
}
}
}
{
if (!CHIP_IS_E1x(pdev))
{
}
}
{
if (CHIP_IS_E1x(pdev))
{
// on E1H we do support enable pause
{
// special emulation and FPGA values for pause no pause
high = 513;
low = 0;
}
else
{
if (IS_MULTI_VNIC(pdev))
{
// A - 24KB + MTU(in K) *4
// A - 24*4 + 150; (9600*4)/256 - (mtu = jumbo = 9600)
low = 246;
}
else
{
{
// A - 40KB low = 40*4
low = 160;
}
else
{
// A - 24KB + MTU(in K) *4
}
}
// B - 14KB High = low+14*4
}
}
{
}
}
{
{
return;
}
if (!CHIP_IS_E1(pdev))
{
}
if (!CHIP_IS_E1x(pdev))
{
if (IS_MF_AFEX_MODE(pdev))
{
{
}
}
else
{
{
/* Ovlan exists only if we are in multi-function + switch-dependent mode, in switch-independent there is no ovlan headers */
REG_WR(pdev, PRS_REG_HDRS_AFTER_BASIC, (pdev->params.path_has_ovlan ? 7 : 6)); //Bit-map indicating which L2 hdrs may appear after the basic Ethernet header.
}
}
}
}
{
if (IS_MF_AFEX_MODE(pdev))
{
{
REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_HDRS_AFTER_BASIC_PORT_0 :PRS_REG_HDRS_AFTER_BASIC_PORT_1 , 0xE);
REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_HDRS_AFTER_TAG_0_PORT_0 :PRS_REG_HDRS_AFTER_TAG_0_PORT_1 , 0x6);
REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_MUST_HAVE_HDRS_PORT_0 :PRS_REG_MUST_HAVE_HDRS_PORT_1 , 0xA);
}
}
else
{
{
/* Ovlan exists only if we are in multi-function + switch-dependent mode, in switch-independent there is no ovlan headers */
REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_HDRS_AFTER_BASIC_PORT_0:PRS_REG_HDRS_AFTER_BASIC_PORT_1, (IS_MF_SD_MODE(pdev) ? 7 : 6)); //Bit-map indicating which L2 hdrs may appear after the basic Ethernet header.
}
}
}
{
{
return;
}
}
{
if (!CHIP_IS_E1x(pdev))
{
/* reset VFC memories - relevant only for E2, has to be done before initialing semi blocks which also
* initialize VFC blocks. */
}
}
{
/*
Passive buffer REG setup - Dual port memory in semi passive buffer in E1 must be read once before used
NOTE: This code is needed only for E1 though we will leave it as it is since it does no harm and doesn't affect performance
*/
{
}
}
{
if (!CHIP_IS_E1x(pdev))
{
}
}
{
{
return;
}
if (CHIP_IS_E1x(pdev))
{
// update threshold
REG_WR(pdev,(PORT_ID(pdev) ? PBF_REG_P1_ARB_THRSH : PBF_REG_P0_ARB_THRSH),(MAXIMUM_PACKET_SIZE/16));
// update init credit
REG_WR(pdev,(PORT_ID(pdev) ? PBF_REG_P1_INIT_CRD : PBF_REG_P0_INIT_CRD),(MAXIMUM_PACKET_SIZE/16) + 553 -22);
// probe changes
}
}
{
{
return;
}
}
{
// tell the searcher where the T2 table is
REG_WR(pdev, (PORT_ID(pdev) ? SRC_REG_COUNTFREE1 : SRC_REG_COUNTFREE0) ,pdev->vars.searcher_t2_num_pages * pdev->params.ilt_client_page_size/64);
REG_WR_IND(pdev, (PORT_ID(pdev) ? SRC_REG_FIRSTFREE1 : SRC_REG_FIRSTFREE0),pdev->vars.searcher_t2_phys_addr_table[0].as_u32.low);
REG_WR_IND(pdev, (PORT_ID(pdev) ? SRC_REG_FIRSTFREE1 : SRC_REG_FIRSTFREE0)+4,pdev->vars.searcher_t2_phys_addr_table[0].as_u32.high);
REG_WR(pdev, (PORT_ID(pdev) ? SRC_REG_NUMBER_HASH_BITS1 : SRC_REG_NUMBER_HASH_BITS0),pdev->context_info->searcher_hash.num_hash_bits);
}
{
{
return;
}
// static initialization only for Common part
/* configure cdu to work with cdu-validation. TODO: Move init to hw init tool */
}
{
u32_t cfc_init_reg = 0;
{
return;
}
/* init cfc with user configurable number of connections in cfc */
// enable context validation interrupt from CFC
#ifdef VF_INVOLVED
if (!CHIP_IS_E1x(pdev) && (IS_BASIC_VIRT_MODE_MASTER_PFDEV(pdev) || IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)))
{
/* with vfs - due to flr.. we don't want cfc to give attention on error from pxp,
* in a regular environment - we want this error bit5:
* The CDU responded with an error bit #0 (PCIe error) DORQ client has separate control
* for this exec error
*/
}
else
{
}
#else
#endif
}
{
{
return;
}
if(CHIP_IS_E1(pdev))
{
}
}
{
{
return;
}
if(CHIP_IS_E1H(pdev))
{
}
}
{
/* Enable IGU debugging feature */
#if 0 /* uncomment if you want to enable igu debug command for function 0, more changes required for different functions - will also need to define u32_t val=0*/
/* Configure fid = PF (bit 6) and function 0 (PF#0)*/
#endif
}
{
{
return;
}
{
/* E2 TODO: make sure that misc is updated accordingly and that three lines below are not required */
/* Let's enable the function in the IGU - this is to enable consumer updates */
/* Producer memory:
* E2 mode: address 0-135 match to the mapping memory;
* 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default prod; 139 PF3 default prod;
* 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod; 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod;
* 144-147 reserved.
* E1.5 mode - In backward compatible mode; for non default SB; each even line in the memory
* holds the U producer and each odd line hold the C producer. The first 128 producer are for
* NDSB (PF0 - 0-31; PF1 - 32-63 and so on).
* The last 20 producers are for the DSB for each PF. each PF has five segments
* (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods;
*/
/* non-default-status-blocks*/
{
prod_idx = (IGU_BASE_NDSB(pdev) + sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
for (i = 0; i < num_segs;i++)
{
}
/* Give Consumer updates with value '0' */
/* Send cleanup command */
}
/* default-status-blocks */
{
}
else
{
}
base_prod = (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC) ? (IGU_BC_BASE_DSB_PROD + dsb_idx) : (IGU_NORM_BASE_DSB_PROD + dsb_idx);
for (i = 0; i < num_segs; i++)
{
}
/* Send cleanup command */
/* Reset statistics msix / attn */
/* E2 TODO: these should become driver const once rf-tool supports split-68 const. */
}
}
{
if (CHIP_IS_E2(pdev) || CHIP_IS_E1H(pdev)) /* E3 supports this per port - and is therefore done in the port phase */
{
}
/* E1HOV mode was removed in E2 and is replaced with hdrs-after-basic... */
if (CHIP_IS_E1H(pdev))
{
}
}
{
if (!CHIP_IS_E3(pdev))
{
}
if (!CHIP_IS_E1x(pdev))
{
/* MF-mode can be set separately per port in E3, and therefore is done here... for E2 and before it is done in the common phase */
if (CHIP_IS_E3(pdev))
{
REG_WR(pdev,(PORT_ID(pdev)? NIG_REG_LLH1_MF_MODE: NIG_REG_LLH_MF_MODE), IS_MULTI_VNIC(pdev) ? 1 : 0);
}
}
if (!CHIP_IS_E1(pdev))
{
/* LLH0/1_BRB1_DRV_MASK_MF MF SF
mask_no_outer_vlan 0 1
mask_outer_vlan 1 0*/
u32_t val = IS_MF_SD_MODE(pdev) ? NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_OUTER_VLAN : NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_NO_OUTER_VLAN;
ASSERT_STATIC(NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_OUTER_VLAN == NIG_LLH1_BRB1_DRV_MASK_MF_REG_LLH1_BRB1_DRV_MASK_OUTER_VLAN);
ASSERT_STATIC(NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_NO_OUTER_VLAN == NIG_LLH1_BRB1_DRV_MASK_MF_REG_LLH1_BRB1_DRV_MASK_NO_OUTER_VLAN);
if (!CHIP_IS_E1x(pdev))
{
if (IS_MF_SD_MODE(pdev))
{
}
else
{
}
}
}
}
{
if (mf)
{
{
}
else
{
}
}
}
{
{
return;
}
/* Reset pciex errors */
if (!CHIP_IS_E1x(pdev))
{
REG_WR(pdev,PCICFG_OFFSET + PXPCS_TL_CONTROL_5, (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
(PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
(PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
}
}
{
{
return;
}
/* Reset pciex errors */
}
{
/* Timers bug workaround: disables the pf_master bit in pglue at common phase, we need to enable it here before
* any dmae access are attempted. Therefore we manually added the enable-master to the port phase (it also happens
* in the function phase) */
if (!CHIP_IS_E1x(pdev))
{
}
}
{
if (!CHIP_IS_E1x(pdev))
{
/* 1. Timers bug workaround. There may be an error here. do this only if func_id=6, otherwise
* an error isn't expected
* 2. May be an error due to FLR.
*/
}
}
{
if (!CHIP_IS_E1x(pdev))
{
//REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,1);
}
}
{
/* Error Recovery : attach some attentions to close-the-g8 NIG + PXP2 */
}
// for PRS BRB mem setup
{
wb_write[0] = 0x55555555 ;
// TBD: consider use DMAE to these writes
// Ethernet source and destination addresses
// #SOP
wb_write[0] = 0x09000000 ;
// NON-IP protocol
// EOP, eop_bvalid = 0
}
{
u8_t i = 0;
#ifdef _VBD_CMD_
return;
#endif
//First part
// Disable inputs of parser neighbor blocks
// Write 0 to parser credits for CFC search request
// send Ethernet packet
// TODO: Reset NIG statistic
// Wait until NIG register shows 1 packet of size 0x10
cnt = 1000;
while (cnt)
{
if (val == 0x10)
{
break;
}
cnt--;
}
if (val != 0x10)
{
DbgBreakIfAll(1);
}
// Wait until PRS register shows 1 packet
cnt = 1000;
while (cnt)
{
if (val == 0x1)
{
break;
}
cnt--;
}
if (val != 0x1)
{
DbgBreakIfAll(1);
}
// End of part 1
// #Reset and init BRB,PRS
init_brb1_common( pdev );
// "Start of part 2"
// Disable inputs of parser neighbor blocks
// Write 0 to parser credits for CFC search request
// send 10 Ethernet packets
for (i=0;i<10;i++)
{
}
// Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0
cnt = 1000;
while (cnt)
{
if (val == 0xb0)
{
break;
}
cnt--;
}
if (val != 0xb0)
{
DbgBreakIfAll(1);
}
// Wait until PRS register shows 2 packet
if (val != 0x2)
{
DbgBreakIfAll(1);
}
// Write 1 to parser credits for CFC search request
// Wait until PRS register shows 3 packet
// Wait until NIG register shows 1 packet of size 0x10
if (val != 0x3)
{
DbgBreakIfAll(1);
}
// clear NIG EOP FIFO
for (i=0;i<11;i++)
{
}
// #Reset and init BRB,PRS
init_brb1_common( pdev );
// everest_init_part( pdev, BLCNUM_NIG ,COMMON, hw);
// Enable inputs of parser neighbor blocks
}
{
/* ip_id_mask (determines how the ip id (ipv4) rolls over, (init value currently constant: 'half')) */
/* TODO need to add constant in common constant */
LM_INTMEM_WRITE16(pdev, USTORM_ETH_DYNAMIC_HC_PARAM_OFFSET, (u16_t)pdev->params.l2_dynamic_hc_min_bytes_per_packet, BAR_USTRORM_INTMEM);
if (!CHIP_IS_E1x(pdev))
{
{
}
else
{
}
}
}
{
/* Licensing with no MCP workaround. */
{
/* If there is no MCP then there is no shmem_base, therefore we write to an absolute address. port 1 is 28 bytes away. */
#define SHMEM_ABSOLUTE_LICENSE_ADDRESS 0xaff3c
DbgMessage(pdev, WARN, "writing reg: %p\n", SHMEM_ABSOLUTE_LICENSE_ADDRESS + (PORT_ID(pdev) * 0x1c));
}
if(CHIP_IS_E1H(pdev))
{
/* in a non-mf-aware chip, we don't need to take care of all the other functions */
{
/* Set all mac filter drop flags to '0' to make sure we don't accept packets for vnics that aren't up yet... do this for each vnic! */
LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, ucast_drop_all), 0, BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, ucast_accept_all), 0, BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, mcast_drop_all), 0, BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, mcast_accept_all), 0, BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, bcast_accept_all), 0, BAR_TSTRORM_INTMEM);
}
}
// for now only in multi vnic mode for min max cmng
if (IS_MULTI_VNIC(pdev))
{
// first time always use 10000 for 10G
}
/* Tx switching is only enabled if in MF SI mode and npar_vm_switching is enabled...*/
{
//In switch independent mode, driver must enable TCP TX switching using XSTORM_TCP_TX_SWITCHING_EN_OFFSET.
}
else
{
{
}
}
}
{
struct event_ring_data eq_data = {{0}};
{
LM_INTMEM_WRITE32(pdev, addr + (sizeof(u32_t) * index), *((u32 *)&eq_data + index), BAR_CSTRORM_INTMEM);
}
}
{
/* status blocks are done in init_status_blocks() */ /* need to be write using GRC don't generate interrupt spq prod init WB */
REG_WR(pdev,XSEM_REG_FAST_MEMORY + (XSTORM_SPQ_PAGE_BASE_OFFSET(func)),pdev->sq_info.sq_chain.bd_chain_phy.as_u32.low);
REG_WR(pdev,XSEM_REG_FAST_MEMORY + (XSTORM_SPQ_PAGE_BASE_OFFSET(func)) + 4,pdev->sq_info.sq_chain.bd_chain_phy.as_u32.high);
/* Initialize the event-queue */
/* Todo: Init indirection table */
if(CHIP_IS_E1(pdev))
{
// Should run only for E1 (beginning with FW 6.4.10). In earlier versions (e.g. 6.2) the workaround is relevant for E1.5 as well.
/* add for PXP dual port memory setup */
LM_INTMEM_WRITE32(pdev,USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),lm_bd_chain_phys_addr(&pdev->eq_info.eq_chain.bd_chain, 0).as_u32.low, BAR_USTRORM_INTMEM); /* need to check */
LM_INTMEM_WRITE32(pdev,USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func)+4,lm_bd_chain_phys_addr(&pdev->eq_info.eq_chain.bd_chain, 0).as_u32.high, BAR_USTRORM_INTMEM); /* need to check */
}
//init dynamic hc
LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func), pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+4, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+8, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[2], BAR_CSTRORM_INTMEM);
/*Set DHC scaling factor for L4*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+12, (16 - (u8_t)pdev->params.l4_hc_scaling_factor), BAR_CSTRORM_INTMEM);
/*Reset DHC scaling factors for rest of protocols*/
ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0) ) ;
ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1) ) ;
ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2) ) ;
ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3) ) ;
/*Set DHC timeout 0 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+16, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+17, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+18, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+19, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[3], BAR_CSTRORM_INTMEM);
/*Set DHC timeout 1 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+20, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+21, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+22, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+23, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[3], BAR_CSTRORM_INTMEM);
/*Set DHC timeout 2 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+24, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+25, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+26, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+27, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[3], BAR_CSTRORM_INTMEM);
/*Set DHC timeout 3 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+28, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+29, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+30, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+31, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[3], BAR_CSTRORM_INTMEM);
#define TX_DHC_OFFSET 32
LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+4, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+8, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[2], BAR_CSTRORM_INTMEM);
/*Reset DHC scaling factors for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+12, 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+13, 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+14, 0, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+15, 0, BAR_CSTRORM_INTMEM);
/*Set DHC timeout 0 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+16, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+17, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+18, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+19, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[3], BAR_CSTRORM_INTMEM);
/*Set DHC timeout 1 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+20, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+21, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+22, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+23, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[3], BAR_CSTRORM_INTMEM);
/*Set DHC timeout 2 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+24, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+25, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+26, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+27, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[3], BAR_CSTRORM_INTMEM);
/*Set DHC timeout 3 for all protocols*/
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+28, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[0], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+29, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[1], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+30, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[2], BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+31, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[3], BAR_CSTRORM_INTMEM);
/* E1H specific init */
{
}
/* The statements below force the FW to trace SP operation. This debugger feature may be enabled via the corresponding initialization params value
in bootleg and/or via an undocumented registry value (per function). Disabling statistics is highly recommended when using this debug option. */
{
}
{
}
{
}
{
}
/* Enable the function in STORMs */
}
{
/* shutdown bug - clear the shutdown inprogress flag*/
/* Must be done before DMAE */
init_misc_common( pdev );
init_pxp_common ( pdev );
init_pxp2_common( pdev );
init_atc_common ( pdev );
init_dmae_common( pdev );
init_tcm_common ( pdev );
init_ucm_common ( pdev );
init_ccm_common ( pdev );
init_xcm_common ( pdev );
init_qm_common ( pdev );
init_tm_common ( pdev );
init_dq_common ( pdev );
init_brb1_common( pdev );
init_tsdm_common( pdev );
init_csdm_common( pdev );
init_usdm_common( pdev );
init_xsdm_common( pdev );
// synchronize the RTC of the SEMIs
init_upb_common( pdev );
init_xpb_common( pdev );
init_pbf_common( pdev );
{
/* don't zeroize msix memory - this overrides windows OS initialization */
}
// TBD: E1H - determine whether to move from here, or have "wait for blks done" function
//finish CFC init
// moved here because of timing problem
// we need to enable inputs here.
if (CHIP_IS_E1(pdev))
{
// read NIG statistic
// PRS BRB memory setup only after full power cycle
if(val == 0)
{
}
}
/* One time initialization of the phy:
in 2-port-mode - only for the first device on a chip!
in 4-port-mode - always */
{
if (!CHIP_IS_E1x(pdev))
{
}
// Apply common init only in case LFA is not supported by MFW.
{
}
}
//clear PXP2 attentions
// set dcc_support in case active
{
}
///Write driver NIV support
if (IS_MF_AFEX_MODE(pdev))
{
}
{
//we clear all the other capabilites flags and set just DRV_FLAGS_CAPABALITIES_LOADED_SUPPORTED
LM_SHMEM2_WRITE(pdev, OFFSETOF(shmem2_region_t, drv_capabilities_flag[FUNC_MAILBOX_ID(pdev)]), DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED);
}
/* Enable parity error only for E2 and above */
if (!CHIP_IS_E1x(pdev))
{
}
}
{
/* Probe phys on board - must happen before lm_reset_link*/
init_qm_port ( pdev);
init_tm_port ( pdev);
init_dq_port ( pdev);
init_pbf_port( pdev );
init_hc_port( pdev);
elink_init_mod_abs_int(pdev, &pdev->vars.link, CHIP_ID(pdev), pdev->hw_info.shmem_base, pdev->hw_info.shmem_base2, port);
// iSCSI FW expect bit 28 to be set
{
}
// Clear the shared port bit of the DCBX completion
}
{
{
//we clear all the other capabilites flags and set just DRV_FLAGS_CAPAIALITIES_LOADED_SUPPORTED
LM_SHMEM2_WRITE(pdev, OFFSETOF(shmem2_region_t, drv_capabilities_flag[func_mb_id]), DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | (pdev->params.mtu_max << DRV_FLAGS_MTU_SHIFT));
}
init_pxp2_func( pdev );
/* Probe phys on board */
{
}
}
/**
* @Description
* The purpose of this function is to check that the chip
* is ready for initialization. Most checks are done in
* get_dev_info, however, due to Diag requirements its
* possible that certain things are not configured properly
* but get_dev_info passed. At time of writing this
* function it was IGU configuration in E3, but in the
* future there may be more things like this...
*
* @param pdev
*
* @return TRUE / FALSE
*/
{
if (( blk_type == INTR_BLK_IGU) &&
( blk_mode == INTR_BLK_MODE_NORM))
{
{
return FALSE;
}
}
return TRUE;
}
{
#ifdef _VBD_
#endif
return lm_status;
}
/* Description:
* The main function of this routine is to initialize the
* hardware. it configues all hw blocks in several phases acording to mcp response:
* 1. common blocks
* 2. per function blocks
*/
{
lm_loader_response resp = 0;
#ifdef VF_INVOLVED
{
return lm_vf_chip_init(pdev);
}
#endif
if (!lm_chip_ready_for_init(pdev))
{
return LM_STATUS_FAILURE;
}
/* Check if we need to reset the device:
* This can happen for two reasons:
* 1. Undi was active
{
}
// init mcp sequences
if( LM_STATUS_SUCCESS != lm_status )
{
DbgBreakMsg("lm_mcp_cmd_init failed!\n");
return lm_status ;
}
/* Save the load response */
// This should be first call after load request since we must complete
// these settings in 5 seconds (MCP keepalive timeout or start pulse)
if( LM_LOADER_RESPONSE_INVALID != resp )
{
if (IS_ASSIGNED_TO_VM_PFDEV(pdev))
{
//Validate FW if Port or Function
switch (resp)
{
if (!lm_is_fw_version_valid(pdev))
{
return LM_STATUS_BAD_SIGNATURE;
}
break;
default:
break;
}
}
// We need to call it here since init_funciton_part use these pointers
lm_setup_read_mgmt_stats_ptr(pdev, FUNC_MAILBOX_ID(pdev), &pdev->vars.fw_port_stats_ptr, &pdev->vars.fw_func_stats_ptr );
}
if (!IS_DRIVER_PULSE_ALWAYS_ALIVE(pdev))
{
{
DbgBreak();
}
}
// update mps and mrrs from pcicfg
if (!IS_ASSIGNED_TO_VM_PFDEV(pdev))
{
}
switch (resp)
{
if (LM_STATUS_SUCCESS != lm_status)
{
return lm_status;
}
#ifdef _VBD_
#endif
{
int i = 0;
for (i = 0; i < VNICS_PER_PATH(pdev); i++)
{
}
}
// going to the port part no break
// Clear pervious dbus info which may have been left
// during error recovery (if any)
//Check if there is dbus work
#ifdef _VBD_
if (lm_is_function_after_flr(pdev))
{
{
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
else
{
}
}
#endif
// If we are here, DMAE is ready (from common part init) - set it for TRUE for non-first devices
// set device as pmf
// going to the function part - fall through
#ifdef _VBD_
if (lm_is_function_after_flr(pdev))
{
{
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
else
{
}
}
#endif
// If we are here, DMAE is ready (from port part init) - set it for TRUE for non-first devices
#ifndef __BIG_ENDIAN
#endif
break;
default:
DbgBreakIfAll(1);
}
if (resp != LM_LOADER_RESPONSE_LOAD_DONE)
{
DbgBreakIfAll(1);
}
/* Read MF config parameters: there is a time window between MF
* configuration initialization and DCC attention, allowing DCC
* link state change to go unnoticed. This may cause wrong link
* state to be seen by clients, hence re-sync here.
*/
if (IS_MF_MODE_CAPABLE(pdev))
{
}
// TBD link training
return LM_STATUS_SUCCESS;
}