/*******************************************************************************
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Copyright 2014 QLogic Corporation
* The contents of this file are subject to the terms of the
* QLogic End User License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the License at
* See the License for the specific language governing permissions
* and limitations under the License.
*
*
* Module Description:
*
*
* History:
* 02/05/07 Alon Elhanani Inception.
******************************************************************************/
#include "lm5710.h"
#include "license.h"
#include "mcp_shmem.h"
#include "command.h"
#include "debug.h"
// HW statistics are active
// only if we are PMF && collect_enabled is on!
#define LM_STATS_IS_HW_ACTIVE(_pdev) ( _pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled && \
// do _cmd statement only if in SF mode
// we use this macro since in MF mode we don't maintain non-mandatory statistics so to prevent inconsistency - we don't use them at all
#define LM_STATS_64_TO_HI_LO( _x_64_, _hi_lo ) ( _hi_lo##_hi = (u32_t)U64_HI( _x_64_ ) ); ( _hi_lo##_lo = (u32_t)U64_LO( _x_64_ ) );
#define LM_STATS_HI_LO_TO_64( _hi_lo, _x_64_ ) ( _x_64_ = (((u64_t)(_hi_lo##_hi) << 32) | (_hi_lo##_lo)) )
/**
* driver stats are stored as 64bits where the lower bits store
* the value and the upper bits store the wraparound count.
* different stat fields are stored with different data sizes
* and the following macros help in storing values in the
* "overflow count" part of a 64bit value and seperating it from
* the actual data.
*/
/**lm_update_wraparound_if_needed
 * This function checks the old and new values, and returns
 * either the new data with the old wraparound count, or (if a
 * wraparound has occurred) the new data with an incremented
* wraparound count.
*
* val_current can be given in either little-endian or
* big-endian byte ordering. the values returned are always in
* host byte order.
*
* @param data_bits the number of data bits in the values
* @param val_current the newly collected value. the byte
 *                    ordering is determined by
* @param b_swap_bytes
 * @param val_prev the previously saved value in host byte
* order
* @param b_swap_bytes TRUE if val_current is byte-swapped (i.e
* given as little-endian on a big-endian
* machine), FALSE otherwise.
*
* @return u64_t the new data with an appropriate wraparound
* count.
*/
/* NOTE(review): this function body appears to have been truncated during
 * extraction -- the comparison of val_prev against val_current that detects
 * a wraparound, and most of the return expression, are missing (the line
 * ending in "DATA_MASK(data_bits)));" is an orphaned sub-expression).
 * Recover the original source before modifying. */
static u64_t lm_update_wraparound_if_needed(u8_t data_bits, u64_t val_current, u64_t val_prev, u8_t b_swap_bytes)
{
if(b_swap_bytes)
{
/*We assume that only 32bit stats will ever need to be byte-swapped. this is because
all HW data is byte-swapped by DMAE as needed, and the 64bit FW stats are swapped
by the REGPAIR macros.*/
}
{
}
(val_current & DATA_MASK(data_bits))); /*take the overflow count we calculated, and the data from the new value*/
}
/**
* The following macros handle the wraparound-count for FW
* stats. Note that in the 32bit case (i.e any stats that are
* not REGPAIRs), the bytes have to swapped if the host byte
* order is not little-endian.
*/
/* function checks if there is a pending completion for statistics and a pending dpc to handle the completion:
* for cases where VBD gets a bit starved - we don't want to assert if chip isn't stuck and we have a pending completion
*/
#ifdef _VBD_CMD_
extern volatile u32_t* g_everest_sim_flags_ptr;
#endif
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
#ifdef VF_INVOLVED
#endif
)
{
const u32_t i = LM_CLI_IDX_NDIS;
#ifdef VF_INVOLVED
} else
#endif
{
}
switch(stats_type)
{
// ioc IfHCOutPkts
break;
// ioc IfHCInPkts
break;
#ifdef VF_INVOLVED
#endif
#define LM_STATS_ERROR_DISCARD_SUM( _pdev, _i ) _pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[_i].checksum_discard + \
_pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[_i].packets_too_big_discard + \
break;
case LM_STATS_RCV_CRC_ERROR:
// Spec. 9
#ifdef VF_INVOLVED
#endif
// ioc Dot3StatsFCSErrors
break;
case LM_STATS_ALIGNMENT_ERROR:
// Spec. 10
#ifdef VF_INVOLVED
#endif
{
}
else
{
}
// ioc Dot3StatsAlignmentErrors
break;
// Spec. 18
#ifdef VF_INVOLVED
#endif
{
}
else
{
}
// ioc Dot3StatsSingleCollisionFrames
break;
// Spec. 19
#ifdef VF_INVOLVED
#endif
{
}
else
{
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsmultiplecollisionframes ) ;
}
// ioc Dot3StatsMultipleCollisionFrame
break;
case LM_STATS_FRAMES_DEFERRED:
// Spec. 40 (not in mini port)
#ifdef VF_INVOLVED
#endif
// ioc Dot3StatsDeferredTransmissions
break;
case LM_STATS_MAX_COLLISIONS:
// Spec. 21
#ifdef VF_INVOLVED
#endif
// ioc Dot3StatsExcessiveCollisions
break;
// Spec. 6
break;
// Spec. 7
break;
break;
break;
break;
break;
#ifdef VF_INVOLVED
#endif
{
}
else
{
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsinternalmactransmiterrors ) ;
}
break;
case LM_STATS_RCV_OVERRUN:
#ifdef VF_INVOLVED
#endif
stats->as_u64+= pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.xxoverflow_discard ;
break;
case LM_STATS_XMIT_UNDERRUN:
//These counters are always zero
#ifdef VF_INVOLVED
#endif
break;
// ioc IfInMBUFDiscards
break;
case LM_STATS_BYTES_RCV:
// ioc IfHCInOctets
break;
case LM_STATS_BYTES_XMIT:
// ioc IfHCOutOctets
break;
case LM_STATS_IF_IN_DISCARDS:
#ifdef VF_INVOLVED
{
}
else
#endif
{
}
stats->as_u64+= stats_fw->eth_tstorm_common.client_statistics[i].no_buff_discard ; // LM_STATS_RCV_NO_BUFFER_DROP
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].ucast_no_buff_pkts ; // LM_STATS_RCV_NO_BUFFER_DROP
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].mcast_no_buff_pkts ; // LM_STATS_RCV_NO_BUFFER_DROP
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].bcast_no_buff_pkts ; // LM_STATS_RCV_NO_BUFFER_DROP
#ifdef VF_INVOLVED
#endif
{
}
stats->as_u64+= stats_fw->eth_tstorm_common.port_statistics.xxoverflow_discard ; // LM_STATS_RCV_OVERRUN
break;
break;
break;
break;
break;
break;
break;
/*
case LM_STATS_IF_IN_ERRORS:
case LM_STATS_IF_OUT_ERRORS:
stats->as_u32.low = 0;
stats->as_u32.high = 0;
break;
*/
default:
break;
}
//DbgMessage(pdev, WARN, "lm_get_stats: stats_type=0x%X val=%d\n", stats_type, stats->as_u64);
return lm_status;
} /* lm_get_stats */
/*******************************************************************************
* Description:
* Zero the mirror statistics (probably after miniport was down in windows, 'driver unload' on ediag)
*
* Return:
******************************************************************************/
{
}
/*
* lm_edebug_if_is_stats_disabled returns TRUE if statistics gathering is
* disabled according to edebug-driver interface implemented through SHMEM2
* field named edebug_driver_if. Otherwise, return FALSE.
*/
/* NOTE(review): the declarator line (function name and parameter list) and
 * several body statements have been elided from this extract. Per the
 * comment above, this is lm_edebug_if_is_stats_disabled(), which inspects
 * the edebug_driver_if field in SHMEM2 and returns TRUE when statistics
 * gathering is disabled, FALSE otherwise. shmem2_size and offset are
 * presumably locals whose declarations were elided -- TODO confirm
 * against the original source. */
static u32_t
{
{
if (shmem2_size > offset)
{
{
return TRUE;
}
}
}
return FALSE;
}
{
// zero no completion counter
{
return LM_STATUS_SUCCESS;
}
#ifdef VF_INVOLVED
#ifndef __LINUX
{
}
#endif
#endif
/* send FW stats ramrod */
if (lm_status == LM_STATUS_SUCCESS)
{
// increment ramrod counter (for debugging)
++stats_fw->stats_ramrod_cnt ;
}
return lm_status;
}
// main stats function called from timer
{
{
DbgBreakIf(!pdev) ;
return;
}
#ifdef _VBD_CMD_
pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled = val && pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled;
#endif
/* disable statistics if FW SP trace is involved */
{
return;
}
/* if stats gathering is disabled according to edebug-driver i/f - return */
{
return;
}
{
// verify that previous ramrod cb is finished
{
// using a variable to have event log since the message is too long
// update timer_wakeup_no_completion_max
{
}
/* We give up in two case:
* 1. We got here #NO_COMPLETION times without having a stats-completion pending to be handled
* 2. There is a completion pending to be handled - but it still hasn't been handled in #COMP_NOT_HANDLED times
* we got here. #COMP_NOT_HANDLED > #NO_COMPLETION*/
{
{
}
/* shutdown bug - BSOD only if shutdown is not in progress */
if (!lm_reset_is_inprogress(pdev))
{
/* BSOD */
{
}
}
}
/* check interrupt mode on 57710A0 boards */
// this is total wake up no completion - for debugging
}
else
{
}
} // fw collect enabled
if( LM_STATS_IS_HW_ACTIVE(pdev) )
{
// if link is not up - we can simply pass this call (optimization)
{
// we can call dmae only if link is up, and we must check it with lock
{
// assign values for relevant mac type which is up - inside the lock due to consistency reasons
lm_stats_hw_assign( pdev ) ;
}
// assign to statistics to MCP
lm_stats_mgmt_assign( pdev ) ;
} // link is up
} // LM_STATS_IS_HW_ACTIVE
pdev->vars.stats.stats_collect.stats_hw.b_is_link_up ) // when there is no link - no use writing to mgmt
{
lm_stats_mgmt_assign( pdev ) ;
}
}
{
/* read interrupt mask from IGU - check that default-status-block bit is off... */
} // TODO add IGU complement
/* check bit 0 is masked (value 0) and that cstorm in default-status-block has increased. */
{
return TRUE;
}
return FALSE; /* no pending completion */
}
/**lm_stats_get_dmae_operation
* The statistics module uses two pre-allocated DMAE operations
* instead of allocating and releasing a DMAE operation on every
* statistics collection. There is an operation for EMAC
* statistics, and an operation for BMAC or MSTAT statistics
* This function returns the appropriate DMAE operation based on
* current MAC setting.
*
*
* @param pdev the device to use.
*
* @return lm_dmae_operation_t* the DMAE operation to use for
* collection HW statistics from the current MAC.
*/
/* NOTE(review): the declarator line (name/parameters) and the branch
 * conditions are missing from this extract. Per the doc comment above,
 * this is lm_stats_get_dmae_operation(), which selects between the
 * pre-allocated EMAC and non-EMAC (BMAC/MSTAT) DMAE operations based on
 * the current MAC type. TODO confirm against the original source. */
static lm_dmae_operation_t*
{
{
}
{
}
else
{
return NULL; /* no matching MAC type -- caller must handle NULL */
}
}
/*
*Function Name:lm_stats_dmae
*
*Parameters:
*
*Description:
* collect stats from hw using dmae
*Returns:
*
*/
{
{
return LM_STATUS_FAILURE;
}
if (LM_STATUS_ABORTED == lm_status)
{
//if the DMAE operation was interrupted by lm_reset_is_inprogress, it's OK and we can treat it as success.
}
return lm_status ;
}
/*
*Function Name:lm_stats_clear_emac_stats
*
*Parameters:
*
*Description:
* resets all emac statistics counter registers
*Returns:
*
*/
/* NOTE(review): the declarator line of lm_stats_clear_emac_stats() and the
 * declarations of 'dummy', 'emac_base', 'reg_start[]' and 'count_limit[]'
 * have been elided from this extract (the stray "1," below is residue of
 * an elided array initializer). The surviving logic read-clears the EMAC
 * RX / RXerr / TX STAT register banks -- these registers are clear-on-read,
 * so the read value is intentionally discarded. */
{
u32_t i = 0 ;
u32_t j = 0 ;
1,
{
return LM_STATUS_INVALID_PARAMETER ;
}
{
for( j = 0 ; j < count_limit[i]; j++ )
{
dummy = REG_RD( pdev, emac_base + reg_start[i]+(j*sizeof(u32_t))) ; /*Clear stats registers by reading from ReadClear RX/RXerr/TX STAT banks*/
}
}
return LM_STATUS_SUCCESS ;
}
/*
*Function Name:lm_stats_on_update_state
*
*Parameters:
*
*Description:
* This function should be called on one of two occasions:
* - When link is down
* - When PMF is going down (meaning - changed to another PMF)
* Function must be called under PHY LOCK
* 1. in case no link - do nothing
* 2. make last query to hw stats for current link
* 3. assign to mirror host structures
* 4. assign to MCP (managment)
* 5. saves the copy in mirror
*Returns:
*
*/
{
{
DbgBreakIf( !pdev ) ;
return LM_STATUS_INVALID_PARAMETER ;
}
{
return LM_STATUS_LINK_DOWN ;
}
if ( LM_STATS_IS_HW_ACTIVE(pdev) )
{
// call statistics for the last time before link down
if( LM_STATUS_SUCCESS != lm_status )
{
}
// assign last values before link down
lm_stats_hw_assign( pdev ) ;
}
// assign to statistics to mgmt
lm_stats_mgmt_assign( pdev ) ;
return lm_status;
}
// NOTE: this function must be called under PHY LOCK!
// - 3. keep latest stats in a copy
// - 4. if emac - reset all stats registers!
// - 5. if up - change b_link_down_is_on flag to FALSE
{
{
DbgBreakIf( !pdev ) ;
return LM_STATUS_INVALID_PARAMETER ;
}
{
{
return LM_STATUS_SUCCESS;
}
// get stats for the last time, assign to management and save copy to mirror
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
{
case MAC_TYPE_EMAC:
break;
case MAC_TYPE_BMAC: // nothing to do - bigmac resets itself anyway
break;
case MAC_TYPE_UMAC: // nothing to do - mstat resets anyway
case MAC_TYPE_XMAC:
break;
default:
case MAC_TYPE_NONE:
DbgBreakMsg( "mac_type not acceptable\n" ) ;
return LM_STATUS_INVALID_PARAMETER ;
}
// Set current to 0
}
else
{
}
return lm_status ;
}
/**lm_stats_alloc_hw_query
* Allocate buffers for the MAC and NIG stats. If the chip has
* an EMAC block, memory will be allocated for it's stats.
* otherwise only the non-EMAC and NIG buffers will be
* allocated. The non-EMAC buffer will be of the proper size for
*
* @param pdev the pdev to initialize
*
* @return lm_status_t LM_STATUS_SUCCESS on success,
* LM_STATUS_FAILURE on failure.
*/
{
{
// Allocate continuous memory for statistics buffers to be read from hardware. This can probably be changed to
// allocate max(emac, bmac) instead of emac+bmac, but need to make sure there are no races in the transition from
// 1G link to 10G link or vice-versa
stats_hw->u.s.addr_emac_stats_query = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: allocated a block of size %d at %x\n", alloc_size, stats_hw->u.s.addr_emac_stats_query);
{
return LM_STATUS_FAILURE ;
}
stats_hw->u.s.addr_bmac1_stats_query = (struct _stats_bmac1_query_t*)((u8_t*)stats_hw->u.s.addr_emac_stats_query + sizeof(struct _stats_emac_query_t)) ;
stats_hw->u.s.addr_bmac2_stats_query = (struct _stats_bmac2_query_t*)((u8_t*)stats_hw->u.s.addr_emac_stats_query + sizeof(struct _stats_emac_query_t)) ;
stats_hw->addr_nig_stats_query = (struct _stats_nig_query_t*)((u8_t*)stats_hw->u.s.addr_bmac1_stats_query + sizeof(union _stats_bmac_query_t)) ;
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: addr_bmac1_stats_query = %x, addr_bmac2_stats_query=%x, addr_nig_stats_query=%x\n", stats_hw->u.s.addr_bmac1_stats_query, stats_hw->u.s.addr_bmac2_stats_query, stats_hw->addr_nig_stats_query);
}
else
{
mac_stats_alloc_size = sizeof(struct _stats_mstat_query_t);
stats_hw->u.addr_mstat_stats_query = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: allocated a block of size %d at %x\n", alloc_size, stats_hw->u.addr_mstat_stats_query);
{
return LM_STATUS_FAILURE ;
}
stats_hw->addr_nig_stats_query = (struct _stats_nig_query_t*)((u8_t*)stats_hw->u.addr_mstat_stats_query + sizeof(struct _stats_mstat_query_t)) ;
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: stats_hw->addr_nig_stats_query=%x\n", stats_hw->addr_nig_stats_query);
}
return LM_STATUS_SUCCESS;
}
{
/* Total number of FW statistics requests =
* 1 for port stats + 1 for PF stats + 1 for queue stats + 1 for FCoE stats + 1 for toe stats */
#ifndef __LINUX
}
#endif
/* Request is built from stats_query_header and an array of
* stats_query_cmd_group each of which contains
* STATS_QUERY_CMD_COUNT rules. The real number or requests is
* configured in the stats_query_header.
*/
#ifndef __LINUX
DbgMessage(pdev, WARN, "%d stats groups to support %d VFs\n",num_groups, pdev->hw_info.sriov_info.total_vfs);
}
#endif
num_groups * sizeof(struct stats_query_cmd_group);
/* Data for statistics requests + stats_conter
*
* stats_counter holds per-STORM counters that are incremented
* when STORM has finished with the current request.
*/
sizeof(struct per_pf_stats) +
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct toe_stats_query) +
sizeof(struct fcoe_statistics_params) +
sizeof(struct stats_counter);
stats_fw->fw_stats = mm_alloc_phys_mem(pdev, alloc_size, &stats_fw->fw_stats_mapping ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
{
return LM_STATUS_RESOURCE;
}
/* Set shortcuts */
return LM_STATUS_SUCCESS;
}
/*
*Function Name: lm_stats_alloc_drv_info_to_mfw_resc
*
*Parameters:
*
*Description:
* Allocates physical memory to be used for OCBB statisics query by MFW needed for E3+ only
*Returns:
*
*/
{
if( CHIP_IS_E3(pdev) )
{
// since it is a union it doesn't matter
drv_info_to_mfw->addr.eth_stats = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
{
}
}
return lm_status;
}
// allocate memory both for hw and fw statistics
{
{
DbgBreakIf(!pdev) ;
return LM_STATUS_INVALID_PARAMETER ;
}
if( lm_status != LM_STATUS_SUCCESS )
{
// stats is not such a big deal if not working but since we
// only allocate here buffer, it doesn't matter since next alloc will also fail...
return lm_status;
}
if( lm_status != LM_STATUS_SUCCESS )
{
// OCBB is not such a big deal if not working but since we
// only allocate here buffer, it doesn't matter since next alloc will also fail...
return lm_status;
}
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
{
case 0:
{
}
break;
case 1:
{
}
break;
default:
{
DbgBreak();
return LM_STATUS_INVALID_PARAMETER;
}
break;
}
//create the locking policy for the stats DMAE context
lm_status = lm_dmae_locking_policy_create(pdev, LM_PROTECTED_RESOURCE_DMAE_STATS, LM_DMAE_LOCKING_POLICY_TYPE_PER_PF, &stats_dmae_context_info->locking_policy);
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
//create the stats DMAE context
{
DbgBreak();
return LM_STATUS_FAILURE;
}
//create the non-EMAC DMAE operation
pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation = lm_dmae_operation_create_sgl(pdev, TRUE, stats_dmae_context_info->context);
//create the EMAC DMAE operation if needed
{
pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation = lm_dmae_operation_create_sgl(pdev, TRUE, stats_dmae_context_info->context);
}
else
{
}
return LM_STATUS_SUCCESS ;
}
/**lm_stats_hw_setup_nig
* Add the DMAE command for reading NIG stats to the non-EMAC
* DMAE context.
*
* @param pdev the device to initialize
* @param dmae_operation the operation to setup for reading NIG
* statistics
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* failure value on failure.
*/
{
lm_dmae_address_t source = lm_dmae_address((0==PORT_ID(pdev))?NIG_REG_STAT0_BRB_DISCARD : NIG_REG_STAT1_BRB_DISCARD,
lm_dmae_address_t dest = lm_dmae_address(pdev->vars.stats.stats_collect.stats_hw.nig_stats_phys_addr.as_u64,
lm_status = lm_dmae_operation_add_sge(pdev, dmae_operation, source, dest, sizeof(struct _stats_nig_query_t ) / sizeof(u32_t));
return lm_status;
}
/**
* This struct is used to describe a DMAE SGE. It is used by the
* lm_status_setup_xxx and lm_stats_set_dmae_operation_sges
* functions.
*
*/
/* Describes a single DMAE SGE (source/destination/length) for the
 * lm_stats_setup_xxx helpers.
 * NOTE(review): the member declarations (at least a 'length' field, which
 * lm_stats_set_dmae_operation_sges dereferences) have been elided from
 * this extract -- TODO recover from the original source. */
struct lm_stats_sge_descr_t{
};
/**lm_stats_set_dmae_operation_sges
* Set the SGEs of a DMAE operation according to the supplied
* SGE descriptor array. If the DMAE operation had any SGEs
* defined before, this function removes them.
*
* @param pdev the device to use
* @param operation the operation to modify
* @param sge_descr the array of SGE descriptors
* @param num_sges the number of SGE descriptors
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* failure value on failure.
*/
static lm_status_t lm_stats_set_dmae_operation_sges(lm_device_t* pdev, lm_dmae_operation_t* operation, struct lm_stats_sge_descr_t* sge_descr, u8_t num_sges)
{
/* NOTE(review): the declarations of lm_status / sge_idx / sge_source /
 * sge_dest, the call that clears pre-existing SGEs, and the loop header
 * iterating over [0, num_sges) appear to have been elided from this
 * extract -- TODO confirm against the original source. */
//after returning from D3 there may be some SGEs set up here.
{
lm_status = lm_dmae_operation_add_sge(pdev, operation, sge_source, sge_dest, sge_descr[sge_idx].length);
if (LM_STATUS_SUCCESS != lm_status)
{
/* adding an SGE failed -- stop immediately and propagate the error */
DbgBreak();
return lm_status;
}
}
return lm_status;
}
/**lm_stats_hw_setup_emac
* setup the DMAE SGL for the EMAC stats DMAE context
*
* @param pdev the device to initialize
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* value on failure.
*/
{
const u16_t sge1_len = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_emac_stats_query->stats_rx );
const u16_t sge2_len = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_emac_stats_query->stats_rx_err );
struct lm_stats_sge_descr_t sges[3] = {{0}}; //we can't use an initializer because DOS compiler requires that all initializers be constant.
if (LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg("Failed to initialize EMAC stats DMAE operation.\n");
return lm_status;
}
if (LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg("Failed to initialize NIG stats DMAE operation.\n");
return lm_status;
}
return lm_status;
}
/**lm_stats_hw_setup_non_emac
* Setup the DMAE SGL for the non-EMAC stats DMAE context. This
* function assumes that the MAC statistics themselves can be
* read with 2 DMAE transactions.
*
*
* @param pdev the device to initialize
* @param paddr_base the base physical address where the
* statistics data will be copied.
* @param grc_base the base GRC address of the required stats
* block (e.g NIG_REG_INGRESS_BMAC0_MEM or
* GRCBASE_MSTAT0)
* @param block1_start offset of the first register in the first
* transaction.
* @param block1_size size (in bytes) of the first DMAE
* transaction.
* @param block2_start offset of the first register in the
* second transaction.
* @param block2_size size (in bytes) of the second DMAE
* transaction.
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* value on failure.
*/
{
lm_dmae_operation_t* operation = (lm_dmae_operation_t*)pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation;
if (LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg("Failed to initialize non-EMAC stats DMAE operation.\n");
return lm_status;
}
if (LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg("Failed to initialize NIG stats DMAE operation.\n");
return lm_status;
}
return lm_status;
}
/**lm_stats_hw_setup_bmac
* @see lm_stats_hw_setup_non_emac for more details.
*
* @param pdev the device to initialize.
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* value on failure.
*/
{
// nig :GRCBASE_NIG, reg name (NIG_XXX)
switch( port )
{
case 0:
break;
case 1:
if (!CHIP_IS_E1x(pdev))
{
}
break;
default:
break;
}
if (CHIP_IS_E1x(pdev))
{
bmac_tx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query->stats_tx);
bmac_rx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query->stats_rx);
}
else
{
bmac_tx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query->stats_tx);
bmac_rx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query->stats_rx);
}
return lm_status;
}
/**lm_stats_hw_setup_mstat
* Setup the MSTAT stats DMAE transactions.
* @see lm_stats_hw_setup_non_emac for more details.
*
* @param pdev the device to initialize.
*
* @return lm_status_t LM_STATUS_SUCCESS on success, some other
* value on failure.
*/
{
DbgMessage(pdev, INFORM, "lm_stats_hw_setup_mstat: mstat_tx_start=%x, mstat_tx_size=%x, mstat_rx_start=%x, mstat_rx_size=%x\n",mstat_tx_start,mstat_tx_size,mstat_rx_start, mstat_rx_size);
switch(port)
{
case 0:
break;
case 1:
break;
default:
break;
}
return lm_status;
}
/* Description:
* setups resources regarding hw stats (init fields)
* set offsets serials of hw reads, either from EMAC & BIGMAC or from MSTAT block
*/
{
/* enable hw collect with mstat only if it's not fpga and not a 4-domain emulation compile... */
u8_t b_enable_collect = HAS_MSTAT(pdev)? ((CHIP_REV_IS_EMUL(pdev) && (CHIP_BONDING(pdev) == 0)) || CHIP_REV_IS_ASIC(pdev)) : TRUE;
{
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
else
{
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if(lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled = b_enable_collect ; // HW stats are not supported on E3 FPGA.
return lm_status ;
} /* lm_stats_hw_setup */
/**
* This function will prepare the statistics ramrod data the way
* we will only have to increment the statistics counter and
* send the ramrod each time we have to.
*
* @param pdev
*/
{
stats_hdr->drv_stats_counter = 0;
/* storm_counters struct contains the counters of completed
* statistics requests per storm which are incremented by FW
 * each time it completes handling a statistics ramrod. We will
* check these counters in the timer handler and discard a
* (statistics) ramrod completion.
*/
/* prepare to the first stats ramrod (will be completed with
 * the counters equal to zero) - init counters to something different.
*/
mm_memset(&stats_fw->fw_stats_data->storm_counters, 0xff, sizeof(stats_fw->fw_stats_data->storm_counters) );
/**** Port FW statistics data ****/
/* For port query index is a DONT CARE */
/**** PF FW statistics data ****/
/* For PF query index is a DONT CARE */
/**** Toe query ****/
if ( !CHIP_IS_E1x(pdev) )
{
// FW will assert if we send this kind for chip < E2
/**** FCoE query ****/
}
else
{
// if no FCoE, we need to decrease command count by one
}
/**** Clients' queries ****/
/* TODO : VF! more stats? */
}
#ifdef VF_INVOLVED
{
process_it = TRUE;
cmd_cnt++;
}
process_it = TRUE;
cmd_cnt++;
}
if (process_it) {
}
}
}
}
#endif
/* Description:
* setups fw statistics parameters
*/
{
stats_fw->drv_counter = 0 ;
stats_fw->b_collect_enabled = pdev->params.fw_stats_init_value ; // change to TRUE in order to enable fw stats
/* Prepare the constant slow-path command (For stats we don't allocate a new one each time) */
0 /* cid: Don't care */,
FALSE /* don't release sp mem*/);
/* Prepare the FW stats ramrod request structure (can do this just once) */
}
/*
*------------------------------------------------------------------------
* lm_stats_fw_check_update_done -
*
* check done flags and update flags
*
*------------------------------------------------------------------------
*/
{
if CHK_NULL( ptr_stats_flags_done )
{
return;
}
return;
}
// For each storm still wasn't done, we check and if done - set, so next time
// we won't need to check again
// eth xstorm
{
{
}
}
// eth tstorm
{
{
}
}
// eth ustorm
{
{
}
}
// eth cstorm
{
{
}
}
}
/**
* @Desription: Checks if FW completed last statistic update, if
* it did it assigns the statistics
*
* @param pdev
*
* @return lm_status_t LM_STATUS_SUCCESS if FW has completed
* LM_STATUS_BUSY if it hasn't yet completed
*/
{
{
DbgBreakIf( !pdev ) ;
return LM_STATUS_INVALID_PARAMETER;
}
/* First check if the ramrod has completed, if it hasn't don't bother checking
* dma completion yet, we need both of them to complete before sending another
* ramrod. */
{
}
{
// check done flags and update the flag if there was a change
// Check if we can assign any of the storms
{
// assign stats that are ready
#ifdef VF_INVOLVED
#ifndef __LINUX
}
#endif
#endif
}
// check whether all storms were assigned
{
}
else
{
#ifdef VF_INVOLVED
#ifndef __LINUX
} else {
}
}
}
}
#endif
#endif
// barrier (for IA64) is to assure that the counter will be incremented BEFORE
// the complation_done flag is set to TRUE.
// in order to assure correct drv_counter sent to fw in lm_stats_on_timer (CQ48772)
{
}
// now we can notify timer that cb is done!
}
}
return lm_status;
}
void
{
//Tx
}
void
{
//Section 0
//Section 1
LM_SIGN_EXTEND_VALUE_32(collect->rx_stat1.fcoe_rx_drop_pkt_cnt, mirror->fcoe_rx_drop_pkt_cnt_tstorm);
}
void
{
//Section 2
LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.fcoe_rx_drop_pkt_cnt, mirror->fcoe_rx_drop_pkt_cnt_ustorm);
}
/*
*------------------------------------------------------------------------
* lm_stats_fw_assign -
*
* assign values from fw shared memory to the lm structs
*
*------------------------------------------------------------------------
*/
void lm_stats_fw_assign( struct _lm_device_t *pdev, u32_t stats_flags_done, u32_t* ptr_stats_flags_assigned )
{
int arr_cnt = 0 ;
u8_t i = 0 ;
{
return;
}
// assign reg_pair fw collected into fw mirror
// assign u32 fw collected into fw mirror + do sign extension
// eth xstorm
{
// regpairs
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.ucast_bytes_sent);
// regpairs
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.mcast_bytes_sent);
// regpairs
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.bcast_bytes_sent);
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].total_sent_bytes =
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent +
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent +
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent;
// non regpairs
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.ucast_pkts_sent,
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.mcast_pkts_sent,
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.bcast_pkts_sent,
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.error_drop_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].error_drop_pkts );
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].total_sent_pkts =
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent+
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent +
pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent;
/* TOE Stats for Xstorm */
for ( i = 0; i < arr_cnt; i++)
{
}
if( !CHIP_IS_E1x(pdev) )
{
}
}
// eth tstorm
{
// regpairs
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_unicast_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_ucast_bytes );
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_bcast_bytes );
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_multicast_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_mcast_bytes );
// FIXME REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_error_bytes,
// pdev->vars.stats.stats_collect.stats_fw.addr_eth_stats_query->tstorm_common.client_statistics[cnt_id].rcv_error_bytes );
// eth tstorm - non regpairs
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.checksum_discard,
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].checksum_discard );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.pkts_too_big_discard,
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].packets_too_big_discard );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_ucast_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_unicast_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_bcast_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_mcast_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_multicast_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.no_buff_discard,
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].no_buff_discard );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.ttl0_discard,
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mf_tag_discard,
/* Port Statistics */
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mac_filter_discard, \
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.brb_truncate_discard, \
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mac_discard, \
// toe tstorm
for ( i = 0; i < arr_cnt; i++)
{
}
if( !CHIP_IS_E1x(pdev) )
{
}
}
// eth ustorm
{
// regpairs
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.ucast_no_buff_bytes );
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.mcast_no_buff_bytes );
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.bcast_no_buff_bytes );
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_bytes );
// non regpairs
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.ucast_no_buff_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.mcast_no_buff_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.bcast_no_buff_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_events,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_events );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_aborts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_aborts );
if( !CHIP_IS_E1x(pdev) )
{
}
}
{
// toe cstorm
}
}
#ifdef VF_INVOLVED
// Assign FW statistics for a VF (PF side): copy collected xstorm/tstorm/ustorm
// queue statistics into the driver's mirror structures, folding regpairs into
// u64 values and sign-extending 32-bit counters.
// NOTE(review): cli_id and several locals are declared earlier in this
// function's full body; presumably cli_id selects the VF's client statistics
// slot - confirm against the complete source.
void lm_pf_stats_vf_fw_assign(struct _lm_device_t *pdev, u32_t stats_flags_done, u32_t* ptr_stats_flags_assigned)
{
{
return;
}
// eth xstorm
{
// regpairs
// regpairs
// regpairs
// non regpairs
}
}
// eth tstorm
{
// regpairs
// FIXME REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_error_bytes,
// pdev->vars.stats.stats_collect.stats_fw.addr_eth_stats_query->tstorm_common.client_statistics[cnt_id].rcv_error_bytes );
// eth tstorm - non regpairs
}
}
// eth ustorm
{
// regpairs
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_bytes );
// non regpairs
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_events,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_events );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_aborts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_aborts );
}
}
{
}
}
#endif
/**lm_stats_hw_macs_assign
*
* THIS FUNCTION MUST BE CALLED INSIDE PHY LOCK
*
* The mirrored statistics store 2 copies of the MAC stats:
* CURRENT and TOTAL. the reason for this is that each PF has
* its own MAC and when a PMF change occurs, the new PMF
* would start with all MAC stats equal to 0. in this case
* CURRENT would be zeroed on the next collection, but TOTAL
* would still have the old stats.
* because of this, TOTAL is updated according to the difference
* between the old value and the new value.
*
* the following function updates a field in the CURRENT block
* and returns the value to be added to the TOTAL block
*
* @param bits the number of data bits in the field
* @param field_collect_val the value collected from the HW
* @param field_mirror_val a pointer to the relevant field in
* the CURRENT block
*
* @return the difference between the new value and the old
* value - this should be added to the relevant field in
* the TOTAL block.
*
* @see stats_macs_idx_t , lm_stats_hw_t
*/
// Body of the CURRENT/TOTAL MAC counter updater described in the block
// comment above: store the freshly collected HW value into the CURRENT
// mirror field and return the delta to add to the TOTAL block.
// NOTE(review): the function signature and the declaration of 'prev' are not
// shown at this point in the file; 'prev' presumably snapshots
// *field_mirror_val before the update - confirm.
{
/* MSTAT has no wraparound logic, and its stat values are zeroed on each read.
This means that what we read is the difference in the stats since the last read,
so we should just update the counters and exit.
EMAC and BMAC stats have wraparound logic and are not zeroed on read, so we handle
the wraparound if needed and return the difference between the old value and the
new value.*/
{
// MSTAT: the collected value is already a delta since the last read.
return field_collect_val;
}
else
{
// EMAC/BMAC: absolute counter - unwrap if needed, store, and return the delta.
*field_mirror_val = lm_update_wraparound_if_needed(bits, field_collect_val, *field_mirror_val,FALSE/*no need to swap bytes on HW stats*/) ;
return *field_mirror_val - prev;
}
}
if (mac_query->field_collect != 0) { DbgMessage(pdev, INFORM, "assigning %s[=%x] to %s, width %d.\n", #field_collect, mac_query->field_collect, #field_mirror, field_width ); } \
field_width, \
// Width-specific wrappers around LM_STATS_HW_MAC_ASSIGN: the trailing number
// is the counter width in bits (used for wraparound/sign-extension handling).
#define LM_STATS_HW_MAC_ASSIGN_U32( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 32)
#define LM_STATS_HW_MAC_ASSIGN_U36( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 36)
#define LM_STATS_HW_MAC_ASSIGN_U42( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 42)
LM_SIGN_EXTEND_VALUE_##bits( pdev->vars.stats.stats_collect.stats_hw.addr_##block_name##_stats_query->field_collect, \
#define LM_STATS_HW_NIG_ASSIGN_U32(block_name,field_collect,field_mirror) LM_STATS_HW_NIG_ASSIGN_UXX(32, block_name,field_collect,field_mirror)
/* The code below is duplicated for bmac1, bmac2 and mstat, the structure mac_query differs between them and therefore
* needs to be done this way (to avoid duplicating the code) */
{ /* shared BMAC1/BMAC2/MSTAT field assignments (see comment above) */ \
/* Maps bmac_query into macs struct */ \
/* Spec .1-5 (N/A) */ \
/* Spec .6 */ \
if (!IS_MULTI_VNIC(pdev)) { \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtgca, stats_tx.tx_stat_ifhcoutucastpkts_bmac_bca, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtpkt, stats_tx.tx_stat_ifhcoutucastpkts_bmac_pkt , _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtmca, stats_tx.tx_stat_ifhcoutucastpkts_bmac_mca , _field_width); \
/* Spec .7 */ \
/* Spec .8 */ \
} \
/* Spec .9 */ \
/* Spec .10-11 (N/A) */ \
/* Spec .12 */ \
/* Spec .13 */ \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grovr, stats_rx.rx_stat_dot3statsframestoolong, _field_width); \
/* Spec .14 (N/A) */ \
/* Spec .15 */ \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_xoffpauseframesreceived, _field_width); \
/* Spec .17 */ \
/* Spec .18-21 (N/A) */ \
/* Spec .22 */ \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xpf, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxcf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf, _field_width); \
/* Spec .23-29 (N/A) */ \
/* Spec. 30 */ \
/* Spec. 31 */ \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt127, stats_tx.tx_stat_etherstatspkts65octetsto127octets, _field_width); \
/* Spec. 32 */ \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt255, stats_tx.tx_stat_etherstatspkts128octetsto255octets, _field_width); \
/* Spec. 33 */ \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt511, stats_tx.tx_stat_etherstatspkts256octetsto511octets, _field_width); \
/* Spec. 34 */ \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt1023, stats_tx.tx_stat_etherstatspkts512octetsto1023octets, _field_width); \
/* Spec. 35 */ \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt1518, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, _field_width); \
/* Spec. 36 */ \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt2047, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt4095, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt9216, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt16383, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383, _field_width);\
/* Spec. 38 */ \
/* Spec. 39 */ \
/* Spec. 40 (N/A) */ \
/* Spec. 41 */ \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gterr, stats_tx.tx_stat_dot3statsinternalmactransmiterrors, _field_width); \
/* Spec. 42 (N/A) */ \
/* Spec. 43 */ \
/* Spec. 44 */ \
/* Spec. 45 */ \
/* Spec. 46 (N/A) */ \
/* Spec. 47 */ \
}
//Assign the registers that do not exist in MSTAT or have a different size and therefore can't
//be a part of LM_STATS_NON_EMAC_ASSIGN_CODE
#define LM_STATS_BMAC_ASSIGN_CODE \
{ /* BMAC-only registers (not present in MSTAT, or different size there) */ \
}
/* The code below is duplicated for bmac2 and mstat, the structure mac_query differs between them and therefore
* needs to be done this way (to avoid duplicating the code) */
{ /* shared BMAC2/MSTAT assignments (see comment above) */ \
/* Rx PFC Packet Counter*/ \
}
//can't be a part of LM_STATS_NON_EMAC_ASSIGN_CODE.
//Also, some fields are read from EMAC stats on devices that have an EMAC block but must be read
//from MSTAT on devices that don't have one.
#define LM_STATS_MSTAT_ASSIGN_CODE \
{ /* MSTAT-only counters; assigned with a 39-bit field width */ \
if (!IS_MULTI_VNIC(pdev)) {\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr255, stats_rx.rx_stat_etherstatspkts128octetsto255octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr511, stats_rx.rx_stat_etherstatspkts256octetsto511octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr1023, stats_rx.rx_stat_etherstatspkts512octetsto1023octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr1518, stats_rx.rx_stat_etherstatspkts1024octetsto1522octets, 39);\
}\
}
/**lm_stats_hw_bmac1_assign
* Copy the stats data from the BMAC1 stats values to the
* generic struct used by the driver. This function must be
* called after lm_stats_hw_collect that copies the data from
* the hardware registers to the host's memory.
*
*
* @param pdev the device to use.
*/
// Copy BMAC1 HW statistics into the driver's generic MAC stats mirror.
{
/* Macros required for macros used in this code */
volatile struct _stats_bmac1_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query;
}
/**lm_stats_hw_bmac2_assign
* Copy the stats data from the BMAC2 stats values to the
* generic struct used by the driver. This function must be
* called after lm_stats_hw_collect that copies the data from
* the hardware registers to the host's memory.
*
*
* @param pdev the device to use.
*/
// Copy BMAC2 HW statistics into the driver's generic MAC stats mirror.
{
volatile struct _stats_bmac2_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query;
}
/**lm_stats_hw_mstat_assign
* Copy the stats data from the MSTAT stats values to the
* generic struct used by the driver. This function must be
* called after lm_stats_hw_collect that copies the data from
* the hardware registers to the host's memory.
*
*
* @param pdev the device to use.
*/
// Copy MSTAT HW statistics into the driver's generic MAC stats mirror.
{
volatile struct _stats_mstat_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.addr_mstat_stats_query;
}
/**lm_stats_hw_emac_assign
* Copy the stats data from the EMAC stats values to the generic
* struct used by the driver. This function must be called after
* lm_stats_hw_collect that copies the data from the hardware
* registers to the host's memory.
*
*
* @param pdev the device to use.
*/
// Copy EMAC HW statistics (32-bit counters) into the driver's generic MAC
// stats mirror, via LM_STATS_HW_MAC_ASSIGN_U32.
{
volatile struct _stats_emac_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_emac_stats_query;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsfragments, stats_rx.rx_stat_etherstatsfragments ) ;
// Per-type/size-bucket counters are skipped in multi-VNIC (MF) mode.
if (!IS_MULTI_VNIC(pdev)) {
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinmulticastpkts, stats_rx.rx_stat_ifhcinmulticastpkts );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinbroadcastpkts, stats_rx.rx_stat_ifhcinbroadcastpkts );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts64octets, stats_rx.rx_stat_etherstatspkts64octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts65octetsto127octets, stats_rx.rx_stat_etherstatspkts65octetsto127octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts128octetsto255octets, stats_rx.rx_stat_etherstatspkts128octetsto255octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts256octetsto511octets, stats_rx.rx_stat_etherstatspkts256octetsto511octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts512octetsto1023octets, stats_rx.rx_stat_etherstatspkts512octetsto1023octets);
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts1024octetsto1522octets, stats_rx.rx_stat_etherstatspkts1024octetsto1522octets);
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspktsover1522octets, stats_rx.rx_stat_etherstatspktsover1522octets);
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutmulticastpkts, stats_tx.tx_stat_ifhcoutmulticastpkts);
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutbroadcastpkts, stats_tx.tx_stat_ifhcoutbroadcastpkts);
}
// Error/pause/collision counters are assigned unconditionally.
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsfcserrors, stats_rx.rx_stat_dot3statsfcserrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsalignmenterrors, stats_rx.rx_stat_dot3statsalignmenterrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xonpauseframesreceived, stats_rx.rx_stat_xonpauseframesreceived ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xoffpauseframesreceived, stats_rx.rx_stat_xoffpauseframesreceived ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_maccontrolframesreceived, stats_rx.rx_stat_maccontrolframesreceived ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsframestoolong, stats_rx.rx_stat_dot3statsframestoolong ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsjabbers, stats_rx.rx_stat_etherstatsjabbers ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsundersizepkts, stats_rx.rx_stat_etherstatsundersizepkts ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx_err.rx_stat_falsecarriererrors, stats_rx_err.rx_stat_falsecarriererrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatscollisions, stats_tx.tx_stat_etherstatscollisions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statssinglecollisionframes, stats_tx.tx_stat_dot3statssinglecollisionframes ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsmultiplecollisionframes, stats_tx.tx_stat_dot3statsmultiplecollisionframes ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsdeferredtransmissions, stats_tx.tx_stat_dot3statsdeferredtransmissions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsexcessivecollisions, stats_tx.tx_stat_dot3statsexcessivecollisions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statslatecollisions, stats_tx.tx_stat_dot3statslatecollisions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts64octets, stats_tx.tx_stat_etherstatspkts64octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts65octetsto127octets, stats_tx.tx_stat_etherstatspkts65octetsto127octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts128octetsto255octets, stats_tx.tx_stat_etherstatspkts128octetsto255octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts256octetsto511octets, stats_tx.tx_stat_etherstatspkts256octetsto511octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts512octetsto1023octets, stats_tx.tx_stat_etherstatspkts512octetsto1023octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspktsover1522octets, stats_tx.tx_stat_etherstatspktsover1522octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsinternalmactransmiterrors, stats_tx.tx_stat_dot3statsinternalmactransmiterrors ) ;
}
// Dispatch HW stats assignment per MAC type, then derive the NIG extended
// egress counters (either from the assigned MSTAT totals, or directly from
// the NIG query with 36-bit sign extension).
{
{
}
{
}
{
}
{
}
else
{
// NOTE(review): the second comparison looks like it should be '!=' so this
// asserts on an *unknown* MAC type; as written it fires for BMAC without
// MSTAT, which the branches above appear to handle - TODO confirm.
DbgBreakIf((pdev->vars.mac_type != MAC_TYPE_EMAC) && (pdev->vars.mac_type == MAC_TYPE_BMAC) && !HAS_MSTAT(pdev) );
}
//nig
{
if (!IS_MULTI_VNIC(pdev))
{
}
{
//Note: this must occur after the other HW stats have been assigned.
stats_macs_t* assigned_hw_stats = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_TOTAL];
/*NIG pkt0 counts packets with sizes 1024-1522 bytes. MSTAT has an equivalent register.*/
nig_ex_stats->egress_mac_pkt0 = assigned_hw_stats->stats_tx.tx_stat_etherstatspkts1024octetsto1522octet;
/*NIG pkt1 counts packets of size 1523 and up. We sum the required MSTAT values to get the right result.
Note that the field names are somewhat misleading, since they don't count sizes 1522-XXXX but [1522-2047],[2048-4095],[4096-9216],[9217-16383]
(see MSTAT low level design document).
*/
nig_ex_stats->egress_mac_pkt1 = assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047+
}
else
{
LM_SIGN_EXTEND_VALUE_36( pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt0, pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt0 ) ;
LM_SIGN_EXTEND_VALUE_36( pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt1, pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt1 ) ;
}
}
}
/*
*Function Name: lm_drv_info_to_mfw_assign_eth
*
*Parameters:
*
*Description:
* assign drv_info eth stats from different places in the pdev to "mirror" (vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats)
*Returns:
*
*/
// Fill the drv_info eth stats mirror from driver state (see header above).
// NOTE(review): stats_eth and client_id are set up earlier in this function's
// full body - verify their initialization against the complete source.
{
{
return;
}
#define DRV_INFO_TO_MFW_NOT_SUPPORTED 0
/* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
/* Additional Programmed MAC Addr 1. 2*/
// stats_eth->mac_local, mac_add1, mac_add2 - NO NEED to update here since they are already updated in lm_eq_handle_classification_eqe
/* MTU Size. Note : Negotiated MTU */
/* LSO MaxOffloadSize. */
/* LSO MinSegmentCount. */
/* Num Offloaded Connections TCP_IPv4. */
stats_eth->ipv4_ofld_cnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
/* Num Offloaded Connections TCP_IPv6. */
stats_eth->ipv6_ofld_cnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
/* Promiscuous Mode. non-zero true */
stats_eth->promiscuous_mode = ( 0 != GET_FLAGS( pdev->client_info[client_id].last_set_rx_mask, LM_RX_MASK_PROMISCUOUS_MODE ) );
/* TX Descriptors Queue Size */
/* RX Descriptors Queue Size */
/* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
/* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
/* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
/* Num VF assigned to this PF. */
/* Feature_Flags. */
} /* lm_drv_info_to_mfw_assign_eth */
/*
*Function Name: lm_stats_drv_info_to_mfw_assign
*
*Parameters:
*
*Description:
* Upon the opcode assign relevant stats from "mirror" to physical memory in "collect"
* then, MFW will read this data.
*Returns:
*
*/
// Copy the requested drv_info stats block (eth/iscsi/fcoe, selected by
// drv_info_op) from the mirror into the physical buffer the MFW reads.
// NOTE(review): lm_status is declared in the elided portion of this function;
// presumably initialized to LM_STATUS_SUCCESS - confirm.
lm_status_t lm_stats_drv_info_to_mfw_assign( struct _lm_device_t *pdev, const enum drv_info_opcode drv_info_op )
{
void* dest = (void*)pdev->vars.stats.stats_collect.drv_info_to_mfw.addr.eth_stats; // this is a union so doesn't matter if etc/iscsi/fcoe
{
// dest might be NULL if we got here in chip id < E3
DbgBreakIf(!dest);
return LM_STATUS_FAILURE;
}
switch(drv_info_op)
{
case ETH_STATS_OPCODE:
// We gather eth stats from already known data
break;
case ISCSI_STATS_OPCODE:
// storage data is set by miniport
break;
case FCOE_STATS_OPCODE:
// storage data is set by miniport
break;
default:
break;
}
if( LM_STATUS_SUCCESS == lm_status)
{
// Zero buffer
// Copy relevant field
}
return lm_status;
} /* lm_stats_drv_info_to_mfw_assign */
// resets mirror fw statistics
{
{
// Sanity: a NULL pdev here is a driver bug.
DbgBreakIf(!pdev) ;
}
// Zero the whole mirrored FW statistics block.
mm_memset( &pdev->vars.stats.stats_mirror.stats_fw, 0, sizeof(pdev->vars.stats.stats_mirror.stats_fw) ) ;
}
// Report the PFC frames-received counter from the MAC stats mirror.
{
stats->pfc_frames_received = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_pfcPacketCounter ) );
}
// These error counters are hard-coded to zero in this implementation.
{
stats->tx_no_desc = 0 ;
stats->tx_no_map_reg = 0 ;
stats->rx_phy_err = 0 ;
stats->rx_alignment = 0;
stats->rx_short_packet = 0 ;
stats->rx_giant_packet = 0 ;
}
// Report the L2 TX assembly-buffer usage from the driver stats mirror.
{
stats->TxL2AssemblyBufUse = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_l2_assembly_buf_use ;
}
// Report TOE driver statistics (per-IP-version connection counts plus
// indication/assembly counters) from the driver stats mirror.
{
idx = STATS_IP_4_IDX ;
stats->CurrentlyIpv4Established = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].currently_established ;
idx = STATS_IP_6_IDX ;
stats->CurrentlyIpv6Established = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].currently_established ;
stats->RxIndicateReturnPendingCnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_indicate_return_pending_cnt ;
stats->RxIndicateReturnDoneCnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_indicate_return_done_cnt ;
stats->TxL4AssemblyBufUse = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.tx_l4_assembly_buf_use ;
}
// Populate the L2 (interface MIB style) statistics block from the FW storm
// mirrors and the 64-bit HW MAC mirrors.
// NOTE(review): 'stats', 'stats_v3' and 'idx' are declared in the elided
// portion of this function - confirm their setup against the full source.
{
// TODO - change IOCTL structure to be per client
stats->IfHCInOctets = pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_bytes +
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_bytes +
stats->IfHCOutOctets = pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].total_sent_bytes ;
stats->IfHCInUcastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_pkts ) ;
stats->IfHCInMulticastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_pkts ) ;
stats->IfHCInBroadcastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_pkts ) ;
stats->IfHCInUcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_bytes ) ;
stats->IfHCInMulticastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_bytes ) ;
stats->IfHCInBroadcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_bytes ) ;
stats->IfHCOutUcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].unicast_bytes_sent ) ;
stats->IfHCOutMulticastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].multicast_bytes_sent ) ;
stats->IfHCOutBroadcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].broadcast_bytes_sent ) ;
stats->IfHCOutPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].total_sent_pkts ) ;
#ifdef VF_INVOLVED
,NULL
#endif
) ;
#ifdef VF_INVOLVED
,NULL
#endif
) ;
#ifdef VF_INVOLVED
,NULL
#endif
) ;
stats->IfHCInPkts = pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_pkts +
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_pkts +
// HW MAC counters: 64-bit reads from the MAC stats mirror.
stats->IfHCInFalseCarrierErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx_err.rx_stat_falsecarriererrors ) );
stats->Dot3StatsInternalMacTransmitErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsinternalmactransmiterrors )) ;
stats->Dot3StatsCarrierSenseErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statscarriersenseerrors )) ;
stats->Dot3StatsFCSErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsfcserrors )) ;
stats->Dot3StatsAlignmentErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsalignmenterrors )) ;
stats->Dot3StatsSingleCollisionFrames = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statssinglecollisionframes )) ;
stats->Dot3StatsMultipleCollisionFrames = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsmultiplecollisionframes )) ;
stats->Dot3StatsDeferredTransmissions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsdeferredtransmissions )) ;
stats->Dot3StatsExcessiveCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsexcessivecollisions )) ;
stats->Dot3StatsLateCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statslatecollisions )) ;
stats->EtherStatsCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatscollisions )) ;
stats->EtherStatsFragments = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsfragments )) ;
stats->EtherStatsUndersizePkts = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsundersizepkts )) ;
stats->EtherStatsOverrsizePkts = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsframestoolong )) ;
stats->EtherStatsPktsTx64Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts64octets )) ;
stats->EtherStatsPktsTx65Octetsto127Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts65octetsto127octets )) ;
stats->EtherStatsPktsTx128Octetsto255Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts128octetsto255octets )) ;
stats->EtherStatsPktsTx256Octetsto511Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts256octetsto511octets )) ;
stats->EtherStatsPktsTx512Octetsto1023Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts512octetsto1023octets)) ;
// Size buckets >=1024 come from the NIG extended egress counters.
stats->EtherStatsPktsTx1024Octetsto1522Octets = (pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt0) ;
stats->EtherStatsPktsTxOver1522Octets = (pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt1) ;
stats->XonPauseFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xonpauseframesreceived )) ;
stats->XoffPauseFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xoffpauseframesreceived )) ;
stats->MacControlFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_maccontrolframesreceived )) ;
stats->MacControlFramesReceived += (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf )) ;
#ifdef VF_INVOLVED
,NULL
#endif
) ;
// TBD - IfInErrorsOctets - naming and support
stats->IfInErrorsOctets = 0;
// FW-side discard counters (port and per-client).
stats->IfInFramesL2FilterDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_filter_discard) ;
stats->IfInTTL0Discards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].ttl0_discard) ;
stats->IfInxxOverflowDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.xxoverflow_discard) ;
stats->IfInMBUFDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].no_buff_discard );
stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].ucast_no_buff_pkts );
stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].mcast_no_buff_pkts );
stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].bcast_no_buff_pkts );
{
/* v2 statistics */
}
// v3 statistics: coalescing counters from the ustorm mirror.
{
stats_v3->v3.coalesced_pkts = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_pkts;
stats_v3->v3.coalesced_bytes = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_bytes;
stats_v3->v3.coalesced_events = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_events;
stats_v3->v3.coalesced_aborts = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_aborts;
}
}
// Populate TOE TCP/IP MIB counters from the FW storm mirrors, once per IP
// version (tstorm supplies receive-side, xstorm supplies transmit-side).
{
// IP4
idx = STATS_IP_4_IDX ;
stats->InTCP4Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
stats->OutTCP4Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_out_segments ;
stats->RetransmittedTCP4Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_retransmitted_segments ;
stats->InTCP4Errors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].tcp_in_errors ;
stats->InIP4Receives = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
stats->InIP4HeaderErrors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_header_errors ;
stats->InIP4Discards = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_discards ;
stats->InIP4Delivers = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_delivers ;
stats->InIP4Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_octets ;
stats->OutIP4Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].ip_out_octets ;
stats->InIP4TruncatedPackets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_truncated_packets ;
// IP6
idx = STATS_IP_6_IDX ;
stats->InTCP6Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
stats->OutTCP6Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_out_segments ;
stats->RetransmittedTCP6Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_retransmitted_segments ;
stats->InTCP6Errors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].tcp_in_errors ;
stats->InIP6Receives = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
stats->InIP6HeaderErrors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_header_errors ;
stats->InIP6Discards = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_discards ;
stats->InIP6Delivers = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_delivers ;
stats->InIP6Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_octets ;
stats->OutIP6Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].ip_out_octets ;
stats->InIP6TruncatedPackets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_truncated_packets ;
}
{
}
}
{
}
}
/*
*------------------------------------------------------------------------
* lm_stats_mgmt_assign_func
*
* assign values from different 'mirror' structures into host_func_stats_t structure
* that will be sent later to mgmt
* NOTE: function must be called under PHY_LOCK (since it uses REG_WR_DMAE interface)
*------------------------------------------------------------------------
*/
{
{
return;
}
{
return;
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
// calculate 'total' rcv (total+discards)
val += (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[LM_CLI_IDX_NDIS].rcv_error_bytes) ;
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
}
#ifdef VF_INVOLVED
,NULL
#endif
) ;
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
}
// Calculate the size to be written through DMAE
// This code section must be under phy lock!
} // lm_stats_mgmt_assign
/*
*------------------------------------------------------------------------
* lm_stats_mgmt_read_base -
*
* read values from mgmt structures into host_func_stats_t base structure
* this is as a basic value that will be added when function report statistics
* NOTE: function must be called under PHY_LOCK (since it uses REG_RD_DMAE interface)
*------------------------------------------------------------------------
*/
{
{
return;
}
{
return;
}
{
return;
}
// This code section must be under phy lock!
} // lm_stats_mgmt_read_base
/*
*------------------------------------------------------------------------
* lm_stats_mgmt_clear_all_func -
*
* clear mgmt statistics for all functions
* should be called on init port part. first function should clear all other functions mail box
* NOTE: function must be called under PHY_LOCK (since it uses REG_WR_DMAE interface)
*------------------------------------------------------------------------
*/
{
// use current pdev stats_mcp_func for all function - (zeroed buffer)
{
if( 0 != fw_func_stats_ptr )
{
// This code section must be under phy lock!
// writes zero
}
{
// only one iteration functionand one for E1 !
break;
}
}
} // lm_stats_mgmt_clear_all_func
/*
*Function Name:lm_stats_port_to_from
*
*Parameters:
* b_is_to TRUE - to MCP
* b_is_to FALSE - from MCP
*Description:
*
*Returns:
*
*/
{
u8_t i = 0 ;
// OLD PMF:
// copy all EMAC 'reset' to 'total'
//
// NEW PMF:
// copy all EMAC 'total' to 'reset'
//
// NONE is up:
// copy only 'reset' to 'total'
{
case MAC_TYPE_EMAC:
case MAC_TYPE_BMAC:
case MAC_TYPE_UMAC:
case MAC_TYPE_XMAC:
break;
case MAC_TYPE_NONE:
break;
default:
DbgBreakMsg( "mac_type not acceptable" ) ;
return;
}
if( _b_is_to )\
{ \
} \
else \
{ \
}
for( i = stats_macs_idx; i < STATS_MACS_IDX_MAX; i++ )
{
LM_STATS_PMF_TO_FROM( rx_stat_dot3statsalignmenterrors, stats_rx.rx_stat_dot3statsalignmenterrors, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( rx_stat_etherstatsundersizepkts, stats_rx.rx_stat_etherstatsundersizepkts, b_is_to ) ;
// Exception - don't migrate this parameter (mandatory NDIS parameter)
//LM_STATS_PMF_TO_FROM( rx_stat_dot3statsframestoolong, stats_rx.rx_stat_dot3statsframestoolong, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_xonpauseframesreceived, stats_rx.rx_stat_xonpauseframesreceived, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( rx_stat_xoffpauseframesreceived, stats_rx.rx_stat_xoffpauseframesreceived, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statssinglecollisionframes, stats_tx.tx_stat_dot3statssinglecollisionframes, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsmultiplecollisionframes, stats_tx.tx_stat_dot3statsmultiplecollisionframes, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( tx_stat_dot3statslatecollisions, stats_tx.tx_stat_dot3statslatecollisions, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsexcessivecollisions, stats_tx.tx_stat_dot3statsexcessivecollisions, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( rx_stat_maccontrolframesreceived, stats_rx.rx_stat_maccontrolframesreceived, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_mac_xpf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xpf, b_is_to ) ; // EMAC 0 BMAC only
LM_STATS_PMF_TO_FROM( rx_stat_mac_xcf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf, b_is_to ) ; // EMAC 0 BMAC only
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts64octets, stats_tx.tx_stat_etherstatspkts64octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts65octetsto127octets, stats_tx.tx_stat_etherstatspkts65octetsto127octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts128octetsto255octets, stats_tx.tx_stat_etherstatspkts128octetsto255octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts256octetsto511octets, stats_tx.tx_stat_etherstatspkts256octetsto511octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts512octetsto1023octets, stats_tx.tx_stat_etherstatspkts512octetsto1023octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts1024octetsto1522octets, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspktsover1522octets, stats_tx.tx_stat_etherstatspktsover1522octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_mac_2047, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047, b_is_to ) ; // EMAC 0 BMAC only
LM_STATS_PMF_TO_FROM( tx_stat_mac_4095, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095, b_is_to ) ; // EMAC 0 BMAC only
LM_STATS_PMF_TO_FROM( tx_stat_mac_9216, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216, b_is_to ) ; // EMAC 0 BMAC only
LM_STATS_PMF_TO_FROM( tx_stat_mac_16383, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383, b_is_to ) ; // EMAC 0 BMAC only
LM_STATS_PMF_TO_FROM( rx_stat_etherstatsfragments, stats_rx.rx_stat_etherstatsfragments, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsdeferredtransmissions, stats_tx.tx_stat_dot3statsdeferredtransmissions, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsinternalmactransmiterrors, stats_tx.tx_stat_dot3statsinternalmactransmiterrors, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatscollisions, stats_tx.tx_stat_etherstatscollisions, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( tx_stat_ifhcoutbadoctets, stats_tx.tx_stat_ifhcoutbadoctets, b_is_to ) ; // BMAC 0
LM_STATS_PMF_TO_FROM( rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors, b_is_to ) ; // BMAC 0
}
// NIG, MSTAT and EEE
if( b_is_to)
{
}
else
{
}
}
/*
* \brief Calculate MCP status port size
*
* Calculate the size to be written.
*
* This logic is required as b10_l2_chip_statistics_t may increase in size
* (due to driver change), while MCP area reserved does not follow suit
* (as is the case, for example, when the driver and MFW do not version-
* match).
*
* This logic calculates the size available based on MFW version, and an
* additional shmem item added to specifically report size available, thus
* making future changes to statistics MCP size proof.
*
*/
{
{
sizeof_port_stats = min((size_t)sizeof_port_satas_shmem, sizeof(pdev->vars.stats.stats_mirror.stats_mcp_port));
}
else
{
if (b_bc_pfc_support)
{
// "pfc_frames_rx_lo" is the last member of host_port_stats_t for that MFW version.
}
else
{
// "not_used" is the last member of host_port_stats_t for that MFW version.
}
}
sizeof_port_stats /= sizeof(u32_t) ;
/*
* we are returning only 16 bits of the size calculated. Check (CHK version only) if the size
* is too big to be held in 16 bits, which either indicate an error wrt size, or DMAE
* about to be provided with a task too big.
*/
return (u16_t)sizeof_port_stats;
}
/*
*Function Name:lm_stats_port_zero
*
*Parameters:
*
*Description:
* This function should be called by first function on port (PMF) - zeros MCP scratch pad
*Returns:
*
*/
{
{
/* This could happen and therefore is not considered an error */
return LM_STATUS_SUCCESS;
}
// Calculate the size to be written through DMAE
// This code section must be under phy lock!
size ) ;
return lm_status ;
}
/*
*Function Name:lm_stats_port_save
*
*Parameters:
*
*Description:
* This function should be called before PMF is unloaded in order to preserve statistics for the next PMF
* ASSUMPTION: function must be called under PHY_LOCK (since it uses REG_WR_DMAE interface)
* ASSUMPTION: link can not change at this point and until PMF is down
*Returns:
*
*/
{
{
/* This could happen and therefore is not considered an error */
return LM_STATUS_SUCCESS;
}
// Calculate the size to be written through DMAE
// This code section must be under phy lock!
size ) ;
return lm_status ;
}
/*
*Function Name:lm_stats_port_load
*
*Parameters:
*
*Description:
* This function should be called before a new PMF is loaded in order to restore statistics from the previous PMF
* vars.is_pmf should be set to TRUE only after this function completed!
* ASSUMPTION: function must be called under PHY_LOCK (since it uses REG_RD_DMAE interface)
* ASSUMPTION: link can not change at this point and until PMF is up
*Returns:
*
*/
{
{
/* This could happen and therefore is not considered an error */
return LM_STATUS_SUCCESS;
}
// Calculate the size to be written through DMAE
// This code section must be under phy lock!
size ) ;
return lm_status ;
}
/*
*------------------------------------------------------------------------
* lm_stats_mgmt_assign
*
* write values from mgmt structures into func and port base structure
* NOTE: function must be called under PHY_LOCK (since it uses REG_RD_DMAE interface)
*------------------------------------------------------------------------
*/
{
{
return;
}
{
return;
}
{
}
{
// only PMF should assign port statistics
{
}
}
}
/*
*Function Name:lm_stats_on_pmf_update
*
*Parameters:
* b_on:
* TRUE - the device is becoming now a PMF
* FALSE - the device is now going down and transferring PMF to another device
*Description:
* the function should be called under PHY LOCK.
* TRUE when a device becoming a PMF and before the link status changed from last state when previous PMF was down after call for mcp driver load
* FALSE when a device going down and after the link status saved and can not be changed (interrupts are disabled) before call for mcp driver unload
*Returns:
*
*/
{
{
return LM_STATUS_INVALID_PARAMETER;
}
if( b_on )
{
}
else
{
// check for success, but link down is a valid situation!
// we need to save port stats only if link is down
// if link is up, it was already made on call to lm_stats_on_update_state.
if( LM_STATUS_LINK_DOWN == lm_status )
{
}
}
return lm_status ;
}
/*
*Function Name:lm_stats_on_pmf_init
*
*Parameters:
*
*Description:
* call this function under PHY LOCK when FIRST ever PMF is on
*Returns:
*
*/
{
{
return LM_STATUS_INVALID_PARAMETER;
}
return lm_status ;
}
{
// call the dmae commands sequance
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status;
}
// read two more NIG registers in the regular way - on E3 these do not exist!!!
if (!CHIP_IS_E3(pdev))
{
REG_RD_DMAE( pdev, pkt0, &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt0 );
REG_RD_DMAE( pdev, pkt1, &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt1 );
}
// EEE is only supported in E3 chip
if (CHIP_IS_E3(pdev))
{
}
return lm_status ;
}
/*
*Function Name:lm_stats_init_port_part
*
*Parameters:
*
*Description:
* call this function under PHY LOCK on port init
*Returns:
*
*/
{
}
/*
*Function Name:lm_stats_init_func_part
*
*Parameters:
*
*Description:
* call this function under PHY LOCK on function init
*Returns:
*
*/
{
{
}
}