#ifndef ECORE_ERASE
#ifdef __LINUX
#include <asm/byteorder.h>
#include <linux/etherdevice.h>
#endif
/* Always define ECORE_OOO for VBD */
#define ECORE_OOO
#include "bcmtype.h"
#include "utils.h"
#include "lm5710.h"
#include "ecore_sp_verbs.h"
#include "command.h"
#include "debug.h"
#include "ecore_common.h"
/************************ Debug print macros **********************************/
#else
#define ECORE_MSG
#endif
/************************ Error prints ****************************************/
#else
#define ECORE_ERR
#endif
/*********************** ECORE WRAPPER MACROS ********************************/
/*
* (differs from VBD set_flags, get_flags)
*/
do {\
} while (0)
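/* Illustrative sketch only (hypothetical SKETCH_* names, not the VBD
 * definitions): flag wrappers of this shape are usually guarded with
 * do-while(0) so they expand to a single statement.
 */
#define SKETCH_SET_FLAGS(flags, bits)	do { (flags) |= (bits); } while (0)
#define SKETCH_CLEAR_FLAGS(flags, bits)	do { (flags) &= ~(bits); } while (0)
#define SKETCH_GET_FLAGS(flags, bits)	((flags) & (bits))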
/************************ TODO for LM people!!! *******************************/
/************************ Lists ***********************************************/
pos; \
/**
* ECORE_LIST_FOR_EACH_ENTRY_SAFE - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* iterate over list of given type safe against removal of list entry
*/
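/* Illustrative sketch only (hypothetical SKETCH_* names; offsetof() comes
 * from <stddef.h>): a "safe" iterator caches the next node before the loop
 * body runs, so the body may unlink or free 'pos' without breaking the
 * traversal.
 */
#define SKETCH_CONTAINER_OF(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define SKETCH_LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, member, type)	   \
	for ((pos) = SKETCH_CONTAINER_OF((head)->next, type, member),	   \
	     (n) = SKETCH_CONTAINER_OF((pos)->member.next, type, member);  \
	     &(pos)->member != (head);					   \
	     (pos) = (n),						   \
	     (n) = SKETCH_CONTAINER_OF((n)->member.next, type, member))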
do { \
d_list_clear(head); \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
} while (0)
do { \
d_list_clear(new_head); \
} while (0)
{
}
/************************ Per compilation target ******************************/
#ifdef __LINUX
/* Other */
/* Mutex related */
#else /* ! LINUX */
#define ECORE_UNLIKELY(x) (x)
#define ECORE_LIKELY(x) (x)
/* Mutex related */
/* Atomic Bit Manipulation */
/* Other */
do { \
} while (0)
/*
 * In VBD we'll wait 10,000 times 100us (1 second) +
 * 2360 times 25000us (59 sec) = total 60 sec
 * (Windows-only note) the 25000us wait will cause the
 * wait to be without CPU stall (look in win_util.c)
 */
do { \
} while (0)
{
return set;
}
#endif /* END if "per LM target type" */
/* Spin lock related */
#endif /* not ECORE_ERASE */
#if defined(__FreeBSD__) && !defined(NOT_LINUX)
#include "bxe.h"
#include "ecore_init.h"
#ifdef ECORE_ERASE
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#if (LINUX_VERSION_CODE >= 0x02061b) && !defined(BNX2X_DRIVER_DISK) && !defined(__VMKLNX__) /* BNX2X_UPSTREAM */
#endif
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#endif
#endif
/**** Exe Queue interfaces ****/
/**
* ecore_exe_queue_init - init the Exe Queue object
*
* @o: pointer to the object
* @exe_len: length
* @owner: pointer to the owner
* @validate: validate function pointer
* @optimize: optimize function pointer
* @exec: execute function pointer
* @get: get function pointer
*/
struct ecore_exe_queue_obj *o,
int exe_len,
union ecore_qable_obj *owner,
{
mm_memset(o, 0, sizeof(*o));
ECORE_LIST_INIT(&o->exe_queue);
ECORE_LIST_INIT(&o->pending_comp);
o->exe_chunk_len = exe_len;
/* Owner specific callbacks */
exe_len);
}
struct ecore_exeq_elem *elem)
{
}
{
int cnt = 0;
#ifdef ECORE_ERASE
spin_lock_bh(&o->lock);
#endif
struct ecore_exeq_elem)
cnt++;
#ifdef ECORE_ERASE
spin_unlock_bh(&o->lock);
#endif
return cnt;
}
/**
* ecore_exe_queue_add - add a new element to the execution queue
*
* @pdev: driver handle
* @o: queue
* @cmd: new command to add
* @restore: true - do not optimize the command
*
* If the element is optimized or is illegal, frees it.
*/
struct ecore_exe_queue_obj *o,
struct ecore_exeq_elem *elem,
{
int rc;
ECORE_SPIN_LOCK_BH(&o->lock);
if (!restore) {
/* Try to cancel this element queue */
if (rc)
goto free_and_exit;
/* Check if this request is ok */
if (rc) {
goto free_and_exit;
}
}
/* If so, add it to the execution queue */
ECORE_SPIN_UNLOCK_BH(&o->lock);
return ECORE_SUCCESS;
ECORE_SPIN_UNLOCK_BH(&o->lock);
return rc;
}
struct _lm_device_t *pdev,
struct ecore_exe_queue_obj *o)
{
while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
struct ecore_exeq_elem,
link);
}
}
/**
* ecore_exe_queue_step - execute one execution chunk atomically
*
* @pdev: driver handle
* @o: queue
* @ramrod_flags: flags
*
* (Should be called while holding the exe_queue->lock).
*/
struct ecore_exe_queue_obj *o,
unsigned long *ramrod_flags)
{
/* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW
* which also implies there won't be any completion to clear the
* 'pending' list.
*/
if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
} else {
return ECORE_PENDING;
}
}
/* Run through the pending commands list and create a next
* execution chunk.
*/
while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
struct ecore_exeq_elem,
link);
/* Prevent both lists from being empty when moving an
 * element. This will allow calling
 * ecore_exe_queue_empty() without locking.
*/
mb();
} else
break;
}
/* Sanity check */
if (!cur_len)
return ECORE_SUCCESS;
if (rc < 0)
/* In case of an error return the commands back to the queue
* and reset the pending_comp.
*/
else if (!rc)
/* If zero is returned, it means there are no outstanding pending
 * completions and we may dismiss the pending list.
*/
return rc;
}
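/* Self-contained toy model (hypothetical names) of the stepping logic above:
 * up to exe_chunk_len commands move from the exe queue into the in-flight
 * chunk, and a new chunk may not start while one is still pending.
 */
struct sketch_exe_queue {
	int queued;		/* commands waiting in exe_queue */
	int pending;		/* commands of the chunk in flight */
	int exe_chunk_len;	/* max commands per ramrod */
};

static int sketch_exe_queue_step(struct sketch_exe_queue *o)
{
	if (o->pending)
		return 1;	/* like ECORE_PENDING: chunk still in flight */

	while (o->queued && (o->pending < o->exe_chunk_len)) {
		o->queued--;	/* move one command into the chunk */
		o->pending++;
	}

	/* 0 = nothing to do; 1 = chunk sent, completion pending */
	return o->pending ? 1 : 0;
}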
{
/* Don't reorder!!! */
mb();
}
struct _lm_device_t *pdev)
{
pdev);
}
/************************ raw_obj functions ***********************************/
{
/*
 * !! converts the value returned by ECORE_TEST_BIT such that it
 * is guaranteed not to be truncated regardless of BOOL definition.
 *
 * Note we cannot simply define the function's return value type
 * to match the type returned by ECORE_TEST_BIT, as it varies by
 * platform.
 */
}
{
}
{
}
/**
* ecore_state_wait - wait until the given bit(state) is cleared
*
* @pdev: device handle
* @state: state which is to be cleared
* @state_p: state buffer
*
*/
unsigned long *pstate)
{
/* can take a while if any port is running */
#ifndef ECORE_ERASE
/* In VBD we'll wait 10,000 times 100us (1 second) +
 * 2360 times 25000us (59 sec) = total 60 sec
 * (Windows-only note) the 25000us wait will cause the wait
 * to be without CPU stall (look in win_util.c)
 */
#endif
if (CHIP_REV_IS_EMUL(pdev))
cnt *= 20;
while (cnt--) {
#ifdef ECORE_STOP_ON_ERROR
#endif
return ECORE_SUCCESS;
}
#ifndef ECORE_ERASE
/* in case reset is in progress we won't get completion */
if (lm_reset_is_inprogress(pdev))
return 0;
#endif
return ECORE_IO;
}
/* timeout! */
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
return ECORE_TIMEOUT;
}
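/* Self-contained sketch of the polling pattern above (assumed semantics;
 * SKETCH_WAIT_100US stands in for the platform's delay primitive): spin
 * until the given state bit clears or the iteration budget runs out.
 */
#define SKETCH_WAIT_100US()	/* platform 100us delay goes here */

static int sketch_state_wait(volatile unsigned long *pstate, int state_bit,
			     int budget)
{
	while (budget--) {
		if (!(*pstate & (1UL << state_bit)))
			return 0;	/* bit cleared - completion arrived */
		SKETCH_WAIT_100US();	/* back off between polls */
	}
	return -1;			/* timed out */
}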
{
}
/* credit handling callbacks */
{
DbgBreakIf(!mp);
}
{
DbgBreakIf(!mp);
}
{
DbgBreakIf(!vp);
}
{
DbgBreakIf(!vp);
}
{
return FALSE;
return FALSE;
}
return TRUE;
}
{
}
{
}
{
}
{
}
{
return FALSE;
return FALSE;
}
return TRUE;
}
/**
* __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
* head list.
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details: Non-blocking implementation; should be called under execution
* queue lock.
*/
struct ecore_vlan_mac_obj *o)
{
if (o->head_reader) {
return ECORE_BUSY;
}
return ECORE_SUCCESS;
}
/**
* __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
* which wasn't able to run due to a taken lock on vlan mac head list.
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details Should be called under execution queue lock; notice it might release
* and reclaim it during its run.
*/
struct ecore_vlan_mac_obj *o)
{
int rc;
o->head_exe_request = FALSE;
o->saved_ramrod_flags = 0;
if (rc != ECORE_SUCCESS) {
ECORE_ERR("execution of pending commands failed with rc %d\n",
rc);
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
}
}
/**
* __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
* called due to vlan mac head list lock being taken.
*
* @pdev: device handle
* @o: vlan_mac object
* @ramrod_flags: ramrod flags of missed execution
*
* @details Should be called under execution queue lock.
*/
struct ecore_vlan_mac_obj *o,
unsigned long ramrod_flags)
{
o->head_exe_request = TRUE;
}
/**
* __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details Should be called under execution queue lock. Notice if a pending
* execution exists, it would perform it - possibly releasing and
* reclaiming the execution queue lock.
*/
struct ecore_vlan_mac_obj *o)
{
/* It's possible a new pending execution was added since this writer
* executed. If so, execute again. [Ad infinitum]
*/
while(o->head_exe_request) {
}
}
/**
* ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details Notice if a pending execution exists, it would perform it -
* possibly releasing and reclaiming the execution queue lock.
*/
struct ecore_vlan_mac_obj *o)
{
}
/**
* __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details Should be called under the execution queue lock. May sleep. May
* release and reclaim execution queue lock during its run.
*/
struct ecore_vlan_mac_obj *o)
{
/* If we got here, we're holding lock --> no WRITER exists */
o->head_reader++;
o->head_reader);
return ECORE_SUCCESS;
}
/**
* ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details May sleep. Claims and releases execution queue lock during its run.
*/
struct ecore_vlan_mac_obj *o)
{
int rc;
return rc;
}
/**
* __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details Should be called under execution queue lock. Notice if a pending
* execution exists, it would be performed if this was the last
* reader. possibly releasing and reclaiming the execution queue lock.
*/
struct ecore_vlan_mac_obj *o)
{
if (!o->head_reader) {
ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
} else {
o->head_reader--;
o->head_reader);
}
/* It's possible a new pending execution was added, and that this reader
* was last - if so we need to execute the command.
*/
if (!o->head_reader && o->head_exe_request) {
/* Writer release will do the trick */
}
}
/**
* ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
*
* @pdev: device handle
* @o: vlan_mac object
*
* @details Notice if a pending execution exists, it would be performed if this
* was the last reader. Claims and releases the execution queue lock
* during its run.
*/
struct ecore_vlan_mac_obj *o)
{
}
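/* Self-contained sketch (field names assumed from the code above) of the
 * reader/writer discipline on the head list: a writer step runs only when no
 * reader holds the list; otherwise it is pended and replayed when the last
 * reader leaves.
 */
struct sketch_head_lock {
	int head_reader;	/* number of active readers */
	int head_exe_request;	/* 1 if a writer step is pended */
};

static int sketch_write_trylock(struct sketch_head_lock *o)
{
	if (o->head_reader)
		return -1;	/* busy: caller pends the step instead */
	return 0;
}

static void sketch_read_unlock(struct sketch_head_lock *o,
			       void (*exec_pending)(struct sketch_head_lock *))
{
	if (o->head_reader)
		o->head_reader--;

	/* Last reader out while a writer step is pended - replay it now */
	if (!o->head_reader && o->head_exe_request)
		exec_pending(o);
}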
/**
 * ecore_get_n_elements - get n elements of the vlan mac head list
*
* @pdev: device handle
* @o: vlan_mac object
* @n: number of elements to get
* @base: base address for element placement
* @stride: stride between elements (in bytes)
*/
{
int counter = 0;
int read_lock;
if (read_lock != ECORE_SUCCESS)
ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */
struct ecore_vlan_mac_registry_elem) {
if (counter < n) {
counter++;
}
}
if (read_lock == ECORE_SUCCESS) {
}
}
/* check_add() callbacks */
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
return ECORE_INVAL;
/* Check if a requested MAC already exists */
struct ecore_vlan_mac_registry_elem)
return ECORE_EXISTS;
return ECORE_SUCCESS;
}
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
struct ecore_vlan_mac_registry_elem)
return ECORE_EXISTS;
return ECORE_SUCCESS;
}
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
struct ecore_vlan_mac_registry_elem)
ETH_ALEN)) &&
return ECORE_EXISTS;
return ECORE_SUCCESS;
}
/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
ECORE_MSG(pdev, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
struct ecore_vlan_mac_registry_elem)
return pos;
return NULL;
}
static struct ecore_vlan_mac_registry_elem *
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
struct ecore_vlan_mac_registry_elem)
return pos;
return NULL;
}
static struct ecore_vlan_mac_registry_elem *
struct ecore_vlan_mac_obj *o,
union ecore_classification_ramrod_data *data)
{
data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
struct ecore_vlan_mac_registry_elem)
ETH_ALEN)) &&
return pos;
return NULL;
}
/* check_move() callback */
struct ecore_vlan_mac_obj *src_o,
struct ecore_vlan_mac_obj *dst_o,
union ecore_classification_ramrod_data *data)
{
int rc;
/* Check if we can delete the requested configuration from the first
* object.
*/
/* check if configuration can be added */
/* If this classification can not be added (is already set)
* or can't be deleted - return an error.
*/
return FALSE;
return TRUE;
}
struct _lm_device_t *pdev,
struct ecore_vlan_mac_obj *src_o,
struct ecore_vlan_mac_obj *dst_o,
union ecore_classification_ramrod_data *data)
{
return FALSE;
}
{
return rx_tx_flag;
}
{
return;
if (index > ECORE_LLH_CAM_MAX_PF_LINE)
return;
if (add) {
/* LLH_FUNC_MEM is a u64 WB register */
}
}
/**
* ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
*
* @pdev: device handle
* @o: queue for which we want to configure this rule
* @add: if TRUE the command is an ADD command, DEL otherwise
* @opcode: CLASSIFY_RULE_OPCODE_XXX
* @hdr: pointer to a header to setup
*
*/
struct eth_classify_cmd_header *hdr)
{
hdr->cmd_general_data |=
if (add)
hdr->cmd_general_data |=
}
/**
* ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
*
* @cid: connection id
* @type: ECORE_FILTER_XXX_PENDING
* @hdr: pointer to header to setup
* @rule_cnt:
*
 * Currently we always configure one rule; the echo field is set to contain
 * the CID and the opcode type.
*/
{
(type << ECORE_SWCID_SHIFT));
}
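/* Sketch of the echo encoding described above, reusing this file's
 * ECORE_SWCID_SHIFT convention (the helper itself is illustrative): the low
 * bits carry the SW CID, the bits above the shift carry the
 * ECORE_FILTER_XXX_PENDING opcode type.
 */
static unsigned int sketch_echo_field(unsigned int cid, unsigned char type)
{
	return cid | ((unsigned int)type << ECORE_SWCID_SHIFT);
}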
/* hw_config() callbacks */
struct ecore_vlan_mac_obj *o,
int cam_offset)
{
/* Set LLH CAM entry: currently only iSCSI and ETH macs are
* relevant. In addition, current implementation is tuned for a
* single ETH MAC.
*
	 * When PF configuration of multiple unicast ETH MACs in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per-function MAC
	 * entries in the LLH register. There are also the
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
	 * total number of CAM entries to 16.
*
* Currently we won't configure NIG for MACs other than a primary ETH
* MAC and iSCSI L2 MAC.
*
* If this MAC is moving from one Queue to another, no need to change
* NIG configuration.
*/
if (cmd != ECORE_VLAN_MAC_MOVE) {
}
/* Reset the ramrod data buffer for the first rule */
if (rule_idx == 0)
/* Setup a command header */
/* Set a MAC itself */
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == ECORE_VLAN_MAC_MOVE) {
rule_entry++;
rule_cnt++;
/* Setup ramrod data */
/* Set a MAC itself */
u.mac.is_inner_mac);
}
/* Set the ramrod data header */
/* TODO: take this to the higher level in order to prevent multiple
writing */
rule_cnt);
}
/**
* ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
*
* @pdev: device handle
* @o: queue
* @type:
* @cam_offset: offset in cam memory
* @hdr: pointer to a header to setup
*
*/
struct mac_configuration_hdr *hdr)
{
struct ecore_raw_obj *r = &o->raw;
(type << ECORE_SWCID_SHIFT));
}
{
struct ecore_raw_obj *r = &o->raw;
if (add) {
opcode);
/* Set a MAC in a ramrod data */
} else
}
{
}
/**
* ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
*
* @pdev: device handle
* @o: ecore_vlan_mac_obj
* @elem: ecore_exeq_elem
* @rule_idx: rule_idx
* @cam_offset: cam_offset
*/
struct ecore_vlan_mac_obj *o,
int cam_offset)
{
/* 57710 and 57711 do not support MOVE command,
* so it's either ADD or DEL
*/
/* Reset the ramrod data buffer */
}
struct ecore_vlan_mac_obj *o,
int cam_offset)
{
/* Reset the ramrod data buffer for the first rule */
if (rule_idx == 0)
/* Set a rule header */
vlan);
/* Set a VLAN itself */
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == ECORE_VLAN_MAC_MOVE) {
rule_entry++;
rule_cnt++;
/* Setup ramrod data */
/* Set a VLAN itself */
}
/* Set the ramrod data header */
/* TODO: take this to the higher level in order to prevent multiple
writing */
rule_cnt);
}
struct ecore_vlan_mac_obj *o,
struct ecore_exeq_elem *elem,
int rule_idx, int cam_offset)
{
/* Reset the ramrod data buffer for the first rule */
if (rule_idx == 0)
/* Set a rule header */
/* Set VLAN and MAC themselves */
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == ECORE_VLAN_MAC_MOVE) {
rule_entry++;
rule_cnt++;
/* Setup ramrod data */
/* Set a VLAN itself */
}
/* Set the ramrod data header */
/* TODO: take this to the higher level in order to prevent multiple
writing */
rule_cnt);
}
/**
* ecore_set_one_vlan_mac_e1h -
*
* @pdev: device handle
* @o: ecore_vlan_mac_obj
* @elem: ecore_exeq_elem
* @rule_idx: rule_idx
* @cam_offset: cam_offset
*/
struct ecore_vlan_mac_obj *o,
struct ecore_exeq_elem *elem,
int rule_idx, int cam_offset)
{
/* 57710 and 57711 do not support MOVE command,
* so it's either ADD or DEL
*/
/* Reset the ramrod data buffer */
}
/**
*
* @pdev: device handle
* @p: command parameters
* @ppos: pointer to the cookie
*
 * Reconfigures the next element from the previously configured elements list.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie (*ppos) should be given back in the next call to make the
 * function handle the next element. If *ppos is set to NULL it will restart
 * the iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
*
*/
struct ecore_vlan_mac_ramrod_params *p,
struct ecore_vlan_mac_registry_elem **ppos)
{
struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
/* If list is empty - there is nothing to do here */
if (ECORE_LIST_IS_EMPTY(&o->head)) {
return 0;
}
/* make a step... */
struct ecore_vlan_mac_registry_elem,
link);
else
struct ecore_vlan_mac_registry_elem);
/* If it's the last step - return NULL */
/* Prepare a 'user_req' */
/* Set the command */
/* Set vlan_mac_flags */
/* Set a restore bit */
return ecore_config_vlan_mac(pdev, p);
}
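/* Usage sketch of the cookie iterator above (the routine's name line was
 * lost; ecore_vlan_mac_restore is assumed):
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	do {
 *		rc = ecore_vlan_mac_restore(pdev, &p, &pos);
 *	} while ((rc == ECORE_SUCCESS) && (pos != NULL));
 */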
/* Returns a pointer to an element with specific criteria, or NULL if such an
 * element hasn't been found.
 */
struct ecore_exe_queue_obj *o,
struct ecore_exeq_elem *elem)
{
/* Check pending for execution commands */
struct ecore_exeq_elem)
sizeof(*data)) &&
return pos;
return NULL;
}
struct ecore_exe_queue_obj *o,
struct ecore_exeq_elem *elem)
{
/* Check pending for execution commands */
struct ecore_exeq_elem)
sizeof(*data)) &&
return pos;
return NULL;
}
struct ecore_exe_queue_obj *o,
struct ecore_exeq_elem *elem)
{
/* Check pending for execution commands */
struct ecore_exeq_elem)
sizeof(*data)) &&
return pos;
return NULL;
}
/**
* ecore_validate_vlan_mac_add - check if an ADD command can be executed
*
* @pdev: device handle
* @qo: ecore_qable_obj
* @elem: ecore_exeq_elem
*
* Checks that the requested configuration can be added. If yes and if
* requested, consume CAM credit.
*
* The 'validate' is run after the 'optimize'.
*
*/
union ecore_qable_obj *qo,
struct ecore_exeq_elem *elem)
{
int rc;
/* Check the registry */
if (rc) {
return rc;
}
	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
return ECORE_EXISTS;
}
/* TODO: Check the pending MOVE from other objects where this
* object is a destination object.
*/
/* Consume the credit if not requested not to */
o->get_credit(o)))
return ECORE_INVAL;
return ECORE_SUCCESS;
}
/**
* ecore_validate_vlan_mac_del - check if the DEL command can be executed
*
* @pdev: device handle
* @qo: quable object to check
* @elem: element that needs to be deleted
*
* Checks that the requested configuration can be deleted. If yes and if
* requested, returns a CAM credit.
*
* The 'validate' is run after the 'optimize'.
*/
union ecore_qable_obj *qo,
struct ecore_exeq_elem *elem)
{
/* If this classification can not be deleted (doesn't exist)
* - return a ECORE_EXIST.
*/
if (!pos) {
return ECORE_EXISTS;
}
	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
/* Check for MOVE commands */
ECORE_ERR("There is a pending MOVE command already\n");
return ECORE_INVAL;
}
/* Check for DEL commands */
return ECORE_EXISTS;
}
/* Return the credit to the credit pool if not requested not to */
o->put_credit(o))) {
ECORE_ERR("Failed to return a credit\n");
return ECORE_INVAL;
}
return ECORE_SUCCESS;
}
/**
* ecore_validate_vlan_mac_move - check if the MOVE command can be executed
*
* @pdev: device handle
* @qo: quable object to check (source)
* @elem: element that needs to be moved
*
* Checks that the requested configuration can be moved. If yes and if
* requested, returns a CAM credit.
*
* The 'validate' is run after the 'optimize'.
*/
union ecore_qable_obj *qo,
struct ecore_exeq_elem *elem)
{
/* Check if we can perform this operation based on the current registry
* state.
*/
return ECORE_INVAL;
}
/* Check if there is an already pending DEL or MOVE command for the
* source object or ADD command for a destination object. Return an
* error if so.
*/
/* Check DEL on source */
ECORE_ERR("There is a pending DEL command on the source queue already\n");
return ECORE_INVAL;
}
/* Check MOVE on source */
return ECORE_EXISTS;
}
/* Check ADD on destination */
ECORE_ERR("There is a pending ADD command on the destination queue already\n");
return ECORE_INVAL;
}
/* Consume the credit if not requested not to */
return ECORE_INVAL;
/* return the credit taken from dest... */
return ECORE_INVAL;
}
return ECORE_SUCCESS;
}
union ecore_qable_obj *qo,
struct ecore_exeq_elem *elem)
{
case ECORE_VLAN_MAC_ADD:
case ECORE_VLAN_MAC_DEL:
case ECORE_VLAN_MAC_MOVE:
default:
return ECORE_INVAL;
}
}
union ecore_qable_obj *qo,
struct ecore_exeq_elem *elem)
{
int rc = 0;
/* If consumption wasn't required, nothing to do */
return ECORE_SUCCESS;
case ECORE_VLAN_MAC_ADD:
case ECORE_VLAN_MAC_MOVE:
break;
case ECORE_VLAN_MAC_DEL:
break;
default:
return ECORE_INVAL;
}
return ECORE_INVAL;
return ECORE_SUCCESS;
}
/**
* ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
*
* @pdev: device handle
* @o: ecore_vlan_mac_obj
*
*/
struct ecore_vlan_mac_obj *o)
{
while (cnt--) {
/* Wait for the current command to complete */
if (rc)
return rc;
/* Wait until there are no pending commands */
if (!ecore_exe_queue_empty(exeq))
else
return ECORE_SUCCESS;
}
return ECORE_TIMEOUT;
}
struct ecore_vlan_mac_obj *o,
unsigned long *ramrod_flags)
{
if (rc != ECORE_SUCCESS) {
		/* The calling function should not differentiate between this
		 * case and the case in which there is already a pending ramrod.
		 */
rc = ECORE_PENDING;
} else {
}
return rc;
}
/**
* ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
*
* @pdev: device handle
* @o: ecore_vlan_mac_obj
* @cqe:
* @cont: if TRUE schedule next execution chunk
*
*/
struct ecore_vlan_mac_obj *o,
union event_ring_elem *cqe,
unsigned long *ramrod_flags)
{
struct ecore_raw_obj *r = &o->raw;
int rc;
/* Clearing the pending list & raw state should be made
* atomically (as execution flow assumes they represent the same)
*/
/* Reset pending list */
/* Clear pending */
r->clear_pending(r);
/* If ramrod failed this is most likely a SW bug */
return ECORE_INVAL;
/* Run the next bulk of pending commands if requested */
if (rc < 0)
return rc;
}
/* If there is more work to do return PENDING */
if (!ecore_exe_queue_empty(&o->exe_queue))
return ECORE_PENDING;
return ECORE_SUCCESS;
}
/**
* ecore_optimize_vlan_mac - optimize ADD and DEL commands.
*
* @pdev: device handle
* @o: ecore_qable_obj
* @elem: ecore_exeq_elem
*/
union ecore_qable_obj *qo,
struct ecore_exeq_elem *elem)
{
case ECORE_VLAN_MAC_ADD:
break;
case ECORE_VLAN_MAC_DEL:
break;
default:
/* Don't handle anything other than ADD or DEL */
return 0;
}
/* If we found the appropriate element - delete it */
if (pos) {
/* Return the credit of the optimized command */
ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
return ECORE_INVAL;
} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
return ECORE_INVAL;
}
}
"ADD" : "DEL");
return 1;
}
return 0;
}
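/* Self-contained toy model (hypothetical sketch_* names) of the optimization
 * above: a new command that finds a still-queued complementary command (ADD
 * vs. DEL for the same MAC) unlinks it, and both are consumed without ever
 * reaching the FW.
 */
#include <string.h>	/* memcmp(); illustrative only */

struct sketch_cmd {
	int is_add;		/* 1 = ADD, 0 = DEL */
	unsigned char mac[6];
	struct sketch_cmd *next;
};

/* Returns 1 if 'newc' cancelled a queued complementary command, else 0. */
static int sketch_optimize(struct sketch_cmd **q, const struct sketch_cmd *newc)
{
	struct sketch_cmd **pp;

	for (pp = q; *pp; pp = &(*pp)->next) {
		if (((*pp)->is_add != newc->is_add) &&
		    !memcmp((*pp)->mac, newc->mac, sizeof(newc->mac))) {
			*pp = (*pp)->next;	/* unlink the queued command */
			/* CAM credit rebalancing would happen here */
			return 1;		/* both commands consumed */
		}
	}
	return 0;
}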
/**
* ecore_vlan_mac_get_registry_elem - prepare a registry element
*
* @pdev: device handle
* @o:
* @elem:
* @restore:
* @re:
*
* prepare a registry element according to the current command request.
*/
struct _lm_device_t *pdev,
struct ecore_vlan_mac_obj *o,
struct ecore_exeq_elem *elem,
struct ecore_vlan_mac_registry_elem **re)
{
/* Allocate a new registry element if needed. */
if (!restore &&
if (!reg_elem)
return ECORE_NOMEM;
/* Get a new CAM offset */
/* This shall never happen, because we have checked the
* CAM availability in the 'validate'.
*/
DbgBreakIf(1);
return ECORE_INVAL;
}
/* Set a VLAN-MAC data */
sizeof(reg_elem->u));
/* Copy the flags (needed for DEL and RESTORE flows) */
} else /* DEL, RESTORE */
return ECORE_SUCCESS;
}
/**
* ecore_execute_vlan_mac - execute vlan mac command
*
* @pdev: device handle
* @qo:
* @exe_chunk:
* @ramrod_flags:
*
* go and send a ramrod!
*/
union ecore_qable_obj *qo,
unsigned long *ramrod_flags)
{
struct ecore_raw_obj *r = &o->raw;
/* If DRIVER_ONLY execution is requested, cleanup a registry
* and exit. Otherwise send a ramrod to FW.
*/
if (!drv_only) {
DbgBreakIf(r->check_pending(r));
/* Set pending */
r->set_pending(r);
/* Fill the ramrod data */
struct ecore_exeq_elem) {
/* We will add to the target object in MOVE command, so
* change the object for a CAM search.
*/
if (cmd == ECORE_VLAN_MAC_MOVE)
else
cam_obj = o;
&reg_elem);
if (rc)
goto error_exit;
/* Push a new entry into the registry */
if (!restore &&
((cmd == ECORE_VLAN_MAC_ADD) ||
(cmd == ECORE_VLAN_MAC_MOVE)))
/* Configure a single command in a ramrod data buffer */
/* MOVE command consumes 2 entries in the ramrod data */
if (cmd == ECORE_VLAN_MAC_MOVE)
idx += 2;
else
idx++;
}
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
r->rdata_mapping.as_u64,
if (rc)
goto error_exit;
}
/* Now, when we are done with the ramrod - clean up the registry */
struct ecore_exeq_elem) {
if ((cmd == ECORE_VLAN_MAC_DEL) ||
(cmd == ECORE_VLAN_MAC_MOVE)) {
}
}
if (!drv_only)
return ECORE_PENDING;
else
return ECORE_SUCCESS;
r->clear_pending(r);
/* Cleanup a registry in case of a failure */
struct ecore_exeq_elem) {
if (cmd == ECORE_VLAN_MAC_MOVE)
else
cam_obj = o;
/* Delete all newly added above entries */
if (!restore &&
((cmd == ECORE_VLAN_MAC_ADD) ||
(cmd == ECORE_VLAN_MAC_MOVE))) {
if (reg_elem) {
}
}
}
return rc;
}
struct _lm_device_t *pdev,
struct ecore_vlan_mac_ramrod_params *p)
{
struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
/* Allocate the execution queue element */
if (!elem)
return ECORE_NOMEM;
/* Set the command 'length' */
case ECORE_VLAN_MAC_MOVE:
break;
default:
}
/* Fill the object specific info */
/* Try to add a new command to the pending list */
}
/**
*
* @pdev: device handle
* @p:
*
*/
struct ecore_vlan_mac_ramrod_params *p)
{
struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
/*
* Add new elements to the execution list for commands that require it.
*/
if (!cont) {
if (rc)
return rc;
}
/* If nothing will be executed further in this iteration we want to
* return PENDING if there are pending commands
*/
if (!ecore_exe_queue_empty(&o->exe_queue))
rc = ECORE_PENDING;
}
/* Execute commands if required */
&p->ramrod_flags);
if (rc < 0)
return rc;
}
	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
	 * the user wants to wait until the last command is done.
*/
		/* Wait at most the current exe_queue length in iterations,
		 * plus one (for the currently pending command).
*/
while (!ecore_exe_queue_empty(&o->exe_queue) &&
max_iterations--) {
/* Wait for the current command to complete */
if (rc)
return rc;
/* Make a next step */
p->vlan_mac_obj,
&p->ramrod_flags);
if (rc < 0)
return rc;
}
return ECORE_SUCCESS;
}
return rc;
}
/**
* ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
*
* @pdev: device handle
* @o:
* @vlan_mac_flags:
* @ramrod_flags: execution flags to be used for this deletion
*
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, and a
 * negative value if the current operation has failed.
*/
struct ecore_vlan_mac_obj *o,
unsigned long *vlan_mac_flags,
unsigned long *ramrod_flags)
{
struct ecore_vlan_mac_ramrod_params p;
unsigned long flags;
int read_lock;
int rc = 0;
/* Clear pending commands first */
struct ecore_exeq_elem) {
if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
if (rc) {
ECORE_ERR("Failed to remove command\n");
return rc;
}
}
}
/* Prepare a command request */
mm_memset(&p, 0, sizeof(p));
p.vlan_mac_obj = o;
p.ramrod_flags = *ramrod_flags;
	/* Add all but the last VLAN-MAC to the execution queue without
	 * actually executing anything.
*/
if (read_lock != ECORE_SUCCESS)
return read_lock;
struct ecore_vlan_mac_registry_elem) {
if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
if (rc < 0) {
ECORE_ERR("Failed to add a new DEL command\n");
return rc;
}
}
}
p.ramrod_flags = *ramrod_flags;
return ecore_config_vlan_mac(pdev, &p);
}
{
}
struct ecore_credit_pool_obj *macs_pool,
struct ecore_credit_pool_obj *vlans_pool)
{
ECORE_LIST_INIT(&o->head);
o->head_reader = 0;
o->head_exe_request = FALSE;
o->saved_ramrod_flags = 0;
o->vlans_pool = vlans_pool;
o->wait = ecore_wait_vlan_mac;
}
struct ecore_vlan_mac_obj *mac_obj,
struct ecore_credit_pool_obj *macs_pool)
{
/* CAM credit pool handling */
if (CHIP_IS_E1x(pdev)) {
/* Exe Queue */
} else {
/* Exe Queue */
}
}
struct ecore_vlan_mac_obj *vlan_obj,
struct ecore_credit_pool_obj *vlans_pool)
{
if (CHIP_IS_E1x(pdev)) {
ECORE_ERR("Do not support chips others than E2 and newer\n");
BUG();
} else {
/* Exe Queue */
}
}
struct ecore_vlan_mac_obj *vlan_mac_obj,
struct ecore_credit_pool_obj *macs_pool,
struct ecore_credit_pool_obj *vlans_pool)
{
(union ecore_qable_obj *)vlan_mac_obj;
/* CAM pool handling */
/* CAM offset is relevant for 57710 and 57711 chips only which have a
* single CAM for both MACs and VLAN-MAC pairs. So the offset
* will be taken from MACs' pool object only.
*/
if (CHIP_IS_E1(pdev)) {
ECORE_ERR("Do not support chips others than E2\n");
BUG();
} else if (CHIP_IS_E1H(pdev)) {
/* Exe Queue */
} else {
/* Exe Queue */
}
}
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
struct tstorm_eth_mac_filter_config *mac_filters,
{
}
struct ecore_rx_mode_ramrod_params *p)
{
/* update the pdev MAC filter structure */
(struct tstorm_eth_mac_filter_config *)p->rdata;
/* initial setting is drop-all */
	/* In E1x we only take the Rx accept flags into account, since Tx
	 * switching isn't enabled. */
/* accept matched ucast */
drop_all_ucast = 0;
/* accept matched mcast */
drop_all_mcast = 0;
/* accept all mcast */
drop_all_ucast = 0;
accp_all_ucast = 1;
}
/* accept all mcast */
drop_all_mcast = 0;
accp_all_mcast = 1;
}
/* accept (all) bcast */
accp_all_bcast = 1;
/* accept unmatched unicasts */
unmatched_unicast = 1;
"accp_mcast 0x%x\naccp_bcast 0x%x\n",
/* write the MAC filter structure*/
/* The operation is completed */
return ECORE_SUCCESS;
}
/* Setup ramrod data */
struct eth_classify_header *hdr,
{
}
unsigned long *accept_flags,
struct eth_filter_rules_cmd *cmd,
{
/* start with 'drop-all' */
}
}
}
/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
if (clear_accept_all) {
}
}
struct ecore_rx_mode_ramrod_params *p)
{
int rc;
/* Reset the ramrod data buffer */
/* Setup ramrod data */
/* Tx (internal switching) */
FALSE);
}
/* Rx */
FALSE);
}
/* If FCoE Queue configuration has been requested configure the Rx and
* internal switching modes for this queue in separate rules.
*
	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
* MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
*/
/* Tx (internal switching) */
TRUE);
rule_idx++;
}
/* Rx */
TRUE);
rule_idx++;
}
}
/* Set the ramrod header (most importantly - number of rules to
* configure).
*/
p->tx_accept_flags);
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
p->cid,
p->rdata_mapping.as_u64,
if (rc)
return rc;
/* Ramrod completion is pending */
return ECORE_PENDING;
}
struct ecore_rx_mode_ramrod_params *p)
{
}
struct ecore_rx_mode_ramrod_params *p)
{
/* Do nothing */
return ECORE_SUCCESS;
}
struct ecore_rx_mode_ramrod_params *p)
{
int rc;
/* Configure the new classification in the chip */
if (rc < 0)
return rc;
/* Wait for a ramrod completion if was requested */
if (rc)
return rc;
}
return rc;
}
struct ecore_rx_mode_obj *o)
{
if (CHIP_IS_E1x(pdev)) {
} else {
}
}
/********************* Multicast verbs: SET, CLEAR ****************************/
{
}
struct ecore_mcast_mac_elem {
};
struct ecore_pending_mcast_cmd {
union {
} data;
	/* Completion flag; practically used in 57712 handling only, where one
	 * pending command may be handled in a few operations. As long as for
	 * other chips every operation handling is completed in a single
	 * ramrod, there is no need to utilize this field.
	 */
#ifndef ECORE_ERASE
#endif
};
struct ecore_mcast_obj *o)
{
return ECORE_TIMEOUT;
return ECORE_SUCCESS;
}
struct ecore_mcast_obj *o,
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
int total_sz;
p->mcast_list_len : 0);
/* If the command is empty ("handle pending commands only"), break */
if (!p->mcast_list_len)
return ECORE_SUCCESS;
macs_list_len * sizeof(struct ecore_mcast_mac_elem);
/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
if (!new_cmd)
return ECORE_NOMEM;
cmd, macs_list_len);
#ifndef ECORE_ERASE
#endif
switch (cmd) {
case ECORE_MCAST_CMD_ADD:
cur_mac = (struct ecore_mcast_mac_elem *)
/* Push the MACs of the current command into the pending command
* MACs list: FIFO
*/
struct ecore_mcast_list_elem) {
cur_mac++;
}
break;
case ECORE_MCAST_CMD_DEL:
break;
case ECORE_MCAST_CMD_RESTORE:
break;
default:
return ECORE_INVAL;
}
/* Push the new pending command to the tail of the pending list: FIFO */
o->set_sched(o);
return ECORE_PENDING;
}
/**
* ecore_mcast_get_next_bin - get the next set bin (index)
*
* @o:
* @last: index to start looking from (including)
*
* returns the next found (set) bin or a negative value if none is found.
*/
{
for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
return cur_bit;
}
}
inner_start = 0;
}
/* None found */
return -1;
}
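/* Self-contained sketch of the scan above: walk the 64-bit words of a bin
 * vector starting from bit 'last' (inclusive) and return the first set bit,
 * or -1 if none is set. 'nwords' stands in for ECORE_MCAST_VEC_SZ.
 */
static int sketch_next_set_bin(const unsigned long long *vec, int nwords,
			       int last)
{
	int i;

	for (i = last; i < nwords * 64; i++)
		if (vec[i / 64] & (1ULL << (i % 64)))
			return i;
	return -1;	/* none found */
}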
/**
* ecore_mcast_clear_first_bin - find the first set bin and clear it
*
* @o:
*
* returns the index of the found bin or -1 if none is found
*/
{
if (cur_bit >= 0)
return cur_bit;
}
{
return rx_tx_flag;
}
struct ecore_mcast_obj *o, int idx,
union ecore_mcast_config_data *cfg_data,
enum ecore_mcast_cmd cmd)
{
struct ecore_raw_obj *r = &o->raw;
(struct eth_multicast_rules_ramrod_data *)(r->rdata);
int bin;
/* Get a bin and update a bins' vector */
switch (cmd) {
case ECORE_MCAST_CMD_ADD:
break;
case ECORE_MCAST_CMD_DEL:
/* If there were no more bins to clear
* (ecore_mcast_clear_first_bin() returns -1) then we would
* clear any (0xff) bin.
* See ecore_mcast_validate_e2() for explanation when it may
* happen.
*/
break;
case ECORE_MCAST_CMD_RESTORE:
break;
default:
return;
}
}
/**
* ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
*
* @pdev: device handle
* @o:
* @start_bin: index in the registry to start from (including)
* @rdata_idx: index in the ramrod data to start from
*
* returns last handled bin index or -1 if all bins have been handled
*/
int *rdata_idx)
{
/* go through the registry and configure the bins from it */
cnt++;
/* Break if we reached the maximum number
* of rules.
*/
if (cnt >= o->max_cmd_len)
break;
}
return cur_bin;
}
int *line_idx)
{
cnt++;
pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
/* Break if we reached the maximum number
* of rules.
*/
if (cnt >= o->max_cmd_len)
break;
}
/* if no more MACs to configure - we are done */
}
int *line_idx)
{
cnt++;
/* Break if we reached the maximum
* number of rules.
*/
if (cnt >= o->max_cmd_len)
break;
}
/* If we cleared all bins - we are done */
}
int *line_idx)
{
line_idx);
/* If o->set_restore returned -1 we are done */
else
/* Start from the next bin next time */
}
struct ecore_mcast_ramrod_params *p)
{
int cnt = 0;
struct ecore_mcast_obj *o = p->mcast_obj;
case ECORE_MCAST_CMD_ADD:
break;
case ECORE_MCAST_CMD_DEL:
break;
case ECORE_MCAST_CMD_RESTORE:
&cnt);
break;
default:
return ECORE_INVAL;
}
/* If the command has been completed - remove it from the list
* and free the memory
*/
&o->pending_cmds_head);
}
/* Break if we reached the maximum number of rules */
if (cnt >= o->max_cmd_len)
break;
}
return cnt;
}
struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
int *line_idx)
{
struct ecore_mcast_list_elem) {
cnt++;
mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
}
}
struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
int *line_idx)
{
for (i = 0; i < p->mcast_list_len; i++) {
cnt++;
p->mcast_list_len - i - 1);
}
}
/**
* ecore_mcast_handle_current_cmd -
*
* @pdev: device handle
* @p:
* @cmd:
* @start_cnt: first line in the ramrod data that may be used
*
 * This function is called iff there is enough room for the current command in
 * the ramrod data.
* Returns number of lines filled in the ramrod data in total.
*/
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd,
int start_cnt)
{
struct ecore_mcast_obj *o = p->mcast_obj;
switch (cmd) {
case ECORE_MCAST_CMD_ADD:
break;
case ECORE_MCAST_CMD_DEL:
break;
case ECORE_MCAST_CMD_RESTORE:
break;
default:
return ECORE_INVAL;
}
/* The current command has been handled */
p->mcast_list_len = 0;
return cnt;
}
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
struct ecore_mcast_obj *o = p->mcast_obj;
switch (cmd) {
/* DEL command deletes all currently configured MACs */
case ECORE_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
/* Don't break */
/* RESTORE command will restore the entire multicast configuration */
case ECORE_MCAST_CMD_RESTORE:
/* Here we set the approximate amount of work to do, which in
* fact may be only less as some MACs in postponed ADD
* command(s) scheduled before this command may fall into
* the same bin and the actual number of bins set in the
* registry would be less than we estimated here. See
* ecore_mcast_set_one_rule_e2() for further details.
*/
p->mcast_list_len = reg_sz;
break;
case ECORE_MCAST_CMD_ADD:
case ECORE_MCAST_CMD_CONT:
/* Here we assume that all new MACs will fall into new bins.
* However we will correct the real registry size after we
* handle all pending commands.
*/
break;
default:
return ECORE_INVAL;
}
/* Increase the total number of MACs pending to be configured */
o->total_pending_num += p->mcast_list_len;
return ECORE_SUCCESS;
}
struct ecore_mcast_ramrod_params *p,
int old_num_bins)
{
struct ecore_mcast_obj *o = p->mcast_obj;
o->set_registry_size(o, old_num_bins);
o->total_pending_num -= p->mcast_list_len;
}
/**
* ecore_mcast_set_rdata_hdr_e2 - sets a header values
*
* @pdev: device handle
* @p:
* @len: number of rules to handle
*/
struct ecore_mcast_ramrod_params *p,
{
(struct eth_multicast_rules_ramrod_data *)(r->rdata);
}
/**
* ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
*
* @pdev: device handle
* @o:
*
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is proportional to the
 * number of set bins.
*
* returns 0 for the compliance with ecore_mcast_refresh_registry_e1().
*/
struct ecore_mcast_obj *o)
{
int i, cnt = 0;
for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
}
o->set_registry_size(o, cnt);
return ECORE_SUCCESS;
}
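/* Brian Kernighan's set-bit count, as referenced above: each iteration clears
 * the lowest set bit, so the loop body runs exactly once per set bit.
 */
static int sketch_kernighan_count(unsigned long long elem)
{
	int cnt = 0;

	while (elem) {
		elem &= elem - 1;	/* clear the lowest set bit */
		cnt++;
	}
	return cnt;
}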
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
struct ecore_mcast_obj *o = p->mcast_obj;
/* Reset the ramrod data buffer */
/* If there are no more pending commands - clear SCHEDULED state */
if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
o->clear_sched(o);
/* The below may be TRUE iff there was enough room in ramrod
* data for all pending commands and for the current
* command. Otherwise the current command would have been added
* to the pending commands and p->mcast_list_len would have been
* zeroed.
*/
if (p->mcast_list_len > 0)
/* We've pulled out some MACs - update the total number of
* outstanding.
*/
o->total_pending_num -= cnt;
/* send a ramrod */
DbgBreakIf(o->total_pending_num < 0);
/* Update a registry size if there are no more pending operations.
*
* We don't want to change the value of the registry size if there are
* pending operations because we want it to always be equal to the
* exact or the approximate number (see ecore_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
* Note that we update the registry itself during command(s) handling
* - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
* know in this scope what the actual state of bins configuration is
* going to be after this ramrod.
*/
if (!o->total_pending_num)
/* If CLEAR_ONLY was requested - don't send a ramrod and clear
* RAMROD_PENDING status immediately.
*/
return ECORE_SUCCESS;
} else {
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
if (rc)
return rc;
/* Ramrod completion is pending */
return ECORE_PENDING;
}
}
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
/* Mark, that there is a work to do */
p->mcast_list_len = 1;
return ECORE_SUCCESS;
}
struct ecore_mcast_ramrod_params *p,
int old_num_bins)
{
/* Do nothing */
}
do { \
} while (0)
struct ecore_mcast_obj *o,
struct ecore_mcast_ramrod_params *p,
{
int bit;
struct ecore_mcast_list_elem) {
mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
/* bookkeeping... */
bit);
}
}
struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
{
int bit;
for (bit = ecore_mcast_get_next_bin(o, 0);
bit >= 0;
}
}
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM. So we don't
* really need to handle any tricks to make it work.
*/
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
int i;
struct ecore_mcast_obj *o = p->mcast_obj;
struct ecore_raw_obj *r = &o->raw;
/* If CLEAR_ONLY has been requested - clear the registry
* and clear a pending bit.
*/
/* Set the multicast filter bits before writing it into
* the internal memory.
*/
switch (cmd) {
case ECORE_MCAST_CMD_ADD:
break;
case ECORE_MCAST_CMD_DEL:
"Invalidating multicast MACs configuration\n");
/* clear the registry */
break;
case ECORE_MCAST_CMD_RESTORE:
break;
default:
return ECORE_INVAL;
}
/* Set the mcast filter in the internal memory */
for (i = 0; i < MC_HASH_SIZE; i++)
} else
/* clear the registry */
/* We are done */
r->clear_pending(r);
return ECORE_SUCCESS;
}
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
struct ecore_mcast_obj *o = p->mcast_obj;
switch (cmd) {
/* DEL command deletes all currently configured MACs */
case ECORE_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
/* Don't break */
/* RESTORE command will restore the entire multicast configuration */
case ECORE_MCAST_CMD_RESTORE:
p->mcast_list_len = reg_sz;
cmd, p->mcast_list_len);
break;
case ECORE_MCAST_CMD_ADD:
case ECORE_MCAST_CMD_CONT:
/* Multicast MACs on 57710 are configured as unicast MACs and
* there is only a limited number of CAM entries for that
* matter.
*/
if (p->mcast_list_len > o->max_cmd_len) {
ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
o->max_cmd_len);
return ECORE_INVAL;
}
/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant, as long as
		 * every ADD command overrides the previous configuration.
*/
if (p->mcast_list_len > 0)
o->set_registry_size(o, p->mcast_list_len);
break;
default:
return ECORE_INVAL;
}
/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each non-empty command will consume o->max_cmd_len.
*/
if (p->mcast_list_len)
o->total_pending_num += o->max_cmd_len;
return ECORE_SUCCESS;
}
struct ecore_mcast_ramrod_params *p,
int old_num_macs)
{
struct ecore_mcast_obj *o = p->mcast_obj;
o->set_registry_size(o, old_num_macs);
/* If current command hasn't been handled yet and we are
* here means that it's meant to be dropped and we have to
* update the number of outstanding MACs accordingly.
*/
if (p->mcast_list_len)
o->total_pending_num -= o->max_cmd_len;
}
struct ecore_mcast_obj *o, int idx,
union ecore_mcast_config_data *cfg_data,
enum ecore_mcast_cmd cmd)
{
struct ecore_raw_obj *r = &o->raw;
(struct mac_configuration_cmd *)(r->rdata);
/* copy mac */
}
}
/**
* ecore_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
*
* @pdev: device handle
* @p:
* @len: number of rules to handle
*/
struct ecore_mcast_ramrod_params *p,
{
(struct mac_configuration_cmd *)(r->rdata);
}
/**
* ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
*
* @pdev: device handle
* @o:
* @start_idx: index in the registry to start from
* @rdata_idx: index in the ramrod data to start from
*
 * restore command for 57710 is like all other commands - always a stand-alone
 * command - start_idx and rdata_idx will always be 0. This function will
 * always succeed.
 * returns -1 to comply with the 57712 variant.
*/
int *rdata_idx)
{
int i = 0;
/* go through the registry and configure the MACs from it. */
struct ecore_mcast_mac_elem) {
i++;
cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
}
*rdata_idx = i;
return -1;
}
{
struct ecore_mcast_obj *o = p->mcast_obj;
int cnt = 0;
/* If nothing to be done - return */
if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
return 0;
/* Handle the first command */
struct ecore_pending_mcast_cmd, link);
case ECORE_MCAST_CMD_ADD:
link, struct ecore_mcast_mac_elem) {
cnt++;
pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
}
break;
case ECORE_MCAST_CMD_DEL:
break;
case ECORE_MCAST_CMD_RESTORE:
break;
default:
return ECORE_INVAL;
}
return cnt;
}
/**
* ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
*
* @fw_hi:
* @fw_mid:
* @fw_lo:
* @mac:
*/
{
}
/**
* ecore_mcast_refresh_registry_e1 -
*
* @pdev: device handle
* @cnt:
*
* Check the ramrod data first entry flag to see if it's a DELETE or ADD command
 * and update the registry correspondingly: if ADD - allocate memory and add
 * the entries to the registry (list); if DELETE - clear the registry and free
* the memory.
*/
struct ecore_mcast_obj *o)
{
/* If first entry contains a SET bit - the command was ADD,
* otherwise - DEL_ALL
*/
/* Break if it was a RESTORE command */
return ECORE_SUCCESS;
if (!elem) {
ECORE_ERR("Failed to allocate registry memory\n");
return ECORE_NOMEM;
}
}
} else {
struct ecore_mcast_mac_elem,
link);
}
return ECORE_SUCCESS;
}
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
struct ecore_mcast_obj *o = p->mcast_obj;
/* Reset the ramrod data buffer */
/* First set all entries as invalid */
for (i = 0; i < o->max_cmd_len ; i++)
/* Handle pending commands first */
/* If there are no more pending commands - clear SCHEDULED state */
if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
o->clear_sched(o);
/* The below may be TRUE iff there were no pending commands */
if (!cnt)
/* For 57710 every command has o->max_cmd_len length to ensure that
* commands are done one at a time.
*/
o->total_pending_num -= o->max_cmd_len;
/* send a ramrod */
/* Set ramrod header (in particular, a number of entries to update) */
	/* Update the registry: we need the registry contents to always be up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we send one command at a time,
* hence we may take the registry update out of the command handling
* and do it in a simpler way here.
*/
if (rc)
return rc;
/* If CLEAR_ONLY was requested - don't send a ramrod and clear
* RAMROD_PENDING status immediately.
*/
return ECORE_SUCCESS;
} else {
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
if (rc)
return rc;
/* Ramrod completion is pending */
return ECORE_PENDING;
}
}
{
}
{
}
int n)
{
}
int n)
{
}
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd)
{
struct ecore_mcast_obj *o = p->mcast_obj;
struct ecore_raw_obj *r = &o->raw;
/* This is needed to recover number of currently configured mcast macs
* in case of failure.
*/
old_reg_size = o->get_registry_size(o);
/* Do some calculations and checks */
if (rc)
return rc;
/* Return if there is no work to do */
if ((!p->mcast_list_len) && (!o->check_sched(o)))
return ECORE_SUCCESS;
/* Enqueue the current command to the pending list if we can't complete
* it in the current iteration
*/
if (r->check_pending(r) ||
if (rc < 0)
goto error_exit1;
/* As long as the current command is in a command list we
* don't need to handle it separately.
*/
p->mcast_list_len = 0;
}
if (!r->check_pending(r)) {
/* Set 'pending' state */
r->set_pending(r);
/* Configure the new classification in the chip */
if (rc < 0)
goto error_exit2;
/* Wait for a ramrod completion if was requested */
}
return rc;
r->clear_pending(r);
return rc;
}
{
}
{
}
{
}
{
}
#ifndef ECORE_ERASE
struct ecore_mcast_obj *o,
struct ecore_mcast_ramrod_params *p,
enum ecore_mcast_cmd cmd);
struct ecore_mcast_obj *o,
struct ecore_mcast_obj *o, int idx,
union ecore_mcast_config_data *cfg_data,
enum ecore_mcast_cmd cmd);
#endif
struct ecore_mcast_obj *mcast_obj,
{
if (CHIP_IS_E1(pdev)) {
if (CHIP_REV_IS_SLOW(pdev))
else
/* 57710 is the only chip that uses the exact match for mcast
* at the moment.
*/
} else if (CHIP_IS_E1H(pdev)) {
/* 57711 doesn't send a ramrod, so it has unlimited credit
* for one command.
*/
} else {
/* TODO: There should be a proper HSI define for this number!!!
*/
}
}
/*************************** Credit handling **********************************/
/**
* atomic_add_ifless - add if the result is less than a given value.
*
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...if (v + a) is less than u.
*
* returns TRUE if (v + a) was less than u, and FALSE otherwise.
*
*/
{
int c, old;
c = ecore_atomic_read(v);
for (;;) {
if (ECORE_UNLIKELY(c + a >= u))
return FALSE;
old = ecore_atomic_cmpxchg((v), c, c + a);
if (ECORE_LIKELY(old == c))
break;
c = old;
}
return TRUE;
}
/**
 * atomic_dec_ifmoe - decrement if the result is more than or equal to a given value.
 *
 * @v: pointer of type atomic_t
 * @a: the amount to dec from v...
 * @u: ...if (v - a) is more than or equal to u.
 *
 * returns TRUE if (v - a) was more than or equal to u, and FALSE
 * otherwise.
*/
{
int c, old;
c = ecore_atomic_read(v);
for (;;) {
if (ECORE_UNLIKELY(c - a < u))
return FALSE;
old = ecore_atomic_cmpxchg((v), c, c - a);
if (ECORE_LIKELY(old == c))
break;
c = old;
}
return TRUE;
}
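/* Usage sketch tying the two CAS helpers above to the pool semantics (names
 * taken from the docblocks; the exact driver signatures are assumed):
 *
 *	get:  credit -= cnt, only while the result stays >= 0
 *	      -> atomic_dec_ifmoe(&o->credit, cnt, 0)
 *	put:  credit += cnt, only while credit + cnt <= pool_sz
 *	      -> atomic_add_ifless(&o->credit, cnt, pool_sz + 1)
 */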
{
smp_mb();
smp_mb();
return rc;
}
{
smp_mb();
	/* Don't allow a refill if credit + cnt > pool_sz */
smp_mb();
return rc;
}
{
int cur_credit;
smp_mb();
return cur_credit;
}
int cnt)
{
return TRUE;
}
struct ecore_credit_pool_obj *o,
int *offset)
{
*offset = -1;
/* Find "internal cam-offset" then add to base for this object... */
/* Skip the current vector if there are no free entries in it */
if (!o->pool_mirror[vec])
continue;
/* If we've got here we are going to find a free entry */
i < BIT_VEC64_ELEM_SZ; idx++, i++)
/* Got one!! */
return TRUE;
}
}
return FALSE;
}
struct ecore_credit_pool_obj *o,
int offset)
{
if (offset < o->base_pool_offset)
return FALSE;
offset -= o->base_pool_offset;
return FALSE;
/* Return the entry to the pool */
return TRUE;
}
struct ecore_credit_pool_obj *o,
int offset)
{
return TRUE;
}
struct ecore_credit_pool_obj *o,
int *offset)
{
*offset = -1;
return TRUE;
}
/**
* ecore_init_credit_pool - initialize credit pool internals.
*
* @p:
* @base: Base entry in the CAM to use.
* @credit: pool size.
*
* If base is negative no CAM entries handling will be performed.
* If credit is negative pool operations will always succeed (unlimited pool).
*
*/
{
/* Zero the object first */
mm_memset(p, 0, sizeof(*p));
/* Set the table to all 1s */
/* Init a pool as full */
	/* The total pool size */
p->base_pool_offset = base;
/* Commit the change */
smp_mb();
p->check = ecore_credit_pool_check;
/* if pool credit is negative - disable the checks */
if (credit >= 0) {
p->put = ecore_credit_pool_put;
p->get = ecore_credit_pool_get;
} else {
}
/* If base is negative - disable entries handling */
if (base < 0) {
}
}
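/* Examples of the sign conventions documented above (illustrative calls):
 *
 *	ecore_init_credit_pool(p, -1, 128);  no CAM entry handling, 128 credits
 *	ecore_init_credit_pool(p, 32,  -1);  CAM base 32, unlimited credit
 *	ecore_init_credit_pool(p,  0,   0);  empty pool - blocks all operations
 */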
{
/* TODO: this will be defined in consts as well... */
int cam_sz;
if (CHIP_IS_E1(pdev)) {
/* In E1, Multicast is saved in cam... */
if (!CHIP_REV_IS_SLOW(pdev))
else
} else if (CHIP_IS_E1H(pdev)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT.
*/
if ((func_num > 0)) {
if (!CHIP_REV_IS_SLOW(pdev))
else
} else {
/* this should never happen! Block MAC operations. */
ecore_init_credit_pool(p, 0, 0);
}
} else {
/*
		 * CAM credit is equally divided between all active functions
* on the PATH.
*/
if ((func_num > 1)) {
if (!CHIP_REV_IS_SLOW(pdev))
/ func_num
else
/* No need for CAM entries handling for 57712 and
* newer.
*/
} else if (func_num == 1) {
if (!CHIP_REV_IS_SLOW(pdev))
else
/* No need for CAM entries handling for 57712 and
* newer.
*/
} else {
/* this should never happen! Block MAC operations. */
ecore_init_credit_pool(p, 0, 0);
}
}
}
struct ecore_credit_pool_obj *p,
{
if (CHIP_IS_E1x(pdev)) {
		/* There is no VLAN credit in HW on 57710 and 57711; only
		 * MAC / MAC-VLAN can be set.
*/
ecore_init_credit_pool(p, 0, -1);
} else {
/* CAM credit is equally divided between all active functions
* on the PATH.
*/
if (func_num > 0) {
} else
/* this should never happen! Block VLAN operations. */
ecore_init_credit_pool(p, 0, 0);
}
}
/****************** RSS Configuration ******************/
#if defined(ECORE_ERASE) && !defined(__FreeBSD__)
/**
* bnx2x_debug_print_ind_table - prints the indirection table configuration.
*
* @bp: driver handle
* @p: pointer to rss configuration
*
* Prints it when NETIF_MSG_IFUP debug level is configured.
*/
struct bnx2x_config_rss_params *p)
{
int i;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/* Print 4 bytes in a line */
if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
(((i + 1) & 0x3) == 0)) {
}
}
}
#endif /* ECORE_ERASE && !__FreeBSD__ */
/**
* ecore_setup_rss - configure RSS
*
* @pdev: device handle
* @p: rss configuration
*
 * sends an UPDATE ramrod for that matter.
*/
struct ecore_config_rss_params *p)
{
struct ecore_rss_config_obj *o = p->rss_obj;
struct ecore_raw_obj *r = &o->raw;
(struct eth_rss_update_ramrod_data *)(r->rdata);
int rc;
/* Set an echo field */
(r->state << ECORE_SWCID_SHIFT));
/* RSS mode */
#endif
/* RSS capabilities */
/* Hashing mask */
/* RSS engine ID */
/* Indirection table */
/* Remember the last configuration */
#if defined(ECORE_ERASE) && !defined(__FreeBSD__)
/* Print the indirection table */
if (netif_msg_ifup(bp))
#endif
/* RSS keys */
}
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* Send a ramrod */
r->cid,
r->rdata_mapping.as_u64,
if (rc < 0)
return rc;
return ECORE_PENDING;
}
{
}
struct ecore_config_rss_params *p)
{
int rc;
struct ecore_rss_config_obj *o = p->rss_obj;
struct ecore_raw_obj *r = &o->raw;
/* Do nothing if only driver cleanup was requested */
p->ramrod_flags);
return ECORE_SUCCESS;
}
r->set_pending(r);
if (rc < 0) {
r->clear_pending(r);
return rc;
}
return rc;
}
struct ecore_rss_config_obj *rss_obj,
{
}
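/* Usage sketch (assumed field names for the parameter block consumed
 * above): fill ecore_config_rss_params and invoke the config entry
 * point; with RAMROD_COMP_WAIT the call blocks until the RSS UPDATE
 * ramrod completes instead of returning ECORE_PENDING.
 *
 *	struct ecore_config_rss_params params = {0};
 *
 *	params.rss_obj = rss_obj;
 *	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	rc = ecore_config_rss(pdev, &params);
 */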
#ifdef ECORE_ERASE
/********************** Queue state object ***********************************/
/**
* ecore_queue_state_change - perform Queue state change transition
*
* @pdev: device handle
* @params: parameters to perform the transition
*
* returns 0 in case of a successfully completed transition, a negative
* error code in case of failure, or a positive (EBUSY) value if a
* completion is still pending (possible only if RAMROD_COMP_WAIT is
* not set in params->ramrod_flags for asynchronous commands).
*
*/
struct ecore_queue_state_params *params)
{
/* Check that the requested transition is legal */
if (rc) {
return ECORE_INVAL;
}
/* Set "pending" bit */
/* Don't send a command if only driver cleanup was requested */
else {
/* Send a ramrod */
if (rc) {
o->next_state = ECORE_Q_STATE_MAX;
return rc;
}
if (rc)
return rc;
return ECORE_SUCCESS;
}
}
}
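/* Usage sketch (assumed parameter layout): a synchronous caller sets
 * RAMROD_COMP_WAIT so the positive (EBUSY) return path described above
 * is never exposed to it.
 *
 *	struct ecore_queue_state_params q_params = {0};
 *
 *	q_params.q_obj = &q_obj;
 *	q_params.cmd = ECORE_Q_CMD_HALT;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = ecore_queue_state_change(pdev, &q_params);
 */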
struct ecore_queue_state_params *params)
{
/* ACTIVATE and DEACTIVATE commands are implemented on top of
* UPDATE command.
*/
if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
(cmd == ECORE_Q_CMD_DEACTIVATE))
bit = ECORE_Q_CMD_UPDATE;
else
bit = cmd;
return bit;
}
struct ecore_queue_sp_obj *o,
enum ecore_queue_cmd cmd)
{
}
/**
* ecore_queue_comp_cmd - complete the state change command.
*
* @pdev: device handle
* @o: queue state object
* @cmd: completed command
*
* Checks that the arrived completion is expected.
*/
struct ecore_queue_sp_obj *o,
enum ecore_queue_cmd cmd)
{
ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
return ECORE_INVAL;
}
if (o->next_tx_only >= o->max_cos)
/* >= because tx only must always be smaller than cos since the
* primary connection supports COS 0
*/
ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
o->next_tx_only, o->max_cos);
"Completing command %d for queue %d, setting state to %d\n",
if (o->next_tx_only) /* print num tx-only if any exist */
o->state = o->next_state;
o->num_tx_only = o->next_tx_only;
o->next_state = ECORE_Q_STATE_MAX;
/* It's important that o->state and o->next_state are
* updated before o->pending.
*/
wmb();
return ECORE_SUCCESS;
}
struct ecore_queue_state_params *cmd_params,
struct client_init_ramrod_data *data)
{
/* Rx data */
/* IPv6 TPA supported for E2 and above only */
}
struct ecore_queue_sp_obj *o,
struct ecore_general_setup_params *params,
struct client_init_general_data *gen_data,
unsigned long *flags)
{
} else
flags);
flags);
}
struct ecore_txq_setup_params *params,
struct client_init_tx_data *tx_data,
unsigned long *flags)
{
/* Don't configure any Tx switching mode during queue SETUP */
}
struct rxq_pause_params *params,
struct client_init_rx_data *rx_data)
{
/* flow control data */
}
struct ecore_rxq_setup_params *params,
struct client_init_rx_data *rx_data,
unsigned long *flags)
{
#ifdef ECORE_UPSTREAM /* ECORE_UPSTREAM */
rx_data->vmqueue_mode_en_flg = 0;
#else
#endif
#ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
#endif
/* Always start in DROP_ALL mode */
/* We don't set drop flags */
rx_data->drop_ip_cs_err_flg = 0;
rx_data->drop_tcp_cs_err_flg = 0;
rx_data->drop_ttl0_flg = 0;
rx_data->drop_udp_cs_err_flg = 0;
flags);
}
/* silent vlan removal */
}
/* initialize the general, tx and rx parts of a queue object */
struct ecore_queue_state_params *cmd_params,
struct client_init_ramrod_data *data)
{
}
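/* Sketch of the assumed composition (helper names are hypothetical
 * stand-ins for the per-part fill routines defined above):
 *
 *	ecore_q_fill_init_general_data(pdev, o, &setup->gen_params,
 *				       &data->general, &flags);
 *	ecore_q_fill_init_tx_data(o, &setup->txq_params, &data->tx, &flags);
 *	ecore_q_fill_init_rx_data(o, &setup->rxq_params, &data->rx, &flags);
 *	ecore_q_fill_init_pause_data(o, &setup->pause_params, &data->rx);
 */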
/* initialize the general and tx parts of a tx-only queue object */
struct ecore_queue_state_params *cmd_params,
struct tx_queue_init_ramrod_data *data)
{
}
/**
 * ecore_q_init - initialize queue HW/FW resources (no ramrod is sent)
 *
 * @pdev: device handle
 * @params: queue state parameters
 *
 * Configures:
 * - HC: Rx and Tx
 * - CDU context validation
 *
 */
struct ecore_queue_state_params *params)
{
/* Tx HC configuration */
hc_usec);
}
/* Rx HC configuration */
hc_usec);
}
/* Set CDU context validation values */
}
/* As no ramrod is sent, complete the command immediately */
mmiowb();
smp_mb();
return ECORE_SUCCESS;
}
struct ecore_queue_state_params *params)
{
(struct client_init_ramrod_data *)o->rdata;
/* Clear the ramrod data */
/* Fill the ramrod data */
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(pdev,
}
struct ecore_queue_state_params *params)
{
(struct client_init_ramrod_data *)o->rdata;
/* Clear the ramrod data */
/* Fill the ramrod data */
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(pdev,
}
struct ecore_queue_state_params *params)
{
(struct tx_queue_init_ramrod_data *)o->rdata;
#ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
#endif
ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
return ECORE_INVAL;
}
/* Clear the ramrod data */
/* Fill the ramrod data */
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
}
struct ecore_queue_sp_obj *obj,
struct ecore_queue_update_params *params,
struct client_update_ramrod_data *data)
{
/* Client ID of the client to update */
/* Function ID of the client to update */
/* Default VLAN value */
/* Inner VLAN stripping */
¶ms->update_flags);
¶ms->update_flags);
/* Outer VLAN stripping */
¶ms->update_flags);
¶ms->update_flags);
/* Drop packets that have source MAC that doesn't belong to this
* Queue.
*/
¶ms->update_flags);
¶ms->update_flags);
/* Activate/Deactivate */
data->activate_flg =
¶ms->update_flags);
/* Enable default VLAN */
¶ms->update_flags);
¶ms->update_flags);
/* silent vlan removal */
¶ms->update_flags);
¶ms->update_flags);
/* tx switching */
¶ms->update_flags);
¶ms->update_flags);
/* PTP */
¶ms->update_flags);
¶ms->update_flags);
}
struct ecore_queue_state_params *params)
{
(struct client_update_ramrod_data *)o->rdata;
ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
return ECORE_INVAL;
}
/* Clear the ramrod data */
/* Fill the ramrod data */
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
}
/**
* ecore_q_send_deactivate - send DEACTIVATE command
*
* @pdev: device handle
* @params: queue state parameters
*
* implemented using the UPDATE command.
*/
struct ecore_queue_state_params *params)
{
}
/**
* ecore_q_send_activate - send ACTIVATE command
*
* @pdev: device handle
* @params: queue state parameters
*
* implemented using the UPDATE command.
*/
struct ecore_queue_state_params *params)
{
}
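/* Sketch of the assumed ACTIVATE/DEACTIVATE-over-UPDATE composition:
 * both helpers above zero the update sub-parameters, set the
 * activate-change flag (plus the activate value for ACTIVATE) and then
 * reuse the UPDATE sender. The ECORE_Q_UPDATE_* flag names are
 * assumptions here.
 *
 *	struct ecore_queue_update_params *update = &params->params.update;
 *
 *	mm_memset(update, 0, sizeof(*update));
 *	ECORE_SET_BIT(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
 *	ECORE_SET_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
 *	return ecore_q_send_update(pdev, params);
 */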
struct ecore_queue_sp_obj *obj,
struct ecore_queue_update_tpa_params *params,
struct tpa_update_ramrod_data *data)
{
}
struct ecore_queue_state_params *params)
{
(struct tpa_update_ramrod_data *)o->rdata;
/* Clear the ramrod data */
/* Fill the ramrod data */
/* Add the function id inside the type, so that the sp post function
 * doesn't automatically add the PF func-id; this is required
 * for operations done by PFs on behalf of their VFs.
 */
((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
}
struct ecore_queue_state_params *params)
{
#if !defined(ECORE_ERASE) || defined(__FreeBSD__)
/* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
return ecore_sp_post(pdev,
#else
#endif
}
struct ecore_queue_state_params *params)
{
ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
return ECORE_INVAL;
}
}
struct ecore_queue_state_params *params)
{
ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
return ECORE_INVAL;
}
}
struct ecore_queue_state_params *params)
{
o->cids[ECORE_PRIMARY_CID_INDEX], 0,
}
struct ecore_queue_state_params *params)
{
case ECORE_Q_CMD_INIT:
case ECORE_Q_CMD_DEACTIVATE:
case ECORE_Q_CMD_ACTIVATE:
case ECORE_Q_CMD_UPDATE:
case ECORE_Q_CMD_UPDATE_TPA:
case ECORE_Q_CMD_HALT:
case ECORE_Q_CMD_CFC_DEL:
case ECORE_Q_CMD_TERMINATE:
case ECORE_Q_CMD_EMPTY:
default:
return ECORE_INVAL;
}
}
struct ecore_queue_state_params *params)
{
case ECORE_Q_CMD_SETUP:
case ECORE_Q_CMD_INIT:
case ECORE_Q_CMD_DEACTIVATE:
case ECORE_Q_CMD_ACTIVATE:
case ECORE_Q_CMD_UPDATE:
case ECORE_Q_CMD_UPDATE_TPA:
case ECORE_Q_CMD_HALT:
case ECORE_Q_CMD_CFC_DEL:
case ECORE_Q_CMD_TERMINATE:
case ECORE_Q_CMD_EMPTY:
default:
return ECORE_INVAL;
}
}
struct ecore_queue_state_params *params)
{
case ECORE_Q_CMD_SETUP:
case ECORE_Q_CMD_INIT:
case ECORE_Q_CMD_DEACTIVATE:
case ECORE_Q_CMD_ACTIVATE:
case ECORE_Q_CMD_UPDATE:
case ECORE_Q_CMD_UPDATE_TPA:
case ECORE_Q_CMD_HALT:
case ECORE_Q_CMD_CFC_DEL:
case ECORE_Q_CMD_TERMINATE:
case ECORE_Q_CMD_EMPTY:
default:
return ECORE_INVAL;
}
}
/**
* ecore_queue_chk_transition - check state machine of a regular Queue
*
* @pdev: device handle
* @o: queue state object
* @params: queue state parameters
*
* Applies to a regular (not Forwarding) Queue.
* It both checks if the requested command is legal in the current
* state and, if it's legal, sets a `next_state' in the object
* that will be used in the completion flow to set the `state'
* of the object.
*
* returns 0 if a requested command is a legal transition,
* ECORE_INVAL otherwise.
*/
struct ecore_queue_sp_obj *o,
struct ecore_queue_state_params *params)
{
/* Forget all pending for completion commands if a driver only state
* transition has been requested.
*/
o->pending = 0;
o->next_state = ECORE_Q_STATE_MAX;
}
/* Don't allow a next state transition if we are in the middle of
* the previous one.
*/
if (o->pending) {
ECORE_ERR("Blocking transition since pending was %lx\n",
o->pending);
return ECORE_BUSY;
}
switch (state) {
case ECORE_Q_STATE_RESET:
if (cmd == ECORE_Q_CMD_INIT)
break;
if (cmd == ECORE_Q_CMD_SETUP) {
else
}
break;
case ECORE_Q_STATE_ACTIVE:
if (cmd == ECORE_Q_CMD_DEACTIVATE)
else if ((cmd == ECORE_Q_CMD_EMPTY) ||
(cmd == ECORE_Q_CMD_UPDATE_TPA))
else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
next_tx_only = 1;
}
else if (cmd == ECORE_Q_CMD_HALT)
else if (cmd == ECORE_Q_CMD_UPDATE) {
/* If "active" state change is requested, update the
* state accordingly.
*/
&update_params->update_flags) &&
else
}
break;
case ECORE_Q_STATE_MULTI_COS:
if (cmd == ECORE_Q_CMD_TERMINATE)
else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
}
else if ((cmd == ECORE_Q_CMD_EMPTY) ||
(cmd == ECORE_Q_CMD_UPDATE_TPA))
else if (cmd == ECORE_Q_CMD_UPDATE) {
/* If "active" state change is requested, update the
* state accordingly.
*/
&update_params->update_flags) &&
else
}
break;
case ECORE_Q_STATE_MCOS_TERMINATED:
if (cmd == ECORE_Q_CMD_CFC_DEL) {
if (next_tx_only == 0)
else
}
break;
case ECORE_Q_STATE_INACTIVE:
if (cmd == ECORE_Q_CMD_ACTIVATE)
else if ((cmd == ECORE_Q_CMD_EMPTY) ||
(cmd == ECORE_Q_CMD_UPDATE_TPA))
else if (cmd == ECORE_Q_CMD_HALT)
else if (cmd == ECORE_Q_CMD_UPDATE) {
/* If "active" state change is requested, update the
* state accordingly.
*/
&update_params->update_flags) &&
if (o->num_tx_only == 0)
else /* tx only queues exist for this queue */
} else
}
break;
case ECORE_Q_STATE_STOPPED:
if (cmd == ECORE_Q_CMD_TERMINATE)
break;
case ECORE_Q_STATE_TERMINATED:
if (cmd == ECORE_Q_CMD_CFC_DEL)
break;
default:
}
/* Transition is assured */
if (next_state != ECORE_Q_STATE_MAX) {
o->next_state = next_state;
o->next_tx_only = next_tx_only;
return ECORE_SUCCESS;
}
return ECORE_INVAL;
}
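/* Example of a legal command chain through this state machine (comment
 * only): RESET --INIT--> INITIALIZED --SETUP--> ACTIVE --HALT-->
 * STOPPED --TERMINATE--> TERMINATED --CFC_DEL--> RESET. Any command
 * arriving while o->pending is still set is rejected with ECORE_BUSY
 * above.
 */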
#ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
/**
* ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
*
* @pdev: device handle
* @o: queue state object
* @params: queue state parameters
*
* It both checks if the requested command is legal in a current
* state and, if it's legal, sets a `next_state' in the object
* that will be used in the completion flow to set the `state'
* of the object.
*
* returns 0 if a requested command is a legal transition,
* ECORE_INVAL otherwise.
*/
struct ecore_queue_sp_obj *o,
struct ecore_queue_state_params *params)
{
switch (state) {
case ECORE_Q_STATE_RESET:
if (cmd == ECORE_Q_CMD_INIT)
break;
if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
else
}
break;
case ECORE_Q_STATE_ACTIVE:
case ECORE_Q_STATE_INACTIVE:
if (cmd == ECORE_Q_CMD_CFC_DEL)
break;
default:
}
/* Transition is assured */
if (next_state != ECORE_Q_STATE_MAX) {
o->next_state = next_state;
return ECORE_SUCCESS;
}
return ECORE_INVAL;
}
#endif
struct ecore_queue_sp_obj *obj,
void *rdata,
{
/* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
if (CHIP_IS_E1x(pdev))
else
#ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
else
#endif
}
/* return a queue object's logical state */
struct ecore_queue_sp_obj *obj)
{
case ECORE_Q_STATE_ACTIVE:
case ECORE_Q_STATE_MULTI_COS:
return ECORE_Q_LOGICAL_STATE_ACTIVE;
case ECORE_Q_STATE_RESET:
case ECORE_Q_STATE_INACTIVE:
case ECORE_Q_STATE_STOPPED:
case ECORE_Q_STATE_TERMINATED:
case ECORE_Q_STATE_FLRED:
return ECORE_Q_LOGICAL_STATE_STOPPED;
default:
return ECORE_INVAL;
}
}
/********************** Function state object *********************************/
struct ecore_func_sp_obj *o)
{
/* in the middle of a transaction - return INVALID state */
if (o->pending)
return ECORE_F_STATE_MAX;
/* ensure the order of reading of o->pending and o->state:
 * o->pending should be read first
 */
rmb();
return o->state;
}
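/* Illustration of the barrier pairing (sketch): the completion path
 * below performs
 *
 *	o->state = o->next_state;
 *	o->next_state = ECORE_F_STATE_MAX;
 *	wmb();
 *	... clear the pending bit ...
 *
 * so a reader that first observes !o->pending and then issues rmb(), as
 * done above, is guaranteed to see the updated o->state.
 */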
struct ecore_func_sp_obj *o,
enum ecore_func_cmd cmd)
{
}
/**
* ecore_func_state_change_comp - complete the state machine transition
*
* @pdev: device handle
* @o: function state object
* @cmd: completed command
*
* Called on state change transition. Completes the state
* machine transition only - no HW interaction.
*/
struct ecore_func_sp_obj *o,
enum ecore_func_cmd cmd)
{
ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
cur_pending, o->next_state);
return ECORE_INVAL;
}
"Completing command %d for func %d, setting state to %d\n",
o->state = o->next_state;
o->next_state = ECORE_F_STATE_MAX;
/* It's important that o->state and o->next_state are
* updated before o->pending.
*/
wmb();
return ECORE_SUCCESS;
}
/**
* ecore_func_comp_cmd - complete the state change command
*
* @pdev: device handle
* @o: function state object
* @cmd: completed command
*
* Checks that the arrived completion is expected.
*/
struct ecore_func_sp_obj *o,
enum ecore_func_cmd cmd)
{
/* Complete the state machine part first, then check whether it is a
 * legal completion.
 */
return rc;
}
/**
* ecore_func_chk_transition - perform function state machine transition
*
* @pdev: device handle
* @o: function state object
* @params: function state parameters
*
* It both checks if the requested command is legal in a current
* state and, if it's legal, sets a `next_state' in the object
* that will be used in the completion flow to set the `state'
* of the object.
*
* returns 0 if a requested command is a legal transition,
* ECORE_INVAL otherwise.
*/
struct ecore_func_sp_obj *o,
struct ecore_func_state_params *params)
{
/* Forget all pending for completion commands if a driver only state
* transition has been requested.
*/
o->pending = 0;
o->next_state = ECORE_F_STATE_MAX;
}
/* Don't allow a next state transition if we are in the middle of
* the previous one.
*/
if (o->pending)
return ECORE_BUSY;
switch (state) {
case ECORE_F_STATE_RESET:
if (cmd == ECORE_F_CMD_HW_INIT)
break;
if (cmd == ECORE_F_CMD_START)
else if (cmd == ECORE_F_CMD_HW_RESET)
break;
case ECORE_F_STATE_STARTED:
if (cmd == ECORE_F_CMD_STOP)
/* AFEX ramrods can be sent only in the started state, and only
 * if a function_stop ramrod completion is not pending;
 * for these events the next state remains STARTED.
 */
else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
/* Switch_update ramrod can be sent in either started or
* tx_stopped state, and it doesn't change the state.
*/
else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
else if (cmd == ECORE_F_CMD_TX_STOP)
break;
case ECORE_F_STATE_TX_STOPPED:
if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
else if (cmd == ECORE_F_CMD_TX_START)
break;
default:
}
/* Transition is assured */
if (next_state != ECORE_F_STATE_MAX) {
o->next_state = next_state;
return ECORE_SUCCESS;
}
return ECORE_INVAL;
}
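/* Example of a legal function-level chain (comment only): RESET
 * --HW_INIT--> INITIALIZED --START--> STARTED --TX_STOP--> TX_STOPPED
 * --TX_START--> STARTED --STOP--> INITIALIZED --HW_RESET--> RESET.
 * SWITCH_UPDATE, SET_TIMESYNC and the AFEX commands are the exceptions
 * that leave the current state unchanged.
 */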
/**
* ecore_func_init_func - performs HW init at function stage
*
* @pdev: device handle
* @drv: driver-specific operations
*
* Init HW when the current phase is
* FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
* HW blocks.
*/
const struct ecore_func_sp_drv_ops *drv)
{
}
/**
* ecore_func_init_port - performs HW init at port stage
*
* @pdev: device handle
* @drv: driver-specific operations
*
* Init HW when the current phase is
* FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
* FUNCTION-only HW blocks.
*
*/
const struct ecore_func_sp_drv_ops *drv)
{
if (rc)
return rc;
}
/**
* ecore_func_init_cmn_chip - performs HW init at chip-common stage
*
* @pdev: device handle
* @drv: driver-specific operations
*
* Init HW when the current phase is
* FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
* PORT-only and FUNCTION-only HW blocks.
*/
const struct ecore_func_sp_drv_ops *drv)
{
if (rc)
return rc;
}
/**
* ecore_func_init_cmn - performs HW init at common stage
*
* @pdev: device handle
* @drv: driver-specific operations
*
* Init HW when the current phase is
* FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
* PORT-only and FUNCTION-only HW blocks.
*/
const struct ecore_func_sp_drv_ops *drv)
{
if (rc)
return rc;
}
struct ecore_func_state_params *params)
{
int rc = 0;
/* Prepare buffers for unzipping the FW */
if (rc)
return rc;
/* Prepare FW */
if (rc) {
ECORE_ERR("Error loading firmware\n");
goto init_err;
}
/* Handle the beginning of COMMON_XXX phases separately... */
switch (load_code) {
if (rc)
goto init_err;
break;
if (rc)
goto init_err;
break;
if (rc)
goto init_err;
break;
if (rc)
goto init_err;
break;
default:
rc = ECORE_INVAL;
}
/* In case of success, complete the command immediately: no ramrods
* have been sent.
*/
if (!rc)
return rc;
}
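/* Sketch of the assumed load_code dispatch in the switch above, using
 * the MCP load responses named in the per-stage helpers' kernel-doc:
 *
 *	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
 *		rc = ecore_func_init_cmn_chip(pdev, drv);
 *		break;
 *	case FW_MSG_CODE_DRV_LOAD_COMMON:
 *		rc = ecore_func_init_cmn(pdev, drv);
 *		break;
 *	case FW_MSG_CODE_DRV_LOAD_PORT:
 *		rc = ecore_func_init_port(pdev, drv);
 *		break;
 *	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
 *		rc = ecore_func_init_func(pdev, drv);
 *		break;
 */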
/**
* ecore_func_reset_func - reset HW at function stage
*
* @pdev: device handle
* @drv: driver-specific operations
*
* Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
* FUNCTION-only HW blocks.
*/
const struct ecore_func_sp_drv_ops *drv)
{
}
/**
* ecore_func_reset_port - reset HW at port stage
*
* @pdev: device handle
* @drv: driver-specific operations
*
* Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
* FUNCTION-only and PORT-only HW blocks.
*
* !!!IMPORTANT!!!
*
* It's important to call reset_port before reset_func() as the last thing
* reset_func() does is pf_disable(), which shuts down PGLUE_B and thus
* makes any further DMAE transactions impossible.
*/
const struct ecore_func_sp_drv_ops *drv)
{
}
/**
* ecore_func_reset_cmn - reset HW at common stage
*
* @pdev: device handle
* @drv: driver-specific operations
*
* Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
* FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
* COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
*/
const struct ecore_func_sp_drv_ops *drv)
{
}
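/* Sketch (assumed composition): the common-stage reset builds on the
 * lower stages while honoring the port-before-func ordering noted
 * above; reset_hw_cmn is assumed to be one of the driver hooks in
 * struct ecore_func_sp_drv_ops:
 *
 *	ecore_func_reset_port(pdev, drv);
 *	drv->reset_hw_cmn(pdev);
 */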
struct ecore_func_state_params *params)
{
switch (reset_phase) {
break;
break;
break;
default:
ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
break;
}
/* Complete the command immediately: no ramrods have been sent. */
return ECORE_SUCCESS;
}
struct ecore_func_state_params *params)
{
(struct function_start_data *)o->rdata;
/* Fill the ramrod data with provided parameters */
if (start_params->class_fail_ethtype) {
}
/** @@@TMP - until FW 7.10.7 (which will introduce an HSI change)
* `sd_vlan_eth_type' will replace ethertype in SD mode even if
* it's set to 0; This will probably break SD, so we're setting it
* to ethertype 0x8100 for now.
*/
if (start_params->sd_vlan_eth_type)
else
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
}
struct ecore_func_state_params *params)
{
(struct function_update_data *)o->rdata;
/* Fill the ramrod data with provided parameters */
&switch_update_params->changes)) {
}
&switch_update_params->changes)) {
rdata->sd_vlan_tag =
}
&switch_update_params->changes)) {
}
&switch_update_params->changes)) {
}
&switch_update_params->changes)) {
}
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
}
struct ecore_func_state_params *params)
{
(struct function_update_data *)o->afex_rdata;
/* Fill the ramrod data with provided parameters */
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
}
static
struct ecore_func_state_params *params)
{
(struct afex_vif_list_ramrod_data *)o->afex_rdata;
/* Fill the ramrod data with provided parameters */
/* send the sub-command type in the echo field */
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
/* this ramrod sends data directly and not through DMA mapping */
}
struct ecore_func_state_params *params)
{
}
struct ecore_func_state_params *params)
{
}
struct ecore_func_state_params *params)
{
(struct flow_control_configuration *)o->rdata;
int i;
/* No need for an explicit memory barrier here as long as we
* ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
* read. If the memory read is removed we will have to put a
* full memory barrier there (inside ecore_sp_post()).
*/
}
struct ecore_func_state_params *params)
{
(struct set_timesync_ramrod_data *)o->rdata;
/* Fill the ramrod data with provided parameters */
DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
}
struct ecore_func_state_params *params)
{
case ECORE_F_CMD_HW_INIT:
case ECORE_F_CMD_START:
case ECORE_F_CMD_STOP:
case ECORE_F_CMD_HW_RESET:
case ECORE_F_CMD_AFEX_UPDATE:
case ECORE_F_CMD_TX_STOP:
case ECORE_F_CMD_TX_START:
case ECORE_F_CMD_SET_TIMESYNC:
default:
return ECORE_INVAL;
}
}
struct ecore_func_sp_obj *obj,
struct ecore_func_sp_drv_ops *drv_iface)
{
}
/**
* ecore_func_state_change - perform Function state change transition
*
* @pdev: device handle
* @params: parameters to perform the transaction
*
* returns 0 in case of a successfully completed transition,
* a negative error code in case of failure, or a positive
* (EBUSY) value if a completion is still pending (possible
* only if RAMROD_COMP_WAIT is not set in
* params->ramrod_flags for asynchronous commands).
*/
struct ecore_func_state_params *params)
{
/* Check that the requested transition is legal */
if ((rc == ECORE_BUSY) &&
msleep(10);
}
if (rc == ECORE_BUSY) {
ECORE_ERR("timeout waiting for previous ramrod completion\n");
return rc;
}
} else if (rc) {
return rc;
}
/* Set "pending" bit */
/* Don't send a command if only driver cleanup was requested */
} else {
/* Send a ramrod */
if (rc) {
o->next_state = ECORE_F_STATE_MAX;
return rc;
}
if (rc)
return rc;
return ECORE_SUCCESS;
}
}
}
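/* Usage sketch (assumed parameter layout): bringing the function up
 * synchronously through the HW_INIT transition checked above.
 *
 *	struct ecore_func_state_params func_params = {0};
 *
 *	func_params.f_obj = &func_obj;
 *	func_params.cmd = ECORE_F_CMD_HW_INIT;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = ecore_func_state_change(pdev, &func_params);
 */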
#endif