#ifdef VF_INVOLVED
#include "lm5710.h"
#include "bd_chain.h"
#include "577xx_int_offsets.h"
#include "context.h"
#include "command.h"
extern void lm_int_igu_ack_sb(lm_device_t *pdev, u8_t rss_id, u8_t storm_id, u16_t sb_index, u8_t int_op, u8_t is_update_idx);
//#define LM_VF_PM_MESS_STATE_READY_TO_SEND 0
//#define LM_VF_PM_MESS_STATE_SENT 1
/**********************VF_PF FUNCTIONS**************************************/
/**
* and then writes to the "addr-valid" in the trigger-zone... this causes the FW to wake
* up and handle the message.
*
* @param pdev
* @param mess
*
* @return lm_status_t
*/
{
return (pdev->vars.is_pf_provides_mac && (pdev->vars.is_pf_restricts_lamac || pdev->vars.is_pf_rejected_lamac));
}
/**
 * Check the PF's acquire response for a MAC restriction on this VF.
 *
 * NOTE(review): the body of this function appears to have been elided in
 * this extract. The orphaned return statement just above this definition
 * (testing pdev->vars.is_pf_provides_mac together with
 * is_pf_restricts_lamac / is_pf_rejected_lamac) is very likely this
 * function's displaced body — confirm against the full source. As shown,
 * the function is non-void with no return statement, which is undefined
 * behavior if the caller uses the result.
 *
 * @param pdev    device context (presumably the VF's lm device — verify)
 * @param pf_resp PF acquire response TLV; presumably the source of the
 *                restriction flags — TODO confirm
 */
static u8_t lm_vf_check_mac_restriction(struct _lm_device_t *pdev, struct pfvf_acquire_resp_tlv *pf_resp)
{
}
/**
 * PF-side helper: report the RX/TX queue counts granted to a VF.
 *
 * NOTE(review): body appears elided in this extract — judging only by the
 * name and the out-parameters, this presumably writes the per-VF queue
 * counts through num_rxqs/num_txqs and returns an lm_status_t; confirm
 * against the full source. As shown, the non-void function has no return
 * statement (undefined behavior if the caller uses the value).
 *
 * @param pdev     PF device context
 * @param vf_info  per-VF bookkeeping structure
 * @param num_rxqs out: number of RX queues — TODO confirm
 * @param num_txqs out: number of TX queues — TODO confirm
 */
static lm_status_t lm_pf_get_queues_number(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t *num_rxqs, u8_t * num_txqs)
{
}
{
}
/**
 * PF-side helper: retrieve the permanent and current MAC addresses of a VF.
 *
 * NOTE(review): body appears elided in this extract — presumably copies
 * ETHERNET_ADDRESS_SIZE bytes into each out-buffer; confirm against the
 * full source. As shown, the non-void function has no return statement
 * (undefined behavior if the caller uses the value).
 *
 * @param pdev               PF device context
 * @param vf_info            per-VF bookkeeping structure
 * @param permanent_mac_addr out: factory/assigned MAC — TODO confirm
 * @param current_mac_addr   out: currently programmed MAC — TODO confirm
 */
static lm_status_t lm_pf_get_macs(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t *permanent_mac_addr, u8_t *current_mac_addr)
{
}
struct vf_pf_msg_acquire *request)
{
{
// here we handle cases where HSI version of PF is not compatible with HSI version of VF
// Until this code section was added, VF always returned 0 so we fail the request for old VFs
// Currently (22/9/2011) we consider all VFs that return ANY value (not 0) as valid
// once HSI will change, we'll need to enter here logic that will say:
// if( ( 0 == vf_fw_hsi_version) || ( some condition with vf_fw_hsi_version )
}
else
{
{
}
else
{
vf_info->fp_hsi_ver = 0;
}
}
{
/* VF FP HSI VER is newer than PF... treat as mismatch */
}
{
}
return status;
}
{
u8_t i = 0;
DbgBreakIf(!(pdev && vf_info && vf_info->pf_vf_response.request_virt_addr && vf_info->pf_vf_response.response_virt_addr));
if (status != SW_PFVF_STATUS_SUCCESS)
{
return lm_status;
}
{
}
vf_info->num_sbs = response->resc.num_sbs = min (vf_info->num_allocated_chains, request->resc_request.num_sbs);
{
}
{
{
}
#ifdef _VBD_
//Generate message
#endif
}
vf_info->num_rxqs = response->resc.num_rxqs = min(vf_info->num_sbs, request->resc_request.num_rxqs);
vf_info->num_txqs = response->resc.num_txqs = min(vf_info->num_sbs, request->resc_request.num_txqs);
vf_info->num_mac_filters = response->resc.num_mac_filters = min(num_mac_filters, request->resc_request.num_mac_filters);
vf_info->num_vlan_filters = response->resc.num_vlan_filters = min(num_vlan_filters, request->resc_request.num_vlan_filters);
vf_info->num_mc_filters = response->resc.num_mc_filters = min(num_mc_filters, request->resc_request.num_mc_filters);
//#ifdef UPDATED_MAC
{
}
//#endif
return lm_status;
}
{
u32_t i;
DbgBreakIf(!(pdev && vf_info && vf_info->pf_vf_response.request_virt_addr && vf_info->pf_vf_response.response_virt_addr));
// DbgBreak();
//lm_status = lm_pf_enable_vf(pdev, vf_info->abs_vf_id);
}
for (i = 0; i < XSTORM_SPQ_DATA_SIZE/sizeof(u32_t); i++) {
REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + XSTORM_VF_SPQ_DATA_OFFSET(vf_info->abs_vf_id) + i*sizeof(u32_t),0);
}
REG_WR(PFDEV(pdev),XSEM_REG_FAST_MEMORY + (XSTORM_VF_SPQ_PAGE_BASE_OFFSET(vf_info->abs_vf_id)) + 4,0);
}
/*lm_status = lm_set_rx_mask(pdev, LM_CLI_IDX_NDIS, LM_RX_MASK_ACCEPT_NONE);
if(LM_STATUS_SUCCESS != lm_status)
{
DbgMessage(pdev, FATAL, "lm_set_rx_mask(LM_RX_MASK_ACCEPT_NONE) returns %d\n",lm_status);
return lm_status;
}*/
/*
Enable the function in STORMs
*/
LM_INTMEM_WRITE8(PFDEV(pdev), XSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_XSTRORM_INTMEM);
LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(PFDEV(pdev), TSTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE8(PFDEV(pdev), USTORM_VF_TO_PF_OFFSET(function_fw_id), FUNC_ID(pdev), BAR_USTRORM_INTMEM);
if (lm_status == LM_STATUS_SUCCESS) {
} else {
DbgBreak();
}
return lm_status;
}
{
// lm_rcq_chain_t * rcq_chain = NULL;
DbgBreakIf(!(pdev && vf_info && vf_info->pf_vf_response.request_virt_addr && vf_info->pf_vf_response.response_virt_addr));
vf_info->vf_chains[q_id].tpa_ramrod_data_virt = mm_alloc_phys_mem(pdev, mem_size, &vf_info->vf_chains[q_id].tpa_ramrod_data_phys, 0, LM_RESOURCE_NDIS);
{
return LM_STATUS_RESOURCE ;
}
}
}
}
if (lm_status == LM_STATUS_SUCCESS) {
type,
}
if (lm_status == LM_STATUS_SUCCESS) {
if (q_id == 0) {
}
}
}
} else if (lm_status == LM_STATUS_PENDING) {
} else {
}
return lm_status;
}
// ASSUMPTION: CALLED IN PASSIVE LEVEL!!!
static lm_status_t lm_pf_vf_fill_set_q_filters_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
{
// DbgBreak();
lm_status = lm_set_rx_mask(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid), LM_RX_MASK_ACCEPT_NONE, NULL);
if (lm_status == LM_STATUS_PENDING)
{
}
} else {
if (GET_FLAGS(request->rx_mask,VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST | VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST |
VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST) ==
VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST)) {
{
lm_status = lm_set_rx_mask(pdev, LM_SW_VF_CLI_ID(vf_info,request->vf_qid), LM_RX_MASK_PROMISCUOUS_MODE, NULL);
if (lm_status == LM_STATUS_PENDING)
{
}
}
else
{
}
}
if (!rx_mask)
{
}
}
}
}
if (lm_status == LM_STATUS_PENDING)
{
}
}
}
}
} else {
}
if (lm_status == LM_STATUS_PENDING) {
}
} else {
//
}
}
}
if (lm_status == LM_STATUS_SUCCESS) {
} else if (lm_status == LM_STATUS_PENDING) {
DbgBreak();
} else {
}
return lm_status;
}
static lm_status_t lm_pf_vf_fill_teardown_q_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
{
//DbgBreak();
if (q_id == 0) {
}
DbgMessage(pdev, WARN, "lm_pf_vf_fill_teardown_q_response for VF[%d]: stats_cnt: %d\n",vf_info->relative_vf_id,vf_info->vf_stats.vf_stats_cnt);
DbgMessage(pdev, WARN, "lm_pf_vf_fill_teardown_q_response for VF[%d]: stats_cnt: %d\n",vf_info->relative_vf_id,vf_info->vf_stats.vf_stats_cnt);
if (lm_status != LM_STATUS_SUCCESS) {
if (lm_status != LM_STATUS_ABORTED)
{
DbgBreak();
}
return lm_status;
}
}
{
}
else
{
}
if (lm_status == LM_STATUS_SUCCESS) {
} else if (lm_status == LM_STATUS_PENDING) {
DbgBreak();
} else {
}
return lm_status;
}
static lm_status_t lm_pf_vf_fill_close_vf_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
{
//DbgBreak();
}
if (lm_status != LM_STATUS_SUCCESS) {
DbgBreak();
} else {
}
{
}
else
{
}
}
vf_info->vf_si_num_of_active_q = 0;
/*
Disable the function in STORMs
*/
}
}
if (lm_status == LM_STATUS_SUCCESS) {
} else if (lm_status == LM_STATUS_PENDING) {
DbgBreak();
} else {
}
return lm_status;
}
static lm_status_t lm_pf_vf_fill_release_vf_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
{
return lm_status;
}
static lm_status_t lm_pf_vf_fill_update_rss_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info)
{
// DbgBreak();
}
if (lm_status == LM_STATUS_SUCCESS) {
} else {
}
return lm_status;
}
{
//DbgBreak();
lm_status = lm_pf_tpa_send_vf_ramrod(pdev, vf_info, q_idx, (u8_t)request->rsc_ipv4_state, (u8_t)request->rsc_ipv6_state);
if(LM_STATUS_SUCCESS != lm_status)
{
DbgBreakMsg(" Ramrod send failed ");
break;
}
}
if (lm_status == LM_STATUS_SUCCESS) {
} else {
}
return lm_status;
}
{
DbgBreakIf(!(pdev && IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) && vf_info && (vf_info->pf_vf_response.req_resp_state == VF_PF_REQUEST_IN_PROCESSING)));
DbgMessage(pdev, WARNvf, "lm_pf_process_standard_request %d for VF[%d]\n",requst_hdr->opcode,vf_info->relative_vf_id);
{
}
else
{
switch (requst_hdr->opcode)
{
case PFVF_OP_ACQUIRE:
{
break;
}
{
break;
}
if (lm_status == LM_STATUS_SUCCESS)
{
}
break;
case PFVF_OP_INIT_VF:
{
break;
}
break;
}
break;
case PFVF_OP_SETUP_Q:
break;
}
break;
}
break;
case PFVF_OP_SET_Q_FILTERS:
break;
}
break;
case PFVF_OP_ACTIVATE_Q:
break;
}
break;
case PFVF_OP_DEACTIVATE_Q:
break;
}
break;
case PFVF_OP_TEARDOWN_Q:
break;
}
break;
}
break;
case PFVF_OP_CLOSE_VF:
break;
}
{
break;
}
break;
case PFVF_OP_RELEASE_VF:
break;
}
break;
}
break;
case PFVF_OP_UPDATE_RSS:
break;
}
break;
case PFVF_OP_UPDATE_RSC:
break;
}
break;
default:
return LM_STATUS_FAILURE;
}
}
if (lm_status != LM_STATUS_PENDING)
{
}
return lm_status;
}
lm_status_t lm_pf_notify_standard_request_ready(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u8_t * set_done)
{
DbgBreakIf(!(pdev && IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) && vf_info && (vf_info->pf_vf_response.req_resp_state != VF_PF_REQUEST_IN_PROCESSING)));
switch (requst_hdr->opcode) {
case PFVF_OP_ACQUIRE:
DbgBreak();
break;
case PFVF_OP_INIT_VF:
DbgBreak();
break;
case PFVF_OP_SETUP_Q:
DbgBreak();
break;
}
break;
case PFVF_OP_SET_Q_FILTERS:
break;
case PFVF_OP_ACTIVATE_Q:
break;
case PFVF_OP_DEACTIVATE_Q:
break;
case PFVF_OP_TEARDOWN_Q:
break;
case PFVF_OP_CLOSE_VF:
DbgBreak();
break;
}
break;
case PFVF_OP_RELEASE_VF:
//return LM_STATUS_FAILURE;
DbgBreak();
break;
}
break;
default:
DbgBreak();
break;
}
return lm_status;
}
static lm_status_t lm_vf_pf_send_message_to_hw_channel(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
{
} else {
}
/* VF_REG_WR(pdev, VF_BAR0_CSDM_GLOBAL_OFFSET +
OFFSETOF(struct cstorm_function_zone_data,non_trigger)
+ OFFSETOF(struct trigger_function_zone,vf_pf_channel)
+ OFFSETOF(struct vf_pf_channel_zone_trigger, addr_valid),
message_phys_addr.as_u32.low);*/
return LM_STATUS_SUCCESS;
}
lm_status_t lm_vf_pf_send_request_to_sw_channel(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
{
return lm_status;
}
lm_status_t lm_vf_pf_recv_response_from_sw_channel(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
{
//mess->message_size - hdr->resp_msg_offset;
} else {
}
while (length) {
lm_status = mm_vf_pf_read_block_from_sw_channel(pdev, VF_TO_PF_STANDARD_BLOCK_ID, (u8_t*)buffer + received_offset, &received_length);
if (lm_status != LM_STATUS_SUCCESS) {
break;
}
if (!received_offset) {
break;
}
}
}
return lm_status;
}
{
if (IS_HW_CHANNEL_VIRT_MODE(pdev)) {
} else if (IS_SW_CHANNEL_VIRT_MODE(pdev)) {
} else {
DbgBreakMsg("lm_vf_pf_channel_send: UNKNOWN channel type\n");
return LM_STATUS_FAILURE;
}
}
return lm_status;
}
static lm_status_t lm_vf_pf_channel_wait_response(struct _lm_device_t * pdev, lm_vf_pf_message_t * mess)
{
u32_t to_cnt = 10000 + 2360; // We'll wait 10,000 times 100us (1 second) + 2360 times 25000us (59sec) = total 60 sec
/* check args */
DbgBreak();
return LM_STATUS_INVALID_PARAMETER;
}
/* wait for message done */
}
}
{
sum_delay_us += delay_us;
// in case reset in progress
// we won't get completion so no need to wait
if( lm_reset_is_inprogress(pdev) ) {
break;
} else if (IS_SW_CHANNEL_VIRT_MODE(pdev)) {
}
}
DbgMessage(pdev, WARN, "lm_vf_pf_channel_wait_response: message done(%dus waiting)\n",sum_delay_us);
} else {
switch (lm_status)
{
break;
case LM_STATUS_SUCCESS:
default:
if (!lm_reset_is_inprogress(pdev))
{
#if defined(_VBD_)
DbgBreak();
#endif
}
break;
}
}
return lm_status;
}
{
}
}
static lm_vf_pf_message_t * lm_vf_pf_channel_get_message_to_send(struct _lm_device_t * pdev, const u32_t opcode)
{
#ifndef __LINUX
DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->vars.vf_pf_mess.state is %d\n",pdev->vars.vf_pf_mess.state);
return NULL;
}
#else
DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->vars.vf_pf_mess.state is %d\n",pdev->vars.vf_pf_mess.state);
#endif
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
pdev->vars.vf_pf_mess.message_size = ((sizeof(union vf_pf_msg) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
pdev->vars.vf_pf_mess.message_size = ((sizeof(union vfpf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
+ ((sizeof(union pfvf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
}
else
{
DbgBreakMsg("lm_vf_pf_channel_get_message_to_send: UNKNOWN channel type\n");
return NULL;
}
pdev->vars.vf_pf_mess.message_virt_addr = mm_alloc_phys_mem(pdev, pdev->vars.vf_pf_mess.message_size,
{
return NULL;
}
if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
((sizeof(union vfpf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
+ ((sizeof(union pfvf_tlvs) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK);
pdev->vars.vf_pf_mess.bulletin_virt_addr = (u8_t*)pdev->vars.vf_pf_mess.message_virt_addr + buletin_offset;
}
}
switch (opcode) {
case PFVF_OP_ACQUIRE:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_acquire_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
case PFVF_OP_INIT_VF:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_init_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
case PFVF_OP_SETUP_Q:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_setup_q_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
case PFVF_OP_SET_Q_FILTERS:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
resp_offset = (sizeof(struct vf_pf_msg_set_q_filters) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
else
{
resp_offset = (sizeof(struct vfpf_set_q_filters_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
#if 0
case PFVF_OP_ACTIVATE_Q:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
DbgBreakMsg("lm_vf_pf_channel_get_message_to_send: HW_CHANNEL is not implemented yet\n");
return NULL;
}
break;
case PFVF_OP_DEACTIVATE_Q:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
DbgBreakMsg("lm_vf_pf_channel_get_message_to_send: HW_CHANNEL is not implemented yet\n");
return NULL;
}
break;
#endif
case PFVF_OP_TEARDOWN_Q:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_q_op_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
case PFVF_OP_CLOSE_VF:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_close_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
case PFVF_OP_RELEASE_VF:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_release_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
case PFVF_OP_UPDATE_RSS:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_rss_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
case PFVF_OP_UPDATE_RSC:
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
resp_offset = (sizeof(struct vfpf_tpa_tlv) + sizeof(struct channel_list_end_tlv) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
break;
default:
DbgBreak();
return NULL;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
pdev->vars.vf_pf_mess.done = (u16_t*)((u8_t *)pdev->vars.vf_pf_mess.message_virt_addr + resp_offset);
}
else
{
}
}
{
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
struct pf_vf_msg_acquire_resp * p_sw_resp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
running_index = pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[p_sw_resp->pfdev_info.indices_per_sb + sm_idx];
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
struct pfvf_acquire_resp_tlv * p_hw_resp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;;
running_index = pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[p_hw_resp->pfdev_info.indices_per_sb + sm_idx];
}
else
{
DbgBreak();
}
return mm_le16_to_cpu(running_index);
}
{
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
struct pf_vf_msg_acquire_resp * p_sw_resp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
struct pfvf_acquire_resp_tlv * p_hw_resp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;;
}
else
{
DbgBreak();
}
}
{
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
struct pf_vf_msg_acquire_resp * p_sw_resp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
struct pfvf_acquire_resp_tlv * p_hw_resp = (struct pfvf_acquire_resp_tlv *)pdev->pf_vf_acquiring_resp;;
}
else
{
DbgBreak();
}
return 0;
}
{
if (lm_status == LM_STATUS_SUCCESS) {
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
case SW_PFVF_STATUS_SUCCESS:
DbgMessage(pdev, WARN, "VF_PF Channel: Message %d(%d) is completed successfully\n",mess_hdr->opcode, resp_hdr->opcode);
break;
case SW_PFVF_STATUS_FAILURE:
break;
default:
break;
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
{
case PFVF_STATUS_SUCCESS:
break;
case PFVF_STATUS_FAILURE:
case PFVF_STATUS_NO_RESOURCE:
break;
default:
break;
}
}
else
{
DbgBreak();
}
}
return lm_status;
}
{
if (!pf_mess)
{
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
// mess->vfdev_info.vf_pf_msg_size = sizeof(union vf_pf_msg);
/* the following fields are for debug purposes */
sw_mess->vfdev_info.vf_fw_hsi_version = pdev->ver_num_fw; /* Must not be zero otherwise, VF will yellow bang */
sw_mess->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VER_1; /* We don't want to break support for old/new VF/PF so we retrun v1 */
sw_mess->resc_request.num_rxqs = sw_mess->resc_request.num_txqs = sw_mess->resc_request.num_sbs = LM_SB_CNT(pdev);
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
hw_mess->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VER_1; /* We don't want to break support for old/new VF/PF so we retrun v1 */
hw_mess->resc_request.num_rxqs = hw_mess->resc_request.num_txqs = hw_mess->resc_request.num_sbs = LM_SB_CNT(pdev);
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
// FIXME TODO
if (lm_status == LM_STATUS_SUCCESS)
{
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
{
}
else
{
{
case SW_PFVF_STATUS_SUCCESS:
break;
case SW_PFVF_STATUS_FAILURE:
break;
default:
break;
}
// We update here the status of pf_acquire
// in order to let the UM layer of the VF to report
// in the event log the relevant event log message
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
{
}
else
{
{
case PFVF_STATUS_SUCCESS:
break;
case PFVF_STATUS_FAILURE:
case PFVF_STATUS_NO_RESOURCE:
break;
default:
break;
}
// We update here the status of pf_acquire
// in order to let the UM layer of the VF to report
// in the event log the relevant event log message
}
}
else
{
DbgBreak();
}
}
if (lm_status == LM_STATUS_SUCCESS)
{
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
{
pdev->pf_vf_acquiring_resp = mm_alloc_mem(pdev, sizeof(struct pf_vf_msg_acquire_resp),LM_RESOURCE_COMMON);
{
return LM_STATUS_RESOURCE;
}
else
{
DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->pf_vf_acquiring_resp is allocated (%db)\n",sizeof(struct pf_vf_msg_acquire_resp));
}
}
// FIXME TODO
// override for now to make sure we get correct answer...
{
}
DbgMessage(pdev, FATALvf, "presp->pfdev_info.indices_per_sb = %d\n", presp->pfdev_info.indices_per_sb);
{
if (!max_dq)
{
max_dq = 1;
}
}
else
{
return LM_STATUS_INVALID_PARAMETER;
}
/* IGU specific data */
/* TODO: don't assume consecutiveness... */
{
{
}
}
/* TODO: get this from presp... here for purpose of rx_mask... */
//pdev->hw_info.chip_id |= CHIP_REV_EMUL;
{
}
{
}
else
{
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
{
pdev->pf_vf_acquiring_resp = mm_alloc_mem(pdev, sizeof(struct pfvf_acquire_resp_tlv),LM_RESOURCE_COMMON);
{
return LM_STATUS_RESOURCE;
}
else
{
DbgMessage(pdev, FATAL, "VF_PF Channel: pdev->pf_vf_acquiring_resp is allocated (%db)\n",sizeof(struct pfvf_acquire_resp_tlv));
}
}
// FIXME TODO
DbgMessage(pdev, FATALvf, "presp->pfdev_info.indices_per_sb = %d\n", presp->pfdev_info.indices_per_sb);
{
if (!max_dq)
{
max_dq = 1;
}
}
else
{
return LM_STATUS_INVALID_PARAMETER;
}
/* IGU specific data */
/* TODO: don't assume consecutiveness... */
{
{
}
}
/* TODO: get this from presp... here for purpose of rx_mask... */
//pdev->hw_info.chip_id |= CHIP_REV_EMUL;
{
}
{
}
{
}
else
{
}
}
else
{
DbgBreak();
}
}
return lm_status;
}
{
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
mess->sb_addr[sb_id] = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u64;
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
mess->sb_addr[sb_id] = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u64;
}
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS) {
}
return lm_status;
}
{
&& pdev->pf_vf_acquiring_resp));
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
if (validation_flag & RX_Q_VALIDATE) {
}
} else {
}
/* sb + hc info */
if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && !pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
mess->rxq.hc_rate = (u16_t)pdev->params.int_per_sec_rx[HC_PARAMS_ETH_INDEX]; /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
if (pdev->params.enable_dynamic_hc[HC_PARAMS_ETH_INDEX] && (presp->pfdev_info.pf_cap & PFVF_CAP_DHC)) {
}
}
/* rx buffer info */
}
if (validation_flag & TX_Q_VALIDATE) {
if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
mess->txq.hc_rate = (u16_t)pdev->params.int_per_sec_tx[HC_PARAMS_ETH_INDEX]; /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
}
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
if (validation_flag & RX_Q_VALIDATE) {
#if 0
}
}
else
#endif
{
}
/* sb + hc info */
if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && !pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
mess->rxq.hc_rate = (u16_t)pdev->params.int_per_sec_rx[HC_PARAMS_ETH_INDEX]; /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
if (pdev->params.enable_dynamic_hc[HC_PARAMS_ETH_INDEX] && (presp->pfdev_info.pf_cap & PFVF_CAP_DHC)) {
}
}
if (!vf_qid)
{
}
/* rx buffer info */
}
if (validation_flag & TX_Q_VALIDATE) {
if ((pdev->params.int_coalesing_mode == LM_INT_COAL_PERIODIC_SYNC)/* && pdev->params.int_coalesing_mode_disabled_by_ndis*/) {
mess->txq.hc_rate = (u16_t)pdev->params.int_per_sec_tx[HC_PARAMS_ETH_INDEX]; /* desired interrupts per sec. *//* valid iff VFPF_QUEUE_FLG_HC */
}
}
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS) {
}
return lm_status;
}
{
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS) {
}
return lm_status;
}
lm_status_t lm_vf_pf_set_q_filters(struct _lm_device_t * pdev, u8 vf_qid, void * cookie, q_filter_type filter_type, u8_t * pbuf, u32_t buf_len,
{
DbgBreakIf(!(pdev && IS_CHANNEL_VFDEV(pdev) && (vf_qid < LM_SB_CNT(pdev)) && pdev->pf_vf_acquiring_resp));
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
switch (filter_type) {
case Q_FILTER_MAC:
if (!is_clear) {
}
if (is_clear) {
} else {
}
mm_memcpy(mess->filters[idx_entries].dest_mac, pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER) {
}
}
if (mess->n_mac_vlan_filters) {
}
break;
case Q_FILTER_VLAN:
DbgBreak();
break;
case Q_FILTER_MC:
if (!is_clear) {
}
if (is_clear) {
} else {
mm_memcpy(&mess->multicast[idx_entries][0], pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
}
}
if (mess->n_multicast) {
}
break;
case Q_FILTER_RX_MASK:
VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST);
}
}
}
}
}
DbgMessage(pdev, FATAL, "Q_FILTER_RX_MASK: mess->rx_mask=%x mess->flags=%x\n", mess->rx_mask, mess->flags);
break;
default:
break;
}
{
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
switch (filter_type) {
case Q_FILTER_MAC:
if (!is_clear) {
}
if (is_clear) {
} else {
}
mm_memcpy(mess->filters[idx_entries].mac, pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
if (vlan_tag != LM_SET_CAM_NO_VLAN_FILTER) {
}
}
if (mess->n_mac_vlan_filters) {
}
break;
case Q_FILTER_VLAN:
DbgBreak();
break;
case Q_FILTER_MC:
if (!is_clear) {
}
if (is_clear) {
} else {
mm_memcpy(&mess->multicast[idx_entries][0], pbuf + idx_entries*ETHERNET_ADDRESS_SIZE, ETHERNET_ADDRESS_SIZE);
}
}
if (mess->n_multicast) {
}
break;
case Q_FILTER_RX_MASK:
VFPF_RX_MASK_ACCEPT_ALL_MULTICAST | VFPF_RX_MASK_ACCEPT_ALL_UNICAST | VFPF_RX_MASK_ACCEPT_BROADCAST);
}
}
}
}
}
DbgMessage(pdev, FATAL, "Q_FILTER_RX_MASK: mess->rx_mask=%x mess->flags=%x\n", mess->rx_mask, mess->flags);
break;
default:
break;
}
{
}
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (send_it) {
if (lm_status != LM_STATUS_SUCCESS) {
}
} else {
}
return lm_status;
}
lm_status_t lm_vf_pf_set_q_filters_list(struct _lm_device_t * pdev, u8 vf_qid, void * cookie, q_filter_type filter_type, d_list_t * pbuf,
{
DbgBreak();
return LM_STATUS_FAILURE;
}
lm_status_t lm_vf_pf_update_rss(struct _lm_device_t *pdev, void * cookie, u32_t rss_flags, u8_t rss_result_mask, u8_t * ind_table, u32_t * rss_key)
{
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
}
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS) {
}
return lm_status;
}
{
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
{
}
mess->tpa_client_info.max_sges_for_packet = DIV_ROUND_UP_BITS((u16_t)pdev->params.l2_cli_con_params[0].mtu, LM_TPA_PAGE_BITS);
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS) {
}
return lm_status;
}
{
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS) {
}
return lm_status;
}
{
if (!pf_mess) {
DbgBreak();
return lm_status;
}
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
// FIXME TODO
if (lm_status == LM_STATUS_SUCCESS) {
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
} else {
case SW_PFVF_STATUS_SUCCESS:
break;
case SW_PFVF_STATUS_FAILURE:
break;
default:
break;
}
}
}
}
return lm_status;
}
{
}
{
}
{
return FALSE;
}
{
return LM_STATUS_FAILURE;
}
pdev->params.vf_num_in_path = (pdev->params.debug_me_register & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
return LM_STATUS_SUCCESS;
}
{
//u32_t alloc_size = 0 ;
void * p_sb;
//DbgBreakIf(!(presp && (sb_id < presp->pfdev_info.indices_per_sb)));
{
return LM_STATUS_INVALID_PARAMETER ;
}
DbgMessage(pdev, FATAL, "### VF lm_common_setup_alloc_resc b_is_alloc=%s\n", b_is_alloc ? "TRUE" : "FALSE" );
// Status blocks allocation. We allocate mem both for the default and non-default status blocks
// there is 1 def sb and 16 non-def sb per port.
// non-default sb: index 0-15, default sb: index 16.
if (IS_CHANNEL_VFDEV(pdev)) {
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
mem_size = (sizeof(u16_t) * (presp->pfdev_info.indices_per_sb + 2) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
mem_size = (sizeof(u16_t) * (presp->pfdev_info.indices_per_sb + 2) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK;
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
} else {
}
{
if( b_is_alloc )
{
if (IS_CHANNEL_VFDEV(pdev)) {
pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb = p_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u32.low = sb_phy_address.as_u32.low;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u32.high = sb_phy_address.as_u32.high;
} else {
pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb = p_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
}
}
else
{
if (IS_CHANNEL_VFDEV(pdev)) {
} else {
}
}
{
return LM_STATUS_RESOURCE ;
}
}
#if 0
//CAM
alloc_size = sizeof(struct mac_configuration_cmd) ;
if( b_is_alloc )
{
0,
}
{
return LM_STATUS_RESOURCE ;
}
if( b_is_alloc )
{
0,
}
{
return LM_STATUS_RESOURCE ;
}
#endif
return LM_STATUS_SUCCESS;
}
{
if (lm_status == LM_STATUS_SUCCESS) {
#if 0
"ACK_enable" (even "ACK_disable/ACK_enable") does not help when IGU block is stuck from previous VM shutdown/reboot (not ACKed sunbitted interrupt interrupt).
if (lm_status == LM_STATUS_SUCCESS)
{
{
/* Give Consumer updates with value '0' */
}
}
#endif
}
/* Temporary FIXME TODO: is this the right location??? */
{
{
case 10:
break;
case 100:
break;
case 1000:
break;
case 2500:
break;
case 20000:
break;
case 10000:
default:
break;
}
}
else
{
}
return lm_status;
}
{
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
}
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
}
}
}
else
{
DbgBreak();
return LM_STATUS_FAILURE;
}
if (lm_status == LM_STATUS_SUCCESS) {
}
if (lm_status == LM_STATUS_SUCCESS) {
}
return lm_status;
}
{
if (lm_reset_is_inprogress(pdev)) {
return LM_STATUS_SUCCESS;
}
if (lm_status == LM_STATUS_SUCCESS) {
} else {
if (lm_status == LM_STATUS_REQUEST_NOT_ACCEPTED) {
}
}
if (lm_status == LM_STATUS_SUCCESS) {
}
}
return lm_status;
}
{
if (lm_reset_is_inprogress(pdev)) {
return LM_STATUS_SUCCESS;
}
if (lm_status == LM_STATUS_SUCCESS) {
}
return lm_status;
}
{
return FALSE;
}
{
// Cleaning after driver unload
return LM_STATUS_SUCCESS;
}
{
return LM_STATUS_SUCCESS;
}
{
return LM_STATUS_SUCCESS;
}
{
return LM_STATUS_SUCCESS;
}
{
/* TODO?? */
return LM_STATUS_SUCCESS;
}
{
struct pf_vf_bulletin_content volatile *bulletin = (struct pf_vf_bulletin_content *)pdev->vars.vf_pf_mess.bulletin_virt_addr;
{
return PFVF_BB_CHANNEL_IS_NOT_ACTIVE;
}
{
{
if ((bulletin->length >= sizeof(bulletin->crc)) && (bulletin->length <= sizeof(union pf_vf_bulletin))
&& (bulletin->crc == mm_crc32((u8_t*)bulletin + sizeof(bulletin->crc), bulletin->length - sizeof(bulletin->crc), BULLETIN_CRC_SEED)))
break;
}
if (attempts == BULLETIN_ATTEMPTS)
{
return PFVF_BB_CHANNEL_CRC_ERR;
}
{
}
{
{
return PFVF_BB_VALID_MAC;
}
}
}
return PFVF_BB_NO_UPDATE;
}
{
u8_t i;
/* Need to use pretend for VF */
}
/* set Parent PF */
igu_sb_cnt = vf_info->num_allocated_chains; // pdev->hw_info.intr_blk_info.igu_info.vf_igu_info[abs_vf_id].igu_sb_cnt;
prod_idx = LM_VF_IGU_SB_ID(vf_info,sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
for (i = 0; i < num_segs;i++) {
}
}
return lm_status;
}
{
/* Need to use pretend for VF */
return LM_STATUS_SUCCESS;
}
/* disable both bits, for INTA, MSI and MSI-X. */
RESET_FLAGS(val, (IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK));
return lm_status;
}
{
* Need to use Pretend in order to do this. Note: once we do pretend
* all accesses to SPLIT-68 will be done as if-vf...
* Bits. Bits [13:10] - Reserved. Bits [9:4] - VFID. Bits [3] - VF valid. Bits [2:0] - PFID.
*/
if (lm_status == LM_STATUS_SUCCESS) {
switch (was_err_num) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
break;
default:
was_err_reg = 0;
DbgBreak();
}
if (was_err_reg) {
REG_WR(PFDEV(pdev), was_err_reg, was_err_value); /* PglueB - Clear the was_error indication of the relevant function*/
}
/* IGU Initializations */
}
} else {
DbgBreak();
}
return lm_status;
}
{
return LM_STATUS_SUCCESS;
}
if (lm_status == LM_STATUS_SUCCESS) {
} else {
}
return lm_status;
}
/*Master Channel Virt*/
{
if (!vf_info) {
DbgBreakMsg("lm_pf_create_vf: vf_info is not found\n");
return LM_STATUS_FAILURE;
}
if (num_of_vf_avaiable_chains == 0)
{
return LM_STATUS_RESOURCE;
}
chains_resource_acquired = lm_pf_acquire_vf_chains_resources(pdev, vf_info->relative_vf_id, num_of_vf_avaiable_chains);
if (!chains_resource_acquired) {
DbgBreak();
return LM_STATUS_RESOURCE;
}
#if 0
if (lm_status == LM_STATUS_PENDING)
{
/* Synchrounous complete */
}
#endif
} else {
}
return lm_status;
}
{
if (!vf_info) {
DbgBreakMsg("lm_pf_remove_vf: vf_info is not found\n");
return LM_STATUS_FAILURE;
}
}
if (lm_status != LM_STATUS_SUCCESS) {
DbgBreak();
} else {
}
if (con_state != LM_CON_STATE_CLOSE)
{
if (con_state != LM_CON_STATE_OPEN) {
DbgBreak();
} else {
if (lm_status != LM_STATUS_SUCCESS)
{
DbgBreak();
return lm_status;
}
}
}
}
vf_info->vf_si_num_of_active_q = 0;
} else {
}
return lm_status;
}
{
// TODO - use here pdev->vars.clk_factor
if (CHIP_REV_IS_EMUL(pdev))
{
}
else if (CHIP_REV_IS_FPGA(pdev))
{
}
else
{
factor = 1;
}
/*
VF FLR only part
a. Wait until there are no pending ramrods for this VFid in the PF DB. - No pending VF's pending ramrod. It's based on "FLR not during driver load/unload".
What about set MAC?
b. Send the new "L2 connection terminate" ramrod for each L2 CID that was used by the VF,
including sending the doorbell with the "terminate" flag. - Will be implemented in FW later
c. Send CFC delete ramrod on all L2 connections of that VF (set the CDU-validation field to "invalid"). - part of FW cleanup. VF_TO_PF_CID must initialized in
PF CID array*/
/* 3. Poll on the DQ per-function usage-counter until it's 0. */
if (lm_status == LM_STATUS_SUCCESS) {
pdev->flr_stats.dq_usage_counter = REG_WAIT_VERIFY_VAL(PFDEV(pdev), DORQ_REG_VF_USAGE_CNT, 0, wait_ms);
DbgMessage(pdev, FATAL, "%d*%dms waiting for DQ per vf usage counter\n", pdev->flr_stats.dq_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
} else {
return lm_status;
}
/* 4. Activate the FW cleanup process by activating AggInt in the FW with GRC. Set the bit of the relevant function in the AggInt bitmask,
to indicate to the FW which function is being cleaned. Wait for the per-function completion indication in the Cstorm RAM
*/
cleanup_complete = 0xFFFFFFFF;
LM_INTMEM_READ32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),&cleanup_complete, BAR_CSTRORM_INTMEM);
if (cleanup_complete) {
DbgBreak();
}
final_cleanup.command = (XSTORM_AGG_INT_FINAL_CLEANUP_INDEX << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM;
final_cleanup.command |= (XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE;
final_cleanup.command |= (function_for_clean_up << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX;
pdev->flr_stats.final_cleanup_complete = REG_WAIT_VERIFY_VAL(PFDEV(pdev), BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up), 1, wait_ms);
DbgMessage(pdev, FATAL, "%d*%dms waiting for final cleanup compete\n", pdev->flr_stats.final_cleanup_complete, DEFAULT_WAIT_INTERVAL_MICSEC);
/* Lets cleanup for next FLR final-cleanup... */
LM_INTMEM_WRITE32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),0, BAR_CSTRORM_INTMEM);
/* 5. ATC cleanup. This process will include the following steps (note that ATC will not be available for phase2 of the
integration and the following should be added only in phase3):
a. Optionally, wait 2 ms. This is not a must. The driver can start polling (next steps) immediately,
but take into account that it may take time till the done indications will be set.
b. Wait until INVALIDATION_DONE[function] = 1
c. Write-clear INVALIDATION_DONE[function] */
/* 6. Verify PBF cleanup. Do the following for all PBF queues (queues 0,1,4, that will be indicated below with N):
a. Make sure PBF command-queue is flushed: Read pN_tq_occupancy. Let's say that the value is X.
This number indicates the number of occupied transmission-queue lines.
Poll on pN_tq_occupancy and pN_tq_lines_freed_cnt until one of the following:
i. pN_tq_occupancy is 0 (queue is empty). OR
ii. pN_tq_lines_freed_cnt equals has advanced (cyclically) by X (all lines that were in the queue were processed). */
switch (idx) {
case 0:
pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT;
break;
case 1:
pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT;
break;
case 2:
pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY;
pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT;
break;
}
DbgMessage(pdev, FATAL, "TQ_LINES_FREED_CNT[%d]: s:%x\n", (idx == 2) ? 4 : idx, tq_freed_cnt_start);
} else {
DbgBreak();
break;
}
}
}
/* b. Make sure PBF transmission buffer is flushed: read pN_init_crd once and keep it in variable Y.
Read pN_credit and keep it in X. Poll on pN_credit and pN_internal_crd_freed until one of the following:
i. (Y - pN_credit) is 0 (transmission buffer is empty). OR
ii. pN_internal_crd_freed_cnt has advanced (cyclically) by Y-X (all transmission buffer lines that were occupied were freed).*/
switch (idx) {
case 0:
pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT;
break;
case 1:
pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT;
break;
case 2:
pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT;
break;
}
inernal_freed_crd_last = inernal_freed_crd_start = REG_RD(PFDEV(pdev), pbf_reg_pN_internal_crd_freed);
DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: s:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_start);
while ((credit_last != init_crd)
} else {
DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: c:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_last);
DbgBreak();
break;
}
}
}
/* 7. Wait for 100ms in order to make sure that the chip is clean, including all PCI related paths
(in Emulation the driver can wait for 10ms*EmulationFactor, i.e.: 20s). This is especially required if FW doesn't implement
the flows in Optional Operations (future enhancements).) */
/* 9. Initialize the function as usual this should include also re-enabling the function in all the HW blocks and Storms that
were disabled by the MCP and cleaning relevant per-function information in the chip (internal RAM related information, IGU memory etc.).
a. In case of VF, PF resources that were allocated for previous VF can be re-used by the new VF. If there are resources
that are not needed by the new VF then they should be cleared.
b. Note that as long as slow-path prod/cons update to Xstorm is not atomic, they must be cleared by the driver before setting
the function to "enable" in the Xstorm.
c. Don't forget to enable the VF in the PXP or the DMA operation for PF in the PXP. */
{
#ifdef VF_INVOLVED
#endif
}
return lm_status;
}
/* NOTE(review): body of lm_pf_fl_vf_reset_set_inprogress (name inferred from
 * the DbgBreakMsg string below). The signature line and the actual
 * "set in-progress" update are elided in this extract; only the visible
 * guard is documented. */
{
/* Defensive check: the VF descriptor lookup failed -- nothing to mark. */
if (!vf_info) {
DbgBreakMsg("lm_pf_fl_vf_reset_set_inprogress: vf_info is not found\n");
return;
} else {
/* NOTE(review): the statement(s) that flag the FLR reset as in-progress
 * are not visible here -- confirm against the full source. */
}
}
/* NOTE(review): body of lm_pf_fl_vf_reset_clear_inprogress (name inferred
 * from the DbgBreakMsg string below). The signature line and the actual
 * "clear in-progress" update are elided in this extract. */
{
/* Defensive check: the VF descriptor lookup failed -- nothing to clear. */
if (!vf_info) {
DbgBreakMsg("lm_pf_fl_vf_reset_clear_inprogress: vf_info is not found\n");
return;
} else {
/* NOTE(review): the statement(s) that clear the FLR in-progress flag
 * are not visible here -- confirm against the full source. */
}
}
/* NOTE(review): body of lm_pf_fl_vf_reset_is_inprogress (name inferred from
 * the DbgBreakMsg string below). Returns a boolean-style value; the
 * success-path return in the else branch is elided in this extract. */
{
/* Defensive check: unresolved VF descriptor -- report "not in progress". */
if (!vf_info) {
DbgBreakMsg("lm_pf_fl_vf_reset_is_inprogress: vf_info is not found\n");
return FALSE;
} else {
/* NOTE(review): the actual flag read/return is not visible here --
 * confirm against the full source. */
}
}
{
DbgMessage(pdev, WARN, "VF[%d%d)] is not closed yet\n", vf_info->relative_vf_id, vf_info->abs_vf_id);
}
if (lm_status != LM_STATUS_SUCCESS) {
DbgBreak();
} else {
}
{
}
else
{
}
}
vf_info->vf_si_num_of_active_q = 0;
// if (!(vf_info->was_malicious || vf_info->was_flred))
{
/*
Disable the function in STORMs
*/
}
}
}
}
DbgMessage(pdev, WARN, "VF[%d%d)] is not released yet\n", vf_info->relative_vf_id, vf_info->abs_vf_id);
}
return lm_status;
}
/**
 * Configure TPA (LRO/RSC) aggregation parameters for one of a VF's Rx
 * queues and send the corresponding ramrod to FW on the VF's behalf.
 *
 * NOTE(review): several body lines are elided in this extract (local
 * declarations such as 'tpa_chain'/'lm_status', the guard condition owning
 * the first brace below, and the ramrod-post call that the orphan 'type,'
 * argument belongs to). Only the visible statements are documented.
 *
 * @param pdev        PF device context
 * @param vf_info     VF whose queue is being configured
 * @param q_idx       VF-relative Rx queue index
 * @param update_ipv4 IPv4 TPA enable/disable value (semantics per HSI)
 * @param update_ipv6 IPv6 TPA enable/disable value (semantics per HSI)
 *
 * @return lm_status_t
 */
lm_status_t lm_pf_tpa_send_vf_ramrod(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, u32_t q_idx, u8_t update_ipv4, u8_t update_ipv6)
{
// Add ramrod send code
/* Parameter-validation guard (the condition line is elided in this extract). */
{
DbgBreakMsg("lm_tpa_send_ramrod : invalid paramters");
return LM_STATUS_FAILURE;
}
/* maximal TPA queues allowed for this client */
/* The maximal number of SGEs that can be used for one packet. Depends on MTU and SGE size. Must be 0 if SGEs are disabled. */
tpa_chain->tpa_ramrod_data_virt->max_sges_for_packet = DIV_ROUND_UP_BITS(tpa_chain->mtu, LM_TPA_PAGE_BITS);
/* Size of the buffers pointed by SGEs */
/* Compile-time guard: LM_TPA_PAGE_SIZE must fit the HSI sge_buff_size field. */
ASSERT_STATIC(LM_TPA_PAGE_SIZE < MAX_VARIABLE_VALUE(tpa_chain->tpa_ramrod_data_virt->sge_buff_size));
/* maximal size for the aggregated TPA packets, reported by the host */
/* Compile-time guard: the aggregate-size product must fit the HSI max_agg_size field. */
ASSERT_STATIC((LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE) < MAX_VARIABLE_VALUE(tpa_chain->tpa_ramrod_data_virt->max_agg_size));
tpa_chain->tpa_ramrod_data_virt->max_agg_size = mm_cpu_to_le16(LM_TPA_MAX_AGG_SIZE * LM_TPA_PAGE_SIZE);
/* Remaining HSI fields of the TPA ramrod data (left unset here): */
//u32_t sge_page_base_lo /* The address to fetch the next sges from (low) */;
//u32_t sge_page_base_hi /* The address to fetch the next sges from (high) */;
//u16_t sge_pause_thr_low /* number of remaining sges under which, we send pause message */;
//u16_t sge_pause_thr_high /* number of remaining sges above which, we send un-pause message */;
/* NOTE(review): orphan argument of the elided ramrod-post call. */
type,
return lm_status;
}
{
if (IS_SW_CHANNEL_VIRT_MODE(pdev))
{
struct pf_vf_msg_acquire_resp * presp = (struct pf_vf_msg_acquire_resp *)pdev->pf_vf_acquiring_resp;
}
}
else if (IS_HW_CHANNEL_VIRT_MODE(pdev))
{
}
}
else
{
DbgBreak();
}
}
return is_rsc_supported;
}
/* NOTE(review): body of lm_pf_init_vf_filters (name inferred from the
 * DbgBreakMsg string below). The signature line, the guard condition of the
 * first brace, and the actual filter-initialization statements are elided
 * in this extract. */
{
/* Parameter-validation guard (condition line elided). */
{
DbgBreakMsg("lm_pf_init_vf_filters : invalid paramters");
}
else
{
/* NOTE(review): filter initialization logic elided -- confirm against
 * the full source. */
}
return;
}
/* NOTE(review): body of lm_pf_allow_vf_promiscuous_mode (name inferred from
 * the DbgBreakMsg string below). The signature line, the guard condition of
 * the first brace, and the actual promiscuous-mode update are elided in
 * this extract. */
{
/* Parameter-validation guard (condition line elided). */
{
DbgBreakMsg("lm_pf_allow_vf_promiscuous_mode : invalid paramters");
}
else
{
/* NOTE(review): promiscuous-mode allow/deny logic elided -- confirm
 * against the full source. */
}
return;
}
/* NOTE(review): body of lm_pf_int_vf_igu_sb_cleanup (name inferred from the
 * DbgBreakMsg strings below). PF-side cleanup of a VF's IGU status block.
 * The signature line, most guard conditions, and the GRC producer-update
 * commands themselves are elided in this extract; only the visible skeleton
 * (set cleanup bit, poll, clear cleanup bit, poll) is documented. */
{
#ifdef _VBD_CMD_
/* VBD command mode: IGU cleanup is skipped entirely. */
return;
#endif
/* Not supported in backward compatible mode! */
{
return;
}
/* Parameter-validation guard (condition line elided). */
{
DbgBreakMsg("lm_pf_int_vf_igu_sb_cleanup : invalid paramters");
return;
}
/* Guard: this flow may only run on the host/PF side (condition elided). */
{
DbgBreakMsg("lm_pf_int_vf_igu_sb_cleanup : only available on Host/PF side");
return;
}
/* Cleanup can be done only via GRC access using the producer update command */
/* wait for clean up to finish */
/* Poll loop body (elided) -- waits for the IGU cleanup-set to take effect. */
{
}
/* Timeout path: the cleanup-set bit never reached the expected value. */
{
DbgMessage(pdev, FATAL, "Unable to finish IGU cleanup - set: igu_sb_id %d offset %d bit %d (cnt %d)\n",
}
/* Now we clear the cleanup-bit... same command without cleanup_set... */
/* wait for clean up to finish */
/* Poll loop body (elided) -- waits for the IGU cleanup-clear to take effect. */
{
}
/* Timeout path: the cleanup-clear bit never reached the expected value. */
{
DbgMessage(pdev, FATAL, "Unable to finish IGU cleanup - clear: igu_sb_id %d offset %d bit %d (cnt %d)\n",
}
}
#endif
/* */