#include "lm5710.h"
#include "everest_iscsi_constants.h"
#include "everest_l5cm_constants.h"
#include "577xx_int_offsets.h"
#include "bd_chain.h"
#include "command.h"
#include "lm_sp_req_mgr.h"
#include "lm_l4sp.h"
#include "lm_l4if.h"
#include "lm_l5if.h"
#include "mm_l5if.h"
#include "mm_l4if.h"
#include "mm.h"
)
{
}
OUT void** pbl_virt_table,
)
{
{
/* allocPblMem - illegal pblSize */
return LM_STATUS_INVALID_PARAMETER;
}
if (rt_mem)
{
*pbl_size,
0,
{
*pbl_size = 0;
return LM_STATUS_RESOURCE;
}
pbl_entries * sizeof(void *),
if CHK_NULL(*pbl_virt_table)
{
*pbl_size = 0;
return LM_STATUS_RESOURCE;
}
}
else
{
*pbl_size,
0,
{
*pbl_size = 0;
return LM_STATUS_RESOURCE;
}
pbl_entries * sizeof(void *),
if CHK_NULL(*pbl_virt_table)
{
*pbl_size = 0;
return LM_STATUS_RESOURCE;
}
}
return LM_STATUS_SUCCESS;
}
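/* A PBL (page buffer list) is a flat table of per-page physical addresses
 * that lets the FW walk a buffer which is virtually contiguous but
 * physically scattered. A minimal sketch of filling such a table, assuming
 * a hypothetical ex_page_phys_addr() lookup (illustrative only, not this
 * driver's actual allocation path): */
extern u64_t ex_page_phys_addr(void *virt); /* assumed helper (illustrative) */

static u32_t ex_fill_pbl(
    void *buf_base_virt,
    u32_t buf_size,
    u64_t *pbl_table
    )
{
    u32_t pbl_entries = (buf_size + LM_PAGE_SIZE - 1) / LM_PAGE_SIZE;
    u32_t i;

    for (i = 0; i < pbl_entries; i++)
    {
        /* one table entry per backing page of the buffer */
        pbl_table[i] = ex_page_phys_addr((u8_t *)buf_base_virt + i * LM_PAGE_SIZE);
    }
    return pbl_entries;
}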
IN void* buf_base_virt,
OUT void** pbl_virt_table,
{
{
return LM_STATUS_INVALID_PARAMETER;
}
lm_status = lm_alloc_pbl_mem(pdev, *pbl_entries, pbl_virt, pbl_phy, pbl_virt_table, rt_mem, pbl_size, mm_cli_idx);
if (lm_status != LM_STATUS_SUCCESS)
{
*pbl_entries = 0;
return lm_status;
}
lm_status = lm_bd_chain_pbl_set_ptrs(buf_base_virt, *buf_base_phy, *pbl_virt, *pbl_virt_table, *pbl_entries);
if (lm_status != LM_STATUS_SUCCESS)
{
if (rt_mem)
{
}
*pbl_entries = 0;
*pbl_size = 0;
return lm_status;
}
return LM_STATUS_SUCCESS;
}
{
/* check arguments */
{
return LM_STATUS_FAILURE;
}
DbgMessage(pdev, INFORMi | INFORMl5sp, "#lm_alloc_eq, eq_chain=%p, page_cnt=%d\n", eq_chain, page_cnt);
/* alloc the chain */
if(!eq_addr_save->b_allocated)
{
0,
cli_idx);
{
return LM_STATUS_RESOURCE;
}
// For debugging
}
else
{
}
return LM_STATUS_SUCCESS;
} /* lm_alloc_eq */
{
/* check arguments */
{
return LM_STATUS_INVALID_PARAMETER;
}
bd_chain->bd_chain_phy, (u16_t)bd_chain->page_cnt, sizeof(struct iscsi_kcqe), 1/*0*/, is_chain_mode);
/* verify that EQ size is not too large */
{
return LM_STATUS_FAILURE;
}
idx,
// Assign the EQ chain consumer pointer to the consumer index in the status block.
{
return LM_STATUS_FAILURE;
}
/*
if (IS_E2(pdev)) {
pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS] = 0;
LM_SC_EQ(pdev, idx).hw_con_idx_ptr =
&(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]);
} else {
pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS] = 0;
LM_SC_EQ(pdev, idx).hw_con_idx_ptr =
&(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]);
}
*/
return LM_STATUS_SUCCESS;
} /* lm_sc_setup_eq */
/**
*
* @description
* Allocate EQ PBL to pass to FW in init ramrod
* @param pdev
* @param eq_chain
* @param pbl
* @param eq_addr_save
*
* @return lm_status_t
*/
{
/* check arguments */
{
return LM_STATUS_INVALID_PARAMETER;
}
// For D3 case
{
&pbl->pbl_entries,
if (lm_status != LM_STATUS_SUCCESS)
{
return LM_STATUS_FAILURE;
}
}
return lm_status;
}
{
/* check arguments */
{
return LM_STATUS_INVALID_PARAMETER;
}
1/*0*/); /* EQ is considered full of blank entries */
/* verify that EQ size is not too large */
{
return LM_STATUS_FAILURE;
}
idx,
// Assign the EQ chain consumer pointer to the consumer index in the status block.
{
return LM_STATUS_FAILURE;
}
/*
if (IS_E2(pdev)) {
pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS] = 0;
LM_FC_EQ(pdev, idx).hw_con_idx_ptr =
&(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e2_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]);
} else {
pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS] = 0;
LM_FC_EQ(pdev, idx).hw_con_idx_ptr =
&(pdev->vars.status_blocks_arr[idx].host_hc_status_block.e1x_sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]);
}
*/
return LM_STATUS_SUCCESS;
} /* lm_fc_setup_eq */
/** Description
 * Callback function for cids being recycled
*/
void lm_sc_recycle_cid_cb(
struct _lm_device_t *pdev,
void *cookie,
{
{
DbgBreakIf(1);
return;
}
/* un-block the manager... */
{
&iscsi->pending_ofld3);
}
/* we can now unblock any pending slow-paths */
}
{
{
return;
}
if (iscsi)
{
}
kcqe.completion_status = LM_STATUS_SUCCESS; /* TODO_ER: Fixme: do we want this?? maybe ok since l5 is aware of er... */
}
)
{
u16_t i = 0;
{
return LM_STATUS_INVALID_PARAMETER;
}
/* Allocate global buffer */
0,
{
return LM_STATUS_RESOURCE;
}
/* cid recycled cb registration */
/* SQ-completion cb registration (SQs that get completed internally in the driver) */
// Except global_buff and pdev->iscsi_info, all other fields should be zero
{
DbgBreakIf(0 != chk_buf[i]);
}
// Except global_buff and pdev->iscsi_info, all other fields should be zero
{
DbgBreakIf(0 != chk_buf[i]);
}
return LM_STATUS_SUCCESS;
} /* lm_sc_alloc_resc */
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
const u32_t max_func_cons,
const u16_t reserved_eq_elements,
const u16_t eqes_per_page,
const u16_t max_eq_pages
)
{
/* Init EQs - create page chains */
return eq_page_cnt;
}
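/* A worked sketch of the EQ page-count math above (assumption: illustrative,
 * the driver's exact formula is elided here): every connection needs room
 * for one fastpath completion plus one error notification, plus the reserved
 * slowpath elements; the last entry of each page is lost to the next-BD
 * pointer. */
static u16_t ex_l5_eq_page_cnt(
    u32_t max_func_cons,
    u16_t reserved_eq_elements,
    u16_t eqes_per_page,
    u16_t max_eq_pages
    )
{
    u32_t eq_size = max_func_cons * 2 + reserved_eq_elements;
    u16_t usable_eqes = eqes_per_page - 1; /* minus the next-BD entry */
    u32_t eq_page_cnt = (eq_size + usable_eqes - 1) / usable_eqes;

    return (u16_t)((eq_page_cnt > max_eq_pages) ? max_eq_pages : eq_page_cnt);
}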
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
)
{
{
DbgBreakMsg("lm_fc_free_init_resc failed");
return LM_STATUS_INVALID_PARAMETER;
}
return lm_status;
}
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
lm_clear_chain_sb_cons_idx(pdev, eq_idx, &LM_FC_EQ(pdev, eq_idx).hc_sb_info, &LM_FC_EQ(pdev, eq_idx).hw_con_idx_ptr);
}
return lm_status;
} /* lm_fc_clear_d0_resc */
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
pdev,
cid);
return lm_status;
} /* lm_fc_clear_resc */
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
)
{
{
DbgBreakMsg("lm_sc_free_init_resc failed");
return LM_STATUS_INVALID_PARAMETER;
}
return lm_status;
}
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
lm_clear_chain_sb_cons_idx(pdev, eq_idx, &LM_SC_EQ(pdev, eq_idx).hc_sb_info, &LM_SC_EQ(pdev, eq_idx).hw_con_idx_ptr);
}
return lm_status;
} /* lm_sc_clear_d0_resc */
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
pdev,
cid);
return lm_status;
} /* lm_sc_clear_resc */
{
{
return lm_status;
}
0,
return lm_status;
}
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
)
{
{
return LM_STATUS_FAILURE;
}
{
return LM_STATUS_FAILURE;
}
{
return LM_STATUS_INVALID_PARAMETER;
}
/* the number of cqs is used to determine the number of eqs */
{
}
// Only one EQ chain is supported.
{
DbgMessage(pdev, INFORM, "lm_sc_init: l5_eq_chain_cnt=%d\n", pdev->iscsi_info.run_time.l5_eq_chain_cnt);
DbgBreakMsg("lm_sc_init: pdev->iscsi_info.l5_eq_chain_cnt is bigger than 1.\n");
return LM_STATUS_FAILURE;
}
/* TOE (when RSS is disabled), iSCSI and FCoE will use the same NDSB. */
// if (!pdev->params.l4_enable_rss) {
// RESET_FLAGS(pdev->params.sb_cpu_affinity, 1 << LM_TOE_RSS_BASE_CHAIN_INDEX(&pdev->lmdev));
// }
/* round up HQ size to fill an entire page (see the worked example after this function) */
pdev->iscsi_info.run_time.hq_size = (u16_t)(hq_pbl_entries * (LM_PAGE_SIZE / sizeof(struct iscsi_hq_bd)));
/* Init EQs - create page chains */
// The size of the EQ in iSCSI is <num iscsi connections> * 2 + slowpath.
// I.e. for each connection there should be room for 1 fastpath completion and 1 error notification.
MAX_EQ_PAGES); // Subtract the next-BD page.
{
lm_status = lm_l5_alloc_eq(pdev, &LM_SC_EQ(pdev, eq_sb_idx), &LM_EQ_ADDR_SAVE_SC(pdev, eq_sb_idx) , eq_page_cnt, LM_CLI_IDX_ISCSI);
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
SET_FLAGS( tstorm_l5cm_tcp_flags_param.flags, delayed_ack_en << TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN_SHIFT);
// in case the size changes, we need to change the LM_INTMEM_WRITExx macros etc.
/* Init internal RAM */
/* init Tstorm RAM */
LM_INTMEM_WRITE16(pdev, TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), req1->num_tasks_per_conn, BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE64(pdev, TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), *((u64_t *)&req2->error_bit_map), BAR_TSTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(func), tstorm_l5cm_tcp_flags_param.flags, BAR_TSTRORM_INTMEM);
/* init Ustorm RAM */
LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func), req1->rq_buffer_size, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), req1->num_tasks_per_conn, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, USTORM_ISCSI_R2TQ_SIZE_OFFSET(func), (u16_t)pdev->iscsi_info.run_time.num_of_tasks * ISCSI_MAX_NUM_OF_PENDING_R2TS, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE64(pdev, USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), pdev->iscsi_info.bind.global_buff_base_phy.as_u64, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE64(pdev, USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), *((u64_t *)&req2->error_bit_map), BAR_USTRORM_INTMEM);
/* init Xstorm RAM */
LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), req1->num_tasks_per_conn, BAR_XSTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_HQ_SIZE_OFFSET(func), pdev->iscsi_info.run_time.hq_size, BAR_XSTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_SQ_SIZE_OFFSET(func), req1->num_tasks_per_conn, BAR_XSTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func), req1->num_tasks_per_conn * ISCSI_MAX_NUM_OF_PENDING_R2TS, BAR_XSTRORM_INTMEM);
/* init Cstorm RAM */
LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), req1->num_tasks_per_conn, BAR_CSTRORM_INTMEM);
{
LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_EQ_PROD_OFFSET(func, eq_idx), lm_bd_chain_prod_idx(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain), BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev, CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 1).as_u32.low, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev, 4 + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 1).as_u32.high, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev, CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 0).as_u32.low, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev, 4 + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, eq_idx), lm_bd_chain_phys_addr(&LM_SC_EQ(pdev, eq_sb_idx).bd_chain, 0).as_u32.high, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8 (pdev, CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, eq_idx), 1, BAR_CSTRORM_INTMEM); // maybe move to init tool
LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, eq_idx), LM_FW_SB_ID(pdev,eq_sb_idx), BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8 (pdev, CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, eq_idx), HC_INDEX_ISCSI_EQ_CONS, BAR_CSTRORM_INTMEM);
}
LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_HQ_SIZE_OFFSET(func), pdev->iscsi_info.run_time.hq_size, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn, BAR_CSTRORM_INTMEM);
return LM_STATUS_SUCCESS;
} /* lm_sc_init */
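/* Worked example of the HQ rounding in lm_sc_init above (assumption:
 * illustrative numbers): with LM_PAGE_SIZE = 4096 and
 * sizeof(struct iscsi_hq_bd) = 64, each page holds 64 HQ BDs, so
 * hq_size = hq_pbl_entries * 64 always fills whole pages with no partial
 * page at the tail. */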
/* Get dma memory for init ramrod */
{
{
sizeof(lm_fcoe_slow_path_phys_data_t),
0,
{
return LM_STATUS_RESOURCE;
}
}
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
// Only one EQ chain is supported.
{
DbgBreakMsg("lm_fc_init: pdev->fcoe_info.run_time.num_of_cqs is bigger than 1.\n");
return LM_STATUS_INVALID_PARAMETER;
}
/* TOE (when RSS is disabled), iSCSI and FCoE will use the same NDSB. */
{
return LM_STATUS_RESOURCE;
}
// Init EQs - create page chains
{
lm_status = lm_l5_alloc_eq(pdev, &LM_FC_EQ(pdev, eq_sb_idx),&LM_EQ_ADDR_SAVE_FC(pdev, eq_sb_idx),eq_page_cnt, LM_CLI_IDX_FCOE);
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
/* Set up the ramrod params */
/* waiting for new HSI */
ramrod_params->fcoe_init.eq_pbl_base.lo = mm_cpu_to_le32(LM_FC_PBL(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).pbl_phys_table_phys.as_u32.low);
ramrod_params->fcoe_init.eq_pbl_base.hi = mm_cpu_to_le32(LM_FC_PBL(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).pbl_phys_table_phys.as_u32.high);
ramrod_params->fcoe_init.eq_pbl_size = mm_cpu_to_le32(LM_FC_PBL(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).pbl_entries);
ramrod_params->fcoe_init.eq_prod = mm_cpu_to_le16(lm_bd_chain_prod_idx(&LM_FC_EQ(pdev, pdev->fcoe_info.run_time.fc_eq_base_chain_idx).bd_chain));
ramrod_params->fcoe_init.sb_num = mm_cpu_to_le16(LM_FW_SB_ID(pdev,pdev->fcoe_info.run_time.fc_eq_base_chain_idx));
if (IS_SD_UFP_MODE(pdev))
{
}
if (lm_status != LM_STATUS_SUCCESS)
{
/* only one we know of... */
/* Command wasn't posted, so we need to complete it from here. */
}
// completion is asynchronous
return LM_STATUS_SUCCESS;
} /* lm_fc_init */
/** Description
 * Callback function for cids being recycled
*/
void
struct _lm_device_t *pdev,
void *cookie,
{
{
DbgBreakIf(1);
return;
}
/* un-block the manager... */
/* we can now unblock any pending slow-paths */
}
{
{
return;
}
if (fcoe)
{
}
kcqe.completion_status = LM_STATUS_SUCCESS; /* Fixme: do we want this?? maybe ok since l5 is aware of er... */
switch (cmd)
{
break;
break;
break;
break;
break;
break;
break;
}
}
/**
* @description
 * Returns the max FCoE tasks supported.
 * In order to know the max tasks enabled, refer to
* pdev->params.max_fcoe_task
* @param pdev
*
* @return u32_t
*/
{
/* FCOE supports a maximum of MAX_FCOE_FUNCS_PER_ENGINE per engine.
 * In case of MF / 4-port mode this means we can have more than one FCoE
 * function on an engine, in which case we'll need to divide the number of
 * tasks between them. However, in single function mode on a 2-port chip
 * (i.e. one function on the engine) the FCoE function will have all the
 * tasks allocated to it. (See the sketch after this function.)
 */
{
}
return max_fcoe_task;
}
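/* Sketch of the task-division rule described in the comment above
 * (assumption: the per-engine task pool and the number of FCoE functions
 * sharing the engine are taken as parameters here; the driver derives them
 * from chip and MF mode state): */
static u32_t ex_max_fcoe_task_per_func(
    u32_t tasks_per_engine,
    u8_t fcoe_funcs_on_engine
    )
{
    /* single function mode on a 2-port chip: the one FCoE function on the
     * engine owns the entire task pool */
    if (fcoe_funcs_on_engine <= 1)
    {
        return tasks_per_engine;
    }
    /* mf / 4-port mode: divide the tasks between the functions */
    return tasks_per_engine / fcoe_funcs_on_engine;
}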
/**
*
*
* @description
*
* @param pdev
*
* @return STATIC void
*/
STATIC void
{
{
return;
}
}
/**
*
*
* @description
*
* @param pdev
*
* @return lm_status_t
*/
{
{
return LM_STATUS_INVALID_PARAMETER;
}
/* cid recycled cb registration */
/* SQ-completion cb registration (SQs that get completed internally in the driver) */
/* Get physical memory for RAMROD commands */
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return LM_STATUS_SUCCESS;
} /* lm_fc_alloc_resc */
{
{
return LM_STATUS_INVALID_PARAMETER;
}
DbgBreakIf(!tcp);
{
/* currently there is no specific completion status handling, only success / fail */
/* but originally the flags are those of toe_initiate_offload_ramrod_data */
comp_status = 1;
}
/* toe lock is taken inside */
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_FAILURE;
}
switch (op_code)
{
if (mm_sc_is_omgr_enabled(pdev))
{
}
else
{
}
break;
case RAMROD_CMD_ID_ETH_EMPTY:
break;
break;
case L5CM_RAMROD_CMD_ID_QUERY:
break;
default:
return LM_STATUS_INVALID_PARAMETER;
}
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
op_code = kcqe->op_code; /* Store the opcode, the function below may modify it (internal searcher), need to keep for sq_complete later on */
{
/* case ISCSI_KCQE_OPCODE_INIT:
lm_status = mm_sc_complete_init_request(pdev, kcqe);
if (lm_status != LM_STATUS_SUCCESS)
{
DbgMessage(pdev, WARN, "lm_sc_complete_slow_path_request: lm_sc_complete_init_request failed.\n");
}
break;
if (lm_status != LM_STATUS_SUCCESS)
{
DbgMessage(pdev, WARN, "lm_sc_complete_slow_path_request: lm_sc_complete_l4_ofld_request failed.\n");
}
break;
if (lm_status != LM_STATUS_SUCCESS)
{
DbgMessage(pdev, WARN, "lm_sc_complete_slow_path_request: lm_sc_complete_update_request failed.\n");
}
break;
case L5CM_RAMROD_CMD_ID_QUERY:
lm_status = lm_sc_complete_l4_upload_request(pdev, kcqe->op_code, SW_CID(kcqe->iscsi_conn_context_id));
break;
default:
}
return lm_status;
}
/* Handle FC related ramrod completions */
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
{
break;
}
{
if(!fcoe)
{
DbgBreakIf(!fcoe);
break;
}
break;
}
{
if(!fcoe)
{
DbgBreakIf(!fcoe);
break;
}
break;
}
{
/* Disable is complete, now we need to send the terminate ramrod */
if(!fcoe)
{
DbgBreakIf(!fcoe);
break;
}
break;
}
{
break;
}
{
break;
}
case FCOE_RAMROD_CMD_ID_TERMINATE_CONN: /* Internal VBD not passed up... */
{
/* Terminate is complete, now we need to send the CFC delete ramrod */
if(!fcoe)
{
DbgBreakIf(!fcoe);
break;
}
break;
}
default:
{
break;
}
}
if( b_valid )
{
}
return lm_status;
}
{
if (eq->hw_con_idx_ptr &&
{
}
return result;
}
{
if (eq->hw_con_idx_ptr &&
{
}
return result;
}
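/* Sketch of the "EQ has new entries" test that the two helpers above
 * implement (assumption: names are illustrative): the chip posts its
 * consumer index into the status block at *hw_con_idx_ptr; there is work
 * when it differs from the index the driver last consumed. */
static u8_t ex_eq_has_work(
    volatile u16_t *hw_con_idx_ptr,
    u16_t driver_cons_idx
    )
{
    return (u8_t)(hw_con_idx_ptr && (*hw_con_idx_ptr != driver_cons_idx));
}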
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
return LM_STATUS_INVALID_PARAMETER;
}
switch (op_code)
{
break;
break;
default:
return LM_STATUS_INVALID_PARAMETER;
}
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
if (lm_status != LM_STATUS_SUCCESS)
{
}
*l5_kcqe_num = 0;
*l5_kcqe_start = NULL;
return lm_status;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
if (lm_status != LM_STATUS_SUCCESS)
{
}
*fcoe_kcqe_num = 0;
*fcoe_kcqe_start = NULL;
return lm_status;
}
void
{
{
DbgBreakIf(!pdev);
return;
}
while (eq_old_idx != eq_new_idx)
{
/* get next consumed kcqe */
/* we got to the end of the page; if we have some KCQEs that we need to indicate, */
/* do it now, since we can't assume that the memory of the pages is contiguous */
{
if (l5_kcqe_num != 0)
{
}
/* check cons index again */
if (eq_old_idx != eq_new_idx)
{
/* get next consumed cqe */
{
/* shouldn't have happened, got second null from the bd */
DbgBreakIf(!kcqe);
break;
}
}
else
{
/* the new kcqe was the last one we got, break */
break;
}
}
{
case ISCSI_RAMROD_CMD_ID_INIT:
case L5CM_RAMROD_CMD_ID_QUERY:
/* first, complete fast path and error indication, if any */
if (l5_kcqe_num != 0)
{
}
if (lm_status != LM_STATUS_SUCCESS)
{
}
break;
/* Fast path events, no break on purpose */
default:
if (l5_kcqe_start == NULL)
{
}
l5_kcqe_num++;
break;
}
}
/* complete left fast path events */
if (l5_kcqe_num != 0)
{
}
/* update EQ prod in RAM */
LM_INTMEM_WRITE16(pdev, CSTORM_ISCSI_EQ_PROD_OFFSET(FUNC_ID(pdev), eq_num), lm_bd_chain_prod_idx(&eq_chain->bd_chain), BAR_CSTRORM_INTMEM);
}
void
{
{
DbgBreakIf(!pdev);
return;
}
while (eq_old_idx != eq_new_idx)
{
/* get next consumed kcqe */
/* we got to the end of the page; if we have some KCQEs that we need to indicate, */
/* do it now, since we can't assume that the memory of the pages is contiguous */
/* (see the batching sketch after this routine) */
{
if (fcoe_kcqe_num != 0)
{
}
/* check cons index again */
if (eq_old_idx != eq_new_idx)
{
/* get next consumed cqe */
{
/* shouldn't have happened, got second null from the bd */
DbgBreakIf(!kcqe);
break;
}
}
else
{
/* the new kcqe was the last one we got, break */
break;
}
}
/* first, complete fast path completion notification and error indication, if any */
if (fcoe_kcqe_num != 0)
{
}
{
{
if (lm_status != LM_STATUS_SUCCESS)
{
}
break;
}
default:
{
if (fcoe_kcqe_start == NULL)
{
}
break;
}
}
}
/* complete left fast path events */
if (fcoe_kcqe_num != 0)
{
}
/* update EQ prod in RAM */
LM_INTMEM_WRITE16(pdev, USTORM_FCOE_EQ_PROD_OFFSET(FUNC_ID(pdev)), lm_bd_chain_prod_idx(&eq_chain->bd_chain), BAR_USTRORM_INTMEM);
}
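/* Sketch of the page-boundary batching rule used by both EQ service
 * routines above (assumption: illustrative types and callback): KCQEs are
 * indicated upward in runs that never cross an EQ page, because successive
 * pages are not guaranteed to be contiguous in memory. */
typedef void (*ex_indicate_t)(u16_t first_idx, u16_t count);

static void ex_drain_eq(
    u16_t cons_idx,
    u16_t prod_idx,
    u16_t eqes_per_page,
    ex_indicate_t indicate
    )
{
    u16_t run_start = cons_idx;
    u16_t run_len = 0;

    while (cons_idx != prod_idx)
    {
        run_len++;
        cons_idx++;
        /* page edge: flush the accumulated run before moving on to the
         * next (possibly non-contiguous) page */
        if ((cons_idx % eqes_per_page) == 0 && run_len != 0)
        {
            indicate(run_start, run_len);
            run_start = cons_idx;
            run_len = 0;
        }
    }
    /* complete any leftover run */
    if (run_len != 0)
    {
        indicate(run_start, run_len);
    }
}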
{
/* Allocate slowpath request data */
0,
{ /* can't allocate task array */
return LM_STATUS_RESOURCE;
}
/* Allocate task array */
iscsi->task_array.base_size = pdev->iscsi_info.run_time.num_of_tasks * sizeof(struct iscsi_task_context_entry);
0,
{ /* can't allocate task array */
return LM_STATUS_RESOURCE;
}
TRUE,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
/* Allocate R2TQ */
iscsi->r2tq.base_size = pdev->iscsi_info.run_time.num_of_tasks * ISCSI_MAX_NUM_OF_PENDING_R2TS * ISCSI_R2TQE_SIZE;
0,
{ /* can't allocate R2TQ */
return LM_STATUS_RESOURCE;
}
TRUE,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
/* Allocate HQ */
0,
{ /* can't allocate HQ */
return LM_STATUS_RESOURCE;
}
TRUE,
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return lm_status;
}
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
/* save the miniport's conn id */
/* Boot connections' physical resources are allocated during bind, not during offload... */
if (!iscsi->b_resources_allocated)
{
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
}
/* Allocate CID */
if (lm_status == LM_STATUS_PENDING)
{
}
else if (lm_status != LM_STATUS_SUCCESS)
{
/* failed to allocate CID */
return lm_status;
}
/* save the returned cid */
/* the slow path request phys data allocated for iscsi will be used in tcp_state.sp_data for the query request */
lm_status = lm_sp_req_manager_set_sp_data(pdev, iscsi->cid, iscsi->sp_req_data.virt_addr, iscsi->sp_req_data.phys_addr);
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
return LM_STATUS_PENDING; /* Too soon to initialize context */
}
return LM_STATUS_SUCCESS;
} /* lm_sc_alloc_con_resc */
void lm_sc_free_con_phys_mem(
)
{
{
mm_rt_free_phys_mem(pdev, sizeof(*iscsi->sp_req_data.virt_addr), iscsi->sp_req_data.virt_addr, iscsi->sp_req_data.phys_addr, mm_cli_idx);
}
mm_rt_free_phys_mem(pdev, iscsi->task_array.base_size, iscsi->task_array.base_virt, iscsi->task_array.base_phy, mm_cli_idx);
}
mm_rt_free_phys_mem(pdev, iscsi->task_array.pbl_size, iscsi->task_array.pbl_phys_table_virt, iscsi->task_array.pbl_phys_table_phys, mm_cli_idx);
}
mm_rt_free_mem(pdev, iscsi->task_array.pbl_virt_table, iscsi->task_array.pbl_entries * sizeof(void *), mm_cli_idx);
}
mm_rt_free_phys_mem(pdev, iscsi->r2tq.base_size, iscsi->r2tq.base_virt, iscsi->r2tq.base_phy, mm_cli_idx);
}
mm_rt_free_phys_mem(pdev, iscsi->r2tq.pbl_size, iscsi->r2tq.pbl_phys_table_virt, iscsi->r2tq.pbl_phys_table_phys, mm_cli_idx);
}
mm_rt_free_mem(pdev, iscsi->r2tq.pbl_virt_table, iscsi->r2tq.pbl_entries * sizeof(void *), mm_cli_idx);
}
mm_rt_free_phys_mem(pdev, iscsi->hq.base_size, iscsi->hq.base_virt, iscsi->hq.base_phy, mm_cli_idx);
}
mm_rt_free_phys_mem(pdev, iscsi->hq.pbl_size, iscsi->hq.pbl_phys_table_virt, iscsi->hq.pbl_phys_table_phys, mm_cli_idx);
}
}
}
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
)
{
{
return LM_STATUS_INVALID_PARAMETER;
}
notify_fw = 0;
}
}
if (!iscsi->b_keep_resources)
{
}
return LM_STATUS_SUCCESS;
}
/* Free the ramrod memory and the CID */
{
{
return LM_STATUS_INVALID_PARAMETER;
}
{
{
notify_fw = 0;
}
}
return LM_STATUS_SUCCESS;
}
/*******************************************************************************
* Description:
*
* Return:
******************************************************************************/
struct iscsi_kwqe_conn_offload1 *req1,
struct iscsi_kwqe_conn_offload2 *req2,
struct iscsi_kwqe_conn_offload3 *req3
)
{
u32_t i;
{
return LM_STATUS_INVALID_PARAMETER;
}
{
return LM_STATUS_INVALID_PARAMETER;
}
/* get context */
"iscsi->ctx_virt=%p, iscsi->ctx_phys_high=%x, iscsi->ctx_phys_low=%x\n",
// init xstorm aggregative context
// init xstorm storm context
//iscsi context
/* advance the SQ pbl_base because it's pointing at the SQ_DB */
//!!DP
ctx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = iscsi->r2tq.pbl_phys_table_virt[0].as_u32.high;
//ctx->xstorm_st_context.iscsi.max_outstanding_r2ts = ISCSI_DEFAULT_MAX_OUTSTANDING_R2T;
SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA, ISCSI_DEFAULT_IMMEDIATE_DATA);
SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T, ISCSI_DEFAULT_INITIAL_R2T);
SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST, ISCSI_DEFAULT_HEADER_DIGEST);
SET_FIELD(ctx->xstorm_st_context.iscsi.flags.flags, XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST, ISCSI_DEFAULT_DATA_DIGEST);
// init tstorm storm context
ctx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE + (ISCSI_DEFAULT_HEADER_DIGEST ? ISCSI_DIGEST_SIZE : 0);
SET_FIELD(ctx->tstorm_st_context.iscsi.flags, TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN, ISCSI_DEFAULT_HEADER_DIGEST);
SET_FIELD(ctx->tstorm_st_context.iscsi.flags, TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN, ISCSI_DEFAULT_DATA_DIGEST);
//To enable the timer block.
// init ustorm storm context
/* advance the RQ pbl_base because it's pointing at the RQ_DB */
//!!DP
/* qp_first_pte[0] will contain the first PTE of the RQ */
/* Set up the first CQ, the first PTE info is contained in req2 */
{
/* For now we only support a single CQ */
return LM_STATUS_INVALID_PARAMETER;
#if 0
/* Set up additional CQs */
{
#if 0
{
return LM_STATUS_INVALID_PARAMETER;
}
#endif
}
#endif
}
SET_FIELD(ctx->ustorm_st_context.negotiated_rx, USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH, ISCSI_DEFAULT_MAX_PDU_LENGTH);
SET_FIELD(ctx->ustorm_st_context.negotiated_rx_and_flags, USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH, ISCSI_DEFAULT_MAX_BURST_LENGTH);
SET_FIELD(ctx->ustorm_st_context.negotiated_rx, USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS, ISCSI_DEFAULT_MAX_OUTSTANDING_R2T);
SET_FIELD(ctx->ustorm_st_context.negotiated_rx_and_flags, USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN, ISCSI_DEFAULT_HEADER_DIGEST);
SET_FIELD(ctx->ustorm_st_context.negotiated_rx_and_flags, USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN, ISCSI_DEFAULT_DATA_DIGEST);
// init cstorm storm context
SET_FIELD(ctx->cstorm_st_context.flags, CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN, ISCSI_DEFAULT_DATA_DIGEST);
SET_FIELD(ctx->cstorm_st_context.flags, CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN, ISCSI_DEFAULT_HEADER_DIGEST);
{
}
/* now we need to configure the cdu-validation data */
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
/* get context */
"fcoe->ctx_virt=%p, fcoe->ctx_phys_high=%x, fcoe->ctx_phys_low=%x\n",
/* now we need to configure the cdu-validation data */
return LM_STATUS_SUCCESS;
}
{
{
return LM_STATUS_INVALID_PARAMETER;
}
/* save the miniport's conn id */
/* Allocate CID */
if (lm_status == LM_STATUS_PENDING)
{
}
else if (lm_status != LM_STATUS_SUCCESS)
{
/* failed to allocate CID */
return lm_status;
}
/* save the returned cid */
{
return LM_STATUS_PENDING; /* Too soon to initialize context */
}
return LM_STATUS_SUCCESS;
} /* lm_fc_alloc_con_resc */
struct _lm_device_t *pdev,
{
memcpy(&ramrod_params->fcoe_ofld.offload_kwqe1, &fcoe->ofld1, sizeof(struct fcoe_kwqe_conn_offload1));
memcpy(&ramrod_params->fcoe_ofld.offload_kwqe2, &fcoe->ofld2, sizeof(struct fcoe_kwqe_conn_offload2));
memcpy(&ramrod_params->fcoe_ofld.offload_kwqe3, &fcoe->ofld3, sizeof(struct fcoe_kwqe_conn_offload3));
memcpy(&ramrod_params->fcoe_ofld.offload_kwqe4, &fcoe->ofld4, sizeof(struct fcoe_kwqe_conn_offload4));
return lm_status;
}
struct _lm_device_t *pdev,
struct fcoe_kwqe_conn_enable_disable *enable)
{
memcpy(&ramrod_params->fcoe_enable.enable_disable_kwqe, enable, sizeof(struct fcoe_kwqe_conn_enable_disable));
return lm_status;
}
struct _lm_device_t *pdev,
struct fcoe_kwqe_conn_enable_disable *disable)
{
return lm_status;
}
struct _lm_device_t *pdev)
{
0);
return lm_status;
}
struct _lm_device_t *pdev,
struct fcoe_kwqe_stat *stat)
{
{
return LM_STATUS_RESOURCE;
}
return lm_status;
}
struct _lm_device_t *pdev,
{
0);
return lm_status;
}