/* lm_l5st.h revision d14abf155341d55053c76eeec58b787a456b753b */
/*******************************************************************************
* lm_l5st.h - L5 lm data structures
******************************************************************************/
#ifndef _LM_L5ST_H
#define _LM_L5ST_H
#include "everest_iscsi_constants.h"
#include "57xx_fcoe_constants.h"
#include "57xx_iscsi_constants.h"
#include "57xx_iscsi_rfc_constants.h"
#include "lm_l4st.h"
/* utility macros */
/* Read-modify-write a bit-field inside 'fieldName': clear the bits covered by
 * 'mask', then OR in 'newVal' shifted into position.
 * 'mask' must be a plain identifier (not an expression): the macro token-pastes
 * it with "_SHIFT" to locate the companion <mask>_SHIFT constant.
 * Both value uses of 'mask' are parenthesized so that a mask macro which
 * expands to an expression is not mis-bound by the '~' and '&' operators. */
#define SET_FIELD(fieldName, mask, newVal) ((fieldName) = (((fieldName) & ~(mask)) | (((newVal) << mask ## _SHIFT) & (mask))))
/*This is needed because the BD chain has not been set yet*/
/* The number of bds (EQEs) per page including the last bd which is used as
 * a pointer to the next bd page. */
/* NOTE(review): USABLE_BDS_PER_PAGE is defined in a project header not visible
 * here — presumably it divides the page size by the element size and accounts
 * for the next-page pointer when _is_next_ptr_needed is set; confirm there. */
#define ISCSI_EQES_PER_PAGE(_is_next_ptr_needed) (USABLE_BDS_PER_PAGE(sizeof(struct iscsi_kcqe),_is_next_ptr_needed))
#define FCOE_EQES_PER_PAGE(_is_next_ptr_needed) (USABLE_BDS_PER_PAGE(sizeof(struct fcoe_kcqe),_is_next_ptr_needed))
/* offset within a EQ page of the next page address */
/* max number of eq chains, everest convention */
/* max EQ pages limitation */
#define MAX_EQ_PAGES (256)
/* The number of useable bds per page. This number does not include
 * the last bd at the end of the page. */
//#define MAX_EQ_BD_PER_PAGE ((u32_t) (ISCSI_EQES_PER_PAGE - 1))
/* Max EQ entries per chain: entries per page minus one (the "-1" drops the
 * entry reserved as the next-page pointer) times the page-count limit. */
#define MAX_EQ_SIZE_FCOE(_is_next_ptr_needed) (MAX_EQ_PAGES * (FCOE_EQES_PER_PAGE(_is_next_ptr_needed) -1))
#define MAX_EQ_SIZE_ISCSI(_is_next_ptr_needed) (MAX_EQ_PAGES * (ISCSI_EQES_PER_PAGE(_is_next_ptr_needed) -1))
/* number of bits to shift to adjust the page_size from the kwqe_init2 to 0 */
#define ISCSI_PAGE_BITS_SHIFT (8)
/* layer mask value in the KCQEs */
/* pbl data */
typedef struct _lm_iscsi_pbl_t
{
void *base_virt;
void *pbl_virt_table;
typedef struct _lm_eq_addr_t
{
void *bd_chain_virt; /* virt addr of first page of the chain */
typedef struct _lm_eq_addr_save_t
{
/*******************************************************************************
* iSCSI info that will be allocated in the bind phase.
* These are the only parameters that stay valid when iscsi goes to hibernate.
******************************************************************************/
typedef struct _lm_iscsi_info_bind_alloc_t
{
typedef struct _lm_iscsi_statistics_t
{
/*******************************************************************************
* iSCSI info that will be allocated in the bind phase.
* These parameters become not valid when iscsi goes to hibernate.
******************************************************************************/
typedef struct _lm_iscsi_info_real_time_t
{
/* L5 eq */
#define LM_SC_EQ_BASE_CHAIN_INDEX(pdev) ((pdev)->iscsi_info.run_time.l5_eq_base_chain_idx) /* that is first L5 SB */
/* 'for loop' macros on L5 eq chains */
for ((eq_idx) = (pdev)->iscsi_info.run_time.l5_eq_base_chain_idx; (eq_idx) < (u32_t)((pdev)->iscsi_info.run_time.l5_eq_base_chain_idx + (pdev)->iscsi_info.run_time.l5_eq_chain_cnt); (eq_idx)++)
/*******************************************************************************
* iSCSI info.
******************************************************************************/
typedef struct _lm_iscsi_info_t
{
struct _lm_device_t *pdev;
// Parameters that stay valid in D3 and are allocated at bind time.
// Parameters that are not valid in D3 and are allocated after bind time.
{
struct iscsi_kwqe_conn_update kwqe;
};
typedef union _lm_iscsi_slow_path_phys_data_t
{
typedef struct _lm_iscsi_slow_path_data_t {
typedef struct _lm_iscsi_slow_path_request_t
{
/* Slow-path request type codes for iSCSI ramrods — presumably used as the
 * request-type tag on _lm_iscsi_slow_path_request_t; confirm against the
 * (elided) struct definition. */
#define SP_REQUEST_SC_INIT 0
#define SP_REQUEST_SC_ADD_NEW_CONNECTION 1
#define SP_REQUEST_SC_UPDATE 2
#define SP_REQUEST_SC_TERMINATE_OFFLOAD 3
#define SP_REQUEST_SC_TERMINATE1_OFFLOAD 4
#define SP_REQUEST_SC_QUERY 5
typedef struct _lm_iscsi_state_t
{
struct iscsi_context* ctx_virt;
void *db_data;
//iscsi_kwqe_t **pending_kwqes;
/* RAMRODs used for FCOE */
typedef union _lm_fcoe_slow_path_phys_data_t
{
struct fcoe_init_ramrod_params fcoe_init;
struct fcoe_stat_ramrod_params fcoe_stat;
typedef struct _lm_fcoe_state_t
{
struct fcoe_context* ctx_virt;
struct fcoe_kwqe_conn_offload1 ofld1;
struct fcoe_kwqe_conn_offload2 ofld2;
struct fcoe_kwqe_conn_offload3 ofld3;
struct fcoe_kwqe_conn_offload4 ofld4;
/*******************************************************************************
* FCoE info that will be allocated in the bind phase.
* These are the only parameters that stay valid when FCoE goes to hibernate.
******************************************************************************/
/* pbl data */
typedef struct _lm_fcoe_pbl_t
{
void *pbl_virt_table;
typedef struct _lm_fcoe_info_bind_alloc_t
{
/* FCOE Miniport guarantees that they don't post more than one KWQE at a time,
* so there's no need to allocate per-connection ramrod buffer, A single fcoe per-client
* ramrod buffer (pdev->fcoe_info.bind.ramrod_mem_phys) can be used for all KWQEs.*/
void *ramrod_mem_virt;
/*******************************************************************************
* FCoE info that will be allocated in the bind phase.
* These parameters become not valid when FCoE goes to hibernate.
******************************************************************************/
typedef struct _lm_fcoe_info_run_time_t
{
for ((eq_idx) = (pdev)->fcoe_info.run_time.fc_eq_base_chain_idx; (eq_idx) < (u32_t)((pdev)->fcoe_info.run_time.fc_eq_base_chain_idx + (pdev)->fcoe_info.run_time.num_of_cqs); (eq_idx)++)
/*******************************************************************************
* FCOE info.
******************************************************************************/
typedef struct _lm_fcoe_info_t
{
struct _lm_device_t *pdev;
// Parameters that stay valid in D3 and are allocated at bind time.
// Parameters that are not valid in D3 and are allocated after bind time.
#endif