#ifndef _LM5710_H
#define _LM5710_H
#ifndef __BIG_ENDIAN
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN
#endif
#else
#undef LITTLE_ENDIAN
#ifndef BIG_ENDIAN
#define BIG_ENDIAN
#endif
#ifndef BIG_ENDIAN_HOST
#define BIG_ENDIAN_HOST
#endif
#endif
#ifndef INLINE
#if DBG
#define INLINE
#else
#define INLINE __inline
#endif
#endif
#if !defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN)
#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
#endif
#define ECORE_NIV
#ifdef __LINUX
#include <linux/types.h>
#endif
#include "bcmtype.h"
#include "debug.h"
#include "igu_def.h"
#include "microcode_constants.h"
#include "fcoe_constants.h"
#include "toe_constants.h"
#include "tcp_constants.h"
#include "eth_constants.h"
#include "5710_hsi.h"
#include "lm5710_hsi.h"
#include "pcics_reg_driver.h"
#include "bigmac_addresses.h"
#include "misc_bits.h"
#include "emac_reg_driver.h"
#include "dmae_clients.h"
#include "prs_flags.h"
#include "57712_reg.h"
#include "grc_addr.h"
#include "bd_chain_st.h"
#include "lm_sp_req_mgr.h"
#include "license.h"
#include "mcp_shmem.h"
#include "lm_dcbx_mp.h"
#ifndef elink_dev
#define elink_dev _lm_device_t
#endif
#include "clc.h"
#define DEVICE_TYPE_PF 0
#define DEVICE_TYPE_VF 1
#define VT_NONE 0
#define VT_BASIC_VF 1
#define VT_CHANNEL_VF 2
#define VT_ASSIGNED_TO_VM_PF 3
#define VT_HW_CHANNEL_TYPE 0
#define VT_SW_CHANNEL_TYPE 1
#define IS_CHANNEL_VFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_VF) && ((pdev)->params.virtualization_type == VT_CHANNEL_VF))
#define IS_BASIC_VIRT_MODE_MASTER_PFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_PF) && ((pdev)->params.virtualization_type == VT_BASIC_VF))
#define IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_PF) && ((pdev)->params.virtualization_type == VT_CHANNEL_VF))
#define IS_ASSIGNED_TO_VM_PFDEV(pdev) (((pdev)->params.device_type == DEVICE_TYPE_PF) && ((pdev)->params.virtualization_type == VT_ASSIGNED_TO_VM_PF))
#define DBG_DMP_IS_ONLINE(pdev) IS_ASSIGNED_TO_VM_PFDEV(pdev)
#define IS_HW_CHANNEL_VIRT_MODE(pdev) (((pdev)->params.virtualization_type == VT_CHANNEL_VF) && ((pdev)->params.channel_type == VT_HW_CHANNEL_TYPE))
#define IS_SW_CHANNEL_VIRT_MODE(pdev) (((pdev)->params.virtualization_type == VT_CHANNEL_VF) && ((pdev)->params.channel_type == VT_SW_CHANNEL_TYPE))
#define IS_PFDEV(pdev) (((pdev)->pf_dev == NULL) && ((pdev)->params.device_type == DEVICE_TYPE_PF))
#define IS_VFDEV(pdev) (((pdev)->pf_dev != NULL) || ((pdev)->params.device_type == DEVICE_TYPE_VF))
#define PFDEV(pdev) (pdev)
#define LM_VF_MAX_RVFID_SIZE 6
#define LM_MAX_VF_CID_WND_SIZE 4
#define LM_MAX_VF_CHAINS_PER_PF (1 << LM_MAX_VF_CID_WND_SIZE)
#define LM_VF_CID_WND_SIZE(_pdev) (((_pdev)->hw_info.sriov_info.max_chains_per_vf) ? (_pdev)->hw_info.sriov_info.vf_cid_wnd_size : LM_MAX_VF_CID_WND_SIZE)
#define LM_VF_CHAINS_PER_PF(_pdev) (((_pdev)->hw_info.sriov_info.max_chains_per_vf) ? (_pdev)->hw_info.sriov_info.max_chains_per_vf : LM_MAX_VF_CHAINS_PER_PF)
#define LM_VF_NUM_CIDS_MASK(_pdev) ((1 << LM_VF_CID_WND_SIZE(_pdev)) - 1)
#define LM_VF_CID_BASE(_pdev) (1 << (LM_VF_MAX_RVFID_SIZE + LM_VF_CID_WND_SIZE(_pdev)))
#define LM_VF_MAX_RVFID_MASK ((1 << LM_VF_MAX_RVFID_SIZE) - 1)
#define VF_TO_PF_CID(pdev,cid) (cid)
#define PF_TO_VF_CID(pdev,cid) (cid)
#define GET_VF_Q_ID_FROM_PF_CID(cid) (cid & LM_VF_NUM_CIDS_MASK(pdev))
#define GET_ABS_VF_ID_FROM_PF_CID(cid) ((cid >> LM_VF_CID_WND_SIZE(pdev)) & LM_VF_MAX_RVFID_MASK)
#define VF_BAR0_IGU_OFFSET 0x0000
#define VF_BAR0_USDM_QUEUES_OFFSET 0x3000
#define VF_BAR0_CSDM_QUEUES_OFFSET 0x4100
#define VF_BAR0_XSDM_QUEUES_OFFSET 0x5200
#define VF_BAR0_TSDM_QUEUES_OFFSET 0x6300
#define VF_BAR0_USDM_GLOBAL_OFFSET 0x7400
#define VF_BAR0_CSDM_GLOBAL_OFFSET 0x7600
#define VF_BAR0_XSDM_GLOBAL_OFFSET 0x7800
#define VF_BAR0_TSDM_GLOBAL_OFFSET 0x7A00
#define VF_BAR0_DB_OFFSET 0x7C00
#define VF_BAR0_DB_SIZE 512
#define VF_BAR0_GRC_OFFSET 0x7E00
#define IS_MF_MODE_CAPABLE(pdev) ((CHIP_NUM(pdev) == CHIP_NUM_5711E) || \
(CHIP_NUM(pdev) == CHIP_NUM_5712E) || \
(CHIP_IS_E3(pdev)))
#define LM_TRIGGER_PCIE(_pdev) \
{ \
u32_t kuku = 0xcafecafe; \
REG_WR((_pdev), 0x2000, kuku); \
}
#define LM_GENERAL_ATTN_INTERRUPT_SET(_pdev,_func) REG_WR((_pdev),MISC_REG_AEU_GENERAL_ATTN_12 + 4*(_func),0x1)
#define MAX_PATH_NUM 2
#define E2_MAX_NUM_OF_VFS 64
#define E1H_FUNC_MAX 8
#define E2_FUNC_MAX 4
#define MAX_VNIC_NUM 4
#define MAX_FUNC_NUM 8
#define MAX_NDSB HC_SB_MAX_SB_E2
#define MAX_RSS_CHAINS (16)
#define MAX_HW_CHAINS (64)
/* Logical driver-client index.  Used throughout this header to index
 * per-client parameter arrays (mtu[], mc_table_size[], uc_table_size[],
 * map_client_to_cid[], ...).  LM_CLI_IDX_MAX doubles as the array size. */
typedef enum
{
LM_CLI_IDX_NDIS = 0,
LM_CLI_IDX_ISCSI,
LM_CLI_IDX_FCOE,
LM_CLI_IDX_FWD,
LM_CLI_IDX_OOO,
LM_CLI_IDX_MAX
} lm_cli_idx_t;
/* Resource-owner index.  Mirrors lm_cli_idx_t value-for-value and adds
 * LM_RESOURCE_COMMON (== LM_CLI_IDX_MAX) for resources shared by all
 * clients rather than owned by one of them. */
typedef enum
{
LM_RESOURCE_NDIS = LM_CLI_IDX_NDIS,
LM_RESOURCE_ISCSI = LM_CLI_IDX_ISCSI,
LM_RESOURCE_FCOE = LM_CLI_IDX_FCOE,
LM_RESOURCE_FWD = LM_CLI_IDX_FWD,
LM_RESOURCE_OOO = LM_CLI_IDX_OOO,
LM_RESOURCE_COMMON = LM_CLI_IDX_MAX,
} lm_resource_idx_t;
/* A slow-path command held on a pending list until it can be posted to
 * the slow-path queue (SQ).  'command' is the raw element eventually
 * written to the chip's SQ. */
struct sq_pending_command
{
d_list_entry_t list;  /* link in the pending-commands list */
u32_t cid;            /* connection id the command applies to */
u16_t type;
u8_t cmd;
u8_t flags;
#define SQ_PEND_RELEASE_MEM 0x1  /* free this entry once handled */
#define SQ_PEND_COMP_CALLED 0x2  /* completion callback already invoked */
struct slow_path_element command;
};
#include "lm_desc.h"
#include "listq.h"
#include "lm.h"
#include "mm.h"
#include "ecore_sp_verbs.h"
#ifdef VF_INVOLVED
#include "lm_vf.h"
#endif
#include "lm_stats.h"
#include "lm_dmae.h"
#if !defined(_B10KD_EXT)
#include "bcm_utils.h"
#endif
#define EVEREST 1
#define MAX_NON_RSS_CHAINS (4)
#define MAX_NON_RSS_FW_CLIENTS (4)
#define MAX_ETH_REG_CONS (MAX_RSS_CHAINS + MAX_NON_RSS_CHAINS)
#define MAX_ETH_REG_CHAINS (MAX_HW_CHAINS + MAX_NON_RSS_CHAINS)
#define MAX_ETH_CONS (MAX_ETH_REG_CONS + MAX_ETH_TX_ONLY_CONS)
#define MAX_ETH_CHAINS (MAX_ETH_REG_CHAINS + MAX_ETH_TX_ONLY_CONS)
#ifndef VF_INVOLVED
#define MAX_VF_ETH_CONS 0
#endif
#if defined(_VBD_) || defined (_VBD_CMD_)
#define MAX_TX_CHAIN(_pdev) (3U*LM_SB_CNT(_pdev) + MAX_NON_RSS_CHAINS)
#define MAX_RX_CHAIN(_pdev) (1U*LM_SB_CNT(_pdev) + MAX_NON_RSS_CHAINS)
#else
#define MAX_TX_CHAIN(_pdev) (MAX_ETH_CONS)
#define MAX_RX_CHAIN(_pdev) (MAX_ETH_REG_CONS)
#endif
#define ILT_NUM_PAGE_ENTRIES 3072
#define ILT_NUM_PAGE_ENTRIES_PER_FUNC 384
#define SEARCHER_TOTAL_MEM_REQUIRED_PER_CON 64
#define TIMERS_TOTAL_MEM_REQUIRED_PER_CON 8
#define QM_TOTAL_MEM_REQUIRED_PER_CON (32*4)
#ifndef LM_PAGE_BITS
#define LM_PAGE_BITS 12
#endif
#define LM_PAGE_SIZE (1 << LM_PAGE_BITS)
#define LM_PAGE_MASK (LM_PAGE_SIZE - 1)
#define LM_DQ_CID_BITS 7
#define LM_DQ_CID_SIZE (1 << LM_DQ_CID_BITS)
#define LM_DQ_CID_MASK (LM_DQ_CID_SIZE - 1)
#define LM_VF_DQ_CID_BITS 3
#define LM_VF_DQ_CID_SIZE (1 << LM_VF_DQ_CID_BITS)
#define LM_VF_DQ_CID_MASK (LM_VF_DQ_CID_SIZE - 1)
#define LM_ILT_ALIGNMENT 0x1000
#define LM_ILT_ALIGNMENT_MASK (LM_ILT_ALIGNMENT - 1)
#define LM_TIMERS_SCAN_POLL 20000
#define LM_TIMERS_SCAN_TIME 1000
#define LM_UNLOAD_TIME 100000
#if !defined(_VBD_CMD_)
#define LM_CID_RETURN_TIME 2000
#define LM_CID_RETURN_TIME_EMUL 10000
#else
#define LM_CID_RETURN_TIME 0
#define LM_CID_RETURN_TIME_EMUL 0
#endif
#define LM_FREE_CID_DELAY_TIME(pdev) ((pdev)->params.l4_free_cid_delay_time)
#define LM_EMUL_FACTOR 2000
#define LM_FPGA_FACTOR 200
#ifndef CACHE_LINE_SIZE_MASK
#define CACHE_LINE_SIZE_MASK 0x3f
#define CACHE_LINE_SIZE (CACHE_LINE_SIZE_MASK + 1)
#endif
#define NVRAM_1MB_SIZE 0x20000
#define NVRAM_PAGE_SIZE 256
#ifndef MAX_PACKETS_PER_INDICATION
#define MAX_PACKETS_PER_INDICATION 50
#endif
#ifndef MAX_FRAG_CNT
#define MAX_FRAG_CNT 33
#endif
#ifndef MAX_FRAG_CNT_PER_TB
#define MAX_FRAG_CNT_PER_TB 33
#endif
#define MAX_FRAGMENT_SIZE 0xf000
#define MAXIMUM_PACKET_SIZE 9632
#define CHIP_STATS_BUFFER_SIZE ((sizeof(statistics_block_t) + \
CACHE_LINE_SIZE_MASK) & \
~CACHE_LINE_SIZE_MASK)
#define STATUS_BLOCK_INVALID_TYPE 0
#define STATUS_BLOCK_SP_SL_TYPE 1
#define STATUS_BLOCK_NORMAL_TYPE 2
#define STATUS_BLOCK_NORMAL_SL_TYPE 3
#define LM_DEF_NO_EVENT_ACTIVE 0x00000000
#define LM_DEF_ATTN_ACTIVE (1L<<0)
#define LM_SP_ACTIVE (LM_DEF_USTORM_ACTIVE | LM_DEF_CSTORM_ACTIVE | LM_DEF_XSTORM_ACTIVE | LM_DEF_TSTORM_ACTIVE)
#define LM_DEF_USTORM_ACTIVE (1L<<1)
#define LM_DEF_CSTORM_ACTIVE (1L<<2)
#define LM_DEF_XSTORM_ACTIVE (1L<<3)
#define LM_DEF_TSTORM_ACTIVE (1L<<4)
#define LM_DEF_EVENT_MASK 0xffff
#define LM_NON_DEF_USTORM_ACTIVE (1L<<16)
#define LM_NON_DEF_CSTORM_ACTIVE (1L<<17)
#define LM_NON_DEF_EVENT_MASK 0xffff0000
#define ATTN_NIG_FOR_FUNC (1L << 8)
#define ATTN_SW_TIMER_4_FUNC (1L << 9)
#define GPIO_2_FUNC (1L << 10)
#define GPIO_3_FUNC (1L << 11)
#define GPIO_4_FUNC (1L << 12)
#define ATTN_GENERAL_ATTN_1 (1L << 13)
#define ATTN_GENERAL_ATTN_2 (1L << 14)
#define ATTN_GENERAL_ATTN_3 (1L << 15)
#define ATTN_NIG_FOR_FUNC1 (1L << 8)
#define ATTN_SW_TIMER_4_FUNC1 (1L << 9)
#define GPIO_2_FUNC1 (1L << 10)
#define GPIO_3_FUNC1 (1L << 11)
#define GPIO_4_FUNC1 (1L << 12)
#define ATTN_GENERAL_ATTN_4 (1L << 13)
#define ATTN_GENERAL_ATTN_5 (1L << 14)
#define ATTN_GENERAL_ATTN_6 (1L << 15)
#define ATTN_HARD_WIRED_MASK 0xff00
#define HC_SEG_ACCESS_DEF 0
#define HC_SEG_ACCESS_ATTN 4
#define HC_SEG_ACCESS_NORM 0
#define E2_STATUS_BLOCK_BUFFER_SIZE ((sizeof(struct host_hc_status_block_e2) + \
CACHE_LINE_SIZE_MASK) & \
~CACHE_LINE_SIZE_MASK)
#define E1X_STATUS_BLOCK_BUFFER_SIZE ((sizeof(struct host_hc_status_block_e1x) + \
CACHE_LINE_SIZE_MASK) & \
~CACHE_LINE_SIZE_MASK)
#define DEF_STATUS_BLOCK_BUFFER_SIZE ((sizeof(struct host_sp_status_block) + \
CACHE_LINE_SIZE_MASK) & \
~CACHE_LINE_SIZE_MASK)
#define DBG_SB_ID(port,stormID,cpuID) (((port) << 7) | ((stormID) << 5) | (cpuID))
#define DBG_DEF_SB_ID(port,stormID,vnicID) (((port) << 7) | ((stormID) << 5) | (0x10+vnicID))
#define SB_RX_INDEX(pdev, index) ((pdev)->vars.u_hc_ack[index])
#define SB_TX_INDEX(pdev, index) ((pdev)->vars.c_hc_ack[index])
#define SB_INDEX_OF_USTORM(pdev, index) ((pdev)->vars.u_hc_ack[index])
#define DEF_SB_INDEX(pdev) ((pdev)->vars.hc_def_ack)
#define DEF_SB_INDEX_OF_ATTN(pdev) ((pdev)->vars.attn_def_ack)
#define NUM_OF_ELT_PAGES 16
#define DEF_STATUS_BLOCK_IGU_INDEX 16
#define DEF_STATUS_BLOCK_INDEX HC_SP_SB_ID
#define MAX_DYNAMIC_ATTN_GRPS 8
#define MAX_NUM_BAR 3
#define MAX_NUM_VF_BAR 3
#define BAR_0 0
#define BAR_1 1
#define BAR_2 2
#define RSS_INDIRECTION_TABLE_SIZE 0x80
#define RSS_HASH_KEY_SIZE 0x28
#define LM_RX_BD_CQ_SIZE_RATIO (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
#ifndef OFFSETOF
#define OFFSETOF(_s, _m) ((u32_t) PTR_SUB(&((_s *) 0)->_m, (u8_t *) 0))
#endif
#define WORD_ALIGNED_OFFSETOF(_s, _m) (OFFSETOF(_s, _m) & ~0x03)
#define CEIL_DIV( a, b ) ((a / b) + ( (a % b) ? 1 : 0))
u32_t upper_align_power_of_2(IN const u16_t num, IN const u8_t num_bits_supported);
#define GET_ATTN_CHNG_GROUPS(_pdev, _attn_bits, _attn_ack, _asserted_grps_ptr, _deasserted_grps_ptr) \
{ \
u16_t _state = (_pdev)->vars.attn_state; \
\
DbgBreakIf(~(_attn_bits ^ _attn_ack) & (_attn_bits ^ _state)); \
\
*(_asserted_grps_ptr) = _attn_bits & ~_attn_ack & ~_state; \
*(_deasserted_grps_ptr) = ~_attn_bits & _attn_ack & _state; \
}
#define HANDLE_UNICORE_INT_ASSERTED(_pdev, _nig_reg_name, _unicore_intr_val_ptr, _unicore_intr_name, _nig_status_port_ptr, _is_unicore_assrtd_ptr, _unicore_intr_size) \
{ \
*(_unicore_intr_val_ptr) = REG_RD(_pdev, _nig_reg_name); \
*(_is_unicore_assrtd_ptr) = ( ( *(_unicore_intr_val_ptr) << _unicore_intr_size) ^ (*(_nig_status_port_ptr) & _unicore_intr_name)); \
\
if (*(_is_unicore_assrtd_ptr)) \
{ \
DbgMessage(_pdev, WARN, "lm_handle_assertion_processing(): " #_unicore_intr_name " asserted!\n"); \
*(_nig_status_port_ptr) = (*(_nig_status_port_ptr) & ~(_unicore_intr_name)) | (*(_unicore_intr_val_ptr) << _unicore_intr_size); \
} \
}
/* Driver-maintained RX statistics (currently only the aborted count). */
typedef struct _lm_rx_statistics_t
{
u32_t aborted;
} lm_rx_stats_t;
/* A driver-owned contiguous buffer used to coalesce a TX packet whose
 * fragment list cannot be sent as-is (see _lm_tx_pkt_info_t.coalesce_buf). */
typedef struct _lm_coalesce_buffer_t
{
s_list_entry_t link;   /* link in the free-buffer pool */
u8_t *mem_virt;        /* virtual address of the buffer */
u32_t buf_size;        /* size of mem_virt in bytes */
lm_frag_list_t frags;  /* single-fragment list describing the buffer */
} lm_coalesce_buffer_t;
/* Per-L2-client connection configuration (one entry per chain in
 * lm_params_t.l2_cli_con_params[]). */
typedef struct _lm_client_con_params_t
{
u32_t mtu;           /* client MTU, excludes Ethernet header */
u32_t lah_size;      /* lookahead-header size */
u32_t num_rx_desc;
u32_t num_tx_desc;
u8_t attributes;     /* bitwise OR of LM_CLIENT_ATTRIBUTES_* below */
#define LM_CLIENT_ATTRIBUTES_RX (0x1)
#define LM_CLIENT_ATTRIBUTES_TPA (0x2)
#define LM_CLIENT_ATTRIBUTES_TX (0x4)
#define LM_CLIENT_ATTRIBUTES_REG_CLI (0x8)
} lm_client_con_params_t;
/* Driver descriptor for a single TX or RX packet.  The u1 union holds
 * direction-specific state; which arm is valid depends on the queue the
 * packet is posted to. */
typedef struct _lm_packet_t
{
s_list_entry_t link;   /* link in the active/free packet list */
lm_status_t status;    /* completion status */
u32_t size;            /* packet size in bytes */
union _lm_pkt_info_t
{
struct _lm_tx_pkt_info_t
{
lm_coalesce_buffer_t *coalesce_buf;  /* non-NULL if frags were coalesced */
u16_t next_bd_idx;
u16_t bd_used;       /* number of BDs consumed by this packet */
u8_t span_pages;
u8_t _pad1;
u8_t hdr_nbds;
u16_t reserve;
#if DBG
struct eth_tx_bd *dbg_start_bd;
u16_t dbg_start_bd_idx;
u16_t dbg_frag_cnt;
#endif
} tx;
struct _lm_rx_pkt_info_t
{
u16_t next_bd_idx;
u8_t qidx;           /* RX queue index this packet belongs to */
#define LM_MAX_SGES_FOR_PACKET 1
/* entry 0 is the BD buffer; the rest are SGE buffers */
lm_address_t mem_phys[1+LM_MAX_SGES_FOR_PACKET];
u32_t* hash_val_ptr;
#if DBG
struct eth_rx_sge *dbg_sge;
struct eth_rx_bd *dbg_bd;
#endif
union eth_sgl_or_raw_data sgl_or_raw_data;
} rx;
} u1;
lm_pkt_tx_info_t* l2pkt_tx_info;
lm_pkt_rx_info_t* l2pkt_rx_info;
} lm_packet_t;
DECLARE_FRAG_LIST_BUFFER_TYPE(lm_packet_frag_list_t, MAX_FRAG_CNT);
/* Driver-side traffic classification used for DCBX priority mapping.
 * NOTE(review): "traafic" is a historical typo preserved in the public
 * type name for source compatibility. */
typedef enum _driver_traafic_type_t
{
LLFC_DRIVER_TRAFFIC_TYPE_NW = 0,
LLFC_DRIVER_TRAFFIC_TYPE_FCOE,
LLFC_DRIVER_TRAFFIC_TYPE_ISCSI,
LLFC_DRIVER_TRAFFIC_TYPE_MAX
}driver_traafic_type_t;
/* DCBX application-priority parameters: per-traffic-type priority map. */
typedef struct _app_params_t
{
u32_t enabled;
u32_t traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
}app_params_t;
#define DCBX_COS_MAX_NUM_E2E3A0 (ELINK_DCBX_E2E3_MAX_NUM_COS)
#define DCBX_COS_MAX_NUM_E3B0 (min(3,ELINK_DCBX_E3B0_MAX_NUM_COS))
#define DCBX_COS_MAX_NUM 3
/* Per-COS DCBX ETS parameters: either a bandwidth share (bw_tbl) or a
 * strict priority slot (s_pri), plus the priorities mapped to this COS. */
typedef struct _dcbx_cos_params_t
{
u32_t bw_tbl;        /* ETS bandwidth allocation for this COS */
u32_t pri_bitmask;   /* bitmask of priorities assigned to this COS */
u8_t s_pri;          /* strict-priority rank, or DCBX_S_PRI_INVALID */
#define DCBX_S_PRI_INVALID (DCBX_COS_MAX_NUM)
#define DCBX_S_PRI_COS_HIGHEST (0)
#define DCBX_S_PRI_COS_NEXT_LOWER_PRI(_sp) ((_sp) + 1)
u8_t pauseable;      /* TRUE if all priorities in this COS are pauseable */
}dcbx_cos_params_t;
/* DCBX ETS (priority-group) parameters for the port. */
typedef struct _pg_params_t
{
u32_t enabled;
/* Fix: the macro previously expanded IS_DCB_ENABLED(pdev) — referencing a
 * variable named 'pdev' from the expansion site instead of the macro's own
 * _pdev argument.  Use the parameter so the macro is hygienic regardless of
 * the caller's local names. */
#define LM_DCBX_ETS_IS_ENABLED(_pdev) ((TRUE == IS_DCB_ENABLED(_pdev)) && \
(TRUE == ((_pdev)->params.dcbx_port_params.ets.enabled)))
u8_t num_of_cos;     /* number of valid entries in cos_params[] */
dcbx_cos_params_t cos_params[DCBX_COS_MAX_NUM];
}pg_params_t;
/* DCBX PFC (priority flow control) parameters, plus helper macros that
 * classify a priority bitmask as pauseable / non-pauseable / mixed. */
typedef struct _pfc_params_t
{
u32_t enabled;
u32_t priority_non_pauseable_mask;  /* bit per priority that must NOT pause */
#define LM_DCBX_PFC_PRI_NON_PAUSE_MASK(_pdev) (_pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask)
#define LM_DCBX_PFC_PRI_PAUSE_MASK(_pdev) ((u8_t)(~LM_DCBX_PFC_PRI_NON_PAUSE_MASK(_pdev)))
#define LM_DCBX_PFC_PRI_MASK (0xFF)
#define LM_DCBX_PFC_PRI_GET_PAUSE(_pdev,_pg_pri) (_pg_pri & LM_DCBX_PFC_PRI_PAUSE_MASK(_pdev))
#define LM_DCBX_PFC_PRI_GET_NON_PAUSE(_pdev,_pg_pri) (LM_DCBX_PFC_PRI_NON_PAUSE_MASK(_pdev) & _pg_pri)
#define LM_DCBX_IS_PFC_PRI_SOME_PAUSE(_pdev,_pg_pri) (0 != LM_DCBX_PFC_PRI_GET_PAUSE(_pdev,_pg_pri))
#define LM_DCBX_IS_PFC_PRI_ONLY_PAUSE(_pdev,_pg_pri) (_pg_pri == LM_DCBX_PFC_PRI_GET_PAUSE(_pdev,_pg_pri))
#define LM_DCBX_IS_PFC_PRI_ONLY_NON_PAUSE(_pdev,_pg_pri) (_pg_pri == LM_DCBX_PFC_PRI_GET_NON_PAUSE(_pdev,_pg_pri))
#define LM_DCBX_IS_PFC_PRI_MIX_PAUSE(_pdev,_pg_pri) (!(LM_DCBX_IS_PFC_PRI_ONLY_NON_PAUSE(_pdev,_pg_pri) || \
LM_DCBX_IS_PFC_PRI_ONLY_PAUSE(_pdev,_pg_pri)))
}pfc_params_t;
/* Aggregated per-port DCBX state: PFC, ETS and application priorities. */
typedef struct _dcbx_port_params_t
{
u32_t dcbx_enabled;
pfc_params_t pfc;
pg_params_t ets;
app_params_t app;
}dcbx_port_params_t;
/* Which DCBX MIB to read from shared memory: our local MIB or the
 * link partner's remote MIB. */
typedef enum
{
DCBX_READ_LOCAL_MIB,
DCBX_READ_REMOTE_MIB
}dcbx_read_mib_type;
/* Lifecycle of the deferred DCBX-update work item. */
typedef enum
{
DCBX_UPDATE_TASK_STATE_FREE,      /* no update pending */
DCBX_UPDATE_TASK_STATE_SCHEDULE,  /* update queued, not yet processed */
DCBX_UPDATE_TASK_STATE_HANDLED    /* update processed */
} dcbx_update_task_state;
/* FW non-default status-block layout: one or two state machines, and
 * for two, whether they share a single IGU entry or use two. */
typedef enum
{
LM_SINGLE_SM = 0,
LM_DOUBLE_SM_SINGLE_IGU = 1,
LM_DOUBLE_SM_DOUBLE_IGU = 2
} fw_ndsb_type;
/* E3 class-of-service mode: 3-COS or 6-COS operation. */
typedef enum
{
LM_COS_MODE_COS3 = 0,
LM_COS_MODE_COS6 = 1
} lm_cos_modes ;
/* How network COS assignment is decided: overridden per-packet or static. */
typedef enum
{
LM_COS_MODE_OVERRIDE = 0,
LM_COS_MODE_STATIC = 1
} lm_network_cos_modes ;
/* AutoGrEEEn (EEE auto-negotiation) policy.  NOTE(review): the triple-E
 * spelling is intentional/legacy and part of the public identifiers.
 * LM_AUTOGREEEN_NVRAM defers the choice to the NVRAM configuration. */
typedef enum
{
LM_AUTOGREEEN_DISABLED = 0,
LM_AUTOGREEEN_ENABLED = 1,
LM_AUTOGREEEN_NVRAM = 2
} lm_autogreeen_t ;
#define I2C_BINARY_SIZE 256
#define I2C_A2_DYNAMIC_OFFSET 0
#define I2C_A2_DYNAMIC_SIZE 128
#define I2C_A2_STATIC_OFFSET 128
#define I2C_A2_STATIC_SIZE 128
/* SFP+ module I2C EEPROM sections (addresses A0h and A2h). */
typedef enum
{
I2C_SECTION_A0 = 0,
I2C_SECTION_A2 = 1,
I2C_SECTION_MAX = 2
} i2c_section_t;
/* Cached raw I2C EEPROM data for each section, with the status and
 * timestamp of the last query. */
typedef struct _i2c_binary_info_t
{
u32_t last_query_status[I2C_SECTION_MAX];  /* per-section query status */
u64_t last_query_ts;                       /* timestamp of last query */
u32_t reserved[10];
u8_t ax_data[I2C_SECTION_MAX][I2C_BINARY_SIZE];  /* raw A0/A2 contents */
} i2c_binary_info_t;
/* Per-device driver parameters: configuration taken from registry/NVRAM
 * plus resource-layout values computed at init time.  The accessor macros
 * interleaved below read fields of this structure through the pdev. */
typedef struct _lm_params_t
{
/* ---- MTU and buffer sizing ---- */
u32_t mtu[LM_CLI_IDX_MAX];
#define LM_MTU_INVALID_VALUE (0xFFFFFFFF)
u32_t mtu_max;
#define MAX_CLI_PACKET_SIZE(pdev, chain_idx) ((u16_t)(pdev)->params.l2_cli_con_params[(chain_idx)].mtu + (pdev)->params.rcv_buffer_offset + ETHERNET_PACKET_HEADER_SIZE+ ETHERNET_VLAN_TAG_SIZE + ETHERNET_LLC_SNAP_SIZE + CACHE_LINE_SIZE)
#define CLI_MTU_WITH_ETH_HDR_SIZE(pdev, chain_idx) ((u16_t)(pdev)->params.l2_cli_con_params[(chain_idx)].mtu + ETHERNET_PACKET_HEADER_SIZE)
#define MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx) ((MAX_CLI_PACKET_SIZE(pdev, chain_idx) + CACHE_LINE_SIZE_MASK) & \
~CACHE_LINE_SIZE_MASK)
#define LM_MTU_NDIS_DEFAULT (1500)
#define LM_MTU_ISCSI_DEFAULT (1500)
#define LM_MTU_FCOE_DEFAULT (2500)
#define LM_MTU_FWD_DEFAULT (LM_MTU_NDIS_DEFAULT)
#define LM_MTU_FLOW_CTRL_TX_THR (5000)
#define LM_MTU_MAX_DEFAULT (1500)
#define LM_MTU_MAX (9600)
u8_t mac_addr[8];
/* ---- L2 chain descriptor counts ---- */
u32_t l2_rx_desc_cnt[1+MAX_NON_RSS_CHAINS];
u32_t l2_tx_bd_page_cnt[1+MAX_NON_RSS_CHAINS];
u32_t l2_tx_coal_buf_cnt[1+MAX_NON_RSS_CHAINS];
lm_client_con_params_t l2_cli_con_params[3*MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
u32_t rcv_buffer_offset;
u32_t network_type;
#define LM_NETOWRK_TYPE_LAN 0
#define LM_NETOWRK_TYPE_WAN 1
#define LM_NETOWRK_TYPE_AUTO 2
u32_t max_cwnd_wan;
u32_t max_cwnd_lan;
u32_t cid_allocation_mode;
#define LM_CID_ALLOC_REGULAR 1
#define LM_CID_ALLOC_DELAY 2
#define LM_CID_ALLOC_NUM_MODES 2
/* ---- interrupt coalescing / dynamic HC ---- */
u32_t ndsb_type;
u32_t int_coalesing_mode;
#define LM_INT_COAL_NONE 0
#define LM_INT_COAL_PERIODIC_SYNC 1
#define LM_INT_COAL_NUM_MODES 2
u32_t int_per_sec_rx_override;
u32_t int_per_sec_rx[HC_USTORM_SB_NUM_INDICES];
u32_t int_per_sec_tx_override;
u32_t int_per_sec_tx[HC_CSTORM_SB_NUM_INDICES];
u32_t vf_int_per_sec_rx[3];
u32_t vf_int_per_sec_tx[3];
#define LM_VF_INT_LOW_IDX 0
#define LM_VF_INT_MEDIUM_IDX 1
#define LM_VF_INT_HIGH_IDX 2
u32_t enable_dynamic_hc[HC_DHC_SB_NUM_INDICES];
u32_t hc_timeout0[2][HC_DHC_SB_NUM_INDICES];
u32_t hc_timeout1[2][HC_DHC_SB_NUM_INDICES];
u32_t hc_timeout2[2][HC_DHC_SB_NUM_INDICES];
u32_t hc_timeout3[2][HC_DHC_SB_NUM_INDICES];
u32_t hc_threshold0[2];
u32_t hc_threshold1[2];
u32_t hc_threshold2[2];
u32_t l2_dynamic_hc_min_bytes_per_packet;
/* ---- L4 (TOE) resources ---- */
u32_t l4_hc_scaling_factor;
u32_t l4_hc_ustorm_thresh;
u32_t l4_scq_page_cnt;
u32_t l4_rcq_page_cnt;
u32_t l4_grq_page_cnt;
u32_t l4_preallocate_cnt;
u32_t l4_preallocate_blk_size;
u32_t l4_preallocate_retry_cnt;
#if defined(_VBD_) || defined(_VBD_CMD_)
#define NUM_BUFS_FOR_GRQS(pdev) \
(pdev)->params.l4_grq_page_cnt*512*(LM_TOE_RSS_CHAIN_CNT(pdev))
#else
#define NUM_BUFS_FOR_GRQS(pdev) \
(pdev)->params.l4_grq_page_cnt*512*1
#endif
u32_t l4_tx_chain_page_cnt;
u32_t l4_rx_chain_page_cnt;
u32_t l4_gen_buf_size;
u32_t l4_history_cqe_cnt;
u32_t l4_ignore_grq_push_enabled;
u32_t l4cli_flags;
u32_t l4cli_ticks_per_second;
u32_t l4cli_ack_frequency;
u32_t l4cli_delayed_ack_ticks;
u32_t l4cli_max_retx;
u32_t l4cli_doubt_reachability_retx;
u32_t l4cli_sws_prevention_ticks;
u32_t l4cli_dup_ack_threshold;
u32_t l4cli_push_ticks;
u32_t l4cli_nce_stale_ticks;
u32_t l4cli_starting_ip_id;
/* ---- test / debug modes ---- */
u32_t test_mode;
#define TEST_MODE_DISABLED 0x00
#define TEST_MODE_OBSOLETE_0 0x01
#define TEST_MODE_OBSOLETE_1 0x02
#define TEST_MODE_VERIFY_RX_CRC 0x10
#define TEST_MODE_RX_BD_TAGGING 0x20
#define TEST_MODE_TX_BD_TAGGING 0x40
#define TEST_MODE_LOG_REG_ACCESS 0x80
#define TEST_MODE_SAVE_DUMMY_DMA_DATA 0x0100
#define TEST_MODE_INIT_GEN_BUF_DATA 0x0200
#define TEST_MODE_DRIVER_PULSE_ALWAYS_ALIVE 0x0400
#define TEST_MODE_IGNORE_SHMEM_SIGNATURE 0x0800
#define TEST_MODE_NO_MCP 0x1000
/* ---- offload / link capabilities ---- */
lm_offload_t ofld_cap;
lm_offload_t ofld_cap_to_ndis;
lm_wake_up_mode_t wol_cap;
lm_flow_control_t flow_ctrl_cap;
lm_eee_policy_t eee_policy;
lm_medium_t req_medium;
u32_t selective_autoneg;
#define SELECTIVE_AUTONEG_OFF 0
#define SELECTIVE_AUTONEG_SINGLE_SPEED 1
#define SELECTIVE_AUTONEG_ENABLE_SLOWER_SPEEDS 2
u32_t wire_speed;
u32_t phy_int_mode;
#define PHY_INT_MODE_AUTO 0
#define PHY_INT_MODE_MI_INTERRUPT 1
#define PHY_INT_MODE_LINK_READY 2
#define PHY_INT_MODE_AUTO_POLLING 3
u32_t link_chng_mode;
#define LINK_CHNG_MODE_AUTO 0
#define LINK_CHNG_MODE_USE_STATUS_REG 1
#define LINK_CHNG_MODE_USE_STATUS_BLOCK 2
u32_t phy_priority_mode;
#define PHY_PRIORITY_MODE_HW_DEF 0
#define PHY_PRIORITY_MODE_10GBASET 1
#define PHY_PRIORITY_MODE_SERDES 2
#define PHY_PRIORITY_MODE_HW_PIN 3
u32_t interrupt_mode;
#define LM_INT_MODE_INTA 0
#define LM_INT_MODE_SIMD 1
#define LM_INT_MODE_MIMD 2
u32_t igu_access_mode;
u32_t sw_config;
#define LM_SWCFG_1G 0
#define LM_SWCFG_10G 1
#define LM_SWCFG_AD 2
#define LM_SWCFG_OT_AD 3
#define LM_SWCFG_HW_DEF 4
/* ---- multi-function (MF) mode ---- */
u8_t mf_mode;
u8_t sd_mode;
u8_t pad[2];
#define IS_MF_AFEX(_pdev) IS_MF_AFEX_MODE(_pdev)
#define IS_MF_AFEX_MODE(_pdev) (IS_MULTI_VNIC(_pdev) && ((_pdev)->params.mf_mode == MULTI_FUNCTION_AFEX))
#define IS_MF_SI_MODE(_pdev) (IS_MULTI_VNIC(_pdev) && ((_pdev)->params.mf_mode == MULTI_FUNCTION_SI))
#define IS_MF_SD_MODE(_pdev) (IS_MULTI_VNIC(_pdev) && ((_pdev)->params.mf_mode == MULTI_FUNCTION_SD))
#define IS_SD_REGULAR_MODE(_pdev) (IS_MF_SD_MODE(_pdev) && ((_pdev)->params.sd_mode == SD_REGULAR_MODE))
#define IS_SD_UFP_MODE(_pdev) (IS_MF_SD_MODE(_pdev) && ((_pdev)->params.sd_mode == SD_UFP_MODE))
#define IS_SD_BD_MODE(_pdev) (IS_MF_SD_MODE(_pdev) && ((_pdev)->params.sd_mode == SD_BD_MODE))
lm_autogreeen_t autogreeen;
u32_t tmr_reload_value1;
/* ---- connection-count / context layout ---- */
u32_t max_func_connections;
u32_t max_supported_toe_cons;
u32_t max_func_toe_cons;
u32_t max_func_rdma_cons;
u32_t max_func_iscsi_cons;
u32_t max_func_fcoe_cons;
u32_t max_fcoe_task;
u32_t max_eth_including_vfs_conns;
u32_t context_line_size;
u32_t context_waste_size;
u32_t num_context_in_page;
u32_t client_page_size;
u32_t elt_page_size;
u32_t ilt_client_page_size;
u32_t cfc_last_lcid;
u32_t bandwidth_min;
u32_t bandwidth_max;
/* ---- function / port / path identity ---- */
u8_t pfunc_rel;
#define PORT_ID_PARAM_FUNC_REL(_pfunc_rel) ((_pfunc_rel) & 1)
#define PORT_ID_PARAM_FUNC_ABS(_chip_num, _port_mode, _pfunc_abs) (lm_get_port_id_from_func_abs(_chip_num, _port_mode, _pfunc_abs))
#define PORT_ID(pdev) (PORT_ID_PARAM_FUNC_REL(PFDEV(pdev)->params.pfunc_rel))
#define FUNC_ID(pdev) (PFDEV(pdev)->params.pfunc_rel)
#define VNIC_ID_PARAM_FUNC_REL(_pfunc_rel) ((_pfunc_rel) >> 1)
#define VNIC_ID(pdev) (VNIC_ID_PARAM_FUNC_REL(PFDEV(pdev)->params.pfunc_rel))
#define LM_FOREACH_FUNC_IN_PORT(pdev, func) \
for ((func) = PORT_ID(pdev); (func) < E1H_FUNC_MAX; (func)+=2)
#define LM_PFS_PER_PORT(pdev) \
((LM_CHIP_PORT_MODE_4 == CHIP_PORT_MODE(pdev)) ? 2 : 4 )
#define LM_FIRST_ABS_FUNC_IN_PORT(pdev) \
((LM_CHIP_PORT_MODE_NONE == CHIP_PORT_MODE(pdev))? PORT_ID(pdev) : (PATH_ID(pdev)+2*PORT_ID(pdev)))
#define LM_FOREACH_ABS_FUNC_IN_PORT(pdev, func) \
for ( (func) = LM_FIRST_ABS_FUNC_IN_PORT(pdev) ; (func) < MAX_FUNC_NUM; (func) += (MAX_FUNC_NUM/LM_PFS_PER_PORT(pdev)) )
#define FUNC_MAILBOX_ID_PARAM(_port,_vnic,_chip_num, _port_mode) ((_port) + (_vnic) * ((CHIP_IS_E1x_PARAM(_chip_num) || (_port_mode == LM_CHIP_PORT_MODE_4))? 2 : 1))
#define FUNC_MAILBOX_ID(pdev) (FUNC_MAILBOX_ID_PARAM(PORT_ID(pdev) ,VNIC_ID(pdev),CHIP_NUM(pdev), CHIP_PORT_MODE(pdev)))
u8_t pfunc_abs;
#define ABS_FUNC_ID(pdev) (PFDEV(pdev)->params.pfunc_abs)
#define LM_FOREACH_FUNC_MAILBOX_IN_PORT(pdev, func) \
for ((func) = PORT_ID(pdev); (func) < (CHIP_IS_E1x(pdev) ? E1H_FUNC_MAX : E2_FUNC_MAX); (func)+= (CHIP_IS_E1x(pdev) ? 2 : 1))
u8_t path_id;
#define PATH_ID(pdev) (PFDEV(pdev)->params.path_id)
#define SHMEM_BASE(pdev) (pdev->hw_info.shmem_base)
u8_t vnics_per_port;
u8_t multi_vnics_mode;
u8_t path_has_ovlan;
u8_t pfunc_mb_id;
u8_t _pad;
#define IS_MULTI_VNIC(pdev) (PFDEV(pdev)->params.multi_vnics_mode)
#define VNICS_PER_PORT(pdev) (PFDEV(pdev)->params.vnics_per_port)
#define VNICS_PER_PATH(pdev) (PFDEV(pdev)->params.vnics_per_port * ((LM_CHIP_PORT_MODE_4 == CHIP_PORT_MODE(pdev))? 2 : 1 ))
u16_t ovlan;
u16_t sd_vlan_eth_type;
u8_t min_bw[MAX_VNIC_NUM];
u8_t max_bw[MAX_VNIC_NUM];
/* ---- status blocks ---- */
u8_t sb_cnt;
#define LM_SB_CNT(pdev) ((pdev)->params.sb_cnt)
#ifdef _VBD_
#define LM_NON_RSS_SB(pdev) (LM_SB_CNT(pdev) - 1)
#else
#define LM_NON_RSS_SB(pdev) (LM_MAX_RSS_CHAINS(pdev) - 1)
#endif
#define LM_NON_RSS_CHAIN(pdev) (LM_MAX_RSS_CHAINS(pdev) - 1)
#define LM_OOO_SB(pdev) (LM_NON_RSS_SB(pdev))
#define LM_SB_ID_VALID(pdev, sb_id) ((sb_id) < LM_SB_CNT(pdev))
#define LM_FOREACH_SB_ID(pdev, sb_id) \
for ((sb_id) = 0; (sb_id) < LM_SB_CNT(pdev); (sb_id)++)
u8_t max_pf_sb_cnt;
u8_t fw_sb_cnt;
u8_t fw_base_qzone_cnt;
u8_t fw_qzone_id[PXP_REG_HST_ZONE_PERMISSION_TABLE_SIZE];
u8_t fw_aux_qzone_cnt;
u8_t aux_fw_qzone_id;
u8_t max_pf_fw_client_cnt;
u8_t fw_client_cnt;
u8_t base_fw_client_id;
u8_t base_fw_ndsb;
u8_t base_fw_stats_id;
u8_t base_cam_offset;
/* ---- SR-IOV VF identity ---- */
u8_t vf_num_in_pf;
u8_t vf_num_in_path;
u8_t _cnt_pad[2];
#define REL_VFID(_pdev) ((_pdev)->params.vf_num_in_pf)
#define ABS_VFID(_pdev) ((_pdev)->params.vf_num_in_path)
#define FW_VFID(_pdev) (8 + ABS_VFID((_pdev)))
u32_t debug_me_register;
/* ---- CAM / MAC tables ---- */
u16_t base_offset_in_cam_table;
#define BASE_OFFSET_IN_CAM_TABLE(_pdev) (_pdev)->params.base_offset_in_cam_table
u16_t cam_size;
#define LM_CAM_SIZE(pdev) ((pdev)->params.cam_size)
u16_t mc_table_size[LM_CLI_IDX_MAX];
#define LM_MC_TABLE_SIZE(pdev,lm_client_idx) ((pdev)->params.mc_table_size[lm_client_idx])
u16_t uc_table_size[LM_CLI_IDX_MAX];
#define LM_UC_TABLE_SIZE(pdev,lm_client_idx) ((pdev)->params.uc_table_size[lm_client_idx])
#define LM_MC_NDIS_TABLE_SIZE (64)
#define LM_MC_FCOE_TABLE_SIZE (2)
#define LM_MAX_MC_TABLE_SIZE (LM_MC_NDIS_TABLE_SIZE + LM_MC_FCOE_TABLE_SIZE)
#define LM_KEEP_CURRENT_CAM_VALUE (0xFFFF)
#define LM_INVALID_CAM_BASE_IDX (0xFF)
/* ---- RSS/TSS chains and client<->CID mapping ---- */
u8_t rss_caps;
#define LM_RSS_CAP_IPV4 1
#define LM_RSS_CAP_IPV6 2
u8_t rss_chain_cnt;
u8_t tss_chain_cnt;
u8_t max_rss_chains;
#define LM_MAX_RSS_CHAINS(pdev) (pdev)->params.max_rss_chains
u32_t override_rss_chain_cnt;
#define RSS_ID_TO_SB_ID(_rss_id) (_rss_id)
#define RSS_ID_TO_CID(_rss_id) (_rss_id)
#define TSS_ID_TO_CID(_tss_id) (_tss_id)
#define CHAIN_TO_RSS_ID(_pdev, _chain) (lm_mp_get_reg_chain_from_chain(_pdev, _chain))
#define LM_CLI_RX_FILTER_MASK(pdev, cid) (1 << LM_FW_CLI_ID(pdev, cid))
#define LM_RX_FILTER_ALL_MASK(pdev, ret_val) \
{ \
ret_val |= LM_CLI_RX_FILTER_MASK((pdev), NDIS_CID(pdev)); \
ret_val |= LM_CLI_RX_FILTER_MASK((pdev), ISCSI_CID(pdev));\
ret_val |= LM_CLI_RX_FILTER_MASK((pdev), RDMA_CID(pdev)); \
ret_val |= LM_CLI_RX_FILTER_MASK((pdev), FCOE_CID(pdev)); \
}
#define LM_SW_LEADING_SB_ID 0
#define LM_SW_LEADING_RSS_CID(pdev) 0
#define LM_INVALID_ETH_CID (0xFF)
u8_t map_client_to_cid[LM_CLI_IDX_MAX];
#define NDIS_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_NDIS]
#define ISCSI_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_ISCSI]
#define FCOE_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_FCOE]
#define RDMA_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_RDMA]
#define FWD_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_FWD]
#define OOO_CID(_pdev) (_pdev)->params.map_client_to_cid[LM_CLI_IDX_OOO]
#define LM_CLI_CID(_pdev, lm_cli_idx) ((_pdev)->params.map_client_to_cid[lm_cli_idx])
#define LM_CHAIN_IDX_CLI(pdev, cid) ((lm_chain_type_not_cos != lm_mp_get_chain_type(pdev, cid)) ? LM_CLI_IDX_NDIS : \
((cid == ISCSI_CID(pdev) ? LM_CLI_IDX_ISCSI : \
((cid == FCOE_CID(pdev) ? LM_CLI_IDX_FCOE : \
((cid == FWD_CID(pdev) ? LM_CLI_IDX_FWD : \
((cid == OOO_CID(pdev) ? LM_CLI_IDX_OOO : \
(((cid >= (pdev)->params.max_pf_fw_client_cnt) && (cid < (pdev)->params.fw_client_cnt)) ? LM_CLI_IDX_NDIS : \
LM_CLI_IDX_MAX))))))))))
#define LM_CHAIN_IDX_TRAFFIC_TYPE(pdev, cid) ((lm_chain_type_not_cos != lm_mp_get_chain_type(pdev, cid)) ? LLFC_TRAFFIC_TYPE_NW : \
((cid == ISCSI_CID(pdev) ? LLFC_TRAFFIC_TYPE_ISCSI : \
((cid == FCOE_CID(pdev) ? LLFC_TRAFFIC_TYPE_FCOE : \
((cid == FWD_CID(pdev) ? LLFC_TRAFFIC_TYPE_NW : \
((cid == OOO_CID(pdev) ? LLFC_TRAFFIC_TYPE_ISCSI : \
(((cid >= (pdev)->params.max_pf_fw_client_cnt) && (cid < (pdev)->params.fw_client_cnt)) ? LLFC_TRAFFIC_TYPE_NW : \
MAX_TRAFFIC_TYPE))))))))))
#define LM_FW_CLI_ID(pdev, cid) (pdev->params.base_fw_client_id + cid)
#define LM_FW_QZONE_ID(pdev, cid) (pdev->params.fw_qzone_id[cid])
#define LM_FW_AUX_QZONE_ID(pdev, rel_non_rss_cid) (pdev->params.aux_fw_qzone_id + rel_non_rss_cid)
#define LM_FW_DHC_QZONE_ID(pdev, sb_id) (pdev->params.fw_qzone_id[sb_id])
#define LM_FW_SB_ID(pdev, sb_id) ((sb_id == DEF_STATUS_BLOCK_INDEX)? DEF_STATUS_BLOCK_INDEX: pdev->params.base_fw_ndsb + sb_id)
#define LM_FW_STATS_ID(pdev,cid) (pdev->params.base_fw_stats_id + cid)
#define LM_CLIENT_BIT_VECTOR(pdev, lm_cli_idx) (1 << (LM_FW_CLI_ID(pdev, LM_CLI_CID(pdev, lm_cli_idx))))
#define LM_CID_BIT_VECTOR(pdev, cid) (1 << (LM_FW_CLI_ID(pdev, cid)))
#define LM_FOREACH_RSS_IDX(pdev, rss_idx) \
for ((rss_idx) = 0; (rss_idx) < (pdev)->params.rss_chain_cnt; (rss_idx)++)
#define LM_FOREACH_TSS_IDX(pdev, tss_idx) \
for ((tss_idx) = 0; (tss_idx) < (pdev)->params.tss_chain_cnt; (tss_idx)++)
#define LM_FOREACH_RSS_IDX_SKIP_LEADING(pdev, rss_idx) \
for ((rss_idx) = 1; (u8_t)(rss_idx) < (pdev)->params.rss_chain_cnt; (rss_idx)++)
#define LM_FOREACH_TSS_IDX_SKIP_LEADING(pdev, tss_idx) \
for ((tss_idx) = 1; (u8_t)(tss_idx) < (pdev)->params.tss_chain_cnt; (tss_idx)++)
/* ---- L4 (TOE) RSS chains ---- */
u8_t l4_rss_chain_cnt;
u8_t l4_tss_chain_cnt;
u8_t l4_rss_base_chain_idx;
u8_t l4_base_fw_rss_id;
#define LM_TOE_BASE_RSS_ID(pdev) ((pdev)->params.l4_rss_base_chain_idx)
#define LM_TOE_FW_RSS_ID(pdev, rss_id) ((pdev)->params.l4_base_fw_rss_id + (IS_MULTI_VNIC(pdev) ? (CHIP_IS_E1x(pdev) ? rss_id : 0) : rss_id))
#define LM_TOE_RSS_CHAIN_CNT(pdev) ((pdev)->params.l4_rss_chain_cnt)
#define LM_TOE_TSS_CHAIN_CNT(pdev) ((pdev)->params.l4_tss_chain_cnt)
#define LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx) \
for ((rss_idx) = (pdev)->params.l4_rss_base_chain_idx; (rss_idx) < (pdev)->params.l4_rss_base_chain_idx + (pdev)->params.l4_rss_chain_cnt; (rss_idx)++)
#define LM_TOE_FOREACH_TSS_IDX(pdev, tss_idx) \
for ((tss_idx) = (pdev)->params.l4_rss_base_chain_idx; (tss_idx) < (pdev)->params.l4_rss_base_chain_idx + (pdev)->params.l4_tss_chain_cnt; (tss_idx)++)
#define LM_CHAIN_TO_FW_CLIENT(_pdev, _chain) ((_pdev)->params.base_fw_client_id + (_chain))
u32_t keep_vlan_tag;
u16_t eth_align_enable;
u32_t update_comp_cnt;
u32_t update_suspend_cnt;
u32_t update_toe_comp_cnt;
lm_address_t dmae_copy_scratchpad_phys;
/* ---- congestion management (CMNG) ---- */
u32_t cmng_enable;
u32_t cmng_rate_shaping_enable;
u32_t cmng_fairness_enable;
u32_t cmng_safc_rate_thresh;
u32_t cmng_activate_safc;
u32_t cmng_fair_port0_rate;
u32_t cmng_eth_weight;
u32_t cmng_toe_weight;
u32_t cmng_rdma_weight;
u32_t cmng_iscsi_weight;
u32_t cmng_eth_rate;
u32_t cmng_toe_rate;
u32_t cmng_rdma_rate;
u32_t cmng_iscsi_rate;
u32_t cmng_toe_con_number;
u32_t cmng_rdma_con_number;
u32_t cmng_iscsi_con_number;
u32_t l5sc_max_pending_tasks;
/* ---- link (elink) parameters ---- */
struct elink_params link;
u32_t l2_fw_flow_ctrl;
u32_t l4_fw_flow_ctrl;
u32_t preemphasis_enable;
u32_t preemphasis_rx_0;
u32_t preemphasis_rx_1;
u32_t preemphasis_rx_2;
u32_t preemphasis_rx_3;
u32_t preemphasis_tx_0;
u32_t preemphasis_tx_1;
u32_t preemphasis_tx_2;
u32_t preemphasis_tx_3;
u32_t l4_rss_enabled_by_os;
u32_t disable_patent_using;
u32_t l4_grq_filling_threshold_divider;
u32_t l4_free_cid_delay_time;
u32_t l4_enable_rss;
u32_t l4_rss_is_possible;
#define L4_RSS_DISABLED 0
#define L4_RSS_DYNAMIC 1
u32_t l4_max_rcv_wnd_size;
u32_t disable_pcie_nfr;
u32_t mf_proto_support_flags;
#define LM_PROTO_SUPPORT_ETHERNET 0x1
#define LM_PROTO_SUPPORT_ISCSI 0x2
#define LM_PROTO_SUPPORT_FCOE 0x4
u32_t debug_cap_flags;
#if DBG
#define DEFAULT_DEBUG_CAP_FLAGS_VAL 0xffffffff
#else
#define DEFAULT_DEBUG_CAP_FLAGS_VAL 0x0
#endif
#define DEBUG_CAP_FLAGS_STATS_FW 0x1
u32_t l4_limit_isles;
#define L4_LI_NOTIFY 0x0001
#define L4_LI_MAX_GEN_BUFS_IN_ISLE 0x0002
#define L4_LI_MAX_GEN_BUFS_IN_ARCHIPELAGO 0x0004
u32_t l4_max_gen_bufs_in_isle;
u32_t l4_max_gen_bufs_in_archipelago;
u32_t l4_valid_gen_bufs_in_archipelago;
u32_t l4_max_gen_buf_cnt;
u32_t l4_isles_pool_size;
u32_t i2c_interval_sec;
elink_status_t i2c_elink_status[I2C_SECTION_MAX];
u8_t l4_num_of_blocks_per_connection;
u8_t is_flr;
u8_t __nmb_pad[2];
/* ---- DCBX / LLDP ---- */
dcbx_port_params_t dcbx_port_params;
u32_t lm_dcb_dont_break_bad_oid;
config_lldp_params_t lldp_config_params;
config_dcbx_params_t dcbx_config_params;
u32_t try_not_align_page_multiplied_memory;
u32_t l4_dominance_threshold;
u32_t l4_max_dominance_value;
u32_t l4_data_integrity;
u32_t l4_start_port;
u32_t l4_num_of_ports;
u32_t l4_skip_start_bytes;
u32_t l4_support_pending_sp_req_complete;
u32_t l4_support_upload_req_on_abortive_disconnect;
u32_t grc_timeout_max_ignore ;
u32_t tpa_desc_cnt_per_chain;
u32_t b_dcb_indicate_event;
u32_t sriov_inc_mac;
/* ---- device / virtualization type (see DEVICE_TYPE_* / VT_* above) ---- */
u8_t device_type;
u8_t virtualization_type;
u8_t channel_type;
u8_t pf_acquire_status;
u8_t fw_stats_init_value;
u8_t int_coalesing_mode_disabled_by_ndis;
u8_t mac_spoof_test;
u8_t run_driver_pulse;
#define IS_DRIVER_PULSE_ALWAYS_ALIVE(_pdev) (!(_pdev)->params.run_driver_pulse)
u8_t ___pad;
u32_t enable_error_recovery;
#define IS_ERROR_RECOVERY_ENABLED(_pdev) ((_pdev)->params.enable_error_recovery && !CHIP_IS_E1x(_pdev))
u32_t validate_sq_complete;
u32_t e3_cos_modes;
u32_t e3_network_cos_mode;
u32_t npar_vm_switching_enable;
u32_t flow_control_reporting_mode;
#define LM_FLOW_CONTROL_REPORTING_MODE_DISABLED 0
#define LM_FLOW_CONTROL_REPORTING_MODE_ENABLED 1
u32_t fw_valid_mask;
u32_t vf_promiscuous_mode_restricted;
u32_t max_chains_per_vf_override;
u32_t record_sp;
#define XSTORM_RECORD_SLOW_PATH 0x01
#define CSTORM_RECORD_SLOW_PATH 0x02
#define TSTORM_RECORD_SLOW_PATH 0x04
#define USTORM_RECORD_SLOW_PATH 0x08
u32_t start_mp_chain;
u32_t debug_sriov;
u32_t debug_sriov_vfs;
u8_t b_inta_mode_prvided_by_os;
} lm_params_t;
/* Geometry of the NVRAM/flash device: write-page size and total capacity
 * (both in bytes). */
typedef struct _flash_spec_t
{
u32_t page_size;
u32_t total_size;
} flash_spec_t;
/* One MAC CAM entry: the address plus a reference count so the same MAC
 * added by multiple clients is only programmed/removed once. */
typedef struct _lm_cam_entry_t
{
u8_t cam_addr[ETHERNET_ADDRESS_SIZE];
u16_t ref_cnt;
} lm_cam_entry_t;
/* Number of MAC slots available in the NIG block. */
#define MAX_MAC_OFFSET_IN_NIG 16
/* Reference-counted MAC entry mirrored in the NIG.  DEC asserts (debug
 * builds) that the count never goes negative. */
typedef struct _lm_nig_mirror_entry_t
{
s32_t refcnt;
#define NIG_ENTRY_INC_REFCNT(_entry) ++(_entry)->refcnt
#define NIG_ENTRY_DEC_REFCNT(_entry) {--(_entry)->refcnt; DbgBreakIf((_entry)->refcnt < 0);}
u8_t addr[ETHERNET_ADDRESS_SIZE];
}lm_nig_mirror_entry_t;
/* Software mirror of all NIG MAC slots for this function. */
typedef struct _lm_nig_mirror_t
{
lm_nig_mirror_entry_t entries[MAX_MAC_OFFSET_IN_NIG];
}lm_nig_mirror_t;
/* Multi-function (MF) configuration as read from the MCP's func_mf_cfg,
 * plus derived per-vnic bandwidth limits and SD/NIV/AFEX mode data. */
typedef struct _lm_hardware_mf_info_t
{
u32_t func_mf_cfg;
#define NIV_FUNCTION_ENABLED(_pdev) (GET_FLAGS((_pdev)->hw_info.mf_info.func_mf_cfg, FUNC_MF_CFG_FUNC_DISABLED|FUNC_MF_CFG_FUNC_DELETED)==0)
u8_t vnics_per_port;
u8_t multi_vnics_mode;
u8_t path_has_ovlan;
u8_t _pad;
/* Per-vnic min/max bandwidth (weights) used for rate limiting. */
u8_t min_bw[MAX_VNIC_NUM];
u8_t max_bw[MAX_VNIC_NUM];
/* ext_id holds either the outer VLAN (SD mode) or the VIF id (NIV mode) —
 * see the OVLAN()/VIF_ID() aliases below. */
u16_t ext_id;
/* NOTE(review): 802.1Q VLAN IDs are 0..4095, so "<= 4096" admits one value
 * past the legal range — confirm whether 4096 is used as a sentinel here. */
#define VALID_OVLAN(ovlan) ((ovlan) <= 4096)
#define INVALID_VIF_ID 0xFFFF
#define OVLAN(_pdev) ((_pdev)->hw_info.mf_info.ext_id)
#define VIF_ID(_pdev) ((_pdev)->hw_info.mf_info.ext_id)
u16_t default_vlan;
#define NIV_DEFAULT_VLAN(_pdev) ((_pdev)->hw_info.mf_info.default_vlan)
u8_t niv_allowed_priorities;
#define NIV_ALLOWED_PRIORITIES(_pdev) ((_pdev)->hw_info.mf_info.niv_allowed_priorities)
u8_t niv_default_cos;
#define NIV_DEFAULT_COS(_pdev) ((_pdev)->hw_info.mf_info.niv_default_cos)
u8_t niv_mba_enabled;
u8_t _pad1;
enum mf_cfg_afex_vlan_mode afex_vlan_mode;
#define AFEX_VLAN_MODE(_pdev) ((_pdev)->hw_info.mf_info.afex_vlan_mode)
u16_t flags;
#define MF_INFO_VALID_MAC 0x0001
u8_t mf_mode;
/* Switch-dependent sub-mode: regular, UFP, or BD. */
u8_t sd_mode;
#define SD_REGULAR_MODE 0
#define SD_UFP_MODE 1
#define SD_BD_MODE 2
} lm_hardware_mf_info_t;
/* Per-VF IGU allocation: base status-block index, SB count, and the
 * IGU test-mode settings. */
typedef struct _lm_vf_igu_info_t
{
u8_t igu_base_sb;
u8_t igu_sb_cnt;
u8_t igu_test_sb_cnt;
u8_t igu_test_mode;
} lm_vf_igu_info_t;
/* One IGU block descriptor: ownership status plus the (vector, pf, vf)
 * mapping and a raw dump of the block's register value. */
typedef struct _lm_igu_block_t
{
u8_t status;
#define LM_IGU_STATUS_AVAILABLE 0x01
#define LM_IGU_STATUS_VALID 0x02
#define LM_IGU_STATUS_BUSY 0x04
#define LM_IGU_STATUS_PF 0x08
u8_t vector_number;
u8_t pf_number;
u8_t vf_number;
u32_t block_dump;
} lm_igu_block_t;
/* Full map of the IGU mapping memory, one descriptor per IGU block. */
typedef struct _lm_igu_map_t
{
lm_igu_block_t igu_blocks_set[IGU_REG_MAPPING_MEMORY_SIZE];
} lm_igu_map_t;
/* IGU configuration for this PF: base/default SB ids, U-storm SB offset,
 * IGU function id, per-VF info, and the SB-index translation tables.
 * The accessor macros below are the canonical way to read these fields. */
typedef struct _lm_igu_info_t {
u8_t igu_base_sb;
#define IGU_BASE_NDSB(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_base_sb)
#define IGU_PF_NDSB(pdev, sb_id) (IGU_BASE_NDSB(pdev) + sb_id)
u8_t igu_sb_cnt;
#define LM_IGU_SB_CNT(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_sb_cnt)
u8_t igu_dsb_id;
#define IGU_DSB_ID(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_dsb_id)
u8_t igu_u_sb_offset;
#define IGU_U_NDSB_OFFSET(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_u_sb_offset)
u8_t igu_func_id;
#define IGU_FUNC_ID(pdev) ((pdev)->hw_info.intr_blk_info.igu_info.igu_func_id)
u8_t igu_test_sb_cnt;
lm_vf_igu_info_t vf_igu_info[E2_MAX_NUM_OF_VFS];
/* Driver SB id -> IGU SB id translation (used for VF NDSBs). */
u8_t igu_sb[IGU_REG_MAPPING_MEMORY_SIZE];
#define IGU_VF_NDSB(pdev, sb_id) ((pdev)->hw_info.intr_blk_info.igu_info.igu_sb[sb_id])
lm_igu_map_t igu_map;
#define IGU_SB(pdev, sb_id) ((pdev)->hw_info.intr_blk_info.igu_info.igu_map.igu_blocks_set[sb_id])
} lm_igu_info_t;
/* Interrupt-block description: whether the chip uses the HC or IGU block,
 * backward-compat vs normal mode, GRC vs IGU-memory access, and the
 * precomputed SIMD/command-control register addresses (with/without mask). */
typedef struct _lm_intr_blk_info_t
{
u8_t blk_type;
#define INTR_BLK_HC 0
#define INTR_BLK_IGU 1
#define INTR_BLK_TYPE(_pdev) ((_pdev)->hw_info.intr_blk_info.blk_type)
u8_t blk_mode;
#define INTR_BLK_MODE_BC 0
#define INTR_BLK_MODE_NORM 1
#define INTR_BLK_MODE(_pdev) ((_pdev)->hw_info.intr_blk_info.blk_mode)
u8_t access_type;
#define INTR_BLK_ACCESS_GRC 1
#define INTR_BLK_ACCESS_IGUMEM 0
#define INTR_BLK_ACCESS(_pdev) ((_pdev)->hw_info.intr_blk_info.access_type)
u32_t simd_addr_wmask;
#define INTR_BLK_SIMD_ADDR_WMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.simd_addr_wmask)
u32_t simd_addr_womask;
#define INTR_BLK_SIMD_ADDR_WOMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.simd_addr_womask)
/* When cmd_ctrl_rd_wmask is non-zero, reads must go through the command
 * control register (see INTR_BLK_REQUIRE_CMD_CTRL). */
u32_t cmd_ctrl_rd_wmask;
u32_t cmd_ctrl_rd_womask;
#define INTR_BLK_CMD_CTRL_INVALID 0
#define INTR_BLK_REQUIRE_CMD_CTRL(_pdev) ((_pdev)->hw_info.intr_blk_info.cmd_ctrl_rd_wmask != INTR_BLK_CMD_CTRL_INVALID)
#define INTR_BLK_CMD_CTRL_RD_WMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.cmd_ctrl_rd_wmask)
#define INTR_BLK_CMD_CTRL_RD_WOMASK(_pdev) ((_pdev)->hw_info.intr_blk_info.cmd_ctrl_rd_womask)
lm_igu_info_t igu_info;
} lm_intr_blk_info_t;
/* VF counts: real values when SR-IOV support is compiled in (VF_INVOLVED),
 * zero otherwise so callers need no #ifdefs of their own. */
#ifdef VF_INVOLVED
#define GET_NUM_VFS_PER_PF(_pdev) ((_pdev)->hw_info.sriov_info.total_vfs)
#define GET_NUM_VFS_PER_PATH(_pdev) (64)
#else
#define GET_NUM_VFS_PER_PF(_pdev) (0)
#define GET_NUM_VFS_PER_PATH(_pdev) (0)
#endif
/* SR-IOV capability data read from PCI config space (control, TotalVFs,
 * NumVFs, VF device id, VF BAR sizes/addresses) plus driver-side pool
 * sizing for VF resources. */
typedef struct _lm_sriov_info_t {
#define MAX_VF_BAR 2
u16_t sriov_control;
u16_t total_vfs;
u16_t num_vfs;
u16_t vf_device_id;
u8_t max_chains_per_vf;
u8_t vf_cid_wnd_size;
u8_t vf_pool_size;
u8_t pf_nd_pool_size;
u32_t first_vf_in_pf;
u32_t vf_bar_size[MAX_VF_BAR];
lm_address_t vf_bars[MAX_VF_BAR];
u32_t shmem_num_vfs_in_pf;
u8_t b_pf_asymetric_configuration;
} lm_sriov_info_t;
/* Chip port mode: E1x chips have no mode (NONE); E2/E3 run in either
 * 2-port or 4-port mode. */
typedef enum
{
LM_CHIP_PORT_MODE_NONE = 0x0,
LM_CHIP_PORT_MODE_2 = 0x1,
LM_CHIP_PORT_MODE_4 = 0x2
} lm_chip_port_mode_t ;
/* Static hardware description gathered at probe time: PCI identity, BARs,
 * permanent MAC/WWN addresses, shmem locations, the chip-id decoding macro
 * zoo (CHIP_NUM/CHIP_REV/CHIP_IS_E1..E3), PCIe link properties, bootcode
 * revision, and per-protocol connection limits. */
typedef struct _lm_hardware_info_t
{
/* PCI identity and config-space basics. */
u16_t vid;
u16_t did;
u16_t ssid;
u16_t svid;
u8_t irq;
u8_t int_pin;
u8_t latency_timer;
u8_t cache_line_size;
u8_t rev_id;
u8_t _pad[3];
lm_address_t mem_base[MAX_NUM_BAR];
u32_t bar_size[MAX_NUM_BAR];
lm_address_t mem_base1;
u32_t bar_size1;
/* Permanent addresses from NVRAM (MACs are 6 bytes stored in 8-byte slots). */
u8_t mac_addr[8];
u8_t iscsi_mac_addr[8];
u8_t fcoe_mac_addr[8];
u8_t fcoe_wwn_port_name[8];
u8_t fcoe_wwn_node_name[8];
/* MCP shared-memory window addresses. */
u32_t shmem_base;
u32_t mf_cfg_base;
u32_t shmem_base2;
/* chip_id layout: [31:16] device number, [15:12] revision, [11:4] metal,
 * [3:0] bond id — see the masks below. */
u32_t chip_id;
#define CHIP_NUM_SET(_chip_id,_p) ((_chip_id) = (((_p) & 0xffff) << 16))
#define CHIP_NUM(_p) (((_p)->hw_info.chip_id) & 0xffff0000)
#define CHIP_NUM_5710 0x164e0000
#define CHIP_NUM_5711 0x164f0000
#define CHIP_NUM_5711E 0x16500000
#define CHIP_NUM_5712 0x16620000
#define CHIP_NUM_5712E 0x16630000
#define CHIP_NUM_5713 0x16510000
#define CHIP_NUM_5713E 0x16520000
#define CHIP_NUM_57800 0x168a0000
#define CHIP_NUM_57840_OBSOLETE 0x168d0000
#define CHIP_NUM_57810 0x168e0000
#define CHIP_NUM_57800_MF 0x16a50000
#define CHIP_NUM_57840_MF_OBSOLETE 0x16ae0000
#define CHIP_NUM_57810_MF 0x16ab0000
#define CHIP_NUM_57811 0x163d0000
#define CHIP_NUM_57811_MF 0x163e0000
#define CHIP_NUM_57811_VF 0x163f0000
#define CHIP_NUM_57840_4_10 0x16a10000
#define CHIP_NUM_57840_2_20 0x16a20000
#define CHIP_NUM_57840_MF 0x16a40000
#define CHIP_NUM_57840_VF 0x16ad0000
/* Chip-generation predicates: E1 (5710), E1H (5711x), E2 (5712x/5713x),
 * E3 (578xx).  E1x == E1 or E1H. */
#define CHIP_IS_E1_PARAM(_chip_num) ((_chip_num) == CHIP_NUM_5710)
#define CHIP_IS_E1(_p) (CHIP_IS_E1_PARAM(CHIP_NUM(_p)))
#define CHIP_IS_E1H_PARAM(_chip_num) (((_chip_num) == CHIP_NUM_5711) || ((_chip_num) == CHIP_NUM_5711E))
#define CHIP_IS_E1H(_p) (CHIP_IS_E1H_PARAM(CHIP_NUM(_p)))
#define CHIP_IS_E1x_PARAM(_chip_num) (CHIP_IS_E1_PARAM(((_chip_num))) || CHIP_IS_E1H_PARAM(((_chip_num))))
#define CHIP_IS_E1x(_p) (CHIP_IS_E1x_PARAM(CHIP_NUM(_p)))
#define CHIP_IS_E2_PARAM(_chip_num) (((_chip_num) == CHIP_NUM_5712) || ((_chip_num) == CHIP_NUM_5713) || \
((_chip_num) == CHIP_NUM_5712E) || ((_chip_num) == CHIP_NUM_5713E))
#define CHIP_IS_E2(_p) (CHIP_IS_E2_PARAM(CHIP_NUM(_p)))
#define CHIP_IS_E3_PARAM(_chip_num) ((_chip_num == CHIP_NUM_57800) || (_chip_num == CHIP_NUM_57810) || \
(_chip_num == CHIP_NUM_57840_4_10) || (_chip_num == CHIP_NUM_57840_2_20) || (_chip_num == CHIP_NUM_57800_MF) || \
(_chip_num == CHIP_NUM_57810_MF) || (_chip_num == CHIP_NUM_57840_MF) || \
(_chip_num == CHIP_NUM_57840_OBSOLETE) || (_chip_num == CHIP_NUM_57840_MF_OBSOLETE) || \
(_chip_num == CHIP_NUM_57811) || (_chip_num == CHIP_NUM_57811_MF) || \
(_chip_num == CHIP_NUM_57811_VF))
#define CHIP_IS_E3(_p) (CHIP_IS_E3_PARAM(CHIP_NUM(_p)))
#define CHIP_IS_E2E3(_p) (CHIP_IS_E2(_p) || (CHIP_IS_E3(_p)))
#define CHIP_IS_E2E3A0(_p) (CHIP_IS_E2(_p) || (CHIP_IS_E3A0(_p)))
/* Revision field: values above ASIC_MAX indicate emulation/FPGA platforms
 * ("slow" platforms); bit 12 then distinguishes FPGA from emulation. */
#define CHIP_REV_SHIFT 12
#define CHIP_REV_MASK (0xF<<CHIP_REV_SHIFT)
#define CHIP_REV(_p) (((_p)->hw_info.chip_id) & CHIP_REV_MASK)
#define CHIP_REV_Ax (0x0<<CHIP_REV_SHIFT)
#define CHIP_REV_Bx (0x1<<CHIP_REV_SHIFT)
#define CHIP_REV_Cx (0x2<<CHIP_REV_SHIFT)
#define CHIP_REV_SIM_IS_FPGA (0x1<<CHIP_REV_SHIFT)
#define CHIP_REV_ASIC_MAX (0x5<<CHIP_REV_SHIFT)
#define CHIP_REV_IS_SLOW(_p) (CHIP_REV(_p) > CHIP_REV_ASIC_MAX)
#define CHIP_REV_IS_FPGA(_p) (CHIP_REV_IS_SLOW(_p) && (CHIP_REV(_p) & CHIP_REV_SIM_IS_FPGA))
#define CHIP_REV_IS_EMUL(_p) (CHIP_REV_IS_SLOW(_p) && !(CHIP_REV(_p)& CHIP_REV_SIM_IS_FPGA))
#define CHIP_REV_IS_ASIC(_p) (!CHIP_REV_IS_SLOW(_p))
/* CHIP_REV_SIM maps a simulation rev back to the equivalent ASIC rev. */
#define CHIP_REV_SIM(_p) ((0xF - (CHIP_REV(_p)>>CHIP_REV_SHIFT))>>1)<<CHIP_REV_SHIFT
#define CHIP_IS_E3B0(_p) (CHIP_IS_E3(_p)&&( (CHIP_REV(_p) == CHIP_REV_Bx)||(CHIP_REV_SIM(_p) == CHIP_REV_Bx)))
#define CHIP_IS_E3A0(_p) (CHIP_IS_E3(_p)&&( (CHIP_REV(_p) == CHIP_REV_Ax)||(CHIP_REV_SIM(_p) == CHIP_REV_Ax)))
#define CHIP_METAL(_p) (((_p)->hw_info.chip_id) & 0x00000ff0)
#define CHIP_BONDING(_p) (((_p)->hw_info.chip_id) & 0x0000000f)
#define CHIP_ID(_p) (((_p)->hw_info.chip_id) & 0xfffffff0)
#define CHIP_ID_5706_A0 0x57060000
#define CHIP_ID_5706_A1 0x57060010
#define CHIP_ID_5706_FPGA 0x5706f000
#define CHIP_ID_5706_IKOS 0x5706e000
#define CHIP_ID_5708_A0 0x57080000
#define CHIP_ID_5708_B0 0x57081000
#define CHIP_ID_5708_FPGA 0x5708f000
#define CHIP_ID_5708_IKOS 0x5708e000
#define CHIP_ID_5710_EMUL 0X164ed000
#define CHIP_ID_5710_A0 0x164e0000
#define CHIP_ID_5710_A1 0x164e0010
#define IS_CHIP_REV_A0(_p) (CHIP_ID(_p) == CHIP_ID_5710_A0)
#define IS_CHIP_REV_A1(_p) (CHIP_ID(_p) == CHIP_ID_5710_A1)
#define CHIP_BOND_ID(_p) (((_p)->hw_info.chip_id) & 0xf)
#define CHIP_BOND_ID_SERDES_BIT 0x01
#define CHIP_OPT_MISC_DO_BIT 0x02
/* "Silent" revision: metal-level stepping that keeps the same visible rev. */
u8_t silent_chip_rev;
#define SILENT_CHIP_REV(_p) ((_p)->hw_info.silent_chip_rev)
#define SILENT_REV_E1_A0 0xFF
#define SILENT_REV_E1_A1 0x00
#define SILENT_REV_E1_A2 0x01
#define SILENT_REV_E1H_A0 0x00
#define SILENT_REV_E1H_A1 0x01
#define SILENT_REV_E1H_A2 0x02
#define SILENT_REV_E3_B0 0x00
#define SILENT_REV_E3_B1 0x01
lm_chip_port_mode_t chip_port_mode;
#define CHIP_PORT_MODE(_p) ((_p)->hw_info.chip_port_mode)
u32_t nvm_hw_config;
u32_t nvm_hw_config2;
u8_t board_num[16];
flash_spec_t flash_spec;
/* Negotiated PCIe payload/read-request sizes and lane width/speed. */
u8_t max_payload_size;
u8_t max_read_req_size;
u8_t mcp_detected;
u8_t sz_ext_phy_fw_ver[16];
u32_t link_config[ELINK_LINK_CONFIG_SIZE];
u32_t multi_phy_config;
u32_t phy_force_kr_enabler;
u8_t no_10g_kr;
u8_t pcie_lane_width;
#define PCIE_WIDTH_1 1
#define PCIE_WIDTH_2 2
#define PCIE_WIDTH_4 4
#define PCIE_WIDTH_8 8
#define PCIE_WIDTH_16 16
#define PCIE_WIDTH_32 32
/* NOTE: E2 A0 uses a different (zero-based) speed encoding — see the
 * *_E2_A0 values below. */
u8_t pcie_lane_speed;
#define PCIE_LANE_SPEED_2_5G 1
#define PCIE_LANE_SPEED_5G 2
#define PCIE_LANE_SPEED_8G 3
#define PCIE_LANE_SPEED_2_5G_E2_A0 0
#define PCIE_LANE_SPEED_5G_E2_A0 1
u32_t saved_pf0_pcie_mps;
#define INVALID_MPS 0xEEEEEEEE
u8_t mba_features;
u32_t port_feature_config;
u32_t mba_vlan_cfg ;
u8_t is_dcc_active;
/* Bootcode revision and the minimum revisions for optional features. */
u32_t bc_rev;
#define BC_REV_SUPPORTED 0x040200
#define BC_REV_IE_DCB_SUPPORTED 0x070200
#define BC_REV_IE_SRIOV_SUPPORTED 0x070400
#define LM_GET_BC_REV_MAJOR(_p) (_p->hw_info.bc_rev>>8)
/* Per-protocol and aggregate connection limits. */
u32_t max_port_toe_conn;
u32_t max_port_rdma_conn;
u32_t max_port_iscsi_conn;
u32_t max_port_fcoe_conn;
u32_t max_port_conns;
u32_t max_common_conns;
lm_hardware_mf_info_t mf_info;
lm_intr_blk_info_t intr_blk_info;
lm_sriov_info_t sriov_info;
u8_t flr_capable;
/* Tri-state: whether PCI config space has been validated as trustworthy. */
u8_t pci_cfg_trust;
#define PCI_CFG_NOT_TESTED_FOR_TRUST 0x00
#define PCI_CFG_NOT_TRUSTED 0x01
#define PCI_CFG_TRUSTED 0x02
u8_t pda_pm_reset_in_progress;
#define SET_PDA_PM_RESET_IN_PROGRESS(_pdev) ((_pdev)->hw_info.pda_pm_reset_in_progress = TRUE)
#define CLEAR_PDA_PM_RESET_IN_PROGRESS(_pdev) ((_pdev)->hw_info.pda_pm_reset_in_progress = FALSE)
#define IS_PDA_PM_RESET_IN_PROGRESS(_pdev) ((_pdev)->hw_info.pda_pm_reset_in_progress)
u8_t ___pad;
/* Device/vendor id as read via GRC vs PCI config (used for sanity checks). */
u32_t grc_didvid;
u32_t pci_cfg_didvid;
u32_t pcie_caps_offset;
u32_t pcie_dev_capabilities;
} lm_hardware_info_t;
/* Slow-path status block: DMA address, the firmware-format SB data, and a
 * pointer to the host copy the chip updates (hence volatile). */
typedef struct _gen_sp_status_block_t
{
lm_address_t blk_phy_address;
struct hc_sp_status_block_data sb_data;
volatile struct host_sp_status_block * hc_sp_status_blk;
} gen_sp_status_block_t;
/* Fast-path status block.  Both unions are discriminated by chip family /
 * device type: E1x vs E2+ layouts, or the VF variant (phys addr + raw u16
 * view) — exactly one member of each union is meaningful per device. */
typedef struct _gen_status_block_t
{
union {
struct hc_status_block_data_e1x e1x_sb_data;
struct hc_status_block_data_e2 e2_sb_data;
lm_address_t vf_sb_phy_address;
} hc_status_block_data;
union {
volatile struct host_hc_status_block_e1x * e1x_sb;
volatile struct host_hc_status_block_e2 * e2_sb;
volatile u16_t * vf_sb;
} host_hc_status_block;
} gen_status_block_t;
/* Snapshot of the AEU attention signal registers: 4 on E1x, 5 on E2+;
 * the array is sized for the larger case. */
typedef struct _route_cfg_sig_output
{
#define NUM_ATTN_REGS_E1X 4
#define NUM_ATTN_REGS_E2 5
#define MAX_ATTN_REGS 5
u32_t attn_sig_dword[MAX_ATTN_REGS];
} route_cfg_sig_output;
/* HC timeout register units are 4 microseconds per tick. */
#define HC_TIMEOUT_RESOLUTION_IN_US 4
/* Interrupt coalescing settings: dynamic HC config plus the per-index
 * microsecond timeouts for the C-storm and U-storm status blocks. */
typedef struct _lm_int_coalesing_info {
struct dynamic_hc_config eth_dynamic_hc_cfg;
u32_t hc_usec_c_sb[HC_CSTORM_SB_NUM_INDICES];
u32_t hc_usec_u_sb[HC_USTORM_SB_NUM_INDICES];
} lm_int_coalesing_info;
/* Driver-statistics helpers: bump a counter in the mirrored driver stats.
 * ATOMIC variants use mm_atomic_*; plain variants are unsynchronized.
 * The per-ETH ATOMIC forms compile to nothing in release builds (below). */
#define LM_COMMON_DRV_STATS_ATOMIC_INC(_pdev, layer_type, field_name) \
mm_atomic_inc(&((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)));
#define LM_COMMON_DRV_STATS_ATOMIC_DEC(_pdev, layer_type, field_name) \
mm_atomic_dec(&((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)));
#define LM_COMMON_DRV_STATS_INC(_pdev, layer_type, field_name) \
((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)++);
#define LM_COMMON_DRV_STATS_DEC(_pdev, layer_type, field_name) \
((_pdev->vars.stats.stats_mirror.stats_drv.drv_##layer_type.field_name)--);
#define LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_INC(_pdev, toe, field_name)
#define LM_COMMON_DRV_STATS_ATOMIC_DEC_TOE(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_DEC(_pdev, toe, field_name)
#define LM_COMMON_DRV_STATS_INC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_INC(_pdev, eth, field_name)
#define LM_COMMON_DRV_STATS_DEC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_DEC(_pdev, eth, field_name)
/* ETH atomic stats are debug-only: no-ops in release builds. */
#if DBG
#define LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_INC(_pdev, eth, field_name)
#define LM_COMMON_DRV_STATS_ATOMIC_DEC_ETH(_pdev, field_name) LM_COMMON_DRV_STATS_ATOMIC_DEC(_pdev, eth, field_name)
#else
#define LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(_pdev, field_name)
#define LM_COMMON_DRV_STATS_ATOMIC_DEC_ETH(_pdev, field_name)
#endif
/* State of the VF<->PF mailbox channel: the message and bulletin buffers
 * (virtual + DMA addresses), a completion flag the other side writes
 * ("done", hence volatile), and the last bad response header (sw- or
 * hw-channel format) when SR-IOV support is compiled in. */
typedef struct _lm_vf_pf_message_t
{
u32_t state;
u32_t message_size;
void * message_virt_addr;
lm_address_t message_phys_addr;
void * bulletin_virt_addr;
lm_address_t bulletin_phys_addr;
volatile u16 * done;
void * cookie;
u16_t do_not_arm_trigger;
u16_t old_version;
#ifdef VF_INVOLVED
union
{
struct pf_vf_msg_hdr sw_channel_hdr;
struct pfvf_tlv hw_channel_hdr;
} bad_response;
#endif
}
lm_vf_pf_message_t;
/* DCBX indicate-event helpers: classification table sizing (local table
 * gets 2 extra entries beyond the chip limit), DCB support predicates,
 * and the shmem offset of this port's admin MIB. */
#define LM_DCBX_IE_IS_ETS_DISABLE(_num_traffic_classes) (0 == (_num_traffic_classes))
#define LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(_entries) ((_entries) * sizeof(dcb_classif_elem_t))
#define LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_LOCAL (DCBX_MAX_APP_LOCAL)
#define LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_REMOTE (DCBX_MAX_APP_PROTOCOL)
#define LM_DCBX_IE_CLASSIF_NUM_ENTRIES_LOCAL (LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_LOCAL + 2)
#define LM_DCBX_IE_CLASSIF_NUM_ENTRIES_REMOTE (LM_DCBX_IE_CHIP_CLASSIF_NUM_ENTRIES_REMOTE)
#define LM_DCBX_IE_CLASSIF_TABLE_ALOC_SIZE_LOCAL (LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(LM_DCBX_IE_CLASSIF_NUM_ENTRIES_LOCAL))
#define LM_DCBX_IE_CLASSIF_TABLE_ALOC_SIZE_REMOTE (LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(LM_DCBX_IE_CLASSIF_NUM_ENTRIES_REMOTE))
#define LM_DCBX_IE_CLASSIF_TABLE_ALOC_SIZE_DBG (LM_DCBX_IE_CLASSIF_ENTRIES_TO_ALOC_SIZE(16))
#define LM_DCBX_MAX_TRAFFIC_TYPES (8)
#define LM_DCBX_ILLEGAL_PRI (MAX_PFC_PRIORITIES)
/* DCB requires an E2-or-later chip AND the user/registry enable flag. */
#define IS_DCB_SUPPORTED_BY_CHIP(_pdev) (!(CHIP_IS_E1x(_pdev)))
#define IS_DCB_SUPPORTED(_pdev) (((_pdev)->params.dcbx_config_params.dcb_enable) && \
IS_DCB_SUPPORTED_BY_CHIP(_pdev))
#define IS_DCB_ENABLED(_pdev) ((_pdev)->dcbx_info.is_enabled)
/* Admin MIB lives after the per-port lldp_params_t array in mf_cfg. */
#define LM_DCBX_ADMIN_MIB_OFFSET(_pdev ,_mf_cfg_offfset) (_mf_cfg_offfset + \
PORT_MAX * sizeof(lldp_params_t) + \
PORT_ID(_pdev) * sizeof(lldp_admin_mib_t))
/* PFC (priority flow control) frame counters. */
typedef struct _lm_dcbx_stat
{
u64_t pfc_frames_sent;
u64_t pfc_frames_received;
}lm_dcbx_stat;
/* Ramrod progress for the DCBX function start/stop sequence. */
typedef enum
{
FUNCTION_DCBX_START_POSTED = 0,
FUNCTION_DCBX_START_COMPLETED = 1,
FUNCTION_DCBX_STOP_POSTED = 2,
FUNCTION_DCBX_STOP_COMPLETED = 3,
} lm_dcbx_function_state_t;
/* Commands accepted by lm_dcbx_config_drv_flags(): set a bit, clear a
 * bit, or reset the whole flags word. */
typedef enum
{
lm_dcbx_drv_flags_set_bit = 0,
lm_dcbx_drv_flags_reset_bit = 1,
lm_dcbx_drv_flags_reset_flags = 2,
}lm_dcbx_drv_flags_cmd_t;
/* Which standard the current ETS configuration follows. */
typedef enum {
lm_dcbx_ets_config_state_cee,
lm_dcbx_ets_config_state_ieee,
}lm_dcbx_ets_config_state;
/* Validity/enable state of the IEEE ETS configuration. */
typedef enum {
lm_dcbx_ets_ieee_config_not_valid,
lm_dcbx_ets_ieee_config_en,
lm_dcbx_ets_ieee_config_di,
}lm_dcbx_ie_ets_ieee_config_state;
/* State for indicating DCB events to the OS: which client registered,
 * the OS-vs-chip operational state bitmap, the CEE/IEEE ETS parameter
 * sets, and the last local/remote parameters handed to the OS. */
typedef struct _lm_dcbx_indicate_event_t
{
u8_t lm_cli_idx;
u32_t dcb_current_oper_state_bitmap;
#define DCB_STATE_CONFIGURED_BY_OS_QOS (1 << 0)
#define DCB_STATE_CONFIGURED_BY_OS_QOS_TO_WILLING (1 << 1)
lm_dcbx_ets_config_state ets_config_state;
u8_t is_ets_ieee_params_os_valid;
dcb_ets_tsa_param_t ets_ieee_params_os;
lm_dcbx_ie_ets_ieee_config_state ets_ieee_config_state;
dcb_ets_tsa_param_t ets_ieee_params_config;
u16_t iscsi_tcp_pri;
dcb_indicate_event_params_t dcb_params_given_dbg;
dcb_indicate_event_params_t local_params;
dcb_indicate_event_params_t remote_params;
}lm_dcbx_indicate_event_t;
/* Top-level DCBX runtime state: LPME task state, ramrod state, the PFC
 * firmware config buffer (virt + DMA), accumulated error flags, and the
 * negotiated/admin MIB snapshots. */
typedef struct _lm_dcbx_info_t
{
dcbx_update_task_state dcbx_update_lpme_task_state;
volatile u32_t dcbx_ramrod_state;
void *pfc_fw_cfg_virt;
lm_address_t pfc_fw_cfg_phys;
u32_t dcbx_error;
#define DCBX_ERROR_NO_ERROR (0)
#define DCBX_ERROR_MCP_CMD_FAILED (1 << 0)
#define DCBX_ERROR_SET_TIMER (1 << 1)
#define DCBX_ERROR_REGISTER_LPME (1 << 2)
#define DCBX_ERROR_WRONG_PORT (1 << 3)
#define DCBX_ERROR_RESOURCE (1 << 4)
u8_t is_dcbx_neg_received;
u8_t is_enabled;
u8_t _pad[2];
lm_dcbx_indicate_event_t indicate_event;
lldp_admin_mib_t admin_mib_org; /* admin MIB as originally read, for restore */
volatile u32_t is_indicate_event_en;
u8_t pri_to_cos[LM_DCBX_MAX_TRAFFIC_TYPES];
u32_t lpme_failed_cnt;
lldp_local_mib_t local_mib_last;
}lm_dcbx_info_t;
/* Apply a set/clear/reset command (see lm_dcbx_drv_flags_cmd_t) to the
 * driver-flags word; bit_drv_flags selects the bit for set/clear. */
void
lm_dcbx_config_drv_flags(
IN struct _lm_device_t *pdev,
IN const lm_dcbx_drv_flags_cmd_t drv_flags_cmd,
IN const u32_t bit_drv_flags);
/* Port Management Function role: not the PMF, the original PMF, or a
 * function that inherited PMF duty (migration). */
typedef enum
{
NOT_PMF = 0,
PMF_ORIGINAL = 1,
PMF_MIGRATION = 2,
}pmf_type_t;
/* Which MAC hardware block is currently driving the port. */
typedef enum
{
MAC_TYPE_NONE = 0,
MAC_TYPE_EMAC = 1,
MAC_TYPE_BMAC = 2,
MAC_TYPE_UMAC = 3,
MAC_TYPE_XMAC = 4,
MAC_TYPE_MAX = 5
} mac_type_t;
/* Last link state reported upward (link status, medium, flow control,
 * cable presence, EEE policy) — kept to suppress duplicate indications. */
typedef struct _lm_reported_link_params_t
{
lm_status_t link;
lm_medium_t medium;
lm_flow_control_t flow_ctrl;
u8_t cable_is_attached;
u8_t eee_policy;
} lm_reported_link_params_t;
/* Mutable runtime state of the device: mapped BARs, status blocks and
 * their ack shadows, attention state, link/statistics state, the big
 * context/searcher/timers page tables, doorbell-workaround counters,
 * PMF role, and VF-PF channel state. */
typedef struct _lm_variables_t
{
#if defined(__SunOS)
ddi_acc_handle_t reg_handle[MAX_NUM_BAR];
#endif
volatile void * mapped_bar_addr[MAX_NUM_BAR];
gen_sp_status_block_t gen_sp_status_block;
gen_status_block_t status_blocks_arr[MAX_NDSB];
/* Last acknowledged U/C storm SB indices, one per NDSB. */
u16_t u_hc_ack[MAX_NDSB];
u16_t c_hc_ack[MAX_NDSB];
u16_t hc_def_ack;
u16_t _hc_pad;
/* Attention handling: last acked index, current state, per-group routing
 * outputs and the address/mask bookkeeping for the AEU registers. */
u16_t attn_def_ack;
u16_t attn_state;
route_cfg_sig_output attn_groups_output[MAX_DYNAMIC_ATTN_GRPS];
u32_t attn_sig_af_inv_reg_addr[MAX_ATTN_REGS];
u8_t num_attn_sig_regs;
u32_t aeu_mask_attn_func;
lm_status_t link_status;
lm_int_coalesing_info int_coal;
u8_t eth_init_state;
#define PORT_STATE_CLOSE 0
#define PORT_STATE_OPEN 1
#define PORT_STATE_CLOSING 2
lm_medium_t medium;
lm_flow_control_t flow_control;
lm_eee_policy_t eee_policy;
u32_t autogreeen;
lm_stats_all_t stats ;
/* DMAE may only be used once this flag is set (after HW init). */
#define DMAE_READY(pdev) (pdev->vars.b_is_dmae_ready)
u8_t b_is_dmae_ready ;
lm_nig_mirror_t nig_mirror;
/* MCP firmware mailbox sequencing / pulse state. */
u16_t fw_wr_seq;
u8_t fw_timed_out;
u32_t fw_port_stats_ptr;
u32_t fw_func_stats_ptr;
mac_type_t mac_type;
u8_t phy_addr;
u8_t cable_is_attached;
u16_t drv_pulse_wr_seq;
/* Page tables (virt + phys per page) for searcher T1/T2, timers block,
 * QM queues, and the CDU context memory. */
u32_t searcher_t1_num_pages;
void **searcher_t1_virt_addr_table;
lm_address_t *searcher_t1_phys_addr_table;
u32_t searcher_t2_num_pages;
void **searcher_t2_virt_addr_table;
lm_address_t *searcher_t2_phys_addr_table;
u32_t timers_linear_num_pages;
void **timers_linear_virt_addr_table;
lm_address_t *timers_linear_phys_addr_table;
u32_t qm_queues_num_pages;
void** qm_queues_virt_addr_table;
lm_address_t *qm_queues_phys_addr_table;
u32_t context_cdu_num_pages;
void **context_cdu_virt_addr_table;
lm_address_t *context_cdu_phys_addr_table;
u32_t elt_num_pages;
void * elt_virt_addr_table[NUM_OF_ELT_PAGES];
lm_address_t elt_phys_addr_table[NUM_OF_ELT_PAGES];
u32_t zero_buffer[DMAE_MAX_RW_SIZE_STATIC] ;
u32_t clk_factor ;
u32_t inst_id; /* encodes the PCI bus number in bits 15:8 (see below) */
#ifndef INST_ID_TO_BUS_NUM
#define INST_ID_TO_BUS_NUM(_inst_id) (((_inst_id) >> 8)& 0xFF)
#define MAX_PCI_BUS_NUM (256)
#endif
#ifndef USER_LINUX
#define EMULATION_DOORBELL_FULL_WORKAROUND
#endif
/* Workaround for doorbell-queue overflow on emulation: count doorbells
 * and block once the high watermark is crossed until drained to the low. */
#if defined(EMULATION_DOORBELL_FULL_WORKAROUND)
u32_t doorbells_cnt;
#define DOORBELL_CHECK_FREQUENCY 500
#define ALLOWED_DOORBELLS_HIGH_WM 1000
#define ALLOWED_DOORBELLS_LOW_WM 700
u8_t doorbells_blocked;
u32_t doorbells_high_wm_reached;
#endif
u8_t enable_intr;
/* Debug counters for interrupts arriving in unexpected states. */
u8_t dbg_intr_in_wrong_state;
u8_t dbg_intr_in_disabled;
u8_t dbg_intr_zero_status;
pmf_type_t is_pmf;
#define IS_PMF(_pdev) (( PMF_ORIGINAL == (_pdev)->vars.is_pmf) || ( PMF_MIGRATION == (_pdev)->vars.is_pmf))
#define IS_PMF_ORIGINAL(_pdev) ( PMF_ORIGINAL == (_pdev)->vars.is_pmf)
#define IS_PMF_MIGRATION(_pdev) ( PMF_MIGRATION == (_pdev)->vars.is_pmf)
lm_loader_response load_code;
u8_t b_in_init_reset_flow;
u8_t _pad[3];
lm_reported_link_params_t last_reported_link_params;
struct elink_vars link;
u32_t link_chng_cnt;
#define LM_LINK_CHNG_CNT(pdev) ((pdev)->vars.link_chng_cnt)
u32_t shared_l5_mac_client_id;
u64_t last_recycling_timestamp;
/* VF-side flags describing how the PF responded to MAC requests. */
u8_t is_igu_test_mode;
u8_t is_pf_restricts_lamac;
u8_t is_pf_rejected_lamac;
u8_t is_pf_provides_mac;
u16_t pf_link_speed;
u16_t __pad;
u32_t vf_pf_channel_lock;
lm_vf_pf_message_t vf_pf_mess;
/* Debug counters for PXP/DQ/CFC interrupt status events. */
u32_t pxp_hw_interrupts_cnt;
u32_t dq_int_status_cnt;
u32_t dq_int_status_discard_cnt;
u32_t dq_int_status_vf_val_err_cnt;
u32_t dq_vf_type_val_err_fid;
u32_t dq_vf_type_val_err_mcid;
u32_t cfc_int_status_cnt;
} lm_variables_t;
/* Tx producer pair written to the doorbell: packet producer and BD
 * producer (reserved field pads the structure). */
typedef struct _eth_tx_prod_t
{
u32_t packets_prod;
u16_t bds_prod;
u16_t reserved;
}eth_tx_prod_t;
/* Per-chip (not per-function) state shared by all functions on one PCI
 * bus: reset-in-progress flags, ignored GRC timeout accounting, and the
 * per-function enable flags.  Indexed globally by bus number below. */
typedef struct _lm_chip_global_t
{
u8_t flags;
#define LM_CHIP_GLOBAL_FLAG_RESET_IN_PROGRESS 0x1
#define LM_CHIP_GLOBAL_FLAG_NIG_RESET_CALLED 0x2
u32_t cnt_grc_timeout_ignored;
u32_t grc_timeout_val[E1H_FUNC_MAX*2];
u8_t func_en[E1H_FUNC_MAX];
} lm_chip_global_t;
/* One slot per possible PCI bus (see MAX_PCI_BUS_NUM). */
extern lm_chip_global_t g_lm_chip_global[MAX_PCI_BUS_NUM];
/* One transmit chain: its BD ring, producer bookkeeping, the HW consumer
 * index pointer (chip-updated, hence volatile), coalesce-buffer and
 * LSO-split accounting, and the active/coalesce packet lists. */
typedef struct _lm_tx_chain_t
{
u32_t idx;
lm_bd_chain_t bd_chain;
eth_tx_prod_t eth_tx_prods;
u32_t prod_bseq;
u16_t pkt_idx;
u16_t volatile *hw_con_idx_ptr;
u16_t coalesce_buf_cnt;
u16_t _reserved;
u32_t coalesce_buf_used;
u32_t lso_split_used;
lm_hc_sb_info_t hc_sb_info;
s_list_t active_descq;
s_list_t coalesce_buf_list;
} lm_tx_chain_t;
/* All Tx chains (3 CoS per HW chain plus the non-RSS chains) and the
 * catchup/forwarding counters. */
typedef struct _lm_tx_info_t
{
lm_tx_chain_t chain[3*MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
#define LM_TXQ(_pdev, _idx) (_pdev)->tx_info.chain[_idx]
u32_t max_chain_idx;
u32_t catchup_chain_idx;
u32_t forward_packets;
u32_t lso_forward_packets;
} lm_tx_info_t;
/* Producer state shared by regular Rx chains and TPA chains: BD producer
 * (excluding next-page BDs), byte-sequence producer, descriptor count,
 * and the free descriptor list. */
typedef struct _lm_rx_chain_common_t
{
u16_t bd_prod_without_next;
u32_t prod_bseq;
u32_t desc_cnt;
s_list_t free_descq;
} lm_rx_chain_common_t;
/* TPA (transparent packet aggregation / LRO) sizing constants: maximum
 * concurrent aggregations, SGE descriptor sizing, and the SGE pause
 * thresholds used for flow control. */
#define LM_TPA_MAX_AGGS (max(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,ETH_MAX_AGGREGATION_QUEUES_E1))
#define LM_TPA_MAX_AGG_SIZE (8)
#define LM_TPA_MIN_DESC (LM_TPA_MAX_AGGS * LM_TPA_MAX_AGG_SIZE * 2)
#define LM_TPA_BD_ELEN_SIZE (sizeof(struct eth_rx_sge))
#define LM_TPA_PAGE_BITS (LM_PAGE_BITS)
#define LM_TPA_PAGE_SIZE (1 << LM_TPA_PAGE_BITS)
#define LM_TPA_SGE_PAUSE_THR_LOW (150)
#define LM_TPA_SGE_PAUSE_THR_HIGH (250)
/* Debug counters for the TPA chain (note: "cahin" typo is part of the
 * established identifier and is kept for compatibility). */
typedef struct _lm_tpa_cahin_dbg_params
{
u64_t pck_received;
u64_t pck_received_ind;
u64_t pck_ret_from_chip;
u64_t pck_ret_abort_active;
u64_t pck_ret_abort;
}lm_tpa_cahin_dbg_params;
/* TPA feature state machine: disabled -> waiting for in-flight packets
 * to drain -> enabled; "invalid" is the error/uninitialized state. */
typedef enum
{
lm_tpa_state_disable = 0,
lm_tpa_state_wait_packets = 1,
lm_tpa_state_enable = 2,
lm_tpa_state_invalid = 3,
}lm_tpa_state_t;
/* TPA SGE ring plus its parallel active-descriptor array and a bit vector
 * marking which SGE entries are in use.  The *_MASK macros rely on the
 * ring size being a power of two. */
typedef struct _lm_tpa_sge_chain_t
{
lm_bd_chain_t bd_chain;
#define LM_TPA_CHAIN_BD(_pdev, _idx) ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.sge_chain.bd_chain)
#define LM_TPA_CHAIN_BD_NUM_ELEM(_pdev, _idx) ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.sge_chain.size)
#define LM_TPA_CHAIN_BD_MASK(_pdev, _idx) (LM_TPA_CHAIN_BD_NUM_ELEM(_pdev,_idx) - 1)
lm_packet_t** active_descq_array;
#define LM_TPA_ACTIVE_DESCQ_ARRAY_ELEM(_pdev,_idx) (LM_TPA_CHAIN_BD_NUM_ELEM(_pdev,_idx))
#define LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(_pdev,_idx,_entry) DbgBreakIf((LM_TPA_ACTIVE_DESCQ_ARRAY_ELEM(_pdev,_idx) <= (_entry)))
#define LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(_pdev,_idx,_x) ((_x) & LM_TPA_CHAIN_BD_MASK(_pdev,_idx))
/* One bit per active entry, packed in 64-bit words (BIT_VEC64_*). */
u64_t* mask_array;
#define LM_TPA_MASK_LEN(_pdev,_idx) ((LM_TPA_CHAIN_BD_NUM_ELEM(_pdev,_idx)) / \
BIT_VEC64_ELEM_SZ)
#define LM_TPA_MASK_MASK(_pdev, _idx) (LM_TPA_MASK_LEN(_pdev, _idx) - 1)
#define LM_TPA_MASK_NEXT_ELEM(_pdev, _idx, el) (((el) + 1) & LM_TPA_MASK_MASK(_pdev, _idx))
#define LM_TPA_BD_ENTRY_TO_MASK_ENTRY(_pdev,_idx,_x) (LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(_pdev,_idx,_x) >> BIT_VEC64_ELEM_SHIFT)
#define LM_TPA_MASK_SET_ACTIVE_BIT(_pdev,_idx,_active_entry) LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(_pdev,_idx,_active_entry); \
BIT_VEC64_SET_BIT((&LM_SGE_TPA_CHAIN(_pdev,_idx))->mask_array,_active_entry)
#define LM_TPA_MASK_CLEAR_ACTIVE_BIT(_pdev,_idx,_active_entry) DbgBreakIf(0 == LM_TPA_MASK_TEST_ACTIVE_BIT(_pdev,_idx,_active_entry)); \
LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(_pdev,_idx,_active_entry); \
BIT_VEC64_CLEAR_BIT((&LM_SGE_TPA_CHAIN(_pdev,_idx))->mask_array,_active_entry)
#define LM_TPA_MASK_TEST_ACTIVE_BIT(_pdev,_idx,_active_entry) (BIT_VEC64_TEST_BIT((&LM_SGE_TPA_CHAIN(_pdev,_idx))->mask_array,_active_entry))
u16_t size;
u32_t last_max_con;
}lm_tpa_sge_chain_t;
/* Per-aggregation slot: the packet that opened the aggregation and a
 * flag marking the slot as occupied. */
typedef struct _lm_tpa_start_coales_bd_t
{
lm_packet_t* packet;
u8_t is_entry_used;
}lm_tpa_start_coales_bd_t;
/* Per-Rx-queue TPA state: common producer state, the open-aggregation
 * table, feature state, the SGE chain, the tpa_update ramrod buffer
 * (virt + DMA) and debug counters. */
typedef struct _lm_tpa_chain_t
{
lm_rx_chain_common_t common;
lm_tpa_start_coales_bd_t start_coales_bd[LM_TPA_MAX_AGGS];
lm_tpa_state_t state;
lm_tpa_sge_chain_t sge_chain;
struct tpa_update_ramrod_data* ramrod_data_virt;
lm_address_t ramrod_data_phys;
lm_tpa_cahin_dbg_params dbg_params;
}lm_tpa_chain_t;
/* Device-wide TPA control: the shared tpa_update ramrod buffer, pending
 * ramrod tracking, and the requested vs currently-applied IPv4/IPv6
 * enable bits. */
typedef struct _lm_tpa_info_t
{
struct tpa_update_ramrod_data* ramrod_data_virt;
lm_address_t ramrod_data_phys;
volatile void * update_cookie;
volatile u32_t ramrod_recv_cnt;
volatile u32_t state;
#define TPA_STATE_NONE 0
#define TPA_STATE_RAMROD_SENT 1
u8_t ipvx_enabled_required;
u8_t ipvx_enabled_current;
#define TPA_IPVX_DISABLED (0)
#define TPA_IPV4_ENABLED (1<<0)
#define TPA_IPV6_ENABLED (1<<1)
}lm_tpa_info_t;
/* Index into an Rx queue's chain_arr: the BD chain and (optionally) the
 * SGE chain used for header/data split. */
typedef enum
{
LM_RXQ_CHAIN_IDX_BD = 0,
LM_RXQ_CHAIN_IDX_SGE = 1,
LM_RXQ_CHAIN_IDX_MAX = 2,
} lm_rxq_chain_idx_t ;
/* One receive queue: common producer state, its BD (and optional SGE)
 * rings, the per-queue TPA chain, header/lookahead size, returned-bytes
 * accounting, the HW consumer index pointer, and the active packet list. */
typedef struct _lm_rx_chain_t
{
lm_rx_chain_common_t common;
u32_t idx;
lm_bd_chain_t chain_arr[LM_RXQ_CHAIN_IDX_MAX];
lm_tpa_chain_t tpa_chain;
u32_t lah_size; /* lookahead/header size; non-zero => SGE chain in use */
u32_t ret_bytes;
u32_t ret_bytes_last_fw_update;
u16_t volatile *hw_con_idx_ptr;
lm_hc_sb_info_t hc_sb_info;
s_list_t active_descq;
} lm_rx_chain_t;
/* Slow-path (ramrod) queue ring: element array, DMA address, producer/
 * consumer indices, cached producer/last BD pointers, and free slots. */
typedef struct _lm_sq_chain_t
{
struct slow_path_element *sq_chain_virt;
lm_address_t bd_chain_phy;
u16_t prod_idx;
u16_t con_idx;
struct slow_path_element *prod_bd;
struct slow_path_element *last_bd;
u16_t bd_left;
} lm_sq_chain_t;
/* Event queue (EQ) ring: BD chain, HW consumer pointer, and the IRO
 * offset at which the firmware expects the producer update. */
typedef struct _lm_eq_chain_t
{
lm_bd_chain_t bd_chain;
u16_t volatile *hw_con_idx_ptr;
u16_t iro_prod_offset;
lm_hc_sb_info_t hc_sb_info;
} lm_eq_chain_t;
/* Receive completion queue: BD chain, byte-sequence producer, HW
 * consumer pointer, and the firmware producer-update (IRO) offset. */
typedef struct _lm_rcq_chain_t
{
u32_t idx;
lm_bd_chain_t bd_chain;
u32_t prod_bseq;
u16_t volatile *hw_con_idx_ptr;
u16_t iro_prod_offset;
lm_hc_sb_info_t hc_sb_info;
} lm_rcq_chain_t;
/* All Rx state: the Rx queues with their paired completion queues,
 * device-wide TPA info, and the approximate-match multicast table.
 * The LM_* macros are the canonical accessors used throughout the driver. */
typedef struct _lm_rx_info_t
{
lm_rx_chain_t rxq_chain[MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
lm_rcq_chain_t rcq_chain[MAX_HW_CHAINS + MAX_NON_RSS_CHAINS];
#define LM_RXQ(_pdev, _idx) (_pdev)->rx_info.rxq_chain[_idx]
#define LM_RXQ_COMMON(_pdev, _idx) ((_pdev)->rx_info.rxq_chain[_idx].common)
#define LM_RXQ_CHAIN(_pdev, _idx, _rxq_chain_idx) (_pdev)->rx_info.rxq_chain[_idx].chain_arr[_rxq_chain_idx]
#define LM_RXQ_CHAIN_BD(_pdev, _idx) LM_RXQ_CHAIN(_pdev, _idx, LM_RXQ_CHAIN_IDX_BD )
#define LM_RXQ_CHAIN_SGE(_pdev, _idx) LM_RXQ_CHAIN(_pdev, _idx, LM_RXQ_CHAIN_IDX_SGE )
#define LM_RXQ_IS_CHAIN_SGE_VALID(_pdev, _idx) (0 != (_pdev)->rx_info.rxq_chain[_idx].lah_size)
#define LM_RXQ_SGE_PTR_IF_VALID(_pdev, _idx) LM_RXQ_IS_CHAIN_SGE_VALID(_pdev, _idx) ? &LM_RXQ_CHAIN_SGE(_pdev, _idx ) : NULL
#define LM_RCQ(_pdev, _idx) (_pdev)->rx_info.rcq_chain[_idx]
#define LM_TPA(_pdev, _idx) ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain)
#define LM_TPA_COMMON(_pdev, _idx) ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.common)
#define LM_SGE_TPA_CHAIN(_pdev, _idx) ((_pdev)->rx_info.rxq_chain[_idx].tpa_chain.sge_chain)
lm_tpa_info_t tpa_info;
#define LM_TPA_INFO(_pdev) ((_pdev)->rx_info.tpa_info)
struct tstorm_eth_approximate_match_multicast_filtering appr_mc;
} lm_rx_info_t;
/* At most this many ramrods may be outstanding at once. */
#define MAX_RAMRODS_OUTSTANDING 2
/* A queued slow-path (set/query information) request, carried on the SQ
 * pending list via sp_list_command. */
typedef struct _lm_request_sp
{
u8_t req_type;
#define REQ_SET_INFORMATION 0x1
#define REQ_QUERY_INFORMATION 0x2
u32_t ioc;
u8_t ok_to_indicate;
void *clnt_blk;
u8_t ramrod_priority;
struct sq_pending_command sp_list_command;
} lm_request_sp;
/* Ramrod payload for client setup: either the full client-init data or
 * the tx-queue-only variant (one is used per ramrod). */
typedef union _client_init_data_t{
struct client_init_ramrod_data init_data;
struct tx_queue_init_ramrod_data tx_queue;
} client_init_data_t;
/* In-flight client_update ramrod: its data buffer (virt + DMA) and a
 * state word tracking not-used / posted / completion-received. */
typedef struct _lm_client_info_update
{
struct client_update_ramrod_data *data_virt;
lm_address_t data_phys;
volatile u32_t state;
#define LM_CLI_UPDATE_NOT_USED 0
#define LM_CLI_UPDATE_USED 1
#define LM_CLI_UPDATE_RECV 2
}lm_client_info_update;
/* Per-client (per L2 queue) state: the client-init ramrod buffer, the
 * pending update, the ecore MAC/VLAN classification objects, and the
 * slow-path cookies/state words for MAC and rx-mode operations. */
typedef struct _lm_client_info_t
{
client_init_data_t * client_init_data_virt;
lm_address_t client_init_data_phys;
lm_client_info_update update;
struct ecore_vlan_mac_obj mac_obj;
struct ecore_vlan_mac_obj mac_vlan_obj;
struct ecore_vlan_mac_obj vlan_obj;
u16_t current_set_vlan;
void * volatile set_mac_cookie;
volatile u32_t sp_mac_state;
void * volatile set_rx_mode_cookie;
volatile unsigned long sp_rxmode_state;
u32_t last_set_rx_mask;
u8_t b_any_vlan_on;
u8_t b_vlan_only_in_process;
} lm_client_info_t ;
/* Per-connection-type callback invoked when a slow-path command completes. */
typedef void(*lm_sq_comp_cb_t)(struct _lm_device_t *pdev, struct sq_pending_command *pending);
/* SQ posting state: normal, pending (queue full, commands deferred), or
 * blocked (no posting allowed, e.g. during error recovery). */
typedef enum {
SQ_STATE_NORMAL = 0,
SQ_STATE_PENDING = 1,
SQ_STATE_BLOCKED = 2
} lm_sq_state_t;
/* Slow-path queue bookkeeping: the ring itself, counts and lists of
 * pending normal/high-priority commands, completed-but-unprocessed
 * commands, the SQ state, and per-connection-type completion callbacks. */
typedef struct _lm_sq_info_t
{
lm_sq_chain_t sq_chain;
u8_t num_pending_normal;
u8_t num_pending_high;
d_list_t pending_normal;
d_list_t pending_high;
d_list_t pending_complete;
lm_sq_state_t sq_state;
lm_sq_comp_cb_t sq_comp_cb[MAX_CONNECTION_TYPE];
u8_t sq_comp_scheduled;
} lm_sq_info_t;
/* Ramrod progress for the common function start/stop sequence. */
typedef enum {
FUNCTION_START_POSTED = 0,
FUNCTION_START_COMPLETED = 1,
FUNCTION_STOP_POSTED = 2,
FUNCTION_STOP_COMPLETED = 3
} lm_function_state_t;
typedef struct _lm_eq_info_t
{
lm_eq_chain_t eq_chain;
volatile u32_t function_state;
} lm_eq_info_t;
/* Number of protocol slots (per-CID cookies, callbacks) — one per connection type. */
#define MAX_PROTO (FCOE_CONNECTION_TYPE + 1)
#if 0
#define LM_PROTO_NIC 0
#define LM_PROTO_TOE 1
#endif
/* Per-CID resources: slow-path request manager, per-protocol cookies, the
 * mapped doorbell area, and the connection state machine. */
typedef struct _lm_cid_resc_t
{
lm_sp_req_manager_t sp_req_mgr;
void *cookies[MAX_PROTO];          /* per-protocol client cookies */
u8_t cid_pending;
#if defined(__SunOS)
ddi_acc_handle_t reg_handle;       /* access handle for the mapped CID doorbell bar */
#endif
volatile void *mapped_cid_bar_addr; /* doorbell area for this CID */
volatile u32_t con_state;           /* one of LM_CON_STATE_* below */
#define LM_CON_STATE_CLOSE 0
#define LM_CON_STATE_OPEN_SENT 1
#define LM_CON_STATE_OPEN 2
#define LM_CON_STATE_HALT_SENT 3
#define LM_CON_STATE_HALT 4
#define LM_CON_STATE_TERMINATE 5
} lm_cid_resc_t;
/* One entry of the CID-to-context table: per-CID resources plus free-list
 * links and CFC-delete retry bookkeeping. */
struct lm_context_cookie{
lm_cid_resc_t cid_resc;
u32_t next;              /* next index in the per-protocol free/pending list */
u32_t prev;
u8_t invalid;            /* LM_CONTEXT_VALID / _INVALID_WAIT / _INVALID_DELETE */
u8_t ip_type;
u8_t cfc_delete_cnt;     /* CFC-delete retries so far (bounded below) */
u8_t _pad;
u32_t h_val;             /* searcher hash value for this connection */
};
#define LM_MAX_VALID_CFC_DELETIONS 3
#define LM_CONTEXT_VALID 0
#define LM_CONTEXT_INVALID_WAIT 1
#define LM_CONTEXT_INVALID_DELETE 2
#define LM_CONTEXT_SIZE 1024
/* One bucket of the searcher (connection lookup) hash statistics. */
typedef struct _lm_searcher_hash_entry {
u8_t num_ipv4;       /* ipv4 connections hashed to this bucket */
u8_t num_ipv6;       /* ipv6 connections hashed to this bucket */
u8_t depth_ipv4;     /* collision-chain depth contributed by ipv4 */
} lm_searcher_hash_entry_t;
/* Searcher hash configuration: key material (kept both as bytes and as
 * expanded per-bit arrays) and the bucket table used to track depth. */
typedef struct _lm_searcher_hash_info {
#define SEARCHER_KEY_LEN 40
u8_t searcher_key[SEARCHER_KEY_LEN];
u8_t searcher_key_bits[SEARCHER_KEY_LEN*8];      /* key expanded to one byte per bit */
#define MAX_SEARCHER_IN_STR 36
u8_t searcher_in_str_bits[MAX_SEARCHER_IN_STR*8]; /* input string expanded per bit */
lm_searcher_hash_entry_t *searcher_table;
u32_t num_tuples;
u8_t hash_depth_reached;    /* note: informational flag, updated as connections are added */
u8_t num_hash_bits;
} lm_searcher_hash_info_t;
/* The CID allocator: a flat cookie array partitioned per protocol into
 * [proto_start, proto_end] ranges, each with its own free/pending list heads. */
typedef struct _lm_context_info {
struct lm_context_cookie * array;
u32_t proto_start[MAX_PROTO];     /* first CID owned by each protocol */
u32_t proto_end[MAX_PROTO];       /* last CID owned by each protocol */
u32_t proto_ffree[MAX_PROTO];     /* head of the free list per protocol */
u32_t proto_pending[MAX_PROTO];   /* head of the pending-delete list per protocol */
lm_searcher_hash_info_t searcher_hash;
} lm_context_info_t;
#include "lm_l4st.h"
#include "lm_l4if.h"
#include "lm_l5st.h"
#include "lm_l5if.h"
/* Upper-layer offload bookkeeping: L4 parameters plus one state block slot
 * per offload protocol (TOE / iSCSI / RDMA / FCoE). */
typedef struct _lm_offload_info_t
{
struct _lm_device_t *pdev;
l4_ofld_params_t l4_params;
#define STATE_BLOCK_IDX0 0
#define STATE_BLOCK_TOE STATE_BLOCK_IDX0
#define STATE_BLOCK_IDX1 1
#define STATE_BLOCK_IDX2 2
#define STATE_BLOCK_ISCSI STATE_BLOCK_IDX2
#define STATE_BLOCK_IDX3 3
#define STATE_BLOCK_RDMA STATE_BLOCK_IDX3
#define STATE_BLOCK_IDX4 4
#define STATE_BLOCK_FCOE STATE_BLOCK_IDX4
#define STATE_BLOCK_CNT 5
lm_state_block_t *state_blks[STATE_BLOCK_CNT];
} lm_offload_info_t;
/* Callback invoked when a CID finishes CFC-delete and may be reused. */
typedef void(*lm_cid_recycled_cb_t)(struct _lm_device_t *pdev, void *cookie, s32_t cid);
/* Internal RAM offset descriptor from the FW init arrays: base address plus
 * up to three per-index multipliers and the element size. */
struct iro {
u32_t base;
u16_t m1;
u16_t m2;
u16_t m3;
u16_t size;
} ;
/* Pointers into the firmware init material (ops, data, STORM pram/int
 * tables, IRO array) plus a gunzip scratch buffer used while loading it. */
typedef struct _ecore_info_t
{
void * gunzip_buf;            /* scratch buffer for decompressing init data */
u32_t gunzip_outlen;
lm_address_t gunzip_phys;
#define FW_BUF_SIZE 0x8000
#define GUNZIP_BUF(_pdev) (_pdev)->ecore_info.gunzip_buf
#define GUNZIP_OUTLEN(_pdev) (_pdev)->ecore_info.gunzip_outlen
#define GUNZIP_PHYS(_pdev) (_pdev)->ecore_info.gunzip_phys
const struct raw_op *init_ops;
const u16_t *init_ops_offsets;   /* per-block offsets into init_ops */
const u32_t *init_data;
u32_t init_mode_flags;
#define INIT_MODE_FLAGS(_pdev) (_pdev)->ecore_info.init_mode_flags
const u8_t *tsem_int_table_data;
const u8_t *tsem_pram_data;
const u8_t *usem_int_table_data;
const u8_t *usem_pram_data;
const u8_t *xsem_int_table_data;
const u8_t *xsem_pram_data;
const u8_t *csem_int_table_data;
const u8_t *csem_pram_data;
#define INIT_OPS(_pdev) (_pdev)->ecore_info.init_ops
#define INIT_DATA(_pdev) (_pdev)->ecore_info.init_data
#define INIT_OPS_OFFSETS(_pdev) (_pdev)->ecore_info.init_ops_offsets
#define INIT_TSEM_PRAM_DATA(_pdev) (_pdev)->ecore_info.tsem_pram_data
#define INIT_XSEM_PRAM_DATA(_pdev) (_pdev)->ecore_info.xsem_pram_data
#define INIT_USEM_PRAM_DATA(_pdev) (_pdev)->ecore_info.usem_pram_data
#define INIT_CSEM_PRAM_DATA(_pdev) (_pdev)->ecore_info.csem_pram_data
#define INIT_TSEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.tsem_int_table_data
#define INIT_XSEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.xsem_int_table_data
#define INIT_USEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.usem_int_table_data
#define INIT_CSEM_INT_TABLE_DATA(_pdev) (_pdev)->ecore_info.csem_int_table_data
const struct iro *iro_arr;
#define INIT_IRO_ARRAY(_pdev) (_pdev)->ecore_info.iro_arr
#define IRO (PFDEV(pdev))->ecore_info.iro_arr
} ecore_info_t;
/* Counters collected while executing an FLR (function level reset) flow:
 * poll counts for each HW block that must drain before final cleanup. */
typedef struct _flr_stats_t {
u32_t is_pf;
u32_t default_wait_interval_ms;
u32_t cfc_usage_counter;
u32_t qm_usage_counter;
u32_t tm_vnic_usage_counter;
u32_t tm_num_scans_usage_counter;
u32_t dq_usage_counter;
u32_t final_cleanup_complete;
u32_t dmae_cx;
u32_t pbf_queue[3];
u32_t pbf_transmit_buffer[3];
} flr_stats_t;
/* DMA-able ramrod data buffers shared by the slow-path flows. Each union
 * holds the E1x-format and E2-format layouts of the same command; the chip
 * generation decides which member is filled. */
typedef struct _lm_slowpath_data_t {
struct function_start_data * func_start_data;
lm_address_t func_start_data_phys;
union {
struct mac_configuration_cmd e1x;
struct eth_classify_rules_ramrod_data e2;
} * mac_rdata[LM_CLI_IDX_MAX];
lm_address_t mac_rdata_phys[LM_CLI_IDX_MAX];
union {
struct tstorm_eth_mac_filter_config e1x;
struct eth_filter_rules_ramrod_data e2;
} * rx_mode_rdata[LM_CLI_IDX_MAX];
lm_address_t rx_mode_rdata_phys[LM_CLI_IDX_MAX];
union {
struct mac_configuration_cmd e1;
struct eth_multicast_rules_ramrod_data e2;
} * mcast_rdata[LM_CLI_IDX_MAX];
lm_address_t mcast_rdata_phys[LM_CLI_IDX_MAX];
union {
struct eth_rss_update_ramrod_data e2;
} * rss_rdata;
lm_address_t rss_rdata_phys;
struct function_update_data* niv_function_update_data;
lm_address_t niv_function_update_data_phys;
struct function_update_data* l2mp_func_update_data;
lm_address_t l2mp_func_update_data_phys;
struct function_update_data* encap_function_update_data;
lm_address_t encap_function_update_data_phys;
struct function_update_data* ufp_function_update_data;
lm_address_t ufp_function_update_data_phys;
} lm_slowpath_data_t ;
/* Progress of the outstanding NIV (VIF) function-update ramrod, if any. */
typedef enum _niv_ramrod_state_t
{
NIV_RAMROD_NOT_POSTED,
NIV_RAMROD_VIF_UPDATE_POSTED,
NIV_RAMROD_VIF_LISTS_POSTED,
NIV_RAMROD_SET_LOOPBACK_POSTED,
NIV_RAMROD_CLEAR_LOOPBACK_POSTED,
NIV_RAMROD_COMPLETED
}niv_ramrod_state_t;
/* Progress of the outstanding UFP function-update ramrod, if any. */
typedef enum _ufp_ramrod_state_t
{
UFP_RAMROD_NOT_POSTED,
UFP_RAMROD_PF_LINK_UPDATE_POSTED,
UFP_RAMROD_PF_UPDATE_POSTED,
UFP_RAMROD_COMPLETED
}ufp_ramrod_state_t;
/* Slow-path state: the ramrod data buffers above, ecore objects for
 * classification/rx-mode/mcast/RSS, and per-flow ramrod state machines. */
typedef struct _lm_slowpath_info_t {
lm_slowpath_data_t slowpath_data;
#define LM_SLOWPATH(pdev, var) (pdev->slowpath_info.slowpath_data.var)
#define LM_SLOWPATH_PHYS(pdev, var) (pdev->slowpath_info.slowpath_data.var##_phys)
struct ecore_credit_pool_obj vlans_pool;
struct ecore_credit_pool_obj macs_pool;
struct ecore_rx_mode_obj rx_mode_obj;
struct ecore_mcast_obj mcast_obj[LM_CLI_IDX_MAX];
volatile void * set_mcast_cookie[LM_CLI_IDX_MAX];  /* async mcast completion cookies */
volatile u32_t sp_mcast_state[LM_CLI_IDX_MAX];
struct ecore_rss_config_obj rss_conf_obj;
volatile void * set_rss_cookie;
volatile u32_t sp_rss_state;
u32_t rss_hash_key[RSS_HASH_KEY_SIZE/4];
u32_t last_set_rss_flags;
u32_t last_set_rss_result_mask;
u8 last_set_indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; /* NOTE(review): `u8` here vs `u8_t` elsewhere — same type, inconsistent spelling */
#define FUNC_UPDATE_RAMROD_NO_SOURCE 0
#define FUNC_UPDATE_RAMROD_SOURCE_NIV 1
#define FUNC_UPDATE_RAMROD_SOURCE_L2MP 2
#define FUNC_UPDATE_RAMROD_SOURCE_ENCAP 3
#define FUNC_UPDATE_RAMROD_SOURCE_UFP 4
volatile u32_t niv_ramrod_state;              /* niv_ramrod_state_t value */
volatile u32_t l2mp_func_update_ramrod_state; /* one of the L2MP_* states below */
#define L2MP_FUNC_UPDATE_RAMROD_NOT_POSTED 0
#define L2MP_FUNC_UPDATE_RAMROD_POSTED 1
#define L2MP_FUNC_UPDATE_RAMROD_COMPLETED 2
volatile u8_t last_vif_list_bitmap;
volatile u32_t ufp_func_ramrod_state;         /* ufp_ramrod_state_t value */
} lm_slowpath_info_t;
/* Ring buffer depth for error-recovery attention snapshots. */
#define MAX_ER_DEBUG_ENTRIES 10
/* One snapshot of the attention signal registers taken during error recovery. */
typedef struct _lm_er_debug_info_t
{
u32_t attn_sig[MAX_ATTN_REGS];
} lm_er_debug_info_t;
typedef enum _encap_ofld_state_t
{
ENCAP_OFFLOAD_DISABLED,
ENCAP_OFFLOAD_ENABLED
} encap_ofld_state_t;
/* Requested vs. applied encapsulated-packet offload state, plus the cookie
 * of the in-flight function-update ramrod that applies the change. */
typedef struct _lm_encap_info_t
{
u8_t new_encap_offload_state;
u8_t current_encap_offload_state;
volatile void * update_cookie;
}lm_encap_info_t;
/* Debug-only counters: interrupt accounting per HW chain, error-recovery
 * history, and assorted rare-event counters. Not used for logic decisions. */
typedef struct _lm_debug_info_t
{
u32_t ack_dis[MAX_HW_CHAINS];
u32_t ack_en[MAX_HW_CHAINS];
u32_t ack_def_dis;
u32_t ack_def_en;
u32_t rx_only_int[MAX_HW_CHAINS];
u32_t tx_only_int[MAX_HW_CHAINS];
u32_t both_int[MAX_HW_CHAINS];
u32_t empty_int[MAX_HW_CHAINS];
u32_t false_int[MAX_HW_CHAINS];
u32_t not_porocessed_int[MAX_HW_CHAINS];  /* NOTE(review): "porocessed" typo is frozen in the field name */
lm_er_debug_info_t er_debug_info[MAX_ER_DEBUG_ENTRIES];
u8_t curr_er_debug_idx;                   /* next write slot in er_debug_info */
u8_t er_bit_is_set_already;
u8_t er_bit_from_previous_sessions;
u8_t _pad;
u32_t number_of_long_LSO_headers;
u32_t pending_tx_packets_on_fwd;
u32_t pf0_mps_overwrite;
u32_t toe_rx_comp_upon_er;
u32_t toe_tx_comp_upon_er;
u32_t toe_prealloc_alloc_fail;
} lm_debug_info_t;
/* Client driver version as published to management FW shared memory. */
typedef struct _lm_cli_drv_ver_to_shmem_t
{
struct os_drv_ver cli_drv_ver;
}lm_cli_drv_ver_to_shmem_t;
/* The main per-function device object: aggregates all LM-layer state
 * (hardware info, fast-path rx/tx, slow-path, offload protocols, DMAE,
 * parameters, context allocator) for one PCI function (PF or VF). */
typedef struct _lm_device_t
{
d_list_entry_t link;            /* link in the global device list */
u32_t ver_num;                  /* driver version, numeric form */
u8_t ver_str[16];               /* driver version, printable form */
u32_t ver_num_fw;               /* bootcode/FW version, numeric form */
u8_t product_version[4];        /* OEM product version (when supplied) */
lm_variables_t vars;
lm_tx_info_t tx_info;
lm_rx_info_t rx_info;
lm_sq_info_t sq_info;
lm_eq_info_t eq_info;
lm_client_info_t client_info[ETH_MAX_RX_CLIENTS_E2];
lm_offload_info_t ofld_info;
lm_toe_info_t toe_info;
lm_dcbx_info_t dcbx_info;
lm_hardware_info_t hw_info;
lm_slowpath_info_t slowpath_info;
lm_dmae_info_t dmae_info;
lm_params_t params;
lm_context_info_t* context_info;  /* CID allocator (shared via PF for VFs) */
lm_nwuf_list_t nwuf_list;         /* wake-up frame patterns */
i2c_binary_info_t i2c_binary_info;
u32_t chip_reset_cnt;
u32_t fw_timed_out_cnt;
lm_cid_recycled_cb_t cid_recycled_callbacks[MAX_PROTO];
lm_iscsi_info_t iscsi_info;
lm_fcoe_info_t fcoe_info;
ecore_info_t ecore_info;
struct _lm_device_t* pf_dev;      /* NULL / self for the PF itself */
#ifdef VF_INVOLVED
pf_resources_set_t pf_resources;
u8_t vf_idx;
u8_t _vf_pad[2];
lm_vfs_set_t vfs_set;             /* PF-side view of its VFs */
void * pf_vf_acquiring_resp;
#endif
flr_stats_t flr_stats;
lm_encap_info_t encap_info;
lm_debug_info_t debug_info;
lm_cli_drv_ver_to_shmem_t lm_cli_drv_ver_to_shmem;
u8_t panic;                       /* set when the device is considered dead */
} lm_device_t;
/* Driver-pulse period: nominal on ASIC, 10x slower on emulation/FPGA.
 * Bug fix: the expansion is now fully parenthesized — previously the bare
 * ternary could bind incorrectly when the macro was used inside a larger
 * expression (e.g. multiplied or compared at the call site). */
#define DRV_PULSE_PERIOD_MS_FACTOR(_p) (CHIP_REV_IS_ASIC(_p) ? DRV_PULSE_PERIOD_MS : (DRV_PULSE_PERIOD_MS*10))
/* Sizing constants derived from chip generation. */
#define BRB_SIZE(_pdev) (CHIP_IS_E3(_pdev) ? 1024 : 512)
#define MAX_AGG_QS(_pdev) (CHIP_IS_E1(_pdev) ? \
ETH_MAX_AGGREGATION_QUEUES_E1 :\
ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
#define FW_DROP_LEVEL(_pdev) (ETH_MIN_RX_CQES_WITHOUT_TPA + MAX_AGG_QS(_pdev))
#define FW_PREFETCH_CNT 16
#define DROPLESS_FC_HEADROOM 150
/* Status-block and interrupt management entry points (implemented in lm_main). */
void print_sb_info(lm_device_t *pdev);
lm_status_t lm_pretend_func( struct _lm_device_t *pdev, u16_t pretend_func_num );
volatile struct host_status_block * lm_get_status_block(lm_device_t *pdev, u8_t rss_id);
volatile struct hc_sp_status_block * lm_get_default_status_block(lm_device_t *pdev);
volatile struct atten_sp_status_block * lm_get_attention_status_block(lm_device_t *pdev);
lm_status_t lm_reset_mcp_prep(lm_device_t *pde, u32_t * magic_val);
lm_status_t lm_reset_mcp_comp(lm_device_t *pdev, u32_t magic_val);
void init_status_blocks(struct _lm_device_t *pdev);
void lm_setup_ndsb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t sm_idx, u8_t timeout, u8_t dhc_enable);
void lm_reset_sb_ack_values(struct _lm_device_t *pdev);
void lm_set_int_coal_info(struct _lm_device_t *pdev);
void lm_int_igu_sb_cleanup(lm_device_t *pdev, u8 igu_sb_id);
u8_t
lm_eth_tx_hc_cq_cons_cosx_from_chain(IN lm_device_t *pdev,
IN const u32_t chain);
/* NOTE(review): duplicate prototype — lm_reset_sb_ack_values is already declared above. */
void lm_reset_sb_ack_values(struct _lm_device_t *pdev);
void lm_int_ack_sb_enable(lm_device_t *pdev, u8_t rss_id);
void lm_int_ack_sb_disable(lm_device_t *pdev, u8_t rss_id);
void lm_int_ack_def_sb_enable(lm_device_t *pdev);
void lm_int_ack_def_sb_disable(lm_device_t *pdev);
/* Bit flags returned by lm_query_storm_intr / used in interrupt dispatch. */
#define USTORM_INTR_FLAG 1
#define CSTORM_INTR_FLAG 2
#define SERV_RX_INTR_FLAG 4
#define SERV_TX_INTR_FLAG 8
#ifndef USER_LINUX
/* Number of index slots carried by a non-default status block: E1/E1.5
 * status blocks have fewer index entries than E2 and later chips. */
static __inline u16_t lm_get_sb_number_indexes(lm_device_t *pdev)
{
    return CHIP_IS_E1x(pdev) ? HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
}
/* Reads the running index of state machine sm_idx (SM_RX_ID/SM_TX_ID) from
 * status block sb_id, converting from little-endian. Channel VFs fetch it
 * through the PF-VF channel instead of reading the mapped block directly. */
static __inline u16_t lm_get_sb_running_index(lm_device_t *pdev, u8_t sb_id, u8_t sm_idx)
{
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev)) {
return lm_vf_pf_get_sb_running_index(pdev, sb_id, sm_idx);
}
#endif
/* E1x and E2 use different status-block layouts; pick the matching member. */
if (CHIP_IS_E1x(pdev))
{
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e1x_sb->sb.running_index[sm_idx]);
}
else
{
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb->sb.running_index[sm_idx]);
}
}
/* Reads index value idx from status block sb_id (little-endian converted).
 * Channel VFs fetch it through the PF-VF channel. */
static __inline u16_t lm_get_sb_index(lm_device_t *pdev, u8_t sb_id, u8_t idx)
{
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev)) {
return lm_vf_pf_get_sb_index(pdev, sb_id, idx);
}
#endif
/* E1x and E2 use different status-block layouts; pick the matching member. */
if (CHIP_IS_E1x(pdev))
{
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e1x_sb->sb.index_values[idx]);
}
else
{
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb->sb.index_values[idx]);
}
}
/* Returns a pointer to the first running-index entry of status block sb_idx,
 * selecting the E1x or E2 layout according to the chip generation. */
static __inline u16_t volatile * lm_get_sb_running_indexes(lm_device_t *pdev, u8_t sb_idx)
{
    if (CHIP_IS_E1x(pdev))
    {
        return &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e1x_sb->sb.running_index[0];
    }
    return &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e2_sb->sb.running_index[0];
}
/* Returns a pointer to the index-values array of status block sb_idx.
 * Channel VFs use their dedicated vf_sb area; PFs select the E1x or E2
 * layout according to the chip generation. */
static __inline u16_t volatile * lm_get_sb_indexes(lm_device_t *pdev, u8_t sb_idx)
{
u16_t volatile * indexes_ptr;
#ifdef VF_INVOLVED
if (IS_CHANNEL_VFDEV(pdev)) {
return pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.vf_sb;
}
#endif
if (CHIP_IS_E1x(pdev))
{
indexes_ptr = &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e1x_sb->sb.index_values[0];
}
else
{
indexes_ptr = &pdev->vars.status_blocks_arr[sb_idx].host_hc_status_block.e2_sb->sb.index_values[0];
}
return indexes_ptr;
}
/* Maps an IGU status-block id to the driver RSS chain id: with an IGU
 * interrupt block, USTORM blocks are offset by IGU_U_NDSB_OFFSET and must
 * be folded back; otherwise the ids are identical. */
static __inline u8_t lm_map_igu_sb_id_to_drv_rss(lm_device_t *pdev, u8_t igu_sb_id)
{
    u8_t rss_id = igu_sb_id;

    if ((INTR_BLK_TYPE(pdev) == INTR_BLK_IGU) && (rss_id >= IGU_U_NDSB_OFFSET(pdev)))
    {
        rss_id -= IGU_U_NDSB_OFFSET(pdev);
    }
    return rss_id;
}
/* Determines which storm(s) the interrupt on igu_sb_id belongs to and
 * derives the driver status-block id (written to *drv_sb_id). Returns a
 * combination of USTORM_INTR_FLAG / CSTORM_INTR_FLAG. */
static __inline u8_t lm_query_storm_intr(lm_device_t *pdev, u8_t igu_sb_id, u8_t * drv_sb_id)
{
    u8_t flags = 0;

    *drv_sb_id = igu_sb_id;

    switch (pdev->params.ndsb_type)
    {
    case LM_SINGLE_SM:
        /* fallthrough - both single-SM modes attribute everything to USTORM */
    case LM_DOUBLE_SM_SINGLE_IGU:
        SET_FLAGS(flags, USTORM_INTR_FLAG);
        break;
    default:
        /* Separate IGU blocks per storm: ids at or above the USTORM offset
         * belong to USTORM (and are folded back to the driver id range),
         * the rest belong to CSTORM. */
        if (igu_sb_id >= IGU_U_NDSB_OFFSET(pdev))
        {
            *drv_sb_id -= IGU_U_NDSB_OFFSET(pdev);
            SET_FLAGS(flags, USTORM_INTR_FLAG);
        }
        else
        {
            SET_FLAGS(flags, CSTORM_INTR_FLAG);
        }
        break;
    }
    return flags;
}
/* Returns TRUE when either running index of the status block behind
 * igu_sb_id has advanced past the driver's last acknowledged value, i.e.
 * there is new work to process on that status block. */
static __inline u8_t lm_is_sb_updated(lm_device_t *pdev, u8_t igu_sb_id)
{
    u8_t result = FALSE;
    u16_t hw_sb_idx = 0;
    u8_t flags = 0;
    u8_t drv_sb_id = 0;

    DbgBreakIfFastPath(!pdev);
    if (!pdev)
    {
        return FALSE;
    }

    flags = lm_query_storm_intr(pdev, igu_sb_id, &drv_sb_id);

    if (GET_FLAGS(flags, USTORM_INTR_FLAG))
    {
        hw_sb_idx = lm_get_sb_running_index(pdev, drv_sb_id, SM_RX_ID);
        if (hw_sb_idx != pdev->vars.u_hc_ack[drv_sb_id])
        {
            DbgMessage(pdev, INFORMi, "lm_is_sb_updated():u_sb.status_block_index:%d u_hc_ack:%d\n",
                       hw_sb_idx, pdev->vars.u_hc_ack[drv_sb_id]);
            result = TRUE;
        }
    }
    if (GET_FLAGS(flags, CSTORM_INTR_FLAG))
    {
        hw_sb_idx = lm_get_sb_running_index(pdev, drv_sb_id, SM_TX_ID);
        if (hw_sb_idx != pdev->vars.c_hc_ack[drv_sb_id])
        {
            /* Bug fix: trace the CSTORM ack value (c_hc_ack) here — this
             * message previously printed u_hc_ack, copy-pasted from the
             * USTORM branch above, making the debug output misleading. */
            DbgMessage(pdev, INFORMi, "lm_is_sb_updated():c_sb.status_block_index:%d c_hc_ack:%d\n",
                       hw_sb_idx, pdev->vars.c_hc_ack[drv_sb_id]);
            result = TRUE;
        }
    }
    DbgMessage(pdev, INFORMi, "lm_is_sb_updated(): result:%s\n", result? "TRUE" : "FALSE");
    return result;
}
#endif
/* --- Default SB / completion queries --- */
u8_t lm_is_def_sb_updated(lm_device_t *pdev);
u8_t lm_is_rx_completion(lm_device_t *pdev, u8_t chain_idx);
u8_t lm_is_tx_completion(lm_device_t *pdev, u8_t chain_idx);
u8_t lm_handle_igu_sb_id(lm_device_t *pdev, u8_t igu_sb_id, OUT u8_t *rx_rss_id, OUT u8_t *tx_rss_id);
/* --- L2 client / connection lifecycle --- */
lm_status_t lm_update_eth_client(IN struct _lm_device_t *pdev,
IN const u8_t cid,
IN const u16_t silent_vlan_value,
IN const u16_t silent_vlan_mask,
IN const u8_t silent_vlan_removal_flg,
IN const u8_t silent_vlan_change_flg
);
lm_status_t lm_establish_eth_con(struct _lm_device_t *pdev, u8_t const cid, u8_t sb_id, u8_t attributes_bitmap);
lm_status_t lm_establish_forward_con(struct _lm_device_t *pdev);
lm_status_t lm_close_forward_con(struct _lm_device_t *pdev);
lm_status_t lm_close_eth_con(struct _lm_device_t *pdev, u32_t const cid,
const u8_t send_halt_ramrod);
lm_status_t lm_terminate_eth_con(struct _lm_device_t *pdev, u32_t const cid);
lm_status_t lm_chip_stop(struct _lm_device_t *pdev);
int lm_set_init_arrs(lm_device_t *pdev);
lm_status_t
lm_empty_ramrod_eth(IN struct _lm_device_t *pdev,
IN const u32_t cid,
IN u32_t data_cid,
IN volatile u32_t *curr_state,
IN u32_t new_state);
lm_status_t
lm_setup_client_con_params( IN struct _lm_device_t *pdev,
IN u8_t const chain_idx,
IN struct _lm_client_con_params_t *cli_params );
lm_status_t
lm_eq_ramrod_post_sync( IN struct _lm_device_t *pdev,
IN u8_t cmd_id,
IN u64_t data,
IN u8_t ramrod_priority,
IN volatile u32_t *p_curr_state,
IN u32_t curr_state,
IN u32_t new_state);
lm_status_t
lm_init_chain_con( IN struct _lm_device_t *pdev,
IN u8_t const chain_idx,
IN u8_t const b_alloc );
lm_status_t
lm_clear_eth_con_resc(
IN struct _lm_device_t *pdev,
IN u8_t const cid
);
lm_status_t
lm_clear_chain_sb_cons_idx(
IN struct _lm_device_t *pdev,
IN u8_t sb_idx,
IN struct _lm_hc_sb_info_t *hc_sb_info,
IN volatile u16_t ** hw_con_idx_ptr
);
u8_t lm_is_eq_completion(lm_device_t *pdev);
/* --- Attention (AEU) handling and link --- */
void lm_handle_assertion_processing(lm_device_t *pdev, u16_t assertion_proc_flgs);
void lm_handle_deassertion_processing(lm_device_t *pdev, u16_t deassertion_proc_flgs);
void lm_get_attn_info(lm_device_t *pdev, u16_t *attn_bits, u16_t *attn_ack);
void sync_link_status(lm_device_t *pdev);
u16_t
lm_get_max_bw(IN const lm_device_t *pdev,
IN const u32_t link_speed,
IN const u8_t vnic);
void lm_reload_link_and_cmng(lm_device_t *pdev);
/* --- Small utilities --- */
u32_t count_bits(u32_t n);
u32_t LOG2(u32_t v);
lm_status_t lm_wait_state_change(struct _lm_device_t *pdev, volatile u32_t * curr_state, u32_t new_state);
void lm_update_fp_hc_indices(lm_device_t *pdev, u8_t igu_sb_id, u32_t *activity_flg, u8_t *drv_rss_id);
void lm_update_def_hc_indices(lm_device_t *pdev, u8_t sb_id, u32_t *activity_flg);
void lm_57710A0_dbg_intr( struct _lm_device_t * pdev );
/* --- PHY / MDIO access --- */
lm_status_t
lm_mwrite(
lm_device_t *pdev,
u32_t reg,
u32_t val);
lm_status_t
lm_mread(
lm_device_t *pdev,
u32_t reg,
u32_t *ret_val);
lm_status_t
lm_m45write(
lm_device_t *pdev,
u32_t reg,
u32_t addr,
u32_t val);
lm_status_t
lm_m45read(
lm_device_t *pdev,
u32_t reg,
u32_t addr,
u32_t *ret_val);
lm_status_t
lm_phy45_read(
lm_device_t *pdev,
u8_t phy_addr,
u8_t dev_addr,
u16_t reg,
u16_t *ret_val);
lm_status_t
lm_phy45_write(
lm_device_t *pdev,
u8_t phy_addr,
u8_t dev_addr,
u16_t reg,
u16_t val);
lm_status_t
lm_set_phy_addr(
lm_device_t *pdev,
u8_t addr);
void
lm_reset_link(lm_device_t *pdev);
/* --- NVRAM access --- */
u32_t
lm_nvram_query(
lm_device_t *pdev,
u8_t reset_flash_block,
u8_t no_hw_mod);
void
lm_nvram_init(
lm_device_t *pdev,
u8_t reset_flash_block);
lm_status_t
lm_nvram_read(
lm_device_t *pdev,
u32_t offset,
u32_t *ret_buf,
u32_t buf_size);
lm_status_t
lm_nvram_write(
lm_device_t *pdev,
u32_t offset,
u32_t *data_buf,
u32_t buf_size);
/* --- Indirect register access --- */
void
lm_reg_rd_ind(
lm_device_t *pdev,
u32_t offset,
u32_t *ret);
void
lm_reg_wr_ind(
lm_device_t *pdev,
u32_t offset,
u32_t val);
void
lm_reg_rd_ind_imp(
lm_device_t *pdev,
u32_t offset,
u32_t *ret);
void
lm_reg_wr_ind_imp(
lm_device_t *pdev,
u32_t offset,
u32_t val);
lm_status_t
lm_init_mac_link(
lm_device_t *pdev);
/* --- MCP (management FW) interaction --- */
u8_t
fw_reset_sync(
lm_device_t *pdev,
lm_reason_t reason,
u32_t msg_data,
u32_t fw_ack_timeout_us);
lm_status_t
lm_mcp_submit_cmd(
lm_device_t *pdev,
u32_t drv_msg);
lm_status_t
lm_mcp_get_resp(
lm_device_t *pdev);
/* --- TX coalesce buffers and misc init --- */
lm_coalesce_buffer_t *
lm_get_coalesce_buffer(
IN lm_device_t *pdev,
IN lm_tx_chain_t *txq,
IN u32_t buf_size);
void
lm_put_coalesce_buffer(
IN lm_device_t *pdev,
IN lm_tx_chain_t *txq,
IN lm_coalesce_buffer_t *coalesce_buf);
void lm_reset_device_if_undi_active(
IN struct _lm_device_t *pdev);
void
lm_cmng_init(
struct _lm_device_t *pdev,
u32_t port_rate);
lm_status_t lm_get_pcicfg_mps_mrrs(lm_device_t * pdev);
void lm_set_pcie_nfe_report( lm_device_t *pdev);
void lm_clear_non_def_status_block(struct _lm_device_t *pdev,
u8_t sb_id);
void lm_init_non_def_status_block(struct _lm_device_t *pdev,
u8_t sb_id,
u8_t port);
void lm_eth_init_command_comp(struct _lm_device_t *pdev, struct common_ramrod_eth_rx_cqe *cqe);
u8_t lm_is_nig_reset_called(struct _lm_device_t *pdev);
void lm_clear_nig_reset_called(struct _lm_device_t *pdev);
void lm_setup_fan_failure_detection(struct _lm_device_t *pdev);
void enable_blocks_attention(struct _lm_device_t *pdev);
u32_t lm_inc_cnt_grc_timeout_ignore(struct _lm_device_t *pdev, u32_t val);
lm_status_t acquire_split_alr(lm_device_t *pdev);
void release_split_alr(lm_device_t *pdev);
/* Whether DMAE transfers must byte-swap (host endianness differs from chip). */
#ifdef __BIG_ENDIAN
#define CHANGE_ENDIANITY TRUE
#else
#define CHANGE_ENDIANITY FALSE
#endif
/* DMAE-based GRC register block write/read helpers; _len is in dwords. */
#define REG_WR_DMAE_LEN_IMP(_pdev,_reg_offset, _addr_src, _b_src_is_zeroed, _len, le32_swap) lm_dmae_reg_wr(_pdev, \
lm_dmae_get(_pdev, LM_DMAE_DEFAULT)->context, \
(void*)_addr_src, \
_reg_offset,\
(u16_t)_len,\
_b_src_is_zeroed,\
le32_swap)
#define REG_RD_DMAE_LEN_IMP(_pdev,_reg_offset, _addr_dst, _len) lm_dmae_reg_rd( _pdev, \
lm_dmae_get(_pdev, LM_DMAE_DEFAULT)->context, \
_reg_offset, \
_addr_dst,\
_len,\
FALSE)
#define VIRT_WR_DMAE_LEN(_pdev, _src_addr, _dst_addr, _len, le32_swap) REG_WR_DMAE_LEN_IMP(_pdev, _dst_addr, _src_addr, FALSE, _len, le32_swap)
#define PHYS_WR_DMAE_LEN(_pdev, _src_addr, _dst_addr, _len) lm_dmae_reg_wr_phys( _pdev, \
lm_dmae_get(_pdev, LM_DMAE_DEFAULT)->context, \
_src_addr, \
_dst_addr,\
(u16_t)_len)
#define PHYS_COPY_DMAE_LEN(_pdev, _src_addr, _dst_addr, _len) lm_dmae_copy_phys_buffer_unsafe( _pdev,\
lm_dmae_get(_pdev, LM_DMAE_TOE)->context,\
_src_addr,\
_dst_addr,\
(u16_t)_len)
#define REG_WR_DMAE_LEN(_pdev,_reg_offset, _addr_src, _len) REG_WR_DMAE_LEN_IMP(_pdev, _reg_offset, _addr_src, FALSE, _len, FALSE)
#define REG_RD_DMAE_LEN(_pdev,_reg_offset, _addr_dst, _len) REG_RD_DMAE_LEN_IMP(_pdev,_reg_offset, _addr_dst, _len)
#define REG_WR_DMAE_LEN_ZERO(_pdev,_reg_offset, _len) REG_WR_DMAE_LEN_IMP(_pdev,_reg_offset, pdev->vars.zero_buffer, TRUE, _len, FALSE)
/* Convenience 64-bit (2 dword) forms. */
#define REG_WR_DMAE(_pdev,_reg_offset, _addr_src ) REG_WR_DMAE_LEN(_pdev,_reg_offset, _addr_src, 2)
#define REG_RD_DMAE(_pdev,_reg_offset, _addr_dst ) REG_RD_DMAE_LEN(_pdev,_reg_offset, _addr_dst, 2)
/* Indirect (PCI config-cycle) register access wrappers. */
#define REG_RD_IND(_pdev, _reg_offset, _ret) lm_reg_rd_ind(_pdev, (_reg_offset), _ret)
#define REG_WR_IND(_pdev, _reg_offset, _val) lm_reg_wr_ind(_pdev, (_reg_offset), _val)
#ifndef __LINUX
#define LM_BAR_WR32_ADDRESS(_pdev, _address, _val) \
*((u32_t volatile *) (_address))=(_val); \
mm_write_barrier()
#else
#define LM_BAR_WR32_ADDRESS(_pdev, _address, _val) \
mm_io_write_dword(_pdev, _address, _val)
#endif
/* Direct-mapped BAR accessors: dereference the mapped BAR with memory
 * barriers. Used on platforms where the BAR is mapped into the driver's
 * address space; other platforms use the mm_bar_* wrappers below. */
#if !(defined(UEFI) || defined(__SunOS) || defined(__LINUX)) || defined(__SunOS_MDB)
#ifdef _VBD_CMD_
void vbd_cmd_on_bar_access(lm_device_t* pdev, u8_t bar, u32_t offset);
#define VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset) vbd_cmd_on_bar_access(_pdev, _bar, _offset);
#else
#define VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)
#endif
#define LM_BAR_RD8_OFFSET(_pdev, _bar, _offset, _ret) \
do { \
mm_read_barrier(); \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*(_ret) = *((u8_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)] + (_offset))); \
} while (0)
#define LM_BAR_RD16_OFFSET(_pdev, _bar, _offset, _ret) \
do { \
mm_read_barrier(); \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*(_ret) = *((u16_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset))); \
} while (0)
#define LM_BAR_RD32_OFFSET(_pdev, _bar, _offset, _ret) \
do { \
mm_read_barrier(); \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*(_ret) = *((u32_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset))); \
} while (0)
#define LM_BAR_RD64_OFFSET(_pdev, _bar, _offset, _ret) \
do { \
mm_read_barrier(); \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*(_ret) = *((u64_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset))); \
} while (0)
#define LM_BAR_WR8_OFFSET(_pdev, _bar, _offset, _val) \
do { \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*((u8_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
mm_write_barrier(); \
} while (0)
#define LM_BAR_WR16_OFFSET(_pdev, _bar, _offset, _val) \
do { \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*((u16_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
mm_write_barrier(); \
} while (0)
#define LM_BAR_WR32_OFFSET(_pdev, _bar, _offset, _val) \
do { \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*((u32_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
mm_write_barrier(); \
} while (0)
#define LM_BAR_WR64_OFFSET(_pdev, _bar, _offset, _val) \
do { \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, _offset)\
*((u64_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)))=(_val); \
mm_write_barrier(); \
} while (0)
/* Copies _size_ dwords from _buf_ptr into BAR _bar starting at _offset.
 * Bug fix: the loop previously referenced `size` and `buf_ptr`, which are
 * NOT this macro's parameters (`_size_` / `_buf_ptr`) — it only compiled
 * when the call site happened to have locals with those names. The body
 * now uses the declared, parenthesized parameters. */
#define LM_BAR_COPY_BUFFER(_pdev, _bar, _offset, _size_, _buf_ptr) \
do { \
u32_t i; \
for (i=0; i<(_size_); i++) { \
VBD_CMD_VERIFY_BAR_ACCESS(_pdev, _bar, (_offset+i*4) )\
*((u32_t volatile *) ((u8_t *) (_pdev)->vars.mapped_bar_addr[(_bar)]+(_offset)+i*4))=*((_buf_ptr)+i); \
} \
} while (0)
#else
/* Platform-abstracted BAR accessors: delegate to the mm_bar_* layer
 * (Linux/Solaris/UEFI builds). */
#define LM_BAR_RD8_OFFSET(_pdev, _bar, _offset, _ret) \
mm_bar_read_byte(_pdev, _bar, _offset, _ret)
#define LM_BAR_RD16_OFFSET(_pdev, _bar, _offset, _ret) \
mm_bar_read_word(_pdev, _bar, _offset, _ret)
#define LM_BAR_RD32_OFFSET(_pdev, _bar, _offset, _ret) \
mm_bar_read_dword(_pdev, _bar, _offset, _ret)
#define LM_BAR_RD64_OFFSET(_pdev, _bar, _offset, _ret) \
mm_bar_read_ddword(_pdev, _bar, _offset, _ret)
#define LM_BAR_WR8_OFFSET(_pdev, _bar, _offset, _val) \
mm_bar_write_byte(_pdev, _bar, _offset, _val)
#define LM_BAR_WR16_OFFSET(_pdev, _bar, _offset, _val) \
mm_bar_write_word(_pdev, _bar, _offset, _val)
#define LM_BAR_WR32_OFFSET(_pdev, _bar, _offset, _val) \
mm_bar_write_dword(_pdev, _bar, _offset, _val)
#define LM_BAR_WR64_OFFSET(_pdev, _bar, _offset, _val) \
mm_bar_write_ddword(_pdev, _bar, _offset, _val)
#define LM_BAR_COPY_BUFFER(_pdev, _bar, _offset, _size, _buf_ptr) \
mm_bar_copy_buffer(_pdev, _bar, _offset, _size, _buf_ptr)
#endif
#ifndef USER_LINUX
/* Optional register-access tracing, compiled in only for DBG builds with
 * LOG_REG_ACCESS and enabled at runtime via test_mode. */
#if DBG && LOG_REG_ACCESS
#define LOG_REG_RD(_pdev, _offset, _val) \
if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \
{ \
DbgMessage(_pdev, INFORM, "rd 0x%04x = 0x%08x\n", _offset, _val); \
}
#define LOG_REG_WR(_pdev, _offset, _val) \
if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \
{ \
DbgMessage(_pdev, INFORM, "wr 0x%04x 0x%08x\n", _offset, _val); \
}
#else
#define LOG_REG_RD(_pdev, _offset, _val)
#define LOG_REG_WR(_pdev, _offset, _val)
#endif
#endif
/* Solaris register access: under MDB (debugger) use direct BAR dereference,
 * otherwise go through the DDI access handles. */
#if defined(__SunOS)
#ifdef __SunOS_MDB
/* Reads a 32-bit register from BAR_0 at reg_offset (MDB builds). */
static __inline u32_t _reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
u32_t val;
LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &val);
return val;
}
#define REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
#define VF_REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
#define REG_WR(_pdev, _reg_offset, _val) \
do { \
LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
} while (0)
#define VF_REG_WR(_pdev, _reg_offset, _val) REG_WR(_pdev, _reg_offset, _val)
#else
#define REG_RD(_pdev, _reg_offset) \
ddi_get32((_pdev)->vars.reg_handle[BAR_0], \
(uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
(_reg_offset)))
/* 32-bit register write through the Solaris DDI access handle.
 * Bug fix: the last line previously ended with a stray '\' continuation,
 * which spliced the following `#define VF_REG_RD` directive into this
 * macro's replacement list — leaving VF_REG_RD undefined and producing an
 * invalid '#' token inside REG_WR's body. */
#define REG_WR(_pdev, _reg_offset, _val) \
ddi_put32((_pdev)->vars.reg_handle[BAR_0], \
(uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
(_reg_offset)), \
(_val))
/* VF-context register access — same DDI path as the PF variants. */
#define VF_REG_RD(_pdev, _reg_offset) \
ddi_get32((_pdev)->vars.reg_handle[BAR_0], \
(uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
(_reg_offset)))
#define VF_REG_WR(_pdev, _reg_offset, _val) \
ddi_put32((_pdev)->vars.reg_handle[BAR_0], \
(uint32_t *)((caddr_t)(_pdev)->vars.mapped_bar_addr[BAR_0] + \
(_reg_offset)), \
(_val))
#endif
/* VBD command-level builds: register access that additionally notifies the
 * VBD command layer on every write (vbd_cmd_on_reg_write). */
#elif defined (_VBD_CMD_)
void vbd_cmd_on_reg_write(lm_device_t* pdev, u32_t offset);
/* PF register read; asserts if mistakenly called on a VF device. */
static __inline u32_t _reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
u32_t val;
DbgBreakIf(IS_VFDEV(pdev));
LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &val);
LOG_REG_RD(pdev, (reg_offset), val);
return val;
}
#define REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
/* VF register read — same as _reg_rd but without the VF-device assertion. */
static __inline u32_t _vf_reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
u32_t val;
LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &val);
LOG_REG_RD(pdev, (reg_offset), val);
return val;
}
/* Bug fix: route VF reads through _vf_reg_rd(), which omits the
 * DbgBreakIf(IS_VFDEV()) assertion that the PF-only _reg_rd() applies —
 * _vf_reg_rd was defined for exactly this purpose but was never referenced,
 * so VF_REG_RD on a VF device would trip the PF-path assertion. */
#define VF_REG_RD(_pdev, _reg_offset) _vf_reg_rd(_pdev, _reg_offset)
/* PF register write: asserts on VF devices, traces, writes, then notifies
 * the VBD command layer. */
#define REG_WR(_pdev, _reg_offset, _val) \
do { \
DbgBreakIf(IS_VFDEV(_pdev)); \
LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
vbd_cmd_on_reg_write(_pdev, _reg_offset);\
} while (0)
/* VF register write — same as REG_WR minus the VF-device assertion. */
#define VF_REG_WR(_pdev, _reg_offset, _val) \
do { \
LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
vbd_cmd_on_reg_write(_pdev, _reg_offset);\
} while (0)
/* Default (non-Solaris, non-VBD, kernel) register access via the mapped BAR. */
#elif !defined(USER_LINUX)
/* PF register read; asserts if mistakenly called on a VF device. */
static __inline u32_t _reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
u32_t val;
DbgBreakIf(IS_VFDEV(pdev));
LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &val);
LOG_REG_RD(pdev, (reg_offset), val);
return val;
}
#define REG_RD(_pdev, _reg_offset) _reg_rd(_pdev, _reg_offset)
/* VF register read — same as _reg_rd but without the VF-device assertion. */
static __inline u32_t _vf_reg_rd(struct _lm_device_t * pdev, u32_t reg_offset)
{
u32_t val;
LM_BAR_RD32_OFFSET(pdev, BAR_0, reg_offset, &val);
LOG_REG_RD(pdev, (reg_offset), val);
return val;
}
/* Bug fix: route VF reads through _vf_reg_rd(), which omits the
 * DbgBreakIf(IS_VFDEV()) assertion that the PF-only _reg_rd() applies —
 * _vf_reg_rd was defined for exactly this purpose but was never referenced,
 * so VF_REG_RD on a VF device would trip the PF-path assertion. */
#define VF_REG_RD(_pdev, _reg_offset) _vf_reg_rd(_pdev, _reg_offset)
/* PF register write: asserts on VF devices, traces, then writes the BAR. */
#define REG_WR(_pdev, _reg_offset, _val) \
do { \
DbgBreakIf(IS_VFDEV(_pdev)); \
LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
} while (0)
/* VF register write — same as REG_WR minus the VF-device assertion. */
#define VF_REG_WR(_pdev, _reg_offset, _val) \
do { \
LOG_REG_WR(_pdev, (u32_t)(_reg_offset), _val); \
LM_BAR_WR32_OFFSET(_pdev, BAR_0, _reg_offset, _val); \
} while (0)
#endif
/* MCP shared-memory access: reads/writes dwords relative to one of the
 * shmem regions (shmem_base / shmem_base2 / mf_cfg_base) over BAR_0. */
#define LM_SHMEM_READ_IMP(_pdev,_offset,_ret,_shmem_base_name) \
LM_BAR_RD32_OFFSET((_pdev),BAR_0,(_pdev)->hw_info._shmem_base_name + _offset,(_ret));
#define LM_SHMEM_READ(_pdev,_offset,_ret) LM_SHMEM_READ_IMP(_pdev,_offset,_ret, shmem_base );
#define LM_SHMEM2_READ(_pdev,_offset,_ret) LM_SHMEM_READ_IMP(_pdev,_offset,_ret, shmem_base2 );
#define LM_MFCFG_READ(_pdev,_offset,_ret) LM_SHMEM_READ_IMP(_pdev,_offset,_ret, mf_cfg_base );
#define LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,_shmem_base_name) \
LM_BAR_WR32_OFFSET((_pdev),BAR_0,(_pdev)->hw_info._shmem_base_name + _offset,(_val));
#define LM_SHMEM_WRITE(_pdev,_offset,_val) LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,shmem_base);
#define LM_SHMEM2_WRITE(_pdev,_offset,_val) LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,shmem_base2);
#define LM_MFCFG_WRITE(_pdev,_offset,_val) LM_SHMEM_WRITE_IMP(_pdev,_offset,_val,mf_cfg_base);
/* Address of a shmem2 field, and whether the running MFW's shmem2 is large
 * enough to actually contain that field. */
#define LM_SHMEM2_ADDR(_pdev, field) (_pdev->hw_info.shmem_base2 + OFFSETOF(struct shmem2_region, field))
#define LM_SHMEM2_HAS(_pdev, field) ((_pdev)->hw_info.shmem_base2 && \
(REG_RD(_pdev, LM_SHMEM2_ADDR(_pdev, size)) > OFFSETOF(struct shmem2_region, field)))
/* Internal (storm) memory accessors over BAR_0: _type is the storm RAM base
 * and _offset the byte offset within it.
 * Fixes: (1) each macro is now wrapped in do { } while (0) — previously the
 * two statements broke under an unbraced `if`/`else` at the call site;
 * (2) the debug trace now uses the _pdev macro argument instead of silently
 * capturing a caller-local named `pdev`. */
#define LM_INTMEM_READ8(_pdev,_offset,_ret,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_READ8() inside! storm:%s address:0x%x\n",#_type,_type); \
LM_BAR_RD8_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret)); \
} while (0)
#define LM_INTMEM_WRITE8(_pdev,_offset,_val,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_WRITE8() inside! storm:%s address:0x%x\n",#_type,_type); \
LM_BAR_WR8_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val)); \
} while (0)
#define LM_INTMEM_READ16(_pdev,_offset,_ret,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_READ16() inside! storm:%s address:0x%x\n",#_type,_type); \
LM_BAR_RD16_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret)); \
} while (0)
#define LM_INTMEM_WRITE16(_pdev,_offset,_val,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_WRITE16() inside! storm:%s address:0x%x offset=%x val=%x\n",#_type,_type, _offset, _val); \
LM_BAR_WR16_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val)); \
} while (0)
#define LM_INTMEM_READ32(_pdev,_offset,_ret,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_READ32() inside! storm:%s address:0x%x\n",#_type,_type); \
LM_BAR_RD32_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret)); \
} while (0)
#define LM_INTMEM_WRITE32(_pdev,_offset,_val,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_WRITE32() inside! storm:%s address:0x%x\n",#_type,_type); \
LM_BAR_WR32_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val)); \
} while (0)
#define LM_INTMEM_READ64(_pdev,_offset,_ret,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_READ64() inside! storm:%s address:0x%x\n",#_type,_type); \
LM_BAR_RD64_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_ret)); \
} while (0)
#define LM_INTMEM_WRITE64(_pdev,_offset,_val,_type) \
do { \
DbgMessage(_pdev, INFORMi, "LM_INTMEM_WRITE64() inside! storm:%s address:0x%x\n",#_type,_type); \
LM_BAR_WR64_OFFSET((_pdev),BAR_0,((_type)+(_offset)),(_val)); \
} while (0)
/* Default polling interval (microseconds) for register wait loops. */
#define DEFAULT_WAIT_INTERVAL_MICSEC 30
/* Poll 'reg_offset' until it reads 'expected_val' or 'total_wait_time_ms'
 * elapses; see the implementation for the exact return semantics. */
u32_t reg_wait_verify_val(struct _lm_device_t * pdev, u32_t reg_offset, u32_t expected_val, u32_t total_wait_time_ms );
#if !defined(_VBD_CMD_)
/* No trailing ';' so this variant is an expression, matching the '0' stub
 * below (the old trailing ';' made this variant statement-only and the two
 * configurations incompatible). */
#define REG_WAIT_VERIFY_VAL(_pdev, _reg_offset, _expected_val, _total_wait_time_ms ) \
reg_wait_verify_val(_pdev, _reg_offset, _expected_val, _total_wait_time_ms )
#else
#define REG_WAIT_VERIFY_VAL(_pdev, _reg_offset, _expected_val, _total_wait_time_ms ) 0
#endif
/* Doorbell "trigger" type bit (historical spelling kept for caller compat). */
#define DPM_TRIGER_TYPE 0x40
#if defined(EMULATION_DOORBELL_FULL_WORKAROUND)
/* Raw doorbell write (BAR_1); used by the workaround wrapper below. */
#define _DOORBELL(PDEV,CID,VAL) do{\
MM_WRITE_DOORBELL(PDEV,BAR_1,CID,VAL);\
} while(0)
/* Doorbell write with a workaround for emulation/FPGA platforms where the
 * doorbell queue (DQ) can overflow: every DOORBELL_CHECK_FREQUENCY rings
 * the DQ fill level is sampled and, above the high watermark, doorbells are
 * blocked (busy-wait) until the level drains below the low watermark. */
static __inline void DOORBELL(lm_device_t *pdev, u32_t cid, u32_t val)
{
u32_t db_fill;
/* Spin counter; only incremented to keep the wait loop body non-empty. */
u32_t wait_cnt = 0;
if (CHIP_REV_IS_EMUL(pdev) || CHIP_REV_IS_FPGA(pdev)) {
/* Read the fill-level register through the PF device; a device with no
 * pf_dev pointer uses itself. */
lm_device_t *pf_dev = pdev->pf_dev;
if (!pf_dev) {
pf_dev = pdev;
}
/* Busy-wait while another caller has blocked doorbells (set below). */
while(pdev->vars.doorbells_blocked) {
wait_cnt++;
}
/* Only sample the fill level once every DOORBELL_CHECK_FREQUENCY rings. */
if(mm_atomic_dec(&pdev->vars.doorbells_cnt) == 0) {
mm_atomic_set(&pdev->vars.doorbells_cnt, DOORBELL_CHECK_FREQUENCY);
db_fill=REG_RD(pf_dev,DORQ_REG_DQ_FILL_LVLF);
if (db_fill > ALLOWED_DOORBELLS_HIGH_WM) {
DbgMessage(pdev, WARN,
"EMULATION_DOORBELL_FULL_WORKAROUND: db_fill=%d, doorbell in busy wait!\n",
db_fill);
/* Block other doorbell writers until the queue drains. */
pdev->vars.doorbells_blocked = 1;
do {
db_fill=REG_RD(pf_dev,DORQ_REG_DQ_FILL_LVLF);
if (db_fill == 0xffffffff) {
/* All-ones read: register inaccessible — presumably the device is
 * gone; bail out rather than spin forever. TODO confirm. */
DbgMessage(pdev, FATAL, "DOORBELL: fill level 0xffffffff\n");
break;
}
} while (db_fill > ALLOWED_DOORBELLS_LOW_WM);
/* Statistics: count high-watermark events. */
pdev->vars.doorbells_high_wm_reached++;
pdev->vars.doorbells_blocked = 0;
}
}
}
_DOORBELL(pdev,cid,val);
}
#else
/* Production build: a doorbell is a plain BAR_1 write, no workaround. */
#define DOORBELL(PDEV,CID,VAL) do{\
MM_WRITE_DOORBELL(PDEV,BAR_1,CID,VAL);\
} while(0)
#endif
/* Build the HW CID from a SW CID: the port id is placed at bit 23 and the
 * vnic id at bit 17. Arguments fully parenthesized so arbitrary
 * expressions can be passed safely. */
#define HW_CID(pdev,x) ((x) | ((PORT_ID(pdev) << 23) | (VNIC_ID(pdev) << 17)))
/* Recover the SW CID by masking off the port/vnic bits added by HW_CID. */
#define SW_CID(x) ((x) & COMMON_RAMROD_ETH_RX_CQE_CID & ~0x860000)
u64_t lm_get_timestamp_of_recent_cid_recycling(struct _lm_device_t *pdev);
/* Map a chain index to its status-block id. Chains whose RSS id is beyond
 * the number of status blocks fall back to the default status block. */
static u8_t __inline lm_sb_id_from_chain(struct _lm_device_t *pdev, u32_t chain_idx)
{
const u32_t rss_id = CHAIN_TO_RSS_ID(pdev, (u32_t)chain_idx);
if (rss_id >= LM_SB_CNT(pdev))
{
return DEF_STATUS_BLOCK_INDEX;
}
return (u8_t)RSS_ID_TO_SB_ID(rss_id);
}
/* Set the device's virtualization mode (device_type, virtualization_type).
 * The transition is allowed only once, starting from the pristine
 * (DEVICE_TYPE_PF, VT_NONE) state; any later call just logs that the mode
 * is already set. A PF may stay plain or become a master PF in basic-VF /
 * channel-VF mode, or a PF assigned to a VM; a VF must specify either
 * VT_BASIC_VF or VT_CHANNEL_VF. */
static void __inline lm_set_virt_mode(struct _lm_device_t *pdev, u8_t device_type, u8_t virtualization_type)
{
if (CHK_NULL(pdev))
{
DbgBreakMsg("lm_set_virt_mode pdev is null");
return;
}
/* Only transition out of the pristine PF/VT_NONE state. */
if ((pdev->params.device_type == DEVICE_TYPE_PF) && (pdev->params.virtualization_type == VT_NONE)) {
switch (device_type) {
case DEVICE_TYPE_PF:
pdev->params.device_type = device_type;
switch (virtualization_type) {
case VT_NONE:
/* Plain PF: nothing more to record. */
break;
case VT_BASIC_VF:
case VT_CHANNEL_VF:
case VT_ASSIGNED_TO_VM_PF:
pdev->params.virtualization_type = virtualization_type;
break;
default:
DbgMessage(pdev, FATAL, "Master PF mode %d is not supported in virt.mode\n",virtualization_type);
DbgBreak();
break;
}
break;
case DEVICE_TYPE_VF:
pdev->params.device_type = device_type;
switch (virtualization_type) {
case VT_BASIC_VF:
case VT_CHANNEL_VF:
pdev->params.virtualization_type = virtualization_type;
break;
case VT_NONE:
/* A VF must run in some VF mode; VT_NONE here is a caller bug. */
DbgMessage(pdev, FATAL, "VF mode is mandatory parameter\n");
DbgBreak();
break;
default:
DbgMessage(pdev, FATAL, "VF mode %d is not supported\n",virtualization_type);
DbgBreak();
break;
}
break;
default:
DbgMessage(pdev, FATAL, "Device type %d is not supported in virt.mode\n",device_type);
DbgBreak();
}
} else {
/* Mode already chosen; report the rejected (requested) values. */
DbgMessage(pdev, FATAL, "Virt.mode is set already (%d,%d)\n",device_type,virtualization_type);
}
/* Note: logged unconditionally — shows the resulting (possibly unchanged)
 * mode even when the request above was rejected. */
DbgMessage(pdev, WARN, "Virt.mode is set as (%d,%d)\n", pdev->params.device_type, pdev->params.virtualization_type);
}
/* Record the VF channel type (HW or SW) in the device parameters.
 * An unrecognized value is reported and coerced to VT_HW_CHANNEL_TYPE. */
static void __inline lm_set_virt_channel_type(struct _lm_device_t *pdev, u8_t channel_type)
{
if (CHK_NULL(pdev))
{
DbgBreakMsg("lm_set_virt_channel_type pdev is null");
return;
}
if ((channel_type != VT_HW_CHANNEL_TYPE) && (channel_type != VT_SW_CHANNEL_TYPE))
{
DbgMessage(pdev, WARN, "Unknown channel type (%d)\n", channel_type);
DbgBreak();
channel_type = VT_HW_CHANNEL_TYPE;
}
pdev->params.channel_type = channel_type;
DbgMessage(pdev, WARN, "Channel type is set as (%d)\n", pdev->params.channel_type);
}
/* Reset the virtualization mode back to a plain PF (VT_NONE).
 * Only legal for a PF device; a request on a VF is rejected with a log
 * message. (Log-message typos fixed: "Vrtualization", "is is".) */
static void __inline lm_reset_virt_mode(struct _lm_device_t *pdev)
{
if (CHK_NULL(pdev))
{
DbgBreakMsg("lm_reset_virt_mode pdev is null");
return;
}
if (pdev->params.device_type == DEVICE_TYPE_PF) {
pdev->params.device_type = DEVICE_TYPE_PF;
pdev->params.virtualization_type = VT_NONE;
DbgMessage(pdev, FATAL, "Virtualization mode is reset to simple PF\n");
} else {
DbgMessage(pdev, FATAL, "Virtualization mode reset is valid only for PF\n");
}
}
/* ---- Miscellaneous prototypes (implemented elsewhere) ---------------- */
u32_t lm_get_num_of_cashed_grq_bds(struct _lm_device_t *pdev);
void lm_set_waitp(lm_device_t *pdev);
u8_t lm_get_port_id_from_func_abs( const u32_t chip_num, const lm_chip_port_mode_t lm_chip_port_mode, const u8_t abs_func );
u8_t lm_get_abs_func_vector( const u32_t chip_num, const lm_chip_port_mode_t chip_port_mode, const u8_t b_multi_vnics_mode, const u8_t path_id );
u8_t lm_check_if_pf_assigned_to_vm(struct _lm_device_t *pdev);
u8_t lm_is_fw_version_valid(struct _lm_device_t *pdev);
lm_status_t lm_set_cli_drv_ver_to_shmem(struct _lm_device_t *lmdev);
#ifdef VF_INVOLVED
/* ---- SR-IOV: PF-side VF management prototypes ------------------------ */
lm_vf_info_t * lm_pf_find_vf_info_by_rel_id(struct _lm_device_t *pdev, u16_t relative_vf_id);
lm_vf_info_t * lm_pf_find_vf_info_by_abs_id(struct _lm_device_t *pdev, u8_t abs_vf_id);
/* PF<->VF standard-channel request/response transfer. */
lm_status_t lm_pf_download_standard_request(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, void* virt_buffer, u32_t length);
lm_status_t lm_pf_upload_standard_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, void* virt_buffer, u32_t length);
lm_status_t lm_pf_upload_standard_request(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, lm_address_t * phys_buffer, u32_t length);
lm_status_t lm_pf_download_standard_response(struct _lm_device_t *pdev, lm_vf_info_t *vf_info, lm_address_t * phys_buffer, u32_t length);
/* VF lifecycle: allocate / init / clear. */
lm_status_t lm_pf_allocate_vfs(struct _lm_device_t *pdev);
lm_status_t lm_pf_init_vfs(struct _lm_device_t *pdev, u16_t num_vfs);
lm_status_t lm_pf_clear_vfs(struct _lm_device_t * pdev);
lm_status_t lm_pf_set_vf_ctx(struct _lm_device_t *pdev, u16_t vf_id, void* ctx);
#if 0
/* Dead declarations, intentionally compiled out. */
lm_status_t lm_pf_set_vf_client_id(struct _lm_device_t *pdev,
u16_t vf_id,
u8_t base_fw_client_id,
u8_t base_sw_client_id);
lm_status_t lm_pf_set_vf_ndsb(struct _lm_device_t *pdev,
u16_t vf_id,
u8_t base_fw_ndsb,
u8_t base_sw_ndsb,
u8_t base_fw_dhc_qzone_id);
lm_status_t lm_pf_set_vf_qzone_id(struct _lm_device_t *pdev,
u16_t vf_id,
u8_t base_fw_qzone_id);
#endif
lm_status_t lm_pf_set_vf_stat_id(struct _lm_device_t *pdev,
u16_t vf_id,
u8_t base_fw_stats_id);
u8_t lm_pf_is_vf_mac_set(struct _lm_device_t *pdev, u16_t vf_id);
lm_status_t lm_pf_set_vf_base_cam_idx(struct _lm_device_t *pdev, u16_t vf_id, u32_t base_cam_idx);
u32_t lm_pf_get_sw_client_idx_from_cid(struct _lm_device_t *pdev, u32_t cid);
u32_t lm_pf_get_fw_client_idx_from_cid(struct _lm_device_t *pdev, u32_t cid);
/* Per-VF chain (queue) resource management. */
u8_t lm_pf_acquire_vf_chains_resources(struct _lm_device_t *pdev, u16_t vf_id, u32_t num_chains);
void lm_pf_release_vf_chains_resources(struct _lm_device_t *pdev, u16_t vf_id);
void lm_pf_release_separate_vf_chain_resources(struct _lm_device_t *pdev, u16_t vf_id, u8_t chain_num);
u8_t lm_pf_is_sriov_valid(struct _lm_device_t *pdev);
/* Per-VF IGU status-block allocation/bookkeeping. */
u8_t lm_pf_allocate_vf_igu_sbs(struct _lm_device_t *pdev, struct _lm_vf_info_t *vf_info, u8_t num_of_igu_sbs);
void lm_pf_release_vf_igu_sbs(struct _lm_device_t *pdev, struct _lm_vf_info_t *vf_info);
u8_t lm_pf_get_max_number_of_vf_igu_sbs(struct _lm_device_t *pdev);
u8_t lm_pf_get_next_free_igu_block_id(struct _lm_device_t *pdev, u8_t starting_from);
void lm_pf_clear_vf_igu_blocks(struct _lm_device_t *pdev);
u8_t lm_pf_release_vf_igu_block(struct _lm_device_t *pdev, u8_t igu_sb_idx);
u8_t lm_pf_acquire_vf_igu_block(struct _lm_device_t *pdev, u8_t igu_sb_idx, u8_t abs_vf_id, u8_t vector_number);
u8_t lm_pf_get_vf_available_igu_blocks(struct _lm_device_t *pdev);
/* Runtime updates of a VF's VLAN configuration and NDSB moderation. */
lm_status_t lm_pf_update_vf_default_vlan(IN struct _lm_device_t *pdev, IN struct _lm_vf_info_t * vf_info,
IN const u16_t silent_vlan_value,
IN const u16_t silent_vlan_mask,
IN const u8_t silent_vlan_removal_flg,
IN const u8_t silent_vlan_change_flg,
IN const u16_t default_vlan,
IN const u8_t default_vlan_enable_flg,
IN const u8_t default_vlan_change_flg);
lm_status_t lm_pf_update_vf_ndsb(IN struct _lm_device_t *pdev,
IN struct _lm_vf_info_t *vf_info,
IN u8_t relative_in_vf_ndsb,
IN u16_t interrupt_mod_level);
lm_status_t lm_pf_update_vf_ndsbs(IN struct _lm_device_t *pdev,
IN struct _lm_vf_info_t *vf_info,
IN u16_t interrupt_mod_level);
#endif /* VF_INVOLVED */
#endif /* _LM5710_H */