#ifndef _ICE_BITOPS_H_
#define _ICE_BITOPS_H_
/*
 * Simple bitmap implementation for the ice(4) driver.  A bitmap is an
 * array of 32-bit "chunks"; bit <nr> lives in chunk nr / 32 at bit
 * position nr % 32 within that chunk.
 */
typedef uint32_t ice_bitmap_t;
/* Number of bits stored in each bitmap chunk. */
#define BITS_PER_CHUNK (8 * sizeof(ice_bitmap_t))
/* Index of the chunk that holds bit <nr>. */
#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
/* Number of chunks needed to hold <sz> bits, rounded up. */
#define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK)
/* Bit position of <nr> within its chunk. */
#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
/* Number of valid bits in the last chunk of an <nr>-bit bitmap (1..32). */
#define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1)
/* Mask covering only the valid bits of the last chunk. */
#define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \
(BITS_PER_CHUNK - LAST_CHUNK_BITS(nr))
/* Declare storage for a bitmap of <sz> bits. */
#define ice_declare_bitmap(A, sz) \
ice_bitmap_t A[BITS_TO_CHUNKS(sz)]
/* Report whether bit @nr is set within a single bitmap chunk. */
static inline bool ice_is_bit_set_internal(uint16_t nr, const ice_bitmap_t *bitmap)
{
	return (*bitmap & BIT(nr)) != 0;
}
/* Clear bit @nr within a single bitmap chunk. */
static inline void ice_clear_bit_internal(uint16_t nr, ice_bitmap_t *bitmap)
{
	ice_bitmap_t keep = *bitmap & ~BIT(nr);

	*bitmap = keep;
}
/* Set bit @nr within a single bitmap chunk. */
static inline void ice_set_bit_internal(uint16_t nr, ice_bitmap_t *bitmap)
{
	ice_bitmap_t merged = *bitmap | BIT(nr);

	*bitmap = merged;
}
/*
 * Atomically-looking (but not atomic) test-then-clear of bit @nr in a
 * single chunk.  Returns the previous value of the bit.
 */
static inline bool ice_test_and_clear_bit_internal(uint16_t nr,
    ice_bitmap_t *bitmap)
{
	bool was_set = ice_is_bit_set_internal(nr, bitmap);

	if (was_set)
		ice_clear_bit_internal(nr, bitmap);

	return was_set;
}
/*
 * Test-then-set of bit @nr in a single chunk.  Returns the previous
 * value of the bit (true when it was already set).
 */
static inline bool ice_test_and_set_bit_internal(uint16_t nr, ice_bitmap_t *bitmap)
{
	bool was_set = ice_is_bit_set_internal(nr, bitmap);

	if (!was_set)
		ice_set_bit_internal(nr, bitmap);

	return was_set;
}
/* Report whether bit @nr of a multi-chunk bitmap is set. */
static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, uint16_t nr)
{
	const ice_bitmap_t *chunk = &bitmap[BIT_CHUNK(nr)];

	return ice_is_bit_set_internal(BIT_IN_CHUNK(nr), chunk);
}
/* Clear bit @nr of a multi-chunk bitmap. */
static inline void ice_clear_bit(uint16_t nr, ice_bitmap_t *bitmap)
{
	ice_bitmap_t *chunk = &bitmap[BIT_CHUNK(nr)];

	ice_clear_bit_internal(BIT_IN_CHUNK(nr), chunk);
}
/* Set bit @nr of a multi-chunk bitmap. */
static inline void ice_set_bit(uint16_t nr, ice_bitmap_t *bitmap)
{
	ice_bitmap_t *chunk = &bitmap[BIT_CHUNK(nr)];

	ice_set_bit_internal(BIT_IN_CHUNK(nr), chunk);
}
/*
 * Test-then-clear bit @nr of a multi-chunk bitmap.  Returns the
 * previous value of the bit.
 */
static inline bool
ice_test_and_clear_bit(uint16_t nr, ice_bitmap_t *bitmap)
{
	ice_bitmap_t *chunk = &bitmap[BIT_CHUNK(nr)];

	return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr), chunk);
}
/*
 * Test-then-set bit @nr of a multi-chunk bitmap.  Returns the previous
 * value of the bit (true when it was already set).
 */
static inline bool
ice_test_and_set_bit(uint16_t nr, ice_bitmap_t *bitmap)
{
	ice_bitmap_t *chunk = &bitmap[BIT_CHUNK(nr)];

	return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr), chunk);
}
/*
 * Clear every chunk of a bitmap sized to hold @size bits.  Note that
 * this zeroes whole chunks, including any tail bits beyond @size.
 */
static inline void ice_zero_bitmap(ice_bitmap_t *bmp, uint16_t size)
{
	uint16_t chunk;

	for (chunk = 0; chunk < BITS_TO_CHUNKS(size); chunk++)
		bmp[chunk] = 0;
}
/*
 * ice_and_bitmap - compute dst = bmp1 & bmp2 over the first <size> bits
 *
 * Bits of the final chunk above <size> are preserved in dst rather than
 * cleared.  Returns non-zero when any bit of the (masked) result is set.
 * NOTE(review): with size == 0 the loop bound BITS_TO_CHUNKS(size) - 1
 * underflows; callers are assumed to pass size >= 1 — confirm.
 */
static inline int
ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
const ice_bitmap_t *bmp2, uint16_t size)
{
ice_bitmap_t res = 0, mask;
uint16_t i;
/* AND all chunks except the last, accumulating set bits in res. */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) {
dst[i] = bmp1[i] & bmp2[i];
res |= dst[i];
}
/* Last chunk: only touch the valid bits, keep the rest of dst. */
mask = LAST_CHUNK_MASK(size);
dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask);
res |= dst[i] & mask;
return res != 0;
}
/*
 * Compute dst = bmp1 | bmp2 over the first @size bits.  Bits of the
 * final chunk above @size are preserved in dst, not modified.
 */
static inline void
ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
    const ice_bitmap_t *bmp2, uint16_t size)
{
	uint16_t last = BITS_TO_CHUNKS(size) - 1;
	ice_bitmap_t mask, keep;
	uint16_t chunk;

	/* Full chunks are ORed wholesale. */
	for (chunk = 0; chunk < last; chunk++)
		dst[chunk] = bmp1[chunk] | bmp2[chunk];

	/* Final chunk: merge only the valid bits into dst. */
	mask = LAST_CHUNK_MASK(size);
	keep = dst[last] & ~mask;
	dst[last] = keep | ((bmp1[last] | bmp2[last]) & mask);
}
/*
 * Compute dst = bmp1 ^ bmp2 over the first @size bits.  Bits of the
 * final chunk above @size are preserved in dst, not modified.
 */
static inline void
ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
    const ice_bitmap_t *bmp2, uint16_t size)
{
	uint16_t last = BITS_TO_CHUNKS(size) - 1;
	ice_bitmap_t mask, keep;
	uint16_t chunk;

	/* Full chunks are XORed wholesale. */
	for (chunk = 0; chunk < last; chunk++)
		dst[chunk] = bmp1[chunk] ^ bmp2[chunk];

	/* Final chunk: merge only the valid bits into dst. */
	mask = LAST_CHUNK_MASK(size);
	keep = dst[last] & ~mask;
	dst[last] = keep | ((bmp1[last] ^ bmp2[last]) & mask);
}
/*
 * Compute dst = bmp1 & ~bmp2 over the first @size bits.  Bits of the
 * final chunk above @size are preserved in dst, not modified.
 */
static inline void
ice_andnot_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
    const ice_bitmap_t *bmp2, uint16_t size)
{
	uint16_t last = BITS_TO_CHUNKS(size) - 1;
	ice_bitmap_t mask, keep;
	uint16_t chunk;

	/* Full chunks: keep bmp1 bits not present in bmp2. */
	for (chunk = 0; chunk < last; chunk++)
		dst[chunk] = bmp1[chunk] & ~bmp2[chunk];

	/* Final chunk: merge only the valid bits into dst. */
	mask = LAST_CHUNK_MASK(size);
	keep = dst[last] & ~mask;
	dst[last] = keep | ((bmp1[last] & ~bmp2[last]) & mask);
}
/*
 * ice_find_next_bit - find the first set bit at or after <offset>
 *
 * Returns the bit position of the first set bit >= offset, or <size>
 * when no such bit exists (including when offset itself is >= size).
 */
static inline uint16_t
ice_find_next_bit(const ice_bitmap_t *bitmap, uint16_t size, uint16_t offset)
{
uint16_t i, j;
if (offset >= size)
return size;
/* Scan the tail of the chunk containing <offset> first. */
i = BIT_CHUNK(offset);
if (bitmap[i] != 0) {
uint16_t off = i * BITS_PER_CHUNK;
for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) {
if (ice_is_bit_set(bitmap, off + j))
/* Clamp so a tail bit past <size> reports "not found". */
return min(size, (uint16_t)(off + j));
}
}
/* Then scan the remaining chunks, skipping all-zero ones. */
for (i++; i < BITS_TO_CHUNKS(size); i++) {
if (bitmap[i] != 0) {
uint16_t off = i * BITS_PER_CHUNK;
for (j = 0; j < BITS_PER_CHUNK; j++) {
if (ice_is_bit_set(bitmap, off + j))
return min(size, (uint16_t)(off + j));
}
}
}
return size;
}
/*
 * ice_find_first_bit - find the lowest set bit in a bitmap
 *
 * Returns the position of the first set bit, or <size> when the bitmap
 * is empty.
 */
static inline uint16_t ice_find_first_bit(const ice_bitmap_t *bitmap, uint16_t size)
{
return ice_find_next_bit(bitmap, size, 0);
}
/*
 * ice_for_each_set_bit - iterate over every set bit of a bitmap
 * @_bitpos: uint16_t iterator, holds the current bit position
 * @_addr: bitmap to scan
 * @_maxlen: size of the bitmap in bits
 */
#define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \
for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \
(_bitpos) < (_maxlen); \
(_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1))
/* Report whether at least one bit of the bitmap is set. */
static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, uint16_t size)
{
	uint16_t first = ice_find_first_bit(bitmap, size);

	/* ice_find_first_bit() returns <size> when no bit is set. */
	return first < size;
}
/*
 * Copy a bitmap of @size bits from @src to @dst.  Whole chunks are
 * copied, so tail bits beyond @size are copied as well.
 */
static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, uint16_t size)
{
	uint16_t chunk;

	for (chunk = 0; chunk < BITS_TO_CHUNKS(size); chunk++)
		dst[chunk] = src[chunk];
}
/* Set @num_bits consecutive bits in @dst, starting at bit @pos. */
static inline void
ice_bitmap_set(ice_bitmap_t *dst, uint16_t pos, uint16_t num_bits)
{
	uint16_t bit = pos;

	while (bit < pos + num_bits) {
		ice_set_bit(bit, dst);
		bit++;
	}
}
/* Return the number of set bits (Hamming weight) in the bitmap. */
static inline int
ice_bitmap_hweight(ice_bitmap_t *bm, uint16_t size)
{
	uint16_t pos;
	int weight = 0;

	ice_for_each_set_bit(pos, bm, size)
		weight++;

	return weight;
}
/*
 * Compare the first @size bits of two bitmaps.  Tail bits of the final
 * chunk beyond @size are ignored.  Returns true when equal.
 */
static inline bool
ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, uint16_t size)
{
	uint16_t last = BITS_TO_CHUNKS(size) - 1;
	ice_bitmap_t mask;
	uint16_t chunk;

	/* All full chunks must match exactly. */
	for (chunk = 0; chunk < last; chunk++)
		if (bmp1[chunk] != bmp2[chunk])
			return false;

	/* Final chunk: compare only the valid bits. */
	mask = LAST_CHUNK_MASK(size);
	return (bmp1[last] & mask) == (bmp2[last] & mask);
}
/*
 * ice_bitmap_from_array32 - load a bitmap from a packed uint32_t array
 * @dst: destination bitmap, sized to hold at least @size bits
 * @src: source array of 32-bit words, bit 0 of src[0] = bitmap bit 0
 * @size: number of valid bits to transfer
 *
 * Clears @dst, then sets every bit that is set in @src.  Bits at or
 * beyond @size are left cleared.
 */
static inline void
ice_bitmap_from_array32(ice_bitmap_t *dst, uint32_t *src, uint16_t size)
{
	uint32_t remaining_bits, i;

#define BITS_PER_U32	(sizeof(uint32_t) * 8)
	/* Start from a clean bitmap so only bits from @src end up set. */
	ice_zero_bitmap(dst, size);

	/* Transfer all complete 32-bit source words. */
	for (i = 0; i < (uint32_t)(size / BITS_PER_U32); i++) {
		uint32_t bit_offset = i * BITS_PER_U32;
		uint32_t entry = src[i];
		uint32_t j;

		for (j = 0; j < BITS_PER_U32; j++) {
			if (entry & BIT(j))
				ice_set_bit((uint16_t)(j + bit_offset), dst);
		}
	}

	/* Transfer the valid bits of a trailing partial word, if any. */
	remaining_bits = size % BITS_PER_U32;
	if (remaining_bits) {
		uint32_t bit_offset = i * BITS_PER_U32;
		uint32_t entry = src[i];
		uint32_t j;

		for (j = 0; j < remaining_bits; j++) {
			if (entry & BIT(j))
				ice_set_bit((uint16_t)(j + bit_offset), dst);
		}
	}
/*
 * The helper macro is private to this function; drop it so it does not
 * leak into the rest of the translation unit (the other bitmap helper
 * macros are likewise #undef'd at the end of this section).
 */
#undef BITS_PER_U32
}
/*
 * These helper macros are private to the bitmap implementation above;
 * BITS_PER_CHUNK, BITS_TO_CHUNKS and ice_declare_bitmap remain visible
 * because later declarations use them.
 */
#undef BIT_CHUNK
#undef BIT_IN_CHUNK
#undef LAST_CHUNK_BITS
#undef LAST_CHUNK_MASK
#endif /* _ICE_BITOPS_H_ */
/*
 * A DMA-able memory allocation: the kernel virtual mapping plus the
 * bus_dma(9) state needed to map, load and free it.
 */
struct ice_dma_mem {
/* kernel virtual address of the mapping */
void *va;
/* physical/bus address handed to the hardware */
uint64_t pa;
bus_size_t size;
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
};
#define ICE_DMA_MAP(_m) ((_m)->map)
#define ICE_DMA_DVA(_m) ((_m)->map->dm_segs[0].ds_addr)
#define ICE_DMA_KVA(_m) ((void *)(_m)->va)
#define ICE_DMA_LEN(_m) ((_m)->size)
#define ICE_STR_BUF_LEN 64
/* A named driver mutex; the name aids lock profiling/debugging. */
struct ice_lock {
struct mutex mutex;
char name[ICE_STR_BUF_LEN];
};
extern uint16_t ice_lock_count;
/*
 * ice_init_lock - initialize a driver mutex with a unique name
 *
 * Builds the name "ice_lock_<N>" from the global ice_lock_count counter
 * and initializes the mutex at IPL_NET.
 * NOTE(review): ice_lock_count++ is not atomic; this presumably runs
 * only from single-threaded attach context — confirm.  The memset is
 * redundant since snprintf() NUL-terminates, but harmless.
 */
static inline void
ice_init_lock(struct ice_lock *lock)
{
memset(lock->name, 0, sizeof(lock->name));
snprintf(lock->name, ICE_STR_BUF_LEN, "ice_lock_%u", ice_lock_count++);
mtx_init_flags(&lock->mutex, IPL_NET, lock->name, 0);
}
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
#define ICE_PF_RESET_WAIT_COUNT 500
/*
 * Driver-internal status codes.  0 is success; negative values are
 * errors, grouped by range: generic (-1..-20), NVM (-50..-53) and
 * admin queue (-100..-105).  Note -7 is intentionally unassigned.
 */
enum ice_status {
ICE_SUCCESS = 0,
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
ICE_ERR_RESET_FAILED = -9,
ICE_ERR_FW_API_VER = -10,
ICE_ERR_NO_MEMORY = -11,
ICE_ERR_CFG = -12,
ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
ICE_ERR_FW_DDP_MISMATCH = -20,
ICE_ERR_NVM = -50,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
ICE_ERR_AQ_TIMEOUT = -101,
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
ICE_ERR_AQ_FW_CRITICAL = -105,
};
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
enum ice_fw_modes {
ICE_FW_MODE_NORMAL,
ICE_FW_MODE_DBG,
ICE_FW_MODE_REC,
ICE_FW_MODE_ROLLBACK
};
#define ICE_AQ_LEN 1023
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512
#define ICE_CTRLQ_WORK_LIMIT 256
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_Q_WAIT_RETRY_LIMIT 5
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
((uint16_t)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1))
#define EXP_FW_API_VER_BRANCH 0x00
#define EXP_FW_API_VER_MAJOR 0x01
#define EXP_FW_API_VER_MINOR 0x05
#define DBA_ALIGN 128
#define ICE_TSO_SIZE ((256*1024) - 1)
#define ICE_MIN_TSO_MSS 64
#define ICE_MAX_TX_SEGS 8
#define ICE_MAX_TSO_SEGS 8
#define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1)
#define ICE_MAX_RX_SEGS 5
#define ICE_MAX_TSO_HDR_SEGS 3
#define ICE_MSIX_BAR 3
#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
#define ICE_MIN_DESC_COUNT 64
#define ICE_DESC_COUNT_INCR 32
#define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX
#define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
#define ICE_QIDX_INVALID 0xffff
#define ICE_MIN_MTU 112
#define ICE_DEFAULT_VF_QUEUES 4
#define ICE_INVALID_MIRROR_VSI ((u16)-1)
#define ICE_MAX_RXQS_PER_TC 256
#define ICE_APPLY_LS BIT(0)
#define ICE_APPLY_FEC BIT(1)
#define ICE_APPLY_FC BIT(2)
#define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC)
#define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC)
#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
enum ice_dyn_idx_t {
ICE_IDX_ITR0 = 0,
ICE_IDX_ITR1 = 1,
ICE_IDX_ITR2 = 2,
ICE_ITR_NONE = 3
};
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_MAX 8160
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50
enum ice_rx_dtype {
ICE_RX_DTYPE_NO_SPLIT = 0,
ICE_RX_DTYPE_HEADER_SPLIT = 1,
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
#if 0
#define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \
CSUM_IP6_TCP| CSUM_IP6_UDP | CSUM_IP6_SCTP | \
CSUM_IP_TSO | CSUM_IP6_TSO)
#define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
#define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP)
#define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP)
#define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO)
#define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \
CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
CSUM_COALESCED)
#endif
#define ICE_FULL_CAPS \
(IFCAP_TSOv4 | IFCAP_TSOv6 | \
IFCAP_CSUM_TCPv4 | IFCAP_CSUM_TCPv6| \
IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWOFFLOAD | \
IFCAP_VLAN_MTU | IFCAP_LRO)
#define ICE_SAFE_CAPS \
(ICE_FULL_CAPS & ~(IFCAP_CSUM_TCPv4 | IFCAP_CSUM_TCPv6 | \
IFCAP_TSOv4 | IFCAP_TSOv6 | IFCAP_VLAN_HWOFFLOAD))
#define ICE_CAPS(sc) \
(ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? ICE_SAFE_CAPS : ICE_FULL_CAPS)
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
};
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 100000
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100
/*
 * One direction (send or receive) of a control queue: the descriptor
 * ring, its data buffers, software ring indices and the hardware
 * register offsets/masks used to program it.
 */
struct ice_ctl_q_ring {
void *dma_head;
/* DMA memory backing the descriptor ring itself */
struct ice_dma_mem desc_buf;
/* per-descriptor data buffers; sq or rq view of the same array */
union {
struct ice_dma_mem *sq_bi;
struct ice_dma_mem *rq_bi;
} r;
/* number of descriptors in the ring */
uint16_t count;
/* software producer/consumer indices */
uint16_t next_to_use;
uint16_t next_to_clean;
/* hardware register offsets for this ring */
uint32_t head;
uint32_t tail;
uint32_t len;
uint32_t bah;
uint32_t bal;
/* field masks for the LEN/HEAD registers */
uint32_t len_mask;
uint32_t len_ena_mask;
uint32_t len_crit_mask;
uint32_t head_mask;
};
struct ice_sq_cd {
struct ice_aq_desc *wb_desc;
};
struct ice_rq_event_info {
struct ice_aq_desc desc;
uint16_t msg_len;
uint16_t buf_len;
uint8_t *msg_buf;
};
/*
 * State for one control queue pair (admin or mailbox): the send and
 * receive rings, their sizing parameters and per-direction locks.
 */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
/* receive ring (events from firmware) */
struct ice_ctl_q_ring rq;
/* send ring (commands to firmware) */
struct ice_ctl_q_ring sq;
uint32_t sq_cmd_timeout;
uint16_t num_rq_entries;
uint16_t num_sq_entries;
uint16_t rq_buf_size;
uint16_t sq_buf_size;
/* firmware status of the most recent send-queue command */
enum ice_aq_err sq_last_status;
struct ice_lock sq_lock;
struct ice_lock rq_lock;
};
enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
ICE_MAC_VF,
ICE_MAC_E810,
ICE_MAC_GENERIC,
ICE_MAC_GENERIC_3K,
ICE_MAC_GENERIC_3K_E825,
};
enum ice_reset_req {
ICE_RESET_POR = 0,
ICE_RESET_INVAL = 0,
ICE_RESET_CORER = 1,
ICE_RESET_GLOBR = 2,
ICE_RESET_EMPR = 3,
ICE_RESET_PFR = 4,
};
struct ice_hw_common_caps {
uint64_t wr_csr_prot;
uint32_t switching_mode;
#define ICE_NVM_IMAGE_TYPE_EVB 0x0
uint32_t mgmt_mode;
#define ICE_MGMT_MODE_PASS_THRU_MODE_M 0xF
#define ICE_MGMT_MODE_CTL_INTERFACE_M 0xF0
#define ICE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
uint32_t mgmt_protocols_mctp;
#define ICE_MGMT_MODE_PROTO_RSVD BIT(0)
#define ICE_MGMT_MODE_PROTO_PLDM BIT(1)
#define ICE_MGMT_MODE_PROTO_OEM BIT(2)
#define ICE_MGMT_MODE_PROTO_NC_SI BIT(3)
uint32_t os2bmc;
uint32_t valid_functions;
uint32_t active_tc_bitmap;
uint32_t maxtc;
uint32_t rss_table_size;
uint32_t rss_table_entry_width;
uint32_t num_rxq;
uint32_t rxq_first_id;
uint32_t num_txq;
uint32_t txq_first_id;
uint32_t num_msix_vectors;
uint32_t msix_vector_first_id;
uint32_t max_mtu;
uint32_t num_wol_proxy_fltr;
uint32_t wol_proxy_vsi_seid;
uint32_t led_pin_num;
uint32_t sdp_pin_num;
#define ICE_MAX_SUPPORTED_GPIO_LED 12
#define ICE_MAX_SUPPORTED_GPIO_SDP 8
uint8_t led[ICE_MAX_SUPPORTED_GPIO_LED];
uint8_t sdp[ICE_MAX_SUPPORTED_GPIO_SDP];
uint8_t sr_iov_1_1;
uint8_t vmdq;
uint8_t evb_802_1_qbg;
uint8_t evb_802_1_qbh;
uint8_t dcb;
uint8_t iscsi;
uint8_t mgmt_cem;
uint8_t iwarp;
uint8_t roce_lag;
#define ICE_WOL_SUPPORT_M BIT(0)
#define ICE_ACPI_PROG_MTHD_M BIT(1)
#define ICE_PROXY_SUPPORT_M BIT(2)
uint8_t apm_wol_support;
uint8_t acpi_prog_mthd;
uint8_t proxy_support;
bool sec_rev_disabled;
bool update_disabled;
bool nvm_unified_update;
bool netlist_auth;
#define ICE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
#define ICE_NVM_MGMT_UPDATE_DISABLED BIT(1)
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
#define ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
bool pcie_reset_avoidance;
bool reset_restrict_support;
#define ICE_EXT_TOPO_DEV_IMG_COUNT 4
uint32_t ext_topo_dev_img_ver_high[ICE_EXT_TOPO_DEV_IMG_COUNT];
uint32_t ext_topo_dev_img_ver_low[ICE_EXT_TOPO_DEV_IMG_COUNT];
uint8_t ext_topo_dev_img_part_num[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
#define ICE_EXT_TOPO_DEV_IMG_PART_NUM_M \
MAKEMASK(0xFF, ICE_EXT_TOPO_DEV_IMG_PART_NUM_S)
bool ext_topo_dev_img_load_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
bool ext_topo_dev_img_ver_schema[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA BIT(2)
bool tx_sched_topo_comp_mode_en;
bool dyn_flattening_en;
bool orom_recovery_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
#define ICE_NAC_TOPO_DUAL_M BIT(1)
#define ICE_NAC_TOPO_ID_M MAKEMASK(0xf, 0)
enum ice_aq_res_ids {
ICE_NVM_RES_ID = 1,
ICE_SPD_RES_ID,
ICE_CHANGE_LOCK_RES_ID,
ICE_GLOBAL_CFG_LOCK_RES_ID
};
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
struct ice_link_default_override_tlv {
uint8_t options;
#define ICE_LINK_OVERRIDE_OPT_M 0x3F
#define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0)
#define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1)
#define ICE_LINK_OVERRIDE_PORT_DIS BIT(2)
#define ICE_LINK_OVERRIDE_EN BIT(3)
#define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4)
#define ICE_LINK_OVERRIDE_EEE_EN BIT(5)
uint8_t phy_config;
#define ICE_LINK_OVERRIDE_PHY_CFG_S 8
#define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S)
#define ICE_LINK_OVERRIDE_PAUSE_M 0x3
#define ICE_LINK_OVERRIDE_LESM_EN BIT(6)
#define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7)
uint8_t fec_options;
#define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF
uint8_t rsvd1;
uint64_t phy_type_low;
uint64_t phy_type_high;
};
#define ICE_NVM_VER_LEN 32
#define ICE_NVM_VER_LEN 32
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
#define ice_for_each_traffic_class(_i) \
for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
#define ICE_INVAL_TEID 0xFFFFFFFF
#define ICE_DFLT_AGG_ID 0
/*
 * A node in the TX scheduler tree.  Nodes link to their parent, first
 * sibling and an array of children, mirroring the firmware topology.
 */
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling;
/* array of num_children pointers */
struct ice_sched_node **children;
/* raw element data as reported by firmware */
struct ice_aqc_txsched_elem_data info;
uint32_t agg_id;
uint16_t vsi_handle;
uint8_t in_use;
/* layer of this node within the scheduler tree */
uint8_t tx_sched_layer;
uint8_t num_children;
uint8_t tc_num;
/* which consumer owns the node's queues (see values below) */
uint8_t owner;
#define ICE_SCHED_NODE_OWNER_LAN 0
#define ICE_SCHED_NODE_OWNER_AE 1
#define ICE_SCHED_NODE_OWNER_RDMA 2
};
#define ICE_TXSCHED_GET_NODE_TEID(x) le32toh((x)->info.node_teid)
#define ICE_TXSCHED_GET_PARENT_TEID(x) le32toh((x)->info.parent_teid)
#define ICE_TXSCHED_GET_CIR_RL_ID(x) \
le16toh((x)->info.cir_bw.bw_profile_idx)
#define ICE_TXSCHED_GET_EIR_RL_ID(x) \
le16toh((x)->info.eir_bw.bw_profile_idx)
#define ICE_TXSCHED_GET_SRL_ID(x) le16toh((x)->info.srl_id)
#define ICE_TXSCHED_GET_CIR_BWALLOC(x) \
le16toh((x)->info.cir_bw.bw_alloc)
#define ICE_TXSCHED_GET_EIR_BWALLOC(x) \
le16toh((x)->info.eir_bw.bw_alloc)
enum ice_rl_type {
ICE_UNKNOWN_BW = 0,
ICE_MIN_BW,
ICE_MAX_BW,
ICE_SHARED_BW
};
#define ICE_SCHED_MIN_BW 500
#define ICE_SCHED_MAX_BW 100000000
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF
#define ICE_SCHED_NO_PRIORITY 0
#define ICE_SCHED_NO_BW_WT 0
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024)
struct ice_driver_ver {
uint8_t major_ver;
uint8_t minor_ver;
uint8_t build_ver;
uint8_t subbuild_ver;
uint8_t driver_string[32];
};
enum ice_fc_mode {
ICE_FC_NONE = 0,
ICE_FC_RX_PAUSE,
ICE_FC_TX_PAUSE,
ICE_FC_FULL,
ICE_FC_AUTO,
ICE_FC_PFC,
ICE_FC_DFLT
};
enum ice_fec_mode {
ICE_FEC_NONE = 0,
ICE_FEC_RS,
ICE_FEC_BASER,
ICE_FEC_AUTO,
ICE_FEC_DIS_AUTO
};
struct ice_fc_info {
enum ice_fc_mode current_mode;
enum ice_fc_mode req_mode;
};
struct ice_orom_info {
uint8_t major;
uint8_t patch;
uint16_t build;
uint32_t srev;
};
struct ice_nvm_info {
uint32_t eetrack;
uint32_t srev;
uint8_t major;
uint8_t minor;
};
struct ice_minsrev_info {
uint32_t nvm;
uint32_t orom;
uint8_t nvm_valid : 1;
uint8_t orom_valid : 1;
};
struct ice_netlist_info {
uint32_t major;
uint32_t minor;
uint32_t type;
uint32_t rev;
uint32_t hash;
uint16_t cust_ver;
};
enum ice_flash_bank {
ICE_INVALID_FLASH_BANK,
ICE_1ST_FLASH_BANK,
ICE_2ND_FLASH_BANK,
};
enum ice_bank_select {
ICE_ACTIVE_FLASH_BANK,
ICE_INACTIVE_FLASH_BANK,
};
struct ice_bank_info {
uint32_t nvm_ptr;
uint32_t nvm_size;
uint32_t orom_ptr;
uint32_t orom_size;
uint32_t netlist_ptr;
uint32_t netlist_size;
enum ice_flash_bank nvm_bank;
enum ice_flash_bank orom_bank;
enum ice_flash_bank netlist_bank;
};
struct ice_flash_info {
struct ice_orom_info orom;
struct ice_nvm_info nvm;
struct ice_netlist_info netlist;
struct ice_bank_info banks;
uint16_t sr_words;
uint32_t flash_size;
uint8_t blank_nvm_mode;
};
#define ICE_SR_NVM_CTRL_WORD 0x00
#define ICE_SR_PHY_ANALOG_PTR 0x04
#define ICE_SR_OPTION_ROM_PTR 0x05
#define ICE_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
#define ICE_SR_AUTO_GENERATED_POINTERS_PTR 0x07
#define ICE_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
#define ICE_SR_EMP_GLOBAL_MODULE_PTR 0x09
#define ICE_SR_EMP_IMAGE_PTR 0x0B
#define ICE_SR_PE_IMAGE_PTR 0x0C
#define ICE_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define ICE_SR_MNG_CFG_PTR 0x0E
#define ICE_SR_EMP_MODULE_PTR 0x0F
#define ICE_SR_PBA_BLOCK_PTR 0x16
#define ICE_SR_BOOT_CFG_PTR 0x132
#define ICE_SR_NVM_WOL_CFG 0x19
#define ICE_NVM_OROM_VER_OFF 0x02
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_ALTERNATE_SAN_MAC_ADDR_PTR 0x27
#define ICE_SR_PERMANENT_SAN_MAC_ADDR_PTR 0x28
#define ICE_SR_NVM_MAP_VER 0x29
#define ICE_SR_NVM_IMAGE_VER 0x2A
#define ICE_SR_NVM_STRUCTURE_VER 0x2B
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
#define ICE_NVM_VER_LO_SHIFT 0
#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
#define ICE_NVM_VER_HI_SHIFT 12
#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
#define ICE_OEM_EETRACK_ID 0xffffffff
#define ICE_OROM_VER_PATCH_SHIFT 0
#define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT)
#define ICE_OROM_VER_BUILD_SHIFT 8
#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
#define ICE_OROM_VER_SHIFT 24
#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_VPD_PTR 0x2F
#define ICE_SR_PXE_SETUP_PTR 0x30
#define ICE_SR_PXE_CFG_CUST_OPTIONS_PTR 0x31
#define ICE_SR_NVM_ORIGINAL_EETRACK_LO 0x34
#define ICE_SR_NVM_ORIGINAL_EETRACK_HI 0x35
#define ICE_SR_VLAN_CFG_PTR 0x37
#define ICE_SR_POR_REGS_AUTO_LOAD_PTR 0x38
#define ICE_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define ICE_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define ICE_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
#define ICE_SR_PHY_CFG_SCRIPT_PTR 0x3D
#define ICE_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define ICE_SR_SW_CHECKSUM_WORD 0x3F
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_SCRATCH_PAD_PTR 0x41
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
#define ICE_SR_1ST_OROM_BANK_PTR 0x44
#define ICE_SR_OROM_BANK_SIZE 0x45
#define ICE_SR_NETLIST_BANK_PTR 0x46
#define ICE_SR_NETLIST_BANK_SIZE 0x47
#define ICE_SR_EMP_SR_SETTINGS_PTR 0x48
#define ICE_SR_CONFIGURATION_METADATA_PTR 0x4D
#define ICE_SR_IMMEDIATE_VALUES_PTR 0x4E
#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134
#define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118
#define ICE_NVM_CSS_HDR_LEN_L 0x02
#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
#define ICE_NVM_AUTH_HEADER_LEN 0x08
#define ICE_NETLIST_LINK_TOPO_MOD_ID 0x011B
#define ICE_NETLIST_TYPE_OFFSET 0x0000
#define ICE_NETLIST_LEN_OFFSET 0x0001
#define ICE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
#define ICE_LINK_TOPO_MODULE_LEN ICE_NETLIST_LINK_TOPO_OFFSET(0x0000)
#define ICE_LINK_TOPO_NODE_COUNT ICE_NETLIST_LINK_TOPO_OFFSET(0x0001)
#define ICE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
#define ICE_NETLIST_ID_BLK_SIZE 0x30
#define ICE_NETLIST_ID_BLK_OFFSET(n) ICE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
#define ICE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
#define ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
#define ICE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
#define ICE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
#define ICE_NETLIST_ID_BLK_TYPE_LOW 0x06
#define ICE_NETLIST_ID_BLK_TYPE_HIGH 0x07
#define ICE_NETLIST_ID_BLK_REV_LOW 0x08
#define ICE_NETLIST_ID_BLK_REV_HIGH 0x09
#define ICE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
#define ICE_NETLIST_ID_BLK_CUST_VER 0x2F
#define ICE_SR_VPD_SIZE_WORDS 512
#define ICE_SR_PCIE_ALT_SIZE_WORDS 512
#define ICE_SR_CTRL_WORD_1_S 0x06
#define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S)
#define ICE_SR_CTRL_WORD_VALID 0x1
#define ICE_SR_CTRL_WORD_OROM_BANK BIT(3)
#define ICE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
#define ICE_SR_CTRL_WORD_NVM_BANK BIT(5)
#define ICE_SR_NVM_PTR_4KB_UNITS BIT(15)
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_BUF_ALIGNMENT 4096
#define ICE_SR_WORDS_IN_1KB 512
#define ICE_SR_SW_CHECKSUM_BASE 0xBABA
#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10
#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4
#define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2
#define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1
#define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2
#define ICE_FW_API_LINK_OVERRIDE_MAJ 1
#define ICE_FW_API_LINK_OVERRIDE_MIN 5
#define ICE_FW_API_LINK_OVERRIDE_PATCH 2
#define ICE_PBA_FLAG_DFLT 0xFAFA
#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_0KB 0x0
#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_8KB 0x1
#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_64KB 0x2
#define ICE_FW_API_LLDP_FLTR_MAJ 1
#define ICE_FW_API_LLDP_FLTR_MIN 7
#define ICE_FW_API_LLDP_FLTR_PATCH 1
#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
#define ICE_FW_VER_BRANCH_E82X 0
#define ICE_FW_VER_BRANCH_E810 1
#define ICE_FW_FEC_DIS_AUTO_MAJ 7
#define ICE_FW_FEC_DIS_AUTO_MIN 0
#define ICE_FW_FEC_DIS_AUTO_PATCH 5
#define ICE_FW_FEC_DIS_AUTO_MAJ_E82X 7
#define ICE_FW_FEC_DIS_AUTO_MIN_E82X 1
#define ICE_FW_FEC_DIS_AUTO_PATCH_E82X 2
#define ICE_FW_API_HEALTH_REPORT_MAJ 1
#define ICE_FW_API_HEALTH_REPORT_MIN 7
#define ICE_FW_API_HEALTH_REPORT_PATCH 6
#define ICE_FW_API_AUTO_DROP_MAJ 1
#define ICE_FW_API_AUTO_DROP_MIN 4
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
uint32_t num_allocd_vfs;
uint32_t vf_base_id;
uint32_t guar_num_vsi;
};
struct ice_nac_topology {
uint32_t mode;
uint8_t id;
};
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
uint32_t num_vfs_exposed;
uint32_t num_vsi_allocd_to_host;
uint32_t num_funcs;
struct ice_nac_topology nac_topo;
uint32_t supported_sensors;
#define ICE_SENSOR_SUPPORT_E810_INT_TEMP BIT(0)
};
#define SCHED_NODE_NAME_MAX_LEN 32
#define ICE_SCHED_5_LAYERS 5
#define ICE_SCHED_9_LAYERS 9
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_AGG_LAYER_OFFSET 6
#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
#define ICE_64_BYTE_GRANULARITY 0
#define ICE_KBYTE_GRANULARITY BIT(11)
#define ICE_MIN_BURST_SIZE_ALLOWED 64
#define ICE_MAX_BURST_SIZE_ALLOWED \
((BIT(11) - 1) * 1024)
#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
((BIT(11) - 1) * 64)
#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
#define ICE_RL_PROF_ACCURACY_BYTES 128
#define ICE_RL_PROF_MULTIPLIER 10000
#define ICE_RL_PROF_TS_MULTIPLIER 32
#define ICE_RL_PROF_FRACTION 512
#define ICE_PSM_CLK_367MHZ_IN_HZ 367647059
#define ICE_PSM_CLK_416MHZ_IN_HZ 416666667
#define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
#define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3
#define ICE_SCHED_MIN_BW 500
#define ICE_SCHED_MAX_BW 100000000
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF
#define ICE_SCHED_NO_PRIORITY 0
#define ICE_SCHED_NO_BW_WT 0
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024)
#define ICE_TXSCHED_GET_RL_PROF_ID(p) le16toh((p)->info.profile_id)
#define ICE_TXSCHED_GET_RL_MBS(p) le16toh((p)->info.max_burst_size)
#define ICE_TXSCHED_GET_RL_MULTIPLIER(p) le16toh((p)->info.rl_multiply)
#define ICE_TXSCHED_GET_RL_WAKEUP_MV(p) le16toh((p)->info.wake_up_calc)
#define ICE_TXSCHED_GET_RL_ENCODE(p) le16toh((p)->info.rl_encode)
#define ICE_MAX_PORT_PER_PCI_DEV 8
enum ice_bw_type {
ICE_BW_TYPE_PRIO,
ICE_BW_TYPE_CIR,
ICE_BW_TYPE_CIR_WT,
ICE_BW_TYPE_EIR,
ICE_BW_TYPE_EIR_WT,
ICE_BW_TYPE_SHARED,
ICE_BW_TYPE_CNT
};
struct ice_bw {
uint32_t bw;
uint16_t bw_alloc;
};
struct ice_bw_type_info {
ice_declare_bitmap(bw_t_bitmap, ICE_BW_TYPE_CNT);
uint8_t generic;
struct ice_bw cir_bw;
struct ice_bw eir_bw;
uint32_t shared_bw;
};
struct ice_q_ctx {
uint16_t q_handle;
uint32_t q_teid;
struct ice_bw_type_info bw_t_info;
};
struct ice_sched_agg_vsi_info {
TAILQ_ENTRY(ice_sched_agg_vsi_info) list_entry;
ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
uint16_t vsi_handle;
ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
uint16_t max_lanq[ICE_MAX_TRAFFIC_CLASS];
uint16_t max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
};
enum ice_agg_type {
ICE_AGG_TYPE_UNKNOWN = 0,
ICE_AGG_TYPE_TC,
ICE_AGG_TYPE_AGG,
ICE_AGG_TYPE_VSI,
ICE_AGG_TYPE_QG,
ICE_AGG_TYPE_Q
};
TAILQ_HEAD(ice_vsi_list_head, ice_sched_agg_vsi_info);
#define ICE_MAX_VSI_AVAILABLE 768
struct ice_sched_agg_info {
struct ice_vsi_list_head agg_vsi_list;
TAILQ_ENTRY(ice_sched_agg_info) list_entry;
ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
uint32_t agg_id;
enum ice_agg_type agg_type;
struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
#define ICE_DCBX_OFFLOAD_DIS 0
#define ICE_DCBX_OFFLOAD_ENABLED 1
#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
#define ICE_DCBX_STATUS_DONE 2
#define ICE_DCBX_STATUS_MULTIPLE_PEERS 3
#define ICE_DCBX_STATUS_DIS 7
#define ICE_TLV_TYPE_END 0
#define ICE_TLV_TYPE_ORG 127
#define ICE_IEEE_8021QAZ_OUI 0x0080C2
#define ICE_IEEE_SUBTYPE_ETS_CFG 9
#define ICE_IEEE_SUBTYPE_ETS_REC 10
#define ICE_IEEE_SUBTYPE_PFC_CFG 11
#define ICE_IEEE_SUBTYPE_APP_PRI 12
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
#define ICE_DSCP_OUI 0xFFFFFF
#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41
#define ICE_DSCP_SUBTYPE_ENFORCE 0x42
#define ICE_DSCP_SUBTYPE_TCBW 0x43
#define ICE_DSCP_SUBTYPE_PFC 0x44
#define ICE_DSCP_IPV6_OFFSET 80
#define ICE_CEE_SUBTYPE_CTRL 1
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
#define ICE_CEE_MAX_FEAT_TYPE 3
#define ICE_LLDP_ADMINSTATUS_DIS 0
#define ICE_LLDP_ADMINSTATUS_ENA_RX 1
#define ICE_LLDP_ADMINSTATUS_ENA_TX 2
#define ICE_LLDP_ADMINSTATUS_ENA_RXTX 3
#define ICE_LLDP_TLV_LEN_S 0
#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
#define ICE_LLDP_TLV_TYPE_S 9
#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
#define ICE_LLDP_TLV_SUBTYPE_S 0
#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
#define ICE_LLDP_TLV_OUI_S 8
#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
#define ICE_IEEE_ETS_MAXTC_S 0
#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
#define ICE_IEEE_ETS_CBS_S 6
#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
#define ICE_IEEE_ETS_WILLING_S 7
#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
#define ICE_IEEE_ETS_PRIO_0_S 0
#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
#define ICE_IEEE_ETS_PRIO_1_S 4
#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
#define ICE_CEE_PGID_PRIO_0_S 0
#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
#define ICE_CEE_PGID_PRIO_1_S 4
#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
#define ICE_CEE_PGID_STRICT 15
#define ICE_IEEE_TSA_STRICT 0
#define ICE_IEEE_TSA_CBS 1
#define ICE_IEEE_TSA_ETS 2
#define ICE_IEEE_TSA_VENDOR 255
#define ICE_IEEE_PFC_CAP_S 0
#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
#define ICE_IEEE_PFC_MBC_S 6
#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
#define ICE_IEEE_PFC_WILLING_S 7
#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
#define ICE_IEEE_APP_SEL_S 0
#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
#define ICE_IEEE_APP_PRIO_S 5
#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
#define ICE_TLV_ID_CHASSIS_ID 0
#define ICE_TLV_ID_PORT_ID 1
#define ICE_TLV_ID_TIME_TO_LIVE 2
#define ICE_IEEE_TLV_ID_ETS_CFG 3
#define ICE_IEEE_TLV_ID_ETS_REC 4
#define ICE_IEEE_TLV_ID_PFC_CFG 5
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
#define ICE_TLV_ID_DSCP_UP 3
#define ICE_TLV_ID_DSCP_ENF 4
#define ICE_TLV_ID_DSCP_TC_BW 5
#define ICE_TLV_ID_DSCP_TO_PFC 6
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
#define ICE_DSCP_UP_TLV_LEN 148
#define ICE_DSCP_ENF_TLV_LEN 132
#define ICE_DSCP_TC_BW_TLV_LEN 25
#define ICE_DSCP_PFC_TLV_LEN 6
struct ice_lldp_org_tlv {
uint16_t typelen;
uint32_t ouisubtype;
uint8_t tlvinfo[STRUCT_HACK_VAR_LEN];
} __packed;
struct ice_cee_tlv_hdr {
uint16_t typelen;
uint8_t operver;
uint8_t maxver;
};
struct ice_cee_ctrl_tlv {
struct ice_cee_tlv_hdr hdr;
uint32_t seqno;
uint32_t ackno;
};
struct ice_cee_feat_tlv {
struct ice_cee_tlv_hdr hdr;
uint8_t en_will_err;
#define ICE_CEE_FEAT_TLV_ENA_M 0x80
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
uint8_t subtype;
uint8_t tlvinfo[STRUCT_HACK_VAR_LEN];
};
struct ice_cee_app_prio {
uint16_t protocol;
uint8_t upper_oui_sel;
#define ICE_CEE_APP_SELECTOR_M 0x03
uint16_t lower_oui;
uint8_t prio_map;
} __packed;
/* Parsed ETS (Enhanced Transmission Selection) configuration: per-TC
 * priority assignment, bandwidth share, and transmission selection
 * algorithm tables, each ICE_MAX_TRAFFIC_CLASS entries long. */
struct ice_dcb_ets_cfg {
uint8_t willing;
uint8_t cbs;
uint8_t maxtcs;
uint8_t prio_table[ICE_MAX_TRAFFIC_CLASS];
uint8_t tcbwtable[ICE_MAX_TRAFFIC_CLASS];
uint8_t tsatable[ICE_MAX_TRAFFIC_CLASS];
};
/* Parsed PFC (Priority Flow Control) configuration */
struct ice_dcb_pfc_cfg {
uint8_t willing;
uint8_t mbc;
uint8_t pfccap;
uint8_t pfcena;
};
/* One application-priority mapping: protocol id -> priority, with the
 * selector saying how prot_id is matched (see ICE_APP_SEL_* below). */
struct ice_dcb_app_priority_table {
uint16_t prot_id;
uint8_t priority;
uint8_t selector;
};
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 64
#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
/* tlv_status flag bits */
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
#define ICE_TLV_STATUS_ERR 0x4
/* Well-known application protocol ids and their selector encodings.
 * Note IEEE and CEE use different selector values for the same thing. */
#define ICE_APP_PROT_ID_FCOE 0x8906
#define ICE_APP_PROT_ID_ISCSI 0x0cbc
#define ICE_APP_PROT_ID_ISCSI_860 0x035c
#define ICE_APP_PROT_ID_FIP 0x8914
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
#define ICE_CEE_APP_SEL_ETHTYPE 0x0
#define ICE_CEE_APP_SEL_TCPIP 0x1
/* Full DCBX configuration snapshot: ETS config and recommendation, PFC,
 * application table, and the DSCP-to-TC map used in DSCP QoS mode. */
struct ice_dcbx_cfg {
uint32_t numapps;
uint32_t tlv_status;
struct ice_dcb_ets_cfg etscfg;
struct ice_dcb_ets_cfg etsrec;
struct ice_dcb_pfc_cfg pfc;
/* pfc_mode selects VLAN-UP based or DSCP based QoS */
#define ICE_QOS_MODE_VLAN 0x0
#define ICE_QOS_MODE_DSCP 0x1
uint8_t pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* one bit per DSCP value that has an entry in dscp_map */
ice_declare_bitmap(dscp_mapped, ICE_DSCP_NUM_VAL);
uint8_t dscp_map[ICE_DSCP_NUM_VAL];
uint8_t dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
uint8_t app_mode;
#define ICE_DCBX_APPS_NON_WILLING 0x1
};
/* Per-port QoS state: local/desired/remote DCBX views plus whether LLDP
 * is handled in software (is_sw_lldp) */
struct ice_qos_cfg {
struct ice_dcbx_cfg local_dcbx_cfg;
struct ice_dcbx_cfg desired_dcbx_cfg;
struct ice_dcbx_cfg remote_dcbx_cfg;
uint8_t dcbx_status : 3;
uint8_t is_sw_lldp : 1;
};
/* MAC addresses associated with the port (LAN, permanent, port, WoL) */
struct ice_mac_info {
uint8_t lan_addr[ETHER_ADDR_LEN];
uint8_t perm_addr[ETHER_ADDR_LEN];
uint8_t port_addr[ETHER_ADDR_LEN];
uint8_t wol_addr[ETHER_ADDR_LEN];
};
/* Physical media category derived from the PHY type masks below */
enum ice_media_type {
ICE_MEDIA_NONE = 0,
ICE_MEDIA_UNKNOWN,
ICE_MEDIA_FIBER,
ICE_MEDIA_BASET,
ICE_MEDIA_BACKPLANE,
ICE_MEDIA_DA,
ICE_MEDIA_AUI,
};
/* Masks over the 64-bit phy_type_low/phy_type_high capability words,
 * grouping individual PHY types into the media categories above:
 * BASET = twisted pair, C2M = chip-to-module (AOC/ACC), OPT = optical,
 * BP = backplane, DAC = direct attach copper, C2C = chip-to-chip. */
#define ICE_MEDIA_BASET_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_100BASE_TX | \
ICE_PHY_TYPE_LOW_1000BASE_T | \
ICE_PHY_TYPE_LOW_2500BASE_T | \
ICE_PHY_TYPE_LOW_5GBASE_T | \
ICE_PHY_TYPE_LOW_10GBASE_T | \
ICE_PHY_TYPE_LOW_25GBASE_T)
#define ICE_MEDIA_C2M_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \
ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \
ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \
ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \
ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \
ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC)
#define ICE_MEDIA_C2M_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC)
#define ICE_MEDIA_OPT_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_SX | \
ICE_PHY_TYPE_LOW_1000BASE_LX | \
ICE_PHY_TYPE_LOW_10GBASE_SR | \
ICE_PHY_TYPE_LOW_10GBASE_LR | \
ICE_PHY_TYPE_LOW_25GBASE_SR | \
ICE_PHY_TYPE_LOW_25GBASE_LR | \
ICE_PHY_TYPE_LOW_40GBASE_SR4 | \
ICE_PHY_TYPE_LOW_40GBASE_LR4 | \
ICE_PHY_TYPE_LOW_50GBASE_SR2 | \
ICE_PHY_TYPE_LOW_50GBASE_LR2 | \
ICE_PHY_TYPE_LOW_50GBASE_SR | \
ICE_PHY_TYPE_LOW_50GBASE_LR | \
ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
ICE_PHY_TYPE_LOW_50GBASE_FR | \
ICE_PHY_TYPE_LOW_100GBASE_DR)
#define ICE_MEDIA_BP_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_KX | \
ICE_PHY_TYPE_LOW_2500BASE_KX | \
ICE_PHY_TYPE_LOW_5GBASE_KR | \
ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
ICE_PHY_TYPE_LOW_25GBASE_KR | \
ICE_PHY_TYPE_LOW_25GBASE_KR_S | \
ICE_PHY_TYPE_LOW_25GBASE_KR1 | \
ICE_PHY_TYPE_LOW_40GBASE_KR4 | \
ICE_PHY_TYPE_LOW_50GBASE_KR2 | \
ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \
ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4)
#define ICE_MEDIA_BP_PHY_TYPE_HIGH_M ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4
#define ICE_MEDIA_DAC_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_10G_SFI_DA | \
ICE_PHY_TYPE_LOW_25GBASE_CR | \
ICE_PHY_TYPE_LOW_25GBASE_CR_S | \
ICE_PHY_TYPE_LOW_25GBASE_CR1 | \
ICE_PHY_TYPE_LOW_40GBASE_CR4 | \
ICE_PHY_TYPE_LOW_50GBASE_CR2 | \
ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
ICE_PHY_TYPE_LOW_50GBASE_CP | \
ICE_PHY_TYPE_LOW_100GBASE_CP2)
#define ICE_MEDIA_C2C_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_100M_SGMII | \
ICE_PHY_TYPE_LOW_1G_SGMII | \
ICE_PHY_TYPE_LOW_2500BASE_X | \
ICE_PHY_TYPE_LOW_10G_SFI_C2C | \
ICE_PHY_TYPE_LOW_25G_AUI_C2C | \
ICE_PHY_TYPE_LOW_40G_XLAUI | \
ICE_PHY_TYPE_LOW_50G_LAUI2 | \
ICE_PHY_TYPE_LOW_50G_AUI2 | \
ICE_PHY_TYPE_LOW_50G_AUI1 | \
ICE_PHY_TYPE_LOW_100G_CAUI4 | \
ICE_PHY_TYPE_LOW_100G_AUI4)
#define ICE_MEDIA_C2C_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
ICE_PHY_TYPE_HIGH_100G_AUI2)
#define ICE_IPV6_ADDR_LENGTH 16
/* Recipe sizing: each recipe holds 4 lookup words; up to 5 recipes can
 * be chained, giving ICE_MAX_CHAIN_WORDS total match words. */
#define ICE_NUM_WORDS_RECIPE 4
#define ICE_MAX_CHAIN_RECIPE 5
#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
#define ICE_CHAIN_FV_INDEX_START 47
/* Software view of a protocol header position within a packet:
 * OFOS = outer first, IL = inner layer, OL = outer layer. */
enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
ICE_ETYPE_IL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
ICE_IPV6_OFOS,
ICE_IPV6_IL,
ICE_TCP_IL,
ICE_UDP_OF,
ICE_UDP_ILOS,
ICE_SCTP_IL,
ICE_VXLAN,
ICE_GENEVE,
ICE_VXLAN_GPE,
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_PPPOE,
ICE_L2TPV3,
ICE_PROTOCOL_LAST
};
/* Tunnel classification for switch rules */
enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN_GPE,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_GENEVE_VLAN,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_VXLAN_VLAN,
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_UDP,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
ICE_ALL_TUNNELS
};
/* Hardware protocol ids: the values the device parser assigns to each
 * recognized protocol header. Values are fixed by hardware and sparse. */
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
ICE_PROT_MAC_O2 = 2,
ICE_PROT_MAC_IL = 4,
ICE_PROT_MAC_IN_MAC = 7,
ICE_PROT_ETYPE_OL = 9,
ICE_PROT_ETYPE_IL = 10,
ICE_PROT_PAY = 15,
ICE_PROT_EVLAN_O = 16,
ICE_PROT_VLAN_O = 17,
ICE_PROT_VLAN_IF = 18,
ICE_PROT_MPLS_OL_MINUS_1 = 27,
ICE_PROT_MPLS_OL_OR_OS = 28,
ICE_PROT_MPLS_IL = 29,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV4_IL_IL = 34,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_IPV6_IL_IL = 42,
ICE_PROT_IPV6_NEXT_PROTO = 43,
ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
ICE_PROT_NSH_F = 84,
ICE_PROT_ESP_F = 88,
ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_ICMP_IL = 98,
ICE_PROT_ICMPV6_IL = 100,
ICE_PROT_VRRP_F = 101,
ICE_PROT_OSPF = 102,
ICE_PROT_ATAOE_OF = 114,
ICE_PROT_CTRL_OF = 116,
ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_EAPOL_OF = 120,
/* META_ID and INVALID intentionally share the value 255 */
ICE_PROT_META_ID = 255,
ICE_PROT_INVALID = 255
};
#define ICE_VNI_OFFSET 12
#define ICE_NAN_OFFSET 511
/* _HW constants mirror selected enum ice_prot_id values for table use */
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
#define ICE_IPV4_IL_HW 33
#define ICE_IPV6_OFOS_HW 40
#define ICE_IPV6_IL_HW 41
#define ICE_TCP_IL_HW 49
#define ICE_UDP_ILOS_HW 53
#define ICE_SCTP_IL_HW 96
#define ICE_PPPOE_HW 103
#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52
#define ICE_GRE_OF_HW 64
#define ICE_META_DATA_ID_HW 255
/* Metadata (MDID) words are 2 bytes; *_MDID_OFF converts a metadata
 * word index into a byte offset within the metadata region. */
#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 20
#define ICE_TUN_FLAG_MDID_OFF(word) \
(ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
#define ICE_TUN_FLAG_MASK 0xFF
#define ICE_FROM_NETWORK_FLAG_MASK 0x8
#define ICE_DIR_FLAG_MASK 0x10
#define ICE_TUN_FLAG_IN_VLAN_MASK 0x80
#define ICE_TUN_FLAG_VLAN_MASK 0x01
#define ICE_TUN_FLAG_FV_IND 2
#define ICE_VLAN_FLAG_MDID 20
#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
#define ICE_PROTOCOL_MAX_ENTRIES 16
/* Maps a software protocol type to its hardware protocol id */
struct ice_protocol_entry {
enum ice_protocol_type type;
uint8_t protocol_id;
};
/* Ethernet header (addresses only; ethertype is separate below) */
struct ice_ether_hdr {
uint8_t dst_addr[ETHER_ADDR_LEN];
uint8_t src_addr[ETHER_ADDR_LEN];
};
struct ice_ethtype_hdr {
uint16_t ethtype_id;
};
/* Ethernet header plus a 32-bit VLAN field (tag + ethertype region) */
struct ice_ether_vlan_hdr {
uint8_t dst_addr[ETHER_ADDR_LEN];
uint8_t src_addr[ETHER_ADDR_LEN];
uint32_t vlan_id;
};
/* 802.1Q tag: TPID followed by TCI */
struct ice_vlan_hdr {
uint16_t type;
uint16_t vlan;
};
/* IPv4 header template used when building lookup/dummy packets.
 * NOTE(review): "version" here holds the combined version/IHL byte —
 * confirm against the packet-template builders that fill it in. */
struct ice_ipv4_hdr {
uint8_t version;
uint8_t tos;
uint16_t total_length;
uint16_t id;
uint16_t frag_off;
uint8_t time_to_live;
uint8_t protocol;
uint16_t check;
uint32_t src_addr;
uint32_t dst_addr;
};
/* Little-endian view of the IPv6 version/traffic-class/flow-label word */
struct ice_le_ver_tc_flow {
union {
struct {
uint32_t flow_label : 20;
uint32_t tc : 8;
uint32_t version : 4;
} fld;
uint32_t val;
} u;
};
/* IPv6 header template; be_ver_tc_flow is the big-endian on-wire form
 * of the word decoded by ice_le_ver_tc_flow above */
struct ice_ipv6_hdr {
uint32_t be_ver_tc_flow;
uint16_t payload_len;
uint8_t next_hdr;
uint8_t hop_limit;
uint8_t src_addr[ICE_IPV6_ADDR_LENGTH];
uint8_t dst_addr[ICE_IPV6_ADDR_LENGTH];
};
/* SCTP common header */
struct ice_sctp_hdr {
uint16_t src_port;
uint16_t dst_port;
uint32_t verification_tag;
uint32_t check;
};
/* Generic L4 header shape shared by TCP/UDP lookups (ports first) */
struct ice_l4_hdr {
uint16_t src_port;
uint16_t dst_port;
uint16_t len;
uint16_t check;
};
/* UDP-based tunnel header (VXLAN/GENEVE style): flags, protocol, VNI */
struct ice_udp_tnl_hdr {
uint16_t field;
uint16_t proto_type;
uint32_t vni;
};
/* GTP-U header including the optional extension fields; rsrvd_*-named
 * members are carried for layout but not individually interpreted here */
struct ice_udp_gtp_hdr {
uint8_t flags;
uint8_t msg_type;
uint16_t rsrvd_len;
uint32_t teid;
uint16_t rsrvd_seq_nbr;
uint8_t rsrvd_n_pdu_nbr;
uint8_t rsrvd_next_ext;
uint8_t rsvrd_ext_len;
uint8_t pdu_type;
uint8_t qfi;
uint8_t rsvrd;
};
/* PPPoE session header plus the first PPP protocol-id word */
struct ice_pppoe_hdr {
uint8_t rsrvd_ver_type;
uint8_t rsrvd_code;
uint16_t session_id;
uint16_t length;
uint16_t ppp_prot_id;
};
/* L2TPv3 session header: session id and 64-bit cookie */
struct ice_l2tpv3_sess_hdr {
uint32_t session_id;
uint64_t cookie;
};
/* NVGRE header: GRE flags/protocol plus TNI+flow id word */
struct ice_nvgre {
uint16_t flags;
uint16_t protocol;
uint32_t tni_flow;
};
/* One protocol header of any supported type; sized by the largest
 * member so a lookup element can hold header and mask uniformly. */
union ice_prot_hdr {
struct ice_ether_hdr eth_hdr;
struct ice_ethtype_hdr ethertype;
struct ice_vlan_hdr vlan_hdr;
struct ice_ipv4_hdr ipv4_hdr;
struct ice_ipv6_hdr ipv6_hdr;
struct ice_l4_hdr l4_hdr;
struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
};
/* Per-protocol table of byte offsets, one per byte of the largest hdr */
struct ice_prot_ext_tbl_entry {
enum ice_protocol_type prot_type;
uint8_t offs[sizeof(union ice_prot_hdr)];
};
#define ICE_FV_OFFSET_INVAL 0x1FF
/* One field-vector word: which protocol and which offset within it the
 * hardware extracts. Packed: mirrors the device table layout. */
struct ice_fv_word {
uint8_t prot_id;
uint16_t off;
uint8_t nresvrd;
} __packed;
#define ICE_MAX_FV_WORDS 48
/* A complete 48-word extraction field vector */
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
/* Hardware packet-type (PTYPE) indices for common L2/L3/L4 payloads */
#define ICE_PTYPE_MAC_PAY 1
#define ICE_PTYPE_IPV4FRAG_PAY 22
#define ICE_PTYPE_IPV4_PAY 23
#define ICE_PTYPE_IPV4_UDP_PAY 24
#define ICE_PTYPE_IPV4_TCP_PAY 26
#define ICE_PTYPE_IPV4_SCTP_PAY 27
#define ICE_PTYPE_IPV4_ICMP_PAY 28
#define ICE_PTYPE_IPV6FRAG_PAY 88
#define ICE_PTYPE_IPV6_PAY 89
#define ICE_PTYPE_IPV6_UDP_PAY 90
#define ICE_PTYPE_IPV6_TCP_PAY 92
#define ICE_PTYPE_IPV6_SCTP_PAY 93
#define ICE_PTYPE_IPV6_ICMP_PAY 94
/* Metadata section of a DDP package: version, name, and tracking id */
struct ice_meta_sect {
struct ice_pkg_ver ver;
#define ICE_META_SECT_NAME_SIZE 28
char name[ICE_META_SECT_NAME_SIZE];
uint32_t track_id;
};
/* Packet-type group (PTG) indices */
#define ICE_PTG_IM_IPV4_TCP 16
#define ICE_PTG_IM_IPV4_UDP 17
#define ICE_PTG_IM_IPV4_SCTP 18
#define ICE_PTG_IM_IPV4_PAY 20
#define ICE_PTG_IM_IPV4_OTHER 21
#define ICE_PTG_IM_IPV6_TCP 32
#define ICE_PTG_IM_IPV6_UDP 33
#define ICE_PTG_IM_IPV6_SCTP 34
#define ICE_PTG_IM_IPV6_OTHER 37
#define ICE_PTG_IM_L2_OTHER 67
/* Working state while mapping a rule's lookup words onto field-vector
 * words; "done" tracks which words have been placed. */
struct ice_prot_lkup_ext {
uint16_t prot_type;
uint8_t n_val_words;
uint16_t field_off[ICE_MAX_CHAIN_WORDS];
uint16_t field_mask[ICE_MAX_CHAIN_WORDS];
struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
ice_declare_bitmap(done, ICE_MAX_CHAIN_WORDS);
};
/* One recipe's group of up to ICE_NUM_WORDS_RECIPE fv-word/mask pairs */
struct ice_pref_recipe_group {
uint8_t n_val_pairs;
struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
uint16_t mask[ICE_NUM_WORDS_RECIPE];
};
/* One entry in a chained-recipe list; chain_idx is the result index
 * used to chain into the next recipe (ICE_INVAL_CHAIN_IND = none). */
struct ice_recp_grp_entry {
TAILQ_ENTRY(ice_recp_grp_entry) l_entry;
#define ICE_INVAL_CHAIN_IND 0xFF
uint16_t rid;
uint8_t chain_idx;
uint16_t fv_idx[ICE_NUM_WORDS_RECIPE];
uint16_t fv_mask[ICE_NUM_WORDS_RECIPE];
struct ice_pref_recipe_group r_group;
};
/* VSI ownership type; values match the hardware encoding (note gap) */
enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
ICE_VSI_VMDQ2 = 2,
ICE_VSI_LB = 6,
};
/* Snapshot of link state as last reported by firmware */
struct ice_link_status {
uint64_t phy_type_low;
uint64_t phy_type_high;
uint8_t topo_media_conflict;
uint16_t max_frame_size;
uint16_t link_speed;
uint16_t req_speeds;
uint8_t link_cfg_err;
uint8_t lse_ena;
uint8_t link_info;
uint8_t an_info;
uint8_t ext_info;
uint8_t fec_info;
uint8_t pacing;
uint8_t module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
};
/* PHY state: current and previous link info, capability words, media
 * class, and the user's requested speed/FEC/FC overrides */
struct ice_phy_info {
struct ice_link_status link_info;
struct ice_link_status link_info_old;
uint64_t phy_type_low;
uint64_t phy_type_high;
enum ice_media_type media_type;
uint8_t get_link_info;
uint16_t curr_user_speed_req;
enum ice_fec_mode curr_user_fec_req;
enum ice_fc_mode curr_user_fc_req;
struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg;
};
/* Per-port state: scheduler tree root and bookkeeping, MAC/PHY/FC
 * state, and QoS configuration. sched_lock guards the scheduler tree. */
struct ice_port_info {
struct ice_sched_node *root;
struct ice_hw *hw;
uint32_t last_node_teid;
uint16_t sw_id;
uint16_t pf_vf_num;
uint8_t port_state;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
uint8_t lport;
#define ICE_LPORT_MASK 0xff
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
struct ice_lock sched_lock;
/* first sibling at each (TC, layer) position of the scheduler tree */
struct ice_sched_node *
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_bw_type_info root_node_bw_t_info;
struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS];
struct ice_qos_cfg qos_cfg;
uint8_t is_vf:1;
uint8_t is_custom_tx_enabled:1;
};
TAILQ_HEAD(ice_vsi_list_map_head, ice_vsi_list_map_info);
/* Switch sizing and filter-rule constants */
#define ICE_MAX_NUM_PROFILES 256
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_MAX_SW 256
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_VSI_INVAL_ID 0xFFFF
#define ICE_INVAL_Q_HANDLE 0xFFFF
/* Filter direction flags (see ice_fltr_info.flag) */
#define ICE_FLTR_RX BIT(0)
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_RX_LB BIT(2)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_DUMMY_ETH_HDR_LEN 16
/* VSI context exchanged with firmware plus per-TC queue bookkeeping */
struct ice_vsi_ctx {
uint16_t vsi_num;
uint16_t vsis_allocd;
uint16_t vsis_unallocated;
uint16_t flags;
struct ice_aqc_vsi_props info;
struct ice_sched_vsi_info sched;
uint8_t alloc_from_pool;
uint8_t vf_num;
uint16_t num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
uint16_t num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];
struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
/* Top-level switch state: VSI-list map, per-recipe rule lists, and a
 * per-profile resource bitmap (ICE_MAX_FV_WORDS bits each). */
struct ice_switch_info {
struct ice_vsi_list_map_head vsi_list_map_head;
struct ice_sw_recipe *recp_list;
uint16_t prof_res_bm_init;
uint16_t max_used_prof_index;
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
TAILQ_HEAD(ice_rl_prof_list_head, ice_aqc_rl_profile_info);
TAILQ_HEAD(ice_agg_list_head, ice_sched_agg_info);
/* Rate-limit profile plus its reference count */
struct ice_aqc_rl_profile_info {
struct ice_aqc_rl_profile_elem profile;
TAILQ_ENTRY(ice_aqc_rl_profile_info) list_entry;
uint32_t bw;
uint16_t prof_id_ref;
};
/* Maps a hardware VSI list id to the set of member VSIs (bitmap) */
struct ice_vsi_list_map_info {
TAILQ_ENTRY(ice_vsi_list_map_info) list_entry;
ice_declare_bitmap(vsi_map, ICE_MAX_VSI);
uint16_t vsi_list_id;
uint16_t ref_cnt;
};
/* One lookup of an advanced rule: header values (h_u) and mask (m_u) */
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
union ice_prot_hdr h_u;
union ice_prot_hdr m_u;
};
struct ice_adv_rule_flags_info {
uint32_t act;
uint8_t act_valid;
};
/* Action a switch filter takes on a match */
enum ice_sw_fwd_act_type {
ICE_FWD_TO_VSI = 0,
ICE_FWD_TO_VSI_LIST,
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
ICE_LG_ACTION,
ICE_INVAL_ACT
};
/* Switch action control: forwarding target is a union discriminated by
 * fltr_act (queue id, VSI id, HW VSI id, or VSI list id) */
struct ice_sw_act_ctrl {
uint16_t src;
uint16_t flag;
enum ice_sw_fwd_act_type fltr_act;
union {
uint16_t q_id:11;
uint16_t vsi_id:10;
uint16_t hw_vsi_id:10;
uint16_t vsi_list_id:10;
} fwd_id;
uint16_t vsi_handle;
uint8_t qgrp_size;
};
/* Everything needed to program one advanced switch rule */
struct ice_adv_rule_info {
enum ice_sw_tunnel_type tun_type;
struct ice_sw_act_ctrl sw_act;
uint32_t priority;
uint8_t rx;
uint8_t add_dir_lkup;
uint16_t fltr_rule_id;
uint16_t lg_id;
uint16_t vlan_type;
struct ice_adv_rule_flags_info flags_info;
};
/* Tracking entry for an installed advanced rule; lkups is an array of
 * lkups_cnt elements owned by this entry. */
struct ice_adv_fltr_mgmt_list_entry {
TAILQ_ENTRY(ice_adv_fltr_mgmt_list_entry) list_entry;
struct ice_adv_lkup_elem *lkups;
struct ice_adv_rule_info rule_info;
uint16_t lkups_cnt;
struct ice_vsi_list_map_info *vsi_list_info;
uint16_t vsi_count;
};
/* Promiscuous-mode categories (bit positions, per direction) */
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0,
ICE_PROMISC_UCAST_TX,
ICE_PROMISC_MCAST_RX,
ICE_PROMISC_MCAST_TX,
ICE_PROMISC_BCAST_RX,
ICE_PROMISC_BCAST_TX,
ICE_PROMISC_VLAN_RX,
ICE_PROMISC_VLAN_TX,
ICE_PROMISC_UCAST_RX_LB,
ICE_PROMISC_MAX,
};
/* What the "src" field of a filter refers to */
enum ice_src_id {
ICE_SRC_ID_UNKNOWN = 0,
ICE_SRC_ID_VSI,
ICE_SRC_ID_QUEUE,
ICE_SRC_ID_LPORT,
};
/* Basic (non-advanced) switch filter: match criteria in l_data keyed by
 * lkup_type, forwarding target in fwd_id keyed by fltr_act. */
struct ice_fltr_info {
enum ice_sw_lkup_type lkup_type;
enum ice_sw_fwd_act_type fltr_act;
uint16_t fltr_rule_id;
uint16_t flag;
uint16_t src;
enum ice_src_id src_id;
union {
struct {
uint8_t mac_addr[ETHER_ADDR_LEN];
} mac;
struct {
uint8_t mac_addr[ETHER_ADDR_LEN];
uint16_t vlan_id;
} mac_vlan;
struct {
uint16_t vlan_id;
uint16_t tpid;
uint8_t tpid_valid;
} vlan;
struct {
uint16_t ethertype;
uint8_t mac_addr[ETHER_ADDR_LEN];
} ethertype_mac;
} l_data;
union {
uint16_t q_id:11;
uint16_t hw_vsi_id:10;
uint16_t vsi_list_id:10;
} fwd_id;
uint16_t vsi_handle;
uint8_t qgrp_size;
uint8_t lb_en;
uint8_t lan_en;
};
enum ice_fltr_marker {
ICE_FLTR_NOT_FOUND,
ICE_FLTR_FOUND,
};
/* Filter plus per-operation status, for batched add/remove requests */
struct ice_fltr_list_entry {
TAILQ_ENTRY(ice_fltr_list_entry) list_entry;
enum ice_status status;
struct ice_fltr_info fltr_info;
};
/* Tracking entry for an installed basic filter; 0xff/0xffff sentinels
 * mean the corresponding resource is not allocated. */
struct ice_fltr_mgmt_list_entry {
struct ice_vsi_list_map_info *vsi_list_info;
uint16_t vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff
uint16_t lg_act_idx;
#define ICE_INVAL_SW_MARKER_ID 0xffff
uint16_t sw_marker_id;
TAILQ_ENTRY(ice_fltr_mgmt_list_entry) list_entry;
struct ice_fltr_info fltr_info;
#define ICE_INVAL_COUNTER_ID 0xff
uint8_t counter_index;
enum ice_fltr_marker marker;
};
/* Builds a host-order IPv4 netmask from a prefix length (1..32).
 * NOTE(review): prefix == 0 shifts a 64-bit value by 32, which only
 * works because the intermediate is 64-bit — verify callers' range. */
#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((uint32_t)((~0ULL) << (32 - (prefix))))
#define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful
#define ICE_FLOW_PROF_ID_BYPASS 0
#define ICE_FLOW_PROF_ID_DEFAULT 1
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_VSI_INVAL 0xffff
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
/* Bitmasks of flow fields (enum ice_flow_field) hashed together for RSS */
#define ICE_FLOW_HASH_IPV4 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
#define ICE_HASH_INVALID 0
/* Convenience combinations: addresses + ports per L4 protocol */
#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
/* Header-presence flags describing one segment of a flow (bitwise-OR) */
enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_NONE = 0x00000000,
ICE_FLOW_SEG_HDR_ETH = 0x00000001,
ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
ICE_FLOW_SEG_HDR_ARP = 0x00000010,
ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000,
ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000,
};
/* Matchable packet fields; these indices feed the BIT_ULL() hash masks
 * above and index ice_flow_seg_info.fields[]. */
enum ice_flow_field {
ICE_FLOW_FIELD_IDX_ETH_DA,
ICE_FLOW_FIELD_IDX_ETH_SA,
ICE_FLOW_FIELD_IDX_S_VLAN,
ICE_FLOW_FIELD_IDX_C_VLAN,
ICE_FLOW_FIELD_IDX_ETH_TYPE,
ICE_FLOW_FIELD_IDX_IPV4_DSCP,
ICE_FLOW_FIELD_IDX_IPV6_DSCP,
ICE_FLOW_FIELD_IDX_IPV4_TTL,
ICE_FLOW_FIELD_IDX_IPV4_PROT,
ICE_FLOW_FIELD_IDX_IPV6_TTL,
ICE_FLOW_FIELD_IDX_IPV6_PROT,
ICE_FLOW_FIELD_IDX_IPV4_SA,
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
ICE_FLOW_FIELD_IDX_IPV6_DA,
ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
ICE_FLOW_FIELD_IDX_TCP_FLAGS,
ICE_FLOW_FIELD_IDX_ARP_SIP,
ICE_FLOW_FIELD_IDX_ARP_DIP,
ICE_FLOW_FIELD_IDX_ARP_SHA,
ICE_FLOW_FIELD_IDX_ARP_DHA,
ICE_FLOW_FIELD_IDX_ARP_OP,
ICE_FLOW_FIELD_IDX_ICMP_TYPE,
ICE_FLOW_FIELD_IDX_ICMP_CODE,
ICE_FLOW_FIELD_IDX_GRE_KEYID,
ICE_FLOW_FIELD_IDX_MAX
};
/* Flow types as numbered by the AVF (VF driver) interface; values are
 * fixed bit positions in the 64-bit RSS HENA word below. */
enum ice_flow_avf_hdr_field {
ICE_AVF_FLOW_FIELD_INVALID = 0,
ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
ICE_AVF_FLOW_FIELD_IPV4_UDP,
ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
ICE_AVF_FLOW_FIELD_IPV4_TCP,
ICE_AVF_FLOW_FIELD_IPV4_SCTP,
ICE_AVF_FLOW_FIELD_IPV4_OTHER,
ICE_AVF_FLOW_FIELD_FRAG_IPV4,
ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
ICE_AVF_FLOW_FIELD_IPV6_UDP,
ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
ICE_AVF_FLOW_FIELD_IPV6_TCP,
ICE_AVF_FLOW_FIELD_IPV6_SCTP,
ICE_AVF_FLOW_FIELD_IPV6_OTHER,
ICE_AVF_FLOW_FIELD_FRAG_IPV6,
ICE_AVF_FLOW_FIELD_RSVD47,
ICE_AVF_FLOW_FIELD_FCOE_OX,
ICE_AVF_FLOW_FIELD_FCOE_RX,
ICE_AVF_FLOW_FIELD_FCOE_OTHER,
ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
ICE_AVF_FLOW_FIELD_MAX
};
/* Default set of AVF flow types enabled for RSS hashing */
#define ICE_DEFAULT_RSS_HENA ( \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
/* Which packet layer an RSS configuration applies to (outer vs inner,
 * optionally constrained by the outer encapsulation) */
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS,
ICE_RSS_INNER_HEADERS,
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
ICE_RSS_ANY_HEADERS
};
/* One RSS hash request: header-presence flags, field mask, layer, and
 * whether the hash should be symmetric */
struct ice_rss_hash_cfg {
uint32_t addl_hdrs;
uint64_t hash_flds;
enum ice_rss_cfg_hdr_type hdr_type;
bool symm;
};
/* Flow direction flags (combinable) */
enum ice_flow_dir {
ICE_FLOW_DIR_UNDEFINED = 0,
ICE_FLOW_TX = 0x01,
ICE_FLOW_RX = 0x02,
ICE_FLOW_TX_RX = ICE_FLOW_RX | ICE_FLOW_TX
};
enum ice_flow_priority {
ICE_FLOW_PRIO_LOW,
ICE_FLOW_PRIO_NORMAL,
ICE_FLOW_PRIO_HIGH
};
/* A flow profile has 1 (non-tunneled) or 2 (outer+inner) segments */
#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_PROFILE_MAX 1024
#define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (uint32_t)(val))
/* Where a field was placed in the extraction sequence */
struct ice_flow_seg_xtrct {
uint8_t prot_id;
uint16_t off;
uint8_t idx;
uint8_t disp;
};
/* How a field is matched: plain value, range, prefix, or sized blob */
enum ice_flow_fld_match_type {
ICE_FLOW_FLD_TYPE_REG,
ICE_FLOW_FLD_TYPE_RANGE,
ICE_FLOW_FLD_TYPE_PREFIX,
ICE_FLOW_FLD_TYPE_SIZE,
};
/* Byte locations of a field's value/mask/last within an entry buffer */
struct ice_flow_fld_loc {
uint16_t val;
uint16_t mask;
uint16_t last;
};
struct ice_flow_fld_info {
enum ice_flow_fld_match_type type;
struct ice_flow_fld_loc src;
struct ice_flow_fld_loc entry;
struct ice_flow_seg_xtrct xtrct;
};
/* One segment of a flow: headers present, which fields are matched
 * ("match") and which of those are range matches ("range"). */
struct ice_flow_seg_info {
uint32_t hdrs;
ice_declare_bitmap(match, ICE_FLOW_FIELD_IDX_MAX);
ice_declare_bitmap(range, ICE_FLOW_FIELD_IDX_MAX);
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
};
#define ICE_FLOW_ENTRY_HNDL(e) ((uint64_t)e)
/* A flow profile: direction, its segments, and the VSIs bound to it */
struct ice_flow_prof {
TAILQ_ENTRY(ice_flow_prof) l_entry;
uint64_t id;
enum ice_flow_dir dir;
uint8_t segs_cnt;
struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
ice_declare_bitmap(vsis, ICE_MAX_VSI);
union {
bool symm;
} cfg;
};
/* An applied RSS configuration and the VSIs it covers */
struct ice_rss_cfg {
TAILQ_ENTRY(ice_rss_cfg) l_entry;
ice_declare_bitmap(vsis, ICE_MAX_VSI);
struct ice_rss_hash_cfg hash;
};
TAILQ_HEAD(ice_rss_cfg_head, ice_rss_cfg);
/* Actions a flow entry can carry */
enum ice_flow_action_type {
ICE_FLOW_ACT_NOP,
ICE_FLOW_ACT_ALLOW,
ICE_FLOW_ACT_DROP,
ICE_FLOW_ACT_CNTR_PKT,
ICE_FLOW_ACT_FWD_VSI,
ICE_FLOW_ACT_FWD_VSI_LIST,
ICE_FLOW_ACT_FWD_QUEUE,
ICE_FLOW_ACT_FWD_QUEUE_GROUP,
ICE_FLOW_ACT_PUSH,
ICE_FLOW_ACT_POP,
ICE_FLOW_ACT_MODIFY,
ICE_FLOW_ACT_CNTR_BYTES,
ICE_FLOW_ACT_CNTR_PKT_BYTES,
ICE_FLOW_ACT_GENERIC_0,
ICE_FLOW_ACT_GENERIC_1,
ICE_FLOW_ACT_GENERIC_2,
ICE_FLOW_ACT_GENERIC_3,
ICE_FLOW_ACT_GENERIC_4,
ICE_FLOW_ACT_RPT_FLOW_ID,
ICE_FLOW_ACT_BUILD_PROF_IDX,
};
struct ice_flow_action {
enum ice_flow_action_type type;
union {
uint32_t dummy;
} data;
};
TAILQ_HEAD(ice_recp_grp_entry_head, ice_recp_grp_entry);
TAILQ_HEAD(ice_fltr_list_head, ice_fltr_list_entry);
TAILQ_HEAD(ice_fltr_mgmt_list_head, ice_fltr_mgmt_list_entry);
TAILQ_HEAD(ice_adv_fltr_mgmt_list_head, ice_adv_fltr_mgmt_list_entry);
/* DDP package version requirements: supported package version and the
 * package file format version this code understands */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
/* Result of attempting to load a DDP package; 0 or the "already
 * loaded" states still leave the device usable. */
enum ice_ddp_state {
ICE_DDP_PKG_SUCCESS = 0,
ICE_DDP_PKG_ALREADY_LOADED = -1,
ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
ICE_DDP_PKG_FW_MISMATCH = -5,
ICE_DDP_PKG_INVALID_FILE = -6,
ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
ICE_DDP_PKG_NO_SEC_MANIFEST = -9,
ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10,
ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11,
ICE_DDP_PKG_MANIFEST_INVALID = -12,
ICE_DDP_PKG_BUFFER_INVALID = -13,
ICE_DDP_PKG_ERR = -14,
};
/* DDP package file header: format version then a table of byte offsets
 * to each segment */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
uint32_t seg_count;
uint32_t seg_offset[STRUCT_HACK_VAR_LEN];
};
/* Signature algorithms for signed package segments */
#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003
#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005
/* Common header at the start of every package segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_INVALID 0x00000000
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE_E810 0x00000010
#define SEGMENT_TYPE_SIGNING 0x00001001
#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
uint32_t seg_type;
struct ice_pkg_ver seg_format_ver;
uint32_t seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* PCI device/vendor id pair, addressable as one 32-bit word */
union ice_device_id {
struct {
uint16_t device_id;
uint16_t vendor_id;
} dev_vend_id;
uint32_t id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
/* Device-specific ("ice") segment: header plus the device-id table the
 * segment applies to */
struct ice_seg {
struct ice_generic_seg_hdr hdr;
uint32_t device_table_count;
struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
};
struct ice_nvm_table {
uint32_t table_count;
uint32_t vers[STRUCT_HACK_VAR_LEN];
};
/* One 4 KB package buffer, the unit of download to the device */
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
uint8_t buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
uint32_t buf_count;
struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
};
/* Run-time configuration segment: header, pad, then its buffers */
struct ice_run_time_cfg_seg {
struct ice_generic_seg_hdr hdr;
uint8_t rsvd[8];
struct ice_buf_table buf_table;
};
/* Global metadata segment: overall package version and name */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
uint32_t rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
/* Valid ranges for a section's offset and size within a 4 KB buffer */
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
/* Signing segment: identifies which segment/buffers it signs */
struct ice_sign_seg {
struct ice_generic_seg_hdr hdr;
uint32_t seg_id;
uint32_t sign_type;
uint32_t signed_seg_idx;
uint32_t signed_buf_start;
uint32_t signed_buf_count;
#define ICE_SIGN_SEG_RESERVED_COUNT 44
uint8_t reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
/* Directory entry for one section inside a package buffer */
struct ice_section_entry {
uint32_t type;
uint16_t offset;
uint16_t size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
/* In-buffer layout: section count, end-of-data offset, then the
 * section directory */
struct ice_buf_hdr {
uint16_t section_count;
uint16_t data_end;
struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
};
/* Max entries of size ent_sz (after a header of hd_sz bytes) that fit
 * in one buffer alongside a one-entry section directory */
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
(ent_sz))
/* Package section IDs (SIDs). Blocks of ten per hardware block:
 * 10-18 switch (SW), 20-28 ACL, 30-38 flow director (FD), 40-48 RSS,
 * 80-88 protocol engine (PE); 50-79 are parser sections. */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
/* Rx/Tx parser sections */
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_XLT0_BUILDER 53
#define ICE_SID_RXPARSER_NODE_PTYPE 54
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_RXPARSER_XLT0 59
#define ICE_SID_TXPARSER_CAM 60
#define ICE_SID_TXPARSER_NOMATCH_CAM 61
#define ICE_SID_TXPARSER_IMEM 62
#define ICE_SID_TXPARSER_XLT0_BUILDER 63
#define ICE_SID_TXPARSER_NODE_PTYPE 64
#define ICE_SID_TXPARSER_MARKER_PTYPE 65
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_TXPARSER_PROTO_GRP 67
#define ICE_SID_TXPARSER_METADATA_INIT 68
#define ICE_SID_TXPARSER_XLT0 69
#define ICE_SID_RXPARSER_INIT_REDIR 70
#define ICE_SID_TXPARSER_INIT_REDIR 71
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_TXPARSER_MARKER_GRP 73
#define ICE_SID_RXPARSER_LAST_PROTO 74
#define ICE_SID_TXPARSER_LAST_PROTO 75
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_TXPARSER_PG_SPILL 77
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label sections (0x80000010-0x80000038): human-readable names for
 * hardware table entries */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
#define ICE_SID_LBL_RESERVED_12 0x80000012
#define ICE_SID_LBL_RESERVED_13 0x80000013
#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
#define ICE_SID_LBL_PTYPE 0x80000016
#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
#define ICE_SID_LBL_FLAG 0x80000023
#define ICE_SID_LBL_REG 0x80000024
#define ICE_SID_LBL_SW_PTG 0x80000025
#define ICE_SID_LBL_ACL_PTG 0x80000026
#define ICE_SID_LBL_PE_PTG 0x80000027
#define ICE_SID_LBL_RSS_PTG 0x80000028
#define ICE_SID_LBL_FD_PTG 0x80000029
#define ICE_SID_LBL_SW_VSIG 0x8000002A
#define ICE_SID_LBL_ACL_VSIG 0x8000002B
#define ICE_SID_LBL_PE_VSIG 0x8000002C
#define ICE_SID_LBL_RSS_VSIG 0x8000002D
#define ICE_SID_LBL_FD_VSIG 0x8000002E
#define ICE_SID_LBL_PTYPE_META 0x8000002F
#define ICE_SID_LBL_SW_PROFID 0x80000030
#define ICE_SID_LBL_ACL_PROFID 0x80000031
#define ICE_SID_LBL_PE_PROFID 0x80000032
#define ICE_SID_LBL_RSS_PROFID 0x80000033
#define ICE_SID_LBL_FD_PROFID 0x80000034
#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
#define ICE_SID_LBL_LAST 0x80000038
#define ICE_SID_TX_5_LAYER_TOPO 0x10
/* Hardware blocks that have their own profile/TCAM tables */
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
/* Per-block table kinds (one SID of each per block, see above) */
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* A package buffer being assembled, with space reserved for future
 * section-directory entries */
struct ice_buf_build {
struct ice_buf buf;
uint16_t reserved_section_table_entries;
};
/* Iterator state for walking sections/entries of a package buffer
 * table; "handler" extracts one entry of the requested sect_type. */
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
uint32_t buf_idx;
uint32_t type;
struct ice_buf_hdr *buf;
uint32_t sect_idx;
void *sect;
uint32_t sect_type;
uint32_t entry_idx;
void *(*handler)(uint32_t sect_type, void *section, uint32_t index,
uint32_t *offset);
};
/*
 * Flexible-descriptor field selectors, one variant per protocol
 * combination; each uint8_t apparently selects a field for extraction
 * (exact encoding not visible here).
 */
struct ice_flex_fields {
union {
struct {
uint8_t src_ip;
uint8_t dst_ip;
uint8_t flow_label;
} ip_fields;
struct {
uint8_t src_prt;
uint8_t dst_prt;
} tcp_udp_fields;
struct {
uint8_t src_ip;
uint8_t dst_ip;
uint8_t src_prt;
uint8_t dst_prt;
} ip_tcp_udp_fields;
struct {
uint8_t src_prt;
uint8_t dst_prt;
uint8_t flow_label;
uint8_t spi;
} ip_esp_fields;
struct {
uint32_t offset;
uint32_t length;
} off_len;
} fields;
};
#define ICE_XLT1_DFLT_GRP 0
#define ICE_XLT1_TABLE_SIZE 1024
/* A named value from a package label section. */
struct ice_label {
uint16_t value;
#define ICE_PKG_LABEL_SIZE 64
char name[ICE_PKG_LABEL_SIZE];
};
/* Package label section: count followed by variable-length label array. */
struct ice_label_section {
uint16_t count;
struct ice_label label[STRUCT_HACK_VAR_LEN];
};
/* Max labels that fit in one buffer (headroom = section minus one label). */
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
ice_struct_size((struct ice_label_section *)0, label, 1) - \
sizeof(struct ice_label), sizeof(struct ice_label))
/* Switch field-vector package section: count + base offset + FV array. */
struct ice_sw_fv_section {
uint16_t count;
uint16_t base_offset;
struct ice_fv fv[STRUCT_HACK_VAR_LEN];
};
#pragma pack(1)
/*
 * Boost TCAM key value: 15 bytes of remaining header-vector key, a
 * 4-byte port/VLAN overlay (the first overlay struct is anonymous, so
 * hv_dst_port_key/hv_src_port_key are accessed directly), and one
 * trailing search-key byte.  Packed: layout is consumed by hardware/
 * package data, so field order and sizes must not change.
 */
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
uint8_t remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
union {
struct {
uint16_t hv_dst_port_key;
uint16_t hv_src_port_key;
} ;
struct {
uint16_t hv_vlan_id_key;
uint16_t hv_etype_key;
} vlan;
};
uint8_t tcam_search_key;
};
#pragma pack()
/* TCAM key pair (key/key2 -- presumably value and mask halves). */
struct ice_boost_key {
struct ice_boost_key_value key;
struct ice_boost_key_value key2;
};
/* One boost TCAM entry: address, key pair, hit index group, bit fields. */
struct ice_boost_tcam_entry {
uint16_t addr;
uint16_t reserved;
struct ice_boost_key key;
uint8_t boost_hit_index_group;
#define ICE_BOOST_BIT_FIELDS 43
uint8_t bit_fields[ICE_BOOST_BIT_FIELDS];
};
/* Boost TCAM package section. */
struct ice_boost_tcam_section {
uint16_t count;
uint16_t reserved;
struct ice_boost_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
};
/* Max boost TCAM entries that fit in one buffer. */
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
ice_struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
/* XLT1 package section: 8-bit values starting at "offset". */
struct ice_xlt1_section {
uint16_t count;
uint16_t offset;
uint8_t value[STRUCT_HACK_VAR_LEN];
};
/* XLT2 package section: 16-bit values starting at "offset". */
struct ice_xlt2_section {
uint16_t count;
uint16_t offset;
uint16_t value[STRUCT_HACK_VAR_LEN];
};
/* Profile redirection package section. */
struct ice_prof_redir_section {
uint16_t count;
uint16_t offset;
uint8_t redir_value[STRUCT_HACK_VAR_LEN];
};
/* Tunnel protocol types; TNL_LAST/TNL_ALL share 0xFF as a wildcard. */
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GRETAP,
TNL_GTP,
TNL_GTPC,
TNL_GTPU,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
/* Maps a tunnel type to the boost TCAM label prefix to scan for. */
struct ice_tunnel_type_scan {
enum ice_tunnel_type type;
const char *label_prefix;
};
/*
 * One tracked tunnel: its boost TCAM address/entry, UDP port, and a
 * reference count plus valid/in_use/marked bookkeeping flags.
 */
struct ice_tunnel_entry {
enum ice_tunnel_type type;
uint16_t boost_addr;
uint16_t port;
uint16_t ref;
struct ice_boost_tcam_entry *boost_entry;
uint8_t valid;
uint8_t in_use;
uint8_t marked;
};
#define ICE_TUNNEL_MAX_ENTRIES 16
/* Fixed-size table of tunnel entries; "count" is the number populated. */
struct ice_tunnel_table {
struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
uint16_t count;
};
/* Label prefix identifying tunnel boost TCAM entries. */
#define ICE_TNL_PRE "TNL_"
/* Extraction sequence package section: field-vector words at "offset". */
struct ice_pkg_es {
uint16_t count;
uint16_t offset;
struct ice_fv_word es[STRUCT_HACK_VAR_LEN];
};
TAILQ_HEAD(ice_prof_map_head, ice_prof_map);
/*
 * Extraction sequence state for one block: "t" holds count*fvw
 * field-vector words, ref_count/written track per-profile usage, and
 * prof_map (guarded by prof_map_lock) lists profile mappings.
 */
struct ice_es {
uint32_t sid;
uint16_t count;
uint16_t fvw;
uint16_t *ref_count;
struct ice_prof_map_head prof_map;
struct ice_fv_word *t;
struct ice_lock prof_map_lock;
uint8_t *written;
uint8_t reverse;
};
#define ICE_DEFAULT_PTG 0
/* A packet type group: singly linked list of member ptypes + in-use flag. */
struct ice_ptg_entry {
struct ice_ptg_ptype *first_ptype;
uint8_t in_use;
};
/* One ptype's membership link and the PTG it currently belongs to. */
struct ice_ptg_ptype {
struct ice_ptg_ptype *next_ptype;
uint8_t ptg;
};
#define ICE_MAX_TCAM_PER_PROFILE 32
#define ICE_MAX_PTG_PER_PROFILE 32
/* Maps a caller's profile cookie/context to a HW profile ID and PTGs. */
struct ice_prof_map {
TAILQ_ENTRY(ice_prof_map) list;
uint64_t profile_cookie;
uint64_t context;
uint8_t prof_id;
uint8_t ptg_cnt;
uint8_t ptg[ICE_MAX_PTG_PER_PROFILE];
};
#define ICE_INVALID_TCAM 0xFFFF
/* One TCAM slot used by a profile within a VSIG. */
struct ice_tcam_inf {
uint16_t tcam_idx;
uint8_t ptg;
uint8_t prof_id;
uint8_t in_use;
};
/* A profile attached to a VSIG and the TCAM slots backing it. */
struct ice_vsig_prof {
TAILQ_ENTRY(ice_vsig_prof) list;
uint64_t profile_cookie;
uint8_t prof_id;
uint8_t tcam_count;
struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
};
TAILQ_HEAD(ice_vsig_prof_head, ice_vsig_prof);
/* A VSI group: its property (profile) list and member-VSI list head. */
struct ice_vsig_entry {
struct ice_vsig_prof_head prop_lst;
struct ice_vsig_vsi *first_vsi;
uint8_t in_use;
};
/* Per-VSI VSIG membership link. */
struct ice_vsig_vsi {
struct ice_vsig_vsi *next_vsi;
uint32_t prop_mask;
uint16_t changed;
uint16_t vsig;
};
#define ICE_XLT1_CNT 1024
#define ICE_MAX_PTGS 256
/* XLT1 run-time state: ptype->PTG table "t" plus PTG bookkeeping. */
struct ice_xlt1 {
struct ice_ptg_entry *ptg_tbl;
struct ice_ptg_ptype *ptypes;
uint8_t *t;
uint32_t sid;
uint16_t count;
};
#define ICE_XLT2_CNT 768
#define ICE_MAX_VSIGS 768
/* A VSIG value packs a 13-bit index with a 3-bit PF number. */
#define ICE_VSIG_IDX_M (0x1FFF)
#define ICE_PF_NUM_S 13
#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
#define ICE_VSIG_VALUE(vsig, pf_id) \
((uint16_t)((((uint16_t)(vsig)) & ICE_VSIG_IDX_M) | \
(((uint16_t)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)))
#define ICE_DEFAULT_VSIG 0
/* XLT2 run-time state: VSI->VSIG table "t" plus VSIG bookkeeping. */
struct ice_xlt2 {
struct ice_vsig_entry *vsig_tbl;
struct ice_vsig_vsi *vsis;
uint16_t *t;
uint32_t sid;
uint16_t count;
};
/* One match field: protocol ID/offset/length, or the packed 32-bit form. */
union ice_match_fld {
struct {
uint8_t prot_id;
uint8_t offset;
uint8_t length;
uint8_t reserved;
} fld;
uint32_t val;
};
#define ICE_MATCH_LIST_SZ 20
#pragma pack(1)
/* Packed list of up to ICE_MATCH_LIST_SZ match fields. */
struct ice_match {
uint8_t count;
union ice_match_fld list[ICE_MATCH_LIST_SZ];
};
/* Profile TCAM lookup key: flags + XLT1 value + combined XLT2/CDID. */
struct ice_prof_id_key {
uint16_t flags;
uint8_t xlt1;
uint16_t xlt2_cdid;
};
/* 5 key-value bytes; full key is value + mask (2x). */
#define ICE_TCAM_KEY_VAL_SZ 5
#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ)
/* Packed profile TCAM entry (hardware/package layout). */
struct ice_prof_tcam_entry {
uint16_t addr;
uint8_t key[ICE_TCAM_KEY_SZ];
uint8_t prof_id;
};
#pragma pack()
/* Profile ID package section. */
struct ice_prof_id_section {
uint16_t count;
struct ice_prof_tcam_entry entry[STRUCT_HACK_VAR_LEN];
};
/* Run-time shadow of the profile TCAM ("t" holds "count" entries). */
struct ice_prof_tcam {
uint32_t sid;
uint16_t count;
uint16_t max_prof_id;
struct ice_prof_tcam_entry *t;
uint8_t cdid_bits;
};
/* Kinds of pending flexible-pipeline changes. */
enum ice_chg_type {
ICE_TCAM_NONE = 0,
ICE_PTG_ES_ADD,
ICE_TCAM_ADD,
ICE_VSIG_ADD,
ICE_VSIG_REM,
ICE_VSI_MOVE,
};
TAILQ_HEAD(ice_chs_chg_head, ice_chs_chg);
/*
 * One queued change record; which fields are meaningful depends on
 * "type" (the add_* flags mark what this record created, so it can be
 * rolled back or committed).
 */
struct ice_chs_chg {
TAILQ_ENTRY(ice_chs_chg) list_entry;
enum ice_chg_type type;
uint8_t add_ptg;
uint8_t add_vsig;
uint8_t add_tcam_idx;
uint8_t add_prof;
uint16_t ptype;
uint8_t ptg;
uint8_t prof_id;
uint16_t vsi;
uint16_t vsig;
uint16_t orig_vsig;
uint16_t tcam_idx;
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
/* Run-time profile redirection table for one block. */
struct ice_prof_redir {
uint8_t *t;
uint32_t sid;
uint16_t count;
};
/* Aggregate run-time state for one hardware block (see enum ice_block). */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
uint8_t overwrite;
uint8_t is_list_init;
};
/*
 * A switch recipe: the extraction words it matches, its filter rule
 * lists (guarded by filt_rule_lock), chaining info for multi-recipe
 * ("big") recipes, and the AQ buffer used to program it.  The #if 0
 * regions are fields from the original driver not used by this port.
 */
struct ice_sw_recipe {
uint8_t is_root;
uint8_t root_rid;
uint8_t recp_created;
uint8_t n_ext_words;
struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
uint16_t word_masks[ICE_MAX_CHAIN_WORDS];
uint8_t big_recp;
uint8_t chain_idx;
uint8_t n_grp_count;
/* Bitmap of other recipes this one is associated with. */
ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
#if 0
enum ice_sw_tunnel_type tun_type;
#endif
uint8_t adv_rule;
struct ice_fltr_mgmt_list_head filt_rules;
struct ice_adv_fltr_mgmt_list_head adv_filt_rules;
struct ice_fltr_mgmt_list_head filt_replay_rules;
struct ice_lock filt_rule_lock;
#if 0
struct LIST_HEAD_TYPE fv_list;
#endif
uint8_t num_profs, *prof_ids;
/* Field-vector result indexes reserved for this recipe. */
ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS);
uint8_t priority;
struct ice_recp_grp_entry_head rg_list;
struct ice_aqc_recipe_data_elem *root_buf;
#if 0
struct ice_prot_lkup_ext lkup_exts;
#endif
};
TAILQ_HEAD(ice_flow_prof_head, ice_flow_prof);
/*
 * Top-level hardware state for one ice device function: control queues,
 * capabilities, scheduler topology, DDP package bookkeeping, and the
 * per-block flexible-pipeline state.  The #if 0 regions preserve fields
 * from the original driver that this port does not use.
 */
struct ice_hw {
struct ice_softc *hw_sc;
#if 0
uint8_t *hw_addr;
void *back;
#endif
struct ice_aqc_layer_props *layer_info;
struct ice_port_info *port_info;
#if 0
struct ice_sched_rl_profile **cir_profiles;
struct ice_sched_rl_profile **eir_profiles;
struct ice_sched_rl_profile **srl_profiles;
#endif
/* PSM clock frequency for calculating RL profile params */
uint32_t psm_clk_freq;
enum ice_mac_type mac_type;
#if 0
uint16_t device_id;
uint16_t vendor_id;
uint16_t subsystem_device_id;
uint16_t subsystem_vendor_id;
uint8_t revision_id;
#endif
uint8_t pf_id;
#if 0
enum ice_phy_model phy_model;
uint8_t phy_ports;
uint8_t max_phy_port;
#endif
uint16_t max_burst_size;
/* Tx scheduler topology. */
uint8_t num_tx_sched_layers;
uint8_t num_tx_sched_phys_layers;
uint8_t flattened_layers;
uint8_t max_cgds;
uint8_t sw_entry_point_layer;
uint16_t max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_agg_list_head agg_list;
struct ice_rl_prof_list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
/* VSI contexts indexed by VSI handle. */
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
uint8_t evb_veb;
uint8_t reset_ongoing;
#if 0
struct ice_bus_info bus;
#endif
struct ice_flash_info flash;
struct ice_hw_dev_caps dev_caps;
struct ice_hw_func_caps func_caps;
struct ice_switch_info *switch_info;
/* Admin and PF-to-VF mailbox control queues. */
struct ice_ctl_q_info adminq;
struct ice_ctl_q_info mailboxq;
/* Firmware/AQ API version as reported by the device. */
uint8_t api_branch;
uint8_t api_maj_ver;
uint8_t api_min_ver;
uint8_t api_patch;
uint8_t fw_branch;
uint8_t fw_maj_ver;
uint8_t fw_min_ver;
uint8_t fw_patch;
uint32_t fw_build;
struct ice_fwlog_cfg fwlog_cfg;
bool fwlog_support_ena;
#define ICE_MAX_AGG_BW_200G 0x0
#define ICE_MAX_AGG_BW_100G 0X1
#define ICE_MAX_AGG_BW_50G 0x2
#define ICE_MAX_AGG_BW_25G 0x3
/* ITR/INTRL granularities depend on link speed (above/at-most 25G). */
#define ICE_ITR_GRAN_ABOVE_25 2
#define ICE_ITR_GRAN_MAX_25 4
uint8_t itr_gran;
#define ICE_INTRL_GRAN_ABOVE_25 4
#define ICE_INTRL_GRAN_MAX_25 8
uint8_t intrl_gran;
uint8_t umac_shared;
#if 0
#define ICE_PHY_PER_NAC_E822 1
#define ICE_MAX_QUAD 2
#define ICE_QUADS_PER_PHY_E822 2
#define ICE_PORTS_PER_PHY_E822 8
#define ICE_PORTS_PER_QUAD 4
#define ICE_PORTS_PER_PHY_E810 4
#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
#endif
/* DDP package state: the active package and the copy it was loaded from. */
struct ice_pkg_ver active_pkg_ver;
uint32_t pkg_seg_id;
uint32_t pkg_sign_type;
uint32_t active_track_id;
uint8_t pkg_has_signing_seg:1;
uint8_t active_pkg_name[ICE_PKG_NAME_SIZE];
uint8_t active_pkg_in_nvm;
struct ice_pkg_ver pkg_ver;
uint8_t pkg_name[ICE_PKG_NAME_SIZE];
struct ice_pkg_ver ice_seg_fmt_ver;
uint8_t ice_seg_id[ICE_SEG_ID_SIZE];
struct ice_seg *seg;
uint8_t *pkg_copy;
uint32_t pkg_size;
/* Tunnel table and per-block flexible-pipeline state. */
struct ice_lock tnl_lock;
struct ice_tunnel_table tnl;
struct ice_blk_info blk[ICE_BLK_COUNT];
#if 0
struct ice_lock fl_profs_locks[ICE_BLK_COUNT];
#endif
struct ice_flow_prof_head fl_profs[ICE_BLK_COUNT];
#if 0
struct ice_lock rss_locks;
#endif
struct ice_rss_cfg_head rss_list_head;
#if 0
uint16_t vsi_owning_pf_lut;
struct ice_mbx_snapshot mbx_snapshot;
#endif
uint8_t dvm_ena;
#if 0
bool subscribable_recipes_supported;
#endif
};
/*
 * Driver state flags.  Each enumerator is a bit number in a 32-bit
 * state word manipulated by ice_set_state()/ice_clear_state() and
 * friends below; the 24 values (including ICE_STATE_LAST) fit in
 * uint32_t.
 */
enum ice_state {
ICE_STATE_CONTROLQ_EVENT_PENDING,
ICE_STATE_VFLR_PENDING,
ICE_STATE_MDD_PENDING,
ICE_STATE_RESET_OICR_RECV,
ICE_STATE_RESET_PFR_REQ,
ICE_STATE_PREPARED_FOR_RESET,
ICE_STATE_SUBIF_NEEDS_REINIT,
ICE_STATE_RESET_FAILED,
ICE_STATE_DRIVER_INITIALIZED,
ICE_STATE_NO_MEDIA,
ICE_STATE_RECOVERY_MODE,
ICE_STATE_ROLLBACK_MODE,
ICE_STATE_LINK_STATUS_REPORTED,
ICE_STATE_ATTACHING,
ICE_STATE_DETACHING,
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
ICE_STATE_MULTIPLE_TCS,
ICE_STATE_DO_FW_DEBUG_DUMP,
ICE_STATE_LINK_ACTIVE_ON_DOWN,
ICE_STATE_FIRST_INIT_LINK,
ICE_STATE_DO_CREATE_MIRR_INTFC,
ICE_STATE_DO_DESTROY_MIRR_INTFC,
ICE_STATE_LAST,
};
/*
 * Atomically set the given driver state bit in *s.
 */
static inline void
ice_set_state(volatile uint32_t *s, enum ice_state bit)
{
	uint32_t mask;

	mask = (1UL << bit);
	atomic_setbits_int(s, mask);
}
/*
 * Atomically clear the given driver state bit in *s.
 */
static inline void
ice_clear_state(volatile uint32_t *s, enum ice_state bit)
{
	uint32_t mask;

	mask = (1UL << bit);
	atomic_clearbits_int(s, mask);
}
/*
 * Atomically test-and-set a driver state bit.
 *
 * Returns 1 if the bit was already set, 0 if this call set it.
 *
 * The previous implementation issued a single compare-and-swap without
 * checking whether it succeeded: if *s changed between the initial read
 * and the CAS, the CAS failed silently and the bit was never set, even
 * though 0 ("we set it") could be returned.  Retry until the CAS
 * succeeds or the bit is observed set by someone else.
 */
static inline uint32_t
ice_testandset_state(volatile uint32_t *s, enum ice_state bit)
{
	uint32_t mask = (1UL << bit);
	uint32_t expected, previous;

	for (;;) {
		expected = *s;
		if (expected & mask)
			return (1);
		previous = atomic_cas_uint(s, expected, expected | mask);
		if (previous == expected)
			return (0);
		/* lost a race; reload and retry */
	}
}
/*
 * Atomically test-and-clear a driver state bit.
 *
 * Returns 1 if the bit was set (and this call cleared it), 0 if it was
 * already clear.
 *
 * The previous implementation issued a single compare-and-swap without
 * checking whether it succeeded: if *s changed between the initial read
 * and the CAS, the bit was left set even though 1 ("we cleared it")
 * could be returned.  Retry until the CAS succeeds or the bit is
 * observed clear.
 */
static inline uint32_t
ice_testandclear_state(volatile uint32_t *s, enum ice_state bit)
{
	uint32_t mask = (1UL << bit);
	uint32_t expected, previous;

	for (;;) {
		expected = *s;
		if ((expected & mask) == 0)
			return (0);
		previous = atomic_cas_uint(s, expected, expected & ~mask);
		if (previous == expected)
			return (1);
		/* lost a race; reload and retry */
	}
}
/*
 * Read (non-atomically) whether a driver state bit is set in *s.
 * Returns 1 if set, 0 otherwise.
 */
static inline uint32_t
ice_test_state(volatile uint32_t *s, enum ice_state bit)
{
	if (*s & (1UL << bit))
		return (1);
	return (0);
}
/*
 * Round N to the nearest multiple of R: down when the remainder is
 * strictly less than R/2 (integer division), up otherwise.
 */
static inline uint32_t ice_round_to_num(uint32_t N, uint32_t R)
{
	uint32_t rem = N % R;

	if (rem < R / 2)
		return ((N / R) * R);
	return (((N + R - 1) / R) * R);
}
/*
 * Count the set bits in a 16-bit word using pairwise partial sums
 * (1-, 2-, 4-, then 8-bit groups).
 */
static inline uint16_t
ice_popcount16(uint16_t n16)
{
	uint16_t v = n16;

	v = (uint16_t)(((v >> 1) & 0x5555) + (v & 0x5555));
	v = (uint16_t)(((v >> 2) & 0x3333) + (v & 0x3333));
	v = (uint16_t)(((v >> 4) & 0x0f0f) + (v & 0x0f0f));
	v = (uint16_t)(((v >> 8) & 0x00ff) + (v & 0x00ff));
	return (v);
}
/*
 * Count the set bits in a 32-bit word using pairwise partial sums
 * (1-, 2-, 4-, 8-, then 16-bit groups).
 */
static inline uint32_t
ice_popcount32(uint32_t n32)
{
	uint32_t v = n32;

	v = ((v >> 1) & 0x55555555) + (v & 0x55555555);
	v = ((v >> 2) & 0x33333333) + (v & 0x33333333);
	v = ((v >> 4) & 0x0f0f0f0f) + (v & 0x0f0f0f0f);
	v = ((v >> 8) & 0x00ff00ff) + (v & 0x00ff00ff);
	v = ((v >> 16) & 0x0000ffff) + (v & 0x0000ffff);
	return (v);
}
/* floor(log2(x)): fls()/flsl() return the 1-based index of the highest
 * set bit, so subtracting 1 gives the log; width picked via sizeof. */
#define ice_ilog2(x) ((sizeof(x) <= 4) ? (fls(x) - 1) : (flsl(x) - 1))
/* Bit-string helpers, modeled on bitstring(3) with an ice_ prefix. */
typedef uint32_t ice_bitstr_t;
/* NOTE(review): the mask is ~0UL (possibly 64-bit) while ice_bitstr_t
 * is 32-bit; intermediates are computed in unsigned long and truncated
 * when stored/returned as ice_bitstr_t. */
#define ICE_BITSTR_MASK (~0UL)
#define ICE_BITSTR_BITS (sizeof(ice_bitstr_t) * 8)
/* Round x up to a multiple of y (y must be a power of two). */
#define ice_bit_roundup(x, y) \
(((size_t)(x) + (y) - 1) & ~((size_t)(y) - 1))
/* Size in bytes of a bit string large enough for nbits bits. */
#define ice_bitstr_size(nbits) (ice_bit_roundup((nbits), ICE_BITSTR_BITS) / 8)
/*
 * Allocate a zeroed bit string for nbits bits from the kernel M_DEVBUF
 * pool; may return NULL (M_NOWAIT).  Caller frees with free(9).
 */
static inline ice_bitstr_t *
ice_bit_alloc(size_t nbits)
{
return malloc(ice_bitstr_size(nbits), M_DEVBUF, M_NOWAIT | M_ZERO);
}
/*
 * Declare a bit-string array with enough ice_bitstr_t words for nbits
 * bits.  Fixed: the element count previously used bitstr_size() -- the
 * sys/bitstring.h sizing macro for its own (byte-based) bitstr_t, or an
 * undefined symbol if that header is not included -- which under-sizes
 * the array (e.g. 33 bits -> 5 bytes / 4 = 1 word = 32 bits).  Use
 * ice_bitstr_size(), consistent with ice_bit_alloc() above.
 */
#define ice_bit_decl(name, nbits) \
((name)[ice_bitstr_size(nbits) / sizeof(ice_bitstr_t)])
/* Index of the ice_bitstr_t word holding the given bit. */
static inline size_t
ice_bit_idx(size_t b)
{
	size_t word = b / ICE_BITSTR_BITS;

	return (word);
}
/* Offset of the given bit within its ice_bitstr_t word. */
static inline size_t
ice_bit_offset(size_t b)
{
	/* ICE_BITSTR_BITS is sizeof(ice_bitstr_t)*8, a power of two. */
	return (b & (ICE_BITSTR_BITS - 1));
}
/* Single-bit mask for the given bit within its word. */
static inline ice_bitstr_t
ice_bit_mask(size_t b)
{
	return ((ice_bitstr_t)(1UL << ice_bit_offset(b)));
}
/*
 * Mask with bits [offset(start), offset(stop)] set, both inclusive,
 * within a single word.  Truncation to ice_bitstr_t distributes over
 * the AND, so factoring the two shifts into locals is equivalent to
 * the combined unsigned-long expression.
 */
static inline ice_bitstr_t
ice_bit_make_mask(size_t start, size_t stop)
{
	ice_bitstr_t lo, hi;

	lo = (ice_bitstr_t)(ICE_BITSTR_MASK << ice_bit_offset(start));
	hi = (ice_bitstr_t)(ICE_BITSTR_MASK >>
	    (ICE_BITSTR_BITS - ice_bit_offset(stop) - 1));
	return (lo & hi);
}
/* Return nonzero (1) if the given bit is set in the bit string. */
static inline int
ice_bit_test(const ice_bitstr_t *bitstr, size_t b)
{
	return (!!(bitstr[ice_bit_idx(b)] & ice_bit_mask(b)));
}
/* Set the given bit in the bit string. */
static inline void
ice_bit_set(ice_bitstr_t *bitstr, size_t b)
{
	ice_bitstr_t *word = &bitstr[ice_bit_idx(b)];

	*word |= ice_bit_mask(b);
}
/* Clear the given bit in the bit string. */
static inline void
ice_bit_clear(ice_bitstr_t *bitstr, size_t b)
{
	ice_bitstr_t *word = &bitstr[ice_bit_idx(b)];

	*word &= ~ice_bit_mask(b);
}
/*
 * Count set bits in bitstr over [start, nbits).  Returns 0 when start
 * is at or beyond nbits.  Modeled on bit_count() from bitstring(3);
 * left byte-identical because the word-walk rebases nbits/start as it
 * goes and is sensitive to statement order.
 */
static inline ssize_t
ice_bit_count(ice_bitstr_t *bitstr, size_t start, size_t nbits)
{
ice_bitstr_t *curbitstr, mask;
size_t curbitstr_len;
ssize_t value = 0;
if (start >= nbits)
return (0);
/* Rebase so start/nbits are relative to the first touched word. */
curbitstr = bitstr + ice_bit_idx(start);
nbits -= ICE_BITSTR_BITS * ice_bit_idx(start);
start -= ICE_BITSTR_BITS * ice_bit_idx(start);
/* Partial leading word, if start is not word-aligned. */
if (start > 0) {
curbitstr_len = (int)ICE_BITSTR_BITS < nbits ?
(int)ICE_BITSTR_BITS : nbits;
mask = ice_bit_make_mask(start,
ice_bit_offset(curbitstr_len - 1));
value += ice_popcount32(*curbitstr & mask);
curbitstr++;
if (nbits < ICE_BITSTR_BITS)
return (value);
nbits -= ICE_BITSTR_BITS;
}
/* Whole words.  nbits is size_t; the (int) cast promotes back to
 * size_t in the comparison, which is fine for these magnitudes. */
while (nbits >= (int)ICE_BITSTR_BITS) {
value += ice_popcount32(*curbitstr);
curbitstr++;
nbits -= ICE_BITSTR_BITS;
}
/* Partial trailing word. */
if (nbits > 0) {
mask = ice_bit_make_mask(0, ice_bit_offset(nbits - 1));
value += ice_popcount32(*curbitstr & mask);
}
return (value);
}
/*
 * Find the first bit equal to `match` (1 = set, 0 = clear) at or after
 * `start` within `nbits` total bits.  Returns the bit index, or -1 if
 * no such bit exists.  Relies on ffs() returning the 1-based index of
 * the least significant set bit and 0 for a zero argument.
 */
static inline ssize_t
ice_bit_ff_at(ice_bitstr_t *bitstr, size_t start, size_t nbits, int match)
{
ice_bitstr_t *curbitstr;
ice_bitstr_t *stopbitstr;
ice_bitstr_t mask;
ice_bitstr_t test;
ssize_t value;
if (start >= nbits || nbits <= 0)
return (-1);
curbitstr = bitstr + ice_bit_idx(start);
stopbitstr = bitstr + ice_bit_idx(nbits - 1);
/* XOR with all-ones when hunting for a clear bit, so that the scan
 * below can always look for a set bit in `test`. */
mask = match ? 0 : ICE_BITSTR_MASK;
test = mask ^ *curbitstr;
if (ice_bit_offset(start) != 0)
test &= ice_bit_make_mask(start, ICE_BITSTR_BITS - 1);
while (test == 0 && curbitstr < stopbitstr)
test = mask ^ *(++curbitstr);
value = ((curbitstr - bitstr) * ICE_BITSTR_BITS) + ffs(test) - 1;
/* Reject a hit in the unused tail of the last word. */
if (test == 0 ||
(ice_bit_offset(nbits) != 0 && (size_t)value >= nbits))
value = -1;
return (value);
}
/*
 * Find the first run of `size` consecutive bits all equal to `match`
 * (1 = set, 0 = clear) starting at or after `start` within `nbits`
 * bits.  Returns the index of the run's first bit, or -1 if no such
 * run fits.  NOTE(review): intricate word-at-a-time scan apparently
 * carried over from the FreeBSD bitstring implementation; left
 * byte-identical.
 */
static inline ssize_t
ice_bit_ff_area_at(ice_bitstr_t *bitstr, size_t start, size_t nbits,
size_t size, int match)
{
ice_bitstr_t *curbitstr, mask, test;
size_t last, shft, maxshft;
ssize_t value;
if (start + size > nbits || nbits <= 0)
return (-1);
/* Invert so the loop always searches for a run of CLEAR bits in
 * `test`; bits before `start` are forced set (i.e. unusable). */
mask = match ? ICE_BITSTR_MASK : 0;
maxshft = ice_bit_idx(size - 1) == 0 ? size : (int)ICE_BITSTR_BITS;
value = start;
curbitstr = bitstr + ice_bit_idx(start);
test = ~(ICE_BITSTR_MASK << ice_bit_offset(start));
for (last = size - 1, test |= mask ^ *curbitstr;
!(ice_bit_idx(last) == 0 &&
(test & ice_bit_make_mask(0, last)) == 0);
last -= ICE_BITSTR_BITS, test = mask ^ *++curbitstr) {
if (test == 0)
continue;
/* Smear set bits right to find the end of each blocked run. */
for (shft = maxshft; shft > 1 && (test & (test + 1)) != 0;
shft = (shft + 1) / 2)
test |= test >> shft / 2;
last = ffs(~(test >> 1));
value = (curbitstr - bitstr) * ICE_BITSTR_BITS + last;
if (value + size > nbits) {
value = -1;
break;
}
last += size - 1;
if (ice_bit_idx(last) == 0)
break;
}
return (value);
}
/* Store into *_resultp the index of the first run of _size set (ffs) or
 * clear (ffc) bits in _bitstr, or -1 if no such run exists. */
#define ice_bit_ffs_area(_bitstr, _nbits, _size, _resultp) \
*(_resultp) = ice_bit_ff_area_at((_bitstr), 0, (_nbits), (_size), 1)
#define ice_bit_ffc_area(_bitstr, _nbits, _size, _resultp) \
*(_resultp) = ice_bit_ff_area_at((_bitstr), 0, (_nbits), (_size), 0)
#define ICE_MAX_SCATTERED_QUEUES 16
#define ICE_INVALID_RES_IDX 0xFFFF
/*
 * Resource manager: a bit string tracking allocation of num_res
 * resources; contig_only restricts it to contiguous allocations.
 */
struct ice_resmgr {
ice_bitstr_t *resources;
uint16_t num_res;
bool contig_only;
};
/* How a queue map was allocated (see qmap_type in struct ice_vsi). */
enum ice_resmgr_alloc_type {
ICE_RESMGR_ALLOC_INVALID = 0,
ICE_RESMGR_ALLOC_CONTIGUOUS,
ICE_RESMGR_ALLOC_SCATTERED
};
/* Per-traffic-class queue region: first queue and Tx/Rx queue counts. */
struct ice_tc_info {
uint16_t qoffset;
uint16_t qcount_tx;
uint16_t qcount_rx;
};
/* Basic Ethernet counters, shared by VSI- and port-level statistics. */
struct ice_eth_stats {
uint64_t rx_bytes;
uint64_t rx_unicast;
uint64_t rx_multicast;
uint64_t rx_broadcast;
uint64_t rx_discards;
uint64_t rx_unknown_protocol;
uint64_t tx_bytes;
uint64_t tx_unicast;
uint64_t tx_multicast;
uint64_t tx_broadcast;
uint64_t tx_discards;
uint64_t tx_errors;
uint64_t rx_no_desc;
uint64_t rx_errors;
};
/* VSI stats snapshot pair; offsets_loaded gates delta computation
 * against the "prev" baseline. */
struct ice_vsi_hw_stats {
struct ice_eth_stats prev;
struct ice_eth_stats cur;
bool offsets_loaded;
};
/*
 * Full per-port MAC statistics: basic Ethernet counters plus fault,
 * flow-control (global and per-priority), size-histogram, and LPI
 * counters.
 */
struct ice_hw_port_stats {
struct ice_eth_stats eth;
uint64_t tx_dropped_link_down;
uint64_t crc_errors;
uint64_t illegal_bytes;
uint64_t error_bytes;
uint64_t mac_local_faults;
uint64_t mac_remote_faults;
uint64_t rx_len_errors;
uint64_t link_xon_rx;
uint64_t link_xoff_rx;
uint64_t link_xon_tx;
uint64_t link_xoff_tx;
/* Per-priority (8 traffic priorities) pause counters. */
uint64_t priority_xon_rx[8];
uint64_t priority_xoff_rx[8];
uint64_t priority_xon_tx[8];
uint64_t priority_xoff_tx[8];
uint64_t priority_xon_2_xoff[8];
/* Rx/Tx packet size histograms. */
uint64_t rx_size_64;
uint64_t rx_size_127;
uint64_t rx_size_255;
uint64_t rx_size_511;
uint64_t rx_size_1023;
uint64_t rx_size_1522;
uint64_t rx_size_big;
uint64_t rx_undersize;
uint64_t rx_fragments;
uint64_t rx_oversize;
uint64_t rx_jabber;
uint64_t tx_size_64;
uint64_t tx_size_127;
uint64_t tx_size_255;
uint64_t tx_size_511;
uint64_t tx_size_1023;
uint64_t tx_size_1522;
uint64_t tx_size_big;
uint64_t mac_short_pkt_dropped;
uint64_t tx_lpi_status;
uint32_t tx_lpi_status;
uint32_t rx_lpi_status;
uint64_t tx_lpi_count;
uint64_t rx_lpi_count;
};
/* PF stats snapshot pair; offsets_loaded gates delta computation. */
struct ice_pf_hw_stats {
struct ice_hw_port_stats prev;
struct ice_hw_port_stats cur;
bool offsets_loaded;
};
/* Software-maintained PF counters (reset and MDD event counts). */
struct ice_pf_sw_stats {
uint32_t corer_count;
uint32_t globr_count;
uint32_t empr_count;
uint32_t pfr_count;
uint32_t tx_mdd_count;
uint32_t rx_mdd_count;
};
/* Per-Tx-descriptor mbuf and DMA map state (separate map for TSO). */
struct ice_tx_map {
struct mbuf *txm_m;
bus_dmamap_t txm_map;
bus_dmamap_t txm_map_tso;
unsigned int txm_eop;
};
/*
 * One Tx queue: descriptor ring DMA memory, per-slot maps, the
 * interrupt vector serving it, scheduler identity (q_teid/q_handle/tc),
 * RS-bit completion tracking (tx_rsq/tx_rs_*), and the ifqueue plus
 * producer/consumer indexes.
 */
struct ice_tx_queue {
struct ice_vsi *vsi;
struct ice_tx_desc *tx_base;
struct ice_dma_mem tx_desc_mem;
bus_addr_t tx_paddr;
struct ice_tx_map *tx_map;
#if 0
struct tx_stats stats;
#endif
uint64_t tso;
uint16_t desc_count;
/* NOTE(review): "tail" is presumably the tail register offset for
 * this queue -- confirm where it is written. */
uint32_t tail;
struct ice_intr_vector *irqv;
uint32_t q_teid;
uint32_t me;
uint16_t q_handle;
uint8_t tc;
uint16_t *tx_rsq;
uint16_t tx_rs_cidx;
uint16_t tx_rs_pidx;
uint16_t tx_cidx_processed;
struct ifqueue *txq_ifq;
unsigned int txq_prod;
unsigned int txq_cons;
};
/* Per-Rx-descriptor mbuf and DMA map state. */
struct ice_rx_map {
struct mbuf *rxm_m;
bus_dmamap_t rxm_map;
};
/*
 * One Rx queue: descriptor ring DMA memory, per-slot maps, the
 * interrupt vector serving it, if_rxring accounting with a refill
 * timeout, producer/consumer indexes, and head/tail pointers for
 * reassembling multi-descriptor packets into an mbuf chain.
 */
struct ice_rx_queue {
struct ice_vsi *vsi;
union ice_32b_rx_flex_desc *rx_base;
struct ice_dma_mem rx_desc_mem;
bus_addr_t rx_paddr;
struct ice_rx_map *rx_map;
#if 0
struct rx_stats stats;
#endif
uint16_t desc_count;
uint32_t tail;
struct ice_intr_vector *irqv;
uint32_t me;
uint8_t tc;
struct if_rxring rxq_acct;
struct timeout rxq_refill;
unsigned int rxq_prod;
unsigned int rxq_cons;
struct ifiqueue *rxq_ifiq;
struct mbuf *rxq_m_head;
struct mbuf **rxq_m_tail;
};
/*
 * One virtual station interface: its queue arrays and queue maps
 * (allocation style recorded in qmap_type), interrupt moderation
 * settings, RSS configuration, traffic-class layout, statistics, and
 * mirroring rule IDs.
 */
struct ice_vsi {
struct ice_softc *sc;
bool dynamic;
enum ice_vsi_type type;
uint16_t idx;
uint16_t *tx_qmap;
uint16_t *rx_qmap;
enum ice_resmgr_alloc_type qmap_type;
struct ice_tx_queue *tx_queues;
struct ice_rx_queue *rx_queues;
int num_tx_queues;
int num_rx_queues;
int num_vectors;
int16_t rx_itr;
int16_t tx_itr;
uint16_t rss_table_size;
uint8_t rss_lut_type;
int max_frame_size;
uint16_t mbuf_sz;
struct ice_aqc_vsi_props info;
uint8_t num_tcs;
uint16_t tc_map;
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
#if 0
struct sysctl_ctx_list ctx;
struct sysctl_oid *vsi_node;
struct sysctl_ctx_list txqs_ctx;
struct sysctl_oid *txqs_node;
struct sysctl_ctx_list rxqs_ctx;
struct sysctl_oid *rxqs_node;
#endif
struct ice_vsi_hw_stats hw_stats;
uint16_t mirror_src_vsi;
uint16_t rule_mir_ingress;
uint16_t rule_mir_egress;
};
/* VSI handle of the main (default) VSI. */
#define ICE_MAIN_VSI_HANDLE 0
#define ICE_I2C_MAX_RETRIES 10