igu_sb_id
u8_t igu_sb_id;
for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) {
    lm_igu_block_t * lm_igu_sb = &IGU_SB(pdev, igu_sb_id);
    /* Dump the raw 32-bit CAM entry for this SB. */
    lm_igu_sb->block_dump = val = REG_RD(PFDEV(pdev), IGU_REG_MAPPING_MEMORY + 4*igu_sb_id);
    DbgMessage(pdev, WARN, "addr:0x%x IGU_CAM[%d]=%x\n", IGU_REG_MAPPING_MEMORY + 4*igu_sb_id, igu_sb_id, val);
    /* fid/vec are decoded from val; the decode step is elided here. */
    DbgMessage(pdev, VERBOSEi, "FID[%d]=%d\n", igu_sb_id, fid);
    DbgMessage(pdev, INFORMi, "VEC[%d]=%d\n", igu_sb_id, vec);
}
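/* --- Hedged sketch (not driver code): decoding a CAM entry. Each 32-bit
 * word at IGU_REG_MAPPING_MEMORY + 4*igu_sb_id maps one status block to a
 * function id and vector; the field layout below is assumed for
 * illustration -- the real masks/shifts come from the device headers
 * (e.g. via IGU_FID()/IGU_VEC()). Self-contained and compilable: */
#include <stdint.h>
#include <stdio.h>

#define CAM_FID_MASK   0x7Fu        /* function id, bits 6:0 (assumed) */
#define CAM_VEC_SHIFT  7
#define CAM_VEC_MASK   0x3Fu        /* vector number (assumed) */
#define CAM_VALID_BIT  (1u << 13)   /* entry-valid flag (assumed) */

static void decode_cam_entry(uint16_t igu_sb_id, uint32_t val)
{
    if (!(val & CAM_VALID_BIT))
        return;                     /* unused CAM line */
    printf("IGU_CAM[%u]: fid=%u vec=%u\n", (unsigned)igu_sb_id,
           val & CAM_FID_MASK, (val >> CAM_VEC_SHIFT) & CAM_VEC_MASK);
}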
blk_info->igu_info.igu_dsb_id = igu_sb_id;
blk_info->igu_info.igu_base_sb = igu_sb_id;
blk_info->igu_info.vf_igu_info[vf_id].igu_base_sb = igu_sb_id;
starting_from = vf_info->vf_chains[idx].igu_sb_id = lm_pf_get_next_free_igu_block_id(pdev, starting_from);
vf_info->vf_chains[idx].igu_sb_id = pdev->hw_info.intr_blk_info.igu_info.vf_igu_info[vf_info->abs_vf_id].igu_base_sb + idx;
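/* --- Hedged sketch (not driver code): the two VF SB allocation styles seen
 * in the two assignments above -- scan the CAM shadow for the next free
 * block (dynamic), or take the VF's base SB plus the chain index (static).
 * Toy free-list assumed: */
#include <stdint.h>

#define NUM_IGU_BLOCKS 128u
static uint8_t block_busy[NUM_IGU_BLOCKS]; /* 1 = already owned */

/* dynamic: next free block at or after 'from', or -1 if exhausted */
static int next_free_igu_block(uint16_t from)
{
    uint16_t i;
    for (i = from; i < NUM_IGU_BLOCKS; i++)
        if (!block_busy[i])
            return i;
    return -1;
}

/* static: the VF owns a contiguous range starting at its base SB */
static uint16_t vf_chain_igu_sb(uint16_t vf_igu_base_sb, uint16_t chain_idx)
{
    return (uint16_t)(vf_igu_base_sb + chain_idx);
}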
igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
u8_t igu_sb_id = 0;
igu_sb_id = IGU_BASE_NDSB(pdev) /*+ IGU_U_NDSB_OFFSET(pdev)*/ + sb_id;
igu_sb_id = sb_id;
lm_setup_ndsb_state_machine(pdev, sb_id, SM_RX_ID, igu_sb_id + IGU_U_NDSB_OFFSET(pdev), igu_seg_id);
lm_setup_ndsb_state_machine(pdev, sb_id, SM_TX_ID, igu_sb_id, igu_seg_id);
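/* --- Hedged sketch (not driver code): why the two calls above pass
 * different IGU ids. The USTORM (Rx) non-default SBs sit at a fixed offset
 * above the CSTORM (Tx) ones, so the Rx state machine gets
 * igu_sb_id + IGU_U_NDSB_OFFSET(pdev) while Tx uses the base id. The
 * offset value below is assumed for illustration: */
#include <stdint.h>

#define U_NDSB_OFFSET 64u /* assumed; the driver derives this per device */

enum sm_id { SM_RX = 0, SM_TX = 1 };

static uint16_t ndsb_igu_id(uint16_t igu_sb_id, enum sm_id sm)
{
    /* Rx (USTORM) segment is offset; Tx (CSTORM) uses the base id. */
    return sm == SM_RX ? (uint16_t)(igu_sb_id + U_NDSB_OFFSET) : igu_sb_id;
}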
u8_t igu_sb_id = 0;
igu_sb_id = LM_VF_IGU_SB_ID(vf_info, sb_idx);
lm_setup_ndsb_state_machine(pdev, LM_SW_VF_SB_ID(vf_info, sb_idx), SM_RX_ID, igu_sb_id + IGU_U_NDSB_OFFSET(pdev), igu_seg_id);
lm_setup_ndsb_state_machine(pdev, LM_SW_VF_SB_ID(vf_info, sb_idx), SM_TX_ID, igu_sb_id, igu_seg_id);
void lm_update_fp_hc_indices(lm_device_t *pdev, u8_t igu_sb_id, u32_t *activity_flg, u8_t *drv_rss_id)
drv_sb_id = igu_sb_id;
flags = lm_query_storm_intr(pdev, igu_sb_id, &drv_sb_id);
u8_t lm_handle_igu_sb_id(lm_device_t *pdev, u8_t igu_sb_id, u8_t *rx_rss_id, u8_t *tx_rss_id)
drv_sb_id = igu_sb_id;
pdev->vars.gen_sp_status_block.hc_sp_status_blk->sp_sb.running_index, pdev->vars.gen_sp_status_block.sb_data.igu_sb_id);
pdev->vars.gen_sp_status_block.sb_data.igu_sb_id = igu_sp_sb_index;
void lm_setup_ndsb_state_machine(struct _lm_device_t *pdev, u8_t sb_id, u8_t sm_id, u8_t igu_sb_id, u8_t igu_seg_id)
hc_state_machine->igu_sb_id = igu_sb_id;
void lm_int_igu_ack_sb(lm_device_t *pdev, u8_t igu_sb_id, u8_t segment_access, u16_t sb_index, u8_t int_op, u8_t is_update_idx)
cmd_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
void lm_int_igu_sb_cleanup(lm_device_t *pdev, u8_t igu_sb_id)
u32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (igu_sb_id/32)*4;
u32_t sb_bit = 1 << (igu_sb_id%32);
(((IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id) << IGU_CTRL_REG_ADDRESS_SHIFT) |
igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
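/* --- Hedged sketch (not driver code): the cleanup handshake above packs
 * one done-bit per SB into consecutive 32-bit registers -- register index
 * igu_sb_id/32, bit igu_sb_id%32 -- and the driver polls it a bounded
 * number of times. Base address and register read are stand-ins: */
#include <stdint.h>
#include <stdio.h>

#define SB_CLEANUP_BASE 0xC000u /* stand-in for IGU_REG_CSTORM_TYPE_0_SB_CLEANUP */

static uint32_t reg_rd(uint32_t addr) { (void)addr; return 0xFFFFFFFFu; } /* stub */

static int wait_sb_cleanup(uint16_t igu_sb_id)
{
    uint32_t ack_addr = SB_CLEANUP_BASE + (igu_sb_id / 32u) * 4u;
    uint32_t sb_bit   = 1u << (igu_sb_id % 32u);
    int      cnt;

    for (cnt = 0; cnt < 100; cnt++)          /* bounded poll */
        if (reg_rd(ack_addr) & sb_bit)
            return 0;                        /* IGU acknowledged */

    printf("SB %u (reg %u bit %u) cleanup timed out after %d polls\n",
           (unsigned)igu_sb_id, igu_sb_id / 32u, igu_sb_id % 32u, cnt);
    return -1;
}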
u8_t igu_sb_id /* sb_id within the IGU */;
u8_t igu_sb_id /* sb_id within the IGU */;
u8_t igu_sb_id /* sb_id within the IGU */;
u8_t igu_sb_id /* sb_id within the IGU */;
void lm_int_igu_sb_cleanup(lm_device_t *pdev, u8_t igu_sb_id);
static __inline u8_t lm_map_igu_sb_id_to_drv_rss(lm_device_t *pdev, u8_t igu_sb_id)
u8_t drv_sb_id = igu_sb_id;
static __inline u8_t lm_query_storm_intr(lm_device_t *pdev, u8_t igu_sb_id, u8_t * drv_sb_id)
*drv_sb_id = igu_sb_id;
if (igu_sb_id >= IGU_U_NDSB_OFFSET(pdev))
static __inline u8_t lm_is_sb_updated(lm_device_t *pdev, u8_t igu_sb_id)
flags = lm_query_storm_intr(pdev, igu_sb_id, &drv_sb_id);
u8_t lm_handle_igu_sb_id(lm_device_t *pdev, u8_t igu_sb_id, OUT u8_t *rx_rss_id, OUT u8_t *tx_rss_id);
void lm_update_fp_hc_indices(lm_device_t *pdev, u8_t igu_sb_id, u32_t *activity_flg, u8_t *drv_rss_id);
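/* --- Hedged sketch (not driver code): the inverse mapping used by the
 * helpers above. An incoming IGU id is folded back to a driver SB index by
 * checking which storm segment it falls in (compare the
 * >= IGU_U_NDSB_OFFSET test above). Same assumed offset as the earlier
 * sketch: */
#include <stdint.h>

#define U_NDSB_OFFSET 64u /* assumed */

static uint16_t igu_to_drv_sb(uint16_t igu_sb_id, int *is_rx)
{
    if (igu_sb_id >= U_NDSB_OFFSET) {        /* USTORM (Rx) segment */
        *is_rx = 1;
        return (uint16_t)(igu_sb_id - U_NDSB_OFFSET);
    }
    *is_rx = 0;                              /* CSTORM (Tx) segment */
    return igu_sb_id;
}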
u8_t igu_sb_id;
for (igu_sb_id = 0; igu_sb_id < LM_IGU_SB_CNT(pdev); igu_sb_id++) {
prod_idx = (IGU_BASE_NDSB(pdev) + igu_sb_id);
lm_pf_release_vf_igu_block(pdev, vf_info->vf_chains[i].igu_sb_id);
u8_t igu_sb_id;
for (igu_sb_id = 0; igu_sb_id < igu_sb_cnt; igu_sb_id++) {
prod_idx = LM_VF_IGU_SB_ID(vf_info, igu_sb_id);
u8_t igu_sb_id = 0;
igu_sb_id = LM_VF_IGU_SB_ID(vf_info, vf_chain_id);
igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (igu_sb_id/32)*4;
sb_bit = 1 << (igu_sb_id%32);
(((IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id) << IGU_CTRL_REG_ADDRESS_SHIFT) |
igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
igu_sb_id, igu_sb_id/32, igu_sb_id%32, cnt);
u8_t igu_sb_id;
#define LM_VF_IGU_SB_ID(_vf_info, _igu_sb_id) ((_vf_info)->vf_chains[(_igu_sb_id)].igu_sb_id)
int i, igu_sb_id;
for (igu_sb_id = 0;
igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
igu_sb_id++) {
p_block = &p_igu_info->entry[igu_sb_id];
STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
u16 igu_sb_id, u32 pi_index,
sb_offset = igu_sb_id * PIS_PER_SB;
_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
dma_addr_t sb_phys, u16 igu_sb_id,
igu_sb_id * sizeof(u64), 2, 0);
igu_sb_id * sizeof(u64), 2, 0);
CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + igu_sb_id * 2,
CAU_REG_SB_VAR_MEMORY_RT_OFFSET + igu_sb_id * 2,
_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
igu_sb_id, TX_PI(i),
sb_info->igu_sb_id, 0, 0);
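/* --- Hedged sketch (not driver code): the CAU runtime index math above.
 * The SB address table stores one u64 per SB, written as two u32 runtime
 * slots (hence igu_sb_id * 2); the PI table is flat with PIS_PER_SB
 * entries per SB (matching the *12 stride used for CAU_REG_PI_MEMORY
 * further down). Compilable index helpers: */
#include <stdint.h>

#define PIS_PER_SB 12u

static uint32_t cau_sb_addr_rt_slot(uint16_t igu_sb_id)
{
    return (uint32_t)igu_sb_id * 2u;  /* two u32 slots per u64 address */
}

static uint32_t cau_pi_index(uint16_t igu_sb_id, uint32_t pi)
{
    return (uint32_t)igu_sb_id * PIS_PER_SB + pi;
}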
u16 igu_sb_id;
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
if (igu_sb_id == ECORE_SB_INVALID_IDX)
"Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
"SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
return igu_sb_id;
sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
p_block = &p_info->entry[sb_info->igu_sb_id];
(sb_info->igu_sb_id << 3);
((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
p_block = &p_info->entry[sb_info->igu_sb_id];
return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
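/* --- Hedged sketch (not driver code): the '<< 3' in the ack addresses
 * above converts an IGU command index into a byte offset, i.e. each
 * command cell in the IGU BAR is 8 bytes wide. Base value is a stand-in
 * for IGU_CMD_INT_ACK_BASE: */
#include <stdint.h>

#define CMD_INT_ACK_BASE 0x0400u /* assumed */

static uint32_t igu_ack_bar_offset(uint16_t igu_sb_id)
{
    return ((uint32_t)CMD_INT_ACK_BASE + igu_sb_id) << 3; /* index -> bytes */
}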
u16 igu_sb_id,
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
sb_bit = 1 << (igu_sb_id % 32);
sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
val, igu_sb_id);
u16 igu_sb_id, u16 opaque, bool b_set)
p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
igu_sb_id, p_block->function_id, p_block->is_pf,
ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
((igu_sb_id / 32) * 4));
if (val & (1 << (igu_sb_id % 32)))
igu_sb_id);
CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
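/* --- Hedged sketch (not driver code): clearing a SB's producer indices as
 * done above. PI entries are 4 bytes each, 12 per SB, laid out flat in
 * CAU_REG_PI_MEMORY; base address and register write are stand-ins: */
#include <stdint.h>

#define PI_MEM_BASE 0x100000u /* stand-in for CAU_REG_PI_MEMORY */
#define NUM_PIS     12u

static void reg_wr(uint32_t addr, uint32_t val) { (void)addr; (void)val; } /* stub */

static void clear_sb_pis(uint16_t igu_sb_id)
{
    uint32_t pi;
    for (pi = 0; pi < NUM_PIS; pi++)
        reg_wr(PI_MEM_BASE + ((uint32_t)igu_sb_id * NUM_PIS + pi) * 4u, 0);
}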
u16 igu_sb_id = 0;
for (igu_sb_id = 0;
igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
igu_sb_id++) {
p_block = &p_info->entry[igu_sb_id];
ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
u16 igu_sb_id;
for (igu_sb_id = p_info->igu_dsb_id;
igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
igu_sb_id++) {
p_block = &p_info->entry[igu_sb_id];
sizeof(u32) * igu_sb_id);
sizeof(u32) * igu_sb_id,
igu_sb_id, p_block->function_id,
u16 igu_sb_id)
IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
p_block->igu_sb_id = igu_sb_id;
u16 igu_sb_id;
for (igu_sb_id = 0;
igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
igu_sb_id++) {
ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
p_block = &p_igu_info->entry[igu_sb_id];
p_igu_info->igu_dsb_id = igu_sb_id;
igu_sb_id, p_block->function_id,
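/* --- Hedged sketch (not driver code): the CAM walk above records the
 * default SB (igu_dsb_id). The selection rule is assumed here to be "first
 * CAM entry owned by this PF", which is how the shadow walk reads: */
#include <stdint.h>

struct igu_block { uint8_t is_pf; uint8_t function_id; };

static int find_dsb(const struct igu_block *cam, uint16_t n, uint8_t pf_id)
{
    uint16_t i;
    for (i = 0; i < n; i++)
        if (cam[i].is_pf && cam[i].function_id == pf_id)
            return i;   /* taken as igu_dsb_id */
    return -1;          /* no entry for this PF */
}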
u16 igu_sb_id = 0, vf_num = 0;
igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
if (igu_sb_id == ECORE_SB_INVALID_IDX)
for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
igu_sb_id++) {
p_block = &p_info->entry[igu_sb_id];
if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
igu_sb_id, vf_num,
igu_sb_id, p_block->function_id,
u16 sbid = p_sb->igu_sb_id;
u16 igu_sb_id;
u16 igu_sb_id;
p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;