sb_id
void lm_init_connection_context(struct _lm_device_t *pdev, u32_t const sw_cid, u8_t sb_id)
void lm_init_connection_context(struct _lm_device_t *pdev, u32_t const sw_cid, u8_t sb_id);
u8_t sb_id = 0;
LM_FOREACH_SB_ID(pdev, sb_id)
LM_INTMEM_WRITE8(pdev, CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(LM_FW_SB_ID(pdev, sb_id)),
u8_t sb_id;
for (sb_id = 0; sb_id < LM_IGU_SB_CNT(pdev); sb_id++)
prod_idx = (IGU_BASE_NDSB(pdev) + sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
lm_int_ack_sb_enable(pdev, sb_id);
lm_int_igu_sb_cleanup(pdev, IGU_BASE_NDSB(pdev) + sb_id);
eq_data.sb_id = DEF_STATUS_BLOCK_INDEX;
if (!LM_SB_ID_VALID(pdev, sb_id))
u8_t sb_id = 0 ;
LM_FOREACH_SB_ID(pdev, sb_id)
vars->status_blocks_arr[sb_id].host_hc_status_block.e1x_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
vars->status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
vars->status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
vars->status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
vars->status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
if CHK_NULL(vars->status_blocks_arr[sb_id].host_hc_status_block.e1x_sb)
mm_mem_zero((void *)(vars->status_blocks_arr[sb_id].host_hc_status_block.e1x_sb), mem_size);
IN u8_t sb_id,
u32_t sb_id = RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,cid));
const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
if( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) )
DbgBreakIf( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) ) ;
sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
u32_t sb_id = RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,cid));
const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
sb_lock_id = lm_sb_id_from_chain(pdev, sb_id);
rxq_chain->hc_sb_info.iro_dhc_offset = sizeof(struct cstorm_queue_zone_data) * LM_FW_DHC_QZONE_ID(pdev, sb_id)
DbgMessage(pdev, WARN, "Dhc offset is 0x%x for VF Q Zone %d\n",rxq_chain->hc_sb_info.iro_dhc_offset,LM_FW_DHC_QZONE_ID(pdev, sb_id));
u32_t sb_id = RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,cid));
const u8_t byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
rcq_chain->iro_prod_offset = USTORM_RX_PRODS_E2_OFFSET(LM_FW_DHC_QZONE_ID(pdev, sb_id));
if( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) )
DbgBreakIf( sb_id >= ARRSIZE(pdev->vars.status_blocks_arr) ) ;
sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
fw_sb_id = LM_FW_SB_ID(pdev, sb_id);
u8_t sb_id = lm_sb_id_from_chain(pdev, cid);
lm_clear_chain_sb_cons_idx(pdev, sb_id, &LM_TXQ(pdev, cid).hc_sb_info, &LM_TXQ(pdev, cid).hw_con_idx_ptr);
lm_clear_chain_sb_cons_idx(pdev, sb_id, &LM_RCQ(pdev, cid).hc_sb_info, &LM_RCQ(pdev, cid).hw_con_idx_ptr);
u8_t sb_id = 0 ;
u8_t sb_id,
const u8_t fw_sb_id = LM_FW_SB_ID(pdev, sb_id);
const u8_t dhc_qzone_id = LM_FW_DHC_QZONE_ID(pdev, sb_id);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.pf_id = FUNC_ID(pdev);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.vf_id = 0xff;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.vf_valid = FALSE;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.p_func.vnic_id = VNIC_ID(pdev);
LM_FOREACH_SB_ID(pdev, sb_id)
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.same_igu_sb_1b = TRUE;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.same_igu_sb_1b = FALSE;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.state = SB_ENABLED;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.pf_id = FUNC_ID(pdev);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_id = 0xff;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_valid = FALSE;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_id = ABS_VFID(pdev);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vf_valid = TRUE;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.p_func.vnic_id = VNIC_ID(pdev);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.same_igu_sb_1b = TRUE;
if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_TOE_RX_CQ_CONS, is_enable)) != LM_STATUS_SUCCESS)
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.same_igu_sb_1b = FALSE;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.dhc_qzone_id = dhc_qzone_id;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.state = SB_ENABLED;
igu_sb_id = IGU_BASE_NDSB(pdev) + /*IGU_U_NDSB_OFFSET(pdev)*/ + sb_id;
igu_sb_id = sb_id;
lm_setup_ndsb_state_machine(pdev, sb_id, SM_RX_ID, igu_sb_id + IGU_U_NDSB_OFFSET(pdev), igu_seg_id);
lm_setup_ndsb_state_machine(pdev, sb_id, SM_TX_ID, igu_sb_id,igu_seg_id);
if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_TOE_TX_CQ_CONS, is_enable)) != LM_STATUS_SUCCESS)
if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_RX_CQ_CONS, is_enable)) != LM_STATUS_SUCCESS)
if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_TX_CQ_CONS_COS0, is_enable)) != LM_STATUS_SUCCESS)
if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_TX_CQ_CONS_COS1, is_enable)) != LM_STATUS_SUCCESS)
lm_setup_ndsb_index(pdev, sb_id, index, sm_idx, timeout, dhc_enable);
if ((lm_status = lm_set_hc_flag(pdev, sb_id, HC_INDEX_ETH_TX_CQ_CONS_COS2, is_enable)) != LM_STATUS_SUCCESS)
*((u32_t*)(&pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data) + index), BAR_CSTRORM_INTMEM);
*((u32_t*)(&pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data) + index), BAR_CSTRORM_INTMEM);
lm_status_t lm_set_hc_flag(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t is_enable)
hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.index_data + idx;
hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.index_data + idx;
fw_sb_id = LM_FW_SB_ID(pdev, sb_id);
DbgMessage(pdev, INFORMi, "HC set to %d for SB%d(index%d)\n",is_enable,sb_id,idx);
DbgMessage(pdev, INFORMi, "HC already set to %d for SB%d(index%d)\n",is_enable,sb_id,idx);
u8_t sb_id,
attention_sb->status_block_id = sb_id;
u8_t sb_id,
init_hc_attn_status_block(pdev,sb_id,host_sb_addr);
u8_t sb_id = 0;
LM_FOREACH_SB_ID(pdev, sb_id)
lm_init_non_def_status_block(pdev, sb_id, port);
void lm_setup_ndsb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t sm_idx, u8_t timeout, u8_t dhc_enable)
hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.index_data + idx;
hc_index_entry = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.index_data + idx;
void lm_setup_ndsb_state_machine(struct _lm_device_t *pdev, u8_t sb_id, u8_t sm_id, u8_t igu_sb_id, u8_t igu_seg_id)
hc_state_machine = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e1x_sb_data.common.state_machine + sm_id;
hc_state_machine = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.state_machine + sm_id;
IN const u8_t sb_id)
rx->status_block_id = LM_FW_SB_ID(pdev, sb_id);
rx->client_qzone_id = LM_FW_DHC_QZONE_ID(pdev, sb_id);
IN const u8_t sb_id)
tx->tx_status_block_id = LM_FW_SB_ID(pdev, sb_id);
IN const u8_t sb_id)
sb_id);
lm_status_t lm_eth_init_client_init_data(lm_device_t *pdev, u8_t cid, u8_t sb_id)
sb_id);
sb_id);
lm_status_t lm_establish_eth_con(struct _lm_device_t *pdev, u8_t const chain_idx, u8_t sb_id, u8_t attributes_bitmap)
lm_status = lm_eth_init_client_init_data(pdev, cid, sb_id);
lm_status = lm_eth_init_tx_queue_data(pdev, cid, sb_id);
lm_init_connection_context(pdev, cid, sb_id);
u8_t sb_id /* Status block id (EQ consumer) */;
u8_t sb_id /* status block ID */;
u8_t sb_id /* status block ID */;
lm_status_t lm_set_hc_flag(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t is_enable);
/* True iff the sw (driver) status-block index sb_id is within this device's SB count. */
#define LM_SB_ID_VALID(pdev, sb_id) ((sb_id) < LM_SB_CNT(pdev))
/* Iterate sb_id over every non-default status block of pdev: 0 .. LM_SB_CNT(pdev)-1. */
#define LM_FOREACH_SB_ID(pdev, sb_id) \
for ((sb_id) = 0; (sb_id) < LM_SB_CNT(pdev); (sb_id)++)
/* FW queue-zone id used for dynamic host coalescing: looked up in the per-device
 * fw_qzone_id table by sw SB index.
 * Fixed: macro arguments are now parenthesized — the old expansion
 * (pdev->params.fw_qzone_id[sb_id]) mis-bound for pointer-expression pdev
 * arguments such as (&dev) because -> binds tighter than unary operators. */
#define LM_FW_DHC_QZONE_ID(pdev, sb_id) ((pdev)->params.fw_qzone_id[(sb_id)])
/* Map a sw SB index to its FW SB id: the default status block maps to itself,
 * any other index is offset by this function's base FW NDSB.
 * Fixed: macro arguments are now fully parenthesized — the old expansion
 * (pdev->params.base_fw_ndsb + sb_id) broke for pointer-expression pdev
 * arguments such as (&dev) and for compound sb_id expressions. */
#define LM_FW_SB_ID(pdev, sb_id) (((sb_id) == DEF_STATUS_BLOCK_INDEX) ? DEF_STATUS_BLOCK_INDEX : ((pdev)->params.base_fw_ndsb + (sb_id)))
/* IGU non-default SB id for a PF's sw SB index: the device's IGU base NDSB plus
 * the sw index (PF SBs are consecutive in the IGU).
 * Fixed: sb_id is now parenthesized so compound argument expressions expand
 * with the intended precedence. */
#define IGU_PF_NDSB(pdev, sb_id) (IGU_BASE_NDSB(pdev) + (sb_id))
/* IGU SB id of a VF's sw SB index, read from the per-device IGU info table
 * (VF SBs are not consecutive, so each one is looked up individually). */
#define IGU_VF_NDSB(pdev, sb_id) ((pdev)->hw_info.intr_blk_info.igu_info.igu_sb[sb_id])
/* IGU block descriptor for sw SB index sb_id, from the device's IGU block map. */
#define IGU_SB(pdev, sb_id) ((pdev)->hw_info.intr_blk_info.igu_info.igu_map.igu_blocks_set[sb_id])
void lm_setup_ndsb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx, u8_t sm_idx, u8_t timeout, u8_t dhc_enable);
static __inline u16_t lm_get_sb_running_index(lm_device_t *pdev, u8_t sb_id, u8_t sm_idx)
return lm_vf_pf_get_sb_running_index(pdev, sb_id, sm_idx);
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e1x_sb->sb.running_index[sm_idx]);
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb->sb.running_index[sm_idx]);
static __inline u16_t lm_get_sb_index(lm_device_t *pdev, u8_t sb_id, u8_t idx)
return lm_vf_pf_get_sb_index(pdev, sb_id, idx);
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e1x_sb->sb.index_values[idx]);
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb->sb.index_values[idx]);
lm_status_t lm_establish_eth_con(struct _lm_device_t *pdev, u8_t const cid, u8_t sb_id, u8_t attributes_bitmap);
void lm_update_def_hc_indices(lm_device_t *pdev, u8_t sb_id, u32_t *activity_flg);
u8_t sb_id);
u8_t sb_id,
u8_t sb_id = 0 ;
sb_id = DEF_STATUS_BLOCK_INDEX;
sb_id = (u8_t)RSS_ID_TO_SB_ID(CHAIN_TO_RSS_ID(pdev,(u32_t)chain_idx));
return sb_id;
u32_t sb_id;
sb_id = RSS_ID_TO_SB_ID(i);
sb_id = LM_NON_RSS_SB(pdev);
sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
sb_id = RSS_ID_TO_SB_ID(i);
sb_id = LM_NON_RSS_SB(pdev);
byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
ramrod_params->fcoe_init.sb_id = HC_INDEX_FCOE_EQ_CONS;
u8_t sb_id = 0 ;
LM_FOREACH_SB_ID(pdev, sb_id)
pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
if CHK_NULL(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e1x_sb)
mm_mem_zero((void *)(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb), mem_size);
u8_t sb_id;
for (sb_id = 0; sb_id < LM_IGU_SB_CNT(pdev); sb_id++) {
prod_idx = (IGU_BASE_NDSB(pdev) + sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
lm_int_ack_sb_enable(pdev, sb_id);
u16_t lm_vf_pf_get_sb_running_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t sm_idx)
u16_t lm_vf_pf_get_sb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx)
u16_t lm_vf_pf_get_sb_running_index(lm_device_t *pdev, u8_t sb_id, u8_t sm_idx)
running_index = pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[p_sw_resp->pfdev_info.indices_per_sb + sm_idx];
running_index = pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[p_hw_resp->pfdev_info.indices_per_sb + sm_idx];
u16_t lm_vf_pf_get_sb_index(lm_device_t *pdev, u8_t sb_id, u8_t idx)
DbgBreakIf(!(p_sw_resp && (sb_id < p_sw_resp->pfdev_info.indices_per_sb)));
DbgBreakIf(!(p_hw_resp && (sb_id < p_hw_resp->pfdev_info.indices_per_sb)));
return mm_le16_to_cpu(pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb[sb_id]);
u8_t sb_id;
LM_FOREACH_SB_ID(pdev,sb_id) {
mess->sb_addr[sb_id] = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u64;
LM_FOREACH_SB_ID(pdev,sb_id) {
mess->sb_addr[sb_id] = pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u64;
u8_t sb_id = 0 ;
LM_FOREACH_SB_ID(pdev, sb_id)
pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb = p_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u32.low = sb_phy_address.as_u32.low;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.vf_sb_phy_address.as_u32.high = sb_phy_address.as_u32.high;
pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb = p_sb = mm_alloc_phys_mem(pdev, mem_size, &sb_phy_address, 0, mm_cli_idx);
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo = sb_phy_address.as_u32.low;
pdev->vars.status_blocks_arr[sb_id].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi = sb_phy_address.as_u32.high;
p_sb = (void *)pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.vf_sb;
p_sb = (void *)pdev->vars.status_blocks_arr[sb_id].host_hc_status_block.e2_sb;
u8_t sb_id;
for (sb_id = 0; sb_id < igu_sb_cnt; sb_id++)
lm_int_igu_ack_sb(pdev, IGU_VF_NDSB(pdev,sb_id), IGU_SEG_ACCESS_NORM, 0, IGU_INT_DISABLE, 0);
lm_int_igu_ack_sb(pdev, IGU_VF_NDSB(pdev,sb_id), IGU_SEG_ACCESS_NORM, 0, IGU_INT_ENABLE, 1);
u8_t sb_id;
for (sb_id = 0; sb_id < igu_sb_cnt; sb_id++) {
prod_idx = LM_VF_IGU_SB_ID(vf_info,sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
SB_RX_INDEX(pdev,LM_VF_IGU_SB_ID(vf_info,sb_id)) = 0;
lm_int_ack_sb_enable(pdev, LM_VF_IGU_SB_ID(vf_info,sb_id));
lm_pf_int_vf_igu_sb_cleanup(pdev, vf_info, sb_id);
u16_t lm_vf_pf_get_sb_running_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t sm_idx);
u16_t lm_vf_pf_get_sb_index(struct _lm_device_t *pdev, u8_t sb_id, u8_t idx);
int sb_id;
sb_id = lm_sb_id_from_chain(&pUM->lm_dev, cid);
rc = lm_establish_eth_con(pLM, cid, sb_id,
u8_t sb_id = LM_NON_RSS_SB(pLM);
sb_id, drv_rss_id);
ddi_dma_sync(pUM->statusBlocks[sb_id]->dmaHandle,
BnxeCheckDmaHandle(pUM->statusBlocks[sb_id]->dmaHandle) != DDI_FM_OK)
pUM->intrSbPollCnt[sb_id]++;
if (lm_is_sb_updated(pLM, sb_id) == 0)
pUM->intrSbPollNoChangeCnt[sb_id]++;
lm_update_fp_hc_indices(pLM, sb_id, &activity_flg, &drv_rss_id);
sb_id, activity_flg);
u8_t sb_id,
drv_rss_id = lm_map_igu_sb_id_to_drv_rss(pLM, sb_id);
sb_id, drv_rss_id);
ddi_dma_sync(pUM->statusBlocks[sb_id]->dmaHandle,
BnxeCheckDmaHandle(pUM->statusBlocks[sb_id]->dmaHandle) != DDI_FM_OK)
pUM->intrSbCnt[sb_id]++;
if (lm_is_sb_updated(pLM, sb_id) == 0)
pUM->intrSbNoChangeCnt[sb_id]++;
lm_update_fp_hc_indices(pLM, sb_id, &activity_flg, &drv_rss_id);
int sb_id = (int)(uintptr_t)arg2;
BNXE_LOCK_ENTER_INTR(pUM, sb_id);
BNXE_LOCK_EXIT_INTR(pUM, sb_id);
BnxeLogDbg(pUM, "-> BNXE MSIX Interrupt SB %d <-", sb_id);
BNXE_LOCK_EXIT_INTR(pUM, sb_id);
if (sb_id == DEF_STATUS_BLOCK_IGU_INDEX)
idx = ((sb_id == LM_NON_RSS_SB(pLM)) &&
FCOE_CID(pLM) : sb_id;
BnxeServiceSbIntr(pUM, sb_id, &pktsRxed, &pktsTxed);
if (pktsTxed) BnxeTxRingProcess(pUM, sb_id);
if (pktsRxed) BnxeRxRingProcess(pUM, sb_id, B_FALSE, 0);
BNXE_LOCK_EXIT_INTR(pUM, sb_id);
u16 sb_id);
__le16 sb_id /* Status block ID */;
__le16 sb_id /* Status block ID */;
u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
if (sb_id == ECORE_SP_SB_ID)
igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
sb_id);
else if (sb_id == ECORE_SP_SB_ID)
"SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
u16 sb_id)
sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
if (sb_id != ECORE_SP_SB_ID) {
ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
u16 sb_id)
ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
u16 sb_id, bool b_to_vf)
if (sb_id == ECORE_SP_SB_ID)
igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
p_block->vector_number = sb_id + 1;
u8 timer_res, u16 sb_id, bool tx)
sb_id * sizeof(u64),
sb_id * sizeof(u64), 2, 0);
u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
u16 sb_id);
u16 sb_id);
u16 sb_id, bool b_to_vf);
p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
u16 sb_id);
u16 sb_id, struct ecore_sb_info *p_sb);
static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) {return 0;}
static OSAL_INLINE void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn, u16 sb_id, struct ecore_sb_info *p_sb) {}
__le16 sb_id /* Status block ID */;
__le16 sb_id /* Status block ID */;
__le16 sb_id /* Status block ID */;
__le16 sb_id /* Status block ID */;
int sb_id;
sb_id = vect_info->vect_index;
p_hwfn = &edev->hwfns[sb_id % qede->num_hwfns];