#include "lm5710.h"
#include "license.h"
#include "mcp_shmem.h"
#include "command.h"
#include "debug.h"
/* TRUE when HW (MAC/NIG) statistics collection is enabled AND this function
 * is the PMF - presumably only the PMF may read the shared port MACs
 * (TODO confirm against IS_PMF semantics). */
#define LM_STATS_IS_HW_ACTIVE(_pdev) ( (_pdev)->vars.stats.stats_collect.stats_hw.b_collect_enabled && \
IS_PMF(_pdev) )
/* Execute _cmd only in single-function (non multi-vnic) mode.
 * NOTE(review): deliberately left as a bare if-statement (with built-in
 * trailing ';') to keep existing call sites compiling unchanged; do not
 * follow an invocation with an 'else'. */
#define LM_STATS_DO_IF_SF(_pdev,_cmd) if( !(_pdev)->hw_info.mf_info.multi_vnics_mode ){ _cmd; } ;
/* Split a 64-bit value into a pair of fields named <prefix>_hi / <prefix>_lo. */
#define LM_STATS_64_TO_HI_LO( _x_64_, _hi_lo ) ( _hi_lo##_hi = (u32_t)U64_HI( _x_64_ ) ); ( _hi_lo##_lo = (u32_t)U64_LO( _x_64_ ) );
/* Recombine a <prefix>_hi / <prefix>_lo pair into a 64-bit value. */
#define LM_STATS_HI_LO_TO_64( _hi_lo, _x_64_ ) ( _x_64_ = (((u64_t)(_hi_lo##_hi) << 32) | (_hi_lo##_lo)) )
/* Mask covering the low _bits data bits of a 64-bit accumulator.
 * Arguments are fully parenthesized (macro hygiene fix) - expansion is
 * unchanged for the existing call sites, which pass plain constants. */
#define DATA_MASK(_bits) (((u64_t)-1)>>(64-(_bits)))
/* The HW-counter portion of an accumulated 64-bit value. */
#define STATS_DATA(_bits,_val) ( (_val) & DATA_MASK(_bits) )
/* The high bits used by the driver to count HW-counter wraparounds. */
#define WRAPAROUND_COUNT_MASK(_bits) ( ~ DATA_MASK(_bits) )
/* A new raw sample smaller than the previous one means the _bits-wide
 * HW counter wrapped since the last collection. */
#define HAS_WRAPPED_AROUND(_bits,_old,_new) ((STATS_DATA(_bits,_old) ) > (STATS_DATA(_bits,_new) ))
/* Credit one full wrap period (2^_bits) to the accumulated value. */
#define INC_WRAPAROUND_COUNT(_bits,_val) ((_val) + ( 1ull << (_bits) ) )
/**
 * Extend a narrow (data_bits-wide) HW counter sample into a monotonic
 * 64-bit software accumulator, crediting a full wrap period whenever the
 * raw counter wrapped since the previous sample.
 *
 * @param data_bits    width of the HW counter (32, 36 or 42 at call sites)
 * @param val_current  latest raw sample from the HW
 * @param val_prev     previous accumulated 64-bit value
 * @param b_swap_bytes TRUE when the sample arrives little-endian
 *                     (only supported for 32-bit counters)
 * @return the updated 64-bit accumulated value
 */
static u64_t lm_update_wraparound_if_needed(u8_t data_bits, u64_t val_current, u64_t val_prev, u8_t b_swap_bytes)
{
    u64_t wrap_count = 0;
    u64_t new_data   = 0;

    if (b_swap_bytes)
    {
        /* byte swapping is only implemented for 32-bit samples */
        DbgBreakIf(data_bits != 32);
        val_current = mm_le32_to_cpu(val_current);
    }

    wrap_count = val_prev & WRAPAROUND_COUNT_MASK(data_bits);
    new_data   = val_current & DATA_MASK(data_bits);

    /* counter wrapped since the last collection - add one full period */
    if (HAS_WRAPPED_AROUND(data_bits, val_prev, val_current))
    {
        wrap_count += 1ull << data_bits;
    }

    return (wrap_count | new_data);
}
/* NOTE: despite the "SIGN_EXTEND" name, these macros perform wraparound
 * extension (see lm_update_wraparound_if_needed), accumulating a narrow HW
 * counter into a 64-bit value held in val_prev_64. The 32-bit variant also
 * byte-swaps the sample (CHANGE_ENDIANITY); the 36/42-bit variants do not. */
#define LM_SIGN_EXTEND_VALUE_32( val_current_32, val_prev_64 ) \
val_prev_64 = lm_update_wraparound_if_needed( 32, val_current_32, val_prev_64, CHANGE_ENDIANITY )
#define LM_SIGN_EXTEND_VALUE_36( val_current_36, val_prev_64 ) \
val_prev_64 = lm_update_wraparound_if_needed( 36, val_current_36, val_prev_64, FALSE)
#define LM_SIGN_EXTEND_VALUE_42( val_current_42, val_prev_64 ) \
val_prev_64 = lm_update_wraparound_if_needed( 42, val_current_42, val_prev_64, FALSE )
u8_t is_pending_stats_completion(struct _lm_device_t * pdev);
lm_status_t lm_stats_hw_collect( struct _lm_device_t *pdev );
#ifdef _VBD_CMD_
extern volatile u32_t* g_everest_sim_flags_ptr;
#define EVEREST_SIM_STATS 0x02
#endif
/**
 * lm_get_stats - fetch one 64-bit statistics counter from the driver's
 * software mirrors (FW storm statistics and, for PF-only counters, the
 * HW MAC/NIG mirrors).
 *
 * @param pdev      device object
 * @param stats_type which counter to fetch
 * @param stats_cnt  out: the counter value (written as lm_u64_t)
 * @param vf_info    (VF_INVOLVED builds) non-NULL to read a VF's mirrored
 *                   FW stats instead of the PF's; HW-derived counters are
 *                   not available per-VF (guarded by DbgBreakIf below)
 * @return LM_STATUS_SUCCESS, or LM_STATUS_INVALID_PARAMETER for an
 *         unknown stats_type (counter is then reported as 0)
 */
lm_status_t
lm_get_stats( lm_device_t* pdev,
lm_stats_t stats_type,
u64_t* stats_cnt
#ifdef VF_INVOLVED
,lm_vf_info_t * vf_info
#endif
)
{
lm_status_t lm_status = LM_STATUS_SUCCESS;
lm_u64_t* stats = (lm_u64_t *)stats_cnt;
/* All client (per-queue) statistics below are read for the NDIS client index. */
const u32_t i = LM_CLI_IDX_NDIS;
lm_stats_fw_t* stats_fw = NULL;
#ifdef VF_INVOLVED
/* For a VF, read from the VF's mirrored FW stats and count the extraction. */
if (vf_info != NULL) {
stats_fw = (lm_stats_fw_t*)vf_info->vf_stats.mirror_stats_fw;
vf_info->vf_stats.vf_exracted_stats_cnt++;
} else
#endif
{
stats_fw = &pdev->vars.stats.stats_mirror.stats_fw;
}
switch(stats_type)
{
case LM_STATS_FRAMES_XMITTED_OK:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].total_sent_pkts ;
break;
/* Frames received OK = all rx ucast+mcast+bcast minus the packets
 * dropped for lack of buffers (counted by the ustorm). */
case LM_STATS_FRAMES_RECEIVED_OK:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_pkts +
stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_pkts +
stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_pkts ;
stats->as_u64-= stats_fw->eth_ustorm_common.client_statistics[i].ucast_no_buff_pkts ;
stats->as_u64-= stats_fw->eth_ustorm_common.client_statistics[i].mcast_no_buff_pkts ;
stats->as_u64-= stats_fw->eth_ustorm_common.client_statistics[i].bcast_no_buff_pkts ;
break;
/* HW-derived counters below mix in MAC statistics and are therefore
 * PF-only (DbgBreakIf(vf_info) in VF builds). */
case LM_STATS_ERRORED_RECEIVE_CNT:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
/* Sum of tstorm discard counters plus the MAC's too-long-frame count.
 * Note: reads the PF mirror directly (not stats_fw), hence PF-only. */
#define LM_STATS_ERROR_DISCARD_SUM( _pdev, _i ) _pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[_i].checksum_discard + \
_pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[_i].packets_too_big_discard + \
_pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_discard + \
LM_STATS_HW_GET_MACS_U64(_pdev, stats_rx.rx_stat_dot3statsframestoolong )
stats->as_u64 = LM_STATS_ERROR_DISCARD_SUM( pdev, i ) ;
break;
case LM_STATS_RCV_CRC_ERROR:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_rx.rx_stat_dot3statsfcserrors) ;
break;
/* MAC-level counters are only meaningful on the PMF; non-PMF
 * functions report 0 for them. */
case LM_STATS_ALIGNMENT_ERROR:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
if( !IS_PMF(pdev))
{
stats->as_u64 = 0 ;
}
else
{
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_rx.rx_stat_dot3statsalignmenterrors) ;
}
break;
case LM_STATS_SINGLE_COLLISION_FRAMES:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
if( !IS_PMF(pdev) )
{
stats->as_u64 = 0 ;
}
else
{
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statssinglecollisionframes ) ;
}
break;
case LM_STATS_MULTIPLE_COLLISION_FRAMES:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
if( !IS_PMF(pdev) )
{
stats->as_u64 = 0 ;
}
else
{
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsmultiplecollisionframes ) ;
}
break;
case LM_STATS_FRAMES_DEFERRED:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsdeferredtransmissions ) ;
break;
case LM_STATS_MAX_COLLISIONS:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsexcessivecollisions ) ;
break;
/* Plain per-client FW counters - available for both PF and VF. */
case LM_STATS_UNICAST_FRAMES_XMIT:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].unicast_pkts_sent ;
break;
case LM_STATS_MULTICAST_FRAMES_XMIT:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].multicast_pkts_sent ;
break;
case LM_STATS_BROADCAST_FRAMES_XMIT:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].broadcast_pkts_sent ;
break;
case LM_STATS_UNICAST_FRAMES_RCV:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_pkts ;
break;
case LM_STATS_MULTICAST_FRAMES_RCV:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_pkts ;
break;
case LM_STATS_BROADCAST_FRAMES_RCV:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_pkts ;
break;
case LM_STATS_ERRORED_TRANSMIT_CNT:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
if( !IS_PMF(pdev) )
{
stats->as_u64 = 0 ;
}
else
{
stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsinternalmactransmiterrors ) ;
}
break;
/* Overrun = BRB discards/truncations (NIG mirror) plus tstorm
 * xxoverflow discards. PF-only: reads the PF mirror directly. */
case LM_STATS_RCV_OVERRUN:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
stats->as_u64 = pdev->vars.stats.stats_mirror.stats_hw.nig.brb_discard ;
stats->as_u64+= pdev->vars.stats.stats_mirror.stats_hw.nig.brb_truncate ;
stats->as_u64+= pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.xxoverflow_discard ;
break;
/* No underrun counter is collected - always reported as 0. */
case LM_STATS_XMIT_UNDERRUN:
#ifdef VF_INVOLVED
DbgBreakIf(vf_info);
#endif
stats->as_u64 = 0;
break;
case LM_STATS_RCV_NO_BUFFER_DROP:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].no_buff_discard ;
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].ucast_no_buff_pkts ;
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].mcast_no_buff_pkts ;
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].bcast_no_buff_pkts ;
break;
case LM_STATS_BYTES_RCV:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_bytes +
stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_bytes +
stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_bytes ;
break;
case LM_STATS_BYTES_XMIT:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].total_sent_bytes ;
break;
/* Aggregate in-discards: per-PF discard sum (0 for a VF, which has no
 * HW/port counters), plus no-buffer drops, plus - PF only - the NIG
 * BRB discard/truncate counters, plus xxoverflow discards. */
case LM_STATS_IF_IN_DISCARDS:
#ifdef VF_INVOLVED
if (vf_info != NULL)
{
stats->as_u64 = 0;
}
else
#endif
{
stats->as_u64 = LM_STATS_ERROR_DISCARD_SUM( pdev, i ) ;
}
stats->as_u64+= stats_fw->eth_tstorm_common.client_statistics[i].no_buff_discard ;
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].ucast_no_buff_pkts ;
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].mcast_no_buff_pkts ;
stats->as_u64+= stats_fw->eth_ustorm_common.client_statistics[i].bcast_no_buff_pkts ;
#ifdef VF_INVOLVED
if (vf_info == NULL)
#endif
{
stats->as_u64+= pdev->vars.stats.stats_mirror.stats_hw.nig.brb_discard ;
stats->as_u64+= pdev->vars.stats.stats_mirror.stats_hw.nig.brb_truncate ;
}
stats->as_u64+= stats_fw->eth_tstorm_common.port_statistics.xxoverflow_discard ;
break;
case LM_STATS_MULTICAST_BYTES_RCV:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_bytes ;
break;
case LM_STATS_DIRECTED_BYTES_RCV:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_bytes ;
break;
case LM_STATS_BROADCAST_BYTES_RCV:
stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_bytes ;
break;
case LM_STATS_DIRECTED_BYTES_XMIT:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].unicast_bytes_sent ;
break;
case LM_STATS_MULTICAST_BYTES_XMIT:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].multicast_bytes_sent ;
break;
case LM_STATS_BROADCAST_BYTES_XMIT:
stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].broadcast_bytes_sent ;
break;
default:
stats->as_u64 = 0 ;
lm_status = LM_STATUS_INVALID_PARAMETER;
break;
}
return lm_status;
}
/**
 * Zero the driver's entire software mirror of statistics (FW and HW).
 * Collection state (enabled flags, DMAE resources) is not touched.
 */
void lm_stats_reset( struct _lm_device_t* pdev)
{
    DbgMessage(pdev, INFORM, "Zero 'mirror' statistics...\n");

    mm_mem_zero(&pdev->vars.stats.stats_mirror,
                sizeof(pdev->vars.stats.stats_mirror));
}
/**
 * Check whether the management FW has requested (through the shmem2
 * edebug driver interface) that statistics collection be suspended.
 *
 * @return TRUE when statistics are disabled via shmem2, FALSE otherwise
 *         (including when shmem2 is absent or too old to carry the field).
 */
static u32_t
lm_edebug_if_is_stats_disabled(struct _lm_device_t * pdev)
{
    const u32_t offset      = OFFSETOF(shmem2_region_t, edebug_driver_if[1]);
    u32_t       shmem2_size = 0;
    u32_t       val         = 0;

    /* no shmem2 region - nothing to consult */
    if (0 == pdev->hw_info.shmem_base2)
    {
        return FALSE;
    }

    LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t, size), &shmem2_size);

    /* shmem2 predates the edebug_driver_if field */
    if (shmem2_size <= offset)
    {
        return FALSE;
    }

    LM_SHMEM2_READ(pdev, offset, &val);

    return (EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT == val) ? TRUE : FALSE;
}
/**
 * Post a firmware statistics-query ramrod for the current driver counter.
 * On a VF the request is a no-op (the PF collects on its behalf).
 *
 * @return LM_STATUS_SUCCESS when the ramrod was queued; on any other
 *         status the per-cycle flags remain armed but no ramrod is pending.
 */
static lm_status_t lm_stats_fw_post_request(lm_device_t *pdev)
{
lm_status_t lm_status = LM_STATUS_SUCCESS;
lm_stats_fw_collect_t * stats_fw = &pdev->vars.stats.stats_collect.stats_fw;
/* Stamp the request with the current driver counter so the completion
 * can be matched to this query cycle. */
stats_fw->fw_stats_req->hdr.drv_stats_counter = mm_cpu_to_le16(stats_fw->drv_counter);
/* Re-arm the per-cycle completion tracking before posting. */
stats_fw->timer_wakeup_no_completion_current = 0 ;
stats_fw->b_completion_done = FALSE ;
if (IS_VFDEV(pdev))
{
return LM_STATUS_SUCCESS;
}
stats_fw->b_ramrod_completed = FALSE ;
#ifdef VF_INVOLVED
#ifndef __LINUX
/* A channel-virt master PF also queries on behalf of its VFs. */
if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev))
{
lm_stats_prep_vf_fw_stats_req(pdev);
}
#endif
#endif
lm_status = lm_sq_post_entry(pdev,&(stats_fw->stats_sp_list_command),CMD_PRIORITY_HIGH);
DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
if (lm_status == LM_STATUS_SUCCESS)
{
++stats_fw->stats_ramrod_cnt ;
}
return lm_status;
}
/**
 * Periodic statistics timer body. Each tick it:
 *  - skips collection entirely when SP recording or the shmem edebug
 *    interface disables statistics;
 *  - for FW stats: if the previous query completed, posts a new one;
 *    otherwise tracks how many ticks passed without completion and
 *    (optionally) triggers debug facilities after too many misses;
 *  - for HW stats (PMF only): DMAs the MAC/NIG counters while link is up
 *    and refreshes the mirrors handed to management FW.
 */
void lm_stats_on_timer( struct _lm_device_t * pdev )
{
lm_status_t lm_status = LM_STATUS_SUCCESS ;
u32_t val = 0 ;
if CHK_NULL( pdev )
{
DbgBreakIf(!pdev) ;
return;
}
++pdev->vars.stats.stats_collect.timer_wakeup ;
#ifdef _VBD_CMD_
/* Simulation builds: FW stats collection may be gated off globally. */
val = GET_FLAGS(*g_everest_sim_flags_ptr, EVEREST_SIM_STATS);
pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled = val && pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled;
#endif
/* While SP recording is active, do not issue statistics ramrods. */
if (pdev->params.record_sp)
{
++pdev->vars.stats.stats_collect.sp_record_disabled;
return;
}
if(lm_edebug_if_is_stats_disabled(pdev))
{
++pdev->vars.stats.stats_collect.shmem_disabled;
return;
}
if( pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled )
{
/* LM_STATUS_BUSY: the previous FW query has not completed yet. */
if( lm_stats_fw_complete( pdev ) == LM_STATUS_BUSY)
{
val = ++pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_current ;
if( pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_max < val )
{
pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_max = val ;
}
/* Too many ticks without a completion (and none pending), or far
 * too many with one pending but unhandled - debug escalation. */
if ((!is_pending_stats_completion(pdev) && (val >= MAX_STATS_TIMER_WAKEUP_NO_COMPLETION)) ||
(val >= MAX_STATS_TIMER_WAKEUP_COMP_NOT_HANDLED))
{
if(GET_FLAGS(pdev->params.debug_cap_flags,DEBUG_CAP_FLAGS_STATS_FW))
{
LM_TRIGGER_PCIE(pdev);
}
/* Only assert when no chip reset is in progress (resets legitimately
 * stall completions). */
if (!lm_reset_is_inprogress(pdev))
{
if(GET_FLAGS(pdev->params.debug_cap_flags,DEBUG_CAP_FLAGS_STATS_FW))
{
DbgBreakIfAll( val >= MAX_STATS_TIMER_WAKEUP_NO_COMPLETION ) ;
}
}
}
lm_57710A0_dbg_intr(pdev);
++pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_total ;
}
else
{
/* Previous cycle done - kick off the next FW query. */
lm_status = lm_stats_fw_post_request(pdev);
DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
}
}
if( LM_STATS_IS_HW_ACTIVE(pdev) )
{
/* Link state is re-checked under the PHY lock - it may have dropped
 * between the first test and lock acquisition. */
if( pdev->vars.stats.stats_collect.stats_hw.b_is_link_up )
{
MM_ACQUIRE_PHY_LOCK_DPC(pdev);
if( pdev->vars.stats.stats_collect.stats_hw.b_is_link_up )
{
lm_status = lm_stats_hw_collect( pdev );
DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
lm_stats_hw_assign( pdev ) ;
}
lm_stats_mgmt_assign( pdev ) ;
MM_RELEASE_PHY_LOCK_DPC(pdev);
}
}
else if( pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled &&
pdev->vars.stats.stats_collect.stats_hw.b_is_link_up )
{
/* Non-PMF with link up: still refresh the management mirror. */
MM_ACQUIRE_PHY_LOCK_DPC(pdev);
lm_stats_mgmt_assign( pdev ) ;
MM_RELEASE_PHY_LOCK_DPC(pdev);
}
}
/**
 * Report whether a statistics completion may still be delivered: TRUE when
 * the port's HC interrupt-mask bit 0 is clear (or a non-HC interrupt block
 * is used) and the event queue holds an outstanding completion.
 */
u8_t is_pending_stats_completion(struct _lm_device_t * pdev)
{
    volatile struct hc_sp_status_block * sp_sb = NULL;
    u32_t int_mask = 0;

    /* Only the HC interrupt block exposes a per-port mask register. */
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC)
    {
        int_mask = REG_RD(pdev, HC_REG_INT_MASK + 4*PORT_ID(pdev));
    }

    sp_sb = lm_get_default_status_block(pdev);

    if (!GET_FLAGS(int_mask, 1) && lm_is_eq_completion(pdev))
    {
        return TRUE;
    }

    return FALSE;
}
/**
 * Pick the DMAE operation that matches the active statistics source:
 * the shared non-EMAC operation for MSTAT-capable chips and BMAC, the
 * dedicated EMAC operation for EMAC, NULL (with a debug break) otherwise.
 */
static lm_dmae_operation_t*
lm_stats_get_dmae_operation(lm_device_t* pdev)
{
    lm_stats_hw_collect_t* stats_hw = &pdev->vars.stats.stats_collect.stats_hw;

    /* MSTAT devices and BMAC share one SGE layout / operation. */
    if (HAS_MSTAT(pdev) || (MAC_TYPE_BMAC == pdev->vars.mac_type))
    {
        return (lm_dmae_operation_t*)stats_hw->non_emac_dmae_operation;
    }

    if (MAC_TYPE_EMAC == pdev->vars.mac_type)
    {
        return (lm_dmae_operation_t*)stats_hw->emac_dmae_operation;
    }

    /* unexpected MAC type */
    DbgBreakIf((pdev->vars.mac_type != MAC_TYPE_EMAC) && (pdev->vars.mac_type != MAC_TYPE_BMAC));
    return NULL;
}
/**
 * Execute the DMAE transfer that snapshots the HW (MAC + NIG) statistics
 * into their host buffers. Must only run while HW stats are active (PMF).
 * An aborted transfer (e.g. during reset) is reported as success.
 */
lm_status_t lm_stats_dmae( lm_device_t *pdev )
{
    lm_dmae_context_t*   context   = lm_dmae_get(pdev, LM_DMAE_STATS)->context;
    lm_dmae_operation_t* operation = lm_stats_get_dmae_operation(pdev);
    lm_status_t          lm_status = LM_STATUS_SUCCESS;

    DbgBreakIf( FALSE == LM_STATS_IS_HW_ACTIVE( pdev ) );

    if (NULL == operation)
    {
        DbgBreakIf( NULL == operation );
        return LM_STATUS_FAILURE;
    }

    lm_status = lm_dmae_context_execute(pdev, context, operation);

    /* abort (reset flow) is not an error for statistics */
    if (LM_STATUS_ABORTED == lm_status)
    {
        lm_status = LM_STATUS_SUCCESS;
    }

    return lm_status;
}
/**
 * Reset the port's EMAC statistic counters by reading every one of them -
 * these registers appear to be clear-on-read (TODO confirm against the
 * EMAC register spec); the values read are discarded.
 */
lm_status_t lm_stats_clear_emac_stats( lm_device_t *pdev )
{
    struct { u32_t start; u32_t count; } ranges[3];
    u32_t emac_base = 0;
    u32_t range     = 0;
    u32_t idx       = 0;
    u32_t dummy     = 0;

    if CHK_NULL( pdev )
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    /* RX counters, the lone RX_STAT_AC_28 register, then TX counters. */
    ranges[0].start = EMAC_REG_EMAC_RX_STAT_AC;
    ranges[0].count = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
    ranges[1].start = EMAC_REG_EMAC_RX_STAT_AC_28;
    ranges[1].count = 1;
    ranges[2].start = EMAC_REG_EMAC_TX_STAT_AC;
    ranges[2].count = EMAC_REG_EMAC_TX_STAT_AC_COUNT;

    emac_base = (0 == PORT_ID(pdev)) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;

    for (range = 0; range < ARRSIZE(ranges); range++)
    {
        for (idx = 0; idx < ranges[range].count; idx++)
        {
            /* read-to-clear; the value itself is irrelevant */
            dummy = REG_RD(pdev, emac_base + ranges[range].start + (idx * sizeof(u32_t)));
        }
    }

    return LM_STATUS_SUCCESS;
}
/**
 * Snapshot statistics ahead of a state change (e.g. link down): DMA the HW
 * counters while they are still valid, fold them into the mirror, and
 * refresh the management-FW mirror.
 *
 * @return LM_STATUS_LINK_DOWN when no MAC was ever configured,
 *         LM_STATUS_INVALID_PARAMETER for a NULL pdev,
 *         otherwise the status of the HW snapshot (success if HW inactive).
 */
lm_status_t lm_stats_on_update_state(lm_device_t * pdev )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    if CHK_NULL( pdev )
    {
        DbgBreakIf( !pdev );
        return LM_STATUS_INVALID_PARAMETER;
    }

    /* no MAC configured - we are already "down" */
    if( MAC_TYPE_NONE == pdev->vars.mac_type )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_on_link_update: linking down when already linked down\n" );
        return LM_STATUS_LINK_DOWN;
    }

    if ( LM_STATS_IS_HW_ACTIVE(pdev) )
    {
        lm_status = lm_stats_dmae( pdev );
        DbgBreakIf( LM_STATUS_SUCCESS != lm_status );
        lm_stats_hw_assign( pdev );
    }

    lm_stats_mgmt_assign( pdev );

    return lm_status;
}
/**
 * Link-state notification for the statistics module.
 * Link up: just record the fact; collection resumes on the next timer tick.
 * Link down: take a final HW snapshot (PMF only), clear the EMAC HW
 * counters where applicable, and zero the "current MAC" mirror slot.
 *
 * @param b_is_link_up new link state
 */
lm_status_t lm_stats_on_link_update( lm_device_t *pdev, const u8_t b_is_link_up )
{
lm_status_t lm_status = LM_STATUS_SUCCESS ;
if CHK_NULL( pdev )
{
DbgBreakIf( !pdev ) ;
return LM_STATUS_INVALID_PARAMETER ;
}
if( FALSE == b_is_link_up )
{
pdev->vars.stats.stats_collect.stats_hw.b_is_link_up = FALSE ;
/* Non-PMF functions have no HW counters to snapshot or clear. */
if ( FALSE == LM_STATS_IS_HW_ACTIVE(pdev) )
{
return LM_STATUS_SUCCESS;
}
/* Final snapshot while the MAC counters are still valid. */
lm_status = lm_stats_on_update_state(pdev);
if( LM_STATUS_SUCCESS != lm_status )
{
return lm_status ;
}
switch( pdev->vars.mac_type )
{
case MAC_TYPE_EMAC:
/* EMAC counters are cleared explicitly by reading them. */
lm_stats_clear_emac_stats( pdev ) ;
break;
case MAC_TYPE_BMAC:
/* BMAC needs no explicit clear here. */
break;
case MAC_TYPE_UMAC:
case MAC_TYPE_XMAC:
/* UMAC/XMAC exist only on E3 devices. */
DbgBreakIf(!CHIP_IS_E3(pdev));
break;
default:
/* fallthrough - unknown types treated like MAC_TYPE_NONE */
case MAC_TYPE_NONE:
DbgBreakMsg( "mac_type not acceptable\n" ) ;
return LM_STATUS_INVALID_PARAMETER ;
}
/* Zero the "current" MAC mirror so stale values are not reported
 * while the link is down. */
mm_mem_zero( &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT],
sizeof(pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT]) ) ;
}
else
{
pdev->vars.stats.stats_collect.stats_hw.b_is_link_up = TRUE ;
}
return lm_status ;
}
/**
 * Allocate one contiguous non-cached physical buffer for the HW statistics
 * DMAE targets and carve it up:
 *   without MSTAT:  [ EMAC query | BMAC query (bmac1/bmac2 overlay) | NIG ]
 *   with MSTAT:     [ MSTAT query | NIG ]
 * Both the virtual and physical addresses of each sub-region are recorded.
 */
static lm_status_t lm_stats_alloc_hw_query(lm_device_t *pdev)
{
lm_stats_hw_collect_t* stats_hw = &(pdev->vars.stats.stats_collect.stats_hw);
u32_t alloc_size = 0 ;
u32_t mac_stats_alloc_size = 0;
lm_address_t phys_addr = {{0}};
if(!HAS_MSTAT(pdev))
{
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: device has no MSTAT block.\n");
/* EMAC and BMAC regions are laid out back to back, NIG after them. */
mac_stats_alloc_size = sizeof(struct _stats_emac_query_t) + sizeof( union _stats_bmac_query_t);
alloc_size = mac_stats_alloc_size + sizeof( struct _stats_nig_query_t ) ;
stats_hw->u.s.addr_emac_stats_query = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
/* Record the physical address of each carve-out. */
stats_hw->mac_stats_phys_addr = phys_addr;
LM_INC64(&phys_addr, sizeof(struct _stats_emac_query_t));
stats_hw->bmac_stats_phys_addr = phys_addr;
LM_INC64(&phys_addr, sizeof( union _stats_bmac_query_t));
stats_hw->nig_stats_phys_addr= phys_addr;
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: allocated a block of size %d at %x\n", alloc_size, stats_hw->u.s.addr_emac_stats_query);
if CHK_NULL( stats_hw->u.s.addr_emac_stats_query )
{
DbgBreakIf(!stats_hw->u.s.addr_emac_stats_query );
return LM_STATUS_FAILURE ;
}
/* bmac1/bmac2 are two views (union) of the same BMAC region. */
stats_hw->u.s.addr_bmac1_stats_query = (struct _stats_bmac1_query_t*)((u8_t*)stats_hw->u.s.addr_emac_stats_query + sizeof(struct _stats_emac_query_t)) ;
stats_hw->u.s.addr_bmac2_stats_query = (struct _stats_bmac2_query_t*)((u8_t*)stats_hw->u.s.addr_emac_stats_query + sizeof(struct _stats_emac_query_t)) ;
stats_hw->addr_nig_stats_query = (struct _stats_nig_query_t*)((u8_t*)stats_hw->u.s.addr_bmac1_stats_query + sizeof(union _stats_bmac_query_t)) ;
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: addr_bmac1_stats_query = %x, addr_bmac2_stats_query=%x, addr_nig_stats_query=%x\n", stats_hw->u.s.addr_bmac1_stats_query, stats_hw->u.s.addr_bmac2_stats_query, stats_hw->addr_nig_stats_query);
}
else
{
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: device has an MSTAT block.\n");
/* MSTAT replaces both EMAC and BMAC regions; NIG follows it. */
mac_stats_alloc_size = sizeof(struct _stats_mstat_query_t);
alloc_size = mac_stats_alloc_size + sizeof( struct _stats_nig_query_t );
stats_hw->u.addr_mstat_stats_query = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
stats_hw->mac_stats_phys_addr = phys_addr;
LM_INC64(&phys_addr, mac_stats_alloc_size);
stats_hw->nig_stats_phys_addr = phys_addr;
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: allocated a block of size %d at %x\n", alloc_size, stats_hw->u.addr_mstat_stats_query);
if CHK_NULL( stats_hw->u.addr_mstat_stats_query )
{
DbgBreakIf(!stats_hw->u.addr_mstat_stats_query );
return LM_STATUS_FAILURE ;
}
stats_hw->addr_nig_stats_query = (struct _stats_nig_query_t*)((u8_t*)stats_hw->u.addr_mstat_stats_query + sizeof(struct _stats_mstat_query_t)) ;
DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: stats_hw->addr_nig_stats_query=%x\n", stats_hw->addr_nig_stats_query);
}
return LM_STATUS_SUCCESS;
}
/**
 * Allocate the single physical buffer used for FW statistics queries:
 * the request area (header + query command groups) followed immediately
 * by the data area (port / pf / per-queue / toe / fcoe / counters).
 * Virtual and physical addresses of both areas are recorded.
 *
 * @return LM_STATUS_SUCCESS or LM_STATUS_RESOURCE on allocation failure.
 */
lm_status_t lm_stats_alloc_fw_resc (struct _lm_device_t *pdev)
{
lm_stats_fw_collect_t * stats_fw = &pdev->vars.stats.stats_collect.stats_fw;
u32_t num_groups = 0;
u32_t alloc_size = 0;
u8_t num_queue_stats = 1;
/* base queries: port, pf, toe, fcoe, one queue (see *_QUERY_IDX users) */
#define NUM_FW_STATS_REQS 5
stats_fw->fw_static_stats_num = stats_fw->fw_stats_num = NUM_FW_STATS_REQS;
#ifndef __LINUX
/* A channel-virt master PF adds two queries per potential VF. */
if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
stats_fw->fw_stats_num += pdev->hw_info.sriov_info.total_vfs * 2;
}
#endif
/* Number of query command groups, rounded up to cover all queries. */
num_groups = (stats_fw->fw_stats_num) / STATS_QUERY_CMD_COUNT +
(((stats_fw->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
#ifndef __LINUX
if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
DbgMessage(pdev, WARN, "%d stats groups to support %d VFs\n",num_groups, pdev->hw_info.sriov_info.total_vfs);
}
#endif
stats_fw->fw_stats_req_sz = sizeof(struct stats_query_header) +
num_groups * sizeof(struct stats_query_cmd_group);
stats_fw->fw_stats_data_sz = sizeof(struct per_port_stats) +
sizeof(struct per_pf_stats) +
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct toe_stats_query) +
sizeof(struct fcoe_statistics_params) +
sizeof(struct stats_counter);
/* One allocation holds the request area followed by the data area. */
alloc_size = stats_fw->fw_stats_data_sz + stats_fw->fw_stats_req_sz;
stats_fw->fw_stats = mm_alloc_phys_mem(pdev, alloc_size, &stats_fw->fw_stats_mapping ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
if (!stats_fw->fw_stats)
{
return LM_STATUS_RESOURCE;
}
/* Request area sits at the start of the buffer... */
stats_fw->fw_stats_req = (lm_stats_fw_stats_req_t *)stats_fw->fw_stats;
stats_fw->fw_stats_req_mapping = stats_fw->fw_stats_mapping;
/* ...data area starts fw_stats_req_sz bytes in (virtual and physical). */
stats_fw->fw_stats_data = (lm_stats_fw_stats_data_t *)
((u8*)stats_fw->fw_stats + stats_fw->fw_stats_req_sz);
stats_fw->fw_stats_data_mapping = stats_fw->fw_stats_mapping;
LM_INC64(&stats_fw->fw_stats_data_mapping, stats_fw->fw_stats_req_sz);
return LM_STATUS_SUCCESS;
}
/**
 * Allocate the buffer through which driver statistics are reported to the
 * management FW. Only E3 devices use this interface; on other chips this
 * is a no-op returning success. A single buffer is shared by the eth,
 * iscsi and fcoe reports, sized for the largest of the three.
 *
 * @return LM_STATUS_SUCCESS or LM_STATUS_RESOURCE on allocation failure.
 */
static lm_status_t lm_stats_alloc_drv_info_to_mfw_resc(lm_device_t *pdev)
{
    lm_stats_drv_info_to_mfw_t* drv_info_to_mfw = &(pdev->vars.stats.stats_collect.drv_info_to_mfw);
    lm_status_t                 lm_status       = LM_STATUS_SUCCESS;
    lm_address_t                phys_addr       = {{0}};
    u32_t                       alloc_size      = 0;

    if( CHIP_IS_E3(pdev) )
    {
        /* size for the largest of the three report types */
        alloc_size = max( ( sizeof( *drv_info_to_mfw->addr.eth_stats ) ),
                          ( sizeof( *drv_info_to_mfw->addr.iscsi_stats ) ) );
        alloc_size = max( ( sizeof( *drv_info_to_mfw->addr.fcoe_stats ) ), alloc_size );

        drv_info_to_mfw->addr.eth_stats = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
        if( !drv_info_to_mfw->addr.eth_stats )
        {
            lm_status = LM_STATUS_RESOURCE;
        }
        drv_info_to_mfw->drv_info_to_mfw_phys_addr = phys_addr;
    }

    return lm_status;
}
/**
 * Allocate all statistics resources: FW query buffers, the driver-info
 * buffer for the management FW, the HW DMAE target buffers, and the DMAE
 * context plus operation objects used to read the MAC/NIG counters.
 *
 * Fix vs. previous revision: the results of lm_dmae_operation_create_sgl
 * are now NULL-checked so an allocation failure is reported here as
 * LM_STATUS_RESOURCE instead of surfacing later at collection time.
 *
 * @return LM_STATUS_SUCCESS, or the first failing step's status.
 */
lm_status_t lm_stats_alloc_resc( struct _lm_device_t* pdev )
{
    u8_t                    loader_channel_idx      = (u8_t)(-1);
    u8_t                    executer_channel_idx    = (u8_t)(-1);
    lm_status_t             lm_status               = LM_STATUS_SUCCESS;
    lm_dmae_context_info_t *stats_dmae_context_info = lm_dmae_get(pdev, LM_DMAE_STATS);

    if CHK_NULL(pdev )
    {
        DbgBreakIf(!pdev) ;
        return LM_STATUS_INVALID_PARAMETER ;
    }

    lm_status = lm_stats_alloc_fw_resc(pdev);
    if( lm_status != LM_STATUS_SUCCESS )
    {
        return lm_status;
    }

    lm_status = lm_stats_alloc_drv_info_to_mfw_resc(pdev);
    if( lm_status != LM_STATUS_SUCCESS )
    {
        return lm_status;
    }

    lm_status = lm_stats_alloc_hw_query(pdev);
    if(lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }

    /* Each port owns a dedicated pair of DMAE command channels
     * (loader + executer). */
    switch (PORT_ID(pdev))
    {
    case 0:
        loader_channel_idx   = DMAE_STATS_PORT_0_CMD_IDX_0;
        executer_channel_idx = DMAE_STATS_PORT_0_CMD_IDX_1;
        break;
    case 1:
        loader_channel_idx   = DMAE_STATS_PORT_1_CMD_IDX_0;
        executer_channel_idx = DMAE_STATS_PORT_1_CMD_IDX_1;
        break;
    default:
        DbgMessage(NULL, FATAL, "Invalid Port ID %d\n", PORT_ID(pdev));
        DbgBreak();
        return LM_STATUS_INVALID_PARAMETER;
    }

    lm_status = lm_dmae_locking_policy_create(pdev, LM_PROTECTED_RESOURCE_DMAE_STATS, LM_DMAE_LOCKING_POLICY_TYPE_PER_PF, &stats_dmae_context_info->locking_policy);
    if(lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }

    stats_dmae_context_info->context = lm_dmae_context_create_sgl( pdev,
                                                                   loader_channel_idx,
                                                                   executer_channel_idx,
                                                                   &stats_dmae_context_info->locking_policy,
                                                                   CHANGE_ENDIANITY);
    if (CHK_NULL(stats_dmae_context_info->context))
    {
        DbgBreak();
        return LM_STATUS_FAILURE;
    }

    /* Shared operation for BMAC/MSTAT reads - fail early if allocation
     * failed rather than discovering a NULL operation at collection time. */
    pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation = lm_dmae_operation_create_sgl(pdev, TRUE, stats_dmae_context_info->context);
    if (CHK_NULL(pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation))
    {
        DbgBreak();
        return LM_STATUS_RESOURCE;
    }

    if (!HAS_MSTAT(pdev))
    {
        /* EMAC needs its own operation (different SGE layout). */
        pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation = lm_dmae_operation_create_sgl(pdev, TRUE, stats_dmae_context_info->context);
        if (CHK_NULL(pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation))
        {
            DbgBreak();
            return LM_STATUS_RESOURCE;
        }
    }
    else
    {
        /* MSTAT devices have no separate EMAC statistics block. */
        pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation = NULL;
    }

    return LM_STATUS_SUCCESS;
}
/**
 * Append the per-port NIG statistics block (starting at the BRB discard
 * counter) as one more SGE on an existing stats DMAE operation.
 */
static lm_status_t lm_stats_hw_setup_nig(lm_device_t* pdev, lm_dmae_operation_t* dmae_operation)
{
    const u32_t       nig_grc_src = (0 == PORT_ID(pdev)) ? NIG_REG_STAT0_BRB_DISCARD
                                                         : NIG_REG_STAT1_BRB_DISCARD;
    lm_dmae_address_t source      = lm_dmae_address(nig_grc_src, LM_DMAE_ADDRESS_GRC);
    lm_dmae_address_t dest        = lm_dmae_address(pdev->vars.stats.stats_collect.stats_hw.nig_stats_phys_addr.as_u64,
                                                    LM_DMAE_ADDRESS_HOST_PHYS);

    /* DMAE lengths are expressed in 32-bit words. */
    return lm_dmae_operation_add_sge(pdev, dmae_operation, source, dest,
                                     sizeof(struct _stats_nig_query_t) / sizeof(u32_t));
}
/* Describes one DMAE scatter-gather entry for HW statistics collection:
 * copy 'length' 32-bit words from GRC offset 'source_offset' into host
 * physical address 'dest_paddr'. */
struct lm_stats_sge_descr_t{
u32_t source_offset; /* GRC source offset */
u64_t dest_paddr; /* host physical destination */
u16_t length; /* transfer length in 32-bit words */
};
/**
 * Rebuild a DMAE operation's SGE list from an array of descriptors:
 * all existing SGEs are cleared, then one GRC->host SGE is added per
 * descriptor. Stops (with a debug break) at the first failure.
 */
static lm_status_t lm_stats_set_dmae_operation_sges(lm_device_t* pdev, lm_dmae_operation_t* operation, struct lm_stats_sge_descr_t* sge_descr, u8_t num_sges)
{
    lm_status_t       lm_status = LM_STATUS_SUCCESS;
    lm_dmae_address_t src       = {{0}};
    lm_dmae_address_t dst       = {{0}};
    u8_t              idx       = 0;

    lm_dmae_operation_clear_all_sges(operation);

    for (idx = 0; idx < num_sges; ++idx)
    {
        src = lm_dmae_address(sge_descr[idx].source_offset, LM_DMAE_ADDRESS_GRC);
        dst = lm_dmae_address(sge_descr[idx].dest_paddr, LM_DMAE_ADDRESS_HOST_PHYS);

        lm_status = lm_dmae_operation_add_sge(pdev, operation, src, dst, sge_descr[idx].length);
        if (LM_STATUS_SUCCESS != lm_status)
        {
            DbgBreak();
            return lm_status;
        }
    }

    return lm_status;
}
/**
 * Program the EMAC statistics DMAE operation: three SGEs (RX block, the
 * single false-carrier RX error counter, TX block) laid out back to back
 * in the host buffer, followed by the shared NIG SGE.
 */
static lm_status_t lm_stats_hw_setup_emac( lm_device_t* pdev)
{
    lm_stats_hw_collect_t*      stats_hw   = &pdev->vars.stats.stats_collect.stats_hw;
    lm_dmae_operation_t*        operation  = stats_hw->emac_dmae_operation;
    const u64_t                 base_paddr = stats_hw->mac_stats_phys_addr.as_u64;
    const u32_t                 emac_base  = (PORT_ID(pdev) == 0) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
    const u16_t                 rx_len     = sizeof(stats_hw->u.s.addr_emac_stats_query->stats_rx);
    const u16_t                 rx_err_len = sizeof(stats_hw->u.s.addr_emac_stats_query->stats_rx_err);
    struct lm_stats_sge_descr_t sges[3]    = {{0}};
    lm_status_t                 lm_status  = LM_STATUS_FAILURE;

    /* SGE 0: RX statistics block */
    sges[0].source_offset = emac_base + EMAC_REG_EMAC_RX_STAT_IFHCINOCTETS;
    sges[0].dest_paddr    = base_paddr;
    sges[0].length        = EMAC_REG_EMAC_RX_STAT_AC_COUNT;

    /* SGE 1: lone RX error counter (false carrier errors) */
    sges[1].source_offset = emac_base + EMAC_REG_EMAC_RX_STAT_FALSECARRIERERRORS;
    sges[1].dest_paddr    = base_paddr + rx_len;
    sges[1].length        = 1;

    /* SGE 2: TX statistics block */
    sges[2].source_offset = emac_base + EMAC_REG_EMAC_TX_STAT_IFHCOUTOCTETS;
    sges[2].dest_paddr    = base_paddr + rx_len + rx_err_len;
    sges[2].length        = EMAC_REG_EMAC_TX_STAT_AC_COUNT;

    lm_status = lm_stats_set_dmae_operation_sges(pdev, operation, sges, ARRSIZE(sges));
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize EMAC stats DMAE operation.\n");
        return lm_status;
    }

    /* NIG counters ride on the same operation. */
    lm_status = lm_stats_hw_setup_nig(pdev, operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize NIG stats DMAE operation.\n");
        return lm_status;
    }

    return lm_status;
}
/**
 * Program the shared non-EMAC (BMAC/MSTAT) statistics DMAE operation:
 * two register blocks copied into the host buffer back to back, followed
 * by the shared NIG SGE.
 *
 * @param paddr_base   host physical base of the MAC stats buffer
 * @param grc_base     GRC base of the MAC block
 * @param block1_start first block's offset within the MAC block
 * @param block1_size  first block's size in bytes
 * @param block2_start second block's offset within the MAC block
 * @param block2_size  second block's size in bytes
 */
static lm_status_t lm_stats_hw_setup_non_emac( lm_device_t* pdev,
                                               u64_t paddr_base,
                                               u32_t grc_base,
                                               u32_t block1_start, u16_t block1_size,
                                               u32_t block2_start, u16_t block2_size)
{
    lm_dmae_operation_t*        operation = (lm_dmae_operation_t*)pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation;
    struct lm_stats_sge_descr_t sges[2]   = {{0}};
    lm_status_t                 lm_status = LM_STATUS_FAILURE;

    /* sizes are bytes; DMAE lengths are 32-bit words */
    sges[0].source_offset = grc_base + block1_start;
    sges[0].dest_paddr    = paddr_base;
    sges[0].length        = block1_size / sizeof(u32_t);

    /* second block lands immediately after the first in host memory */
    sges[1].source_offset = grc_base + block2_start;
    sges[1].dest_paddr    = paddr_base + block1_size;
    sges[1].length        = block2_size / sizeof(u32_t);

    lm_status = lm_stats_set_dmae_operation_sges(pdev, operation, sges, ARRSIZE(sges));
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize non-EMAC stats DMAE operation.\n");
        return lm_status;
    }

    lm_status = lm_stats_hw_setup_nig(pdev, operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize NIG stats DMAE operation.\n");
        return lm_status;
    }

    return lm_status;
}
/**
 * Configure the BMAC statistics DMAE operation (devices without MSTAT):
 * picks the per-port BMAC GRC base and the BMAC1 (E1x) or BMAC2 register
 * layout, then delegates to the shared non-EMAC setup (TX block first,
 * RX block after it).
 */
static lm_status_t lm_stats_hw_setup_bmac(lm_device_t* pdev)
{
const u32_t port = PORT_ID(pdev) ;
u32_t bmac_base = 0 ;
u32_t bmac_tx_start_reg, bmac_rx_start_reg;
u16_t bmac_tx_stat_size, bmac_rx_stat_size;
lm_status_t lm_status = LM_STATUS_FAILURE;
/* MSTAT devices must use lm_stats_hw_setup_mstat instead. */
DbgBreakIf(HAS_MSTAT(pdev));
switch( port )
{
case 0:
bmac_base = NIG_REG_INGRESS_BMAC0_MEM ;
break;
case 1:
bmac_base = NIG_REG_INGRESS_BMAC1_MEM;
/* On E2+ only BMAC0 is used - fall back to it and log. */
if (!CHIP_IS_E1x(pdev))
{
DbgMessage(pdev, INFORMi, "BMAC stats should never be collected on port 1 of E2!\n");
bmac_base = NIG_REG_INGRESS_BMAC0_MEM;
}
break;
default:
DbgBreakIf( port > 1 ) ;
break;
}
/* BMAC1 (E1x) and BMAC2 have different register maps and sizes. */
if (CHIP_IS_E1x(pdev))
{
bmac_tx_start_reg = BIGMAC_REGISTER_TX_STAT_GTPKT;
bmac_rx_start_reg = BIGMAC_REGISTER_RX_STAT_GR64;
bmac_tx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query->stats_tx);
bmac_rx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query->stats_rx);
}
else
{
bmac_tx_start_reg = BIGMAC2_REGISTER_TX_STAT_GTPOK;
bmac_rx_start_reg = BIGMAC2_REGISTER_RX_STAT_GR64;
bmac_tx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query->stats_tx);
bmac_rx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query->stats_rx);
}
lm_status = lm_stats_hw_setup_non_emac(pdev,
pdev->vars.stats.stats_collect.stats_hw.bmac_stats_phys_addr.as_u64,
bmac_base,
bmac_tx_start_reg,
bmac_tx_stat_size,
bmac_rx_start_reg,
bmac_rx_stat_size);
return lm_status;
}
/**
 * Configure the MSTAT statistics DMAE operation (MSTAT-capable devices):
 * selects the per-port MSTAT GRC base and delegates to the shared
 * non-EMAC setup (TX block first, RX block after it in host memory).
 */
static lm_status_t lm_stats_hw_setup_mstat(lm_device_t* pdev)
{
    lm_stats_hw_collect_t* stats_hw   = &pdev->vars.stats.stats_collect.stats_hw;
    const u32_t            port       = PORT_ID(pdev);
    u32_t                  mstat_base = 0;
    u32_t                  tx_start, rx_start;
    u16_t                  tx_size, rx_size;
    lm_status_t            lm_status  = LM_STATUS_FAILURE;

    /* only valid on devices that actually have an MSTAT block */
    DbgBreakIf(!HAS_MSTAT(pdev));

    tx_start = MSTAT_REG_TX_STAT_GTXPOK_LO;
    tx_size  = sizeof(stats_hw->u.addr_mstat_stats_query->stats_tx);
    rx_start = MSTAT_REG_RX_STAT_GR64_LO;
    rx_size  = sizeof(stats_hw->u.addr_mstat_stats_query->stats_rx);

    DbgMessage(pdev, INFORM, "lm_stats_hw_setup_mstat: mstat_tx_start=%x, mstat_tx_size=%x, mstat_rx_start=%x, mstat_rx_size=%x\n",tx_start,tx_size,rx_start, rx_size);

    switch(port)
    {
    case 0:
        mstat_base = GRCBASE_MSTAT0;
        break;
    case 1:
        mstat_base = GRCBASE_MSTAT1;
        break;
    default:
        DbgBreakIf( port > 1 );
        break;
    }

    lm_status = lm_stats_hw_setup_non_emac(pdev,
                                           stats_hw->mac_stats_phys_addr.as_u64,
                                           mstat_base,
                                           tx_start,
                                           tx_size,
                                           rx_start,
                                           rx_size);
    return lm_status;
}
/**
 * Top-level HW statistics setup: program the DMAE operations for the
 * device's statistics source (MSTAT, or EMAC+BMAC on older chips) and
 * initialize the collection-state flags. On MSTAT devices collection is
 * enabled on ASICs, and on emulation only when bonding is off; non-MSTAT
 * devices always enable collection.
 */
lm_status_t lm_stats_hw_setup(struct _lm_device_t *pdev)
{
    lm_status_t lm_status        = LM_STATUS_SUCCESS;
    u8_t        b_enable_collect = FALSE;

    if (HAS_MSTAT(pdev))
    {
        b_enable_collect = (CHIP_REV_IS_EMUL(pdev) && (CHIP_BONDING(pdev) == 0)) || CHIP_REV_IS_ASIC(pdev);

        lm_status = lm_stats_hw_setup_mstat(pdev);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(NULL, FATAL, "Failed to initialize MSTAT statistics\n");
            return lm_status;
        }
    }
    else
    {
        b_enable_collect = TRUE;

        lm_status = lm_stats_hw_setup_emac(pdev);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(NULL, FATAL, "Failed to initialize EMAC statistics\n");
            return lm_status;
        }

        lm_status = lm_stats_hw_setup_bmac(pdev);
        if (lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(NULL, FATAL, "Failed to initialize BMAC statistics\n");
            return lm_status;
        }
    }

    /* collection starts once the link comes up */
    pdev->vars.stats.stats_collect.stats_hw.b_is_link_up      = FALSE;
    pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled = b_enable_collect;

    return lm_status;
}
/**
 * lm_stats_prep_fw_stats_req - build the firmware statistics request.
 *
 * Fills the statistics-query header and the PORT / PF / TOE / FCOE / QUEUE
 * query entries, pointing each at its slice of the per-driver DMA-able
 * statistics data buffer (fw_stats_data). On E1x chips the FCoE query is
 * omitted and the command count reduced accordingly.
 *
 * Fix: removed a stray empty statement (double semicolon) after the PORT
 * query funcID assignment.
 *
 * @param pdev  device context
 */
static void lm_stats_prep_fw_stats_req(lm_device_t *pdev)
{
    lm_stats_fw_collect_t     *stats_fw        = &pdev->vars.stats.stats_collect.stats_fw;
    struct stats_query_header *stats_hdr       = &stats_fw->fw_stats_req->hdr;
    lm_address_t               cur_data_offset = {{0}};
    struct stats_query_entry  *cur_query_entry = NULL;

    stats_hdr->cmd_num           = stats_fw->fw_stats_num;
    stats_hdr->drv_stats_counter = 0;

    /* Point the header at the storm-counters area of the data buffer. */
    cur_data_offset = stats_fw->fw_stats_data_mapping;
    LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, storm_counters));
    stats_hdr->stats_counters_addrs.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
    stats_hdr->stats_counters_addrs.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);

    /* Prefill the storm counters with 0xff — presumably so the counters do
     * not match drv_stats_counter until FW actually writes them (TODO confirm
     * against LM_STATS_VERIFY_COUNTER semantics). */
    mm_memset(&stats_fw->fw_stats_data->storm_counters, 0xff, sizeof(stats_fw->fw_stats_data->storm_counters));

    /* PORT query. */
    cur_data_offset = stats_fw->fw_stats_data_mapping;
    LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, port));

    cur_query_entry             = &stats_fw->fw_stats_req->query[LM_STATS_PORT_QUERY_IDX];
    cur_query_entry->kind       = STATS_TYPE_PORT;
    cur_query_entry->index      = PORT_ID(pdev);
    cur_query_entry->funcID     = mm_cpu_to_le16(FUNC_ID(pdev));
    cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
    cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);

    /* PF query. */
    cur_data_offset = stats_fw->fw_stats_data_mapping;
    LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, pf));

    cur_query_entry             = &stats_fw->fw_stats_req->query[LM_STATS_PF_QUERY_IDX];
    cur_query_entry->kind       = STATS_TYPE_PF;
    cur_query_entry->index      = PORT_ID(pdev);
    cur_query_entry->funcID     = mm_cpu_to_le16(FUNC_ID(pdev));
    cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
    cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);

    /* TOE query. */
    cur_data_offset = stats_fw->fw_stats_data_mapping;
    LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, toe));

    ASSERT_STATIC(LM_STATS_TOE_IDX < ARRSIZE(stats_fw->fw_stats_req->query));
    cur_query_entry             = &stats_fw->fw_stats_req->query[LM_STATS_TOE_IDX];
    cur_query_entry->kind       = STATS_TYPE_TOE;
    cur_query_entry->index      = LM_STATS_CNT_ID(pdev);
    cur_query_entry->funcID     = mm_cpu_to_le16(FUNC_ID(pdev));
    cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
    cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);

    if ( !CHIP_IS_E1x(pdev) )
    {
        /* FCoE query — E2 and newer only. */
        cur_data_offset = stats_fw->fw_stats_data_mapping;
        LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, fcoe));

        ASSERT_STATIC(LM_STATS_FCOE_IDX < ARRSIZE(stats_fw->fw_stats_req->query));
        cur_query_entry             = &stats_fw->fw_stats_req->query[LM_STATS_FCOE_IDX];
        cur_query_entry->kind       = STATS_TYPE_FCOE;
        cur_query_entry->index      = LM_STATS_CNT_ID(pdev);
        cur_query_entry->funcID     = mm_cpu_to_le16(FUNC_ID(pdev));
        cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
        cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
    }
    else
    {
        /* No FCoE statistics on E1x: drop that query from the count. */
        --stats_hdr->cmd_num;
    }

    /* QUEUE query. */
    cur_data_offset = stats_fw->fw_stats_data_mapping;
    LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, queue_stats));

    ASSERT_STATIC(LM_STATS_FIRST_QUEUE_QUERY_IDX < ARRSIZE(stats_fw->fw_stats_req->query));
    cur_query_entry             = &stats_fw->fw_stats_req->query[LM_STATS_FIRST_QUEUE_QUERY_IDX];
    cur_query_entry->kind       = STATS_TYPE_QUEUE;
    cur_query_entry->index      = LM_STATS_CNT_ID(pdev);
    cur_query_entry->funcID     = mm_cpu_to_le16(FUNC_ID(pdev));
    cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
    cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
}
#ifdef VF_INVOLVED
/**
 * lm_stats_prep_vf_fw_stats_req - append per-VF queue queries to the
 * firmware statistics request.
 *
 * For every enabled VF whose statistics request was submitted, adds up to
 * two STATS_TYPE_QUEUE entries (one collected on behalf of the PF, one on
 * behalf of the VF itself) and moves the VF to the IN_PROCESSING state.
 * Runs under the VFs statistics lock.
 *
 * @param pdev  PF device context
 */
void lm_stats_prep_vf_fw_stats_req(lm_device_t *pdev)
{
    lm_stats_fw_collect_t     *stats_fw    = &pdev->vars.stats.stats_collect.stats_fw;
    struct stats_query_header *stats_hdr   = &stats_fw->fw_stats_req->hdr;
    struct stats_query_entry  *query       = &stats_fw->fw_stats_req->query[LM_STATS_FIRST_VF_QUEUE_QUERY_IDX];
    lm_vf_info_t              *vf_info     = NULL;
    u8_t                       num_vf_cmds = 0;
    u8_t                       vf_idx      = 0;

    MM_ACQUIRE_VFS_STATS_LOCK_DPC(pdev);

    for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++)
    {
        u8_t b_process = FALSE;

        vf_info = &pdev->vfs_set.vfs_array[vf_idx];

        if (vf_info->vf_stats.vf_stats_state != VF_STATS_REQ_SUBMITTED)
        {
            continue;
        }

        if (vf_info->vf_stats.vf_stats_flag & VF_STATS_COLLECT_FW_STATS_FOR_PF)
        {
            /* Queue query whose results land in the PF-owned buffer. */
            query->kind       = STATS_TYPE_QUEUE;
            query->index      = LM_FW_VF_STATS_CNT_ID(vf_info);
            query->funcID     = mm_cpu_to_le16(FUNC_ID(pdev));
            query->address.hi = mm_cpu_to_le32(vf_info->vf_stats.pf_fw_stats_phys_data.as_u32.high);
            query->address.lo = mm_cpu_to_le32(vf_info->vf_stats.pf_fw_stats_phys_data.as_u32.low);
            b_process = TRUE;
            query++;
            num_vf_cmds++;
        }

        if (vf_info->vf_stats.vf_stats_flag & VF_STATS_COLLECT_FW_STATS_FOR_VF)
        {
            /* Queue query on behalf of the VF function itself (funcID 8+abs_vf_id). */
            query->kind       = STATS_TYPE_QUEUE;
            query->index      = LM_FW_VF_STATS_CNT_ID(vf_info);
            query->funcID     = mm_cpu_to_le16(8 + vf_info->abs_vf_id);
            query->address.hi = mm_cpu_to_le32(vf_info->vf_stats.vf_fw_stats_phys_data.as_u32.high);
            query->address.lo = mm_cpu_to_le32(vf_info->vf_stats.vf_fw_stats_phys_data.as_u32.low);
            b_process = TRUE;
            query++;
            num_vf_cmds++;
        }

        if (b_process)
        {
            vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_IN_PROCESSING;
            vf_info->vf_stats.vf_stats_cnt++;
        }
    }

    stats_hdr->cmd_num = stats_fw->fw_static_stats_num + num_vf_cmds;

    MM_RELEASE_VFS_STATS_LOCK_DPC(pdev);
}
#endif
/**
 * lm_stats_fw_setup - initialize firmware statistics collection state.
 *
 * Resets the firmware-statistics bookkeeping, pre-fills the slow-path
 * element used for the STAT_QUERY ramrod, and builds the statistics
 * request that the ramrod will carry.
 *
 * @param pdev  device context
 */
void lm_stats_fw_setup(struct _lm_device_t *pdev)
{
    lm_stats_fw_collect_t *stats_fw = &pdev->vars.stats.stats_collect.stats_fw;

    /* No statistics cycle is in flight at setup time. */
    stats_fw->b_ramrod_completed = TRUE;
    stats_fw->b_completion_done  = TRUE;
    stats_fw->drv_counter        = 0;
    stats_fw->b_collect_enabled  = pdev->params.fw_stats_init_value;

    pdev->vars.stats.stats_collect.b_last_called = TRUE;

    /* Pre-fill the slow-path list element for the STAT_QUERY ramrod. */
    lm_sq_post_fill_entry(pdev,
                          &(stats_fw->stats_sp_list_command),
                          0,
                          RAMROD_CMD_ID_COMMON_STAT_QUERY,
                          NONE_CONNECTION_TYPE,
                          stats_fw->fw_stats_req_mapping.as_u64,
                          FALSE);

    /* Build the request the ramrod points at. */
    lm_stats_prep_fw_stats_req(pdev);
}
/**
 * lm_stats_fw_check_update_done - mark which storms have reported.
 *
 * Sets the per-storm LM_STATS_FLAG_* bit in *ptr_stats_flags_done for each
 * storm whose counter passes LM_STATS_VERIFY_COUNTER. Bits that are already
 * set are not re-checked. For a VF device all flags are set unconditionally.
 *
 * @param pdev                  device context
 * @param ptr_stats_flags_done  in/out bitmask of completed storms
 */
void lm_stats_fw_check_update_done( struct _lm_device_t *pdev, OUT u32_t* ptr_stats_flags_done )
{
    if CHK_NULL( ptr_stats_flags_done )
    {
        DbgBreakIf(!ptr_stats_flags_done);
        return;
    }

    if (IS_VFDEV(pdev))
    {
        /* VFs do not poll storm counters — consider everything done. */
        SET_FLAGS(*ptr_stats_flags_done, LM_STATS_FLAGS_ALL);
        return;
    }

/* Set _flag once the matching storm counter is verified; skip if already set. */
#define LM_STATS_CHECK_STORM_DONE(_flag, _counter)                                 \
    if (0 == GET_FLAGS(*ptr_stats_flags_done, _flag))                              \
    {                                                                              \
        if (LM_STATS_VERIFY_COUNTER(pdev, fw_stats_data->storm_counters._counter)) \
        {                                                                          \
            SET_FLAGS(*ptr_stats_flags_done, _flag);                               \
        }                                                                          \
    }

    LM_STATS_CHECK_STORM_DONE(LM_STATS_FLAG_XSTORM, xstats_counter);
    LM_STATS_CHECK_STORM_DONE(LM_STATS_FLAG_TSTORM, tstats_counter);
    LM_STATS_CHECK_STORM_DONE(LM_STATS_FLAG_USTORM, ustats_counter);
    LM_STATS_CHECK_STORM_DONE(LM_STATS_FLAG_CSTORM, cstats_counter);

#undef LM_STATS_CHECK_STORM_DONE
}
/* Completes one firmware statistics cycle: once the STAT_QUERY ramrod has
 * completed (PF only) and all four storms have updated their counters,
 * copies the collected firmware statistics into the driver mirror,
 * advances drv_counter and publishes b_completion_done.
 * Returns LM_STATUS_BUSY while the cycle is still in flight,
 * LM_STATUS_INVALID_PARAMETER on a NULL pdev, LM_STATUS_SUCCESS otherwise. */
lm_status_t lm_stats_fw_complete( struct _lm_device_t *pdev )
{
u32_t stats_flags_done = 0 ;
u32_t stats_flags_assigned = 0 ;
lm_status_t lm_status = LM_STATUS_SUCCESS;
if CHK_NULL( pdev )
{
DbgBreakIf( !pdev ) ;
return LM_STATUS_INVALID_PARAMETER;
}
/* PF only: the STAT_QUERY ramrod must have completed before the
 * storm counters are worth examining. */
if (IS_PFDEV(pdev) && (FALSE == pdev->vars.stats.stats_collect.stats_fw.b_ramrod_completed))
{
lm_status = LM_STATUS_BUSY;
}
else if (FALSE == pdev->vars.stats.stats_collect.stats_fw.b_completion_done)
{
/* See which storms have updated their counters this cycle. */
lm_stats_fw_check_update_done( pdev, &stats_flags_done ) ;
if ( LM_STATS_DO_ASSIGN_ANY( stats_flags_done, stats_flags_assigned) )
{
lm_stats_fw_assign( pdev, stats_flags_done, &stats_flags_assigned ) ;
#ifdef VF_INVOLVED
#ifndef __LINUX
/* A channel-VF master PF also assigns the statistics it collected
 * on behalf of its VFs, under the VFs statistics lock. */
if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
u32_t vf_stats_flags_assigned = 0;
MM_ACQUIRE_VFS_STATS_LOCK_DPC(pdev);
lm_pf_stats_vf_fw_assign( pdev, stats_flags_done, &vf_stats_flags_assigned);
MM_RELEASE_VFS_STATS_LOCK_DPC(pdev);
}
#endif
#endif
}
/* Stay busy until every storm has been assigned. */
if ERR_IF( LM_STATS_FLAGS_ALL != stats_flags_assigned )
{
lm_status = LM_STATUS_BUSY;
}
else
{
#ifdef VF_INVOLVED
#ifndef __LINUX
/* Cycle complete: re-arm each in-processing VF for the next cycle,
 * unless its collection was stopped or it was FLRed. */
if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
u8_t vf_idx;
lm_vf_info_t *vf_info;
MM_ACQUIRE_VFS_STATS_LOCK_DPC(pdev);
for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
vf_info = &pdev->vfs_set.vfs_array[vf_idx];
if (vf_info->vf_stats.vf_stats_state == VF_STATS_REQ_IN_PROCESSING) {
if (vf_info->vf_stats.stop_collect_stats || vf_info->was_flred) {
vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
} else {
vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_SUBMITTED;
}
}
}
MM_RELEASE_VFS_STATS_LOCK_DPC(pdev);
}
#endif
#endif
++pdev->vars.stats.stats_collect.stats_fw.drv_counter ;
if (IS_PFDEV(pdev))
{
/* Make the mirror updates visible before publishing completion. */
mm_write_barrier();
}
pdev->vars.stats.stats_collect.stats_fw.b_completion_done = TRUE ;
lm_status = LM_STATUS_SUCCESS;
}
}
return lm_status;
}
/* Fold the 32-bit XSTORM FCoE TX counters into their 64-bit mirror fields
 * (lm_update_wraparound_if_needed handles 32-bit wraparound). */
void
lm_stats_fw_assign_fcoe_xstorm(IN const struct fcoe_statistics_params* collect,
                               OUT lm_fcoe_stats_t* mirror)
{
    LM_SIGN_EXTEND_VALUE_32(collect->tx_stat.fcp_tx_pkt_cnt,   mirror->fcp_tx_pkt_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->tx_stat.fcoe_tx_pkt_cnt,  mirror->fcoe_tx_pkt_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->tx_stat.fcoe_tx_byte_cnt, mirror->fcoe_tx_byte_cnt);
}
/* Fold the 32-bit TSTORM FCoE RX counters into their 64-bit mirror fields
 * (lm_update_wraparound_if_needed handles 32-bit wraparound). */
void
lm_stats_fw_assign_fcoe_tstorm(IN const struct fcoe_statistics_params* collect,
                               OUT lm_fcoe_stats_t* mirror)
{
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat0.fcoe_rx_pkt_cnt,      mirror->fcoe_rx_pkt_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat0.fcoe_rx_byte_cnt,     mirror->fcoe_rx_byte_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat1.fcoe_ver_cnt,         mirror->fcoe_ver_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat1.fcoe_rx_drop_pkt_cnt, mirror->fcoe_rx_drop_pkt_cnt_tstorm);
}
/* Fold the 32-bit USTORM FCoE RX counters into their 64-bit mirror fields
 * (lm_update_wraparound_if_needed handles 32-bit wraparound). */
void
lm_stats_fw_assign_fcoe_ustorm(IN const struct fcoe_statistics_params* collect,
                               OUT lm_fcoe_stats_t* mirror)
{
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.seq_timeout_cnt,      mirror->seq_timeout_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.miss_frame_cnt,       mirror->miss_frame_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.fcp_rx_pkt_cnt,       mirror->fcp_rx_pkt_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.fcoe_rx_drop_pkt_cnt, mirror->fcoe_rx_drop_pkt_cnt_ustorm);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.fc_crc_cnt,           mirror->fc_crc_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.eofa_del_cnt,         mirror->eofa_del_cnt);
    LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.drop_seq_cnt,         mirror->drop_seq_cnt);
}
/**
 * lm_stats_fw_assign - copy collected firmware statistics into the mirror.
 *
 * For each storm whose "done" bit is set in stats_flags_done and whose
 * "assigned" bit is not yet set in *ptr_stats_flags_assigned, copies the
 * ETH / TOE / FCoE counters from the DMA-able collect buffer into the
 * driver mirror (32-bit counters are widened with wraparound handling via
 * LM_SIGN_EXTEND_VALUE_32), then sets the corresponding assigned bit.
 *
 * Fix: the TSTORM port mf_tag_discard counter was previously folded into
 * the client ttl0_discard mirror (copy-paste from the line above), which
 * both lost the mf_tag count and corrupted the ttl0 wraparound state; it
 * now goes to eth_tstorm_common.port_statistics.mf_tag_discard like the
 * other port-level discard counters.
 *
 * @param pdev                      device context
 * @param stats_flags_done          bitmask of storms that have reported
 * @param ptr_stats_flags_assigned  in/out bitmask of storms already assigned
 */
void lm_stats_fw_assign( struct _lm_device_t *pdev, u32_t stats_flags_done, u32_t* ptr_stats_flags_assigned )
{
    const u8_t cli_id  = LM_CLI_IDX_NDIS ;
    int        arr_cnt = 0 ;
    u8_t       i       = 0 ;

    if CHK_NULL( ptr_stats_flags_assigned )
    {
        DbgBreakIf(!ptr_stats_flags_assigned) ;
        return;
    }

/* Shorthands for copying a TOE counter from the collect buffer to the mirror. */
#define LM_STATS_FW_ASSIGN_TOE_REGPAIR(field_name) \
    REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.toe_##field_name, \
                   pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->toe.field_name ) ;

#define LM_STATS_FW_ASSIGN_TOE_U32(field_name) \
    LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->toe.field_name, \
                             pdev->vars.stats.stats_mirror.stats_fw.toe_##field_name ) ;

    if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) )
    {
        /* ETH TX byte counters. */
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.ucast_bytes_sent);
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.mcast_bytes_sent);
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.bcast_bytes_sent);

        pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].total_sent_bytes =
            pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent +
            pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent +
            pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent;

        /* ETH TX packet counters (32-bit, widened with wraparound handling). */
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.ucast_pkts_sent,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.mcast_pkts_sent,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.bcast_pkts_sent,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.error_drop_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].error_drop_pkts );

        pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].total_sent_pkts =
            pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent+
            pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent +
            pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent;

        /* TOE XSTORM counters, one set per element of the statistics array. */
        arr_cnt = ARRSIZE(pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics) ;
        for ( i = 0; i < arr_cnt; i++)
        {
            LM_STATS_FW_ASSIGN_TOE_U32(xstorm_toe.statistics[i].tcp_out_segments) ;
            LM_STATS_FW_ASSIGN_TOE_U32(xstorm_toe.statistics[i].tcp_retransmitted_segments) ;
            LM_STATS_FW_ASSIGN_TOE_REGPAIR(xstorm_toe.statistics[i].ip_out_octets ) ;
            LM_STATS_FW_ASSIGN_TOE_U32(xstorm_toe.statistics[i].ip_out_requests) ;
        }

        if( !CHIP_IS_E1x(pdev) )
        {
            lm_stats_fw_assign_fcoe_xstorm(&pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->fcoe,
                                           &pdev->vars.stats.stats_mirror.stats_fw.fcoe);
        }

        SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) ;
    }

    if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) )
    {
        /* ETH RX byte counters. */
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_unicast_bytes,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_ucast_bytes );
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_bytes,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_bcast_bytes );
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_multicast_bytes,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_mcast_bytes );

        /* ETH RX packet / per-client discard counters. */
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.checksum_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].checksum_discard );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.pkts_too_big_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].packets_too_big_discard );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_ucast_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_unicast_pkts );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_bcast_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_pkts );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_mcast_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_multicast_pkts );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.no_buff_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].no_buff_discard );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.ttl0_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].ttl0_discard );

        /* Port-level discard counters. mf_tag_discard previously (and
         * incorrectly) overwrote the client ttl0_discard mirror above. */
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mf_tag_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mf_tag_discard );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mac_filter_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_filter_discard ) ;
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.brb_truncate_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.brb_truncate_discard ) ;
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mac_discard,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_discard ) ;

        /* TOE TSTORM counters. */
        arr_cnt = ARRSIZE(pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics) ;
        for ( i = 0; i < arr_cnt; i++)
        {
            LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_receives) ;
            LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_delivers) ;
            LM_STATS_FW_ASSIGN_TOE_REGPAIR(tstorm_toe.statistics[i].ip_in_octets) ;
            LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].tcp_in_errors) ;
            LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_header_errors) ;
            LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_discards) ;
            LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_truncated_packets) ;
        }

        if( !CHIP_IS_E1x(pdev) )
        {
            lm_stats_fw_assign_fcoe_tstorm(&pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->fcoe,
                                           &pdev->vars.stats.stats_mirror.stats_fw.fcoe);
        }

        SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) ;
    }

    if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) )
    {
        /* ETH no-buffer / coalescing byte counters. */
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_bytes,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.ucast_no_buff_bytes );
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_bytes,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.mcast_no_buff_bytes );
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_bytes,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.bcast_no_buff_bytes );
        REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_bytes,
                       pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_bytes );

        /* ETH no-buffer / coalescing packet counters. */
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.ucast_no_buff_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_pkts );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.mcast_no_buff_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_pkts );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.bcast_no_buff_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_pkts );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_pkts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_pkts );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_events,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_events );
        LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_aborts,
                                 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_aborts );

        if( !CHIP_IS_E1x(pdev) )
        {
            lm_stats_fw_assign_fcoe_ustorm(&pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->fcoe,
                                           &pdev->vars.stats.stats_mirror.stats_fw.fcoe);
        }

        SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) ;
    }

    if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) )
    {
        LM_STATS_FW_ASSIGN_TOE_U32(cstorm_toe.no_tx_cqes) ;
        SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) ;
    }
}
#ifdef VF_INVOLVED
/* Copies the firmware statistics a master PF collected on behalf of its VFs
 * (vf_stats.pf_fw_stats_virt_data) into each VF's mirror (mirror_stats_fw),
 * per storm, following the same flag protocol as lm_stats_fw_assign().
 * Caller is expected to hold the VFs statistics lock (see lm_stats_fw_complete). */
void lm_pf_stats_vf_fw_assign(struct _lm_device_t *pdev, u32_t stats_flags_done, u32_t* ptr_stats_flags_assigned)
{
lm_stats_fw_t *mirror_stats_fw;
struct per_queue_stats *queue_stats;
const u8_t cli_id = LM_CLI_IDX_NDIS ;
u8_t vf_idx;
if CHK_NULL( ptr_stats_flags_assigned )
{
DbgBreakIf(!ptr_stats_flags_assigned) ;
return;
}
if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) )
{
/* Per-VF TX byte/packet counters and derived totals. */
for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
mirror_stats_fw = pdev->vfs_set.vfs_array[vf_idx].vf_stats.mirror_stats_fw;
queue_stats = pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_virt_data;
REGPAIR_TO_U64(mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent,
queue_stats->xstorm_queue_statistics.ucast_bytes_sent);
REGPAIR_TO_U64(mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent,
queue_stats->xstorm_queue_statistics.mcast_bytes_sent);
REGPAIR_TO_U64(mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent,
queue_stats->xstorm_queue_statistics.bcast_bytes_sent);
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].total_sent_bytes =
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent +
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent +
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent;
LM_SIGN_EXTEND_VALUE_32( queue_stats->xstorm_queue_statistics.ucast_pkts_sent,
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent );
LM_SIGN_EXTEND_VALUE_32( queue_stats->xstorm_queue_statistics.mcast_pkts_sent,
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent );
LM_SIGN_EXTEND_VALUE_32( queue_stats->xstorm_queue_statistics.bcast_pkts_sent,
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent );
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].total_sent_pkts =
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent+
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent +
mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent;
}
SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) ;
}
if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) )
{
/* Per-VF RX byte/packet and discard counters. */
for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
mirror_stats_fw = pdev->vfs_set.vfs_array[vf_idx].vf_stats.mirror_stats_fw;
queue_stats = pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_virt_data;
REGPAIR_TO_U64(mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_unicast_bytes,
queue_stats->tstorm_queue_statistics.rcv_ucast_bytes );
REGPAIR_TO_U64(mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_bytes,
queue_stats->tstorm_queue_statistics.rcv_bcast_bytes );
REGPAIR_TO_U64(mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_multicast_bytes,
queue_stats->tstorm_queue_statistics.rcv_mcast_bytes );
LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.checksum_discard,
mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].checksum_discard );
LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.pkts_too_big_discard,
mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].packets_too_big_discard );
LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.rcv_ucast_pkts,
mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_unicast_pkts );
LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.rcv_bcast_pkts,
mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_pkts );
LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.rcv_mcast_pkts,
mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_multicast_pkts );
LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.no_buff_discard,
mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].no_buff_discard );
LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.ttl0_discard,
mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].ttl0_discard );
}
SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) ;
}
if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) )
{
for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
mirror_stats_fw = pdev->vfs_set.vfs_array[vf_idx].vf_stats.mirror_stats_fw;
queue_stats = pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_virt_data;
REGPAIR_TO_U64(mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_bytes,
queue_stats->ustorm_queue_statistics.ucast_no_buff_bytes );
REGPAIR_TO_U64(mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_bytes,
queue_stats->ustorm_queue_statistics.mcast_no_buff_bytes );
REGPAIR_TO_U64(mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_bytes,
queue_stats->ustorm_queue_statistics.bcast_no_buff_bytes );
/* NOTE(review): this REGPAIR_TO_U64 and the three LM_SIGN_EXTEND_VALUE_32
 * calls for coalesced_* below read/write the PF's OWN stats
 * (pdev->vars.stats...) inside the per-VF loop, redundantly per VF —
 * looks like copy-paste from lm_stats_fw_assign(); confirm whether the
 * per-VF queue_stats/mirror_stats_fw were intended. */
REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_bytes,
pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_bytes );
LM_SIGN_EXTEND_VALUE_32( queue_stats->ustorm_queue_statistics.ucast_no_buff_pkts,
mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_pkts );
LM_SIGN_EXTEND_VALUE_32( queue_stats->ustorm_queue_statistics.mcast_no_buff_pkts,
mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_pkts );
LM_SIGN_EXTEND_VALUE_32( queue_stats->ustorm_queue_statistics.bcast_no_buff_pkts,
mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_pkts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_pkts );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_events,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_events );
LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_aborts,
pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_aborts );
}
SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) ;
}
if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) )
{
/* No per-VF CSTORM counters to copy; just mark the storm assigned. */
SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) ;
}
}
#endif
/**
 * lm_stats_hw_macs_assign - accumulate one MAC counter into its mirror.
 *
 * On MSTAT devices the collected value is added to the mirror as-is.
 * Otherwise the mirror is updated through lm_update_wraparound_if_needed()
 * so that a counter narrower than 64 bits keeps counting across hardware
 * wraparounds.
 *
 * @param pdev               device context
 * @param bits               hardware counter width in bits (non-MSTAT path)
 * @param field_collect_val  value read from the hardware
 * @param field_mirror_val   in/out 64-bit mirror of the counter
 * @return the increment applied to the mirror this round.
 */
static u64_t lm_stats_hw_macs_assign(IN lm_device_t* pdev,
                                     IN u8_t bits,
                                     IN u64_t field_collect_val,
                                     IN OUT u64_t *field_mirror_val)
{
    u64_t delta = 0;

    if (HAS_MSTAT(pdev))
    {
        /* MSTAT: the collected value IS the delta. */
        delta = field_collect_val;
        *field_mirror_val += delta;
    }
    else
    {
        /* Legacy MACs: widen with wraparound handling; the delta is the
         * difference between the new and previous mirror values. */
        u64_t before = *field_mirror_val;

        *field_mirror_val = lm_update_wraparound_if_needed(bits, field_collect_val, before, FALSE);
        delta = *field_mirror_val - before;
    }

    return delta;
}
/* Folds one MAC hardware counter (width field_width bits) into both mirror
 * copies: the "current" mirror is updated with wraparound handling and the
 * returned delta is added to the "total" mirror. Expects locals `pdev`,
 * `mac_query` (collect buffer) and `macs[]` (mirror array) in the expansion
 * scope. Note: arguments are expanded more than once — pass only plain
 * field designators. */
#define LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, field_width)\
if (mac_query->field_collect != 0) { DbgMessage(pdev, INFORM, "assigning %s[=%x] to %s, width %d.\n", #field_collect, mac_query->field_collect, #field_mirror, field_width ); } \
macs[STATS_MACS_IDX_TOTAL].field_mirror += lm_stats_hw_macs_assign( pdev, \
field_width, \
mac_query->field_collect, \
&(macs[STATS_MACS_IDX_CURRENT].field_mirror) ) ;
/* Width-specific shorthands for the common 32/36/42-bit counters. */
#define LM_STATS_HW_MAC_ASSIGN_U32( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 32)
#define LM_STATS_HW_MAC_ASSIGN_U36( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 36)
#define LM_STATS_HW_MAC_ASSIGN_U42( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 42)
/* Folds a NIG block counter into the NIG mirror via LM_SIGN_EXTEND_VALUE_<bits>. */
#define LM_STATS_HW_NIG_ASSIGN_UXX(bits, block_name,field_collect,field_mirror) \
LM_SIGN_EXTEND_VALUE_##bits( pdev->vars.stats.stats_collect.stats_hw.addr_##block_name##_stats_query->field_collect, \
pdev->vars.stats.stats_mirror.stats_hw.nig.field_mirror ) ;
#define LM_STATS_HW_NIG_ASSIGN_U32(block_name,field_collect,field_mirror) LM_STATS_HW_NIG_ASSIGN_UXX(32, block_name,field_collect,field_mirror)
/* Assigns the counters shared by the non-EMAC statistic blocks (BMAC/MSTAT)
 * into the mirror, all at the given counter width. The TX unicast/multicast/
 * broadcast counters are assigned only in single-function mode
 * (!IS_MULTI_VNIC). Several hardware counters feed more than one mirror
 * field (e.g. tx_gtxpf -> outxoffsent and flowcontroldone, rx_grxpf ->
 * xoffpauseframesreceived, maccontrolframesreceived and xoffstateentered). */
#define LM_STATS_NON_EMAC_ASSIGN_CODE(_field_width) \
{\
\
\
\
if (!IS_MULTI_VNIC(pdev)) { \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtgca, stats_tx.tx_stat_ifhcoutucastpkts_bmac_bca, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtgca, stats_tx.tx_stat_ifhcoutbroadcastpkts, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtpkt, stats_tx.tx_stat_ifhcoutucastpkts_bmac_pkt , _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtmca, stats_tx.tx_stat_ifhcoutucastpkts_bmac_mca , _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtmca, stats_tx.tx_stat_ifhcoutmulticastpkts , _field_width); \
\
} \
\
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grfcs, stats_rx.rx_stat_dot3statsfcserrors, _field_width); \
\
\
\
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grovr, stats_rx.rx_stat_dot3statsframestoolong, _field_width); \
\
\
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_xoffpauseframesreceived, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtxpf, stats_tx.tx_stat_outxoffsent, _field_width); \
\
\
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xpf, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxcf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf, _field_width); \
\
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt64, stats_tx.tx_stat_etherstatspkts64octets, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt127, stats_tx.tx_stat_etherstatspkts65octetsto127octets, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt255, stats_tx.tx_stat_etherstatspkts128octetsto255octets, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt511, stats_tx.tx_stat_etherstatspkts256octetsto511octets, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt1023, stats_tx.tx_stat_etherstatspkts512octetsto1023octets, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt1518, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt2047, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt4095, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt9216, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216, _field_width); \
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt16383, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383, _field_width);\
\
\
\
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gterr, stats_tx.tx_stat_dot3statsinternalmactransmiterrors, _field_width); \
\
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtxpf, stats_tx.tx_stat_flowcontroldone, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_xoffstateentered, _field_width); \
\
\
\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtufl, stats_tx.tx_stat_ifhcoutdiscards, _field_width); \
}
/* Mirror the RX counters that exist only on BMAC1/BMAC2 (not MSTAT/EMAC):
 * undersize, jabber, fragment and bad-octet counts. Expands in a scope where
 * 'macs' (mirror) and 'mac_query' (HW snapshot) are in scope; the U36/U42
 * helpers sign-extend the 36/42-bit HW fields into the 64-bit mirror. */
#define LM_STATS_BMAC_ASSIGN_CODE \
{ \
LM_STATS_HW_MAC_ASSIGN_U42( stats_rx.rx_grund, stats_rx.rx_stat_etherstatsundersizepkts ) ; \
LM_STATS_HW_MAC_ASSIGN_U36( stats_rx.rx_grjbr, stats_rx.rx_stat_etherstatsjabbers ) ; \
LM_STATS_HW_MAC_ASSIGN_U42( stats_rx.rx_grfrg, stats_rx.rx_stat_etherstatsfragments ) ; \
LM_STATS_HW_MAC_ASSIGN_U42( stats_rx.rx_grerb, stats_rx.rx_stat_ifhcinbadoctets ); \
}
/* Mirror the PFC packet counters common to BMAC2 and MSTAT (BMAC1 has none).
 * _field_width is the HW counter width in bits (36 for BMAC2, 39 for MSTAT).
 * Expands in a scope where 'macs' and 'mac_query' are in scope. */
#define LM_STATS_BMAC2_MSTAT_ASSIGN_CODE(_field_width) \
{\
LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtxpp, stats_tx.tx_stat_pfcPacketCounter, _field_width); \
\
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpp, stats_rx.rx_stat_pfcPacketCounter, _field_width); \
}
/* Mirror the MSTAT-only counters. RX counters are 39 bits wide, octet
 * counters 45 bits. The single-function (non-multi-VNIC) RX counters are
 * only meaningful when this function owns the whole port.
 * Fix: the rx_gruca line was the only one missing its terminating ';',
 * inconsistent with every sibling invocation. */
#define LM_STATS_MSTAT_ASSIGN_CODE \
{ \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grund, stats_rx.rx_stat_etherstatsundersizepkts, 39) ; \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grfrg, stats_rx.rx_stat_etherstatsfragments, 39) ; \
LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grerb, stats_rx.rx_stat_ifhcinbadoctets, 45); \
if (!IS_MULTI_VNIC(pdev)) {\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_grbyt, stats_rx.rx_stat_ifhcinoctets, 45);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gruca, stats_rx.rx_stat_ifhcinucastpkts, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_grmca, stats_rx.rx_stat_ifhcinmulticastpkts, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_grbca, stats_rx.rx_stat_ifhcinbroadcastpkts, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr64, stats_rx.rx_stat_etherstatspkts64octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr127, stats_rx.rx_stat_etherstatspkts65octetsto127octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr255, stats_rx.rx_stat_etherstatspkts128octetsto255octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr511, stats_rx.rx_stat_etherstatspkts256octetsto511octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr1023, stats_rx.rx_stat_etherstatspkts512octetsto1023octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr1518, stats_rx.rx_stat_etherstatspkts1024octetsto1522octets, 39);\
LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr2047, stats_rx.rx_stat_etherstatspktsover1522octets, 39);\
}\
}
void lm_stats_hw_bmac1_assign( struct _lm_device_t *pdev)
{
stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
volatile struct _stats_bmac1_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query;
LM_STATS_NON_EMAC_ASSIGN_CODE(36)
LM_STATS_BMAC_ASSIGN_CODE
}
/* Copy the BMAC2 HW statistics snapshot into the driver mirror.
 * 'macs' and 'mac_query' are referenced implicitly by the assignment macros. */
void lm_stats_hw_bmac2_assign( struct _lm_device_t *pdev)
{
stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
volatile struct _stats_bmac2_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query;
/* BMAC2 counters are 36 bits wide. */
const u8_t bmac2_field_width = 36;
DbgBreakIf(mac_query == NULL);
LM_STATS_NON_EMAC_ASSIGN_CODE(bmac2_field_width)
LM_STATS_BMAC2_MSTAT_ASSIGN_CODE(bmac2_field_width)
LM_STATS_BMAC_ASSIGN_CODE
}
/* Copy the MSTAT HW statistics snapshot into the driver mirror.
 * 'macs' and 'mac_query' are referenced implicitly by the assignment macros.
 * Fix: the debug trace printed the mac_query pointer with "%x", which
 * truncates the value on 64-bit builds (format/argument mismatch); use "%p". */
void lm_stats_hw_mstat_assign( lm_device_t* pdev)
{
stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
volatile struct _stats_mstat_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.addr_mstat_stats_query;
/* MSTAT packet counters are 39 bits wide (octet counters 45 - see macro). */
const u8_t mstat_field_width = 39;
DbgBreakIf(mac_query == NULL);
DbgMessage(pdev, INFORM, "lm_stats_hw_mstat_assign: mac_query=%p\n", mac_query);
LM_STATS_NON_EMAC_ASSIGN_CODE(mstat_field_width)
LM_STATS_BMAC2_MSTAT_ASSIGN_CODE(mstat_field_width)
LM_STATS_MSTAT_ASSIGN_CODE
}
/* Copy the EMAC HW statistics snapshot into the driver mirror. All EMAC
 * counters are 32 bits; LM_STATS_HW_MAC_ASSIGN_U32 handles wraparound into
 * the 64-bit mirror. Per-port RX byte/packet counters are only mirrored
 * when this function owns the whole port (!IS_MULTI_VNIC).
 * Fix: the ifhcinucastpkts line was the only one missing its terminating
 * ';', inconsistent with every sibling invocation. */
void lm_stats_hw_emac_assign( struct _lm_device_t *pdev)
{
stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
volatile struct _stats_emac_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_emac_stats_query;
DbgBreakIf(mac_query == NULL);
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinbadoctets, stats_rx.rx_stat_ifhcinbadoctets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsfragments, stats_rx.rx_stat_etherstatsfragments ) ;
if (!IS_MULTI_VNIC(pdev)) {
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinoctets, stats_rx.rx_stat_ifhcinoctets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinucastpkts, stats_rx.rx_stat_ifhcinucastpkts );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinmulticastpkts, stats_rx.rx_stat_ifhcinmulticastpkts );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinbroadcastpkts, stats_rx.rx_stat_ifhcinbroadcastpkts );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts64octets, stats_rx.rx_stat_etherstatspkts64octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts65octetsto127octets, stats_rx.rx_stat_etherstatspkts65octetsto127octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts128octetsto255octets, stats_rx.rx_stat_etherstatspkts128octetsto255octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts256octetsto511octets, stats_rx.rx_stat_etherstatspkts256octetsto511octets );
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts512octetsto1023octets, stats_rx.rx_stat_etherstatspkts512octetsto1023octets);
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts1024octetsto1522octets, stats_rx.rx_stat_etherstatspkts1024octetsto1522octets);
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspktsover1522octets, stats_rx.rx_stat_etherstatspktsover1522octets);
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutoctets, stats_tx.tx_stat_ifhcoutoctets);
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutucastpkts, stats_tx.tx_stat_ifhcoutucastpkts);
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutmulticastpkts, stats_tx.tx_stat_ifhcoutmulticastpkts);
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutbroadcastpkts, stats_tx.tx_stat_ifhcoutbroadcastpkts);
}
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsfcserrors, stats_rx.rx_stat_dot3statsfcserrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsalignmenterrors, stats_rx.rx_stat_dot3statsalignmenterrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xonpauseframesreceived, stats_rx.rx_stat_xonpauseframesreceived ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xoffpauseframesreceived, stats_rx.rx_stat_xoffpauseframesreceived ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_maccontrolframesreceived, stats_rx.rx_stat_maccontrolframesreceived ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xoffstateentered, stats_rx.rx_stat_xoffstateentered ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsframestoolong, stats_rx.rx_stat_dot3statsframestoolong ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsjabbers, stats_rx.rx_stat_etherstatsjabbers ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsundersizepkts, stats_rx.rx_stat_etherstatsundersizepkts ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_rx_err.rx_stat_falsecarriererrors, stats_rx_err.rx_stat_falsecarriererrors ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutbadoctets, stats_tx.tx_stat_ifhcoutbadoctets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatscollisions, stats_tx.tx_stat_etherstatscollisions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_outxonsent, stats_tx.tx_stat_outxonsent ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_outxoffsent, stats_tx.tx_stat_outxoffsent ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_flowcontroldone, stats_tx.tx_stat_flowcontroldone ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statssinglecollisionframes, stats_tx.tx_stat_dot3statssinglecollisionframes ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsmultiplecollisionframes, stats_tx.tx_stat_dot3statsmultiplecollisionframes ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsdeferredtransmissions, stats_tx.tx_stat_dot3statsdeferredtransmissions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsexcessivecollisions, stats_tx.tx_stat_dot3statsexcessivecollisions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statslatecollisions, stats_tx.tx_stat_dot3statslatecollisions ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts64octets, stats_tx.tx_stat_etherstatspkts64octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts65octetsto127octets, stats_tx.tx_stat_etherstatspkts65octetsto127octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts128octetsto255octets, stats_tx.tx_stat_etherstatspkts128octetsto255octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts256octetsto511octets, stats_tx.tx_stat_etherstatspkts256octetsto511octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts512octetsto1023octets, stats_tx.tx_stat_etherstatspkts512octetsto1023octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspktsover1522octets, stats_tx.tx_stat_etherstatspktsover1522octets ) ;
LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsinternalmactransmiterrors, stats_tx.tx_stat_dot3statsinternalmactransmiterrors ) ;
}
void lm_stats_hw_assign( struct _lm_device_t *pdev )
{
if(HAS_MSTAT(pdev))
{
DbgMessage(pdev, INFORM, "lm_stats_hw_assign: device has MSTAT block.\n");
lm_stats_hw_mstat_assign(pdev);
}
else if (CHIP_IS_E2(pdev) && (pdev->vars.mac_type == MAC_TYPE_BMAC))
{
lm_stats_hw_bmac2_assign(pdev);
}
else if (pdev->vars.mac_type == MAC_TYPE_BMAC)
{
lm_stats_hw_bmac1_assign(pdev);
}
else if(pdev->vars.mac_type == MAC_TYPE_EMAC)
{
lm_stats_hw_emac_assign(pdev);
}
else
{
DbgBreakIf((pdev->vars.mac_type != MAC_TYPE_EMAC) && (pdev->vars.mac_type == MAC_TYPE_BMAC) && !HAS_MSTAT(pdev) );
}
{
LM_STATS_HW_NIG_ASSIGN_U32(nig, brb_discard, brb_discard ) ;
if (!IS_MULTI_VNIC(pdev))
{
LM_STATS_HW_NIG_ASSIGN_U32(nig, brb_packet, brb_packet );
LM_STATS_HW_NIG_ASSIGN_U32(nig, brb_truncate, brb_truncate );
LM_STATS_HW_NIG_ASSIGN_U32(nig, flow_ctrl_discard, flow_ctrl_discard );
LM_STATS_HW_NIG_ASSIGN_U32(nig, flow_ctrl_octets, flow_ctrl_octets );
LM_STATS_HW_NIG_ASSIGN_U32(nig, flow_ctrl_packet, flow_ctrl_packet );
LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_discard, mng_discard );
LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_octet_inp, mng_octet_inp );
LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_octet_out, mng_octet_out );
LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_packet_inp, mng_packet_inp );
LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_packet_out, mng_packet_out );
LM_STATS_HW_NIG_ASSIGN_U32(nig, pbf_octets, pbf_octets );
LM_STATS_HW_NIG_ASSIGN_U32(nig, pbf_packet, pbf_packet );
LM_STATS_HW_NIG_ASSIGN_U32(nig, safc_inp, safc_inp );
}
if(HAS_MSTAT(pdev))
{
stats_macs_t* assigned_hw_stats = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_TOTAL];
struct _stats_nig_ex_t* nig_ex_stats = &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query;
nig_ex_stats->egress_mac_pkt0 = assigned_hw_stats->stats_tx.tx_stat_etherstatspkts1024octetsto1522octet;
nig_ex_stats->egress_mac_pkt1 = assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047+
assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095+
assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216+
assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383;
}
else
{
LM_SIGN_EXTEND_VALUE_36( pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt0, pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt0 ) ;
LM_SIGN_EXTEND_VALUE_36( pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt1, pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt1 ) ;
}
}
}
/* Populate the per-function ETH section of the driver-info block reported to
 * the MFW (management firmware): driver version, MTU, queue sizes, offload
 * connection counts and promiscuous state for the NDIS L2 client. Fields the
 * driver does not track are reported as 0 (DRV_INFO_TO_MFW_NOT_SUPPORTED). */
static void lm_drv_info_to_mfw_assign_eth( struct _lm_device_t *pdev )
{
const u8_t client_id = LM_CLI_CID(pdev, LM_CLI_IDX_NDIS );
eth_stats_info_t* stats_eth = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats;
lm_client_con_params_t* cli_params = NULL;
/* Guard against an out-of-range client id before indexing the params array. */
if( client_id >= ARRSIZE(pdev->params.l2_cli_con_params) )
{
DbgBreakIf( client_id >= ARRSIZE(pdev->params.l2_cli_con_params) );
return;
}
#define DRV_INFO_TO_MFW_NOT_SUPPORTED 0
cli_params = &pdev->params.l2_cli_con_params[client_id];
ASSERT_STATIC( sizeof(stats_eth->version) <= sizeof(pdev->ver_str) );
/* NOTE(review): mac_local is size-asserted below but never copied here -
 * confirm whether the MAC address is filled in elsewhere or was omitted. */
ASSERT_STATIC( sizeof(stats_eth->mac_local) <= sizeof( pdev->params.mac_addr ) );
mm_memcpy( stats_eth->version, pdev->ver_str, sizeof(stats_eth->version) );
stats_eth->mtu_size = cli_params->mtu;
stats_eth->lso_max_size = DRV_INFO_TO_MFW_NOT_SUPPORTED;
stats_eth->lso_min_seg_cnt = DRV_INFO_TO_MFW_NOT_SUPPORTED;
/* Currently-established offloaded connections per IP version (TOE mirror). */
stats_eth->ipv4_ofld_cnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
stats_eth->ipv6_ofld_cnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
stats_eth->promiscuous_mode = ( 0 != GET_FLAGS( pdev->client_info[client_id].last_set_rx_mask, LM_RX_MASK_PROMISCUOUS_MODE ) );
stats_eth->txq_size = cli_params->num_tx_desc;
stats_eth->rxq_size = cli_params->num_rx_desc;
stats_eth->txq_avg_depth = DRV_INFO_TO_MFW_NOT_SUPPORTED;
stats_eth->rxq_avg_depth = DRV_INFO_TO_MFW_NOT_SUPPORTED;
stats_eth->iov_offload = DRV_INFO_TO_MFW_NOT_SUPPORTED;
stats_eth->vf_cnt = 0;
stats_eth->netq_cnt = mm_get_vmq_cnt(pdev);
stats_eth->feature_flags = mm_get_feature_flags(pdev);
}
/* Copy the requested driver-info section (ETH / iSCSI / FCoE) from the driver
 * mirror into the buffer shared with the MFW. For ETH the mirror is refreshed
 * first. Returns LM_STATUS_FAILURE if the shared buffer is not mapped and
 * LM_STATUS_INVALID_PARAMETER for an unknown opcode. */
lm_status_t lm_stats_drv_info_to_mfw_assign( struct _lm_device_t *pdev, const enum drv_info_opcode drv_info_op )
{
lm_status_t rc = LM_STATUS_SUCCESS;
void* mfw_buf = (void*)pdev->vars.stats.stats_collect.drv_info_to_mfw.addr.eth_stats;
void* mirror = NULL;
u32_t mirror_size = 0;
if CHK_NULL(mfw_buf)
{
DbgBreakIf(!mfw_buf);
return LM_STATUS_FAILURE;
}
switch(drv_info_op)
{
case ETH_STATS_OPCODE:
/* ETH is assembled on demand; the storage sections are kept up to date elsewhere. */
lm_drv_info_to_mfw_assign_eth(pdev);
mirror = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats;
mirror_size = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats);
break;
case ISCSI_STATS_OPCODE:
mirror = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.iscsi_stats;
mirror_size = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.iscsi_stats);
break;
case FCOE_STATS_OPCODE:
mirror = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.fcoe_stats;
mirror_size = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.fcoe_stats);
break;
default:
rc = LM_STATUS_INVALID_PARAMETER;
break;
}
if( LM_STATUS_SUCCESS == rc )
{
mm_mem_zero( mfw_buf, mirror_size );
mm_memcpy( mfw_buf, mirror, mirror_size );
}
return rc;
}
/* Zero the FW statistics mirror.
 * Fix: on a NULL pdev the original fell through after DbgBreakIf (a no-op in
 * release builds) and dereferenced the NULL pointer in mm_memset; bail out
 * instead, matching the guard style used by lm_stats_mgmt_assign_func. */
void lm_stats_fw_reset( struct _lm_device_t* pdev)
{
if CHK_NULL( pdev )
{
DbgBreakIf(!pdev) ;
return;
}
mm_memset( &pdev->vars.stats.stats_mirror.stats_fw, 0, sizeof(pdev->vars.stats.stats_mirror.stats_fw) ) ;
}
/* Report the PFC (priority flow control) frame counters from the current
 * MAC mirror into the DCBX statistics structure. */
void lm_stats_get_dcb_stats( lm_device_t* pdev, lm_dcbx_stat *stats )
{
stats->pfc_frames_received = LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_pfcPacketCounter );
stats->pfc_frames_sent     = LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_pfcPacketCounter );
}
/* Fill the generic driver statistics structure from the driver-side ETH
 * mirror. Counters this driver does not maintain are reported as zero. */
void lm_stats_get_driver_stats( struct _lm_device_t* pdev, b10_driver_statistics_t *stats )
{
stats->ver_num = DRIVER_STATISTISTCS_VER_NUM;
/* Counters tracked in the driver mirror. */
stats->tx_lso_frames      = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_lso_frames;
stats->tx_aborted         = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_aborted;
stats->tx_no_coalesce_buf = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_no_coalesce_buf;
stats->rx_aborted         = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_aborted;
/* Not maintained by this driver. */
stats->tx_no_bd         = 0;
stats->tx_no_desc       = 0;
stats->tx_no_map_reg    = 0;
stats->rx_err           = 0;
stats->rx_crc           = 0;
stats->rx_phy_err       = 0;
stats->rx_alignment     = 0;
stats->rx_short_packet  = 0;
stats->rx_giant_packet  = 0;
}
/* Fill the L2 driver statistics structure from the driver-side ETH mirror
 * (RX classification/error counters and TX resource-shortage counters). */
void lm_stats_get_l2_driver_stats( struct _lm_device_t* pdev, b10_l2_driver_statistics_t *stats )
{
stats->ver_num = L2_DRIVER_STATISTISTCS_VER_NUM;
/* RX-side counters. */
stats->RxIPv4FragCount    = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_ipv4_frag_count;
stats->RxIpCsErrorCount   = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_ip_cs_error_count;
stats->RxTcpCsErrorCount  = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_tcp_cs_error_count;
stats->RxLlcSnapCount     = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_llc_snap_count;
stats->RxPhyErrorCount    = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_phy_error_count;
stats->RxIpv6ExtCount     = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_ipv6_ext_count;
/* TX-side counters. */
stats->TxNoL2Bd           = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_no_l2_bd;
stats->TxNoSqWqe          = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_no_sq_wqe;
stats->TxL2AssemblyBufUse = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_l2_assembly_buf_use;
}
/* Fill the L4 (TOE) driver statistics structure from the driver-side TOE
 * mirror: per-IP-version connection counters plus shared RX/TX counters. */
void lm_stats_get_l4_driver_stats( struct _lm_device_t* pdev, b10_l4_driver_statistics_t *stats )
{
stats->ver_num = L4_DRIVER_STATISTISTCS_VER_NUM;
/* IPv4 connection counters. */
stats->CurrentlyIpv4Established = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
stats->OutIpv4Resets            = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].out_resets;
stats->OutIpv4Fin               = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].out_fin;
stats->InIpv4Reset              = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].in_reset;
stats->InIpv4Fin                = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].in_fin;
/* IPv6 connection counters. */
stats->CurrentlyIpv6Established = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
stats->OutIpv6Resets            = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].out_resets;
stats->OutIpv6Fin               = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].out_fin;
stats->InIpv6Reset              = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].in_reset;
stats->InIpv6Fin                = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].in_fin;
/* Counters common to both IP versions. */
stats->RxIndicateReturnPendingCnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_indicate_return_pending_cnt;
stats->RxIndicateReturnDoneCnt    = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_indicate_return_done_cnt;
stats->RxActiveGenBufCnt          = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_active_gen_buf_cnt;
stats->TxNoL4Bd                   = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.tx_no_l4_bd;
stats->TxL4AssemblyBufUse         = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.tx_l4_assembly_buf_use;
}
/* Fill the L2 chip statistics structure by combining FW (per-client storm)
 * counters, HW MAC counters and NIG counters for the NDIS client. 'version'
 * selects how much of the (versioned, growing) output structure to fill;
 * 'buf' must be large enough for that version. */
void lm_stats_get_l2_chip_stats( struct _lm_device_t* pdev, void *buf, u8_t version)
{
u32_t idx = LM_CLI_IDX_NDIS ;
b10_l2_chip_statistics_t *stats = buf;
stats->ver_num = version ;
/* RX octets: sum of FW unicast/multicast/broadcast byte counters. */
stats->IfHCInOctets = pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_bytes +
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_bytes +
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_bytes ;
stats->IfHCInBadOctets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_ifhcinbadoctets ) );
stats->IfHCOutOctets = pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].total_sent_bytes ;
stats->IfHCOutBadOctets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_ifhcoutbadoctets ) );
stats->IfHCInUcastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_pkts ) ;
stats->IfHCInMulticastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_pkts ) ;
stats->IfHCInBroadcastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_pkts ) ;
stats->IfHCInUcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_bytes ) ;
stats->IfHCInMulticastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_bytes ) ;
stats->IfHCInBroadcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_bytes ) ;
stats->IfHCOutUcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].unicast_bytes_sent ) ;
stats->IfHCOutMulticastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].multicast_bytes_sent ) ;
stats->IfHCOutBroadcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].broadcast_bytes_sent ) ;
stats->IfHCOutPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].total_sent_pkts ) ;
/* Per-type TX packet counts go through lm_get_stats (handles VF indirection). */
lm_get_stats( pdev, LM_STATS_UNICAST_FRAMES_XMIT, &stats->IfHCOutUcastPkts
#ifdef VF_INVOLVED
,NULL
#endif
) ;
lm_get_stats( pdev, LM_STATS_MULTICAST_FRAMES_XMIT, &stats->IfHCOutMulticastPkts
#ifdef VF_INVOLVED
,NULL
#endif
) ;
lm_get_stats( pdev, LM_STATS_BROADCAST_FRAMES_XMIT, &stats->IfHCOutBroadcastPkts
#ifdef VF_INVOLVED
,NULL
#endif
) ;
/* RX packets: sum of FW unicast/multicast/broadcast packet counters. */
stats->IfHCInPkts = pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_pkts +
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_pkts +
pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_pkts ;
stats->IfHCOutDiscards = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_ifhcoutdiscards ) );
stats->IfHCInFalseCarrierErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx_err.rx_stat_falsecarriererrors ) );
/* 802.3 / RMON error and size-bin counters come from the HW MAC mirror. */
stats->Dot3StatsInternalMacTransmitErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsinternalmactransmiterrors )) ;
stats->Dot3StatsCarrierSenseErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statscarriersenseerrors )) ;
stats->Dot3StatsFCSErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsfcserrors )) ;
stats->Dot3StatsAlignmentErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsalignmenterrors )) ;
stats->Dot3StatsSingleCollisionFrames = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statssinglecollisionframes )) ;
stats->Dot3StatsMultipleCollisionFrames = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsmultiplecollisionframes )) ;
stats->Dot3StatsDeferredTransmissions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsdeferredtransmissions )) ;
stats->Dot3StatsExcessiveCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsexcessivecollisions )) ;
stats->Dot3StatsLateCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statslatecollisions )) ;
stats->EtherStatsCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatscollisions )) ;
stats->EtherStatsFragments = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsfragments )) ;
stats->EtherStatsJabbers = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsjabbers )) ;
stats->EtherStatsUndersizePkts = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsundersizepkts )) ;
stats->EtherStatsOverrsizePkts = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsframestoolong )) ;
stats->EtherStatsPktsTx64Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts64octets )) ;
stats->EtherStatsPktsTx65Octetsto127Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts65octetsto127octets )) ;
stats->EtherStatsPktsTx128Octetsto255Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts128octetsto255octets )) ;
stats->EtherStatsPktsTx256Octetsto511Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts256octetsto511octets )) ;
stats->EtherStatsPktsTx512Octetsto1023Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts512octetsto1023octets)) ;
/* The two largest TX bins are served from the NIG egress mirror. */
stats->EtherStatsPktsTx1024Octetsto1522Octets = (pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt0) ;
stats->EtherStatsPktsTxOver1522Octets = (pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt1) ;
stats->XonPauseFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xonpauseframesreceived )) ;
stats->XoffPauseFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xoffpauseframesreceived )) ;
stats->OutXonSent = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_outxonsent )) ;
stats->OutXoffSent = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_outxoffsent )) ;
stats->FlowControlDone = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_flowcontroldone )) ;
/* MAC control frames: base counter plus the BMAC-specific XCF counter. */
stats->MacControlFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_maccontrolframesreceived )) ;
stats->MacControlFramesReceived += (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf )) ;
stats->XoffStateEntered = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xoffstateentered )) ;
lm_get_stats( pdev, LM_STATS_ERRORED_RECEIVE_CNT, &stats->IfInErrors
#ifdef VF_INVOLVED
,NULL
#endif
) ;
stats->IfInErrorsOctets = 0;
/* Discard counters: NIG BRB plus FW per-port / per-client discard reasons. */
stats->IfInNoBrbBuffer = (pdev->vars.stats.stats_mirror.stats_hw.nig.brb_discard) ;
stats->IfInFramesL2FilterDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_filter_discard) ;
stats->IfInTTL0Discards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].ttl0_discard) ;
stats->IfInxxOverflowDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.xxoverflow_discard) ;
/* MBUF discards: tstorm no-buffer discard plus the ustorm no-buffer counters. */
stats->IfInMBUFDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].no_buff_discard );
stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].ucast_no_buff_pkts );
stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].mcast_no_buff_pkts );
stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].bcast_no_buff_pkts );
/* Raw NIG counters. */
stats->Nig_brb_packet = (pdev->vars.stats.stats_mirror.stats_hw.nig.brb_packet) ;
stats->Nig_brb_truncate = (pdev->vars.stats.stats_mirror.stats_hw.nig.brb_truncate) ;
stats->Nig_flow_ctrl_discard = (pdev->vars.stats.stats_mirror.stats_hw.nig.flow_ctrl_discard) ;
stats->Nig_flow_ctrl_octets = (pdev->vars.stats.stats_mirror.stats_hw.nig.flow_ctrl_octets) ;
stats->Nig_flow_ctrl_packet = (pdev->vars.stats.stats_mirror.stats_hw.nig.flow_ctrl_packet) ;
stats->Nig_mng_discard = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_discard) ;
stats->Nig_mng_octet_inp = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_octet_inp) ;
stats->Nig_mng_octet_out = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_octet_out) ;
stats->Nig_mng_packet_inp = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_packet_inp) ;
stats->Nig_mng_packet_out = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_packet_out) ;
stats->Nig_pbf_octets = (pdev->vars.stats.stats_mirror.stats_hw.nig.pbf_octets) ;
stats->Nig_pbf_packet = (pdev->vars.stats.stats_mirror.stats_hw.nig.pbf_packet) ;
stats->Nig_safc_inp = (pdev->vars.stats.stats_mirror.stats_hw.nig.safc_inp) ;
/* Version 2+ appends the EEE TX LPI count. */
if (version > L2_CHIP_STATISTICS_VER_NUM_1)
{
b10_l2_chip_statistics_v2_t *stats_v2 = buf;
stats_v2->v2.Tx_lpi_count = pdev->vars.stats.stats_mirror.stats_hw.misc.tx_lpi_count;
}
/* Version 3+ appends the coalescing counters. */
if (version > L2_CHIP_STATISTICS_VER_NUM_2)
{
b10_l2_chip_statistics_v3_t *stats_v3 = buf;
stats_v3->v3.coalesced_pkts = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_pkts;
stats_v3->v3.coalesced_bytes = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_bytes;
stats_v3->v3.coalesced_events = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_events;
stats_v3->v3.coalesced_aborts = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_aborts;
}
}
/* Fill the L4 (TOE) chip statistics structure from the FW storm mirrors,
 * reporting IPv4 and IPv6 TCP/IP counters separately. */
void lm_stats_get_l4_chip_stats( struct _lm_device_t* pdev, b10_l4_chip_statistics_t *stats )
{
stats->ver_num = L4_CHIP_STATISTISTCS_VER_NUM;
stats->NoTxCqes = pdev->vars.stats.stats_mirror.stats_fw.toe_cstorm_toe.no_tx_cqes;
/* IPv4 counters. */
stats->InTCP4Segments            = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].ip_in_receives;
stats->OutTCP4Segments           = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[STATS_IP_4_IDX].tcp_out_segments;
stats->RetransmittedTCP4Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[STATS_IP_4_IDX].tcp_retransmitted_segments;
stats->InTCP4Errors              = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].tcp_in_errors;
stats->InIP4Receives             = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].ip_in_receives;
stats->InIP4HeaderErrors         = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].ip_in_header_errors;
stats->InIP4Discards             = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].ip_in_discards;
stats->InIP4Delivers             = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].ip_in_delivers;
stats->InIP4Octets               = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].ip_in_octets;
stats->OutIP4Octets              = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[STATS_IP_4_IDX].ip_out_octets;
stats->InIP4TruncatedPackets     = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_4_IDX].ip_in_truncated_packets;
/* IPv6 counters. */
stats->InTCP6Segments            = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].ip_in_receives;
stats->OutTCP6Segments           = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[STATS_IP_6_IDX].tcp_out_segments;
stats->RetransmittedTCP6Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[STATS_IP_6_IDX].tcp_retransmitted_segments;
stats->InTCP6Errors              = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].tcp_in_errors;
stats->InIP6Receives             = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].ip_in_receives;
stats->InIP6HeaderErrors         = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].ip_in_header_errors;
stats->InIP6Discards             = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].ip_in_discards;
stats->InIP6Delivers             = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].ip_in_delivers;
stats->InIP6Octets               = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].ip_in_octets;
stats->OutIP6Octets              = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[STATS_IP_6_IDX].ip_out_octets;
stats->InIP6TruncatedPackets     = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[STATS_IP_6_IDX].ip_in_truncated_packets;
}
/* Enable or disable HW statistics collection; only a PF may change the flag. */
void lm_stats_hw_config_stats( struct _lm_device_t* pdev, u8_t b_enabled )
{
DbgMessage(pdev, WARNstat, "lm_stats_hw_config_stats: b_collect_enabled %s-->%s\n",
pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled ? "TRUE":"FALSE",
b_enabled ? "TRUE":"FALSE" );
if (!IS_PFDEV(pdev))
{
return;
}
pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled = b_enabled;
}
/* Enable or disable FW statistics collection; allowed for a PF or a
 * channel-type VF. */
void lm_stats_fw_config_stats( struct _lm_device_t* pdev, u8_t b_enabled )
{
DbgMessage(pdev, VERBOSEstat, "lm_stats_fw_config_stats: b_collect_enabled %s-->%s\n",
pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled ? "TRUE":"FALSE",
b_enabled ? "TRUE":"FALSE" );
if (!IS_PFDEV(pdev) && !IS_CHANNEL_VFDEV(pdev))
{
return;
}
pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled = b_enabled;
}
STATIC void lm_stats_mgmt_assign_func( IN struct _lm_device_t* pdev )
{
u64_t val = 0 ;
u64_t val_base = 0 ;
lm_status_t lm_status = LM_STATUS_SUCCESS ;
lm_stats_t stats_type = 0 ;
host_func_stats_t* mcp_func = NULL ;
host_func_stats_t* mcp_func_base = NULL ;
if CHK_NULL(pdev)
{
return;
}
if ( GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ) )
{
return;
}
mcp_func = &pdev->vars.stats.stats_mirror.stats_mcp_func ;
mcp_func_base = &pdev->vars.stats.stats_mirror.stats_mcp_func_base ;
stats_type = LM_STATS_BYTES_RCV ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val += (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[LM_CLI_IDX_NDIS].rcv_error_bytes) ;
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_bytes_received, val_base);
mcp_func->total_bytes_received_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_bytes_received_lo = (u32_t)U64_LO( val ) ;
}
stats_type = LM_STATS_BYTES_XMIT ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_bytes_transmitted, val_base);
mcp_func->total_bytes_transmitted_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_bytes_transmitted_lo = (u32_t)U64_LO( val ) ;
}
stats_type = LM_STATS_UNICAST_FRAMES_RCV ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_unicast_packets_received, val_base);
mcp_func->total_unicast_packets_received_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_unicast_packets_received_lo = (u32_t)U64_LO( val ) ;
}
stats_type = LM_STATS_MULTICAST_FRAMES_RCV ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_multicast_packets_received, val_base);
mcp_func->total_multicast_packets_received_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_multicast_packets_received_lo = (u32_t)U64_LO( val ) ;
}
stats_type = LM_STATS_BROADCAST_FRAMES_RCV ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_broadcast_packets_received, val_base);
mcp_func->total_broadcast_packets_received_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_broadcast_packets_received_lo = (u32_t)U64_LO( val ) ;
}
stats_type = LM_STATS_UNICAST_FRAMES_XMIT ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_unicast_packets_transmitted, val_base);
mcp_func->total_unicast_packets_transmitted_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_unicast_packets_transmitted_lo = (u32_t)U64_LO( val ) ;
}
stats_type = LM_STATS_MULTICAST_FRAMES_XMIT ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_multicast_packets_transmitted, val_base);
mcp_func->total_multicast_packets_transmitted_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_multicast_packets_transmitted_lo = (u32_t)U64_LO( val ) ;
}
stats_type = LM_STATS_BROADCAST_FRAMES_XMIT ;
lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
,NULL
#endif
) ;
if ERR_IF( LM_STATUS_SUCCESS != lm_status )
{
DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
}
else
{
val+= LM_STATS_HI_LO_TO_64(mcp_func_base->total_broadcast_packets_transmitted, val_base);
mcp_func->total_broadcast_packets_transmitted_hi = (u32_t)U64_HI( val ) ;
mcp_func->total_broadcast_packets_transmitted_lo = (u32_t)U64_LO( val ) ;
}
val = sizeof(pdev->vars.stats.stats_mirror.stats_mcp_func) ;
val = val/sizeof(u32_t) ;
mcp_func->host_func_stats_end = ++mcp_func->host_func_stats_start ;
REG_WR_DMAE_LEN(pdev,
pdev->vars.fw_func_stats_ptr,
mcp_func,
(u16_t)val ) ;
}
/* Read the per-function management statistics block from the MCP-supplied
 * address into the driver's "base" mirror (stats_mcp_func_base).
 * No-op when pdev is NULL, the MCP never provided an address, or we are
 * running without an MCP (test mode).
 */
static void lm_stats_mgmt_read_func_base( IN struct _lm_device_t* pdev )
{
    host_func_stats_t* base_mirror   = NULL;
    u32_t              len_in_dwords = 0;

    if CHK_NULL(pdev)
    {
        return;
    }

    if( 0 == pdev->vars.fw_func_stats_ptr )
    {
        return;
    }

    if (GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ))
    {
        return;
    }

    base_mirror   = &pdev->vars.stats.stats_mirror.stats_mcp_func_base;
    len_in_dwords = sizeof(pdev->vars.stats.stats_mirror.stats_mcp_func_base) / sizeof(u32_t);

    REG_RD_DMAE_LEN(pdev,
                    pdev->vars.fw_func_stats_ptr,
                    base_mirror,
                    (u16_t)len_in_dwords );
}
/* Zero the management statistics area of every function mailbox on this
 * port by zeroing the local mirror once and DMA-writing it to each
 * function's stats address (when one is published).
 */
static void lm_stats_mgmt_clear_all_func( IN struct _lm_device_t* pdev )
{
    u8_t        func_mb_idx     = 0;
    u32_t       func_stats_addr = 0;
    const u32_t size_in_bytes   = sizeof(pdev->vars.stats.stats_mirror.stats_mcp_func);
    const u16_t size_in_dwords  = (u16_t)(size_in_bytes / sizeof(u32_t));

    mm_mem_zero(&pdev->vars.stats.stats_mirror.stats_mcp_func, size_in_bytes);

    LM_FOREACH_FUNC_MAILBOX_IN_PORT(pdev, func_mb_idx)
    {
        lm_setup_read_mgmt_stats_ptr(pdev, func_mb_idx, NULL, &func_stats_addr);

        if( func_stats_addr != 0 )
        {
            REG_WR_DMAE_LEN(pdev,
                            func_stats_addr,
                            &pdev->vars.stats.stats_mirror.stats_mcp_func,
                            size_in_dwords );
        }

        /* On E1, and on post-E1x chips in 4-port mode, only the first
         * mailbox is written (matches the original early break). */
        if(CHIP_IS_E1(pdev) || (!CHIP_IS_E1x(pdev) && (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)))
        {
            break;
        }
    }
}
/* lm_stats_port_to_from - copy MAC/NIG statistics between the driver's HW
 * mirror (stats_hw) and the MCP port-statistics structure (mcp_port).
 *   b_is_to == TRUE : driver mirror --> MCP structure (u64 split to hi/lo)
 *   b_is_to == FALSE: MCP structure --> driver mirror (hi/lo joined to u64)
 */
void lm_stats_port_to_from( IN OUT struct _lm_device_t* pdev, u8_t b_is_to )
{
host_port_stats_t* mcp_port = NULL ;
lm_stats_hw_t* stats_hw = NULL ;
stats_macs_idx_t stats_macs_idx = STATS_MACS_IDX_MAX ;
u8_t i = 0 ;
mcp_port = &pdev->vars.stats.stats_mirror.stats_mcp_port ;
stats_hw = &pdev->vars.stats.stats_mirror.stats_hw ;
/* Compile-time sanity: the driver's MAC index space must match MAC_STX. */
ASSERT_STATIC( STATS_MACS_IDX_MAX == MAC_STX_IDX_MAX );
ASSERT_STATIC( STATS_MACS_IDX_CURRENT < STATS_MACS_IDX_TOTAL );
/* Pick the first macs[] index to copy: with an active MAC both the
 * "current" and "total" entries are copied; with no MAC only "total". */
switch( pdev->vars.mac_type )
{
case MAC_TYPE_EMAC:
case MAC_TYPE_BMAC:
case MAC_TYPE_UMAC:
case MAC_TYPE_XMAC:
stats_macs_idx = STATS_MACS_IDX_CURRENT ;
break;
case MAC_TYPE_NONE:
stats_macs_idx = STATS_MACS_IDX_TOTAL ;
break;
default:
DbgBreakMsg( "mac_type not acceptable" ) ;
return;
}
/* Copy a single field in the direction selected by _b_is_to. */
#define LM_STATS_PMF_TO_FROM( _mcp_field, _hw_field, _b_is_to ) \
if( _b_is_to )\
{ \
LM_STATS_64_TO_HI_LO( stats_hw->macs[i]._hw_field, mcp_port->mac_stx[i]._mcp_field );\
} \
else \
{ \
LM_STATS_HI_LO_TO_64( mcp_port->mac_stx[i]._mcp_field, stats_hw->macs[i]._hw_field ) ;\
}
for( i = stats_macs_idx; i < STATS_MACS_IDX_MAX; i++ )
{
LM_STATS_PMF_TO_FROM( rx_stat_dot3statsfcserrors, stats_rx.rx_stat_dot3statsfcserrors, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_dot3statsalignmenterrors, stats_rx.rx_stat_dot3statsalignmenterrors, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_etherstatsundersizepkts, stats_rx.rx_stat_etherstatsundersizepkts, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_xonpauseframesreceived, stats_rx.rx_stat_xonpauseframesreceived, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_xoffpauseframesreceived, stats_rx.rx_stat_xoffpauseframesreceived, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_outxonsent, stats_tx.tx_stat_outxonsent, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_outxoffsent, stats_tx.tx_stat_outxoffsent, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statssinglecollisionframes, stats_tx.tx_stat_dot3statssinglecollisionframes, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsmultiplecollisionframes, stats_tx.tx_stat_dot3statsmultiplecollisionframes, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statslatecollisions, stats_tx.tx_stat_dot3statslatecollisions, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsexcessivecollisions, stats_tx.tx_stat_dot3statsexcessivecollisions, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_maccontrolframesreceived, stats_rx.rx_stat_maccontrolframesreceived, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_mac_xpf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xpf, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_mac_xcf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts64octets, stats_tx.tx_stat_etherstatspkts64octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts65octetsto127octets, stats_tx.tx_stat_etherstatspkts65octetsto127octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts128octetsto255octets, stats_tx.tx_stat_etherstatspkts128octetsto255octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts256octetsto511octets, stats_tx.tx_stat_etherstatspkts256octetsto511octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts512octetsto1023octets, stats_tx.tx_stat_etherstatspkts512octetsto1023octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts1024octetsto1522octets, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatspktsover1522octets, stats_tx.tx_stat_etherstatspktsover1522octets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_mac_2047, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_mac_4095, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_mac_9216, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_mac_16383, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_etherstatsfragments, stats_rx.rx_stat_etherstatsfragments, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_etherstatsjabbers, stats_rx.rx_stat_etherstatsjabbers, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsdeferredtransmissions, stats_tx.tx_stat_dot3statsdeferredtransmissions, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_dot3statsinternalmactransmiterrors, stats_tx.tx_stat_dot3statsinternalmactransmiterrors, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_etherstatscollisions, stats_tx.tx_stat_etherstatscollisions, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_flowcontroldone, stats_tx.tx_stat_flowcontroldone, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_xoffstateentered, stats_rx.rx_stat_xoffstateentered, b_is_to ) ;
LM_STATS_PMF_TO_FROM( rx_stat_ifhcinbadoctets, stats_rx.rx_stat_ifhcinbadoctets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_ifhcoutbadoctets, stats_tx.tx_stat_ifhcoutbadoctets, b_is_to ) ;
LM_STATS_PMF_TO_FROM( tx_stat_mac_ufl, stats_tx.tx_stat_ifhcoutdiscards, b_is_to ) ;
/* NOTE(review): rx_stat_dot3statscarriersenseerrors is copied a second
 * time here (also copied near the top of this loop). Harmless since the
 * copy is idempotent, but it looks like a leftover duplicate. */
LM_STATS_PMF_TO_FROM( rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors, b_is_to ) ;
}
/* Fields outside the per-MAC array: BRB discards (NIG), PFC frame counters
 * (taken from macs[0] only) and the EEE LPI entry count (MISC). */
if( b_is_to)
{
LM_STATS_64_TO_HI_LO( stats_hw->nig.brb_discard, mcp_port->brb_drop ) ;
LM_STATS_64_TO_HI_LO( stats_hw->macs->stats_tx.tx_stat_pfcPacketCounter, mcp_port->pfc_frames_tx );
LM_STATS_64_TO_HI_LO( stats_hw->macs->stats_rx.rx_stat_pfcPacketCounter, mcp_port->pfc_frames_rx );
LM_STATS_64_TO_HI_LO( stats_hw->misc.tx_lpi_count, mcp_port->eee_lpi_count);
}
else
{
LM_STATS_HI_LO_TO_64( mcp_port->brb_drop, stats_hw->nig.brb_discard ) ;
LM_STATS_HI_LO_TO_64( mcp_port->pfc_frames_tx, stats_hw->macs->stats_tx.tx_stat_pfcPacketCounter );
LM_STATS_HI_LO_TO_64( mcp_port->pfc_frames_rx, stats_hw->macs->stats_rx.rx_stat_pfcPacketCounter );
LM_STATS_HI_LO_TO_64( mcp_port->eee_lpi_count, stats_hw->misc.tx_lpi_count);
}
}
/* Compute the size (in 32-bit words) of the port-statistics block to
 * exchange with the MCP. Prefer the size the MFW publishes in SHMEM2,
 * clamped to our own structure; otherwise fall back to a bootcode-version
 * dependent prefix of host_port_stats_t.
 */
STATIC u16_t lm_stats_port_size(IN struct _lm_device_t *pdev)
{
    const u32_t bc_rev_major  = LM_GET_BC_REV_MAJOR(pdev);
    const u8_t  b_pfc_in_bc   = bc_rev_major >= REQ_BC_VER_4_PFC_STATS_SUPPORTED;
    size_t      size_in_bytes = 0;
    u32_t       shmem_size    = 0;

    if (LM_SHMEM2_HAS(pdev, sizeof_port_stats))
    {
        /* Never write beyond what either side understands. */
        LM_SHMEM2_READ(pdev, OFFSETOF(struct shmem2_region, sizeof_port_stats), &shmem_size);
        size_in_bytes = min((size_t)shmem_size, sizeof(pdev->vars.stats.stats_mirror.stats_mcp_port));
    }
    else if (b_pfc_in_bc)
    {
        /* Bootcode knows the PFC counters: include everything up to them. */
        size_in_bytes = OFFSETOF(host_port_stats_t, pfc_frames_rx_lo) +
                        sizeof(pdev->vars.stats.stats_mirror.stats_mcp_port.pfc_frames_rx_lo);
    }
    else
    {
        /* Older bootcode: stop at the legacy end-of-structure marker. */
        size_in_bytes = OFFSETOF(host_port_stats_t, not_used) +
                        sizeof(pdev->vars.stats.stats_mirror.stats_mcp_port.not_used);
    }

    size_in_bytes /= sizeof(u32_t); /* DMAE length is in 32-bit words */

    DbgBreakIf( size_in_bytes >= 1u<<(sizeof(u16_t)*8) );

    return (u16_t)size_in_bytes;
}
/* Zero the MCP port-statistics area via DMAE.
 * Succeeds trivially when the MCP never provided a port-stats address.
 */
lm_status_t lm_stats_port_zero( IN struct _lm_device_t* pdev )
{
    if( 0 == pdev->vars.fw_port_stats_ptr )
    {
        return LM_STATUS_SUCCESS; /* nothing to clear */
    }

    REG_WR_DMAE_LEN_ZERO(pdev,
                         pdev->vars.fw_port_stats_ptr,
                         lm_stats_port_size(pdev) );

    return LM_STATUS_SUCCESS;
}
/* Write the driver's port statistics out to the MCP port-stats area.
 * Folds the HW mirror into the MCP structure first, then DMAs it to the
 * address the MCP supplied. Trivial success when no address was supplied.
 */
lm_status_t lm_stats_port_save( IN struct _lm_device_t* pdev )
{
    host_port_stats_t* mcp_port       = NULL;
    u16_t              size_in_dwords = 0;

    if( 0 == pdev->vars.fw_port_stats_ptr )
    {
        return LM_STATUS_SUCCESS;
    }

    lm_stats_port_to_from( pdev, TRUE );

    size_in_dwords = lm_stats_port_size(pdev);
    mcp_port       = &pdev->vars.stats.stats_mirror.stats_mcp_port;

    /* Bump the save counter and mirror it into the (historically named)
     * not_used field -- presumably lets the MCP spot a fresh snapshot;
     * TODO confirm against MFW documentation. */
    mcp_port->not_used = ++mcp_port->host_port_stats_counter;

    REG_WR_DMAE_LEN(pdev,
                    pdev->vars.fw_port_stats_ptr,
                    mcp_port,
                    size_in_dwords );

    return LM_STATUS_SUCCESS;
}
/* Read the MCP port-statistics area into the driver and unpack it into the
 * HW mirror. Trivial success when the MCP never supplied an address.
 */
lm_status_t lm_stats_port_load( IN struct _lm_device_t* pdev )
{
    host_port_stats_t* mcp_port       = NULL;
    u16_t              size_in_dwords = 0;

    if( 0 == pdev->vars.fw_port_stats_ptr )
    {
        return LM_STATUS_SUCCESS;
    }

    size_in_dwords = lm_stats_port_size(pdev);
    mcp_port       = &pdev->vars.stats.stats_mirror.stats_mcp_port;

    /* Same counter bump as on save (original behavior kept as-is);
     * not_used historically carries the host_port_stats_counter value. */
    mcp_port->not_used = ++mcp_port->host_port_stats_counter;

    REG_RD_DMAE_LEN(pdev,
                    pdev->vars.fw_port_stats_ptr,
                    mcp_port,
                    size_in_dwords );

    /* Unpack the hi/lo pairs we just read back into the 64-bit HW mirror. */
    lm_stats_port_to_from( pdev, FALSE );

    return LM_STATUS_SUCCESS;
}
/* Push the driver's statistics to the MCP: per-function stats always
 * (when an address exists), port-wide stats only when we are the PMF.
 * No-op when pdev is NULL or when running without an MCP (test mode).
 */
void lm_stats_mgmt_assign( IN struct _lm_device_t* pdev )
{
    if CHK_NULL(pdev)
    {
        return;
    }

    if ( GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ) )
    {
        return;
    }

    if( pdev->vars.fw_func_stats_ptr )
    {
        lm_stats_mgmt_assign_func(pdev);
    }

    /* Port-wide statistics are owned by the PMF only. */
    if( pdev->vars.fw_port_stats_ptr && IS_PMF(pdev) )
    {
        lm_stats_port_save(pdev);
    }
}
/* Handle a PMF role change.
 *   b_on == TRUE : this function becomes PMF -- load the current port
 *                  statistics image from the MCP.
 *   b_on == FALSE: this function stops being PMF -- refresh state; when
 *                  the link is down, save the port statistics back.
 * Returns LM_STATUS_INVALID_PARAMETER for a NULL pdev.
 */
lm_status_t lm_stats_on_pmf_update( struct _lm_device_t* pdev, IN u8_t b_on )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    if( b_on )
    {
        return lm_stats_port_load( pdev );
    }

    lm_status = lm_stats_on_update_state(pdev);

    /* Only SUCCESS or LINK_DOWN are expected here. */
    DbgBreakIf( ( LM_STATUS_SUCCESS != lm_status ) && ( LM_STATUS_LINK_DOWN != lm_status ) );

    if( LM_STATUS_LINK_DOWN == lm_status )
    {
        lm_status = lm_stats_port_save( pdev );
    }

    return lm_status;
}
/* First-time PMF initialization: start from a zeroed MCP port-stats area.
 * Returns LM_STATUS_INVALID_PARAMETER for a NULL pdev.
 */
lm_status_t lm_stats_on_pmf_init( struct _lm_device_t* pdev )
{
    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    return lm_stats_port_zero( pdev );
}
/* Collect hardware statistics for this port.
 * Runs the DMAE statistics collection first; on success also reads the
 * chip-dependent extras: EEE LPI entry count on E3, NIG egress MAC packet
 * counters on everything else.
 * Returns the lm_stats_dmae() status on failure, LM_STATUS_SUCCESS otherwise.
 */
lm_status_t lm_stats_hw_collect( struct _lm_device_t* pdev )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u8_t        port      = PORT_ID(pdev);
    /* Register addresses differ per port. */
    const u32_t pkt0 = port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : NIG_REG_STAT0_EGRESS_MAC_PKT0 ;
    const u32_t pkt1 = port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : NIG_REG_STAT0_EGRESS_MAC_PKT1 ;
    const u32_t eee  = port ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1 : MISC_REG_CPMU_LP_SM_ENT_CNT_P0 ;

    lm_status = lm_stats_dmae( pdev ) ;
    if( LM_STATUS_SUCCESS != lm_status )
    {
        return lm_status;
    }

    /* The two branches are mutually exclusive; use if/else instead of the
     * original pair of independent CHIP_IS_E3() tests. */
    if (CHIP_IS_E3(pdev))
    {
        pdev->vars.stats.stats_collect.stats_hw.misc_stats_query.tx_lpi_count = REG_RD(pdev, eee);
    }
    else
    {
        REG_RD_DMAE( pdev, pkt0, &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt0 );
        REG_RD_DMAE( pdev, pkt1, &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt1 );
    }

    return lm_status ;
}
/* Port-scope statistics init: clear the management (MCP) statistics area
 * of every function mailbox on this port. */
void lm_stats_init_port_part( struct _lm_device_t* pdev )
{
lm_stats_mgmt_clear_all_func(pdev);
}
void lm_stats_init_func_part( struct _lm_device_t* pdev )
{
if (IS_PMF(pdev) && IS_MULTI_VNIC(pdev))
{
lm_stats_on_pmf_init(pdev);
}
lm_stats_mgmt_read_func_base(pdev);
}