#include "lm5710.h"
#include "command.h"
#include "bd_chain.h"
#include "ecore_common.h"
#include "mm.h"
#define OOO_CID_USTRORM_PROD_DIFF (0x4000)
/**
 * Check whether the RCQ chain of the given L2 client has completions
 * pending, i.e. the HW consumer index has advanced past our SW consumer.
 *
 * @return TRUE when at least one new CQE is available, FALSE otherwise.
 */
u8_t lm_is_rx_completion(lm_device_t *pdev, u8_t chain_idx)
{
    lm_rcq_chain_t *rcq_chain  = &LM_RCQ(pdev, chain_idx);
    u8_t            has_new_cqe = FALSE;

    DbgBreakIf(!(pdev && rcq_chain));

    if (rcq_chain->hw_con_idx_ptr != NULL)
    {
        u16_t const hw_cons = mm_le16_to_cpu(*rcq_chain->hw_con_idx_ptr);

        if (hw_cons != lm_bd_chain_cons_idx(&rcq_chain->bd_chain))
        {
            has_new_cqe = TRUE;
        }
    }

    DbgMessage(pdev, INFORMi, "lm_is_rx_completion: result is:%s\n", has_new_cqe ? "TRUE" : "FALSE");

    return has_new_cqe;
}
/**
 * Publish the RX BD / RCQ (and optionally SGE) producer indices to the
 * chip's USTORM internal memory for the given chain.
 *
 * @param iro_prod_offset  IRO offset of the producers structure in USTORM
 * @param rcq_chain_bd     RCQ chain whose producer goes in the low 16 bits
 * @param rx_chain_bd      RX BD chain whose producer goes in the high 16 bits
 * @param rx_chain_sge     optional SGE chain; when present a single 64-bit
 *                         write publishes all three producers atomically
 * @param chain_idx        L2 client chain index
 */
static void FORCEINLINE lm_rx_set_prods( lm_device_t *pdev,
                                         u16_t const iro_prod_offset,
                                         lm_bd_chain_t *rcq_chain_bd,
                                         lm_bd_chain_t *rx_chain_bd,
                                         lm_bd_chain_t *rx_chain_sge,
                                         const u32_t chain_idx )
{
    lm_rx_chain_t* rxq_chain = &LM_RXQ(pdev, chain_idx);
    u32_t          val32     = 0;
    u64_t          val64     = 0;
    u16_t          val16_lo  = lm_bd_chain_prod_idx(rcq_chain_bd);
    u16_t          val16_hi  = lm_bd_chain_prod_idx(rx_chain_bd);
    /* VFs access the USDM queue zone through their own BAR window. */
    u32_t const ustorm_bar_offset = (IS_CHANNEL_VFDEV(pdev)) ? VF_BAR0_USDM_QUEUES_OFFSET: BAR_USTRORM_INTMEM ;

    if(OOO_CID(pdev) == chain_idx)
    {
        /* OOO chain never uses SGEs and is never owned by a channel VF. */
        DbgBreakIfFastPath( NULL != rx_chain_sge );
        DbgBreakIfFastPath(IS_CHANNEL_VFDEV(pdev));

        /* TSTORM tracks the OOO BD producer excluding next-page BDs. */
        LM_INTMEM_WRITE16(PFDEV(pdev),
                          TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(FUNC_ID(pdev)),
                          rxq_chain->common.bd_prod_without_next,
                          BAR_TSTRORM_INTMEM);

        /* FW expects the OOO USTORM producers biased by a fixed constant. */
        val16_lo += OOO_CID_USTRORM_PROD_DIFF;
        val16_hi += OOO_CID_USTRORM_PROD_DIFF;
    }

    /* Pack {bd_prod:16 | cqe_prod:16}.
     * BUGFIX: cast BEFORE shifting - the previous (u32_t)(val16_hi << 16)
     * left-shifted an int-promoted u16 into the sign bit, which is
     * undefined behavior for producer values >= 0x8000. */
    val32 = (((u32_t)val16_hi << 16) | val16_lo);

    if( rx_chain_sge )
    {
        /* One 64-bit write publishes {sge_prod:16 | bd_prod:16 | cqe_prod:16}. */
        val64 = (((u64_t)lm_bd_chain_prod_idx(rx_chain_sge))<<32) | val32 ;

        LM_INTMEM_WRITE64(PFDEV(pdev),
                          iro_prod_offset,
                          val64,
                          ustorm_bar_offset);
    }
    else
    {
        LM_INTMEM_WRITE32(PFDEV(pdev),
                          iro_prod_offset,
                          val32,
                          ustorm_bar_offset);
    }
}
/**
 * Post receive buffers to the RX BD chain (or to the TPA SGE chain when
 * is_tpa is TRUE) and publish the new producers to the chip.
 *
 * @param pdev       device handle
 * @param chain_idx  L2 client chain index
 * @param packet     optional packet to post first; when NULL packets are
 *                   taken from the chain's free descriptor queue
 * @param is_tpa     TRUE - post onto the TPA SGE chain,
 *                   FALSE - post onto the regular RX chain
 *
 * @return number of packets actually placed on the chain.
 *
 * For the regular RX chain the limiting resource is the RCQ: one RCQ entry
 * is produced for every BD posted, so availability is checked against the
 * RCQ chain.  For TPA, availability is checked against the SGE BD chain
 * itself.
 */
u32_t
lm_post_buffers(
    lm_device_t *pdev,
    u32_t chain_idx,
    lm_packet_t *packet,
    u8_t const is_tpa)
{
    lm_rx_chain_common_t* rxq_chain_common = NULL;
    lm_bd_chain_t*        rx_chain_bd      = NULL;
    lm_rx_chain_t*        rxq_chain        = NULL;
    lm_tpa_chain_t *      tpa_chain        = NULL;
    lm_bd_chain_t*        bd_chain_to_check= NULL;
    lm_rcq_chain_t*       rcq_chain        = &LM_RCQ(pdev, chain_idx);
    lm_bd_chain_t*        rx_chain_sge     = NULL;
    u32_t                 pkt_queued       = 0;
    struct eth_rx_bd*     cur_bd           = NULL;
    struct eth_rx_sge*    cur_sge          = NULL;
    u32_t                 prod_bseq        = 0;
    u32_t                 rcq_prod_bseq    = 0;
    u16_t                 current_prod     = 0;
    u16_t                 active_entry     = 0;

    DbgMessage(pdev, INFORMl2 , "### lm_post_buffers\n");

    /* Resolve the chain objects to operate on: regular RX or TPA. */
    if(FALSE == is_tpa)
    {
        rxq_chain_common  = &LM_RXQ_COMMON(pdev, chain_idx);
        rx_chain_bd       = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
        rx_chain_sge      = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
        rxq_chain         = &LM_RXQ(pdev, chain_idx);
        tpa_chain         = NULL;
        /* Regular RX is limited by RCQ entries, not by RX BDs. */
        bd_chain_to_check = &rcq_chain->bd_chain;
    }
    else
    {
        rxq_chain_common  = &LM_TPA_COMMON(pdev, chain_idx);
        rx_chain_bd       = &LM_TPA_CHAIN_BD(pdev, chain_idx);
        rx_chain_sge      = NULL;
        rxq_chain         = NULL;
        tpa_chain         = &LM_TPA(pdev, chain_idx);
        bd_chain_to_check = rx_chain_bd;
    }

    /* BUGFIX: this consistency check used to run BEFORE rx_chain_sge /
     * rx_chain_bd were assigned (both still NULL), making it a guaranteed
     * no-op.  It must run after the chain pointers are resolved. */
    DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );

    if(packet)
    {
        DbgBreakIfFastPath(SIG(packet) != L2PACKET_RX_SIG);

        if(lm_bd_chain_is_empty(bd_chain_to_check))
        {
            /* No room on the chain - park the packet on the free queue. */
            s_list_push_tail(&rxq_chain_common->free_descq, &packet->link);
            packet = NULL;
        }
    }
    else if(!lm_bd_chain_is_empty(bd_chain_to_check))
    {
        packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
    }

    /* Work on local copies of the byte-sequence counters; written back once. */
    prod_bseq     = rxq_chain_common->prod_bseq;
    rcq_prod_bseq = rcq_chain->prod_bseq;

    while(packet)
    {
        current_prod = lm_bd_chain_prod_idx(rx_chain_bd);
        cur_bd       = lm_bd_chain_produce_bd(rx_chain_bd);
        rxq_chain_common->bd_prod_without_next++;
        cur_sge      = rx_chain_sge ? lm_bd_chain_produce_bd(rx_chain_sge) : NULL;

        prod_bseq   += packet->l2pkt_rx_info->mem_size;

        if(FALSE == is_tpa)
        {
            /* Each posted BD consumes one RCQ entry - account for it. */
            rcq_prod_bseq += packet->l2pkt_rx_info->mem_size;
            lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
        }

        packet->u1.rx.next_bd_idx = lm_bd_chain_prod_idx(rx_chain_bd);

#if L2_RX_BUF_SIG
        /* Verify the debug signatures framing the receive buffer. */
        DbgBreakIfFastPath(SIG(packet->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
        DbgBreakIfFastPath(END_SIG(packet->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
#endif

        cur_bd->addr_lo = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.low);
        cur_bd->addr_hi = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.high);

        if( cur_sge )
        {
            cur_sge->addr_lo = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.low);
            cur_sge->addr_hi = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.high);
        }

        pkt_queued++;

        if(FALSE == is_tpa)
        {
            s_list_push_tail(&rxq_chain->active_descq, &packet->link);
        }
        else
        {
            /* TPA packets are tracked by BD slot in a flat active array. */
            active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, current_prod);
            LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx,active_entry);
            tpa_chain->sge_chain.active_descq_array[active_entry] = packet;
        }

        if(lm_bd_chain_is_empty(bd_chain_to_check))
        {
            break;
        }

        packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
    }

    rxq_chain_common->prod_bseq = prod_bseq;
    rcq_chain->prod_bseq        = rcq_prod_bseq;

    if(pkt_queued)
    {
        /* Publish the new producers to the chip. */
        if(FALSE == is_tpa)
        {
            lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge ,chain_idx);
        }
        else
        {
            lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, &LM_RXQ_CHAIN_BD(pdev, chain_idx), &LM_TPA_CHAIN_BD(pdev, chain_idx) ,chain_idx);
        }
    }

    DbgMessage(pdev, INFORMl2 , "lm_post_buffers - bd con: %d bd prod: %d \n",
               lm_bd_chain_cons_idx(rx_chain_bd),lm_bd_chain_prod_idx(rx_chain_bd));
    DbgMessage(pdev, INFORMl2 , "lm_post_buffers - cq con: %d cq prod: %d \n",
               lm_bd_chain_cons_idx(&rcq_chain->bd_chain) ,lm_bd_chain_prod_idx(&rcq_chain->bd_chain));

    return pkt_queued;
}
/**
 * Track the highest SGE index the FW has handed back, with u16 wrap-around.
 * Distances are measured backwards from the current SGE producer: the index
 * with the smaller backward distance is the more recent one.
 */
__inline STATIC void
lm_tpa_sge_update_last_max(IN lm_device_t* pdev,
                           IN const u32_t chain_idx,
                           IN const u16_t new_index)
{
    lm_tpa_sge_chain_t* sge_tpa_chain = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
    u16_t const prod_idx   = lm_bd_chain_prod_idx(&LM_TPA_CHAIN_BD(pdev, chain_idx));
    u16_t const dist_new   = (u16_t)(prod_idx - new_index);
    u16_t const dist_saved = (u16_t)(prod_idx - sge_tpa_chain->last_max_con);

    if (dist_new < dist_saved)
    {
        sge_tpa_chain->last_max_con = new_index;
    }
}
/* Advance the TPA SGE chain consumer by one full 64-bit mask element
 * (BIT_VEC64_ELEM_SZ BDs) and re-arm the corresponding mask word.
 * Handles the page-boundary case where the chain skips "next page" BDs.
 * NOTE(review): cons_idx is bumped raw here and again via
 * lm_bd_chain_bds_consumed() below - presumably the two updates maintain
 * different counters of the bd_chain; verify against bd_chain.h. */
__inline STATIC void
lm_tpa_incr_sge_cons( IN lm_device_t* pdev,
IN const u32_t chain_idx,
IN const u16_t mask_entry_idx)
{
lm_tpa_sge_chain_t* sge_tpa_chain = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
lm_bd_chain_t* bd_chain = &LM_TPA_CHAIN_BD(pdev, chain_idx);
u16_t bd_entry = 0;
u16_t active_entry = 0;
u16_t i = 0;
/* Consume one mask element worth of SGE BDs at once. */
bd_chain->cons_idx += BIT_VEC64_ELEM_SZ;
DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= mask_entry_idx);
/* Re-arm the whole mask word: all of its entries become available again. */
sge_tpa_chain->mask_array[mask_entry_idx] = BIT_VEC64_ELEM_ONE_MASK;
/* BDs-per-page must be a multiple of the mask element size, and the
 * element size must be smaller than a page's worth of BDs. */
DbgBreakIf(0 != (lm_bd_chain_bds_per_page(bd_chain) & BIT_VEC64_ELEM_MASK));
DbgBreakIf(BIT_VEC64_ELEM_SZ >= lm_bd_chain_bds_per_page(bd_chain));
if((lm_bd_chain_cons_idx(bd_chain) & lm_bd_chain_bds_per_page_mask(bd_chain)) == 0)
{
/* Page boundary reached: next-page pointer BDs are skipped, so fewer
 * data BDs are consumed, and the skipped entries must have their
 * active-mask bits cleared explicitly. */
lm_bd_chain_bds_consumed(bd_chain, (BIT_VEC64_ELEM_SZ - lm_bd_chain_bds_skip_eop(bd_chain)));
for(i = 1; i <= lm_bd_chain_bds_skip_eop(bd_chain); i++ )
{
bd_entry = lm_bd_chain_cons_idx(bd_chain) - i;
active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, bd_entry);
LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);
}
}
else
{
lm_bd_chain_bds_consumed(bd_chain, BIT_VEC64_ELEM_SZ);
}
}
/* Handle a TPA "end aggregation" CQE: complete the start packet of the
 * aggregation, collect all SGE packets listed in the CQE onto rcvd_list,
 * and advance the SGE chain consumer over fully-released mask elements.
 *
 * @param rcvd_list   list onto which completed packets are pushed
 * @param cqe         the end-aggregation CQE (little-endian fields)
 * @param pkt_cnt     running completed-packet count (returned updated)
 * @param queue_index TPA aggregation queue that ended
 * @return updated pkt_cnt. */
STATIC u32_t
lm_tpa_stop( IN lm_device_t* pdev,
INOUT s_list_t* rcvd_list,
IN const struct eth_end_agg_rx_cqe* cqe,
IN const u32_t chain_idx,
IN u32_t pkt_cnt,
IN const u8_t queue_index)
{
lm_tpa_chain_t* tpa_chain = &LM_TPA(pdev, chain_idx);
lm_tpa_sge_chain_t* sge_tpa_chain = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
lm_bd_chain_t* bd_chain = &LM_TPA_CHAIN_BD(pdev, chain_idx);
/* The packet that opened this aggregation (saved by lm_tpa_start). */
lm_packet_t* pkt = tpa_chain->start_coales_bd[queue_index].packet;
/* Bytes carried by SGEs = total aggregated length minus the start packet. */
u32_t sge_size = mm_le16_to_cpu(cqe->pkt_len) - pkt->l2pkt_rx_info->size;
u32_t const sge_num_elem = DIV_ROUND_UP_BITS(sge_size, LM_TPA_PAGE_BITS);
u32_t fw_sge_index = 0;
u16_t active_entry = 0;
u16_t first_max_set = 0;
u16_t last_max_set = 0;
u16_t i = 0;
u8_t b_force_first_enter = FALSE;
u16_t loop_cnt_dbg = 0;
const u32_t lm_tpa_page_size = LM_TPA_PAGE_SIZE;
DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) < pkt->l2pkt_rx_info->size);
DbgBreakIf( TRUE != tpa_chain->start_coales_bd[queue_index].is_entry_used);
/* Release the aggregation queue slot for reuse. */
tpa_chain->start_coales_bd[queue_index].is_entry_used = FALSE;
SET_FLAGS(pkt->l2pkt_rx_info->flags ,LM_RX_FLAG_START_RSC_TPA);
/* Fill in aggregation statistics reported by the FW. */
pkt->l2pkt_rx_info->total_packet_size = mm_le16_to_cpu(cqe->pkt_len);
pkt->l2pkt_rx_info->coal_seg_cnt = mm_le16_to_cpu(cqe->num_of_coalesced_segs);
pkt->l2pkt_rx_info->dup_ack_cnt = cqe->pure_ack_count;
pkt->l2pkt_rx_info->ts_delta = mm_le32_to_cpu(cqe->timestamp_delta);
DbgBreakIfFastPath(pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE);
/* Complete the start packet first. */
s_list_push_tail(rcvd_list, &pkt->link);
pkt_cnt++;
ASSERT_STATIC(LM_TPA_MAX_AGG_SIZE == ARRSIZE(cqe->sgl_or_raw_data.sgl));
DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) < sge_num_elem);
if(0 == sge_num_elem )
{
/* Whole aggregation fit in the start packet - no SGEs to reclaim. */
DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) != pkt->l2pkt_rx_info->size);
return pkt_cnt;
}
/* Walk the SGL in the CQE and complete each SGE buffer. */
for(fw_sge_index = 0; fw_sge_index < sge_num_elem; fw_sge_index++)
{
DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) <= fw_sge_index);
active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[fw_sge_index]));
LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx, active_entry);
pkt = tpa_chain->sge_chain.active_descq_array[active_entry];
LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);
#if (DBG)
tpa_chain->dbg_params.pck_ret_from_chip++;
#endif
/* Every SGE except the last must be a full page. */
DbgBreakIf((fw_sge_index != (sge_num_elem - 1)) && (sge_size < LM_TPA_PAGE_SIZE ));
pkt->l2pkt_rx_info->size = min(sge_size ,lm_tpa_page_size);
s_list_push_tail(rcvd_list, &(pkt->link));
pkt_cnt++;
/* NOTE: may wrap below zero on the last iteration - harmless, loop ends. */
sge_size -= LM_TPA_PAGE_SIZE;
}
#if defined(_NTDDK_)
#pragma warning (push)
#pragma warning( disable:6385 )
#endif
/* Remember the furthest SGE index the FW has returned so far. */
lm_tpa_sge_update_last_max(pdev,
chain_idx,
mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_num_elem -1]));
#if defined(_NTDDK_)
#pragma warning (pop)
#endif
/* Advance the SGE consumer over every fully-cleared mask element between
 * the current consumer and the last-max index. */
first_max_set = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, lm_bd_chain_cons_idx(bd_chain));
last_max_set = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, sge_tpa_chain->last_max_con);
DbgBreakIf(0 != (lm_bd_chain_cons_idx(bd_chain) & BIT_VEC64_ELEM_MASK));
if((last_max_set == first_max_set) && (lm_bd_chain_is_full(bd_chain)))
{
/* Consumer and last-max land on the same element while the chain is
 * full: force at least one pass through the loop body. */
b_force_first_enter = TRUE;
}
for (i = first_max_set;((i != last_max_set) || (TRUE == b_force_first_enter)); i = LM_TPA_MASK_NEXT_ELEM(pdev, chain_idx, i))
{
DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= i);
if (sge_tpa_chain->mask_array[i])
{
/* Element still has in-flight SGEs - stop reclaiming here. */
break;
}
b_force_first_enter = FALSE;
lm_tpa_incr_sge_cons(pdev,
chain_idx,
i);
loop_cnt_dbg++;
DbgBreakIf(LM_TPA_MASK_LEN(pdev,chain_idx) < loop_cnt_dbg);
}
return pkt_cnt;
}
/**
 * Record the packet that opens a TPA aggregation on the given queue.
 * The slot is released again by lm_tpa_stop() when the aggregation ends.
 */
__inline STATIC void
lm_tpa_start( IN lm_device_t* pdev,
              IN lm_packet_t* pkt,
              IN const u32_t chain_idx,
              IN const u8_t queue_index)
{
    lm_tpa_chain_t* tpa = &LM_TPA(pdev, chain_idx);

    /* The queue slot must be free before a new aggregation may start on it. */
    DbgBreakIf( FALSE != tpa->start_coales_bd[queue_index].is_entry_used);

    tpa->start_coales_bd[queue_index].packet        = pkt;
    tpa->start_coales_bd[queue_index].is_entry_used = TRUE;
}
/**
 * Set the RX-info flags for a TPA start-aggregation packet.
 * TPA only aggregates TCP over IPv4/IPv6, so the parsing flags are asserted
 * accordingly and the checksum flags are reported as good.
 */
__inline STATIC void
lm_tpa_start_flags_handle( IN lm_device_t* pdev,
                           IN const struct eth_fast_path_rx_cqe* cqe,
                           INOUT lm_packet_t* pkt,
                           IN const u16_t parse_flags)
{
    u16_t const eth_proto = GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
                                                  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT);

    /* Aggregated traffic must be IPv4 or IPv6. */
    DbgBreakIf(FALSE ==
               ((PRS_FLAG_OVERETH_IPV4 == eth_proto) ||
                (PRS_FLAG_OVERETH_IPV6 == eth_proto)));

    if(PRS_FLAG_OVERETH_IPV4 == eth_proto)
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV4_DATAGRAM);
        /* IPv4 checksum must have been validated by HW and found good. */
        DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
        DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG));
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
    }
    else
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV6_DATAGRAM);
        /* IPv6 has no IP checksum - HW must report "no validation". */
        DbgBreakIf(0 == GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
    }

    /* Aggregated frames are never IP fragments and are always TCP. */
    DbgBreakIf( GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS));
    DbgBreakIf(PRS_FLAG_OVERIP_TCP != GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL,
                                                            PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT));
    SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
    DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG));
    DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG));

/* The *_CKSUM_IS_GOOD/_BAD flag of each protocol is its IS_* flag shifted
 * by a fixed amount; these defines are also used by lm_regular_flags_handle. */
#define SHIFT_IS_GOOD 1
#define SHIFT_IS_BAD  2
    ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
    ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
    ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT << SHIFT_IS_GOOD);
    ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_TCP_SEGMENT << SHIFT_IS_BAD);

    /* Report a good TCP checksum by shifting the IS_TCP flag into place. */
    SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT)) << SHIFT_IS_GOOD ) );
}
/* Translate the parsing/status flags of a regular (non-TPA) fast-path CQE
 * into LM_RX_FLAG_* bits on the packet: L3 protocol, L4 protocol, and the
 * IP/L4 checksum verdicts, honoring HW "no validation" indications. */
STATIC void
lm_regular_flags_handle( IN lm_device_t* pdev,
IN const struct eth_fast_path_rx_cqe* cqe,
INOUT lm_packet_t* pkt,
IN const u16_t parse_flags)
{
/* L3: mark IPv4 or IPv6 and record the HW IP-checksum verdict. */
if(((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4) ||
((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV6))
{
pkt->l2pkt_rx_info->flags |=
(GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4 ?
LM_RX_FLAG_IS_IPV4_DATAGRAM :
LM_RX_FLAG_IS_IPV6_DATAGRAM;
if(!GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG))
{
/* HW validated the IP checksum - record good/bad accordingly. */
if GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)
{
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ip_cs_error_count);
}
else
{
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
}
}
}
/* L4: only classify unfragmented packets as TCP/UDP. */
if(!GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS))
{
if((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL) >>
PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_TCP)
{
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
DbgMessage(pdev, INFORM, "--- TCP Packet --- \n");
}
else if((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL) >>
PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_UDP)
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IS_UDP_DATAGRAM);
DbgMessage(pdev, INFORM, "--- UDP Packet --- \n");
}
}
/* L4 checksum verdict, when the packet is TCP/UDP and HW validated it. */
if( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
!GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
{
/* Relies on the *_CKSUM_IS_* flags being the IS_* flags shifted by
 * SHIFT_IS_GOOD / SHIFT_IS_BAD (defined in lm_tpa_start_flags_handle). */
ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT << SHIFT_IS_GOOD);
ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD == LM_RX_FLAG_IS_TCP_SEGMENT << SHIFT_IS_BAD);
DbgMessage(pdev, INFORM, " Checksum validated.\n");
if GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_tcp_cs_error_count);
DbgMessage(pdev, INFORM, " BAD checksum.\n");
}
else if (GET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IP_CKSUM_IS_BAD))
{
/* A bad IP header checksum invalidates the L4 verdict too. */
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
DbgMessage(pdev, INFORM, " BAD IP checksum\n");
}
else
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
DbgMessage(pdev, INFORM, " GOOD checksum.\n");
}
}
else
{
DbgMessage(pdev, INFORM, " Checksum NOT validated.\n");
/* HW skips L4 validation when TCP options are present; derive a
 * verdict from the IP checksum instead of leaving it unreported. */
if(GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) &&
GET_FLAGS(cqe->pars_flags.flags, PARSING_FLAGS_TCP_OPTIONS_EXIST))
{
DbgMessage(pdev, INFORM, " TCP Options exist - forcing return value.\n");
if(GET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IP_CKSUM_IS_BAD))
{
DbgMessage(pdev, INFORM, " IP checksum invalid - reporting BAD checksum.\n");
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
}
else
{
DbgMessage(pdev, INFORM, " IP checksum ok - reporting GOOD checksum.\n");
SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
}
}
}
}
/**
 * Record the received frame length on the packet descriptor.
 * chain_idx is currently unused here but kept for interface symmetry.
 */
__inline STATIC void
lm_recv_set_pkt_len( IN lm_device_t* pdev,
                     INOUT lm_packet_t* pkt,
                     IN const u16_t pkt_len,
                     IN const u32_t chain_idx)
{
    pkt->l2pkt_rx_info->size = pkt_len;

    DbgMessage(pdev, VERBOSEl2, "pkt_size: %d\n", pkt->l2pkt_rx_info->size);
}
/**
 * Accumulate a 16-bit one's-complement style sum (RFC 1071) over a buffer.
 *
 * @param hdr          buffer to sum, treated as big-endian 16-bit words
 * @param len_in_bytes number of bytes to sum (may be odd)
 * @param sum          running sum to continue from (carries NOT yet folded)
 * @return the updated running sum; fold and negate via validate_cksum().
 */
INLINE STATIC u32_t
calc_cksum(u16_t *hdr, u32_t len_in_bytes, u32_t sum)
{
    /* Sum whole 16-bit words in network byte order. */
    while (len_in_bytes > 1)
    {
        sum += NTOH16(*hdr);
        len_in_bytes -= 2;
        hdr++;
    }

    /* Odd trailing byte: pad with zero on the right.
     * BUGFIX: read a single byte here instead of a full 16-bit word -
     * the old `NTOH16(*hdr) & 0xFF00` dereferenced one byte past the end
     * of the buffer before masking it away. */
    if (len_in_bytes)
    {
        sum += ((u32_t)(*(u8_t *)hdr)) << 8;
    }

    return sum;
}
/**
 * Fold the carries of a running checksum and test it.
 * A correct Internet checksum folds down to 0xffff.
 *
 * @return non-zero when the checksum is valid.
 */
INLINE STATIC u8_t
validate_cksum(u32_t sum)
{
    u32_t folded = sum;

    /* Repeatedly add the carry bits back into the low 16 bits. */
    while (folded >> 16)
    {
        folded = (folded & 0xffff) + (folded >> 16);
    }

    return ((u16_t)folded == 0xffff);
}
/**
 * Return the IP header length in bytes for the header at hdr.
 * IPv4 (version nibble == 4): IHL field * 4.  Otherwise assumes IPv6,
 * whose base header is a fixed 40 bytes.
 */
INLINE STATIC u16_t
get_ip_hdr_len(u8_t *hdr)
{
    u16_t len = 40; /* IPv6 fixed header size */

    if ((hdr[0] & 0xf0) == 0x40)
    {
        len = (u16_t)((hdr[0] & 0xf) << 2);
    }

    return len;
}
/* Software checksum handling for GRE-encapsulated packets: when the outer
 * headers indicate GRE (protocol 0x2f) carrying Transparent Ethernet
 * Bridging (0x6558), walk to the inner IP/TCP headers and recompute the
 * inner IP and TCP checksums, updating the packet's LM_RX_FLAG_* verdicts
 * (HW only validated the outer headers).  Non-matching packets are left
 * untouched. */
INLINE void
encap_pkt_parsing(struct _lm_device_t *pdev,
lm_packet_t *pkt)
{
u16_t tmp, inner_ip_hdr_len, tcp_length;
u32_t psuedo_cksum;
u8_t *hdr;
/* Too short to contain outer ETH+IP+GRE plus inner ETH+IP+TCP - skip. */
if (pkt->l2pkt_rx_info->total_packet_size < (2*ETHERNET_PACKET_HEADER_SIZE + 2*20 + ETHERNET_GRE_SIZE + 20))
{
return;
}
/* Point past the outer Ethernet header (and VLAN tag, if stripped-in). */
hdr = pkt->l2pkt_rx_info->mem_virt + pdev->params.rcv_buffer_offset + ETHERNET_PACKET_HEADER_SIZE;
if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_VALID_VLAN_TAG)
{
hdr += ETHERNET_VLAN_TAG_SIZE;
}
/* Outer IP protocol must be GRE (0x2f): byte 9 for IPv4, byte 6 (next
 * header) for IPv6. */
if (!(((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV4_DATAGRAM) && (hdr[9] == 0x2f)) ||
((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV6_DATAGRAM) && (hdr[6] == 0x2f))))
{
return;
}
hdr += get_ip_hdr_len(hdr);
/* GRE header check: key bit set with checksum/sequence clear (first byte
 * masked with 0xb0 must equal 0x20), protocol 0x6558 (Transparent
 * Ethernet Bridging). */
if (((hdr[0] & 0xb0) != 0x20) || (hdr[2] != 0x65) || (hdr[3] != 0x58))
{
return;
}
hdr += ETHERNET_GRE_SIZE;
/* Inner Ethernet header; skip an inner VLAN tag (TPID 0x8100) if present. */
if ((hdr[12] == 0x81) && (hdr[13] == 0x00))
{
hdr += ETHERNET_VLAN_TAG_SIZE;
}
hdr += ETHERNET_PACKET_HEADER_SIZE;
inner_ip_hdr_len = get_ip_hdr_len(hdr);
if ((hdr[0] & 0xf0) == 0x40)
{
/* Inner IPv4: re-verify the inner IP header checksum in software
 * (HW's verdict covered only the outer header). */
if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD)
{
if (!validate_cksum(calc_cksum((u16_t*)hdr, inner_ip_hdr_len, 0)))
{
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);
RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
}
}
/* Inner L4 must be TCP (protocol 6); build the IPv4 pseudo-header
 * sum: src+dst addresses (8 bytes at offset 12), protocol 6, and
 * TCP length = total length - IP header length. */
if (hdr[9] == 0x06)
{
psuedo_cksum = calc_cksum((u16_t*)&hdr[12], 8, 0x06);
mm_memcpy(&tmp, &hdr[2], sizeof(u16_t));
tcp_length = NTOH16(tmp) - inner_ip_hdr_len;
psuedo_cksum += tcp_length;
}
else
{
return;
}
}
else if ((hdr[0] & 0xf0) == 0x60)
{
/* Inner IPv6 with TCP next-header: pseudo-header sum over the two
 * 16-byte addresses (32 bytes at offset 8), protocol 6, and TCP
 * length = payload length - extension headers (base 40 excluded). */
if (hdr[6] == 0x06)
{
psuedo_cksum = calc_cksum((u16_t*)&hdr[8], 32, 0x06);
mm_memcpy(&tmp, &hdr[4], sizeof(u16_t));
tcp_length = NTOH16(tmp) - (inner_ip_hdr_len - 40);
psuedo_cksum += tcp_length;
}
else
{
return;
}
}
else
{
return;
}
hdr += inner_ip_hdr_len;
/* Validate the inner TCP checksum over the segment plus pseudo-header. */
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
if (validate_cksum(calc_cksum((u16_t*)hdr, tcp_length, psuedo_cksum)))
{
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
}
else
{
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
}
}
/* Drain new completions from the chain's RCQ: fast-path CQEs complete RX
 * packets onto rcvd_list, TPA start/stop CQEs drive the aggregation state
 * machine, and ramrod CQEs are copied aside into sp_cqes for later
 * processing by lm_complete_ramrods().  Finally the RX/RCQ consumer and
 * producer indices are synchronized with the chip.
 *
 * @return number of packets placed on rcvd_list. */
u32_t
lm_get_packets_rcvd( struct _lm_device_t *pdev,
u32_t const chain_idx,
s_list_t *rcvd_list,
struct _sp_cqes_info *sp_cqes)
{
lm_rx_chain_t* rxq_chain = &LM_RXQ(pdev, chain_idx);
lm_rcq_chain_t* rcq_chain = &LM_RCQ(pdev, chain_idx);
lm_bd_chain_t* rx_chain_bd = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
lm_bd_chain_t* rx_chain_sge = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
lm_tpa_chain_t* tpa_chain = &LM_TPA(pdev, chain_idx);
union eth_rx_cqe* cqe = NULL;
lm_packet_t* pkt = NULL;
u32_t pkt_cnt = 0;
u16_t rx_old_idx = 0;
u16_t cq_new_idx = 0;
u16_t cq_old_idx = 0;
enum eth_rx_cqe_type cqe_type = MAX_ETH_RX_CQE_TYPE;
DbgMessage(pdev, INFORMl2 , "lm_get_packets_rcvd inside!\n");
/* Collected ramrod CQEs are returned to the caller via sp_cqes. */
mm_mem_zero( sp_cqes, sizeof(struct _sp_cqes_info) );
/* Snapshot the HW consumer index of the RCQ. */
cq_new_idx = mm_le16_to_cpu(*(rcq_chain->hw_con_idx_ptr));
/* If the HW index sits on the last usable BD of a page, step it over the
 * next-page pointer BD(s) so it stays comparable with the SW index. */
if((cq_new_idx & lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain)) == lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain))
{
cq_new_idx+= lm_bd_chain_bds_skip_eop(&rcq_chain->bd_chain);
}
DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
rx_old_idx = lm_bd_chain_cons_idx(rx_chain_bd);
cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);
if (cq_old_idx == cq_new_idx)
{
DbgMessage(pdev, INFORMl2rx , "there is no change in the RCQ consumer index so exit!\n");
return pkt_cnt;
}
/* Process every CQE between our consumer and the HW consumer. */
while(cq_old_idx != cq_new_idx)
{
DbgBreakIfFastPath(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
cqe = (union eth_rx_cqe *)lm_bd_chain_consume_bd(&rcq_chain->bd_chain);
DbgBreakIfFastPath(cqe == NULL);
cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);
cqe_type = GET_FLAGS_WITH_OFFSET(cqe->ramrod_cqe.ramrod_type, COMMON_RAMROD_ETH_RX_CQE_TYPE, COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT);
DbgBreakIf(MAX_ETH_RX_CQE_TYPE <= cqe_type);
switch(cqe_type)
{
case RX_ETH_CQE_TYPE_ETH_RAMROD:
{
/* Slow-path completion: stash it for lm_complete_ramrods();
 * TOE ramrods are completed elsewhere and are skipped here. */
if (cqe->ramrod_cqe.conn_type != TOE_CONNECTION_TYPE)
{
if (ERR_IF(sp_cqes->idx >= MAX_NUM_SPE))
{
DbgBreakMsgFastPath("too many spe completed\n");
DbgBreakIfAll(sp_cqes->idx >= MAX_NUM_SPE);
return pkt_cnt;
}
mm_memcpy((void*)(&(sp_cqes->sp_cqe[sp_cqes->idx++])), (const void*)cqe, sizeof(*cqe));
}
/* Ramrods consume no RX buffer - recycle the RCQ entry. */
lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
#if 0
pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
DbgBreakIfFastPath(pkt == NULL);
s_list_push_tail( &LM_RXQ(pdev, chain_idx).free_descq,
&pkt->link);
#endif
break;
}
case RX_ETH_CQE_TYPE_ETH_FASTPATH:
case RX_ETH_CQE_TYPE_ETH_START_AGG:
{
/* Regular received frame, or the first frame of a TPA aggregation. */
u16_t parse_flags = 0;
DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- it is fast path, func=%d\n", FUNC_ID(pdev));
DbgBreakIf( (RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type)&&
(lm_tpa_state_disable == tpa_chain->state));
/* Completions arrive in posting order - pop the oldest posted packet. */
pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
parse_flags = mm_le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
DbgBreakIfFastPath( NULL == pkt );
#if DBG
if CHK_NULL( pkt )
{
return 0;
}
#endif
DbgBreakIfFastPath(SIG(pkt) != L2PACKET_RX_SIG);
#if L2_RX_BUF_SIG
DbgBreakIfFastPath(SIG(pkt->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
DbgBreakIfFastPath(END_SIG(pkt->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
#endif
lm_bd_chain_bds_consumed(rx_chain_bd, 1);
if( rx_chain_sge )
{
lm_bd_chain_bds_consumed(rx_chain_sge, 1);
}
#if defined(_NTDDK_)
#pragma warning (push)
#pragma warning( disable:28182 )
#endif
/* next_bd_idx was recorded at posting time - restore RX consumer to it. */
rx_old_idx = pkt->u1.rx.next_bd_idx;
CLEAR_FLAGS( pkt->l2pkt_rx_info->flags );
if(RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type)
{
/* TPA start: only len_on_bd is valid; the packet is held back
 * until the matching stop-aggregation CQE arrives. */
lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.len_on_bd), chain_idx);
DbgBreakIf(0 != cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
lm_tpa_start(pdev,
pkt,
chain_idx,
cqe->fast_path_cqe.queue_index);
lm_tpa_start_flags_handle(pdev,
&(cqe->fast_path_cqe),
pkt,
parse_flags);
}
else
{
/* Regular frame: complete it immediately onto rcvd_list. */
lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len), chain_idx);
pkt->l2pkt_rx_info->total_packet_size = pkt->l2pkt_rx_info->size;
DbgBreakIfFastPath((pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE) || (pkt->l2pkt_rx_info->total_packet_size > MAX_CLI_PACKET_SIZE(pdev, chain_idx)));
pkt->size = pkt->l2pkt_rx_info->size;
if(OOO_CID(pdev) == chain_idx)
{
/* OOO chain CQEs carry raw data the caller needs - copy it out. */
DbgBreakIfFastPath( ETH_FP_CQE_RAW != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL ) >>
ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT));
ASSERT_STATIC( sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) == sizeof(cqe->fast_path_cqe.sgl_or_raw_data.raw_data) );
mm_memcpy( pkt->u1.rx.sgl_or_raw_data.raw_data, cqe->fast_path_cqe.sgl_or_raw_data.raw_data, sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) );
}
else
{
DbgBreakIfFastPath( ETH_FP_CQE_REGULAR != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL )>>
ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT) ) ;
}
lm_regular_flags_handle(pdev,
&(cqe->fast_path_cqe),
pkt,
parse_flags);
/* Optionally recompute inner checksums of GRE-encapsulated frames. */
if (GET_FLAGS(pdev->params.ofld_cap_to_ndis, LM_OFFLOAD_ENCAP_PACKET))
{
encap_pkt_parsing(pdev, pkt);
}
pkt_cnt++;
s_list_push_tail(rcvd_list, &pkt->link);
}
/* RSS hash and VLAN info apply to both start-agg and regular CQEs. */
if GET_FLAGS(cqe->fast_path_cqe.status_flags, ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)
{
SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_VALID_HASH_VALUE );
*pkt->u1.rx.hash_val_ptr = mm_le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}
if(GET_FLAGS(parse_flags,PARSING_FLAGS_INNER_VLAN_EXIST))
{
u16_t vlan_tag = mm_le16_to_cpu(cqe->fast_path_cqe.vlan_tag);
DbgMessage(pdev, INFORMl2, "vlan frame recieved: %x\n",vlan_tag);
/* When HW strips the tag, report it out-of-band on the packet
 * (the OOO chain keeps frames intact, so it is excluded). */
if ((!pdev->params.keep_vlan_tag) &&
( OOO_CID(pdev) != chain_idx))
{
SET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_VALID_VLAN_TAG);
pkt->l2pkt_rx_info->vlan_tag = vlan_tag;
DbgMessage(pdev, INFORMl2rx, "vlan removed from frame: %x\n",vlan_tag);
}
}
#if defined(_NTDDK_)
#pragma warning (pop)
#endif
#if DBG
/* Debug-only statistics derived from the parsing flags. */
if(GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS))
{
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv4_frag_count);
}
if(GET_FLAGS(parse_flags,PARSING_FLAGS_LLC_SNAP))
{
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_llc_snap_count);
}
if(GET_FLAGS(parse_flags,PARSING_FLAGS_IP_OPTIONS) &&
GET_FLAGS(pkt->l2pkt_rx_info->flags ,LM_RX_FLAG_IS_IPV6_DATAGRAM))
{
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv6_ext_count);
}
#endif
/* No error bits other than the known/expected ones may be set. */
DbgBreakIfFastPath( GET_FLAGS(cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG) );
DbgBreakIfFastPath(cqe->fast_path_cqe.type_error_flags &
~(ETH_FAST_PATH_RX_CQE_TYPE |
ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG |
ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL));
break;
}
case RX_ETH_CQE_TYPE_ETH_STOP_AGG:
{
/* TPA aggregation ended - complete the aggregated packets. */
DbgBreakIf( lm_tpa_state_disable == tpa_chain->state);
pkt_cnt = lm_tpa_stop(pdev,
rcvd_list,
&(cqe->end_agg_cqe),
chain_idx,
pkt_cnt,
cqe->end_agg_cqe.queue_index);
/* Stop-agg CQEs consume no RX buffer - recycle the RCQ entry. */
lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
break;
}
case MAX_ETH_RX_CQE_TYPE:
default:
{
DbgBreakMsg("CQE type not supported");
}
}
}
/* Restore the RX (and SGE) consumer to the index of the last completed BD. */
rx_chain_bd->cons_idx = rx_old_idx;
if( rx_chain_sge )
{
rx_chain_sge->cons_idx = rx_old_idx;
}
/* Publish the updated producers to the chip. */
lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge ,chain_idx);
DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- bd con: %d bd prod: %d \n",
lm_bd_chain_cons_idx(rx_chain_bd), lm_bd_chain_prod_idx(rx_chain_bd));
DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- cq con: %d cq prod: %d \n",
lm_bd_chain_cons_idx(&rcq_chain->bd_chain), lm_bd_chain_prod_idx(&rcq_chain->bd_chain));
return pkt_cnt;
}
/**
 * Complete every slow-path (ramrod) CQE collected by lm_get_packets_rcvd().
 *
 * @return LM_STATUS_SUCCESS always.
 */
lm_status_t lm_complete_ramrods(
    struct _lm_device_t *pdev,
    struct _sp_cqes_info *sp_cqes)
{
    u8_t i = 0;

    while (i < sp_cqes->idx)
    {
        lm_eth_init_command_comp(pdev, &(sp_cqes->sp_cqe[i].ramrod_cqe));
        i++;
    }

    return LM_STATUS_SUCCESS;
}
/**
 * Account bytes returned to the pool by the OS and lazily update the FW's
 * dynamic host coalescing counter - only once a threshold's worth of bytes
 * has accumulated since the last update, to limit chip writes.
 */
void
lm_return_packet_bytes( struct _lm_device_t *pdev,
                        u32_t const qidx,
                        u32_t const returned_bytes)
{
    lm_rx_chain_t *rxq = &LM_RXQ(pdev, qidx);

/* Update threshold: half of hc_threshold0[SM_RX_ID], capped at 16K. */
#define HC_RET_BYTES_TH(pdev) (((pdev)->params.hc_threshold0[SM_RX_ID] < 32768) ? ((pdev)->params.hc_threshold0[SM_RX_ID] >> 1) : 16384)

    rxq->ret_bytes += returned_bytes;

    /* Not enough new bytes since the last FW update - nothing to do. */
    if (S32_SUB(rxq->ret_bytes, rxq->ret_bytes_last_fw_update + HC_RET_BYTES_TH(pdev)) < 0)
    {
        return;
    }

    if ((qidx < LM_MAX_RSS_CHAINS(pdev)) && IS_PFDEV(pdev))
    {
        LM_INTMEM_WRITE32(PFDEV(pdev), rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes, BAR_CSTRORM_INTMEM);
        rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
    }
    else if (IS_VFDEV(pdev))
    {
        /* VFs reach the CSDM queue zone through their own BAR window. */
        VF_REG_WR(pdev, VF_BAR0_CSDM_QUEUES_OFFSET + rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes);
        rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
    }
}