/*
 * NOTE(review): this appears to be an extracted collection of
 * DbgBreakIfFastPath() fast-path debug assertions (sanity checks from an
 * L2 Ethernet driver's RX/TX paths), not a complete translation unit.
 * Several statements below are cut off mid-expression.  Only comments
 * have been added; every code token is unchanged.
 */

/* NOTE(review): bare macro name with no argument list -- line looks
 * truncated; confirm against the original source. */
DbgBreakIfFastPath
/* BD-chain accounting invariants: the free-BD count must stay strictly
 * between 0 and capacity, and must cover the BDs about to be consumed. */
DbgBreakIfFastPath(bd_chain->bd_left > bd_chain->capacity);
DbgBreakIfFastPath(!bd_chain->bd_left);
DbgBreakIfFastPath(bd_chain->bd_left == bd_chain->capacity);
DbgBreakIfFastPath(bd_chain->bd_left < nbds);
/* Producer index, masked to its page, must land on a usable BD slot
 * (not past the usable BDs, and not on offset 0 of a page). */
DbgBreakIfFastPath((bd_chain->prod_idx & bd_chain->bds_per_page_mask) > bd_chain->usable_bds_per_page); /* assertion relevant to 8b bd chain */
DbgBreakIfFastPath((bd_chain->prod_idx & bd_chain->bds_per_page_mask) == 0); /* GilR 5/13/2006 - this is currently the agreement with FW */
DbgBreakIfFastPath(bd_chain->bd_left < nbds);
DbgBreakIfFastPath(bd_chain->bd_left < 1);
/* RX packet pointer must be non-NULL (two stylistic variants of the same
 * check, presumably from different call sites). */
DbgBreakIfFastPath(pkt == NULL);
DbgBreakIfFastPath( NULL == pkt );
/* Guard-signature checks: the packet descriptor and both ends of its
 * receive buffer must still carry the L2PACKET_RX_SIG pattern
 * (detects buffer over/under-run or descriptor corruption). */
DbgBreakIfFastPath(SIG(pkt) != L2PACKET_RX_SIG);
DbgBreakIfFastPath(SIG(pkt->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
DbgBreakIfFastPath(END_SIG(pkt->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
/* Received frame length must lie within
 * [MIN_ETHERNET_PACKET_SIZE, MAX_CLI_PACKET_SIZE(pdev, chain_idx)]. */
DbgBreakIfFastPath((pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE) || (pkt->l2pkt_rx_info->total_packet_size > MAX_CLI_PACKET_SIZE(pdev, chain_idx)));
/* NOTE(review): the next two statements are truncated mid-expression
 * (unbalanced parentheses).  They test the CQE's SGL/RAW selector bits --
 * presumably the continuation shifts the flag field down and compares
 * against ETH_FP_CQE_RAW / ETH_FP_CQE_REGULAR; confirm against the
 * original source. */
DbgBreakIfFastPath( ETH_FP_CQE_RAW != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL ) >>
DbgBreakIfFastPath( ETH_FP_CQE_REGULAR != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL )>>
/* When an SGE chain exists it must stay consistent with its BD chain. */
DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
/* A PHY decode error flag set in the CQE should never reach this path. */
DbgBreakIfFastPath( GET_FLAGS(cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG) );
/* NOTE(review): truncated mid-expression -- the mask operand of the
 * bitwise AND is missing; confirm against the original source. */
DbgBreakIfFastPath(cqe->fast_path_cqe.type_error_flags &
/* Same descriptor/buffer guard-signature checks as above, for the
 * 'packet' variable at another call site. */
DbgBreakIfFastPath(SIG(packet) != L2PACKET_RX_SIG);
DbgBreakIfFastPath(SIG(packet->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
DbgBreakIfFastPath(END_SIG(packet->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
/* Lower-bound-only variant of the frame-length check. */
DbgBreakIfFastPath(pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE);
/* This call site expects no SGE chain at all. */
DbgBreakIfFastPath( NULL != rx_chain_sge );
/* This path must not execute on a channel-mode VF device. */
DbgBreakIfFastPath(IS_CHANNEL_VFDEV(pdev));
DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
/* CQ consumer/producer ordering: new index must be strictly ahead of the
 * old one under signed-16-bit wraparound subtraction. */
DbgBreakIfFastPath(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
DbgBreakIfFastPath(cqe == NULL);
DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
DbgBreakIfFastPath(!pdev);
/* TX-side checks: must be asked to send at least one packet. */
DbgBreakIfFastPath(pkt_num == 0);
DbgBreakIfFastPath(pkt == NULL);
/* Total header length cannot exceed the sum of the fragment sizes. */
DbgBreakIfFastPath(total_hlen_bytes > sum_frag_size);
/* Remaining header bytes must exceed what the start BD already holds
 * (otherwise the split logic below it would be entered needlessly). */
DbgBreakIfFastPath(hlen_reminder <= start_bd_nbytes);
/* Each fragment must be non-empty and fit a 16-bit BD length field. */
DbgBreakIfFastPath((*frag)->size >= 0x10000 || (*frag)->size == 0);
DbgBreakIfFastPath(hlen_reminder <= prod_bd_nbytes);
/* A coalesce buffer must have been reserved before this point. */
DbgBreakIfFastPath(coalesce_buf == NULL);
/* Both the start BD and the generic BD must have been allocated. */
DbgBreakIfFastPath(!(start_bd && generic_bd));
DbgBreakIfFastPath(frag->size >= 0x10000 || frag->size == 0);
/* The parse BD must exist before it is filled in. */
DbgBreakIfFastPath( !parse_bd_ptr ) ;
/* Outgoing frame must at least contain a full Ethernet header. */
DbgBreakIfFastPath(packet->size < ETHERNET_PACKET_HEADER_SIZE);
DbgBreakIfFastPath(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
DbgBreakIfFastPath(elem == NULL);
DbgBreakIfFastPath(!pdev);