#include "lm5710.h"
#include "microcode_constants.h"
#include "eth_constants.h"
#include "bd_chain.h"
#include "ecore_common.h"
u8_t lm_is_tx_completion(lm_device_t *pdev, u8_t chain_idx)
{
u8_t result = FALSE;
lm_tx_chain_t *tx_chain = &LM_TXQ(pdev, chain_idx);
DbgBreakIf(!(pdev && tx_chain));
if ( tx_chain->hw_con_idx_ptr && (mm_le16_to_cpu(*tx_chain->hw_con_idx_ptr) != tx_chain->pkt_idx))
{
result = TRUE;
}
DbgMessage(pdev, INFORMi, "lm_is_tx_completion: result is:%s\n", result? "TRUE" : "FALSE");
return result;
}
static void lm_handle_lso_split(IN lm_address_t frag_addr_data_offset,
IN u16_t data_part_size,
IN lm_tx_chain_t *tx_chain,
IN struct eth_tx_start_bd *start_bd,
IN struct eth_tx_bd *generic_bd
)
{
struct eth_tx_bd *prod_bd;
u16_t old_nbd = mm_le16_to_cpu(start_bd->nbd);
u16_t old_nbytes = mm_le16_to_cpu(generic_bd->nbytes);
ASSERT_STATIC(OFFSETOF(struct eth_tx_bd, nbytes) == OFFSETOF(struct eth_tx_start_bd, nbytes)) ;
DbgBreakIfFastPath(!(start_bd && generic_bd));
start_bd->nbd = mm_cpu_to_le16(old_nbd + 1);
generic_bd->nbytes = mm_cpu_to_le16(old_nbytes - data_part_size);
LM_INC64(&frag_addr_data_offset, mm_le16_to_cpu(generic_bd->nbytes));
prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
prod_bd->addr_lo = mm_cpu_to_le32(frag_addr_data_offset.as_u32.low);
prod_bd->addr_hi = mm_cpu_to_le32(frag_addr_data_offset.as_u32.high);
prod_bd->nbytes = mm_cpu_to_le16(data_part_size);
tx_chain->lso_split_used++;
DbgMessage(NULL, WARNl2tx, "#lm_handle_lso_split: after split: original bd nbytes=0x%x,new bd nbytes=0x%x\n",
mm_le16_to_cpu(generic_bd->nbytes), mm_le16_to_cpu(prod_bd->nbytes));
}
static void lm_pre_process_lso_packet(
IN lm_device_t *pdev,
IN lm_packet_t *packet,
IN lm_frag_list_t *frags,
OUT u8_t *split_required,
IN u16_t total_hlen_bytes
)
{
u32_t cnt;
u16_t sum_frag_size = 0;
u8_t hdr_nbds = 0;
*split_required = FALSE;
for(cnt = 0; cnt < frags->cnt; cnt++)
{
hdr_nbds++;
sum_frag_size += (u16_t)frags->frag_arr[cnt].size;
if (total_hlen_bytes <= sum_frag_size)
{
if (total_hlen_bytes < sum_frag_size)
{
*split_required = TRUE;
}
break;
}
}
DbgBreakIfFastPath(total_hlen_bytes > sum_frag_size);
packet->u1.tx.hdr_nbds = hdr_nbds;
}
static void lm_process_lso_packet(IN lm_packet_t *packet,
IN lm_device_t *pdev,
IN lm_tx_chain_t *tx_chain,
IN lm_frag_list_t *frags,
IN void *parse_bd,
IN struct eth_tx_start_bd *start_bd,
OUT lm_frag_t **frag,
IN u16_t total_hlen_bytes,
IN u8_t split_required)
{
struct eth_tx_bd *prod_bd = NULL;
u32_t cnt = 0;
u16_t hlen_reminder = total_hlen_bytes;
if ((packet->l2pkt_tx_info->lso_ip_hdr_len + packet->l2pkt_tx_info->lso_tcp_hdr_len) > 120) {
pdev->debug_info.number_of_long_LSO_headers++;
}
start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
if (CHIP_IS_E1x(pdev))
{
struct eth_tx_parse_bd_e1x *parse_bd_e1x = (struct eth_tx_parse_bd_e1x *)parse_bd;
parse_bd_e1x->lso_mss = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_mss);
parse_bd_e1x->ip_id = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_ipid);
parse_bd_e1x->tcp_send_seq = mm_cpu_to_le32(packet->l2pkt_tx_info->lso_tcp_send_seq);
parse_bd_e1x->tcp_flags = packet->l2pkt_tx_info->lso_tcp_flags;
parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
{
parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN;
}
}
else
{
struct eth_tx_parse_bd_e2 *parse_bd_e2 = (struct eth_tx_parse_bd_e2 *)parse_bd;
parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_LSO_MSS & (packet->l2pkt_tx_info->lso_mss << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT);
}
SET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM);
if (!GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
{
SET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_IP_CKSUM);
}
RESET_FLAGS(start_bd->general_data, ETH_TX_START_BD_HDR_NBDS);
start_bd->general_data |= ((packet->u1.tx.hdr_nbds & ETH_TX_START_BD_HDR_NBDS) << ETH_TX_START_BD_HDR_NBDS_SHIFT);
if (split_required)
{
if ((start_bd->general_data & ETH_TX_START_BD_HDR_NBDS) == 1)
{
lm_handle_lso_split(frags->frag_arr[0].addr,
mm_le16_to_cpu(start_bd->nbytes) - hlen_reminder,
tx_chain,
start_bd,
(struct eth_tx_bd *)start_bd );
split_required = FALSE;
}
else
{
u16_t start_bd_nbytes = mm_le16_to_cpu(start_bd->nbytes);
DbgBreakIfFastPath(hlen_reminder <= start_bd_nbytes);
hlen_reminder -= start_bd_nbytes;
}
}
for(cnt = 1; cnt < frags->cnt; cnt++)
{
DbgBreakIfFastPath((*frag)->size >= 0x10000 || (*frag)->size == 0);
prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
prod_bd->addr_lo = mm_cpu_to_le32((*frag)->addr.as_u32.low);
prod_bd->addr_hi = mm_cpu_to_le32((*frag)->addr.as_u32.high);
prod_bd->nbytes = mm_cpu_to_le16((u16_t) (*frag)->size);
if (split_required)
{
if (cnt == ((start_bd->general_data & ETH_TX_START_BD_HDR_NBDS) - 1))
{
lm_handle_lso_split((*frag)->addr,
mm_le16_to_cpu(prod_bd->nbytes) - hlen_reminder,
tx_chain,
start_bd,
prod_bd
);
split_required = FALSE;
}
else
{
u16_t prod_bd_nbytes = mm_le16_to_cpu(prod_bd->nbytes);
DbgBreakIfFastPath(hlen_reminder <= prod_bd_nbytes);
hlen_reminder -= prod_bd_nbytes;
}
}
packet->size += (*frag)->size;
(*frag)++;
}
LM_COMMON_DRV_STATS_INC_ETH(pdev, tx_lso_frames);
}
lm_coalesce_buffer_t *
lm_get_coalesce_buffer(
IN lm_device_t *pdev,
IN lm_tx_chain_t *txq,
IN u32_t buf_size)
{
lm_coalesce_buffer_t *coalesce_buf = NULL;
u32_t coalesce_buf_cnt, cnt;
if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) || !buf_size)) {
DbgBreakFastPath();
return NULL;
}
coalesce_buf_cnt = s_list_entry_cnt(&txq->coalesce_buf_list);
for(cnt = 0; cnt < coalesce_buf_cnt; cnt++)
{
coalesce_buf = (lm_coalesce_buffer_t *) s_list_pop_head(
&txq->coalesce_buf_list);
DbgBreakIfFastPath(coalesce_buf == NULL);
if(NULL == coalesce_buf)
{
DbgMessage(pdev, FATAL, "lm_get_coalesce_buffer:coalesce buffer was null\n");
break;
}
if(coalesce_buf->buf_size >= buf_size)
{
txq->coalesce_buf_used++;
break;
}
s_list_push_tail(&txq->coalesce_buf_list, &coalesce_buf->link);
coalesce_buf = NULL;
}
return coalesce_buf;
}
void
lm_put_coalesce_buffer(
IN lm_device_t *pdev,
IN lm_tx_chain_t *txq,
IN lm_coalesce_buffer_t *coalesce_buf)
{
if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) || CHK_NULL(coalesce_buf))) {
DbgBreakFastPath();
return;
}
s_list_push_tail(&txq->coalesce_buf_list, &coalesce_buf->link);
return;
}
static lm_status_t
lm_copy_packet_to_coalesce_buffer(
IN lm_device_t *pdev,
IN lm_tx_chain_t *txq,
IN lm_packet_t *lmpkt,
IN lm_frag_list_t *frags,
OUT lm_coalesce_buffer_t **coal_buf
)
{
lm_coalesce_buffer_t *coalesce_buf;
lm_frag_t* frag;
u32_t pkt_size = 0;
u32_t copied_bytes;
u32_t cnt;
if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) ||
CHK_NULL(lmpkt) || CHK_NULL(frags)))
{
DbgBreakFastPath();
return LM_STATUS_FAILURE;
}
frag = &frags->frag_arr[0];
for (cnt = 0; cnt < frags->cnt; cnt++, frag++) {
pkt_size += frag->size;
}
coalesce_buf = lm_get_coalesce_buffer(pdev, txq, pkt_size);
if(coalesce_buf == NULL)
{
DbgMessage(pdev, INFORMl2tx,
"#copy to coalesce buffer FAILED, (lmpkt=0x%p,pkt_size=%d)\n",
lmpkt, pkt_size);
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_no_coalesce_buf);
return LM_STATUS_RESOURCE;
}
copied_bytes = mm_copy_packet_buf(
pdev, lmpkt, coalesce_buf->mem_virt, pkt_size);
if (ERR_IF(copied_bytes != pkt_size)) {
DbgBreakFastPath();
lm_put_coalesce_buffer(pdev, txq, coalesce_buf);
return LM_STATUS_FAILURE;
}
coalesce_buf->frags.frag_arr[0].size = pkt_size;
*coal_buf = coalesce_buf;
return LM_STATUS_SUCCESS;
}
static u8_t
lm_is_packet_coalescing_required(
IN lm_device_t *pdev,
IN lm_packet_t *lmpkt,
IN lm_frag_list_t *frags,
IN u8_t num_parsing_bds
)
{
u8_t to_copy = FALSE;
u8_t wnd_size = 0;
static u32_t const MAX_FETCH_BD = 13;
wnd_size = MAX_FETCH_BD - lmpkt->u1.tx.hdr_nbds - num_parsing_bds - 1;
if (frags->cnt > wnd_size)
{
if GET_FLAGS(lmpkt->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME)
{
u8_t num_frags = (u8_t)frags->cnt;
u8_t wnd_idx = 0;
u8_t frag_idx = 0;
u32_t wnd_sum = 0;
for (wnd_idx = lmpkt->u1.tx.hdr_nbds; wnd_idx <= (num_frags - wnd_size); wnd_idx++)
{
for (frag_idx = 0; frag_idx < wnd_size; frag_idx++)
{
wnd_sum += frags->frag_arr[wnd_idx + frag_idx].size;
}
if (wnd_sum < lmpkt->l2pkt_tx_info->lso_mss)
{
DbgMessage(pdev, WARNl2tx,
"#copy to coalesce buffer IS REQUIRED for LSO packet, (lmpkt=0x%p,num_frags=%d)\n",
lmpkt, num_frags);
to_copy = TRUE;
break;
}
wnd_sum = 0;
}
}
else
{
DbgMessage(pdev, INFORMl2tx,
"#copy to coalesce buffer IS REQUIRED for NON LSO packet, (lmpkt=0x%p,num_frags=%d)\n",
lmpkt, frags->cnt);
to_copy = TRUE;
}
}
return to_copy;
}
#define LM_VLAN_PRI_BIT_LOCATION (13)
#define LM_GET_PRI_FROM_VLAN(_vlan) ((_vlan) >> LM_VLAN_PRI_BIT_LOCATION)
u8_t
lm_get_pri_from_send_packet_param(
lm_device_t *pdev,
lm_packet_t *packet)
{
u8_t pri = 0;
if GET_FLAGS(packet->l2pkt_tx_info->flags , (LM_TX_FLAG_INSERT_VLAN_TAG | LM_TX_FLAG_VLAN_TAG_EXISTS))
{
DbgMessage(pdev, INFORMl2, "Outband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
pri = LM_GET_PRI_FROM_VLAN(packet->l2pkt_tx_info->vlan_tag);
}
return pri;
}
void
fill_bds_for_encapsulated_packet(
lm_device_t *pdev,
lm_packet_t *packet,
struct eth_tunnel_data *tunnel_data,
struct eth_tx_parse_2nd_bd *parse_bd_2nd_ptr,
u8_t eth_hlen)
{
DbgBreakIf(CHIP_IS_E1x(pdev));
ecore_set_fw_mac_addr(&tunnel_data->dst_hi,
&tunnel_data->dst_mid,
&tunnel_data->dst_lo,
packet->l2pkt_tx_info->dst_mac_addr);
tunnel_data->ip_hdr_start_inner_w = (packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset) >> 1;
tunnel_data->pseudo_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->tcp_pseudo_csum);
tunnel_data->fw_ip_hdr_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->fw_ip_csum);
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
{
SET_FLAGS(tunnel_data->flags, ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER);
}
if (!parse_bd_2nd_ptr)
{
return;
}
parse_bd_2nd_ptr->global_data |= ( ((eth_hlen) >> 1) << ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT);
if (!(GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET)))
{
parse_bd_2nd_ptr->global_data |= ( ((packet->l2pkt_tx_info->lso_ip_hdr_len) >> 1) << ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT);
}
parse_bd_2nd_ptr->global_data |= (packet->l2pkt_tx_info->tcp_nonce_sum_bit << ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT);
parse_bd_2nd_ptr->tcp_send_seq = mm_cpu_to_le32(packet->l2pkt_tx_info->lso_tcp_send_seq);
parse_bd_2nd_ptr->tcp_flags = packet->l2pkt_tx_info->lso_tcp_flags;
parse_bd_2nd_ptr->fw_ip_csum_wo_len_flags_frag = mm_cpu_to_le16(packet->l2pkt_tx_info->fw_ip_csum);
parse_bd_2nd_ptr->hw_ip_id = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_ipid);
parse_bd_2nd_ptr->fw_ip_hdr_to_payload_w = (packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset +
packet->l2pkt_tx_info->lso_tcp_hdr_len -
eth_hlen) >> 1;
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
{
;
parse_bd_2nd_ptr->fw_ip_hdr_to_payload_w -= 20;
}
}
lm_status_t
lm_send_packet(
lm_device_t *pdev,
u32_t chain_idx,
lm_packet_t *packet,
lm_frag_list_t *frags)
{
lm_tx_chain_t *tx_chain = NULL;
struct eth_tx_start_bd *start_bd = NULL;
struct eth_tx_parse_bd_e1x *parse_bd_e1x = NULL;
struct eth_tx_parse_bd_e2 *parse_bd_e2 = NULL;
struct eth_tx_parse_2nd_bd *parse_bd_2nd_ptr = NULL;
struct eth_tx_bd *prod_bd = NULL;
lm_frag_t *frag = NULL;
u16_t old_prod_idx = 0;
u32_t cnt = 0;
#if defined(__BIG_ENDIAN)
struct doorbell_set_prod dq_msg = {0, 0, {0}};
#elif defined(__LITTLE_ENDIAN)
struct doorbell_set_prod dq_msg = {{0}, 0, 0};
#endif
u8_t eth_hlen = ETHERNET_PACKET_HEADER_SIZE;
u8_t split_required = FALSE;
u8_t eth_addr_type = UNKNOWN_ADDRESS;
u16_t total_hlen_bytes = 0;
u16_t start_bd_nbd = 0;
u16_t vlan_tag = 0;
void* parse_bd_ptr = NULL;
u8_t is_encapsulated_offload = 0;
u8_t num_parsing_bds = 1;
DbgMessage(pdev, VERBOSEl2tx | VERBOSEl4tx, "### lm_send_packet\n");
tx_chain = &LM_TXQ(pdev, chain_idx);
old_prod_idx = lm_bd_chain_prod_idx(&tx_chain->bd_chain);
if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_VLAN_TAG_EXISTS)
{
eth_hlen += ETHERNET_VLAN_TAG_SIZE;
}
if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
{
eth_hlen += ETHERNET_LLC_SNAP_SIZE;
}
is_encapsulated_offload = (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IS_ENCAP_PACKET) &&
GET_FLAGS(packet->l2pkt_tx_info->flags, (LM_TX_FLAG_COMPUTE_IP_CKSUM | LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM | LM_TX_FLAG_TCP_LSO_FRAME)));
if (is_encapsulated_offload)
{
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME))
{
num_parsing_bds = 2;
}
total_hlen_bytes = packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset +
packet->l2pkt_tx_info->lso_tcp_hdr_len;
}
else
{
total_hlen_bytes = packet->l2pkt_tx_info->lso_ip_hdr_len + packet->l2pkt_tx_info->lso_tcp_hdr_len + eth_hlen;
}
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME))
{
lm_pre_process_lso_packet(pdev, packet, frags, &split_required, total_hlen_bytes);
}
if (lm_is_packet_coalescing_required(pdev, packet, frags, num_parsing_bds))
{
lm_coalesce_buffer_t *coalesce_buf = NULL;
lm_status_t lm_status;
if (ERR_IF(packet->u1.tx.coalesce_buf != NULL))
{
DbgBreakFastPath();
return LM_STATUS_FAILURE;
}
lm_status = lm_copy_packet_to_coalesce_buffer(
pdev, tx_chain, packet, frags, &coalesce_buf);
if (lm_status == LM_STATUS_SUCCESS)
{
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_l2_assembly_buf_use);
packet->u1.tx.coalesce_buf = coalesce_buf;
packet->u1.tx.hdr_nbds = 1;
split_required = 1;
frags = &coalesce_buf->frags;
}
else
{
return lm_status;
}
}
if ((frags->cnt + num_parsing_bds + 1) > lm_bd_chain_avail_bds(&tx_chain->bd_chain))
{
LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_no_l2_bd);
if (packet->u1.tx.coalesce_buf)
{
lm_put_coalesce_buffer(pdev, tx_chain, packet->u1.tx.coalesce_buf);
packet->u1.tx.coalesce_buf = NULL;
}
return LM_STATUS_RESOURCE;
}
packet->size = 0;
start_bd = (struct eth_tx_start_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
mm_mem_zero(start_bd, sizeof(union eth_tx_bd_types));
frag = frags->frag_arr;
start_bd->addr_lo = mm_cpu_to_le32(frag->addr.as_u32.low);
start_bd->addr_hi = mm_cpu_to_le32(frag->addr.as_u32.high);
start_bd->nbytes = mm_cpu_to_le16((u16_t) frag->size);
start_bd->bd_flags.as_bitfield = (u8_t) ETH_TX_BD_FLAGS_START_BD;
start_bd->nbd = 0;
start_bd->general_data |= ((num_parsing_bds - 1) << ETH_TX_START_BD_PARSE_NBDS_SHIFT);
if (is_encapsulated_offload)
{
start_bd->general_data |= ETH_TX_START_BD_TUNNEL_EXIST;
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6))
{
start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
}
}
else
{
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
{
start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
}
}
if (GET_FLAGS(packet->l2pkt_tx_info->flags , LM_TX_FLAG_INSERT_VLAN_TAG))
{
DbgMessage(pdev, INFORMl2, "Outband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));
vlan_tag = packet->l2pkt_tx_info->vlan_tag;
}
else if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_VLAN_TAG_EXISTS))
{
DbgMessage(pdev, INFORMl2, "Inband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_INBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));
vlan_tag = packet->l2pkt_tx_info->vlan_tag;
}
else
{
if (IS_VFDEV(pdev)) {
((u8_t*)&vlan_tag)[0] = packet->l2pkt_tx_info->eth_type[1];
((u8_t*)&vlan_tag)[1] = packet->l2pkt_tx_info->eth_type[0];
if (vlan_tag == VLAN_TAGGED_FRAME_ETH_TYPE) {
((u8_t*)&vlan_tag)[0] = packet->l2pkt_tx_info->eth_type[3];
((u8_t*)&vlan_tag)[1] = packet->l2pkt_tx_info->eth_type[2];
DbgMessage(pdev, INFORMl2, "Inband vlan (from packet) 0X%x\n",vlan_tag);
start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_INBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));
}
} else {
vlan_tag = (u16_t)(pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod);
}
}
start_bd->vlan_or_ethertype = mm_cpu_to_le16(vlan_tag);
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_FORCE_VLAN_MODE))
{
SET_FLAGS(start_bd->general_data, ETH_TX_START_BD_FORCE_VLAN_MODE);
}
packet->size += frag->size;
frag++;
parse_bd_ptr = lm_bd_chain_produce_bd(&tx_chain->bd_chain);
mm_mem_zero(parse_bd_ptr, sizeof(union eth_tx_bd_types));
if (CHIP_IS_E1x(pdev))
{
parse_bd_e1x = parse_bd_ptr;
parse_bd_e1x->global_data = (UNICAST_ADDRESS << ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
}
else
{
parse_bd_e2 = parse_bd_ptr;
parse_bd_e2->parsing_data = (UNICAST_ADDRESS << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
}
start_bd_nbd++;
if (num_parsing_bds > 1)
{
parse_bd_2nd_ptr = lm_bd_chain_produce_bd(&tx_chain->bd_chain);
mm_mem_zero(parse_bd_2nd_ptr, sizeof(union eth_tx_bd_types));
start_bd_nbd++;
}
if (is_encapsulated_offload)
{
fill_bds_for_encapsulated_packet(pdev, packet, &parse_bd_e2->data.tunnel_data, parse_bd_2nd_ptr, eth_hlen);
}
if (IS_PFDEV(pdev) && (tx_chain->idx == FWD_CID(pdev)))
{
pdev->tx_info.forward_packets++;
}
if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME)
{
start_bd->nbd = mm_cpu_to_le16(start_bd_nbd);
lm_process_lso_packet(packet, pdev, tx_chain, frags, parse_bd_ptr, start_bd,
&frag, total_hlen_bytes, split_required);
start_bd_nbd = mm_cpu_to_le16(start_bd->nbd);
if (IS_PFDEV(pdev) && (tx_chain->idx == FWD_CID(pdev)))
{
pdev->tx_info.lso_forward_packets++;
}
}
else
{
struct eth_tx_bd *total_pkt_bytes_bd = NULL;
for(cnt = 1; cnt < frags->cnt; cnt++)
{
DbgMessage(pdev, VERBOSEl2tx | VERBOSEl4tx, " frag %d, hi 0x%x, lo 0x%x, size %d\n",
cnt, frag->addr.as_u32.high, frag->addr.as_u32.low, frag->size);
DbgBreakIfFastPath(frag->size >= 0x10000 || frag->size == 0);
prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
prod_bd->addr_lo = mm_cpu_to_le32(frag->addr.as_u32.low);
prod_bd->addr_hi = mm_cpu_to_le32(frag->addr.as_u32.high);
prod_bd->nbytes = mm_cpu_to_le16((u16_t) frag->size);
if (NULL == total_pkt_bytes_bd)
{
total_pkt_bytes_bd = prod_bd;
}
packet->size += frag->size;
frag++;
}
if (NULL != total_pkt_bytes_bd)
{
total_pkt_bytes_bd->total_pkt_bytes = mm_cpu_to_le16((u16_t) packet->size);
}
}
if (GET_FLAGS(packet->l2pkt_tx_info->flags, (LM_TX_FLAG_COMPUTE_IP_CKSUM | LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)))
{
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_IP_CKSUM) &&
(!GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6)))
{
start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
}
if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM))
{
start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
if(packet->l2pkt_tx_info->cs_any_offset)
{
start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
}
if (CHIP_IS_E1x(pdev)) {
struct eth_tx_parse_bd_e1x *parse_bd_e1x = parse_bd_ptr;
if (CHK_NULL(parse_bd_ptr)) {
DbgBreakIfFastPath( !parse_bd_ptr ) ;
return LM_STATUS_FAILURE ;
}
parse_bd_e1x->ip_hlen_w = packet->l2pkt_tx_info->lso_ip_hdr_len >> 1;
parse_bd_e1x->global_data |= (( (eth_hlen) >> 1) << ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT);
parse_bd_e1x->total_hlen_w = mm_cpu_to_le16((packet->l2pkt_tx_info->lso_ip_hdr_len >> 1) + ( (eth_hlen) >> 1));
if(packet->l2pkt_tx_info->flags & LM_TX_FLAG_TCP_LSO_SNAP_FRAME) {
parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN;
}
if (packet->l2pkt_tx_info->flags & LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)
{
parse_bd_e1x->tcp_pseudo_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->tcp_pseudo_csum);
parse_bd_e1x->global_data |= (packet->l2pkt_tx_info->tcp_nonce_sum_bit << ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT);
parse_bd_e1x->total_hlen_w = mm_cpu_to_le16((total_hlen_bytes) >> 1);
}
} else {
struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
u32_t val;
if (is_encapsulated_offload)
{
val = (( packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset ) >> 1 );
if (packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset > 40) {
parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
}
else
{
val = ((packet->l2pkt_tx_info->lso_ip_hdr_len + eth_hlen) >> 1);
if (packet->l2pkt_tx_info->lso_ip_hdr_len > 40) {
parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
}
parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W & (val << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT);
val = (packet->l2pkt_tx_info->lso_tcp_hdr_len >> 2);
parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW & (val << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT);
parse_bd_e2->parsing_data = mm_cpu_to_le32(parse_bd_e2->parsing_data);
}
}
if ((!is_encapsulated_offload) &&
((!CHIP_IS_E1x(pdev) || IS_VFDEV(pdev))))
{
struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
ecore_set_fw_mac_addr(&parse_bd_e2->data.mac_addr.dst_hi,
&parse_bd_e2->data.mac_addr.dst_mid,
&parse_bd_e2->data.mac_addr.dst_lo,
packet->l2pkt_tx_info->dst_mac_addr);
ecore_set_fw_mac_addr(&parse_bd_e2->data.mac_addr.src_hi,
&parse_bd_e2->data.mac_addr.src_mid,
&parse_bd_e2->data.mac_addr.src_lo,
packet->l2pkt_tx_info->src_mac_addr);
if (pdev->params.mac_spoof_test) {
parse_bd_e2->data.mac_addr.src_lo++;
}
}
if (IS_ETH_MULTICAST(packet->l2pkt_tx_info->dst_mac_addr))
{
if (IS_ETH_BROADCAST(packet->l2pkt_tx_info->dst_mac_addr))
{
eth_addr_type = BROADCAST_ADDRESS;
}
else
{
eth_addr_type = MULTICAST_ADDRESS;
}
if (CHIP_IS_E1x(pdev))
{
struct eth_tx_parse_bd_e1x *parse_bd_e1x = parse_bd_ptr;
RESET_FLAGS(parse_bd_e1x->global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE);
parse_bd_e1x->global_data |= (eth_addr_type << ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
}
else
{
struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
RESET_FLAGS(parse_bd_e2->parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE);
parse_bd_e2->parsing_data |= (eth_addr_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
}
}
packet->u1.tx.bd_used = start_bd_nbd += (u16_t)frags->cnt;
packet->u1.tx.next_bd_idx = lm_bd_chain_prod_idx(&tx_chain->bd_chain);
tx_chain->prod_bseq += packet->size;
DbgBreakIfFastPath(packet->size < ETHERNET_PACKET_HEADER_SIZE);
#if DBG
for(cnt = 0; cnt < start_bd_nbd; cnt++)
{
if (parse_bd_ptr && (cnt == 1))
{
if (CHIP_IS_E1x(pdev))
{
DbgMessage(pdev, VERBOSEl2tx,
" parse_bd: global_data 0x%x",
((struct eth_tx_parse_bd_e1x *)(&start_bd[cnt]))->global_data);
}
else
{
DbgMessage(pdev, VERBOSEl2tx,
" parse_bd: parsing_data 0x%08x",
mm_le32_to_cpu(((struct eth_tx_parse_bd_e2 *)(&start_bd[cnt]))->parsing_data));
}
}
else
{
DbgMessage(pdev, VERBOSEl2tx,
"-> frag: %d, bd_flags: %d, nbytes: %d, hi: 0x%x, lo: 0x%x",
cnt, start_bd[cnt].bd_flags.as_bitfield, mm_le16_to_cpu(start_bd[cnt].nbytes),
mm_le32_to_cpu(start_bd[cnt].addr_hi), mm_le32_to_cpu(start_bd[cnt].addr_lo));
if (cnt == 0)
{
DbgMessage(pdev, VERBOSEl2tx,
" start bd info: nbds: %d, vlan: 0x%x, hdr_nbds: %d",
start_bd_nbd, mm_le16_to_cpu(start_bd->vlan_or_ethertype),
(start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
}
}
}
#endif
start_bd->nbd = mm_cpu_to_le16(start_bd_nbd);
s_list_push_tail(&tx_chain->active_descq, &packet->link);
pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod +
S16_SUB(lm_bd_chain_prod_idx(&tx_chain->bd_chain), old_prod_idx);
pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod + 1;
dq_msg.header.data = DOORBELL_HDR_T_DB_TYPE;
dq_msg.zero_fill1 = 0;
dq_msg.prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod;
mm_write_barrier();
DOORBELL(pdev, chain_idx, *((u32_t *)&dq_msg));
return LM_STATUS_SUCCESS;
}
u32_t
lm_get_packets_sent( struct _lm_device_t* pdev,
u32_t chain_idx,
s_list_t *sent_list)
{
lm_tx_chain_t* tx_chain = &LM_TXQ(pdev, chain_idx);
lm_packet_t* pkt = 0;
u32_t pkt_cnt = 0;
u16_t old_idx = lm_bd_chain_cons_idx(&tx_chain->bd_chain);
u16_t new_idx = mm_le16_to_cpu(*(tx_chain->hw_con_idx_ptr));
u16_t pkt_num = S16_SUB(new_idx,tx_chain->pkt_idx);
DbgBreakIfFastPath(pkt_num == 0);
while(pkt_num > 0)
{
pkt = (lm_packet_t *) s_list_peek_head(&tx_chain->active_descq);
if (pkt == NULL)
{
lm_collect_idle_storms_dorrbell_asserts(PFDEV(pdev), TRUE, TRUE, TRUE);
DbgBreakIfFastPath(pkt == NULL);
return pkt_cnt;
}
pkt = (lm_packet_t *) s_list_pop_head(&tx_chain->active_descq);
old_idx = pkt->u1.tx.next_bd_idx;
pkt->status = LM_STATUS_SUCCESS;
lm_bd_chain_bds_consumed(&tx_chain->bd_chain, pkt->u1.tx.bd_used);
if (pkt->u1.tx.coalesce_buf) {
lm_put_coalesce_buffer(pdev, tx_chain, pkt->u1.tx.coalesce_buf);
pkt->u1.tx.coalesce_buf = NULL;
}
new_idx = mm_le16_to_cpu(*(tx_chain->hw_con_idx_ptr));
tx_chain->pkt_idx++;
pkt_num = S16_SUB(new_idx,tx_chain->pkt_idx);
pkt_cnt++;
s_list_push_tail(sent_list, &pkt->link);
}
tx_chain->bd_chain.cons_idx = old_idx;
DbgMessage(pdev, INFORMl2tx , "lm_get_packets_sent()- func: %d, txidx: %d, txbd con: %d txbd prod: %d \n",
FUNC_ID(pdev), chain_idx , lm_bd_chain_cons_idx(&tx_chain->bd_chain), lm_bd_chain_prod_idx(&tx_chain->bd_chain));
return pkt_cnt;
}