#include "lm5710.h"
#include "lm.h"
#include "lm_l4sp.h"
#include "command.h"
#include "context.h"
#include "bd_chain.h"
#include "mm.h"
#include "mm_l4if.h"
#include "lm_l4fp.h"
#include "lm_l4sp.h"
#include "everest_l5cm_constants.h"
#include "l4debug.h"
#define TOE_SP_PHYS_DATA_SIZE ((sizeof(lm_tcp_slow_path_phys_data_t) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
#define TOE_DB_RX_DATA_SIZE ((sizeof(struct toe_rx_db_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
#define TOE_DB_TX_DATA_SIZE ((sizeof(struct toe_tx_db_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
#define TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT 2
l4_tcp_con_state_t lm_tcp_calc_state (
lm_device_t * pdev,
lm_tcp_state_t * tcp,
u8_t fin_was_sent
);
void lm_tcp_comp_cb(
struct _lm_device_t *pdev,
struct sq_pending_command *pending);
#define TOE_DBG_TTL 200
#define ISCSI_DBG_TTL 222
#define TIMERS_TICKS_PER_SEC (u32_t)(1000)
#define TSEMI_CLK1_TICKS_PER_SEC (u32_t)(1000)
/* Return the number of GRQ BDs the ustorm firmware keeps cached locally.
 * Thin accessor over the firmware constant so callers outside this file do
 * not need the HSI header that defines it. */
u32_t lm_get_num_of_cashed_grq_bds(struct _lm_device_t *pdev)
{
return USTORM_TOE_GRQ_CACHE_NUM_BDS;
}
/* Never called at runtime: exists only to host compile-time checks that the
 * local tick-rate defines agree with the chip resolution constants
 * (TIMERS_TICK_SIZE_CHIP / TSEMI_CLK1_RESUL_CHIP). */
static void _fake_func_verify_defines(void)
{
ASSERT_STATIC( TIMERS_TICKS_PER_SEC == (1 / TIMERS_TICK_SIZE_CHIP) ) ;
ASSERT_STATIC( TSEMI_CLK1_TICKS_PER_SEC == (1 / TSEMI_CLK1_RESUL_CHIP) ) ;
}
/* Convert src_time, expressed in src_ticks_per_sec resolution, into
 * trg_ticks_per_sec resolution.
 *
 * Guarantees:
 *  - a non-zero src_time never converts to 0 (rounded up to 1 target tick),
 *    so a configured timeout cannot silently become "no timeout";
 *  - the converted value must fit in 32 bits (asserted before return).
 *
 * Both tick rates must be non-zero (asserted). */
static __inline u32_t lm_time_resolution(
    lm_device_t *pdev,
    u32_t src_time,
    u32_t src_ticks_per_sec,
    u32_t trg_ticks_per_sec)
{
    u64_t result;
    u64_t tmp_result;
    u32_t dev_factor;

    DbgBreakIf(!(src_ticks_per_sec && trg_ticks_per_sec));

    if (trg_ticks_per_sec > src_ticks_per_sec){
        dev_factor = trg_ticks_per_sec / src_ticks_per_sec;
        /* Widen before multiplying: a u32*u32 product is computed in 32 bits
         * and would wrap before being assigned to the u64 result. */
        result = (u64_t)src_time * dev_factor;
    } else {
        /* Widen here as well - otherwise the product wraps at 2^32 and the
         * overflow test in the #else branch below can never fire. */
        tmp_result = (u64_t)src_time * trg_ticks_per_sec;
#if defined(_VBD_)
        result = CEIL_DIV(tmp_result, src_ticks_per_sec);
#else
        if (tmp_result < 0xffffffff) {
            /* Exact scaled division fits in 32 bits. */
            result = (u32_t)tmp_result / src_ticks_per_sec;
        } else {
            /* Product too large for the exact path: divide first at the cost
             * of some precision. Requires src_time >= src_ticks_per_sec. */
            DbgBreakIf(src_time < src_ticks_per_sec);
            result = ((u64_t)(src_time / src_ticks_per_sec)) * trg_ticks_per_sec;
        }
#endif
    }

    /* Round any non-zero source time up to at least one target tick. */
    if(src_time && !result) {
        result = 1;
    }

    DbgMessage(pdev, VERBOSEl4sp,
               "lm_time_resolution: src_time=%d, src_ticks_per_sec=%d, trg_ticks_per_sec=%d, result=%d\n",
               src_time, src_ticks_per_sec, trg_ticks_per_sec, (u32_t)result);

    DbgBreakIf(result > 0xffffffff);
    return (u32_t)result;
}
/* Tear down a TCP state while a function-level reset (FLR) is in progress:
 * block further posts on both connection sides, abort all posted buffers,
 * and delete the state object.
 *
 * Returns LM_STATUS_FAILURE if no FLR is actually in progress, or if the Rx
 * side is in the middle of indicating buffers to the client (in that case
 * nothing is deleted). Returns LM_STATUS_SUCCESS otherwise.
 *
 * Only TOE connections have rx_con/tx_con to drain; other ULP types go
 * straight to state deletion. */
lm_status_t lm_tcp_erase_connection(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp)
{
lm_status_t status = LM_STATUS_SUCCESS;
lm_tcp_con_t *rx_con;
lm_tcp_con_t *tx_con;
MM_INIT_TCP_LOCK_HANDLE();
/* This path is only legal under FLR. */
if (!lm_fl_reset_is_inprogress(pdev)) {
return LM_STATUS_FAILURE;
}
DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x)\n",tcp->cid);
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
rx_con = tcp->rx_con;
tx_con = tcp->tx_con;
/* Tx side first: block new posts, then abort whatever is queued. */
mm_acquire_tcp_lock(pdev, tx_con);
tx_con->flags |= TCP_POST_BLOCKED;
lm_tcp_abort_bufs(pdev, tcp, tx_con, LM_STATUS_CONNECTION_CLOSED);
if (tx_con->abortion_under_flr) {
DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): Tx aborted\n",tcp->cid);
}
mm_release_tcp_lock(pdev, tx_con);
/* Rx side: must not erase while an indication to the client is in
 * flight - bail out (with a debug break) instead. */
mm_acquire_tcp_lock(pdev, rx_con);
rx_con->flags |= TCP_POST_BLOCKED;
if (mm_tcp_indicating_bufs(rx_con)) {
DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): under indication\n",tcp->cid);
DbgBreak();
mm_release_tcp_lock(pdev, rx_con);
return LM_STATUS_FAILURE;
}
lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_CONNECTION_CLOSED);
if (rx_con->abortion_under_flr) {
DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): Rx aborted\n",tcp->cid);
}
mm_release_tcp_lock(pdev, rx_con);
}
/* Finally remove the state from the mm layer's bookkeeping. */
mm_tcp_del_tcp_state(pdev,tcp);
return status;
}
/* Ring a "flush" doorbell for a TOE connection so the firmware drains its
 * doorbell queue for this cid. Marks both Rx and Tx sides TCP_DB_BLOCKED
 * first so no further normal doorbells are issued for the connection.
 * No-op (with a warning) for non-TOE ULP types. */
void lm_tcp_flush_db(
struct _lm_device_t * pdev,
lm_tcp_state_t *tcp)
{
struct toe_tx_doorbell dq_flush_msg;
lm_tcp_con_t *rx_con, *tx_con;
MM_INIT_TCP_LOCK_HANDLE();
DbgBreakIf(!(pdev && tcp));
if (tcp->ulp_type != TOE_CONNECTION_TYPE) {
DbgMessage(pdev, WARNl4sp, "##lm_tcp_flush_db is not sent for connection(0x%x) of type %d\n",tcp->cid, tcp->ulp_type);
return;
}
DbgMessage(pdev, INFORMl4sp, "##lm_tcp_flush_db (cid=0x%x)\n",tcp->cid);
rx_con = tcp->rx_con;
tx_con = tcp->tx_con;
/* Build the flush doorbell message (all three fields set explicitly). */
dq_flush_msg.hdr.data = (TOE_CONNECTION_TYPE << DOORBELL_HDR_T_CONN_TYPE_SHIFT);
dq_flush_msg.params = TOE_TX_DOORBELL_FLUSH;
dq_flush_msg.nbytes = 0;
/* Block doorbells on both sides before ringing the flush. */
mm_acquire_tcp_lock(pdev, tx_con);
tx_con->flags |= TCP_DB_BLOCKED;
mm_release_tcp_lock(pdev, tx_con);
mm_acquire_tcp_lock(pdev, rx_con);
rx_con->flags |= TCP_DB_BLOCKED;
mm_release_tcp_lock(pdev, rx_con);
/* The doorbell register takes the message as a raw 32-bit word. */
DOORBELL(pdev, tcp->cid, *((u32_t *)&dq_flush_msg));
}
/* Allocate all TOE memory resources: per-TSS send completion queues (SCQ),
 * per-RSS receive completion queues (RCQ) and generic receive queues (GRQ)
 * with their isle pools, the optional data-integrity pattern buffer, and the
 * RSS-update ramrod data.
 *
 * Returns LM_STATUS_RESOURCE on any allocation failure (already-allocated
 * resources are expected to be reclaimed by the caller's teardown path).
 * Called once from lm_tcp_init(). */
static lm_status_t lm_tcp_alloc_resc(lm_device_t *pdev)
{
lm_toe_info_t *toe_info;
lm_bd_chain_t *bd_chain;
u32_t mem_size;
long i;
u8_t mm_cli_idx = 0;
DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_alloc_resc\n");
_fake_func_verify_defines();
mm_cli_idx = LM_RESOURCE_NDIS;
toe_info = &pdev->toe_info;
/* SCQ: one physically-contiguous BD chain per Tx (TSS) index. */
LM_TOE_FOREACH_TSS_IDX(pdev, i)
{
bd_chain = &toe_info->scqs[i].bd_chain;
mem_size = pdev->params.l4_scq_page_cnt * LM_PAGE_SIZE;
bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
if (!bd_chain->bd_chain_virt) {
DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
return LM_STATUS_RESOURCE;
}
mm_memset(bd_chain->bd_chain_virt, 0, mem_size);
}
/* RCQ + GRQ + isle pool: per Rx (RSS) index. */
LM_TOE_FOREACH_RSS_IDX(pdev, i)
{
bd_chain = &toe_info->rcqs[i].bd_chain;
mem_size = pdev->params.l4_rcq_page_cnt * LM_PAGE_SIZE;
bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
if (!bd_chain->bd_chain_virt) {
DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
return LM_STATUS_RESOURCE;
}
mm_memset(bd_chain->bd_chain_virt, 0, mem_size);
bd_chain = &toe_info->grqs[i].bd_chain;
mem_size = pdev->params.l4_grq_page_cnt * LM_PAGE_SIZE;
bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
if (!bd_chain->bd_chain_virt) {
DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
return LM_STATUS_RESOURCE;
}
mm_memset(bd_chain->bd_chain_virt, 0, mem_size);
DbgBreakIf(toe_info->grqs[i].isles_pool);
/* Clamp the isle pool size: default 2x, minimum 1x the firmware's
 * T_TCP_ISLE_ARRAY_SIZE. Note this parameter fix-up runs once per RSS
 * index but is idempotent after the first pass. */
if (!pdev->params.l4_isles_pool_size) {
pdev->params.l4_isles_pool_size = 2 * T_TCP_ISLE_ARRAY_SIZE;
} else if (pdev->params.l4_isles_pool_size < T_TCP_ISLE_ARRAY_SIZE) {
pdev->params.l4_isles_pool_size = T_TCP_ISLE_ARRAY_SIZE;
}
mem_size = pdev->params.l4_isles_pool_size * sizeof(lm_isle_t);
toe_info->grqs[i].isles_pool = (lm_isle_t*)mm_alloc_mem(pdev, mem_size, mm_cli_idx);
if (!toe_info->grqs[i].isles_pool) {
DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
return LM_STATUS_RESOURCE;
}
mm_memset(toe_info->grqs[i].isles_pool, 0, mem_size);
}
/* Optional debug feature: buffer filled with a repeating byte pattern
 * used to validate placed data. */
if (pdev->params.l4_data_integrity) {
u32_t pb_idx;
pdev->toe_info.integrity_info.pattern_size = 256;
pdev->toe_info.integrity_info.pattern_buf_size = 0x10000 + pdev->toe_info.integrity_info.pattern_size;
pdev->toe_info.integrity_info.pattern_buf = mm_alloc_mem(pdev, pdev->toe_info.integrity_info.pattern_buf_size, mm_cli_idx);
if (!pdev->toe_info.integrity_info.pattern_buf) {
DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
return LM_STATUS_RESOURCE;
}
for (pb_idx = 0; pb_idx < pdev->toe_info.integrity_info.pattern_buf_size; pb_idx++) {
pdev->toe_info.integrity_info.pattern_buf[pb_idx] = pb_idx % pdev->toe_info.integrity_info.pattern_size;
}
}
/* DMA-able buffer handed to firmware by the RSS-update ramrod. */
pdev->toe_info.rss_update_data = (struct toe_rss_update_ramrod_data *)
mm_alloc_phys_mem(pdev, sizeof(*pdev->toe_info.rss_update_data),
&pdev->toe_info.rss_update_data_phys,
0,0);
if (pdev->toe_info.rss_update_data == NULL)
{
DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
return LM_STATUS_RESOURCE;
}
return LM_STATUS_SUCCESS;
}
/* Populate *l4_params with the driver's default L4-client offload
 * parameters, copied from the registry-configured values in pdev->params.
 * Fields narrower than their source are range-checked (debug builds break
 * on out-of-range values) and then masked down. */
static void _lm_get_default_l4cli_params(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
    lm_params_t *src = &pdev->params;

    DbgBreakIf(src->l4cli_ack_frequency > 0xff);
    l4_params->ack_frequency = src->l4cli_ack_frequency & 0xff;

    DbgBreakIf(src->l4cli_delayed_ack_ticks > 0xff);
    l4_params->delayed_ack_ticks = src->l4cli_delayed_ack_ticks & 0xff;

    DbgBreakIf(src->l4cli_doubt_reachability_retx > 0xff);
    l4_params->doubt_reachability_retx = src->l4cli_doubt_reachability_retx & 0xff;

    l4_params->dup_ack_threshold = src->l4cli_dup_ack_threshold;

    /* Only "no flags" or SNAP encapsulation is a legal configuration. */
    DbgBreakIf((src->l4cli_flags != 0) &&
               (src->l4cli_flags != OFLD_PARAM_FLAG_SNAP_ENCAP));
    l4_params->flags = src->l4cli_flags;

    DbgBreakIf(src->l4cli_max_retx > 0xff);
    l4_params->max_retx = src->l4cli_max_retx & 0xff;

    l4_params->nce_stale_ticks = src->l4cli_nce_stale_ticks;
    l4_params->push_ticks = src->l4cli_push_ticks;

    DbgBreakIf(src->l4cli_starting_ip_id > 0xffff);
    l4_params->starting_ip_id = src->l4cli_starting_ip_id & 0xffff;

    l4_params->sws_prevention_ticks = src->l4cli_sws_prevention_ticks;
    l4_params->ticks_per_second = src->l4cli_ticks_per_second;
}
/* Replenish the generic receive queue (GRQ) of status-block sb_idx with
 * generic buffers, producing one BD per buffer.
 *
 * Buffers are taken first from bypass_gen_pool_list (if given) and then from
 * the mm generic-buffer pool. filling_mode selects the target fill level:
 *   FILL_GRQ_MIN_CASHED_BDS - only up to the firmware's cached-BD count,
 *   FILL_GRQ_LOW_THRESHOLD  - up to grq->low_bds_threshold,
 *   otherwise ("full")      - up to high_bds_threshold / chain capacity,
 *                             subject to the filling-threshold hysteresis.
 * Returns non-zero iff at least one buffer was placed on the chain. */
u8_t lm_tcp_rx_fill_grq(struct _lm_device_t * pdev, u8_t sb_idx, d_list_t * bypass_gen_pool_list, u8_t filling_mode)
{
lm_toe_info_t * toe_info;
lm_tcp_grq_t * grq;
struct toe_rx_grq_bd * grq_bd;
lm_tcp_gen_buf_t * curr_gen_buf;
lm_bd_chain_t * bd_chain;
d_list_t tmp_gen_buf_list;
d_list_t free_gen_buf_list;
u16_t num_bufs;
u16_t num_bufs_threshold;
u32_t num_bypass_buffs;
u32_t avg_dpc_cnt;
toe_info = &pdev->toe_info;
grq = &toe_info->grqs[sb_idx];
bd_chain = &grq->bd_chain;
/* Start from the number of empty BD slots in the chain. */
num_bufs = bd_chain->bd_left;
DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_fill_grq bd_left (to be filled)= %d\n", bd_chain->bd_left);
/* Hysteresis: in "full" mode, skip refills smaller than
 * capacity / l4_grq_filling_threshold_divider (divider forced >= 2). */
if (!pdev->params.l4_grq_filling_threshold_divider) {
num_bufs_threshold = 1;
} else {
if (pdev->params.l4_grq_filling_threshold_divider < 2) {
pdev->params.l4_grq_filling_threshold_divider = 2;
}
num_bufs_threshold = bd_chain->capacity / pdev->params.l4_grq_filling_threshold_divider;
}
d_list_init(&tmp_gen_buf_list, NULL, NULL, 0);
d_list_init(&free_gen_buf_list, NULL, NULL, 0);
if (bypass_gen_pool_list != NULL) {
num_bypass_buffs = d_list_entry_cnt(bypass_gen_pool_list);
} else {
num_bypass_buffs = 0;
}
/* Work out how many buffers this mode actually wants. */
if (filling_mode == FILL_GRQ_MIN_CASHED_BDS) {
u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
if (bufs_in_chain >= USTORM_TOE_GRQ_CACHE_NUM_BDS) {
return 0;
} else {
num_bufs = USTORM_TOE_GRQ_CACHE_NUM_BDS - bufs_in_chain;
}
} else if (filling_mode == FILL_GRQ_LOW_THRESHOLD) {
u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
/* Threshold below the firmware cache size is a configuration error;
 * fix it up to the default after the debug break. */
DbgBreakIf(grq->low_bds_threshold < USTORM_TOE_GRQ_CACHE_NUM_BDS);
if (grq->low_bds_threshold < USTORM_TOE_GRQ_CACHE_NUM_BDS) {
grq->low_bds_threshold = 3*GRQ_XOFF_TH;
}
if (bufs_in_chain >= grq->low_bds_threshold) {
return 0;
} else {
num_bufs = grq->low_bds_threshold - bufs_in_chain;
}
} else {
/* "Full" mode: cap at high_bds_threshold when one is configured. */
if (grq->high_bds_threshold) {
u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
if (bufs_in_chain >= grq->high_bds_threshold) {
return 0;
} else {
num_bufs = grq->high_bds_threshold - bufs_in_chain;
}
}
/* Below the hysteresis threshold only bypass buffers are used. */
if (num_bufs < num_bufs_threshold) {
if (num_bufs > num_bypass_buffs) {
num_bufs = (u16_t)num_bypass_buffs;
grq->gen_bufs_compensated_from_bypass_only += num_bypass_buffs;
}
if (!num_bufs) {
return 0;
}
}
}
/* Ask the mm pool for whatever the bypass list cannot cover. */
if (num_bypass_buffs < num_bufs) {
u16_t num_required_buffs = num_bufs - num_bypass_buffs;
mm_tcp_get_gen_bufs(pdev, &tmp_gen_buf_list, num_required_buffs, sb_idx);
}
/* Top up from the bypass list; buffers marked free-when-done are not
 * recycled into the GRQ but collected for return to the caller. */
while ((d_list_entry_cnt(&tmp_gen_buf_list) < num_bufs) && num_bypass_buffs) {
lm_tcp_gen_buf_t * tmp_buf = NULL;
d_list_entry_t * curr_entry = d_list_pop_head(bypass_gen_pool_list);
tmp_buf = (lm_tcp_gen_buf_t *)curr_entry;
DbgBreakIf(!curr_entry);
if (tmp_buf->flags & GEN_FLAG_FREE_WHEN_DONE)
{
d_list_push_head(&free_gen_buf_list, curr_entry);
}
else
{
d_list_push_head(&tmp_gen_buf_list, curr_entry);
}
num_bypass_buffs--;
}
num_bufs = (u16_t)d_list_entry_cnt(&tmp_gen_buf_list);
/* Hand the free-when-done buffers back on the caller's bypass list. */
if ((bypass_gen_pool_list != NULL) && d_list_entry_cnt(&free_gen_buf_list))
{
d_list_add_tail(bypass_gen_pool_list, &free_gen_buf_list);
}
/* Statistics: max/avg GRQs produced per DPC (sum reset on wraparound). */
grq->num_grqs_last_dpc = num_bufs;
if (grq->num_grqs_last_dpc) {
if (grq->num_grqs_last_dpc > grq->max_grqs_per_dpc) {
grq->max_grqs_per_dpc = grq->num_grqs_last_dpc;
}
if ((grq->sum_grqs_last_x_dpcs + grq->num_grqs_last_dpc) < grq->sum_grqs_last_x_dpcs) {
grq->avg_dpc_cnt = 0;
grq->sum_grqs_last_x_dpcs = 0;
}
grq->sum_grqs_last_x_dpcs += grq->num_grqs_last_dpc;
grq->avg_dpc_cnt++;
avg_dpc_cnt = grq->avg_dpc_cnt;
if (avg_dpc_cnt) {
grq->avg_grqs_per_dpc = grq->sum_grqs_last_x_dpcs / avg_dpc_cnt;
} else {
grq->sum_grqs_last_x_dpcs = 0;
}
}
DbgBreakIf(num_bufs != tmp_gen_buf_list.cnt);
if (num_bufs < bd_chain->bd_left) {
grq->num_deficient++;
}
if (!num_bufs) {
DbgMessage(pdev, WARNl4rx, "no buffers returned from generic pool\n");
return 0;
}
/* Produce one GRQ BD per collected buffer, resetting the buffer's
 * per-placement bookkeeping first. */
curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(&tmp_gen_buf_list);
if (filling_mode == FILL_GRQ_LOW_THRESHOLD) {
grq->gen_bufs_compensated_till_low_threshold += num_bufs;
}
while (num_bufs--) {
/* Guard signatures at both ends of the buffer detect corruption. */
DbgBreakIf(SIG(curr_gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
DbgMessage(pdev, VERBOSEl4rx, "curr_gen_buf->buf_virt=0x%p, END_SIG=0x%x\n", curr_gen_buf->buf_virt,
END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)));
DbgBreakIf(END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
curr_gen_buf->ind_bytes = 0;
curr_gen_buf->ind_nbufs = 0;
curr_gen_buf->placed_bytes = 0;
curr_gen_buf->refcnt = 0;
curr_gen_buf->tcp = NULL;
grq_bd = (struct toe_rx_grq_bd *)lm_toe_bd_chain_produce_bd(bd_chain);
DbgBreakIf(!grq_bd);
DbgBreakIf(!curr_gen_buf || !curr_gen_buf->buf_phys.as_u64);
grq_bd->addr_hi = curr_gen_buf->buf_phys.as_u32.high;
grq_bd->addr_lo = curr_gen_buf->buf_phys.as_u32.low;
curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_next_entry(&curr_gen_buf->link);
}
if (bd_chain->bd_left) {
DbgMessage(pdev, INFORMl4rx, "GRQ bd-chain wasn't filled completely\n");
}
/* Track the buffers now owned by the hardware chain. */
if (d_list_entry_cnt(&tmp_gen_buf_list))
{
d_list_add_tail(&grq->active_gen_list, &tmp_gen_buf_list);
}
return (tmp_gen_buf_list.cnt != 0);
}
/* Initialize (or re-initialize after a reset) the TOE software resources
 * allocated by lm_tcp_alloc_resc(): BD-chain descriptors for SCQ/RCQ/GRQ,
 * status-block consumer pointers, per-GRQ thresholds and generic-buffer
 * lists, and the initial GRQ fill.
 *
 * b_is_init is TRUE on first-time init (lists are initialized); on re-init
 * the GRQs are cleared instead and list/threshold state is preserved.
 * Always returns LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init_resc(struct _lm_device_t *pdev, u8_t b_is_init )
{
lm_toe_info_t *toe_info;
lm_bd_chain_t *bd_chain;
long i;
u16_t volatile * sb_indexes;
u32_t sb_id;
DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_resc\n");
toe_info = &pdev->toe_info;
toe_info->state = LM_TOE_STATE_INIT;
toe_info->rss_update_cnt = 0;
toe_info->gen_buf_size = lm_tcp_calc_gen_buf_size(pdev);
LM_TCP_SET_UPDATE_WINDOW_MODE(pdev, LM_TOE_UPDATE_MODE_SHORT_LOOP);
if( b_is_init )
{
d_list_init(&toe_info->state_blk.neigh_list, NULL, NULL, 0);
d_list_init(&toe_info->state_blk.path_list, NULL, NULL, 0);
d_list_init(&toe_info->state_blk.tcp_list, NULL, NULL, 0);
}
pdev->ofld_info.state_blks[STATE_BLOCK_TOE] = &toe_info->state_blk;
/* SCQs: set up each Tx completion BD chain and wire its consumer index
 * into the chosen status block. */
LM_TOE_FOREACH_TSS_IDX(pdev, i)
{
lm_tcp_scq_t *scq = &toe_info->scqs[i];
bd_chain = &scq->bd_chain;
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_scq_page_cnt, sizeof(struct toe_tx_cqe), 1, TRUE);
sb_id = RSS_ID_TO_SB_ID(i);
#ifdef _VBD_
/* With L4 RSS disabled on E2+, all TOE traffic uses the non-RSS SB. */
if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
{
sb_id = LM_NON_RSS_SB(pdev);
}
#endif
sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
sb_indexes[HC_INDEX_TOE_TX_CQ_CONS] = 0;
scq->hw_con_idx_ptr = sb_indexes + HC_INDEX_TOE_TX_CQ_CONS;
scq->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_TYPE;
scq->hc_sb_info.hc_index_value = HC_INDEX_TOE_TX_CQ_CONS;
}
if ( !b_is_init ) {
/* Re-init path: drop any generic buffers still on the GRQs. */
lm_tcp_clear_grqs(pdev);
}
/* RCQs and GRQs per RSS index. */
LM_TOE_FOREACH_RSS_IDX(pdev, i)
{
lm_tcp_rcq_t *rcq = &toe_info->rcqs[i];
lm_tcp_grq_t *grq = &toe_info->grqs[i];
u8_t byte_counter_id;
sb_id = RSS_ID_TO_SB_ID(i);
#ifdef _VBD_
if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
{
sb_id = LM_NON_RSS_SB(pdev);
}
#endif
/* Dynamic-HC byte counter is addressed per-SB on E1x, per-QZONE later. */
byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
bd_chain = &rcq->bd_chain;
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_rcq_page_cnt, sizeof(struct toe_rx_cqe), 1, TRUE);
rcq->rss_update_pending = 0;
rcq->suspend_processing = FALSE;
rcq->update_cid = 0;
sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
sb_indexes[HC_INDEX_TOE_RX_CQ_CONS] = 0;
rcq->hw_con_idx_ptr = sb_indexes + HC_INDEX_TOE_RX_CQ_CONS;
rcq->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_SL_TYPE;
rcq->hc_sb_info.hc_index_value = HC_INDEX_TOE_RX_CQ_CONS;
if (IS_PFDEV(pdev))
{
rcq->hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, HC_INDEX_TOE_RX_CQ_CONS);
}
else
{
DbgMessage(pdev, FATAL, "Dhc not implemented for VF yet\n");
}
if( b_is_init )
{
d_list_init(&grq->active_gen_list, NULL, NULL, 0);
d_list_init(&grq->aux_gen_list, NULL, NULL, 0);
/* Non-base RSS chains compensate buffers on allocation and use a
 * non-zero high threshold; the base chain is unbounded. */
if ((u8_t)i != LM_TOE_BASE_RSS_ID(pdev) ) {
grq->grq_compensate_on_alloc = TRUE;
pdev->toe_info.grqs[i].high_bds_threshold = 3*GRQ_XOFF_TH + 1;
} else {
grq->grq_compensate_on_alloc = FALSE;
pdev->toe_info.grqs[i].high_bds_threshold = 0;
}
grq->low_bds_threshold = 3*GRQ_XOFF_TH;
}
bd_chain = &grq->bd_chain;
lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_grq_page_cnt, sizeof(struct toe_rx_grq_bd), 0, TRUE);
/* First pass: provide just enough BDs for the firmware BD cache. */
lm_tcp_rx_fill_grq(pdev, (u8_t)i, NULL, FILL_GRQ_MIN_CASHED_BDS);
}
/* Second pass: fill the GRQs completely. */
LM_TOE_FOREACH_RSS_IDX(pdev, i)
{
lm_tcp_rx_fill_grq(pdev, (u8_t)i, NULL, FILL_GRQ_FULL);
}
return LM_STATUS_SUCCESS;
}
/* Program the cstorm internal memory for every TOE Tx completion queue
 * (SCQ): consumer pointer address, producer/consumer indices, next-page
 * address, and the status-block id/index the firmware should update.
 * The DbgBreakIf checks pin the expected HSI field sizes so a firmware
 * header change is caught in debug builds. */
static void _lm_tcp_init_cstorm_intmem(lm_device_t *pdev)
{
lm_toe_info_t *toe_info;
lm_address_t phys_addr;
lm_tcp_scq_t *scq;
u16_t idx;
u8_t drv_toe_rss_id;
u8_t port;
u8_t fw_sb_id;
toe_info = &pdev->toe_info;
port = PORT_ID(pdev);
LM_TOE_FOREACH_TSS_IDX(pdev, drv_toe_rss_id)
{
scq = &toe_info->scqs[drv_toe_rss_id];
/* Physical address of the first SCQ page = firmware consumer pointer. */
phys_addr = lm_bd_chain_phys_addr(&scq->bd_chain, 0);
DbgBreakIf(CSTORM_TOE_CQ_CONS_PTR_LO_SIZE != 4);
LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_CSTRORM_INTMEM);
DbgBreakIf (CSTORM_TOE_CQ_CONS_PTR_HI_SIZE != 4);
LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_CSTRORM_INTMEM);
idx = lm_bd_chain_prod_idx(&scq->bd_chain);
DbgBreakIf(CSTORM_TOE_CQ_PROD_SIZE != 2);
LM_INTMEM_WRITE16(pdev, CSTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_CSTRORM_INTMEM);
/* Consumer must still be at 0 at init time. */
idx = lm_bd_chain_cons_idx(&scq->bd_chain);
DbgBreakIf(idx != 0);
DbgBreakIf(CSTORM_TOE_CQ_CONS_SIZE != 2);
LM_INTMEM_WRITE16(pdev, CSTORM_TOE_CQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_CSTRORM_INTMEM);
/* Second page address, plus a flag telling firmware it is valid. */
phys_addr = lm_bd_chain_phys_addr(&scq->bd_chain, 1);
DbgBreakIf(CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_SIZE != 4);
LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_CSTRORM_INTMEM);
DbgBreakIf(CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_SIZE != 4);
LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_CSTRORM_INTMEM);
DbgBreakIf(CSTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_SIZE != 1);
LM_INTMEM_WRITE8(pdev, CSTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 1, BAR_CSTRORM_INTMEM);
fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(drv_toe_rss_id));
#ifdef _VBD_
/* With L4 RSS disabled on E2+, only the non-RSS chain/SB is legal. */
if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
{
fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(LM_NON_RSS_SB(pdev)));
if (drv_toe_rss_id != LM_NON_RSS_CHAIN(pdev))
{
DbgBreak();
}
}
#endif
LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), fw_sb_id, BAR_CSTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_INDEX_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), HC_INDEX_TOE_TX_CQ_CONS, BAR_CSTRORM_INTMEM);
}
}
/* Program the TOE push-timer into ustorm internal memory for this function.
 * Source and target resolutions are both 1000 ticks/sec here, so
 * lm_time_resolution() only normalizes (e.g. rounds a non-zero value up
 * to at least one tick). */
static void _lm_set_ofld_params_ustorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
    u32_t push_timer_ticks;

    push_timer_ticks = lm_time_resolution(pdev, l4_params->push_ticks, 1000, 1000);

    /* Pin the HSI field width before writing. */
    DbgBreakIf (USTORM_TOE_TCP_PUSH_TIMER_TICKS_SIZE != 4);
    LM_INTMEM_WRITE32(pdev, USTORM_TOE_TCP_PUSH_TIMER_TICKS_OFFSET(FUNC_ID(pdev)), push_timer_ticks, BAR_USTRORM_INTMEM);
}
/* Program the ustorm internal memory for every TOE Rx queue pair (RCQ+GRQ):
 * seed the firmware's GRQ BD cache, producer/consumer indices and pointers,
 * generic-buffer size, RCQ page addresses, dynamic-HC byte counter, the
 * status-block id/index, and finally the RSS indirection table. */
static void _lm_tcp_init_ustorm_intmem(lm_device_t *pdev)
{
lm_toe_info_t *toe_info;
lm_address_t phys_addr;
lm_tcp_rcq_t *rcq;
lm_tcp_grq_t *grq;
struct toe_rx_grq_bd *grq_bd;
u16_t idx;
u8_t drv_toe_rss_id, grq_bd_idx;
u8_t port;
u8_t fw_sb_id;
u8_t sw_sb_id;
toe_info = &pdev->toe_info;
port = PORT_ID(pdev);
_lm_set_ofld_params_ustorm_toe(pdev, &(pdev->ofld_info.l4_params));
LM_TOE_FOREACH_RSS_IDX(pdev,drv_toe_rss_id)
{
rcq = &toe_info->rcqs[drv_toe_rss_id];
grq = &toe_info->grqs[drv_toe_rss_id];
/* Copy the first USTORM_TOE_GRQ_CACHE_NUM_BDS BDs from the GRQ chain
 * into the firmware's local BD cache; they must all come from the
 * first page (checked against usable BDs per page). */
grq_bd = (struct toe_rx_grq_bd *)grq->bd_chain.bd_chain_virt;
DbgBreakIf( USTORM_TOE_GRQ_CACHE_NUM_BDS > lm_bd_chain_usable_bds_per_page(&grq->bd_chain));
for(grq_bd_idx = 0; grq_bd_idx < USTORM_TOE_GRQ_CACHE_NUM_BDS; grq_bd_idx++) {
LM_INTMEM_WRITE32(pdev, USTORM_GRQ_CACHE_BD_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) ,port,grq_bd_idx), grq_bd->addr_lo, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev, USTORM_GRQ_CACHE_BD_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id),port,grq_bd_idx), grq_bd->addr_hi, BAR_USTRORM_INTMEM);
grq_bd++;
}
/* Local cache producer starts past the seeded BDs, consumer at 0. */
DbgBreakIf (USTORM_TOE_GRQ_LOCAL_PROD_SIZE != 1);
LM_INTMEM_WRITE8(pdev, USTORM_TOE_GRQ_LOCAL_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u8_t)USTORM_TOE_GRQ_CACHE_NUM_BDS, BAR_USTRORM_INTMEM);
DbgBreakIf (USTORM_TOE_GRQ_LOCAL_CONS_SIZE != 1);
LM_INTMEM_WRITE8(pdev, USTORM_TOE_GRQ_LOCAL_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 0, BAR_USTRORM_INTMEM);
idx = lm_bd_chain_prod_idx(&grq->bd_chain);
DbgBreakIf (USTORM_TOE_GRQ_PROD_SIZE != 2);
LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
DbgBreakIf (USTORM_TOE_GRQ_CONS_SIZE != 2);
LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u8_t)USTORM_TOE_GRQ_CACHE_NUM_BDS, BAR_USTRORM_INTMEM);
/* GRQ consumer pointer: first page, advanced past the cached BDs. */
phys_addr = lm_bd_chain_phys_addr(&grq->bd_chain, 0);
LM_INC64(&phys_addr, sizeof(struct toe_rx_grq_bd) * USTORM_TOE_GRQ_CACHE_NUM_BDS);
DbgBreakIf (USTORM_TOE_GRQ_CONS_PTR_LO_SIZE != 4);
LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);
DbgBreakIf (USTORM_TOE_GRQ_CONS_PTR_HI_SIZE != 4);
LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);
DbgBreakIf (USTORM_TOE_GRQ_BUF_SIZE_SIZE != 2);
DbgBreakIf(LM_TCP_GEN_BUF_SIZE(pdev) > 0xffff);
LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_BUF_SIZE_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u16_t)LM_TCP_GEN_BUF_SIZE(pdev), BAR_USTRORM_INTMEM);
/* RCQ consumer pointer and next-page address. */
phys_addr = lm_bd_chain_phys_addr(&rcq->bd_chain, 0);
DbgBreakIf (USTORM_TOE_CQ_CONS_PTR_LO_SIZE != 4);
LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);
DbgBreakIf (USTORM_TOE_CQ_CONS_PTR_HI_SIZE != 4);
LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);
phys_addr = lm_bd_chain_phys_addr(&rcq->bd_chain, 1);
DbgBreakIf (USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_SIZE != 4);
LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);
DbgBreakIf (USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_SIZE != 4);
LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);
DbgBreakIf (USTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_SIZE != 1);
LM_INTMEM_WRITE8(pdev, USTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 1, BAR_USTRORM_INTMEM);
idx = lm_bd_chain_prod_idx(&rcq->bd_chain);
DbgBreakIf (USTORM_TOE_CQ_PROD_SIZE != 2);
LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
/* Seed the dynamic-HC quasi byte counter from the producer index. */
if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
u32_t l4_quasi_byte_counter;
u16_t prod_idx_diff = lm_bd_chain_prod_idx(&rcq->bd_chain) - rcq->bd_chain.bds_per_page * rcq->bd_chain.page_cnt;
l4_quasi_byte_counter = prod_idx_diff;
l4_quasi_byte_counter <<= 16;
LM_INTMEM_WRITE32(pdev, rcq->hc_sb_info.iro_dhc_offset, l4_quasi_byte_counter, BAR_CSTRORM_INTMEM);
}
idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
DbgBreakIf(idx != 0);
DbgBreakIf (USTORM_TOE_CQ_CONS_SIZE != 2);
LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(drv_toe_rss_id));
sw_sb_id = RSS_ID_TO_SB_ID(drv_toe_rss_id);
if (RSS_ID_TO_SB_ID(drv_toe_rss_id) >= MAX_NDSB) {
DbgBreak();
break;
}
#ifdef _VBD_
/* With L4 RSS disabled on E2+, only the non-RSS chain/SB is legal. */
if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
{
fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(LM_NON_RSS_SB(pdev)));
sw_sb_id = LM_NON_RSS_SB(pdev);
if (drv_toe_rss_id != LM_NON_RSS_CHAIN(pdev))
{
DbgBreak();
}
}
#endif
/* Mirror the dynamic-HC enable flag into the status-block data, then
 * push the flags byte down to the chip.
 * NOTE(review): the value written below is read from the
 * HC_INDEX_ETH_RX_CQ_CONS slot although the flags just updated above
 * (and the intmem offset being written) are for
 * HC_INDEX_TOE_RX_CQ_CONS - looks like a copy/paste slip; confirm
 * intended behavior against firmware docs before changing. */
if (CHIP_IS_E1x(pdev)) {
if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags |= HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
} else {
pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
}
LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
+ OFFSETOF(struct hc_status_block_data_e1x, index_data)
+ sizeof(struct hc_index_data)*HC_INDEX_TOE_RX_CQ_CONS
+ OFFSETOF(struct hc_index_data,flags),
pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_ETH_RX_CQ_CONS].flags, BAR_CSTRORM_INTMEM);
} else {
if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags |= HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
} else {
pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
}
LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
+ OFFSETOF(struct hc_status_block_data_e2, index_data)
+ sizeof(struct hc_index_data)*HC_INDEX_TOE_RX_CQ_CONS
+ OFFSETOF(struct hc_index_data,flags),
pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_ETH_RX_CQ_CONS].flags, BAR_CSTRORM_INTMEM);
}
LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port),fw_sb_id, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_INDEX_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), HC_INDEX_TOE_RX_CQ_CONS, BAR_USTRORM_INTMEM);
}
/* Indirection table: identity mapping when RSS is disabled, otherwise
 * the driver-computed table. */
DbgBreakIf (USTORM_INDIRECTION_TABLE_ENTRY_SIZE != 1);
if (pdev->params.l4_enable_rss == L4_RSS_DISABLED) {
LM_TOE_FOREACH_RSS_IDX(pdev, idx)
{
LM_INTMEM_WRITE8(pdev, USTORM_INDIRECTION_TABLE_OFFSET(port) + LM_TOE_FW_RSS_ID(pdev,idx), LM_TOE_FW_RSS_ID(pdev,(u8_t)idx), BAR_USTRORM_INTMEM);
}
} else {
for (idx = 0; idx < RSS_INDIRECTION_TABLE_SIZE; idx++) {
LM_INTMEM_WRITE8(pdev,USTORM_INDIRECTION_TABLE_OFFSET(port) + idx, pdev->toe_info.indirection_table[idx], BAR_USTRORM_INTMEM);
}
}
}
/* Program the tstorm parameters shared by all L4 offload clients:
 * duplicate-ACK threshold (clamped to the firmware maximum) and the maximum
 * congestion window for the configured network type (WAN or LAN). */
static void _lm_set_ofld_params_tstorm_common(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
    u8_t fn = FUNC_ID(pdev);
    u32_t dup_ack_th = l4_params->dup_ack_threshold;
    u32_t max_cwnd;

    if (dup_ack_th > TCP_TSTORM_MAX_DUP_ACK_TH) {
        DbgMessage(pdev, WARNl4sp,
                   "given dup_ack_threshold (=%d) too high. setting it to maximum allowed (=%d)\n",
                   dup_ack_th, TCP_TSTORM_MAX_DUP_ACK_TH);
        dup_ack_th = TCP_TSTORM_MAX_DUP_ACK_TH;
    }

    DbgBreakIf (TSTORM_TCP_DUPLICATE_ACK_THRESHOLD_SIZE != 4);
    LM_INTMEM_WRITE32(pdev, TSTORM_TCP_DUPLICATE_ACK_THRESHOLD_OFFSET(fn), dup_ack_th, BAR_TSTRORM_INTMEM);

    /* Pick the cwnd cap by network type; only WAN and LAN are legal. */
    DbgBreakIf (TSTORM_TCP_MAX_CWND_SIZE != 4);
    if (pdev->params.network_type == LM_NETOWRK_TYPE_WAN) {
        max_cwnd = pdev->params.max_cwnd_wan;
    } else {
        DbgBreakIf(pdev->params.network_type != LM_NETOWRK_TYPE_LAN);
        max_cwnd = pdev->params.max_cwnd_lan;
    }
    LM_INTMEM_WRITE32(pdev, TSTORM_TCP_MAX_CWND_OFFSET(fn), max_cwnd, BAR_TSTRORM_INTMEM);
}
/* Program the TOE-specific tstorm parameters for this function: the maximum
 * segment retransmit count and the doubt-reachability retransmit count. */
static void _lm_set_ofld_params_tstorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
    const u8_t fn = FUNC_ID(pdev);

    /* Pin the HSI field widths before each write. */
    DbgBreakIf (TSTORM_TOE_MAX_SEG_RETRANSMIT_SIZE != 4);
    LM_INTMEM_WRITE32(pdev, TSTORM_TOE_MAX_SEG_RETRANSMIT_OFFSET(fn), l4_params->max_retx, BAR_TSTRORM_INTMEM);

    DbgBreakIf (TSTORM_TOE_DOUBT_REACHABILITY_SIZE != 1);
    LM_INTMEM_WRITE8(pdev, TSTORM_TOE_DOUBT_REACHABILITY_OFFSET(fn), l4_params->doubt_reachability_retx, BAR_TSTRORM_INTMEM);
}
/* One-time tstorm internal-memory init: apply the per-function TOE offload
 * parameters, then program the global dominance tuning values. */
static void _lm_tcp_init_tstorm_intmem(lm_device_t *pdev)
{
_lm_set_ofld_params_tstorm_toe(pdev, &(pdev->ofld_info.l4_params));
DbgBreakIf (TSTORM_TOE_MAX_DOMINANCE_VALUE_SIZE != 1);
LM_INTMEM_WRITE8(pdev, TSTORM_TOE_MAX_DOMINANCE_VALUE_OFFSET, (u8_t)pdev->params.l4_max_dominance_value, BAR_TSTRORM_INTMEM);
DbgBreakIf (TSTORM_TOE_DOMINANCE_THRESHOLD_SIZE != 1);
LM_INTMEM_WRITE8(pdev, TSTORM_TOE_DOMINANCE_THRESHOLD_OFFSET, (u8_t)pdev->params.l4_dominance_threshold, BAR_TSTRORM_INTMEM);
}
/* Program the xstorm parameters shared by all L4 offload clients: delayed
 * ACK count and timer (per-port XCM registers), SWS prevention timer, and
 * the common RTC resolution.
 * NOTE(review): the warning below compares against
 * TCP_XCM_MIN_GLB_DEL_ACK_MAX_CNT but reports/sets
 * TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT - confirm which constant is intended.
 * NOTE(review): the final write divides 1000 by ticks_per_second; assumes
 * the caller supplies a non-zero value no greater than 1000 - verify. */
static void _lm_set_ofld_params_xstorm_common(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
u8_t func, ack_frequency;
u32_t val32, max_reg, tmr_reg, delayed_ack_ticks;
func = FUNC_ID(pdev);
/* The XCM delayed-ACK registers are per port. */
if (PORT_ID(pdev)) {
max_reg = XCM_REG_GLB_DEL_ACK_MAX_CNT_1;
tmr_reg = XCM_REG_GLB_DEL_ACK_TMR_VAL_1;
} else {
max_reg = XCM_REG_GLB_DEL_ACK_MAX_CNT_0;
tmr_reg = XCM_REG_GLB_DEL_ACK_TMR_VAL_0;
}
/* Clamp the ACK frequency to the allowed minimum. */
ack_frequency = l4_params->ack_frequency;
if(ack_frequency < TCP_XCM_MIN_GLB_DEL_ACK_MAX_CNT) {
DbgMessage(pdev, WARNl4sp,
"given ack_frequency (=%d) too low. setting it to minimum allowed (=%d)\n",
ack_frequency, TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT);
ack_frequency = TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT;
}
DbgBreakIf (XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_SIZE != 1);
LM_INTMEM_WRITE8(pdev, XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), ack_frequency, BAR_XSTRORM_INTMEM);
REG_WR(pdev, max_reg, ack_frequency);
/* Convert ms-based tick values to the hardware timer resolution. */
delayed_ack_ticks = lm_time_resolution(pdev, l4_params->delayed_ack_ticks, 1000, TIMERS_TICKS_PER_SEC);
REG_WR(pdev, tmr_reg, delayed_ack_ticks);
val32 = lm_time_resolution(pdev, l4_params->sws_prevention_ticks, 1000 , TIMERS_TICKS_PER_SEC);
DbgBreakIf (XSTORM_TCP_TX_SWS_TIMER_VAL_SIZE != 4);
LM_INTMEM_WRITE32(pdev, XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), val32, BAR_XSTRORM_INTMEM);
DbgBreakIf (XSTORM_COMMON_RTC_RESOLUTION_SIZE != 2);
LM_INTMEM_WRITE16(pdev, XSTORM_COMMON_RTC_RESOLUTION_OFFSET, 1000 / l4_params->ticks_per_second , BAR_XSTRORM_INTMEM);
}
/* Program the TOE-specific xstorm parameter for this function: whether
 * LLC/SNAP encapsulation is enabled (1) or not (0). */
static void _lm_set_ofld_params_xstorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
    u8_t snap_enabled = (l4_params->flags & OFLD_PARAM_FLAG_SNAP_ENCAP) ? 1 : 0;

    DbgBreakIf (XSTORM_TOE_LLC_SNAP_ENABLED_SIZE != 1);
    LM_INTMEM_WRITE8(pdev, XSTORM_TOE_LLC_SNAP_ENABLED_OFFSET(FUNC_ID(pdev)), snap_enabled, BAR_XSTRORM_INTMEM);
}
/* One-time xstorm internal-memory init: currently only the per-function TOE
 * offload parameters (LLC/SNAP flag). */
static void _lm_tcp_init_xstorm_intmem(lm_device_t *pdev)
{
_lm_set_ofld_params_xstorm_toe(pdev, &(pdev->ofld_info.l4_params));
}
/* Chip init common to all L4 offload clients (TOE and others): snapshot the
 * default client parameters into pdev->ofld_info, program the shared
 * x/tstorm parameters, enable the global delayed-ACK counter, and seed the
 * IP identification field. Always returns LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init_chip_common(lm_device_t *pdev)
{
l4_ofld_params_t l4_params;
u8_t func;
DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_chip_common\n");
DbgBreakIf(!pdev);
func = FUNC_ID(pdev);
/* Defaults become the active offload parameters for later per-storm
 * programming. */
_lm_get_default_l4cli_params(pdev, &l4_params);
pdev->ofld_info.l4_params = l4_params;
_lm_set_ofld_params_xstorm_common(pdev, &l4_params);
_lm_set_ofld_params_tstorm_common(pdev, &l4_params);
DbgBreakIf (XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_SIZE != 1);
LM_INTMEM_WRITE8(pdev, XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1 , BAR_XSTRORM_INTMEM);
DbgBreakIf (XSTORM_TCP_IPID_SIZE != 2);
LM_INTMEM_WRITE16(pdev, XSTORM_TCP_IPID_OFFSET(func), TOE_XSTORM_IP_ID_INIT_HI, BAR_XSTRORM_INTMEM);
return LM_STATUS_SUCCESS;
}
/* Program all four storm processors' internal memory for TOE (x, c, t, u).
 * Order follows the original bring-up sequence; always returns
 * LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init_chip(lm_device_t *pdev)
{
DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_chip\n");
_lm_tcp_init_xstorm_intmem(pdev);
_lm_tcp_init_cstorm_intmem(pdev);
_lm_tcp_init_tstorm_intmem(pdev);
_lm_tcp_init_ustorm_intmem(pdev);
return LM_STATUS_SUCCESS;
}
/* Post the TOE_INIT ramrod and wait (polling, up to ~10s) for the firmware
 * to move the TOE state machine to NORMAL, then register the TOE cid-recycle
 * and slow-path-completion callbacks. Always returns LM_STATUS_SUCCESS; a
 * timeout is reported via debug message / break (and faked as completed in
 * _VBD_CMD_ builds). */
lm_status_t lm_tcp_start_chip(lm_device_t *pdev)
{
    lm_toe_info_t *toe_info;
    u32_t to_cnt = 100000; /* 100000 iterations * 100us wait */
    u64_t data;
    struct toe_init_ramrod_data toe_init_data;

    DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_start_chip\n");
    toe_info = &pdev->toe_info;
    DbgBreakIf(toe_info->state != LM_TOE_STATE_INIT);

    /* Zero the ramrod data before filling it: only rss_num is set
     * explicitly, and the whole struct is handed to firmware below -
     * without this, uninitialized stack bytes would be sent. */
    mm_memset(&toe_init_data, 0, sizeof(toe_init_data));
    toe_init_data.rss_num = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));

    /* The ramrod carries the struct as a raw 64-bit immediate. */
    data = *((u64_t*)(&toe_init_data));
    lm_command_post(pdev, LM_SW_LEADING_RSS_CID(pdev), RAMROD_OPCODE_TOE_INIT, CMD_PRIORITY_NORMAL, TOE_CONNECTION_TYPE, data);

    /* Poll for the completion handler to flip the state to NORMAL. */
    while (toe_info->state != LM_TOE_STATE_NORMAL && to_cnt) {
        mm_wait(pdev,100);
        to_cnt--;
    }
    if(toe_info->state != LM_TOE_STATE_NORMAL) {
#ifndef _VBD_CMD_
        DbgMessage(pdev, FATAL, "TOE init ramrod did not complete\n");
#else
        /* Command-file builds have no firmware: fake the completion. */
        toe_info->state = LM_TOE_STATE_NORMAL;
        lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INIT, TOE_CONNECTION_TYPE, LM_SW_LEADING_RSS_CID(pdev));
#endif
#if defined(_VBD_)
        DbgBreak();
#endif
    }

    /* From here on the SQ/cid layers call back into the TOE module. */
    lm_cid_recycled_cb_register(pdev, TOE_CONNECTION_TYPE, lm_tcp_recycle_cid_cb);
    lm_sq_comp_cb_register(pdev, TOE_CONNECTION_TYPE, lm_tcp_comp_cb);
    return LM_STATUS_SUCCESS;
}
/* Top-level TOE bring-up: allocate and initialize driver resources, program
 * the chip, and start it with the TOE_INIT ramrod. Each stage must succeed
 * before the next runs. Not supported on VFs (returns SUCCESS as a no-op).
 * Returns the first failing stage's status, or LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init(lm_device_t *pdev)
{
lm_toe_info_t *toe_info;
lm_status_t lm_status;
DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init\n");
if (IS_VFDEV(pdev)) {
DbgMessage(pdev, FATAL, "###lm_tcp_init is not supported for VF\n");
return LM_STATUS_SUCCESS;
}
/* Start from a clean TOE info block; state becomes LM_TOE_STATE_INIT (0). */
toe_info = &pdev->toe_info;
mm_memset(toe_info, 0 , sizeof(lm_toe_info_t));
toe_info->pdev = pdev;
lm_status = lm_tcp_alloc_resc(pdev);
DbgBreakIf((lm_status!=LM_STATUS_SUCCESS) && DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
lm_status = lm_tcp_init_resc(pdev, TRUE);
DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
lm_status = lm_tcp_init_chip(pdev);
DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
lm_status = lm_tcp_start_chip(pdev);
DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
return lm_status;
}
/* TOE_INIT ramrod completion handler: transitions the TOE state machine
 * from INIT to NORMAL (lm_tcp_start_chip polls for this transition). */
void lm_tcp_init_ramrod_comp(lm_device_t *pdev)
{
lm_toe_info_t *toe_info;
DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_ramrod_comp\n");
DbgBreakIf(!pdev);
toe_info = &pdev->toe_info;
DbgBreakIf(toe_info->state != LM_TOE_STATE_INIT);
toe_info->state = LM_TOE_STATE_NORMAL;
}
/* RSS-update ramrod completion for one TOE RCQ: bump the per-type statistic,
 * record the CID used for the update completion, and either suspend the RCQ
 * now or defer the suspension to later processing. */
void lm_tcp_rss_update_ramrod_comp(
    struct _lm_device_t *pdev,
    lm_tcp_rcq_t *rcq,
    u32_t cid,
    u32_t update_stats_type,
    u8_t update_suspend_rcq)
{
    DbgBreakIf(rcq->suspend_processing == TRUE);
    DbgMessage(pdev, INFORMl4sp, "lm_tcp_rss_update_ramrod_comp(): %d\n",update_stats_type);

    /* Account the completion under its update flavor. */
    if (update_stats_type == TOE_RSS_UPD_QUIET) {
        rcq->rss_update_stats_quiet++;
    } else if (update_stats_type == TOE_RSS_UPD_SLEEPING) {
        rcq->rss_update_stats_sleeping++;
    } else if (update_stats_type == TOE_RSS_UPD_DELAYED) {
        rcq->rss_update_stats_delayed++;
    } else {
        DbgBreak();
    }

    rcq->update_cid = LM_SW_LEADING_RSS_CID(pdev);

    if (!update_suspend_rcq) {
        rcq->rss_update_processing_delayed++;
    } else {
        lm_tcp_rss_update_suspend_rcq(pdev, rcq);
    }
}
/* Decide whether this RCQ should suspend processing while an RSS update is
 * in flight, and drive the shared completion counters. When the last TOE
 * RCQ arrives (update_toe_comp_cnt hits 0) the RSS-update ramrod is
 * completed on the SQ; when the last overall participant arrives
 * (update_suspend_cnt hits 0) the pending set-RSS cookie is completed back
 * to the OS. NOTE(review): the counter decrements are atomic but the
 * re-read of update_toe_comp_cnt below is not part of the same atomic op --
 * presumably callers serialize per-RCQ invocation; confirm against callers. */
void lm_tcp_rss_update_suspend_rcq(
IN struct _lm_device_t * pdev,
IN lm_tcp_rcq_t * rcq)
{
void * cookie = NULL;
if (rcq->suspend_processing == FALSE)
{
/* This RCQ participates: drop both the global and the TOE-only counts. */
mm_atomic_dec(&pdev->params.update_comp_cnt);
if (mm_atomic_dec(&pdev->params.update_toe_comp_cnt) == 0)
{
/* Last TOE RCQ to report: complete the RSS-update ramrod. */
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RSS_UPDATE,
TOE_CONNECTION_TYPE, LM_TOE_FW_RSS_ID(pdev, LM_TOE_BASE_RSS_ID(pdev)));
}
}
/* Keep the RCQ suspended while any TOE completion is still outstanding. */
rcq->suspend_processing = pdev->params.update_toe_comp_cnt ? TRUE : FALSE;
if (rcq->suspend_processing == FALSE)
{
DbgMessage(pdev, INFORMl4sp, "lm_tcp_rss_update_suspend_rcq(): calling lm_eth_update_ramrod_comp\n");
if (mm_atomic_dec(&pdev->params.update_suspend_cnt) == 0)
{
/* Last participant overall: hand the set-RSS cookie back to the OS. */
if (pdev->slowpath_info.set_rss_cookie)
{
cookie = (void *)pdev->slowpath_info.set_rss_cookie;
pdev->slowpath_info.set_rss_cookie = NULL;
mm_set_done(pdev, rcq->update_cid, cookie);
}
}
}
}
/* Initialize a neighbor (L2 peer) state object from the caller-supplied
 * const/cached/delegated snapshots and link it into the state block's
 * neighbor list. Always returns LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init_neigh_state(
    struct _lm_device_t *pdev,
    lm_state_block_t *state_blk,
    lm_neigh_state_t *neigh,
    l4_neigh_const_state_t *neigh_const,
    l4_neigh_cached_state_t *neigh_cached,
    l4_neigh_delegated_state_t *neigh_delegated)
{
    DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_neigh_state\n");
    DbgBreakIf(!(pdev && state_blk && neigh && neigh_const && neigh_cached && neigh_delegated));

    /* Header bookkeeping + registration in the state block. */
    neigh->hdr.state_blk = state_blk;
    neigh->hdr.state_id = STATE_ID_NEIGH;
    neigh->hdr.status = STATE_STATUS_NORMAL;
    d_list_push_tail(&state_blk->neigh_list, &neigh->hdr.link);
    neigh->num_dependents = 0;

    /* Copy in the three parameter snapshots. */
    mm_memcpy(&neigh->neigh_cached, neigh_cached, sizeof(neigh->neigh_cached));
    mm_memcpy(&neigh->neigh_const, neigh_const, sizeof(neigh->neigh_const));
    mm_memcpy(&neigh->neigh_delegated, neigh_delegated, sizeof(neigh->neigh_delegated));

    /* Reachability tracking starts from a clean slate. */
    neigh->host_reachability_time = 0;
    neigh->nic_reachability_time = 0;
    neigh->stale = 0;

    return LM_STATUS_SUCCESS;
}
/* Initialize a path (route) state object, attach it to its neighbor, and
 * link it into the state block's path list. Always returns
 * LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init_path_state(
    struct _lm_device_t *pdev,
    lm_state_block_t *state_blk,
    lm_path_state_t *path,
    lm_neigh_state_t *neigh,
    l4_path_const_state_t *path_const,
    l4_path_cached_state_t *path_cached,
    l4_path_delegated_state_t *path_delegated)
{
    DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_path_state\n");
    DbgBreakIf(!(pdev && state_blk && path && neigh && path_const && path_cached && path_delegated));
    /* The neighbor must already be fully initialized and healthy. */
    DbgBreakIf(neigh->hdr.state_id != STATE_ID_NEIGH || neigh->hdr.status != STATE_STATUS_NORMAL);

    /* Header bookkeeping + registration in the state block. */
    path->hdr.state_blk = state_blk;
    path->hdr.state_id = STATE_ID_PATH;
    path->hdr.status = STATE_STATUS_NORMAL;
    d_list_push_tail(&state_blk->path_list, &path->hdr.link);

    /* Wire the path to its neighbor (and count the dependency). */
    path->neigh = neigh;
    neigh->num_dependents++;
    path->num_dependents = 0;

    /* Copy in the three parameter snapshots. */
    mm_memcpy(&path->path_cached, path_cached, sizeof(path->path_cached));
    mm_memcpy(&path->path_const, path_const, sizeof(path->path_const));
    mm_memcpy(&path->path_delegated, path_delegated, sizeof(path->path_delegated));

    return LM_STATUS_SUCCESS;
}
/* Initialize a TCP connection state object, attach it to its path, record
 * offload statistics, and link it into the state block's TCP list. A
 * non-zero tcp_cid_addr means the CID was pre-allocated (e.g. iSCSI) and
 * its ULP type is derived from the CID mapping; zero means a plain TOE
 * connection. Always returns LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init_tcp_state(
    struct _lm_device_t *pdev,
    lm_state_block_t *state_blk,
    lm_tcp_state_t *tcp,
    lm_path_state_t *path,
    l4_tcp_const_state_t *tcp_const,
    l4_tcp_cached_state_t *tcp_cached,
    l4_tcp_delegated_state_t *tcp_delegated,
    u32_t tcp_cid_addr)
{
    DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tcp_state, ptr=%p, src_port=%d\n", tcp, tcp_const->src_port);
    DbgBreakIf(!(pdev && state_blk && tcp && path && tcp_const && tcp_cached && tcp_delegated));
    /* The path must already be fully initialized and healthy. */
    DbgBreakIf(path->hdr.state_id != STATE_ID_PATH || path->hdr.status != STATE_STATUS_NORMAL);

    if (tcp_cid_addr) {
        /* Pre-allocated CID: derive ULP type and claim the CID resource. */
        tcp->ulp_type = lm_map_cid_to_proto(pdev, tcp_cid_addr);
        tcp->cid = tcp_cid_addr;
        lm_set_cid_resc(pdev, TOE_CONNECTION_TYPE, tcp, tcp_cid_addr);
    } else {
        tcp->ulp_type = TOE_CONNECTION_TYPE;
    }

    /* Header bookkeeping + registration in the state block. */
    tcp->hdr.state_blk = state_blk;
    tcp->hdr.state_id = STATE_ID_TCP;
    tcp->hdr.status = STATE_STATUS_INIT;
    d_list_push_tail(&state_blk->tcp_list, &tcp->hdr.link);

    /* Wire the connection to its path (and count the dependency). */
    tcp->path = path;
    path->num_dependents++;

    /* Per-ULP offload statistics. */
    switch (tcp->ulp_type) {
    case TOE_CONNECTION_TYPE:
        pdev->toe_info.stats.total_ofld++;
        break;
    case ISCSI_CONNECTION_TYPE:
        pdev->iscsi_info.run_time.stats.total_ofld++;
        break;
    default:
        break;
    }

    /* Copy in the three parameter snapshots. */
    mm_memcpy(&tcp->tcp_cached, tcp_cached, sizeof(tcp->tcp_cached));
    mm_memcpy(&tcp->tcp_const, tcp_const, sizeof(tcp->tcp_const));
    mm_memcpy(&tcp->tcp_delegated, tcp_delegated, sizeof(tcp->tcp_delegated));

    return LM_STATUS_SUCCESS;
}
/* Compute the effective MSS for a connection: path MTU minus IP+TCP headers
 * (and, when the relevant build flags are set, LLC/SNAP and VLAN overhead),
 * clamped to the peer-advertised remote_mss, and reduced by the 12-byte
 * timestamp option when timestamps are enabled. Never returns 0. */
static u32_t _lm_tcp_calc_mss(u32_t path_mtu, u16_t remote_mss, u8_t is_ipv6, u8_t ts_enabled,
u8_t llc_snap_enabled, u8_t vlan_enabled)
{
#define MIN_MTU         576
#define IPV4_HDR_LEN    20
#define IPV6_HDR_LEN    40
#define TCP_HDR_LEN     20
#define TCP_OPTION_LEN  12
#define LLC_SNAP_LEN    8
#define VLAN_LEN        4

    u32_t mss = 0;
    u32_t hdrs = TCP_HDR_LEN;

    UNREFERENCED_PARAMETER_(vlan_enabled);
    UNREFERENCED_PARAMETER_(llc_snap_enabled);

    if(is_ipv6) {
        hdrs += IPV6_HDR_LEN;
    } else {
        hdrs += IPV4_HDR_LEN;
    }
#ifdef LLC_SNAP_HEADER_ROOMS_WITH_PAYLOAD
    if (llc_snap_enabled) {
        hdrs += LLC_SNAP_LEN;
    }
#endif
#ifdef VLAN_HEADER_ROOMS_WITH_PAYLOAD
    if (vlan_enabled) {
        hdrs += VLAN_LEN;
    }
#endif
    DbgBreakIf(path_mtu < MIN_MTU);
    mss = path_mtu - hdrs;

    /* remote_mss is peer-supplied; it may be arbitrarily small. */
    if(mss > remote_mss) {
        mss = remote_mss;
    }
    if(ts_enabled) {
        /* BUG FIX: guard against u32 underflow. If the (remote-controlled)
         * MSS is not larger than the timestamp option, subtracting would
         * wrap to a huge value that the !mss check below cannot catch. */
        if (mss > TCP_OPTION_LEN) {
            mss -= TCP_OPTION_LEN;
        } else {
            DbgBreakIf(mss <= TCP_OPTION_LEN);
            mss = 1;
        }
    }
    /* Final safety net (e.g. remote_mss == 0): MSS must be at least 1. */
    if (!mss) {
        DbgBreakIf(!mss);
        mss = 1;
    }
    return mss;
}
/* Compute the number of fragments needed to cover a receive window of
 * initial_rcv_wnd bytes at the given MSS: at least 64KB worth, at most
 * l4_max_rcv_wnd_size worth (when that limit exceeds 64KB), doubled plus
 * one, and finally capped by l4_max_gen_buf_cnt when configured. */
static u32_t _lm_tcp_calc_frag_cnt(lm_device_t * pdev, u32_t initial_rcv_wnd, u32_t mss)
{
    u32_t cnt;
    u32_t floor_cnt;

    cnt = initial_rcv_wnd / mss;

    /* Never budget for less than a 64KB window. */
    floor_cnt = 0x10000 / mss;
    if (cnt < floor_cnt) {
        cnt = floor_cnt;
    }

    /* Cap by the configured maximum window (only meaningful above 64KB). */
    if (pdev->params.l4_max_rcv_wnd_size > 0x10000) {
        u32_t ceil_cnt = pdev->params.l4_max_rcv_wnd_size / mss;
        if (cnt > ceil_cnt) {
            cnt = ceil_cnt;
        }
    }

    cnt = 2 * cnt + 1;

    /* Hard cap by the generic-buffer budget, when one is configured. */
    if (pdev->params.l4_max_gen_buf_cnt && (cnt > pdev->params.l4_max_gen_buf_cnt)) {
        cnt = pdev->params.l4_max_gen_buf_cnt;
    }
    return cnt;
}
/* Public wrapper: derive the connection's MSS from its path/neighbor state
 * and return the fragment count for its initial receive window. */
u32_t lm_tcp_calc_frag_cnt(
    lm_device_t * pdev,
    lm_tcp_state_t * tcp
    )
{
    u32_t mss;

    DbgBreakIf(!(pdev && tcp));

    /* LLC/SNAP is deliberately passed as FALSE here. */
    mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
                           tcp->tcp_const.remote_mss,
                           (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
                           tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
                           FALSE,
                           tcp->path->neigh->neigh_const.vlan_tag != 0);

    return _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, mss);
}
/* Set up a queue-element ring descriptor over a caller-provided buffer of
 * cnt elements of cqe_size bytes each: head/tail/first point at the start,
 * last points at the final element. */
static void _lm_tcp_init_qe_buffer(
    struct _lm_device_t * pdev,
    lm_tcp_qe_buffer_t  * qe_buffer,
    u8_t                * mem_virt,
    u32_t                 cnt,
    u8_t                  cqe_size)
{
    char *base = (char *)mem_virt;

    UNREFERENCED_PARAMETER_(pdev);

    qe_buffer->left    = cnt;
    qe_buffer->first   = base;
    qe_buffer->head    = base;
    qe_buffer->tail    = base;
    qe_buffer->last    = base + (cnt - 1) * cqe_size;
    qe_buffer->qe_size = cqe_size;
}
/* Return the virtual-memory size needed for the Rx connection's generic
 * fragment list, based on the fragment count implied by the connection's
 * MSS and initial receive window. */
u32_t lm_tcp_rx_con_get_virt_size(struct _lm_device_t * pdev, lm_tcp_state_t * tcp)
{
    u32_t frag_cnt;
    u32_t mss;

    DbgBreakIf(tcp->tcp_cached.initial_rcv_wnd == 0);

    mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
                           tcp->tcp_const.remote_mss,
                           (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
                           tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
                           pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
                           tcp->path->neigh->neigh_const.vlan_tag != 0);

    frag_cnt = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, mss);
    DbgMessage(pdev, INFORMl4rx, "Calc #frags for rx-con initial_rcv_wnd: %d frag_cnt: %d\n", tcp->tcp_cached.initial_rcv_wnd, frag_cnt);

    /* lm_frag_list_t already embeds one lm_frag_t, hence frag_cnt - 1. */
    return sizeof(lm_frag_list_t) + (frag_cnt - 1) * sizeof(lm_frag_t);
}
/* Point the connection's slow-path request data at the per-CID slow-path
 * manager's pre-allocated (cache-line aligned) buffer. */
void lm_tcp_init_tcp_sp_data_mem(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp
    )
{
    lm_sp_req_manager_t *sp_req_mgr;

    sp_req_mgr = lm_cid_sp_req_mgr(pdev, tcp->cid);
    if CHK_NULL(sp_req_mgr)
    {
        DbgBreakIf(!sp_req_mgr);
        return;
    }

    /* The firmware requires cache-line alignment of the physical buffer. */
    DbgBreakIf(sp_req_mgr->sp_data_phys_addr.as_u32.low & CACHE_LINE_SIZE_MASK);

    tcp->sp_req_data.phys_addr = sp_req_mgr->sp_data_phys_addr;
    tcp->sp_req_data.virt_addr = sp_req_mgr->sp_data_virt_addr;
}
/* Carve the connection's physically-contiguous memory out of the supplied
 * block list: Tx BD chain pages, Rx BD chain pages, the slow-path data
 * area, and the Tx/Rx doorbell data areas. Each carve checks whether the
 * current block still has room and advances to the next block otherwise. */
void lm_tcp_init_tcp_phys_mem(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp,
    lm_tcp_phy_mem_block_t * phy_mblk)
{
    lm_tcp_con_t * con;
    u32_t mem_size;
    u16_t page_cnt,page_idx;
    u32_t idx = 0;
    u8_t bd_size;
    u8_t block_idx;

#if (LM_PAGE_SIZE != 4096)
#error (LM_PAGE_SIZE != 4096)
#endif

    /* Iteration 0 builds the Tx BD chain, iteration 1 the Rx BD chain. */
    con = tcp->tx_con;
    page_cnt = (u16_t)pdev->params.l4_tx_chain_page_cnt;
    bd_size = sizeof(struct toe_tx_bd);
    block_idx = 0;
    for (idx = 0 ; idx < 2; idx++) {
        mem_size = LM_PAGE_SIZE;
        for (page_idx = 0; page_idx < page_cnt; page_idx++) {
            if (phy_mblk[block_idx].left < mem_size) {
                block_idx++;
                DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
            }
            DbgBreakIf(phy_mblk[block_idx].left < mem_size);
            lm_bd_chain_add_page(pdev,&con->bd_chain,phy_mblk[block_idx].free, phy_mblk[block_idx].free_phy, bd_size, TRUE);
            phy_mblk[block_idx].free += mem_size;
            phy_mblk[block_idx].left -= mem_size;
            LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
        }
        /* Switch to the Rx chain parameters for the second pass. */
        con = tcp->rx_con;
        page_cnt = (u16_t)pdev->params.l4_rx_chain_page_cnt;
        bd_size = sizeof(struct toe_rx_bd);
    }

    /* Slow-path physical data (cache-line aligned, zeroed). */
    mem_size = TOE_SP_PHYS_DATA_SIZE;
    if (phy_mblk[block_idx].left < mem_size) {
        block_idx++;
        DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
    }
    DbgBreakIf(mem_size > phy_mblk[block_idx].left);
    DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
    tcp->sp_req_data.phys_addr = phy_mblk[block_idx].free_phy;
    tcp->sp_req_data.virt_addr = (lm_tcp_slow_path_phys_data_t *)phy_mblk[block_idx].free;
    mm_memset(tcp->sp_req_data.virt_addr, 0, mem_size);
    phy_mblk[block_idx].free += mem_size;
    phy_mblk[block_idx].left -= mem_size;
    LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);

    /* Tx doorbell data. */
    mem_size = TOE_DB_TX_DATA_SIZE;
    if (phy_mblk[block_idx].left < mem_size) {
        block_idx++;
        DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
    }
    DbgBreakIf(mem_size > phy_mblk[block_idx].left);
    DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
    tcp->tx_con->phys_db_data = phy_mblk[block_idx].free_phy;
    tcp->tx_con->db_data.tx = (volatile struct toe_tx_db_data *)phy_mblk[block_idx].free;
    tcp->tx_con->db_data.tx->flags = 0;
    tcp->tx_con->db_data.tx->bds_prod = 0;
    /* Byte producer sequence starts at the delegated send_una. */
    tcp->tx_con->db_data.tx->bytes_prod_seq = tcp->tcp_delegated.send_una;
    phy_mblk[block_idx].free += mem_size;
    phy_mblk[block_idx].left -= mem_size;
    LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);

    /* Rx doorbell data.
     * BUG FIX: mem_size must be set to TOE_DB_RX_DATA_SIZE BEFORE the
     * room check; the original checked the block capacity with the Tx
     * doorbell size still in mem_size, unlike the two sections above
     * which set the size first and then check. */
    mem_size = TOE_DB_RX_DATA_SIZE;
    if (phy_mblk[block_idx].left < mem_size) {
        block_idx++;
        DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
    }
    DbgBreakIf(mem_size > phy_mblk[block_idx].left);
    DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
    tcp->rx_con->phys_db_data = phy_mblk[block_idx].free_phy;
    tcp->rx_con->db_data.rx = (volatile struct toe_rx_db_data *)phy_mblk[block_idx].free;
    phy_mblk[block_idx].free += mem_size;
    phy_mblk[block_idx].left -= mem_size;
    LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);

    /* Receive-window right edge seeded from the delegated state; the
     * driver's shadow copy must match the doorbell copy. */
    tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
    tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
    tcp->rx_con->db_data.rx->bds_prod = 0;
    tcp->rx_con->db_data.rx->bytes_prod = 0;
    tcp->rx_con->db_data.rx->consumed_grq_bytes = 0;
    tcp->rx_con->db_data.rx->flags = 0;
    tcp->rx_con->db_data.rx->reserved1 = 0;
}
/* Carve the connection's virtual memory out of the supplied block: a
 * history-CQE ring for the Tx connection, one for the Rx connection, and
 * finally the Rx generic fragment list. */
void lm_tcp_init_tcp_virt_mem(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp,
lm_tcp_mem_block_t * mblk)
{
lm_tcp_con_t * con;
u32_t mem_size;
u32_t idx = 0;
u8_t cqe_size;
/* Iteration 0 sets up the Tx history ring, iteration 1 the Rx ring. */
con = tcp->tx_con;
cqe_size = sizeof(struct toe_tx_cqe);
for (idx = 0; idx < 2; idx++) {
if (pdev->params.l4_history_cqe_cnt) {
mem_size = pdev->params.l4_history_cqe_cnt*cqe_size;
DbgBreakIf(mblk->left < mem_size);
_lm_tcp_init_qe_buffer(pdev, &con->history_cqes, mblk->free, pdev->params.l4_history_cqe_cnt, cqe_size);
mblk->free += mem_size;
mblk->left -= mem_size;
} else {
DbgBreakMsg("MichalS: Currently History Count = 0 is not SUPPORTED\n");
}
/* Switch to the Rx connection for the second pass. */
con = tcp->rx_con;
cqe_size = sizeof(struct toe_rx_cqe);
}
/* Rx generic-buffer fragment list (size derived from window/MSS). */
mem_size = lm_tcp_rx_con_get_virt_size(pdev, tcp);
DbgBreakIf(mblk->left < mem_size);
tcp->rx_con->u.rx.gen_info.frag_list = (lm_frag_list_t *)mblk->free;
mblk->free += mem_size;
mblk->left -= mem_size;
}
/* Initialize both connection sides (Tx and Rx) of a TCP state: reset their
 * per-side unions, block posting/completion until offload finishes, carve
 * physical and virtual memory, and pre-compute MSS and the maximum generic
 * fragment count. Always returns LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_init_tcp_resc(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp,
lm_tcp_mem_block_t * mblk,
lm_tcp_phy_mem_block_t * phy_mblk)
{
DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tx_con\n");
DbgBreakIf(!(pdev && tcp));
/* Tx side: blocked until the offload completes. */
tcp->tx_con->type = TCP_CON_TYPE_TX;
mm_memset(&tcp->tx_con->u.tx, 0, sizeof(lm_tcp_con_tx_t));
tcp->tx_con->flags = (TCP_POST_BLOCKED | TCP_COMP_BLOCKED);
tcp->tx_con->tcp_state = tcp;
s_list_init(&tcp->tx_con->active_tb_list, NULL, NULL, 0);
/* Rx side: same treatment. */
tcp->rx_con->type = TCP_CON_TYPE_RX;
mm_memset(&tcp->rx_con->u.rx, 0, sizeof(lm_tcp_con_rx_t));
tcp->rx_con->flags = (TCP_POST_BLOCKED | TCP_COMP_BLOCKED);
tcp->rx_con->tcp_state = tcp;
s_list_init(&tcp->rx_con->active_tb_list, NULL, NULL, 0);
/* Carve physical then virtual memory for this connection. */
lm_tcp_init_tcp_phys_mem(pdev,tcp,phy_mblk);
lm_tcp_init_tcp_virt_mem(pdev,tcp,mblk);
/* One MSS value is shared by the Rx SWS logic and the Tx side. */
tcp->rx_con->u.rx.sws_info.mss = tcp->tx_con->u.tx.mss =
_lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
tcp->tcp_const.remote_mss,
(tcp->path->path_const.ip_version == IP_VERSION_IPV6),
tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
tcp->path->neigh->neigh_const.vlan_tag != 0);
tcp->rx_con->u.rx.gen_info.max_frag_count = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, tcp->rx_con->u.rx.sws_info.mss);
return LM_STATUS_SUCCESS;
}
/* Return the virtual-memory footprint of one TOE connection: two history
 * CQE rings plus the Rx fragment list. When tcp_state is NULL the fragment
 * list is sized pessimistically from the leading L2 client's MTU and a
 * default 64KB window. Returns 0 (with a debug break) on sanity failure. */
u32_t lm_tcp_get_virt_size(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp_state)
{
    u32_t mss;
    u32_t virt_size;
    u32_t const chain_idx = LM_SW_LEADING_RSS_CID(pdev);

    /* History rings: one Tx and one Rx CQE per history entry. */
    virt_size =
        pdev->params.l4_history_cqe_cnt*sizeof(struct toe_tx_cqe) +
        pdev->params.l4_history_cqe_cnt*sizeof(struct toe_rx_cqe);

    if (tcp_state)
    {
        return virt_size + lm_tcp_rx_con_get_virt_size(pdev, tcp_state);
    }

#define LM_TCP_DEFAULT_WINDOW_SIZE 0x10000
    /* No connection yet: validate the leading chain index first. */
    if(CHK_NULL(pdev) ||
       ERR_IF((ARRSIZE(pdev->params.l2_cli_con_params) <= chain_idx) ||
              (CHIP_IS_E1H(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1H)) ||
              (CHIP_IS_E1(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1)) ))
    {
        DbgBreakIf(1);
        return 0;
    }

    /* Worst-case MSS from the leading client's MTU, no options/encap. */
    mss = _lm_tcp_calc_mss(pdev->params.l2_cli_con_params[chain_idx].mtu, 0xffff, FALSE, FALSE, FALSE, FALSE);
    virt_size += sizeof(lm_frag_list_t) +
        (_lm_tcp_calc_frag_cnt(pdev, LM_TCP_DEFAULT_WINDOW_SIZE, mss) - 1)*sizeof(lm_frag_t);
    return virt_size;
}
/* Return the physical-memory footprint of one TOE connection: the
 * doorbell/slow-path data rounded up to a whole page, plus the Tx and Rx
 * BD-chain pages. */
u32_t lm_tcp_get_phys_size(
    struct _lm_device_t * pdev)
{
    u32_t mem_size;

    /* Slow-path + both doorbell areas, rounded up to a full page. */
    mem_size = TOE_SP_PHYS_DATA_SIZE + TOE_DB_TX_DATA_SIZE + TOE_DB_RX_DATA_SIZE;
    mem_size = ((mem_size / LM_PAGE_SIZE) + 1) * LM_PAGE_SIZE;

    /* One page per configured Tx/Rx BD-chain page. */
    mem_size += (pdev->params.l4_rx_chain_page_cnt + pdev->params.l4_tx_chain_page_cnt) * LM_PAGE_SIZE;

    return mem_size;
}
/* Splice offload-time buffered data onto the (empty) Rx peninsula list and
 * account its byte count; mark Rx completions as deferred so the data is
 * indicated later. Always returns LM_STATUS_SUCCESS. */
lm_status_t lm_tcp_post_buffered_data(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp,
    d_list_t *buffered_data)
{
    lm_tcp_con_rx_gen_info_t * gen_info;
    lm_tcp_gen_buf_t * gen_buf;

    DbgBreakIf(!buffered_data);
    if (d_list_is_empty(buffered_data)) {
        return LM_STATUS_SUCCESS;
    }

    gen_info = &tcp->rx_con->u.rx.gen_info;
    /* The peninsula must be empty at offload time. */
    DbgBreakIf(!d_list_is_empty(&gen_info->peninsula_list));

    /* Count the buffered bytes, then splice the whole list in. */
    for (gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(buffered_data);
         gen_buf;
         gen_buf = (lm_tcp_gen_buf_t *)d_list_next_entry(&gen_buf->link)) {
        gen_info->peninsula_nbytes += gen_buf->placed_bytes;
    }
    d_list_add_head(&gen_info->peninsula_list, buffered_data);

    DbgBreakIf(tcp->rx_con->flags & TCP_INDICATE_REJECTED);
    tcp->rx_con->flags |= TCP_RX_COMP_DEFERRED;

    return LM_STATUS_SUCCESS;
}
/* Compute the partial TCP pseudo-header checksum over the network-order
 * source/destination addresses plus the TCP protocol number (length is not
 * included here). IPv4 uses one 32-bit word per address, IPv6 four. */
static u16_t lm_tcp_calc_tcp_pseudo_checksum(
    struct _lm_device_t *pdev,
    u32_t n_src_ip[4],
    u32_t n_dst_ip[4],
    u8_t ip_type)
{
#define D_IP_PROTOCOL_TCP 6
    u32_t sum = 0;
    int i;
    int addr_words = (ip_type == IP_VERSION_IPV4) ? 1 : 4;

    /* Fold both halves of every address word into the sum (ones'-
     * complement addition is commutative, so order is irrelevant). */
    for (i = 0; i < addr_words; i++) {
        sum += (n_src_ip[i] & 0xffff) + ((n_src_ip[i]>>16) & 0xffff);
        sum += (n_dst_ip[i] & 0xffff) + ((n_dst_ip[i]>>16) & 0xffff);
    }

    sum += HTON16((u16_t)(D_IP_PROTOCOL_TCP));

    /* Fold carries until the sum fits in 16 bits. */
    while( sum >> 16 ) {
        sum = (sum & 0xffff) + (sum >> 16);
    }

    DbgMessage(pdev, VERBOSEl4sp,
               "_lm_tcp_calc_tcp_pseudo_checksum: n_src_ip=%x, n_dst_ip=%x, (u16_t)sum=%x\n",
               n_src_ip[0], n_dst_ip[0], (u16_t)sum);
    return (u16_t)sum;
}
/* Walk the Tx BD chain to locate the BD and intra-BD byte offset that
 * correspond to sequence number snd_nxt, given that the chain's first BD
 * starts at snd_una. Outputs the BD index, the byte offset within that BD,
 * and the physical address of the page holding it. Returns
 * LM_STATUS_INVALID_PARAMETER on any consistency violation. */
static lm_status_t lm_locate_snd_next_info(
lm_tcp_con_t * tx_con,
u32_t snd_nxt,
u32_t snd_una,
u16_t * bd_idx,
u16_t * bd_offset,
lm_address_t * page_addr)
{
u32_t cur_seq = 0;
struct toe_tx_bd * cur_tx_bd = NULL;
DbgMessage(NULL, VERBOSEl4sp, "### lm_locate_snd_next_info\n");
/* Sanity: nothing consumed or completed yet, and the posted byte count
 * must cover at least the un-acked span (snd_nxt - snd_una). */
if ((tx_con->bd_chain.cons_idx != 0) ||
(S32_SUB(tx_con->bytes_post_cnt ,S32_SUB(snd_nxt, snd_una)) < 0) ||
(tx_con->bytes_comp_cnt))
{
DbgBreakIf(tx_con->bd_chain.cons_idx != 0);
DbgBreakIf(S32_SUB(tx_con->bytes_post_cnt ,S32_SUB(snd_nxt, snd_una)) < 0);
DbgBreakIf(tx_con->bytes_comp_cnt);
return LM_STATUS_INVALID_PARAMETER;
}
/* Default result: start of the chain. */
*bd_idx = 0;
*bd_offset = 0;
*page_addr = tx_con->bd_chain.bd_chain_phy;
if (lm_bd_chain_prod_idx(&tx_con->bd_chain) == 0) {
/* Empty chain: only valid if nothing was posted and snd_nxt==snd_una. */
if ((tx_con->bytes_post_cnt > 0) ||
(snd_nxt != snd_una))
{
DbgBreakIf(tx_con->bytes_post_cnt > 0);
DbgBreakIf(snd_nxt != snd_una);
return LM_STATUS_INVALID_PARAMETER;
}
return LM_STATUS_SUCCESS;
}
/* Advance BD by BD (using sequence-space arithmetic that tolerates
 * wrap-around) until the BD containing snd_nxt is reached. */
cur_seq = snd_una;
cur_tx_bd = (struct toe_tx_bd *)tx_con->bd_chain.bd_chain_virt;
while ((*bd_idx < lm_bd_chain_prod_idx(&tx_con->bd_chain))
&& S32_SUB(snd_nxt, cur_seq + cur_tx_bd->size) >= 0) {
cur_seq += cur_tx_bd->size;
lm_bd_chain_incr_bd(&tx_con->bd_chain, page_addr, (void**)&cur_tx_bd, bd_idx);
}
/* The remaining offset must fit in the 16-bit bd_offset output. */
if ((S32_SUB(snd_nxt, cur_seq) < 0) ||
(S32_SUB(snd_nxt, cur_seq) > 0xffff))
{
DbgBreakIf(S32_SUB(snd_nxt, cur_seq) < 0 );
DbgBreakIf(S32_SUB(snd_nxt, cur_seq) > 0xffff );
return LM_STATUS_INVALID_PARAMETER;
}
*bd_offset = S32_SUB(snd_nxt, cur_seq);
return LM_STATUS_SUCCESS;
}
/* Fill the XSTORM aggregative and storage sections of a TOE connection
 * context: Tx BD-chain completion base, remaining-to-send byte count, and
 * the BD/offset pair matching snd_nxt. Returns LM_STATUS_INVALID_PARAMETER
 * on inconsistent delegated state. */
static lm_status_t _lm_tcp_init_xstorm_toe_context(
struct _lm_device_t *pdev,
lm_tcp_state_t * tcp)
{
struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
struct xstorm_toe_ag_context * xctx_ag = &ctx->xstorm_ag_context;
struct xstorm_toe_st_context * xctx_st = &ctx->xstorm_st_context.context;
lm_address_t mem_phys = {{0}};
u16_t bd_idx = 0;
u16_t bd_offset = 0;
lm_status_t lm_status = LM_STATUS_SUCCESS;
mm_memset(xctx_ag, 0, sizeof(struct xstorm_toe_ag_context));
if(tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_NAGLING)
{
xctx_ag->agg_vars1 |= XSTORM_TOE_AG_CONTEXT_NAGLE_EN;
}
/* Completion tracking starts at the head of the Tx BD chain, with the
 * completion sequence anchored at send_una. */
mem_phys = lm_bd_chain_phys_addr(&tcp->tx_con->bd_chain, 0);
xctx_ag->cmp_bd_cons = 0;
xctx_ag->cmp_bd_page_0_to_31 = mem_phys.as_u32.low;
xctx_ag->cmp_bd_page_32_to_63 = mem_phys.as_u32.high;
xctx_ag->cmp_bd_start_seq = tcp->tcp_delegated.send_una;
if (tcp->tx_con->bytes_comp_cnt)
{
DbgBreakIf(tcp->tx_con->bytes_comp_cnt);
return LM_STATUS_INVALID_PARAMETER;
}
/* Bytes posted but not yet sent on the wire. A posted-but-uncompleted
 * FIN occupies one sequence number that is not payload. */
xctx_ag->more_to_send = S32_SUB(tcp->tx_con->bytes_post_cnt,(S32_SUB(tcp->tcp_delegated.send_next,tcp->tcp_delegated.send_una)));
if ((tcp->tx_con->flags & TCP_FIN_REQ_POSTED) && !(tcp->tx_con->flags & TCP_FIN_REQ_COMPLETED)) {
xctx_ag->more_to_send--;
}
mm_memset(xctx_st, 0, sizeof(struct xstorm_toe_st_context));
/* Locate where snd_nxt falls within the posted BDs. */
lm_status = lm_locate_snd_next_info(tcp->tx_con, tcp->tcp_delegated.send_next, tcp->tcp_delegated.send_una,
&bd_idx, &bd_offset, &mem_phys);
if (lm_status != LM_STATUS_SUCCESS)
{
return lm_status;
}
xctx_st->toe.tx_bd_cons = bd_idx;
xctx_st->toe.tx_bd_offset = bd_offset;
xctx_st->toe.tx_bd_page_base_hi = mem_phys.as_u32.high;
xctx_st->toe.tx_bd_page_base_lo = mem_phys.as_u32.low;
xctx_st->toe.bd_prod = lm_bd_chain_prod_idx(&tcp->tx_con->bd_chain);
xctx_st->toe.driver_doorbell_info_ptr_lo = tcp->tx_con->phys_db_data.as_u32.low;
xctx_st->toe.driver_doorbell_info_ptr_hi = tcp->tx_con->phys_db_data.as_u32.high;
return LM_STATUS_SUCCESS;
}
/* Fill the USTORM aggregative and storage sections of a TOE connection
 * context: Rx doorbell pointers, RSS indirection slot, peninsula byte
 * count, Rx BD-chain base, and receive-window parameters. Returns
 * LM_STATUS_INVALID_PARAMETER when rcv_indication_size is nonzero. */
static lm_status_t _lm_tcp_init_ustorm_toe_context(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp)
{
struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
struct ustorm_toe_ag_context *uctx_ag = &ctx->ustorm_ag_context;
struct ustorm_toe_st_context *uctx_st = &ctx->ustorm_st_context.context;
lm_address_t mem_phys = {{0}};
mm_memset(uctx_ag, 0, sizeof(struct ustorm_toe_ag_context));
uctx_ag->rq_prod = 0;
uctx_ag->driver_doorbell_info_ptr_hi = tcp->rx_con->phys_db_data.as_u32.high;
uctx_ag->driver_doorbell_info_ptr_lo = tcp->rx_con->phys_db_data.as_u32.low;
mm_memset(uctx_st, 0, sizeof(struct ustorm_toe_st_context));
uctx_st->indirection_ram_offset = (u16_t)tcp->tcp_const.hash_value;
/* Bytes already buffered in the peninsula at offload time. */
uctx_st->pen_grq_placed_bytes = tcp->rx_con->u.rx.gen_info.peninsula_nbytes;
DbgMessage(pdev, INFORMl4sp, "_lm_tcp_init_ustorm_toe_context: IRO is 0x%x, IS is %d\n",
uctx_st->indirection_ram_offset, uctx_st->__indirection_shift);
/* Only rcv_indication_size == 0 is supported; the first condition is
 * subsumed by the second and kept only to mirror the debug breaks. */
if ((tcp->tcp_cached.rcv_indication_size > 0xffff) ||
(tcp->tcp_cached.rcv_indication_size != 0))
{
DbgBreakIf(tcp->tcp_cached.rcv_indication_size > 0xffff);
DbgBreakIf(tcp->tcp_cached.rcv_indication_size != 0);
return LM_STATUS_INVALID_PARAMETER;
}
uctx_st->rcv_indication_size = 1;
/* Rx BD chain base for the peninsula ring. */
mem_phys = lm_bd_chain_phys_addr(&tcp->rx_con->bd_chain, 0);
uctx_st->pen_ring_params.rq_cons = 0;
uctx_st->pen_ring_params.rq_cons_addr_hi = mem_phys.as_u32.high;
uctx_st->pen_ring_params.rq_cons_addr_lo = mem_phys.as_u32.low;
uctx_st->prev_rcv_win_right_edge = tcp->rx_con->db_data.rx->rcv_win_right_edge;
if (pdev->params.l4_ignore_grq_push_enabled)
{
SET_FLAGS(uctx_st->flags2, USTORM_TOE_ST_CONTEXT_IGNORE_GRQ_PUSH);
}
if (pdev->params.l4_enable_rss == L4_RSS_DYNAMIC)
{
SET_FLAGS( uctx_st->flags2, USTORM_TOE_ST_CONTEXT_RSS_UPDATE_ENABLED );
}
uctx_st->initial_rcv_wnd = tcp->tcp_cached.initial_rcv_wnd;
uctx_st->rcv_nxt = tcp->tcp_delegated.recv_next;
return LM_STATUS_SUCCESS;
}
/* Fill the CSTORM aggregative and storage sections of a TOE connection
 * context: Tx BD producer/sequence anchors and the CPU (RSS) assignment.
 * Returns LM_STATUS_INVALID_PARAMETER on bad window or hash values. */
static lm_status_t _lm_tcp_init_cstorm_toe_context(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp)
{
struct toe_context *ctx = (struct toe_context *)tcp->ctx_virt;
struct cstorm_toe_ag_context *cctx_ag = &ctx->cstorm_ag_context;
struct cstorm_toe_st_context *cctx_st = &ctx->cstorm_st_context.context;
lm_address_t mem_phys = {{0}};
mm_memset(cctx_ag, 0, sizeof(struct cstorm_toe_ag_context));
if (tcp->tcp_cached.initial_rcv_wnd > MAX_INITIAL_RCV_WND)
{
DbgBreakIfAll(tcp->tcp_cached.initial_rcv_wnd > MAX_INITIAL_RCV_WND);
return LM_STATUS_INVALID_PARAMETER;
}
/* Anchor the completion sequence space at the delegated Tx state. */
cctx_ag->bd_prod = lm_bd_chain_prod_idx(&tcp->tx_con->bd_chain);
cctx_ag->rel_seq = tcp->tcp_delegated.send_una;
cctx_ag->snd_max = tcp->tcp_delegated.send_max;
mm_memset(cctx_st, 0, sizeof(struct cstorm_toe_st_context));
mem_phys = lm_bd_chain_phys_addr(&tcp->tx_con->bd_chain, 0);
cctx_st->bds_ring_page_base_addr_hi = mem_phys.as_u32.high;
cctx_st->bds_ring_page_base_addr_lo = mem_phys.as_u32.low;
cctx_st->bd_cons = 0;
/* NOTE(review): the inner check repeats the outer condition and returns,
 * so the hash_value reassignment below it appears unreachable -- confirm
 * whether ERR_IF differs from a plain condition in this build. */
if (ERR_IF(tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE)) {
if (tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE)
{
DbgBreakIfAll(tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE);
return LM_STATUS_INVALID_PARAMETER;
}
tcp->tcp_const.hash_value = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
}
cctx_st->prev_snd_max = tcp->tcp_delegated.send_una;
/* RSS disabled: pin to the base RSS queue; otherwise use the table. */
if (pdev->params.l4_enable_rss == L4_RSS_DISABLED)
{
cctx_st->cpu_id = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
}
else
{
cctx_st->cpu_id = pdev->toe_info.indirection_table[tcp->tcp_const.hash_value];
}
cctx_st->free_seq = tcp->tcp_delegated.send_una - 1;
return LM_STATUS_SUCCESS;
}
/* Zero the TSTORM TOE aggregative and storage context sections; the
 * TCP-specific TSTORM fields are filled separately. */
static lm_status_t _lm_tcp_init_tstorm_toe_context(
    struct _lm_device_t *pdev,
    lm_tcp_state_t * tcp)
{
    struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;

    UNREFERENCED_PARAMETER_(pdev);

    mm_mem_zero(&ctx->tstorm_ag_context, sizeof(struct tstorm_toe_ag_context));
    mm_mem_zero(&ctx->tstorm_st_context.context, sizeof(struct tstorm_toe_st_context));

    return LM_STATUS_SUCCESS;
}
/* Mark the connection valid in the timers block so firmware timers may
 * run against it. */
static lm_status_t _lm_tcp_init_timers_context(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp)
{
    struct toe_context * ctx;

    UNREFERENCED_PARAMETER_(pdev);

    ctx = (struct toe_context *)tcp->ctx_virt;
    SET_FLAGS(ctx->timers_context.flags, TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG);

    return LM_STATUS_SUCCESS;
}
/* Initialize the full TOE connection context: each storm section in turn
 * (X, U, C, T), then the timers block, then the CDU validation bytes.
 * Stops and returns the first failing status. */
static lm_status_t _lm_tcp_init_toe_context(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp)
{
    typedef lm_status_t (*toe_ctx_init_fn_t)(struct _lm_device_t *, lm_tcp_state_t *);
    static const toe_ctx_init_fn_t init_fns[] = {
        _lm_tcp_init_xstorm_toe_context,
        _lm_tcp_init_ustorm_toe_context,
        _lm_tcp_init_cstorm_toe_context,
        _lm_tcp_init_tstorm_toe_context,
        _lm_tcp_init_timers_context,
    };
    lm_status_t lm_status;
    u32_t i;

    for (i = 0; i < ARRSIZE(init_fns); i++) {
        lm_status = init_fns[i](pdev, tcp);
        if (lm_status != LM_STATUS_SUCCESS) {
            return lm_status;
        }
    }

    /* Program the CDU validation data for this CID. */
    lm_set_cdu_validation_data(pdev, tcp->cid, FALSE);
    return LM_STATUS_SUCCESS;
}
/* Fill the TSTORM TCP section of a connection context (TOE or iSCSI) from
 * the delegated TCP state: sequence anchors, keep-alive, congestion
 * control, RTT estimates, retransmit state and statistics flags. Time
 * values are converted from the OS tick base to the firmware tick bases.
 * Returns LM_STATUS_INVALID_PARAMETER on inconsistent cached state. */
static lm_status_t _lm_tcp_init_tstorm_tcp_context(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp
)
{
struct tstorm_toe_tcp_ag_context_section *ttcp_ag;
struct tstorm_tcp_st_context_section *ttcp_st;
l4_ofld_params_t *l4_params = &pdev->ofld_info.l4_params;
lm_path_state_t *path = tcp->path;
lm_neigh_state_t *neigh = path->neigh;
u32_t sm_rtt, sm_delta;
u32_t snd_wnd;
/* The TOE and iSCSI layouts of this section must be interchangeable. */
ASSERT_STATIC(sizeof(struct tstorm_toe_tcp_ag_context_section) == sizeof(struct tstorm_tcp_tcp_ag_context_section) );
if (tcp->ulp_type == TOE_CONNECTION_TYPE)
{
ttcp_ag = &((struct toe_context *)tcp->ctx_virt)->tstorm_ag_context.tcp;
ttcp_st = &((struct toe_context *)tcp->ctx_virt)->tstorm_st_context.context.tcp;
}
else
{
ttcp_ag = (struct tstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)tcp->ctx_virt)->tstorm_ag_context.tcp;
ttcp_st = &((struct iscsi_context *)tcp->ctx_virt)->tstorm_st_context.tcp;
}
mm_mem_zero(ttcp_ag, sizeof(struct tstorm_toe_tcp_ag_context_section));
mm_mem_zero(ttcp_st, sizeof(struct tstorm_tcp_st_context_section));
/* Send-side sequence anchors from the delegated state. */
ttcp_ag->snd_max = tcp->tcp_delegated.send_max;
ttcp_ag->snd_nxt = tcp->tcp_delegated.send_next;
ttcp_ag->snd_una = tcp->tcp_delegated.send_una;
/* Duplicate-ACK detection is always enabled. */
ttcp_st->flags2 |= TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
ttcp_st->flags2 |= TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN;
ttcp_st->dup_ack_count = tcp->tcp_delegated.dup_ack_count;
if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP) {
ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS;
}
/* Keep-alive: requires nonzero timeout and interval when enabled. */
if(tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) {
ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED;
if ((tcp->tcp_cached.ka_time_out == 0) ||
(tcp->tcp_cached.ka_interval == 0))
{
DbgBreakIf(tcp->tcp_cached.ka_time_out == 0);
DbgBreakIf(tcp->tcp_cached.ka_interval == 0);
return LM_STATUS_INVALID_PARAMETER;
}
}
if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_WIN_SCALING) {
ttcp_st->snd_wnd_scale = tcp->tcp_const.snd_seg_scale;
}
/* Congestion window relative to send_una; 0xffffffff (i.e. cwin ==
 * send_una - 1) is remapped to a large finite value. */
ttcp_st->cwnd = tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una;
if (ttcp_st->cwnd == 0xffffffff) {
ttcp_st->cwnd = 0x40000000;
}
ttcp_st->ka_interval =
lm_time_resolution(pdev, tcp->tcp_cached.ka_interval, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
ttcp_st->ka_max_probe_count = tcp->tcp_cached.ka_probe_cnt;
/* Probe count carries over only when the connection is idle (una==max);
 * otherwise the delegated union holds retransmit state instead. */
if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) {
ttcp_st->ka_probe_count = tcp->tcp_delegated.u.keep_alive.probe_cnt;
} else {
ttcp_st->ka_probe_count = 0;
}
ttcp_st->ka_timeout =
lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
/* Source MAC split into three 16-bit words, byte-swapped for firmware. */
ttcp_st->msb_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[0])));
ttcp_st->mid_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[2])));
ttcp_st->lsb_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[4])));
/* max_rt == 0 means "no limit": use max value and enable per-segment
 * retransmit counting instead. */
ttcp_st->max_rt_time =
lm_time_resolution(pdev, tcp->tcp_cached.max_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
if (ttcp_st->max_rt_time == 0) {
ttcp_st->max_rt_time = 0xffffffff;
ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN;
}
/* TOE connections reuse the precomputed SWS MSS; iSCSI recomputes it. */
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
if (tcp->rx_con->u.rx.sws_info.mss > 0xffff)
{
DbgBreakIf(tcp->rx_con->u.rx.sws_info.mss > 0xffff);
return LM_STATUS_INVALID_PARAMETER;
}
ttcp_st->mss = tcp->rx_con->u.rx.sws_info.mss & 0xffff;
} else {
ttcp_st->mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
tcp->tcp_const.remote_mss,
(tcp->path->path_const.ip_version == IP_VERSION_IPV6),
tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
tcp->path->neigh->neigh_const.vlan_tag != 0) & 0xffff;
ttcp_st->expected_rel_seq = tcp->tcp_delegated.send_una;
}
DbgMessage(pdev, INFORMl4sp, "offload num_retx=%d, snd_wnd_probe_cnt=%d\n",tcp->tcp_delegated.u.retransmit.num_retx,tcp->tcp_delegated.snd_wnd_probe_count);
ttcp_st->persist_probe_count = tcp->tcp_delegated.snd_wnd_probe_count;
ttcp_st->prev_seg_seq = tcp->tcp_delegated.send_wl1;
ttcp_st->rcv_nxt = tcp->tcp_delegated.recv_next;
/* Effective send window = min(send_win, send_cwin) relative to una. */
snd_wnd = (S32_SUB(tcp->tcp_delegated.send_cwin, tcp->tcp_delegated.send_win) > 0) ?
(tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) :
(tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una);
/* Retransmit state applies only when data is in flight or window is 0. */
if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max && snd_wnd > 0) {
ttcp_st->rto_exp = 0;
ttcp_st->retransmit_count = 0;
} else {
ttcp_st->retransmit_count = tcp->tcp_delegated.u.retransmit.num_retx;
ttcp_st->rto_exp = tcp->tcp_delegated.u.retransmit.num_retx;
}
ttcp_st->retransmit_start_time =
lm_time_resolution(pdev, tcp->tcp_delegated.total_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
/* Smoothed RTT and variance are pre-scaled (/8 and /4) and clamped to
 * 30s to fit the firmware's expectations. */
sm_rtt = lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/8;
if (sm_rtt > 30000) {
sm_rtt = 30000;
}
sm_delta = lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/4;
if (sm_delta > 30000) {
sm_delta = 30000;
}
ttcp_st->flags1 |= (sm_rtt << TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT);
ttcp_st->flags2 |= (sm_delta << TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT);
/* A peer FIN already received means no more Rx payload is expected. */
if ((tcp->ulp_type == TOE_CONNECTION_TYPE) && (tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED)) {
ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD;
}
ttcp_st->ss_thresh = tcp->tcp_delegated.ss_thresh;
ttcp_st->timestamp_recent = tcp->tcp_delegated.ts_recent;
ttcp_st->timestamp_recent_time =
lm_time_resolution(pdev, tcp->tcp_delegated.ts_recent_age, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
ttcp_st->vlan_id = neigh->neigh_const.vlan_tag;
ttcp_st->recent_seg_wnd = tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una;
ttcp_st->ooo_support_mode = (tcp->ulp_type == TOE_CONNECTION_TYPE)? TCP_TSTORM_OOO_SUPPORTED : TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
ttcp_st->statistics_counter_id = (tcp->ulp_type == TOE_CONNECTION_TYPE)? LM_STATS_CNT_ID(pdev) : LM_CLI_IDX_ISCSI;
/* Only TOE connections update the L2/L4 statistics counters. */
if( TOE_CONNECTION_TYPE == tcp->ulp_type )
{
SET_FLAGS( ttcp_st->flags2, 1<<TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT );
SET_FLAGS( ttcp_st->flags2, 1<<TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT );
}
return LM_STATUS_SUCCESS;
}
/* Initialize the Xstorm (transmit-path) section of the chip context for an
 * offloaded TCP connection (TOE or iSCSI) from the tcp_delegated / tcp_cached /
 * tcp_const state captured at offload time.
 *
 * Returns LM_STATUS_SUCCESS on success, or LM_STATUS_INVALID_PARAMETER when a
 * software value does not fit its hardware field (MSS, VLAN tag, priority). */
static lm_status_t _lm_tcp_init_xstorm_tcp_context(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp)
{
struct xstorm_toe_tcp_ag_context_section * xtcp_ag;
struct xstorm_common_context_section * xtcp_st;
lm_path_state_t * path = tcp->path;
lm_neigh_state_t * neigh = path->neigh;
l4_ofld_params_t * l4_params = &(pdev->ofld_info.l4_params);
u32_t src_ip[4], dst_ip[4];
u16_t pseudo_cs, i;
u32_t sm_rtt, sm_delta;
/* The TOE and plain-TCP AG section layouts must match: the iSCSI branch below
 * casts the iSCSI AG section to the TOE one. Verified at compile time. */
ASSERT_STATIC(sizeof(struct xstorm_toe_tcp_ag_context_section) == sizeof(struct xstorm_tcp_tcp_ag_context_section));
/* Locate the AG (aggregation) and ST (storage) sections inside the
 * per-connection context, whose outer layout differs for TOE vs iSCSI. */
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
xtcp_ag = &((struct toe_context *)tcp->ctx_virt)->xstorm_ag_context.tcp;
xtcp_st = &((struct toe_context *)tcp->ctx_virt)->xstorm_st_context.context.common;
} else {
xtcp_ag = (struct xstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)tcp->ctx_virt)->xstorm_ag_context.tcp;
xtcp_st = &((struct iscsi_context *)tcp->ctx_virt)->xstorm_st_context.common;
}
mm_mem_zero(xtcp_ag, sizeof(struct xstorm_toe_tcp_ag_context_section));
mm_mem_zero(xtcp_st, sizeof(struct xstorm_common_context_section));
xtcp_ag->ack_to_far_end = tcp->tcp_delegated.recv_next;
/* Keep-alive timer. Only relevant when there is no in-flight data
 * (send_una == send_max); otherwise the timer slot is owned by the
 * retransmit timer and KA is effectively disabled (0xffffffff).
 * Timer values are converted from the OS tick rate to the chip timer rate. */
if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) {
if ((tcp->tcp_cached.ka_probe_cnt > 0) && (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0)) {
/* Probes already outstanding and the delta expired: fire on the next tick. */
xtcp_ag->ka_timer = 1;
} else if ((tcp->tcp_cached.ka_probe_cnt == 0) && (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0)) {
if (tcp->tcp_cached.ka_time_out == 0) {
/* A zero timeout means keep-alive is not in use: disable. */
xtcp_ag->ka_timer = 0xffffffff;
} else {
if (tcp->tcp_cached.ka_time_out == 0xffffffff) {
xtcp_ag->ka_timer = 0xffffffff;
} else {
/* Re-arm with the full configured timeout; never write 0 since the
 * converted value is floored to 1 tick. */
xtcp_ag->ka_timer =
tcp->tcp_cached.ka_time_out ?
lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC) :
1 ;
}
}
} else {
/* Mid-interval at offload time: carry over the remaining delta. */
if (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0xffffffff) {
xtcp_ag->ka_timer = 0xffffffff;
} else {
xtcp_ag->ka_timer = lm_time_resolution(pdev, tcp->tcp_delegated.u.keep_alive.timeout_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
}
}
} else {
xtcp_ag->ka_timer = 0xffffffff;
}
/* Advertised local receive window: TOE continues from the delegated
 * sequence; iSCSI advertises the maximum window for its scale factor. */
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
xtcp_ag->local_adv_wnd = tcp->tcp_delegated.recv_win_seq;
} else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE) {
xtcp_ag->local_adv_wnd = 0xFFFF << ((u16_t)tcp->tcp_const.rcv_seg_scale & 0xf);
}
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
/* TOE: the SWS-tracked MSS must fit the 16-bit hardware field. */
if (tcp->rx_con->u.rx.sws_info.mss > 0xffff)
{
DbgBreakIf(tcp->rx_con->u.rx.sws_info.mss > 0xffff);
return LM_STATUS_INVALID_PARAMETER;
}
xtcp_ag->mss = tcp->rx_con->u.rx.sws_info.mss & 0xffff;
} else {
/* iSCSI: derive MSS from path MTU / remote MSS; "& 0xfffc" rounds the
 * result down to a multiple of 4. */
xtcp_ag->mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
tcp->tcp_const.remote_mss,
(tcp->path->path_const.ip_version == IP_VERSION_IPV6),
tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
tcp->path->neigh->neigh_const.vlan_tag != 0) & 0xfffc;
if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
{
if (xtcp_ag->mss < 4)
{
DbgBreakIf(xtcp_ag->mss < 4);
return LM_STATUS_INVALID_PARAMETER;
}
/* Reserve 4 bytes out of the MSS for iSCSI — presumably for per-PDU
 * digest/framing overhead; confirm against firmware spec. */
xtcp_ag->mss -= 4;
}
}
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
/* No window-probe in progress: let the firmware transmit immediately. */
if (tcp->tcp_delegated.snd_wnd_probe_count == 0) {
xtcp_ag->tcp_agg_vars2 |= __XSTORM_TOE_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED;
}
}
/* Effective transmit window = min(congestion window, send window) - send_una,
 * chosen via a signed comparison of the two right edges. */
xtcp_ag->tx_wnd =
(S32_SUB(tcp->tcp_delegated.send_cwin, tcp->tcp_delegated.send_win) > 0) ?
(tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) :
(tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una);
/* 0xffffffff is not a representable window for the firmware; substitute a
 * large (1GB) value — NOTE(review): sentinel substitution, confirm intent. */
if (xtcp_ag->tx_wnd == 0xffffffff) {
xtcp_ag->tx_wnd = 0x40000000;
}
/* RTO timer: when nothing is in flight and the window is open (or the
 * carried-over retx timer is disabled), recompute RTO = SRTT + 4*RTTVAR from
 * the smoothed values (stored scaled by 8 and 4 respectively), each clamped
 * to 30000 chip ticks. Otherwise carry over the remaining retransmit time. */
if ((tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) && ((xtcp_ag->tx_wnd > 0) || (tcp->tcp_delegated.u.retransmit.retx_ms == 0xffffffff))) {
sm_rtt = lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/8;
if (sm_rtt > 30000) {
sm_rtt = 30000;
}
sm_delta = lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/4;
if (sm_delta > 30000) {
sm_delta = 30000;
}
xtcp_ag->rto_timer = (sm_rtt + (sm_delta << 2));
} else {
if (tcp->tcp_delegated.u.retransmit.retx_ms == 0xffffffff) {
xtcp_ag->rto_timer = 0xffffffff;
} else {
/* Floor to 1 so an expired-but-pending timer still fires. */
xtcp_ag->rto_timer = tcp->tcp_delegated.u.retransmit.retx_ms ? tcp->tcp_delegated.u.retransmit.retx_ms : 1 ;
;
}
}
xtcp_ag->snd_nxt = tcp->tcp_delegated.send_next;
xtcp_ag->snd_una = tcp->tcp_delegated.send_una;
/* Enable delayed-ack aggregation. */
xtcp_ag->tcp_agg_vars2 |= XSTORM_TOE_TCP_AG_CONTEXT_SECTION_DA_ENABLE;
xtcp_ag->ts_to_echo = tcp->tcp_delegated.ts_recent;
/* L2 header material: destination MAC from the neighbor entry. */
xtcp_st->ethernet.remote_addr_0 = neigh->neigh_cached.dst_addr[0];
xtcp_st->ethernet.remote_addr_1 = neigh->neigh_cached.dst_addr[1];
xtcp_st->ethernet.remote_addr_2 = neigh->neigh_cached.dst_addr[2];
xtcp_st->ethernet.remote_addr_3 = neigh->neigh_cached.dst_addr[3];
xtcp_st->ethernet.remote_addr_4 = neigh->neigh_cached.dst_addr[4];
xtcp_st->ethernet.remote_addr_5 = neigh->neigh_cached.dst_addr[5];
/* VLAN ID is 12 bits; priority (PCP) is 3 bits. */
if (neigh->neigh_const.vlan_tag > 0xfff)
{
DbgBreakIf(neigh->neigh_const.vlan_tag > 0xfff);
return LM_STATUS_INVALID_PARAMETER;
}
xtcp_st->ethernet.vlan_params |= (neigh->neigh_const.vlan_tag << XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT);
if (tcp->tcp_cached.user_priority > 0x7)
{
DbgBreakIf(tcp->tcp_cached.user_priority > 0x7);
return LM_STATUS_INVALID_PARAMETER;
}
xtcp_st->ethernet.vlan_params |= (tcp->tcp_cached.user_priority << XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT);
/* Insert an 802.1Q tag only if any tag field (VID/CFI/PCP) is non-zero. */
if ((0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_VLAN_ID)) ||
(0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_CFI)) ||
(0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_PRIORITY)))
{
SET_FLAGS( xtcp_st->flags, XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE);
}
xtcp_st->ethernet.local_addr_0 = neigh->neigh_const.src_addr[0];
xtcp_st->ethernet.local_addr_1 = neigh->neigh_const.src_addr[1];
xtcp_st->ethernet.local_addr_2 = neigh->neigh_const.src_addr[2];
xtcp_st->ethernet.local_addr_3 = neigh->neigh_const.src_addr[3];
xtcp_st->ethernet.local_addr_4 = neigh->neigh_const.src_addr[4];
xtcp_st->ethernet.local_addr_5 = neigh->neigh_const.src_addr[5];
/* 0x8100 = 802.1Q VLAN TPID. */
xtcp_st->ethernet.reserved_vlan_type = 0x8100;
/* IP section: addresses, TOS/TTL (or traffic-class/hop-limit for v6), and
 * the TCP pseudo-header checksum pre-computed over the address pair.
 * In debug builds a recognizable TTL is used to spot offloaded traffic. */
xtcp_st->ip_version_1b = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? 0 : 1;
if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
xtcp_st->ip_union.padded_ip_v4.ip_v4.ip_remote_addr = path->path_const.u.ipv4.dst_ip;
xtcp_st->ip_union.padded_ip_v4.ip_v4.ip_local_addr = path->path_const.u.ipv4.src_ip;
xtcp_st->ip_union.padded_ip_v4.ip_v4.tos = tcp->tcp_cached.tos_or_traffic_class;
#if DBG
xtcp_st->ip_union.padded_ip_v4.ip_v4.ttl = (tcp->ulp_type == TOE_CONNECTION_TYPE) ? TOE_DBG_TTL : ISCSI_DBG_TTL;
#else
xtcp_st->ip_union.padded_ip_v4.ip_v4.ttl = tcp->tcp_cached.ttl_or_hop_limit;
#endif
src_ip[0] = HTON32(path->path_const.u.ipv4.src_ip);
dst_ip[0] = HTON32(path->path_const.u.ipv4.dst_ip);
pseudo_cs = lm_tcp_calc_tcp_pseudo_checksum(pdev, src_ip, dst_ip, IP_VERSION_IPV4);
} else {
xtcp_st->ip_union.ip_v6.ip_remote_addr_lo_lo = path->path_const.u.ipv6.dst_ip[0];
xtcp_st->ip_union.ip_v6.ip_remote_addr_lo_hi = path->path_const.u.ipv6.dst_ip[1];
xtcp_st->ip_union.ip_v6.ip_remote_addr_hi_lo = path->path_const.u.ipv6.dst_ip[2];
xtcp_st->ip_union.ip_v6.ip_remote_addr_hi_hi = path->path_const.u.ipv6.dst_ip[3];
xtcp_st->ip_union.ip_v6.ip_local_addr_lo_lo = path->path_const.u.ipv6.src_ip[0];
xtcp_st->ip_union.ip_v6.ip_local_addr_lo_hi = path->path_const.u.ipv6.src_ip[1];
xtcp_st->ip_union.ip_v6.ip_local_addr_hi_lo = path->path_const.u.ipv6.src_ip[2];
xtcp_st->ip_union.ip_v6.ip_local_addr_hi_hi = path->path_const.u.ipv6.src_ip[3];
#if DBG
xtcp_st->ip_union.ip_v6.hop_limit = (tcp->ulp_type == TOE_CONNECTION_TYPE) ? TOE_DBG_TTL : ISCSI_DBG_TTL;
#else
xtcp_st->ip_union.ip_v6.hop_limit = tcp->tcp_cached.ttl_or_hop_limit;
#endif
DbgBreakIf(tcp->tcp_cached.flow_label > 0xffff);
xtcp_st->ip_union.ip_v6.priority_flow_label =
tcp->tcp_cached.flow_label << XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT |
tcp->tcp_cached.tos_or_traffic_class << XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT;
for (i = 0; i < 4; i++) {
src_ip[i] = HTON32(path->path_const.u.ipv6.src_ip[i]);
dst_ip[i] = HTON32(path->path_const.u.ipv6.dst_ip[i]);
}
pseudo_cs = lm_tcp_calc_tcp_pseudo_checksum(pdev, src_ip, dst_ip, IP_VERSION_IPV6);
}
/* TCP section: ports, snd_max, option flags and the pseudo checksum. */
xtcp_st->tcp.local_port = tcp->tcp_const.src_port;
xtcp_st->tcp.pseudo_csum = NTOH16(pseudo_cs);
xtcp_st->tcp.remote_port = tcp->tcp_const.dst_port;
xtcp_st->tcp.snd_max = tcp->tcp_delegated.send_max;
if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP) {
xtcp_st->tcp.ts_enabled = 1;
}
if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_SACK) {
xtcp_st->tcp.tcp_params |= XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED;
}
/* If a FIN was already posted by the host before offload, tell the chip. */
if ((tcp->ulp_type == TOE_CONNECTION_TYPE) && (tcp->tx_con->flags & TCP_FIN_REQ_POSTED)) {
xtcp_st->tcp.tcp_params |= XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG;
}
xtcp_st->tcp.ts_time_diff = tcp->tcp_delegated.tstamp;
xtcp_st->tcp.window_scaling_factor = (u16_t)tcp->tcp_const.rcv_seg_scale & 0xf;
/* TOE connections update both L2 and L4 statistics counters. */
if( TOE_CONNECTION_TYPE == tcp->ulp_type )
{
xtcp_st->tcp.statistics_counter_id = LM_STATS_CNT_ID(pdev);
SET_FLAGS( xtcp_st->tcp.statistics_params, 1<<XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT );
SET_FLAGS( xtcp_st->tcp.statistics_params, 1<<XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT );
}
if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
{
SET_FLAGS( xtcp_st->flags,(1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT ));
SET_FLAGS( xtcp_st->flags,(PORT_ID(pdev) << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT));
}
return LM_STATUS_SUCCESS;
}
/* Initialize both per-connection TCP context sections (Xstorm, then Tstorm).
 * Returns the first failing status, or LM_STATUS_SUCCESS when both succeed. */
static lm_status_t _lm_tcp_init_tcp_context(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp)
{
    lm_status_t status;

    status = _lm_tcp_init_xstorm_tcp_context(pdev, tcp);
    if (status == LM_STATUS_SUCCESS) {
        status = _lm_tcp_init_tstorm_tcp_context(pdev, tcp);
    }
    return status;
}
/* Couple the iSCSI-specific context fields to the offloaded TCP sequence
 * state: the Cstorm/Xstorm HQ sequence trackers start at send_next, and the
 * Tstorm receive-window right edge is derived from the already-initialized
 * Xstorm advertised window. Always returns LM_STATUS_SUCCESS. */
static lm_status_t _lm_tcp_init_iscsi_tcp_related_context(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp)
{
    struct iscsi_context            *ctx  = (struct iscsi_context *)tcp->ctx_virt;
    struct cstorm_iscsi_ag_context  *c_ag = &ctx->cstorm_ag_context;
    struct cstorm_iscsi_st_context  *c_st = &ctx->cstorm_st_context;
    struct xstorm_iscsi_ag_context  *x_ag = &ctx->xstorm_ag_context;
    struct xstorm_iscsi_st_context  *x_st = &ctx->xstorm_st_context;
    struct tstorm_iscsi_ag_context  *t_ag = &ctx->tstorm_ag_context;
    struct tstorm_iscsi_st_context  *t_st = &ctx->tstorm_st_context;
    u32_t                            snd_nxt = tcp->tcp_delegated.send_next;

    UNREFERENCED_PARAMETER_(pdev);
    /* TOE and iSCSI Cstorm AG sections must share a layout. */
    ASSERT_STATIC(sizeof(struct cstorm_toe_ag_context) == sizeof(struct cstorm_iscsi_ag_context));

    /* All HQ sequence trackers begin at the current send_next. */
    c_ag->rel_seq           = snd_nxt;
    c_ag->rel_seq_th        = snd_nxt;
    c_st->hq_tcp_seq        = snd_nxt;
    x_ag->hq_cons_tcp_seq   = snd_nxt;

    /* Right edge = ack point + (advertised window scaled up). */
    t_ag->tcp.wnd_right_edge = (x_ag->tcp.local_adv_wnd << x_st->common.tcp.window_scaling_factor) + x_ag->tcp.ack_to_far_end;
    t_ag->tcp.wnd_right_edge_local = t_ag->tcp.wnd_right_edge;
    t_st->iscsi.process_nxt = tcp->tcp_delegated.recv_next;
    return LM_STATUS_SUCCESS;
}
/* Prepare the chip context for an offloaded TCP connection:
 * allocate a CID (may complete asynchronously), reconcile the receive window
 * with any buffered ("peninsula") bytes, register the connection 4-tuple in
 * the searcher mirror hash, resolve the context addresses, and fill the
 * per-storm context sections.
 * Returns LM_STATUS_SUCCESS, LM_STATUS_RESOURCE, or LM_STATUS_FAILURE; a
 * pending CID also returns LM_STATUS_SUCCESS and the work is finished later
 * via lm_tcp_recycle_cid_cb(). */
static lm_status_t lm_tcp_init_tcp_context(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp)
{
s32_t cid;
lm_status_t lm_status;
lm_4tuple_t tuple = {{0}};
u32_t expect_rwin;
u8_t i;
DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_tcp_context\n");
/* Allocate a CID unless one was already assigned on an earlier attempt. */
if (tcp->cid == 0)
{
lm_status = lm_allocate_cid(pdev, TOE_CONNECTION_TYPE, (void*)tcp, &cid);
if(lm_status == LM_STATUS_RESOURCE){
DbgMessage(pdev, WARNl4sp, "lm_tcp_init_tcp_state: Failed in allocating cid\n");
return LM_STATUS_RESOURCE;
} else if (lm_status == LM_STATUS_PENDING) {
/* CID is being recycled: block slow-path requests until the recycle
 * callback unblocks them. */
lm_sp_req_manager_block(pdev, (u32_t)cid);
}
tcp->cid = (u32_t)cid;
}
/* If the CID is still pending, context init resumes in the recycle cb. */
if (lm_cid_state(pdev, tcp->cid) == LM_CID_STATE_PENDING) {
return LM_STATUS_SUCCESS;
}
/* TOE only: reconcile the receive window with bytes already buffered in the
 * driver (peninsula) so that window + buffered bytes == initial_rcv_wnd. */
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
tcp->rx_con->u.rx.sws_info.extra_bytes = 0;
if (tcp->rx_con->u.rx.gen_info.peninsula_nbytes > tcp->tcp_cached.initial_rcv_wnd) {
/* More buffered data than a full window: remember the overflow and
 * collapse the right edge to recv_next (zero window). */
tcp->rx_con->u.rx.sws_info.extra_bytes = tcp->rx_con->u.rx.gen_info.peninsula_nbytes - tcp->tcp_cached.initial_rcv_wnd;
tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_next;
tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_next;
DbgMessage(pdev, INFORMl4sp, "lm_tcp_init_tcp_state: pnb:%x, irw:%x, ext:%x, rnx:%x\n",tcp->rx_con->u.rx.gen_info.peninsula_nbytes,
tcp->tcp_cached.initial_rcv_wnd,tcp->rx_con->u.rx.sws_info.extra_bytes,tcp->tcp_delegated.recv_next);
} else {
/* Expected window = (recv_win_seq - recv_next) + buffered bytes. */
expect_rwin = (u32_t)S32_SUB(
tcp->tcp_delegated.recv_win_seq,
tcp->tcp_delegated.recv_next);
expect_rwin += tcp->rx_con->u.rx.gen_info.peninsula_nbytes;
DbgMessage(pdev, INFORMl4sp, "lm_tcp_init_tcp_state: pnb:%x, irw:%x, rws:%x, rnx:%x\n",tcp->rx_con->u.rx.gen_info.peninsula_nbytes,
tcp->tcp_cached.initial_rcv_wnd,
tcp->tcp_delegated.recv_win_seq,
tcp->tcp_delegated.recv_next);
/* Mismatch with the configured initial window: nudge recv_win_seq by
 * the delta so the firmware sees a consistent window. */
if (ERR_IF(expect_rwin != tcp->tcp_cached.initial_rcv_wnd)) {
u32_t delta;
if (expect_rwin > tcp->tcp_cached.initial_rcv_wnd) {
delta = expect_rwin - tcp->tcp_cached.initial_rcv_wnd;
tcp->tcp_delegated.recv_win_seq -= delta;
} else {
delta = tcp->tcp_cached.initial_rcv_wnd - expect_rwin;
tcp->tcp_delegated.recv_win_seq += delta;
}
tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
}
}
}
/* Build the connection 4-tuple for the searcher mirror hash. */
if(tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
tuple.ip_type = LM_IP_TYPE_V4;
tuple.dst_ip[0] = tcp->path->path_const.u.ipv4.dst_ip;
tuple.src_ip[0] = tcp->path->path_const.u.ipv4.src_ip;
} else {
tuple.ip_type = LM_IP_TYPE_V6;
for (i = 0; i < 4; i++) {
tuple.dst_ip[i] = tcp->path->path_const.u.ipv6.dst_ip[i];
tuple.src_ip[i] = tcp->path->path_const.u.ipv6.src_ip[i];
}
}
tuple.src_port = tcp->tcp_const.src_port;
tuple.dst_port = tcp->tcp_const.dst_port;
if (lm_searcher_mirror_hash_insert(pdev, tcp->cid, &tuple) != LM_STATUS_SUCCESS) {
DbgMessage(pdev, WARNl4sp, "lm_tcp_init_tcp_context: Failed inserting tuple to SRC hash\n");
tcp->in_searcher = 0;
return LM_STATUS_RESOURCE;
}
tcp->in_searcher = 1;
/* Resolve and validate the virtual / physical context addresses. */
tcp->ctx_virt = (struct toe_context *)lm_get_context(pdev, tcp->cid);
if (!tcp->ctx_virt) {
DbgBreakIf(!tcp->ctx_virt);
return LM_STATUS_FAILURE;
}
tcp->ctx_phys.as_u64 = lm_get_context_phys(pdev, tcp->cid);
if (!tcp->ctx_phys.as_u64) {
DbgBreakIf(!tcp->ctx_phys.as_u64);
return LM_STATUS_FAILURE;
}
DbgMessage(pdev, VERBOSEl4sp,
"tcp->ctx_virt=%p, tcp->ctx_phys_high=%x, tcp->ctx_phys_low=%x\n",
tcp->ctx_virt, tcp->ctx_phys.as_u32.high, tcp->ctx_phys.as_u32.low);
/* Fill the context: TOE-specific section (TOE only), the generic TCP
 * sections, then the iSCSI coupling fields (iSCSI only). */
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
lm_status = _lm_tcp_init_toe_context(pdev, tcp);
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
}
lm_status = _lm_tcp_init_tcp_context(pdev, tcp);
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
if (tcp->ulp_type == ISCSI_CONNECTION_TYPE) {
lm_status = _lm_tcp_init_iscsi_tcp_related_context(pdev, tcp);
if (lm_status != LM_STATUS_SUCCESS) {
return lm_status;
}
}
return LM_STATUS_SUCCESS;
}
/* Callback invoked once a recycled CID becomes usable again.
 * Marks the CID valid, finishes the deferred context initialization when the
 * connection is still in INIT_CONTEXT, and unblocks queued slow-path
 * requests. Runs under the TOE lock. */
void lm_tcp_recycle_cid_cb(
    struct _lm_device_t *pdev,
    void *cookie,
    s32_t cid)
{
    lm_tcp_state_t      *tcp_state  = (lm_tcp_state_t *)cookie;
    lm_sp_req_common_t  *common_req = NULL;

    MM_ACQUIRE_TOE_LOCK(pdev);
    lm_set_cid_state(pdev, tcp_state->cid, LM_CID_STATE_VALID);
    if (tcp_state->hdr.status == STATE_STATUS_INIT_CONTEXT)
    {
        /* Context init was cut short while the CID was pending; resume it. */
        lm_tcp_init_tcp_context(pdev, tcp_state);
    }
    lm_sp_req_manager_unblock(pdev, cid, &common_req);
    MM_RELEASE_TOE_LOCK(pdev);
}
/* Completion callback for a pending slow-path (SQ) command that is completed
 * by the driver itself rather than by firmware — NOTE(review): presumably
 * used during error recovery / shutdown; confirm against SQ flow.
 * Synthesizes the Rx/Tx CQEs the firmware would have produced for the given
 * ramrod opcode and runs them through the normal CQE processing paths. */
void lm_tcp_comp_cb(struct _lm_device_t *pdev, struct sq_pending_command *pending)
{
lm_tcp_state_t * tcp = NULL;
lm_tcp_con_t * rx_con = NULL;
lm_tcp_con_t * tx_con = NULL;
struct toe_rx_cqe rx_cqe = {0};
struct toe_tx_cqe tx_cqe = {0};
u8_t i = 0;
u8_t cmp_rx = FALSE;
u8_t cmp_tx = FALSE;
MM_INIT_TCP_LOCK_HANDLE();
/* The connection may no longer exist for this CID. */
tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, pending->cid);
if (tcp)
{
rx_con = tcp->rx_con;
tx_con = tcp->tx_con;
}
/* Encode CID + opcode into a CQE params word (Rx and Tx share the layout
 * used here). */
#define LM_TCP_SET_CQE(_param, _cid, _cmd) \
(_param) = (((_cid) << TOE_RX_CQE_CID_SHIFT) & TOE_RX_CQE_CID) | \
(((_cmd) << TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) & TOE_RX_CQE_COMPLETION_OPCODE);
/* For two-sided ramrods, complete each direction only if it was not
 * already completed (SP_REQUEST_COMPLETED_RX/TX flag clear). */
switch (pending->cmd)
{
case RAMROD_OPCODE_TOE_INIT:
DbgBreakMsg("Not Supported\n");
break;
case RAMROD_OPCODE_TOE_INITIATE_OFFLOAD:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD);
cmp_rx = TRUE;
break;
case RAMROD_OPCODE_TOE_SEARCHER_DELETE:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_SEARCHER_DELETE);
cmp_rx = TRUE;
break;
case RAMROD_OPCODE_TOE_TERMINATE:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_TERMINATE);
cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_TERMINATE);
cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);;
break;
case RAMROD_OPCODE_TOE_QUERY:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_QUERY);
cmp_rx = TRUE;
break;
case RAMROD_OPCODE_TOE_RESET_SEND:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_RESET_SEND);
cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_RESET_SEND);
cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
break;
case RAMROD_OPCODE_TOE_EMPTY_RAMROD:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_EMPTY_RAMROD);
cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_EMPTY_RAMROD);
cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
break;
case RAMROD_OPCODE_TOE_INVALIDATE:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_INVALIDATE);
cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_INVALIDATE);
cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
break;
case RAMROD_OPCODE_TOE_UPDATE:
LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_UPDATE);
cmp_rx = TRUE;
break;
case RAMROD_OPCODE_TOE_RSS_UPDATE:
/* RSS update is device-wide, not per-connection: undo the per-chain
 * completion counters for all remaining chains and complete the
 * ramrod directly, then return without touching any connection. */
for (i = 0; i < pdev->params.l4_rss_chain_cnt-1; i++)
{
mm_atomic_dec(&pdev->params.update_toe_comp_cnt);
mm_atomic_dec(&pdev->params.update_comp_cnt);
mm_atomic_dec(&pdev->params.update_suspend_cnt);
}
lm_tcp_rss_update_ramrod_comp(pdev,
&pdev->toe_info.rcqs[LM_TOE_BASE_RSS_ID(pdev)],
pending->cid,
TOE_RSS_UPD_QUIET ,
TRUE);
return;
}
/* Run the synthesized CQEs through the regular completion machinery,
 * snapshotting the DPC flags the way the real DPC path does. */
if (cmp_rx)
{
lm_tcp_rx_process_cqe(pdev, &rx_cqe, tcp, 0 );
rx_con->dpc_info.snapshot_flags = rx_con->dpc_info.dpc_flags;
rx_con->dpc_info.dpc_flags = 0;
lm_tcp_rx_complete_tcp_sp(pdev, tcp, rx_con);
}
if (cmp_tx)
{
lm_tcp_tx_process_cqe(pdev, &tx_cqe, tcp);
tx_con->dpc_info.snapshot_flags = tx_con->dpc_info.dpc_flags;
tx_con->dpc_info.dpc_flags = 0;
lm_tcp_tx_complete_tcp_sp(pdev, tcp, tx_con);
}
}
/* Reconstruct the driver's FIN-tracking state machine from the TCP connection
 * state the stack reports at offload time (tcp_delegated.con_state).
 * Synthesizes fin_request/fin_reception/fin_completed timestamps (using
 * curr_time and curr_time-1 to preserve ordering), sets the matching Rx/Tx
 * connection flags, and — for a half-closed connection — replays the FIN
 * through the normal graceful-disconnect path.
 * Returns LM_STATUS_SUCCESS, or LM_STATUS_FAILURE for a state in which
 * offload is not allowed. */
static lm_status_t lm_tcp_init_tcp_state_machine(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp)
{
lm_tcp_con_t *con = tcp->rx_con;
lm_tcp_state_calculation_t *state_calc = &tcp->tcp_state_calc;
u64_t curr_time = 0;
lm_status_t lm_status = LM_STATUS_SUCCESS;
DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_tcp_state_machine\n");
state_calc->fin_request_time = state_calc->fin_completed_time =
state_calc->fin_reception_time = 0;
curr_time = mm_get_current_time(pdev);
/* Derive which FIN events already happened from the offloaded state.
 * Where two events must be ordered, the earlier one gets curr_time - 1. */
switch (tcp->tcp_delegated.con_state) {
case L4_TCP_CON_STATE_ESTABLISHED:
break;
case L4_TCP_CON_STATE_FIN_WAIT1:
/* Local FIN requested, not yet acked. */
DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state FIN_WAIT1 (tcp=%p)\n", tcp);
state_calc->fin_request_time = curr_time;
break;
case L4_TCP_CON_STATE_FIN_WAIT2:
/* Local FIN requested and completed (acked). */
DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state FIN_WAIT2 (tcp=%p)\n", tcp);
state_calc->fin_request_time = curr_time - 1;
state_calc->fin_completed_time = curr_time;
break;
case L4_TCP_CON_STATE_CLOSE_WAIT:
/* Remote FIN received; local side still open. */
DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state CLOSE_WAIT (tcp=%p)\n", tcp);
state_calc->fin_reception_time = curr_time;
break;
case L4_TCP_CON_STATE_CLOSING:
/* Both sides sent FIN; local FIN went out first. */
DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state CLOSING (tcp=%p)\n", tcp);
state_calc->fin_request_time = curr_time - 1;
state_calc->fin_reception_time = curr_time;
break;
case L4_TCP_CON_STATE_LAST_ACK:
/* Both sides sent FIN; remote FIN arrived first. */
DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state LAST_ACK (tcp=%p)\n", tcp);
state_calc->fin_reception_time = curr_time - 1;
state_calc->fin_request_time = curr_time;
break;
default:
DbgMessage(pdev, FATAL,
"Initiate offload in con state=%d is not allowed by WDK!\n",
tcp->tcp_delegated.con_state);
DbgBreak();
return LM_STATUS_FAILURE;
}
/* Rx side: a remote FIN was already received before offload. */
if (state_calc->fin_reception_time) {
DbgBreakIf(con->flags & TCP_REMOTE_FIN_RECEIVED);
con->flags |= TCP_REMOTE_FIN_RECEIVED;
if (con->flags & TCP_INDICATE_REJECTED) {
/* Indication path is blocked: defer the FIN indication. */
con->u.rx.flags |= TCP_CON_FIN_IND_PENDING;
} else {
con->flags |= TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED;
con->flags |= TCP_BUFFERS_ABORTED;
}
}
/* Tx side. */
con = tcp->tx_con;
if (state_calc->fin_completed_time) {
/* FIN already sent AND acked: just record it in flags and doorbell
 * data; no active buffers may remain. */
volatile struct toe_tx_db_data *db_data = con->db_data.tx;
DbgBreakIf(!state_calc->fin_request_time);
DbgBreakIf(!s_list_is_empty(&con->active_tb_list));
con->flags |= (TCP_FIN_REQ_POSTED | TCP_FIN_REQ_COMPLETED);
db_data->flags |= (TOE_TX_DB_DATA_FIN << TOE_TX_DB_DATA_FIN_SHIFT);
/* The FIN occupies one sequence number; back the produced-bytes
 * sequence off by one to compensate — TODO confirm vs firmware spec. */
db_data->bytes_prod_seq--;
} else if (state_calc->fin_request_time) {
/* FIN sent but not yet acked: replay it via the internal graceful-
 * disconnect path. Temporarily swap POST_BLOCKED for DB_BLOCKED so the
 * request is accepted but no doorbell is rung, then restore. */
DbgBreakIf(con->flags & TCP_DB_BLOCKED);
con->flags |= TCP_DB_BLOCKED;
DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
con->flags &= ~TCP_POST_BLOCKED;
con->u.tx.flags |= TCP_CON_FIN_REQ_LM_INTERNAL;
lm_status = lm_tcp_graceful_disconnect(pdev, tcp);
DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
con->flags &= ~TCP_DB_BLOCKED;
con->flags |= TCP_POST_BLOCKED;
}
return LM_STATUS_SUCCESS;
}
/* Common TCP offload initialization: rebuild the FIN state machine, then
 * fill the chip context. For TOE connections, also clears the "don't exceed
 * receive window" indication throttle. Returns the first failing status. */
lm_status_t lm_tcp_init_tcp_common(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp)
{
    lm_status_t status;

    DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tcp_common\n");
    DbgBreakIf(!(pdev && tcp));

    status = lm_tcp_init_tcp_state_machine(pdev, tcp);
    if (status == LM_STATUS_SUCCESS) {
        status = lm_tcp_init_tcp_context(pdev, tcp);
    }
    if (status != LM_STATUS_SUCCESS) {
        return status;
    }

    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
        tcp->rx_con->u.rx.gen_info.dont_send_to_system_more_then_rwin = FALSE;
    }
    return LM_STATUS_SUCCESS;
}
/* Complete a pending neighbor-upload request: mark the state UPLOAD_DONE and
 * notify the upper layer. Must only be called for a NEIGH state that is
 * UPLOAD_PENDING with no remaining dependents. */
static void _lm_tcp_comp_upload_neigh_request(
    struct _lm_device_t * pdev,
    lm_neigh_state_t * neigh_state)
{
    /* Sanity: right object, right phase, nobody still using it. */
    DbgBreakIf(neigh_state->hdr.state_id != STATE_ID_NEIGH);
    DbgBreakIf(neigh_state->hdr.status != STATE_STATUS_UPLOAD_PENDING);
    DbgBreakIf(neigh_state->num_dependents);

    neigh_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
    mm_tcp_complete_neigh_upload_request(pdev, neigh_state);
}
/* Complete a pending path-upload request. Drops the path's reference on its
 * neighbor; if that was the last reference and the neighbor itself is waiting
 * to be uploaded, the neighbor upload is completed afterwards (after the path
 * completion has been delivered). */
static void _lm_tcp_comp_upload_path_request(
    struct _lm_device_t * pdev,
    lm_path_state_t * path_state)
{
    lm_neigh_state_t * pending_neigh = NULL;

    DbgBreakIf(path_state->hdr.state_id != STATE_ID_PATH);
    DbgBreakIf(path_state->hdr.status != STATE_STATUS_UPLOAD_PENDING);
    path_state->hdr.status = STATE_STATUS_UPLOAD_DONE;

    /* Release our hold on the neighbor; remember it if we were the last
     * dependent of a neighbor that is itself pending upload. */
    DbgBreakIf(path_state->neigh->num_dependents == 0);
    path_state->neigh->num_dependents--;
    if ((path_state->neigh->hdr.status == STATE_STATUS_UPLOAD_PENDING) &&
        (path_state->neigh->num_dependents == 0)) {
        pending_neigh = path_state->neigh;
    }
    path_state->neigh = NULL;

    DbgBreakIf(path_state->num_dependents);
    mm_tcp_complete_path_upload_request(pdev, path_state);

    if (pending_neigh != NULL) {
        _lm_tcp_comp_upload_neigh_request(pdev, pending_neigh);
    }
}
/* Prepare an initiate-offload slow-path request.
 * Moves the connection to OFFLOAD_PENDING, and for TOE switches both
 * directions from completion-blocked to completion-deferred (completions
 * arriving before the offload completes are queued). Fills the ramrod
 * command and its data (context physical address); returns
 * LM_STATUS_PENDING since completion is asynchronous. */
static lm_status_t lm_tcp_post_initiate_offload_request(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp,
    u8_t *command,
    u64_t *data)
{
    lm_tcp_con_t *con_pair[2];
    int idx;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_post_initiate_offload_request\n");
    DbgBreakIf(tcp->hdr.status != STATE_STATUS_INIT_CONTEXT);
    tcp->hdr.status = STATE_STATUS_OFFLOAD_PENDING;

    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
        /* Tx first, then Rx — same order as the original lock sequence. */
        con_pair[0] = tcp->tx_con;
        con_pair[1] = tcp->rx_con;
        for (idx = 0; idx < 2; idx++) {
            lm_tcp_con_t *cur = con_pair[idx];
            mm_acquire_tcp_lock(pdev, cur);
            DbgBreakIf(!(cur->flags & TCP_POST_BLOCKED));
            DbgBreakIf(!(cur->flags & TCP_COMP_BLOCKED));
            cur->flags &= ~TCP_COMP_BLOCKED;
            cur->flags |= TCP_COMP_DEFERRED;
            mm_release_tcp_lock(pdev, cur);
        }
    }

    tcp->sp_flags |= SP_TCP_OFLD_REQ_POSTED;
    *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_INITIATE_OFFLOAD : L5CM_RAMROD_CMD_ID_ADD_NEW_CONNECTION;
    *data = tcp->ctx_phys.as_u64;
    return LM_STATUS_PENDING;
}
/* Prepare a terminate slow-path request for a connection whose upload is
 * pending: flush any outstanding doorbell, mark the request posted, and fill
 * the ramrod command (TOE vs iSCSI opcode). Returns LM_STATUS_PENDING. */
static lm_status_t lm_tcp_post_terminate_tcp_request (
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t * tcp,
    OUT u8_t * command,
    OUT u64_t * data
    )
{
    DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_terminate_tcp_request\n");
    DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);

    lm_tcp_flush_db(pdev, tcp);
    SET_FLAGS(tcp->sp_flags, SP_TCP_TRM_REQ_POSTED);

    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
        *command = RAMROD_OPCODE_TOE_TERMINATE;
    } else {
        *command = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
    }
    *data = 0;
    return LM_STATUS_PENDING;
}
/* Prepare an abortive-disconnect (RST) slow-path request: mark both the Tx
 * and Rx connection sides as having an RST posted (under their locks) and
 * fill the ramrod command. Returns LM_STATUS_PENDING. */
static lm_status_t lm_tcp_post_abortive_disconnect_request (
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t * tcp,
    OUT u8_t * command,
    OUT u64_t * data
    )
{
    lm_tcp_con_t *con_pair[2];
    int idx;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_abortive_disconnect_request\n");
    DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL ) &&
    (tcp->hdr.status != STATE_STATUS_ABORTED) );

    /* Flag both directions, Tx first (matches the original lock order). */
    con_pair[0] = tcp->tx_con;
    con_pair[1] = tcp->rx_con;
    for (idx = 0; idx < 2; idx++) {
        mm_acquire_tcp_lock(pdev, con_pair[idx]);
        con_pair[idx]->flags |= TCP_RST_REQ_POSTED;
        mm_release_tcp_lock(pdev, con_pair[idx]);
    }

    *command = RAMROD_OPCODE_TOE_RESET_SEND;
    *data = 0;
    return LM_STATUS_PENDING;
}
/* Prepare the first stage of connection upload: the searcher-delete ramrod.
 * Moves the connection to UPLOAD_PENDING, flags both connection sides (TOE
 * only) as having a terminate posted, and passes the connection's searcher
 * hash value in the ramrod data. Returns LM_STATUS_PENDING. */
static lm_status_t lm_tcp_post_upload_tcp_request (
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t * tcp,
    OUT u8_t * command,
    OUT u64_t * data
    )
{
    struct toe_spe spe = {{0}};
    lm_tcp_con_t *con_pair[2];
    int idx;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_upload_tcp_request\n");
    DbgBreakIf(tcp->hdr.status < STATE_STATUS_NORMAL);
    DbgBreakIf(tcp->hdr.status >= STATE_STATUS_UPLOAD_PENDING);
    DbgBreakIf(tcp->hdr.state_id != STATE_ID_TCP);
    tcp->hdr.status = STATE_STATUS_UPLOAD_PENDING;

    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
        /* Tx first, then Rx — preserves the original lock order. */
        con_pair[0] = tcp->tx_con;
        con_pair[1] = tcp->rx_con;
        for (idx = 0; idx < 2; idx++) {
            mm_acquire_tcp_lock(pdev, con_pair[idx]);
            DbgBreakIf(con_pair[idx]->flags & TCP_TRM_REQ_POSTED);
            con_pair[idx]->flags |= TCP_TRM_REQ_POSTED;
            mm_release_tcp_lock(pdev, con_pair[idx]);
        }
    }

    tcp->sp_flags |= SP_TCP_SRC_REQ_POSTED;
    *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_SEARCHER_DELETE : L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
    spe.toe_data.rx_completion.hash_value = (u16_t)(tcp->tcp_const.hash_value);
    *data = *((u64_t*)(&(spe.toe_data.rx_completion)));
    return LM_STATUS_PENDING;
}
/* Prepare a query slow-path request: zero the per-connection slow-path
 * buffer that firmware will fill with the query results, and pass its
 * physical address in the ramrod data. Returns LM_STATUS_PENDING. */
static lm_status_t lm_tcp_post_query_request (
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t * tcp,
    OUT u8_t * command,
    OUT u64_t * data,
    IN lm_tcp_slow_path_request_t * request
    )
{
    struct toe_spe spe = {{0}};
    UNREFERENCED_PARAMETER_(request);

    DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_query_request\n");
    tcp->sp_flags |= SP_TCP_QRY_REQ_POSTED;

    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
        *command = RAMROD_OPCODE_TOE_QUERY;
    } else {
        *command = L5CM_RAMROD_CMD_ID_QUERY;
    }

    /* Hand the results buffer to firmware zeroed. */
    mm_memset(tcp->sp_req_data.virt_addr, 0, TOE_SP_PHYS_DATA_SIZE);
    spe.toe_data.phys_addr.hi = tcp->sp_req_data.phys_addr.as_u32.high;
    spe.toe_data.phys_addr.lo = tcp->sp_req_data.phys_addr.as_u32.low;
    *data = *((u64_t*)(&(spe.toe_data.phys_addr)));
    return LM_STATUS_PENDING;
}
/* Begin uploading a path state. The current delegated state is returned via
 * ret_delegated. If no connections still depend on the path the upload
 * finishes immediately (LM_STATUS_SUCCESS); otherwise the path moves to
 * UPLOAD_PENDING and LM_STATUS_PENDING is returned. */
lm_status_t lm_tcp_post_upload_path_request (
    struct _lm_device_t * pdev,
    lm_path_state_t * path_state,
    l4_path_delegated_state_t * ret_delegated)
{
    DbgBreakIf(path_state->hdr.state_id != STATE_ID_PATH);
    DbgBreakIf(path_state->hdr.status != STATE_STATUS_NORMAL);

    *ret_delegated = path_state->path_delegated;
    DbgMessage(pdev, INFORMl4sp, "lm_tcp_post_upload_path_request: num_dependents=%d\n", path_state->num_dependents);

    if (path_state->num_dependents != 0) {
        path_state->hdr.status = STATE_STATUS_UPLOAD_PENDING;
        return LM_STATUS_PENDING;
    }
    path_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
    return LM_STATUS_SUCCESS;
}
/* Begin uploading a neighbor state. If no paths still depend on it the
 * upload finishes immediately (LM_STATUS_SUCCESS); otherwise the neighbor
 * moves to UPLOAD_PENDING and LM_STATUS_PENDING is returned. */
lm_status_t lm_tcp_post_upload_neigh_request(
    struct _lm_device_t * pdev,
    lm_neigh_state_t * neigh_state
    )
{
    DbgBreakIf(neigh_state->hdr.state_id != STATE_ID_NEIGH);
    DbgBreakIf(neigh_state->hdr.status != STATE_STATUS_NORMAL);
    DbgMessage(pdev, INFORMl4sp, "lm_tcp_post_upload_neigh_request: num_dependents=%d\n", neigh_state->num_dependents);
#if DBG
    /* Debug-only consistency walk: any path that still references this
     * neighbor must itself have left the NORMAL state already. */
    {
        lm_path_state_t * path_state;
        for (path_state = (lm_path_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.path_list);
             path_state != NULL;
             path_state = (lm_path_state_t *) d_list_next_entry(&path_state->hdr.link)) {
            if (path_state->neigh == neigh_state) {
                DbgBreakIf(path_state->hdr.status == STATE_STATUS_NORMAL);
            }
        }
    }
#endif
    if (neigh_state->num_dependents != 0) {
        neigh_state->hdr.status = STATE_STATUS_UPLOAD_PENDING;
        return LM_STATUS_PENDING;
    }
    neigh_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
    return LM_STATUS_SUCCESS;
}
/* lm_tcp_set_tcp_cached
 * Compares the newly supplied cached TCP parameters (tcp_cached) against the
 * driver's stored copy (tcp->tcp_cached), records every difference in the
 * update-ramrod parameter block (mem_virt -> ctx) and refreshes the stored
 * copy as it goes.
 * Returns LM_STATUS_PENDING when at least one field changed (or a restart
 * flag was set) so a TOE update ramrod must be posted; LM_STATUS_SUCCESS
 * when nothing changed and the request completes synchronously.
 */
static lm_status_t lm_tcp_set_tcp_cached(
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp,
l4_tcp_cached_state_t * tcp_cached,
void * mem_virt
)
{
struct toe_update_ramrod_cached_params * ctx = mem_virt;
l4_ofld_params_t * l4_params = &(pdev->ofld_info.l4_params);
MM_INIT_TCP_LOCK_HANDLE();
DbgMessage(pdev, INFORMl4sp, "## lm_tcp_set_tcp_cached cid=%d\n", tcp->cid);
/* Keep-alive enable bit toggled? */
if ((tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) !=
(tcp_cached->tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE)) {
if (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) {
ctx->enable_keepalive = 1;
} else {
ctx->enable_keepalive = 0;
}
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_ENABLE_KEEPALIVE_CHANGED;
DbgMessage(pdev, INFORMl4sp, "## tcp_cached: [cid=%d] update : flag TCP_FLAG_ENABLE_KEEP_ALIVE changed to %d\n",
tcp->cid, ctx->enable_keepalive);
}
/* Nagle enable bit toggled? */
if ((tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_NAGLING) !=
(tcp_cached->tcp_flags & TCP_FLAG_ENABLE_NAGLING)) {
if (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_NAGLING) {
ctx->enable_nagle = 1;
} else {
ctx->enable_nagle = 0;
}
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_ENABLE_NAGLE_CHANGED;
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_ENABLE_NAGLING changed to %d\n",
tcp->cid, ctx->enable_nagle);
}
/* One-shot restart requests: these are passed through to the ramrod
 * unconditionally (not diffed against the stored copy). */
if (tcp_cached->tcp_flags & TCP_FLAG_RESTART_KEEP_ALIVE) {
ctx->ka_restart = 1;
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_RESTART_KEEP_ALIVE set\n",
tcp->cid);
} else {
ctx->ka_restart = 0;
}
if (tcp_cached->tcp_flags & TCP_FLAG_RESTART_MAX_RT) {
ctx->retransmit_restart = 1;
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TOE_CACHED_RESTART_MAX_RT set\n",
tcp->cid);
} else {
ctx->retransmit_restart = 0;
}
if (tcp_cached->tcp_flags & TCP_FLAG_UPDATE_RCV_WINDOW) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_UPDATE_RCV_WINDOW set\n",
tcp->cid);
}
/* Store the full new flag set (including the one-shot bits above). */
tcp->tcp_cached.tcp_flags = tcp_cached->tcp_flags;
/* Flow label only exists for IPv6 connections. */
if (tcp->path->path_const.ip_version == IP_VERSION_IPV6) {
if (tcp->tcp_cached.flow_label != tcp_cached->flow_label) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flow_label changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.flow_label, tcp_cached->flow_label);
tcp->tcp_cached.flow_label = tcp_cached->flow_label;
ctx->flow_label= tcp->tcp_cached.flow_label;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_FLOW_LABEL_CHANGED;
}
}
/* Receive window resize: adjust generic-buffer accounting and post the
 * window delta to the firmware's SWS mechanism before storing it. */
if (tcp->tcp_cached.initial_rcv_wnd != tcp_cached->initial_rcv_wnd) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : initial_rcv_wnd changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.initial_rcv_wnd, tcp_cached->initial_rcv_wnd);
mm_tcp_update_required_gen_bufs(pdev,
tcp->rx_con->u.rx.sws_info.mss,
tcp->rx_con->u.rx.sws_info.mss,
tcp_cached->initial_rcv_wnd,
tcp->tcp_cached.initial_rcv_wnd);
if ERR_IF(tcp_cached->initial_rcv_wnd > MAX_INITIAL_RCV_WND) {
DbgBreakIfAll(tcp_cached->initial_rcv_wnd > MAX_INITIAL_RCV_WND);
}
mm_acquire_tcp_lock(pdev,tcp->rx_con);
if (tcp->tcp_cached.initial_rcv_wnd < tcp_cached->initial_rcv_wnd) {
/* Window grew - post the increase. */
lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp_cached->initial_rcv_wnd - tcp->tcp_cached.initial_rcv_wnd, TCP_RX_POST_SWS_INC);
} else {
/* Window shrank - post the decrease and record the event. */
lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp->tcp_cached.initial_rcv_wnd - tcp_cached->initial_rcv_wnd, TCP_RX_POST_SWS_DEC);
pdev->toe_info.toe_events |= LM_TOE_EVENT_WINDOW_DECREASE;
}
mm_release_tcp_lock(pdev, tcp->rx_con);
tcp->tcp_cached.initial_rcv_wnd = tcp_cached->initial_rcv_wnd;
ctx->initial_rcv_wnd = tcp->tcp_cached.initial_rcv_wnd;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_INITIAL_RCV_WND_CHANGED;
}
/* TTL (IPv4) / hop limit (IPv6) share one field in tcp_cached. */
if (tcp->tcp_cached.ttl_or_hop_limit != tcp_cached->ttl_or_hop_limit) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ttl_or_hop_limit changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.ttl_or_hop_limit, tcp_cached->ttl_or_hop_limit);
tcp->tcp_cached.ttl_or_hop_limit = tcp_cached->ttl_or_hop_limit;
if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
ctx->ttl= tcp->tcp_cached.ttl_or_hop_limit;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TTL_CHANGED;
} else {
ctx->hop_limit = tcp->tcp_cached.ttl_or_hop_limit;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_HOP_LIMIT_CHANGED;
}
}
/* TOS (IPv4) / traffic class (IPv6) share one field in tcp_cached. */
if (tcp->tcp_cached.tos_or_traffic_class != tcp_cached->tos_or_traffic_class) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : tos_or_traffic_class changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.tos_or_traffic_class, tcp_cached->tos_or_traffic_class);
tcp->tcp_cached.tos_or_traffic_class = tcp_cached->tos_or_traffic_class;
if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
ctx->tos = tcp_cached->tos_or_traffic_class;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TOS_CHANGED;
} else {
ctx->traffic_class = tcp_cached->tos_or_traffic_class;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TRAFFIC_CLASS_CHANGED;
}
}
if (tcp->tcp_cached.ka_probe_cnt != tcp_cached->ka_probe_cnt) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_probe_cnt changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.ka_probe_cnt, tcp_cached->ka_probe_cnt);
tcp->tcp_cached.ka_probe_cnt = tcp_cached->ka_probe_cnt;
ctx->ka_max_probe_count = tcp_cached->ka_probe_cnt;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_MAX_PROBE_COUNT_CHANGED;
}
if (tcp->tcp_cached.user_priority != tcp_cached->user_priority) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : user_priority changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.user_priority, tcp_cached->user_priority);
/* 802.1p priority is 3 bits wide. */
DbgBreakIf(tcp_cached->user_priority > 0x7);
tcp->tcp_cached.user_priority = tcp_cached->user_priority;
ctx->user_priority = tcp_cached->user_priority;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_USER_PRIORITY_CHANGED;
}
/* rcv_indication_size is expected to always be 0 (debug assert), the diff
 * logic is kept for completeness. */
DbgBreakIf(tcp_cached->rcv_indication_size != 0);
if (tcp->tcp_cached.rcv_indication_size != tcp_cached->rcv_indication_size) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : rcv_indication_size changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.rcv_indication_size, tcp_cached->rcv_indication_size);
DbgBreakIf(tcp->tcp_cached.rcv_indication_size > 0xffff);
tcp->tcp_cached.rcv_indication_size = tcp_cached->rcv_indication_size;
ctx->rcv_indication_size = (u16_t)tcp_cached->rcv_indication_size;
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_RCV_INDICATION_SIZE_CHANGED;
}
/* Timer values are converted from the OS tick resolution to the
 * firmware/chip tick resolution before being written to the ramrod. */
if (tcp->tcp_cached.ka_time_out != tcp_cached->ka_time_out) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_time_out changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.ka_time_out, tcp_cached->ka_time_out);
tcp->tcp_cached.ka_time_out = tcp_cached->ka_time_out;
ctx->ka_timeout =
lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_TIMEOUT_CHANGED;
}
if (tcp->tcp_cached.ka_interval != tcp_cached->ka_interval) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_interval changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.ka_interval, tcp_cached->ka_interval);
tcp->tcp_cached.ka_interval = tcp_cached->ka_interval;
ctx->ka_interval =
lm_time_resolution(pdev, tcp->tcp_cached.ka_interval, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_INTERVAL_CHANGED;
}
if (tcp->tcp_cached.max_rt != tcp_cached->max_rt) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : max_rt changed from %d to %d\n",
tcp->cid, tcp->tcp_cached.max_rt, tcp_cached->max_rt);
tcp->tcp_cached.max_rt = tcp_cached->max_rt;
/* max_rt uses the TSEMI clock resolution, not the timers block's. */
ctx->max_rt =
lm_time_resolution(pdev, tcp->tcp_cached.max_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_MAX_RT_CHANGED;
}
/* Nothing to update and no restart requested - complete synchronously. */
if (!ctx->changed_fields && !ctx->ka_restart && !ctx->retransmit_restart) {
DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : nothing changed, completing synchronously\n", tcp->cid);
return LM_STATUS_SUCCESS;
}
return LM_STATUS_PENDING;
}
/* lm_tcp_set_path_cached
 * Applies an updated cached path state to a TCP connection: recomputes the
 * effective MSS from the new path MTU and, if it changed, records the new
 * value in the update-ramrod parameter block.
 * Returns LM_STATUS_PENDING when a ramrod must be posted, LM_STATUS_SUCCESS
 * when nothing changed.
 */
static lm_status_t lm_tcp_set_path_cached(
    struct _lm_device_t    * pdev,
    lm_tcp_state_t         * tcp,
    l4_path_cached_state_t * path_cached,
    void                   * mem_virt
    )
{
    struct toe_update_ramrod_cached_params * ctx = mem_virt;
    u32_t new_mss;

    /* Recompute the effective MSS from the (possibly changed) path MTU,
     * accounting for IP version, timestamps, SNAP encapsulation and VLAN. */
    new_mss = _lm_tcp_calc_mss(path_cached->path_mtu,
                               tcp->tcp_const.remote_mss,
                               (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
                               tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
                               pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
                               tcp->path->neigh->neigh_const.vlan_tag != 0);

    if (new_mss != tcp->rx_con->u.rx.sws_info.mss) {
        DbgMessage(pdev, INFORMl4sp, "## path_cached: tcp [cid=%d] update : mss (as a result of pathMtu) from %d to %d\n",
                   tcp->cid, tcp->rx_con->u.rx.sws_info.mss, new_mss);
        /* Generic-buffer accounting depends on the MSS - refresh it. */
        mm_tcp_update_required_gen_bufs(pdev,
                                        new_mss,
                                        tcp->rx_con->u.rx.sws_info.mss,
                                        tcp->tcp_cached.initial_rcv_wnd,
                                        tcp->tcp_cached.initial_rcv_wnd);
        tcp->rx_con->u.rx.sws_info.mss = new_mss;
        DbgBreakIf(new_mss > 0xffff);
        ctx->mss = (u16_t)new_mss;
        ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_MSS_CHANGED;
    }

    /* No change - complete the request synchronously, no ramrod needed. */
    return (ctx->changed_fields == 0) ? LM_STATUS_SUCCESS : LM_STATUS_PENDING;
}
/* lm_tcp_set_neigh_cached
 * Applies an updated cached neighbor state to a TCP connection: copies the
 * 6-byte destination MAC address into the update-ramrod parameter block.
 * A neighbor update is always posted, so this always returns
 * LM_STATUS_PENDING.
 */
static lm_status_t lm_tcp_set_neigh_cached(
    struct _lm_device_t     * pdev,
    lm_tcp_state_t          * tcp,
    l4_neigh_cached_state_t * neigh_cached,
    void                    * mem_virt
    )
{
    struct toe_update_ramrod_cached_params * ctx = mem_virt;
    int idx;

    DbgMessage(pdev, INFORMl4sp, "## neigh_cached: tcp [cid=%d] update : neighbor dst_addr\n", tcp->cid);

    /* Copy the destination MAC byte by byte (source elements are wider
     * than u8_t, hence the per-element cast). */
    for (idx = 0; idx < 6; idx++) {
        ctx->dest_addr[idx] = (u8_t)neigh_cached->dst_addr[idx];
    }
    ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_DEST_ADDR_CHANGED;

    return LM_STATUS_PENDING;
}
/* lm_tcp_post_update_request
 * Prepares a TOE update ramrod for one of the cached-state update request
 * types (tcp / path / neigh / path-relink). Fills the per-connection
 * slow-path data buffer with the changed parameters and returns the ramrod
 * command + data to the caller.
 * Returns LM_STATUS_PENDING when a ramrod must be posted, LM_STATUS_SUCCESS
 * when nothing changed, LM_STATUS_FAILURE for an unknown request type
 * (the switch has no default case, so lm_status keeps its initial value).
 */
static lm_status_t lm_tcp_post_update_request (
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
OUT u8_t * command,
OUT u64_t * data,
IN lm_tcp_slow_path_request_t * request
)
{
struct toe_spe spe = {{0}};
lm_status_t lm_status = LM_STATUS_FAILURE ;
DbgBreakIf(tcp->hdr.state_id != STATE_ID_TCP);
*command = RAMROD_OPCODE_TOE_UPDATE;
/* The ramrod data is the physical address of the per-connection
 * slow-path buffer that holds the update parameters. */
spe.toe_data.phys_addr.hi = tcp->sp_req_data.phys_addr.as_u32.high;
spe.toe_data.phys_addr.lo = tcp->sp_req_data.phys_addr.as_u32.low;
*data = *((u64_t*)(&(spe.toe_data.phys_addr)));
/* Start from a clean parameter block; the set_*_cached helpers below
 * only write the fields that changed. */
mm_memset(tcp->sp_req_data.virt_addr, 0, sizeof(struct toe_update_ramrod_cached_params));
DbgBreakIf((tcp->hdr.status != STATE_STATUS_NORMAL) &&
(tcp->hdr.status != STATE_STATUS_ABORTED));
switch(request->type) {
case SP_REQUEST_UPDATE_TCP:
lm_status = lm_tcp_set_tcp_cached(pdev, tcp,
request->sent_data.tcp_update_data.data,
tcp->sp_req_data.virt_addr);
break;
case SP_REQUEST_UPDATE_PATH:
DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
lm_status = lm_tcp_set_path_cached(pdev, tcp,
request->sent_data.tcp_update_data.data,
tcp->sp_req_data.virt_addr);
break;
case SP_REQUEST_UPDATE_NEIGH:
DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
lm_status = lm_tcp_set_neigh_cached(pdev, tcp,
request->sent_data.tcp_update_data.data,
tcp->sp_req_data.virt_addr);
break;
case SP_REQUEST_UPDATE_PATH_RELINK:
/* A relink combines a neighbor update with a path update in a single
 * ramrod. The path helper's return value is intentionally ignored:
 * the neigh update always returns LM_STATUS_PENDING, so the combined
 * request is always posted (presumably - confirm against callers). */
DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
lm_status = lm_tcp_set_neigh_cached(pdev, tcp,
&((lm_tcp_path_relink_cached_t *)request->sent_data.tcp_update_data.data)->neigh_cached,
tcp->sp_req_data.virt_addr);
DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
lm_tcp_set_path_cached(pdev, tcp, &((lm_tcp_path_relink_cached_t *)request->sent_data.tcp_update_data.data)->path_cached,
tcp->sp_req_data.virt_addr);
break;
}
return lm_status;
}
/* lm_tcp_post_empty_ramrod_request
 * Prepares an "empty" TOE ramrod. The ramrod data carries only the
 * connection's hash value so the firmware can route the completion back on
 * the correct RSS queue.
 */
static lm_status_t lm_tcp_post_empty_ramrod_request(
    IN  struct _lm_device_t * pdev,
    IN  lm_tcp_state_t      * tcp,
    OUT u8_t                * command,
    OUT u64_t               * data)
{
    struct toe_spe spe = {{0}};

    DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_empty_ramrod_request\n");

    spe.toe_data.rx_completion.hash_value = (u16_t)(tcp->tcp_const.hash_value);
    *data = *((u64_t*)(&(spe.toe_data.rx_completion)));
    *command = RAMROD_OPCODE_TOE_EMPTY_RAMROD;

    return LM_STATUS_PENDING;
}
/* lm_tcp_post_invalidate_request
 * Prepares a TOE invalidate ramrod: marks both connection sides (tx first,
 * then rx) as having an invalidate request posted, each under its own lock.
 */
static lm_status_t lm_tcp_post_invalidate_request(
    IN  struct _lm_device_t * pdev,
    IN  lm_tcp_state_t      * tcp,
    OUT u8_t                * command,
    OUT u64_t               * data)
{
    lm_tcp_con_t * con = tcp->tx_con;
    u8_t           idx;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, INFORMl4sp, "## lm_tcp_post_invalidate_request cid=%d\n", tcp->cid);
    DbgBreakIf(tcp->hdr.status != STATE_STATUS_NORMAL &&
               tcp->hdr.status != STATE_STATUS_ABORTED);

    /* Flag both cons: first pass handles tx, second pass rx. */
    for (idx = 0; idx < 2; idx++) {
        mm_acquire_tcp_lock(pdev, con);
        DbgBreakIf(con->flags & TCP_INV_REQ_POSTED);
        con->flags |= TCP_INV_REQ_POSTED;
        mm_release_tcp_lock(pdev, con);
        con = tcp->rx_con;
    }

    *command = RAMROD_OPCODE_TOE_INVALIDATE;
    *data = 0;
    return LM_STATUS_PENDING;
}
/* lm_tcp_post_slow_path_request
 * Central dispatcher for all L4 slow-path requests on a connection.
 * Attaches the request to the tcp state, calls the type-specific preparation
 * helper, and - when the helper returns LM_STATUS_PENDING - posts the
 * resulting ramrod to the firmware. Any non-pending result detaches the
 * request again and returns it to the caller synchronously.
 * Returns the preparation helper's status (stored in request->status too).
 */
lm_status_t lm_tcp_post_slow_path_request(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp,
lm_tcp_slow_path_request_t *request)
{
lm_status_t lm_status = LM_STATUS_INVALID_PARAMETER;
u64_t data = 0;
u8_t command = 0;
DbgBreakIf(!(pdev && tcp && request));
/* Only one slow-path request may be outstanding per connection. */
DbgBreakIf(tcp->sp_request);
DbgMessage(pdev, VERBOSEl4sp, "### lm_tcp_post_slow_path_request cid=%d, type=%d\n", tcp->cid, request->type);
/* Sanity: the cid cookie must map back to this very tcp state. */
DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));
tcp->sp_request = request;
switch(request->type) {
case SP_REQUEST_INITIATE_OFFLOAD:
lm_status = lm_tcp_post_initiate_offload_request(pdev, tcp, &command, &data);
break;
case SP_REQUEST_TERMINATE1_OFFLOAD:
lm_status = lm_tcp_post_terminate_tcp_request(pdev, tcp, &command, &data);
break;
case SP_REQUEST_TERMINATE_OFFLOAD:
lm_status = lm_tcp_post_upload_tcp_request(pdev, tcp, &command, &data);
break;
case SP_REQUEST_QUERY:
lm_status = lm_tcp_post_query_request(pdev, tcp, &command, &data, request);
break;
case SP_REQUEST_UPDATE_TCP:
case SP_REQUEST_UPDATE_PATH:
case SP_REQUEST_UPDATE_NEIGH:
case SP_REQUEST_UPDATE_PATH_RELINK:
lm_status = lm_tcp_post_update_request(pdev, tcp, &command, &data, request);
break;
case SP_REQUEST_INVALIDATE:
lm_status = lm_tcp_post_invalidate_request(pdev, tcp, &command, &data);
break;
case SP_REQUEST_ABORTIVE_DISCONNECT:
lm_status = lm_tcp_post_abortive_disconnect_request(pdev,tcp, &command, &data);
break;
case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
case SP_REQUEST_PENDING_TX_RST:
/* These are all "empty" ramrods used to flush pending work. */
lm_status = lm_tcp_post_empty_ramrod_request(pdev, tcp, &command, &data);
break;
default:
DbgBreakMsg("Illegal slow path request type!\n");
}
if(lm_status == LM_STATUS_PENDING) {
DbgMessage(pdev, VERBOSEl4sp,
"calling lm_command_post, cid=%d, command=%d, con_type=%d, data=%lx\n",
tcp->cid, command, tcp->ulp_type, data);
/* A connection that has already completed upload must never post
 * another ramrod - fail the request instead. */
if (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE)
{
DbgBreakIf(tcp->hdr.status == STATE_STATUS_UPLOAD_DONE);
tcp->sp_request = NULL;
lm_status = LM_STATUS_INVALID_PARAMETER;
} else
{
lm_command_post(pdev, tcp->cid, command, CMD_PRIORITY_NORMAL, tcp->ulp_type, data);
}
} else {
/* Completed (or failed) synchronously - detach the request. */
tcp->sp_request = NULL;
}
request->status = lm_status;
return lm_status;
}
/* lm_tcp_service_deferred_cqes
 * Services CQEs whose processing was deferred while offload completion was
 * in progress. Runs two passes - tx con first, rx con second - replaying
 * fast-path completion for each and, when slow-path work was snapshotted,
 * dropping the con lock to run the slow-path handler as well.
 */
void lm_tcp_service_deferred_cqes(lm_device_t * pdev, lm_tcp_state_t * tcp)
{
lm_tcp_con_t * con = tcp->tx_con;
u8_t idx = 0, dead=FALSE;
MM_INIT_TCP_LOCK_HANDLE();
DbgMessage(pdev, INFORMl4sp, "### lm_tcp_service_deferred_cqes cid=%d\n", tcp->cid);
/* idx 0 = tx con, idx 1 = rx con. */
for (idx = 0; idx < 2; idx++) {
mm_acquire_tcp_lock(pdev, con);
/* TCP_DEFERRED_PROCESSING may be set again while the lock is dropped
 * for the slow-path call below, hence the while loop. */
while(con->flags & TCP_DEFERRED_PROCESSING) {
con->flags &= ~TCP_DEFERRED_PROCESSING;
DbgMessage(pdev, INFORMl4sp, "### deferred cid=%d\n", tcp->cid);
if (con->type == TCP_CON_TYPE_RX) {
lm_tcp_rx_complete_tcp_fp(pdev, con->tcp_state, con);
} else {
lm_tcp_tx_complete_tcp_fp(pdev, con->tcp_state, con);
}
/* Slow-path flags snapshotted during fp processing must be handled
 * without holding the con lock. */
if (con->dpc_info.snapshot_flags) {
mm_release_tcp_lock(pdev, con);
if (con->type == TCP_CON_TYPE_RX) {
lm_tcp_rx_complete_tcp_sp(pdev,tcp, con);
} else {
lm_tcp_tx_complete_tcp_sp(pdev,tcp, con);
}
mm_acquire_tcp_lock(pdev, con);
}
}
con->flags &= ~TCP_COMP_DEFERRED;
dead = lm_tcp_is_tcp_dead(pdev, tcp, TCP_IS_DEAD_OP_OFLD_COMP_DFRD);
mm_release_tcp_lock(pdev, con);
con = tcp->rx_con;
/* NOTE(review): if 'dead' were reported on the first (tx) pass, the
 * second pass would touch a deleted state - presumably
 * TCP_IS_DEAD_OP_OFLD_COMP_DFRD can only report dead once both sides
 * are done; confirm against lm_tcp_is_tcp_dead. */
if (dead) {
mm_tcp_del_tcp_state(pdev, tcp);
}
}
}
/* lm_tcp_comp_initiate_offload_request
 * Ramrod-completion handler for an initiate-offload request.
 * comp_status == 0 means success: the connection becomes NORMAL, posting is
 * unblocked on both cons and the established-connections statistic is
 * bumped. A non-zero comp_status marks the connection INIT_OFFLOAD_ERR and
 * blocks further completions. In both cases the pending slow-path request
 * is detached and completed back to the OS glue, and on success any CQEs
 * deferred during the offload are serviced.
 */
void lm_tcp_comp_initiate_offload_request(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp,
u32_t comp_status)
{
lm_tcp_slow_path_request_t *sp_request;
lm_tcp_con_t *con;
lm_status_t lm_status = LM_STATUS_SUCCESS;
int i;
MM_INIT_TCP_LOCK_HANDLE();
DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_comp_initiate_offload_request\n");
MM_ACQUIRE_TOE_LOCK(pdev);
DbgBreakIf(tcp->hdr.status != STATE_STATUS_OFFLOAD_PENDING);
if(!comp_status)
{
/* Offload succeeded. */
tcp->hdr.status = STATE_STATUS_NORMAL;
if (tcp->ulp_type == TOE_CONNECTION_TYPE)
{
/* Unblock posting on both cons: i==0 -> tx, i==1 -> rx. */
con = tcp->tx_con;
for (i = 0; i < 2; i++)
{
mm_acquire_tcp_lock(pdev, con);
DbgBreakIf(!(con->flags & TCP_COMP_DEFERRED));
DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
con->flags &= ~TCP_POST_BLOCKED;
mm_release_tcp_lock(pdev, con);
con = tcp->rx_con;
}
if( IP_VERSION_IPV4 == tcp->path->path_const.ip_version )
{
++pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
}
else if( IP_VERSION_IPV6 == tcp->path->path_const.ip_version )
{
++pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
}
}
}
else
{
/* Offload failed - block completions on both cons and report failure. */
#ifndef _VBD_CMD_
DbgMessage(pdev, FATAL, "initiate offload failed. err=%x\n", comp_status);
#endif
tcp->hdr.status = STATE_STATUS_INIT_OFFLOAD_ERR;
if (tcp->ulp_type == TOE_CONNECTION_TYPE)
{
con = tcp->tx_con;
for (i = 0; i < 2; i++)
{
mm_acquire_tcp_lock(pdev, con);
DbgBreakIf((con->flags & ~TCP_INDICATE_REJECTED) != (TCP_POST_BLOCKED | TCP_COMP_DEFERRED));
con->flags &= ~TCP_COMP_DEFERRED;
con->flags |= TCP_COMP_BLOCKED;
mm_release_tcp_lock(pdev, con);
con = tcp->rx_con;
}
}
lm_status = LM_STATUS_FAILURE;
}
DbgBreakIf(tcp->sp_flags & (SP_REQUEST_COMPLETED_RX | SP_REQUEST_COMPLETED_TX));
/* Detach the request before completing it to the OS glue. */
tcp->sp_request->status = lm_status;
sp_request = tcp->sp_request;
tcp->sp_request = NULL;
DbgBreakIf(!(tcp->sp_flags & SP_TCP_OFLD_REQ_POSTED));
tcp->sp_flags |= SP_TCP_OFLD_REQ_COMP;
mm_tcp_comp_slow_path_request(pdev, tcp, sp_request);
MM_RELEASE_TOE_LOCK(pdev);
/* NOTE(review): tcp is dereferenced after the TOE lock is released and the
 * request was completed - presumably the state cannot be freed before the
 * deferred CQEs are serviced; confirm against the upload path. */
if(!comp_status && (tcp->ulp_type == TOE_CONNECTION_TYPE)) {
lm_tcp_service_deferred_cqes(pdev, tcp);
}
}
/* lm_tcp_collect_stats
 * Folds a dying connection's per-con counters into the device-wide TOE
 * statistics. A connection that never completed offload has no cons and is
 * skipped entirely.
 */
void lm_tcp_collect_stats(
    struct _lm_device_t *pdev,
    lm_tcp_state_t      *tcp)
{
    lm_tcp_con_t * tx = tcp->tx_con;
    lm_tcp_con_t * rx = tcp->rx_con;

    if (!tx || !rx) {
        return;
    }

    /* Tx side counters. */
    pdev->toe_info.stats.tx_bytes_posted_total += tx->bytes_post_cnt;
    pdev->toe_info.stats.tx_rq_complete_calls += tx->rq_completion_calls;
    pdev->toe_info.stats.tx_bytes_completed_total += tx->bytes_comp_cnt;
    pdev->toe_info.stats.tx_rq_bufs_completed += tx->buffer_completed_cnt;
    pdev->toe_info.stats.total_tx_abortion_under_flr += tx->abortion_under_flr;

    /* Rx side counters. */
    pdev->toe_info.stats.rx_rq_complete_calls += rx->rq_completion_calls;
    pdev->toe_info.stats.rx_rq_bufs_completed += rx->buffer_completed_cnt;
    pdev->toe_info.stats.rx_bytes_completed_total += rx->bytes_comp_cnt;
    pdev->toe_info.stats.rx_accepted_indications += rx->u.rx.gen_info.num_success_indicates;
    pdev->toe_info.stats.rx_bufs_indicated_accepted += rx->u.rx.gen_info.num_buffers_indicated;
    pdev->toe_info.stats.rx_bytes_indicated_accepted += rx->u.rx.gen_info.bytes_indicated_accepted;
    pdev->toe_info.stats.rx_rejected_indications += rx->u.rx.gen_info.num_failed_indicates;
    pdev->toe_info.stats.rx_bufs_indicated_rejected += rx->u.rx.gen_info.bufs_indicated_rejected;
    pdev->toe_info.stats.rx_bytes_indicated_rejected += rx->u.rx.gen_info.bytes_indicated_rejected;
    pdev->toe_info.stats.total_num_non_full_indications += rx->u.rx.gen_info.num_non_full_indications;
    pdev->toe_info.stats.rx_zero_byte_recv_reqs += rx->u.rx.rx_zero_byte_recv_reqs;
    pdev->toe_info.stats.rx_bufs_copied_grq += rx->u.rx.gen_info.num_buffers_copied_grq;
    pdev->toe_info.stats.rx_bufs_copied_rq += rx->u.rx.gen_info.num_buffers_copied_rq;
    pdev->toe_info.stats.rx_bytes_copied_in_comp += rx->u.rx.gen_info.bytes_copied_cnt_in_comp;
    pdev->toe_info.stats.rx_bytes_copied_in_post += rx->u.rx.gen_info.bytes_copied_cnt_in_post;
    pdev->toe_info.stats.rx_bytes_copied_in_process += rx->u.rx.gen_info.bytes_copied_cnt_in_process;

    /* High-water mark, not a sum. */
    if (pdev->toe_info.stats.max_number_of_isles_in_single_con < rx->u.rx.gen_info.max_number_of_isles) {
        pdev->toe_info.stats.max_number_of_isles_in_single_con = rx->u.rx.gen_info.max_number_of_isles;
    }

    pdev->toe_info.stats.rx_bufs_posted_total += rx->buffer_post_cnt;
    pdev->toe_info.stats.rx_bytes_posted_total += rx->bytes_post_cnt;
    pdev->toe_info.stats.rx_bufs_skipped_post += rx->buffer_skip_post_cnt;
    pdev->toe_info.stats.rx_bytes_skipped_post += rx->bytes_skip_post_cnt;
    pdev->toe_info.stats.rx_bytes_skipped_push += rx->bytes_push_skip_cnt;
    pdev->toe_info.stats.rx_partially_completed_buf_cnt += rx->partially_completed_buf_cnt;
    pdev->toe_info.stats.total_droped_empty_isles += rx->droped_empty_isles;
    pdev->toe_info.stats.total_droped_non_empty_isles += rx->droped_non_empty_isles;
    pdev->toe_info.stats.total_rx_post_blocked += rx->rx_post_blocked;
    pdev->toe_info.stats.total_zb_rx_post_blocked += rx->zb_rx_post_blocked;

    /* Auxiliary (runtime) memory allocation outcome. */
    if (tcp->aux_mem_flag & TCP_CON_AUX_RT_MEM_SUCCSESS_ALLOCATION) {
        pdev->toe_info.stats.total_aux_mem_success_allocations++;
    } else if (tcp->aux_mem_flag & TCP_CON_AUX_RT_MEM_FAILED_ALLOCATION) {
        pdev->toe_info.stats.total_aux_mem_failed_allocations++;
    }

    pdev->toe_info.stats.total_rx_abortion_under_flr += rx->abortion_under_flr;
}
/* lm_tcp_del_tcp_state
 * Detaches a TCP state from the driver: harvests its statistics, unlinks it
 * from the state block, detaches it from its path, removes it from the
 * searcher mirror hash, releases its CID and frees any runtime auxiliary
 * memory. The state structure itself is owned (and freed) by the caller /
 * OS glue layer.
 */
void lm_tcp_del_tcp_state(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp)
{
DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_del_tcp_state\n");
DbgBreakIf(!(pdev && tcp));
if (!lm_fl_reset_is_inprogress(pdev))
{
/* Outside FLR a state may only be deleted before offload or after
 * upload completed. */
DbgBreakIf(tcp->hdr.status >= STATE_STATUS_OFFLOAD_PENDING &&
tcp->hdr.status < STATE_STATUS_UPLOAD_DONE);
}
else
{
DbgMessage(pdev, FATAL, "###lm_tcp_del_tcp_state under FLR\n");
}
/* Fold the per-connection counters into the device totals before the
 * cons become unreachable. */
lm_tcp_collect_stats(pdev, tcp);
d_list_remove_entry(
&tcp->hdr.state_blk->tcp_list,
&tcp->hdr.link);
if (tcp->ulp_type == TOE_CONNECTION_TYPE)
{
pdev->toe_info.stats.total_upld++;
}
else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
{
pdev->iscsi_info.run_time.stats.total_upld++;
}
/* Detach from the path (decrement its dependent count) when the state
 * never reached / completed offload. */
if (!lm_fl_reset_is_inprogress(pdev) && (tcp->path != NULL)) {
DbgBreakIf((tcp->hdr.status != STATE_STATUS_INIT_OFFLOAD_ERR) &&
(tcp->hdr.status != STATE_STATUS_INIT) &&
(tcp->hdr.status != STATE_STATUS_INIT_CONTEXT));
DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
tcp->path->num_dependents--;
tcp->path = NULL;
}
/* Remove the connection from the searcher mirror hash if still there. */
if (tcp->in_searcher) {
lm_searcher_mirror_hash_remove(pdev, tcp->cid);
tcp->in_searcher = 0;
}
if (tcp->cid != 0) {
u8_t notify_fw = 0;
/* Only a cleanly uploaded connection notifies the firmware when its
 * CID is recycled. */
if (!lm_fl_reset_is_inprogress(pdev) && (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE)) {
notify_fw = 1;
}
lm_free_cid_resc(pdev, TOE_CONNECTION_TYPE, tcp->cid, notify_fw);
}
tcp->hdr.state_blk = NULL;
tcp->cid = 0;
tcp->ctx_virt = NULL;
tcp->ctx_phys.as_u64 = 0;
/* Free runtime-allocated auxiliary memory, if any. */
if (tcp->aux_memory != NULL) {
switch (tcp->type_of_aux_memory) {
case TCP_CON_AUX_RT_MEM:
DbgMessage(pdev, WARNl4sp,
"###lm_tcp_del_tcp_state: delete aux_mem (%d)\n",
tcp->aux_mem_size);
tcp->type_of_aux_memory = 0;
mm_rt_free_mem(pdev,tcp->aux_memory,tcp->aux_mem_size,LM_RESOURCE_NDIS);
break;
default:
break;
}
}
}
/* lm_tcp_del_path_state
 * Unlinks a path state from the state block, first detaching it from its
 * neighbor (decrementing the neighbor's dependent count).
 */
void lm_tcp_del_path_state(
    struct _lm_device_t *pdev,
    lm_path_state_t     *path)
{
    lm_neigh_state_t * neigh = path->neigh;

    UNREFERENCED_PARAMETER_(pdev);

    /* Detach from the neighbor (if any) before unlinking the path. */
    if (neigh != NULL) {
        DbgBreakIf(neigh->hdr.status != STATE_STATUS_NORMAL);
        neigh->num_dependents--;
        path->neigh = NULL;
    }

    /* Outside FLR the path must have completed its upload. */
    DbgBreakIf(!lm_fl_reset_is_inprogress(pdev) && (path->hdr.status != STATE_STATUS_UPLOAD_DONE));
    d_list_remove_entry(&path->hdr.state_blk->path_list, &path->hdr.link);
}
/* lm_tcp_del_neigh_state
 * Unlinks a neighbor state from the state block. Outside FLR the neighbor
 * must have completed its upload first.
 */
void lm_tcp_del_neigh_state(
struct _lm_device_t *pdev,
lm_neigh_state_t *neigh)
{
UNREFERENCED_PARAMETER_(pdev);
DbgBreakIf(!lm_fl_reset_is_inprogress(pdev) && (neigh->hdr.status != STATE_STATUS_UPLOAD_DONE));
d_list_remove_entry(&neigh->hdr.state_blk->neigh_list, &neigh->hdr.link);
}
/* lm_tcp_free_tcp_resc
 * Releases the rx-side generic-buffer resources of a connection that is
 * being torn down: clears the isle lists, drains the peninsula lists, and
 * returns every collected generic buffer to the pool, updating the global
 * isle counters accordingly.
 */
void lm_tcp_free_tcp_resc(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp)
{
lm_tcp_con_t *tcp_con;
d_list_t released_list_of_gen_bufs;
u8_t reset_in_progress = lm_reset_is_inprogress(pdev);
u32_t num_isles = 0;
u32_t num_bytes_in_isles = 0;
u32_t num_gen_bufs_in_isles = 0;
DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_free_tcp_resc tcp=%p\n", tcp);
DbgBreakIf(!(pdev && tcp));
/* Outside a reset, a state mid-offload/upload must not be freed and its
 * CID must already have been released. */
DbgBreakIf(!reset_in_progress && tcp->hdr.status >= STATE_STATUS_OFFLOAD_PENDING &&
tcp->hdr.status < STATE_STATUS_UPLOAD_DONE);
DbgBreakIf(tcp->cid);
tcp_con = tcp->rx_con;
if (tcp_con) {
d_list_init(&released_list_of_gen_bufs, NULL, NULL, 0);
/* Snapshot isle accounting before clearing them. */
num_isles = d_list_entry_cnt(&tcp_con->u.rx.gen_info.isles_list);
num_bytes_in_isles = tcp_con->u.rx.gen_info.isle_nbytes;
lm_tcp_rx_clear_isles(pdev, tcp, &released_list_of_gen_bufs);
num_gen_bufs_in_isles = d_list_entry_cnt(&released_list_of_gen_bufs);
/* The dpc peninsula should be empty unless we are in reset. */
if(!d_list_is_empty(&tcp_con->u.rx.gen_info.dpc_peninsula_list)) {
if (!reset_in_progress) {
DbgBreak();
}
d_list_add_tail(&released_list_of_gen_bufs,&tcp_con->u.rx.gen_info.dpc_peninsula_list);
d_list_init(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list, NULL, NULL, 0);
}
if (!d_list_is_empty(&tcp_con->u.rx.gen_info.peninsula_list)) {
d_list_add_tail(&released_list_of_gen_bufs,&tcp_con->u.rx.gen_info.peninsula_list);
d_list_init(&tcp->rx_con->u.rx.gen_info.peninsula_list, NULL, NULL, 0);
/* NOTE(review): the DbgBreakIf asserts the status is NOT UPLOAD_DONE,
 * yet the following if accounts lost bytes exactly when it IS -
 * debug builds break here while release builds record the loss;
 * confirm this contradiction is intentional. */
if (!reset_in_progress) {
DbgBreakIf(tcp->hdr.status == STATE_STATUS_UPLOAD_DONE);
if (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE) {
pdev->toe_info.stats.total_bytes_lost_on_upload += tcp_con->u.rx.gen_info.peninsula_nbytes;
}
}
}
/* Return everything collected above to the generic buffer pool and
 * subtract the cleared isles from the global counters. */
if (!d_list_is_empty(&released_list_of_gen_bufs)) {
mm_tcp_return_list_of_gen_bufs(pdev, &released_list_of_gen_bufs, 0, NON_EXISTENT_SB_IDX);
if (!reset_in_progress && num_isles) {
s32_t delta = -(s32_t)num_gen_bufs_in_isles;
MM_ACQUIRE_ISLES_CONTROL_LOCK(pdev);
lm_tcp_update_isles_cnts(pdev, -(s32_t)num_isles, delta);
MM_RELEASE_ISLES_CONTROL_LOCK(pdev);
}
}
}
}
/* lm_tcp_set_ofld_params
 * Stores a new set of L4 offload parameters in the device and pushes them
 * to every relevant storm processor (common and TOE-specific).
 * state_blk is unused. Always returns LM_STATUS_SUCCESS.
 */
lm_status_t
lm_tcp_set_ofld_params(
    lm_device_t      *pdev,
    lm_state_block_t *state_blk,
    l4_ofld_params_t *params)
{
    l4_ofld_params_t *curr_params = &pdev->ofld_info.l4_params;

    UNREFERENCED_PARAMETER_(state_blk);

    DbgMessage(pdev, VERBOSE, "###lm_tcp_set_ofld_params\n");

    /* All timer-related parameters must be non-zero. */
    DbgBreakIf(!(params->delayed_ack_ticks &&
                 params->nce_stale_ticks &&
                 params->push_ticks &&
                 params->sws_prevention_ticks &&
                 params->ticks_per_second));

    /* Cache the new parameters, then propagate them to the storms. */
    *curr_params = *params;
    _lm_set_ofld_params_xstorm_common(pdev, curr_params);
    _lm_set_ofld_params_tstorm_common(pdev, curr_params);
    _lm_set_ofld_params_tstorm_toe(pdev, curr_params);
    _lm_set_ofld_params_ustorm_toe(pdev, curr_params);
    _lm_set_ofld_params_xstorm_toe(pdev, curr_params);

    return LM_STATUS_SUCCESS;
}
/* lm_tcp_indicate_rst_received
 * Handles a remote RST: bumps the in-reset statistic, moves a NORMAL
 * connection to ABORTED, aborts all posted buffers on both cons (tx first,
 * then rx) with LM_STATUS_CONNECTION_RESET, and finally indicates the reset
 * to the OS glue layer.
 */
void lm_tcp_indicate_rst_received(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp
    )
{
    lm_tcp_con_t *rx_con, *tx_con;
    u8_t ip_version;
    MM_INIT_TCP_LOCK_HANDLE();

    /* Bug fix: validate the arguments BEFORE the first dereference. The
     * original code read tcp->path / tcp->rx_con / tcp->tx_con before the
     * DbgBreakIf(!(pdev && tcp)) sanity check ran. */
    DbgBreakIf( ! (pdev && tcp) );
    DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
    (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );

    ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
    LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].in_reset);
    rx_con = tcp->rx_con;
    tx_con = tcp->tx_con;

    /* A RST moves a NORMAL connection to ABORTED; a connection already
     * pending upload keeps its status. */
    MM_ACQUIRE_TOE_LOCK(pdev);
    if ( tcp->hdr.status == STATE_STATUS_NORMAL ) {
        tcp->hdr.status = STATE_STATUS_ABORTED;
    }
    MM_RELEASE_TOE_LOCK(pdev);

    /* Tx side: abort outstanding buffers with a reset status. */
    mm_acquire_tcp_lock(pdev, tx_con);
    DbgBreakIf(tx_con->flags & TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED);
    tx_con->flags |= TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED;
    lm_tcp_abort_bufs(pdev, tcp, tx_con, LM_STATUS_CONNECTION_RESET);
    tx_con->u.tx.flags &= ~ TCP_CON_RST_IND_NOT_SAFE;
    mm_release_tcp_lock(pdev, tx_con);

    /* Rx side: drop any pending RST/FIN indications, then abort buffers. */
    mm_acquire_tcp_lock(pdev, rx_con);
    rx_con->u.rx.flags &= ~ (TCP_CON_RST_IND_PENDING | TCP_CON_FIN_IND_PENDING);
    DbgBreakIf(rx_con->flags & TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED);
    rx_con->flags |= TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED;
    lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_CONNECTION_RESET);
    mm_release_tcp_lock(pdev, rx_con);

    mm_tcp_indicate_rst_received(pdev, tcp);
}
/* lm_tcp_searcher_ramrod_complete
 * Completion handler for the searcher (terminate-offload) ramrod: removes
 * the connection from the searcher mirror hash and immediately re-posts the
 * same slow-path request as a TERMINATE1 ramrod, all under the TOE lock.
 */
void lm_tcp_searcher_ramrod_complete(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp
)
{
lm_tcp_slow_path_request_t * request = tcp->sp_request;
DbgMessage(pdev, VERBOSEl4, "## lm_tcp_searcher_ramrod_comp\n");
DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
DbgBreakIf(request->type != SP_REQUEST_TERMINATE_OFFLOAD);
/* Re-use the same request object for the follow-up terminate ramrod. */
tcp->sp_request = NULL;
request->type = SP_REQUEST_TERMINATE1_OFFLOAD;
MM_ACQUIRE_TOE_LOCK(pdev);
DbgBreakIf(!tcp->in_searcher);
lm_searcher_mirror_hash_remove(pdev, tcp->cid);
tcp->in_searcher = 0;
DbgBreakIf(!(tcp->sp_flags & SP_TCP_SRC_REQ_POSTED));
tcp->sp_flags |= SP_TCP_SRC_REQ_COMP;
lm_tcp_post_slow_path_request(pdev, tcp, request);
MM_RELEASE_TOE_LOCK(pdev);
}
/* lm_tcp_terminate_ramrod_complete
 * Completion handler for the terminate ramrod: re-posts the same slow-path
 * request as a QUERY ramrod. For TOE connections it first clears the active
 * timers count in the connection context so the firmware sees no armed
 * timers during the query.
 */
void lm_tcp_terminate_ramrod_complete(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp)
{
lm_tcp_slow_path_request_t * request = tcp->sp_request;
MM_ACQUIRE_TOE_LOCK(pdev);
/* Re-use the same request object for the follow-up query ramrod. */
tcp->sp_request = NULL;
request->type = SP_REQUEST_QUERY;
DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
DbgBreakIf(!(tcp->sp_flags & SP_TCP_TRM_REQ_POSTED));
tcp->sp_flags |= SP_TCP_TRM_REQ_COMP;
if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
RESET_FLAGS(((struct toe_context *)tcp->ctx_virt)->timers_context.flags, __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS);
}
lm_tcp_post_slow_path_request(pdev, tcp, request);
MM_RELEASE_TOE_LOCK(pdev);
}
/* lm_tcp_rx_terminate_ramrod_complete
 * Rx-side terminate-ramrod completion: marks the rx con as terminated
 * under its lock. The connection must be pending upload and must not be
 * mid-indication.
 */
static void lm_tcp_rx_terminate_ramrod_complete(
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t      * tcp)
{
    lm_tcp_con_t * rx_con = tcp->rx_con;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4rx, "## lm_tcp_terminate_ramrod_comp_rx\n");
    DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);

    mm_acquire_tcp_lock(pdev, rx_con);
    DbgBreakIf( mm_tcp_indicating_bufs(rx_con) );
    DbgBreakIf(rx_con->flags & TCP_TRM_REQ_COMPLETED);
    rx_con->flags |= TCP_TRM_REQ_COMPLETED;
    mm_release_tcp_lock(pdev, rx_con);
}
/* lm_tcp_tx_terminate_ramrod_complete
 * Tx-side terminate-ramrod completion: marks the tx con as terminated
 * under its lock. The connection must be pending upload.
 */
static void lm_tcp_tx_terminate_ramrod_complete(
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t      * tcp)
{
    lm_tcp_con_t * tx_con = tcp->tx_con;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4tx, "## lm_tcp_terminate_ramrod_comp_tx\n");
    DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);

    mm_acquire_tcp_lock(pdev, tx_con);
    DbgBreakIf(tx_con->flags & TCP_TRM_REQ_COMPLETED);
    tx_con->flags |= TCP_TRM_REQ_COMPLETED;
    mm_release_tcp_lock(pdev, tx_con);
}
/* lm_tcp_indicate_fin_received
 * Indicates a received remote FIN to the OS glue layer: bumps the in-fin
 * statistic, clears any pending FIN indication flag, aborts outstanding rx
 * buffers with LM_STATUS_SUCCESS (graceful close), then delivers the
 * indication.
 */
static void lm_tcp_indicate_fin_received(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp
    )
{
    lm_tcp_con_t * rx_con;
    u8_t ip_version;
    MM_INIT_TCP_LOCK_HANDLE();

    /* Bug fix: sanity-check the arguments BEFORE tcp is dereferenced - the
     * original DbgMessage read tcp->cid first. */
    DbgBreakIf( ! ( pdev && tcp ) );
    DbgMessage(pdev, INFORMl4rx , "##lm_tcp_indicate_fin_received cid=%d\n", tcp->cid);

    ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
    LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].in_fin);
    rx_con = tcp->rx_con;

    mm_acquire_tcp_lock(pdev, rx_con);
    /* The indication is happening now - clear the "pending" marker. */
    rx_con->u.rx.flags &= ~TCP_CON_FIN_IND_PENDING;
    DbgBreakIf(rx_con->flags & TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED);
    rx_con->flags |= TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED;
    /* Graceful close: complete remaining rx buffers with SUCCESS. */
    lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_SUCCESS);
    mm_release_tcp_lock(pdev, rx_con);

    mm_tcp_indicate_fin_received(pdev, tcp);
}
/* lm_tcp_process_retrieve_indication_cqe
 * Asks the OS glue to upload the connection unless either con is in a state
 * that blocks the indication. The rx mask excludes TCP_REMOTE_FIN_RECEIVED
 * and the tx mask excludes TCP_FIN_REQ_COMPLETED - those states do not
 * block an upload request.
 */
void lm_tcp_process_retrieve_indication_cqe(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    l4_upload_reason_t    upload_reason)
{
    u32_t rx_block_mask = 0;
    u32_t tx_block_mask = 0;

    DbgMessage(pdev, INFORMl4, "###lm_tcp_process_retrieve_indication_cqe cid=%d upload_reason=%d\n", tcp->cid, upload_reason);

    /* Build the per-direction "blocks an upload indication" masks. */
    SET_FLAGS(rx_block_mask, TCP_RX_COMP_BLOCKED | TCP_UPLOAD_REQUESTED);
    RESET_FLAGS(rx_block_mask, TCP_REMOTE_FIN_RECEIVED);
    SET_FLAGS(tx_block_mask, TCP_TX_COMP_BLOCKED);
    RESET_FLAGS(tx_block_mask, TCP_FIN_REQ_COMPLETED);

    /* Already blocked (or already requested) - nothing to indicate. */
    if (GET_FLAGS(tcp->rx_con->flags, rx_block_mask) ||
        GET_FLAGS(tcp->tx_con->flags, tx_block_mask)) {
        return;
    }

    SET_FLAGS(tcp->rx_con->flags, TCP_UPLOAD_REQUESTED);
    DbgMessage(pdev, INFORMl4, "###Indicating UP: cid=%d upload_reason=%d\n", tcp->cid, upload_reason);
    mm_tcp_indicate_retrieve_indication(pdev, tcp, upload_reason);
}
/* FW completion of a remote FIN received on the RX path.
 * The FIN may be indicated to the miniport only after all buffered RX data
 * has been indicated; otherwise TCP_CON_FIN_IND_PENDING defers it.
 * 'upload' - when set and the peninsula still holds data, an upload
 * indication is requested instead of waiting. */
static void lm_tcp_rx_fin_received_complete(
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp,
u8_t upload
)
{
lm_tcp_con_t * rx_con;
u8_t indicate = 1;
u8_t is_empty_peninsula;
MM_INIT_TCP_LOCK_HANDLE();
DbgMessage(pdev, INFORMl4rx, "###lm_tcp_rx_fin_received_complete cid=%d\n", tcp->cid);
DbgBreakIf( ! (pdev && tcp) );
DbgBreakIf( tcp->hdr.status != STATE_STATUS_NORMAL && tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
rx_con = tcp->rx_con;
mm_acquire_tcp_lock(pdev, rx_con);
/* The DPC peninsula must be drained by now, and a FIN can only be
 * received once per connection. */
DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list) );
DbgBreakIf(rx_con->flags & TCP_REMOTE_FIN_RECEIVED);
rx_con->flags |= TCP_REMOTE_FIN_RECEIVED;
is_empty_peninsula = (rx_con->u.rx.gen_info.peninsula_nbytes > 0 ? 0 : 1);
/* Defer the FIN indication while buffered data or buffer indications are
 * still outstanding. */
if (!is_empty_peninsula || mm_tcp_indicating_bufs(rx_con) ) {
DbgMessage(pdev, INFORMl4, "lm_tcp_process_fin_received_cqe - postponing fin indication cid=%d\n", tcp->cid);
rx_con->u.rx.flags |= TCP_CON_FIN_IND_PENDING;
indicate = 0;
}
/* Record the FIN reception time, nudging the request time if needed so
 * the two timestamps never collide. */
tcp->tcp_state_calc.fin_reception_time = mm_get_current_time(pdev);
if (tcp->tcp_state_calc.fin_reception_time == tcp->tcp_state_calc.fin_request_time) {
tcp->tcp_state_calc.fin_request_time -= 1;
}
mm_release_tcp_lock(pdev, rx_con);
if (indicate)
{
lm_tcp_indicate_fin_received(pdev, tcp);
} else if(upload && !is_empty_peninsula)
{
/* FIN can't be indicated yet and the caller asked for an upload -
 * request that the connection be uploaded. */
lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
pdev->toe_info.stats.total_fin_upld_requested++;
}
}
/* Complete an "empty ramrod" slow-path request toward the miniport.
 * Runs under the global TOE lock: detaches the request from the connection
 * and completes it with success. */
static void lm_tcp_comp_empty_ramrod_request(
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t      * tcp)
{
    lm_tcp_slow_path_request_t * request;

    MM_ACQUIRE_TOE_LOCK(pdev);

    /* Neither direction may still be marked as half-completed. */
    DbgBreakIf(tcp->sp_flags & (SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX));

    request = tcp->sp_request;
    request->status = LM_STATUS_SUCCESS;
    tcp->sp_request = NULL;
    mm_tcp_comp_slow_path_request(pdev, tcp, request);

    MM_RELEASE_TOE_LOCK(pdev);
}
/* RX-side completion of an 'empty ramrod' (used to flush the pipe),
 * dispatched on the slow-path request type that posted it. Deferred
 * RST/FIN indications may now be deliverable. */
static void lm_tcp_rx_empty_ramrod_complete(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
IN u32_t sp_type)
{
u8_t indicate = 0;
DbgBreakIf(!tcp);
DbgMessage(pdev, INFORMl4rx | INFORMl4sp,
"###lm_tcp_process_empty_slow_path_rcqe cid=%d, request->type=%d\n",
tcp->cid, sp_type);
switch (sp_type) {
case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
case SP_REQUEST_PENDING_TX_RST:
/* Nothing to do on the RX side for these request types. */
break;
case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
/* A deferred remote-RST indication: mark the RX side ready under the
 * global TOE lock and indicate only when BOTH directions are marked. */
if ( tcp->rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING ) {
MM_ACQUIRE_TOE_LOCK(pdev);
tcp->sp_flags |= REMOTE_RST_INDICATED_RX;
if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
indicate = 1;
}
MM_RELEASE_TOE_LOCK(pdev);
if (indicate) {
lm_tcp_indicate_rst_received(pdev, tcp);
}
}
else if ( tcp->rx_con->u.rx.flags & TCP_CON_FIN_IND_PENDING ) {
/* A deferred FIN indication can now be delivered. */
lm_tcp_indicate_fin_received(pdev, tcp);
}
break;
default:
{
DbgMessage(pdev, FATAL,
"'empty ramrod' opcode in cqe doesn't fit with sp_request->type %d\n",
sp_type);
DbgBreak();
}
}
}
/* TX-side completion of an 'empty ramrod' (pipe flush), dispatched on the
 * slow-path request type that posted it. */
static void lm_tcp_tx_empty_ramrod_complete(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
IN u32_t sp_type)
{
u8_t indicate = 0;
MM_INIT_TCP_LOCK_HANDLE();
DbgBreakIf(!tcp);
DbgMessage(pdev, INFORMl4tx | INFORMl4sp,
"###lm_tcp_process_empty_slow_path_scqe cid=%d, request->type=%d\n",
tcp->cid, sp_type);
switch (sp_type) {
case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
/* The pipe is now flushed - abort whatever remains on the TX side. */
mm_acquire_tcp_lock(pdev, tcp->tx_con);
lm_tcp_abort_bufs(pdev,tcp,tcp->tx_con,LM_STATUS_ABORTED);
mm_release_tcp_lock(pdev, tcp->tx_con);
break;
case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
/* Nothing to do on the TX side. */
break;
case SP_REQUEST_PENDING_TX_RST:
/* A remote-RST indication was deferred as 'not safe' while TX buffers
 * were outstanding. Now that the pipe is flushed: mark the TX side
 * ready under the TOE lock, clear the not-safe flag under the TX
 * connection lock, and indicate only when both directions are ready. */
if (tcp->tx_con->u.tx.flags & TCP_CON_RST_IND_NOT_SAFE ) {
MM_ACQUIRE_TOE_LOCK(pdev);
tcp->sp_flags |= REMOTE_RST_INDICATED_TX;
if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
indicate = 1;
}
mm_acquire_tcp_lock(pdev, tcp->tx_con);
tcp->tx_con->u.tx.flags &= ~TCP_CON_RST_IND_NOT_SAFE;
mm_release_tcp_lock(pdev, tcp->tx_con);
MM_RELEASE_TOE_LOCK(pdev);
if (indicate) {
lm_tcp_indicate_rst_received(pdev, tcp);
}
}
break;
default:
{
DbgMessage(pdev, FATAL,
"'empty ramrod' opcode in cqe doesn't fit with sp_request->type %d\n",
sp_type);
DbgBreak();
}
}
}
/* Complete an abortive-disconnect (RST) slow-path request.
 * Moves the connection to ABORTED, aborts all posted buffers on both
 * sides, and completes the request - unless RX buffer indication is
 * currently in progress, in which case the completion is deferred via
 * tcp->sp_request_pending_completion. */
static void lm_tcp_comp_abortive_disconnect_request(
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp,
lm_tcp_slow_path_request_t * request
)
{
lm_tcp_con_t *rx_con, *tx_con;
u8_t delayed_rst = 0;
u8_t ip_version;
u8_t complete_sp_request = TRUE;
MM_INIT_TCP_LOCK_HANDLE();
DbgBreakIf( ! (pdev && tcp && request) );
/* Count the outgoing reset in the per-IP-version driver statistics. */
ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].out_resets);
rx_con = tcp->rx_con;
tx_con = tcp->tx_con;
MM_ACQUIRE_TOE_LOCK(pdev);
DbgBreakIf( ( tcp->hdr.status != STATE_STATUS_NORMAL ) && ( tcp->hdr.status != STATE_STATUS_ABORTED ) );
request->status = LM_STATUS_SUCCESS;
tcp->hdr.status = STATE_STATUS_ABORTED;
/* Remember the connection was reset, for later state calculation. */
tcp->tcp_state_calc.con_rst_flag = TRUE;
MM_RELEASE_TOE_LOCK(pdev);
/* TX side: a deferred 'unsafe' RST indication is moot now - abort all
 * posted TX buffers. */
mm_acquire_tcp_lock(pdev, tx_con);
tx_con->u.tx.flags &= ~ TCP_CON_RST_IND_NOT_SAFE;
lm_tcp_abort_bufs(pdev,tcp,tx_con, LM_STATUS_ABORTED);
mm_release_tcp_lock(pdev, tx_con);
mm_acquire_tcp_lock(pdev, rx_con);
/* If RX buffers are being indicated right now the request can't be
 * completed yet - defer completion (only supported when the platform
 * allows pending slow-path completion). */
if (mm_tcp_indicating_bufs(rx_con)) {
if (pdev->params.l4_support_pending_sp_req_complete) {
DbgBreakIf(DBG_BREAK_ON(ABORTIVE_DISCONNECT_DURING_IND));
complete_sp_request = FALSE;
tcp->sp_request_pending_completion = TRUE;
tcp->pending_abortive_disconnect++;
mm_atomic_inc(&pdev->toe_info.stats.total_aborive_disconnect_during_completion);
DbgMessage(pdev, INFORMl4sp, "Abortive disconnect completion during indication(%d)\n", tcp->cid);
} else {
DbgBreak();
}
}
if ( rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING ) {
delayed_rst = 1;
}
/* The local abort cancels any pending remote FIN/RST indications. */
rx_con->u.rx.flags &= ~ (TCP_CON_RST_IND_PENDING | TCP_CON_FIN_IND_PENDING);
lm_tcp_abort_bufs(pdev,tcp, rx_con, LM_STATUS_ABORTED);
mm_release_tcp_lock(pdev, rx_con);
if ( delayed_rst ) {
/* NOTE(review): intentionally empty - a remote RST that was pending
 * indication is swallowed by the local abort; confirm this is the
 * intended behavior. */
}
if (complete_sp_request) {
MM_ACQUIRE_TOE_LOCK(pdev);
DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
tcp->sp_request = NULL;
mm_tcp_comp_slow_path_request(pdev, tcp, request);
MM_RELEASE_TOE_LOCK(pdev);
}
}
/* FW completion of a remote RST received on the RX path.
 * If buffered data or buffer indications are outstanding the indication is
 * deferred (TCP_CON_RST_IND_PENDING); otherwise the RX side is marked
 * ready and the RST is indicated once the TX side is marked too. */
static void lm_tcp_rx_rst_received_complete (
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp
)
{
lm_tcp_con_t * rx_con;
u8_t indicate = 0;
MM_INIT_TCP_LOCK_HANDLE();
DbgMessage(pdev, INFORMl4rx , "###lm_tcp_process_rst_received_rcqe cid=%d\n", tcp->cid);
DbgBreakIf( ! (pdev && tcp) );
DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
rx_con = tcp->rx_con;
MM_ACQUIRE_TOE_LOCK(pdev);
mm_acquire_tcp_lock(pdev, rx_con);
/* The DPC peninsula must be drained, and a RST can only be received
 * once per connection. */
DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list) );
DbgBreakIf(rx_con->flags & TCP_REMOTE_RST_RECEIVED);
rx_con->flags |= TCP_REMOTE_RST_RECEIVED;
/* A RST overrides any pending FIN indication. */
rx_con->u.rx.flags &= ~ TCP_CON_FIN_IND_PENDING;
if (rx_con->u.rx.gen_info.peninsula_nbytes || mm_tcp_indicating_bufs(rx_con) ) {
DbgMessage(pdev, INFORMl4rx , "lm_tcp_process_rst_received_cqe - postponing rst indication cid=%d\n", tcp->cid);
rx_con->u.rx.flags |= TCP_CON_RST_IND_PENDING;
} else {
tcp->sp_flags |= REMOTE_RST_INDICATED_RX;
}
mm_release_tcp_lock(pdev, rx_con);
/* Indicate only when both directions are ready. */
if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
indicate = 1;
tcp->tcp_state_calc.con_rst_flag = TRUE;
}
MM_RELEASE_TOE_LOCK(pdev);
if ( indicate ) {
lm_tcp_indicate_rst_received(pdev,tcp);
}
}
/* FW completion of a remote RST received on the TX path.
 * If TX buffers are still outstanding the indication is not yet safe:
 * TCP_CON_RST_IND_NOT_SAFE is set and an empty ramrod is posted to flush
 * the pipe (unless another blocking ramrod is already posted). Otherwise
 * the TX side is marked ready and, once RX is marked too, the RST is
 * indicated. */
static void lm_tcp_tx_rst_received_complete (
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp
)
{
lm_tcp_con_t * tx_con;
lm_status_t lm_status;
u8_t indicate = 0;
u8_t send_empty_ramrod = 0;
u8_t upload_on_fail = 0;
MM_INIT_TCP_LOCK_HANDLE();
DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_rst_received_complete cid=%d\n", tcp->cid);
DbgBreakIf( ! (pdev && tcp) );
DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
tx_con = tcp->tx_con;
MM_ACQUIRE_TOE_LOCK(pdev);
mm_acquire_tcp_lock(pdev, tx_con);
/* A RST can only be received once per connection. */
DbgBreakIf(tx_con->flags & TCP_REMOTE_RST_RECEIVED);
tx_con->flags |= TCP_REMOTE_RST_RECEIVED;
if ( s_list_entry_cnt(&tx_con->active_tb_list) > 0 ) {
DbgMessage(pdev, INFORMl4rx, "TX lm_tcp_process_rst_received_cqe - postponing rst indication cid=%d sending empty ramrod\n", tcp->cid);
tx_con->u.tx.flags |= TCP_CON_RST_IND_NOT_SAFE;
/* Only flush with an empty ramrod if no other blocking ramrod
 * (RST/invalidate/terminate) is already posted. */
if (!(tx_con->flags & (TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED))) {
send_empty_ramrod = TRUE;
}
} else {
tcp->sp_flags |= REMOTE_RST_INDICATED_TX;
}
mm_release_tcp_lock(pdev, tx_con);
/* Decide, still under the TOE lock, whether both directions are ready to
 * indicate, or whether an upload must be requested if the ramrod post
 * below fails. */
if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
indicate = 1;
tcp->tcp_state_calc.con_rst_flag = TRUE;
} else if ( tcp->sp_flags & REMOTE_RST_INDICATED_RX ) {
upload_on_fail = 1;
tcp->tcp_state_calc.con_rst_flag = TRUE;
}
if ( indicate ) {
MM_RELEASE_TOE_LOCK(pdev);
lm_tcp_indicate_rst_received(pdev,tcp);
} else if (send_empty_ramrod) {
DbgMessage(pdev, INFORMl4tx, "Sending Empty Ramrod TX\n");
lm_status = mm_tcp_post_empty_slow_path_request(pdev, tcp, SP_REQUEST_PENDING_TX_RST);
MM_RELEASE_TOE_LOCK(pdev);
/* Ramrod couldn't be posted and RX already indicated - fall back to
 * requesting an upload of the connection. */
if ((lm_status != LM_STATUS_PENDING) && (lm_status != LM_STATUS_UPLOAD_IN_PROGRESS)) {
if (upload_on_fail) {
DbgMessage(pdev, WARNl4sp, "Couldn't send empty ramrod on TX when we needed\n");
mm_tcp_indicate_retrieve_indication(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
pdev->toe_info.stats.total_rst_upld_requested++;
}
}
}
else
{
MM_RELEASE_TOE_LOCK(pdev);
}
}
/* RX-side completion of the abortive-disconnect (RST) ramrod: marks the
 * RX half of the request as done under the RX connection lock. */
static void lm_tcp_rx_abortive_disconnect_ramrod_complete (
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp)
{
    lm_tcp_con_t * con;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, INFORMl4rx, "###lm_tcp_process_abortive_disconnect_request_rcqe cid=%d\n", tcp->cid);
    DbgBreakIf( ! (pdev && tcp) );

    con = tcp->rx_con;
    mm_acquire_tcp_lock(pdev, con);
    /* A non-empty peninsula must not end with a zero-length generic buffer. */
    DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list) &&
                (((lm_tcp_gen_buf_t *)(d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.peninsula_list)))->placed_bytes == 0));
    con->flags |= TCP_RST_REQ_COMPLETED;
    mm_release_tcp_lock(pdev, con);
}
/* TX-side completion of the abortive-disconnect (RST) ramrod: marks the
 * TX half of the request as done under the TX connection lock. */
static void lm_tcp_tx_abortive_disconnect_ramrod_complete (
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp)
{
    lm_tcp_con_t * con;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_abortive_disconnect_request_complete cid=%d\n", tcp->cid);
    DbgBreakIf( ! (pdev && tcp) );

    con = tcp->tx_con;
    mm_acquire_tcp_lock(pdev, con);
    con->flags |= TCP_RST_REQ_COMPLETED;
    mm_release_tcp_lock(pdev, con);
}
/* Complete an INVALIDATE slow-path request: move the connection to the
 * INVALIDATED state and return the request to the miniport with success.
 * Runs under the global TOE lock.
 *
 * Fix: validate pdev/tcp BEFORE the debug print dereferences tcp->cid
 * (the original DbgMessage ran ahead of the DbgBreakIf null check). */
static void lm_tcp_comp_invalidate_request(
    struct _lm_device_t        * pdev,
    lm_tcp_state_t             * tcp,
    lm_tcp_slow_path_request_t * request)
{
    DbgBreakIf(!pdev || !tcp);
    DbgMessage(pdev, INFORMl4sp, "### Completing invalidate request cid=%d\n", tcp->cid);

    MM_ACQUIRE_TOE_LOCK(pdev);
    /* Invalidate is legal only from NORMAL or ABORTED. */
    DbgBreakIf(tcp->hdr.status != STATE_STATUS_NORMAL && tcp->hdr.status != STATE_STATUS_ABORTED);
    tcp->hdr.status = STATE_STATUS_INVALIDATED;
    tcp->sp_request = NULL;
    request->status = LM_STATUS_SUCCESS;
    /* Both halves (TX/RX) must already have completed their parts. */
    DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
    mm_tcp_comp_slow_path_request(pdev, tcp, request);
    MM_RELEASE_TOE_LOCK(pdev);
}
/* TX-side completion of the invalidate ramrod: marks the TX half done,
 * exactly once, under the TX connection lock. */
static void lm_tcp_tx_invalidate_ramrod_complete (
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp)
{
    lm_tcp_con_t * con;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_invalidate_request_complete cid=%d\n", tcp->cid);
    DbgBreakIf( ! (pdev && tcp) );

    con = tcp->tx_con;
    mm_acquire_tcp_lock(pdev, con);
    DbgBreakIf(con->flags & TCP_INV_REQ_COMPLETED);
    con->flags |= TCP_INV_REQ_COMPLETED;
    mm_release_tcp_lock(pdev, con);
}
/* RX-side completion of the invalidate ramrod: marks the RX half done,
 * exactly once, under the RX connection lock. */
static void lm_tcp_rx_invalidate_ramrod_complete (
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp)
{
    lm_tcp_con_t * con;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, INFORMl4rx, "###lm_tcp_process_invalidate_request_rcqe cid=%d\n", tcp->cid);
    DbgBreakIf( ! (pdev && tcp) );

    con = tcp->rx_con;
    mm_acquire_tcp_lock(pdev, con);
    /* No buffer indication may be in flight, and a non-empty peninsula
     * must not end with a zero-length generic buffer. */
    DbgBreakIf( mm_tcp_indicating_bufs(con) );
    DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list) &&
                (((lm_tcp_gen_buf_t *)(d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.peninsula_list)))->placed_bytes == 0));
    DbgBreakIf(con->flags & TCP_INV_REQ_COMPLETED);
    con->flags |= TCP_INV_REQ_COMPLETED;
    mm_release_tcp_lock(pdev, con);
}
/* Snapshot the 'delegated' TCP/TOE state from a queried chip context into
 * tcp->tcp_delegated (used when a connection is uploaded back to the OS).
 * ctx_p points at a toe_context or an iscsi_context according to
 * tcp->ulp_type; FW timer values are converted to the OS tick resolution
 * (pdev->ofld_info.l4_params.ticks_per_second). */
static void lm_tcp_get_delegated(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
IN void * ctx_p
)
{
struct xstorm_toe_tcp_ag_context_section * xag_tcp = NULL;
struct tstorm_tcp_st_context_section * tst_tcp = NULL;
struct xstorm_tcp_context_section * xst_tcp = NULL;
struct tstorm_toe_tcp_ag_context_section * tag_tcp = NULL;
struct ustorm_toe_st_context * ust_toe = NULL;
struct cstorm_toe_st_context * cst_toe = NULL;
struct xstorm_toe_ag_context * xag_toe = NULL;
struct xstorm_toe_context_section * xst_toe = NULL;
u32_t send_wnd;
u8_t sanity_check;
/* The TOE and TCP aggregative sections must be layout-compatible: the
 * iSCSI branch below accesses them through casts to the TOE types. */
ASSERT_STATIC(sizeof(struct xstorm_toe_tcp_ag_context_section) == sizeof(struct xstorm_tcp_tcp_ag_context_section));
ASSERT_STATIC(sizeof(struct tstorm_toe_tcp_ag_context_section) == sizeof(struct tstorm_tcp_tcp_ag_context_section));
sanity_check = FALSE;
if (tcp->ulp_type == TOE_CONNECTION_TYPE)
{
/* Locate the per-storm context sections inside the TOE context. */
xst_tcp = &((struct toe_context *)ctx_p)->xstorm_st_context.context.common.tcp;
xag_tcp = &((struct toe_context *)ctx_p)->xstorm_ag_context.tcp;
tst_tcp = &((struct toe_context *)ctx_p)->tstorm_st_context.context.tcp;
tag_tcp = &((struct toe_context *)ctx_p)->tstorm_ag_context.tcp;
xst_toe = &((struct toe_context *)ctx_p)->xstorm_st_context.context.toe;
xag_toe = &((struct toe_context *)ctx_p)->xstorm_ag_context;
cst_toe = &((struct toe_context *)ctx_p)->cstorm_st_context.context;
ust_toe = &((struct toe_context *)ctx_p)->ustorm_st_context.context;
/* The driver's receive-window right edge must not fall behind the edge
 * last written to the doorbell data. */
if (S32_SUB(tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge, tcp->rx_con->db_data.rx->rcv_win_right_edge) < 0) {
tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->rx_con->db_data.rx->rcv_win_right_edge;
}
tcp->tcp_delegated.recv_win_seq = tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge +
tcp->rx_con->u.rx.gen_info.pending_indicated_bytes;
/* Run the cross-storm consistency checks only when no chip reset is in
 * progress. */
if (!lm_reset_is_inprogress(pdev))
{
sanity_check = TRUE;
}
}
else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
{
/* iSCSI context: only the TCP sections are needed; the aggregative
 * sections are accessed via the layout-compatible TOE types (see the
 * ASSERT_STATICs above). */
xst_tcp = &((struct iscsi_context *)ctx_p)->xstorm_st_context.common.tcp;
xag_tcp = (struct xstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)ctx_p)->xstorm_ag_context.tcp;
tst_tcp = &((struct iscsi_context *)ctx_p)->tstorm_st_context.tcp;
tag_tcp = (struct tstorm_toe_tcp_ag_context_section *)&((struct toe_context *)ctx_p)->tstorm_ag_context.tcp;
tcp->tcp_delegated.recv_win_seq = tag_tcp->wnd_right_edge - tst_tcp->rcv_nxt;
}
else
{
DbgBreakMsg("lm_tcp_get_delegated: Unsupported protocol type \n") ;
return;
}
if (sanity_check)
{
#if !defined(_VBD_CMD_)
/* Cross-storm consistency checks on the context snapshot (sequence
 * numbers and producer/consumer indices must agree between storms). */
DbgBreakIf(((struct toe_context *)ctx_p)->ustorm_ag_context.__state == 0);
DbgBreakIf(((struct toe_context *)ctx_p)->tstorm_ag_context.__state == 0);
DbgBreakIf(((struct toe_context *)ctx_p)->xstorm_ag_context.__state == 0);
DbgBreakIf(S32_SUB(xag_tcp->snd_nxt, xst_tcp->snd_max) > 0);
DbgBreakIf(S32_SUB(xag_tcp->snd_una, tag_tcp->snd_una) != 0);
DbgBreakIf(S32_SUB(tag_tcp->snd_una, tag_tcp->snd_max) > 0);
DbgBreakIf(S32_SUB(xag_toe->cmp_bd_start_seq, tag_tcp->snd_una) > 0);
DbgBreakIf(S32_SUB(tst_tcp->rcv_nxt, xag_tcp->ack_to_far_end) != 0);
DbgBreakIf(S16_SUB(xag_toe->cmp_bd_cons, cst_toe->bd_cons) > 0);
DbgBreakIf(S16_SUB(xst_toe->tx_bd_cons, xst_toe->bd_prod) > 0);
DbgBreakIf(S16_SUB(xst_toe->bd_prod, tcp->tx_con->db_data.tx->bds_prod) > 0);
DbgBreakIf(S32_SUB(tag_tcp->snd_una, xag_tcp->snd_nxt) > 0);
DbgBreakIf(S32_SUB(tst_tcp->timestamp_recent, xag_tcp->ts_to_echo) < 0);
DbgBreakIf(((struct toe_context *)ctx_p)->cstorm_ag_context.rel_seq != tag_tcp->snd_una);
DbgBreakIf((u32_t)(tcp->tcp_delegated.send_una + (u32_t)tcp->tx_con->bytes_comp_cnt + (u32_t)tcp->tx_con->bytes_trm_aborted_cnt - (u32_t)tcp->tx_con->bytes_aborted_cnt) != tag_tcp->snd_una);
#endif
}
/* Basic sequence-number state. */
tcp->tcp_delegated.recv_next = tst_tcp->rcv_nxt;
tcp->tcp_delegated.send_una = tag_tcp->snd_una;
tcp->tcp_delegated.send_next = xag_tcp->snd_nxt;
tcp->tcp_delegated.send_max = xst_tcp->snd_max;
/* send_win/send_cwin are absolute sequence numbers (size + send_una);
 * the segment-scaled window is un-scaled via snd_seg_scale. */
tcp->tcp_delegated.send_win = (tst_tcp->recent_seg_wnd << tcp->tcp_const.snd_seg_scale)
+ tcp->tcp_delegated.send_una;
send_wnd = tst_tcp->recent_seg_wnd << tcp->tcp_const.snd_seg_scale;
if ( tcp->tcp_delegated.max_send_win < tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) {
tcp->tcp_delegated.max_send_win = tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una;
}
tcp->tcp_delegated.send_wl1 = tst_tcp->prev_seg_seq;
tcp->tcp_delegated.send_cwin = tst_tcp->cwnd + tcp->tcp_delegated.send_una;
tcp->tcp_delegated.ss_thresh = tst_tcp->ss_thresh;
/* Extract smoothed RTT and RTT variation from the FW bit fields, then
 * convert to OS ticks. NOTE(review): the *8 / *4 scaling presumably
 * matches the FW's internal SRTT/variation representation - confirm. */
tcp->tcp_delegated.sm_rtt = (tst_tcp->flags1 & TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT)
>> TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT;
tcp->tcp_delegated.sm_delta = (tst_tcp->flags2 & TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION)
>> TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT;
tcp->tcp_delegated.sm_rtt =
lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second)*8;
tcp->tcp_delegated.sm_delta =
lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second)*4;
tcp->tcp_delegated.ts_recent = tst_tcp->timestamp_recent;
tcp->tcp_delegated.ts_recent_age =
lm_time_resolution(pdev, tst_tcp->timestamp_recent_time, TSEMI_CLK1_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
tcp->tcp_delegated.tstamp = xst_tcp->ts_time_diff;
tcp->tcp_delegated.total_rt =
lm_time_resolution(pdev, tst_tcp->retransmit_start_time, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
tcp->tcp_delegated.dup_ack_count = tst_tcp->dup_ack_count;
tcp->tcp_delegated.snd_wnd_probe_count = tst_tcp->persist_probe_count;
/* Idle connection (nothing in flight, window open): report keep-alive
 * state; otherwise report retransmit state. Timer values that are not
 * 0xffffffff yet exceed 0x8000000 are treated as already expired and
 * reported as 0. */
if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max && (send_wnd > 0)) {
if ( (tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE)) {
tcp->tcp_delegated.u.keep_alive.probe_cnt = tst_tcp->ka_probe_count;
tcp->tcp_delegated.u.keep_alive.timeout_delta =
lm_time_resolution(pdev, xag_tcp->ka_timer, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
if ((tcp->tcp_delegated.u.keep_alive.timeout_delta != 0xffffffff) &&
(tcp->tcp_delegated.u.keep_alive.timeout_delta > 0x8000000)) {
tcp->tcp_delegated.u.keep_alive.timeout_delta = 0;
}
} else {
tcp->tcp_delegated.u.keep_alive.probe_cnt = 0;
tcp->tcp_delegated.u.keep_alive.timeout_delta = 0xffffffff;
}
} else {
tcp->tcp_delegated.u.retransmit.num_retx = tst_tcp->retransmit_count;
if ((xag_tcp->rto_timer != 0xffffffff) && (xag_tcp->rto_timer > 0x8000000)) {
tcp->tcp_delegated.u.retransmit.retx_ms = 0;
} else {
tcp->tcp_delegated.u.retransmit.retx_ms = xag_tcp->rto_timer;
}
}
/* Derive the reported connection state (FIN-sent flag comes from the
 * xstorm TCP parameters) and count it in the upload statistics. */
tcp->tcp_delegated.con_state = lm_tcp_calc_state(pdev, tcp,
xst_tcp->tcp_params & XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG ? 1 : 0);
pdev->toe_info.stats.con_state_on_upload[tcp->tcp_delegated.con_state]++;
}
/* Initialize the common part of a slow-path request before posting:
 * attaches input data to update-style requests and sets the shared
 * post-function/context used by the slow-path infrastructure. */
void lm_init_sp_req_type(
    struct _lm_device_t        * pdev,
    lm_tcp_state_t             * tcp,
    lm_tcp_slow_path_request_t * lm_req,
    void                       * req_input_data)
{
    UNREFERENCED_PARAMETER_(pdev);

    switch (lm_req->type) {
    /* Update-style requests carry caller-provided input data. */
    case SP_REQUEST_UPDATE_TCP:
    case SP_REQUEST_UPDATE_PATH:
    case SP_REQUEST_UPDATE_NEIGH:
    case SP_REQUEST_UPDATE_PATH_RELINK:
        lm_req->sent_data.tcp_update_data.data = req_input_data;
        break;
    /* These request types need no additional input. */
    case SP_REQUEST_INITIATE_OFFLOAD:
    case SP_REQUEST_TERMINATE_OFFLOAD:
    case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
    case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
    case SP_REQUEST_PENDING_TX_RST:
    case SP_REQUEST_ABORTIVE_DISCONNECT:
    case SP_REQUEST_INVALIDATE:
        break;
    case SP_REQUEST_QUERY:
        DbgBreakMsg("GilR - NOT IMPLEMENTED!\n");
        break;
    default:
        DbgBreakMsg("Illegal slow path request type!\n");
    }

    lm_req->sp_req_common.req_post_func = (void *)lm_tcp_post_slow_path_request;
    lm_req->sp_req_common.req_post_ctx  = tcp;
}
/* Final stage of a connection upload, called after the QUERY ramrod
 * completed: aborts remaining buffers on both sides, collects buffered RX
 * data to return with the completion, snapshots the delegated TCP state
 * from the queried chip context, detaches the connection from its path
 * state and completes the slow-path request as TERMINATE_OFFLOAD.
 *
 * Fix: the TX abort status test used `tx_con->flags` for
 * TCP_CON_RST_IND_NOT_SAFE, but that flag lives in `tx_con->u.tx.flags`
 * everywhere else in this file (e.g. lm_tcp_tx_rst_received_complete);
 * test the correct field. */
static void _lm_tcp_comp_upload_tcp_request (
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t      * tcp
    )
{
    lm_tcp_con_t               * rx_con    = tcp->rx_con;
    lm_tcp_con_t               * tx_con    = tcp->tx_con;
    u8_t                         has_fin   = 0;
    u8_t                         has_rst   = 0;
    lm_tcp_slow_path_request_t * sp_req    = tcp->sp_request;
    lm_path_state_t            * path      = NULL;
    lm_status_t                  lm_status = LM_STATUS_SUCCESS;
#if 0
#if (DBG && !defined(_VBD_CMD_) && !defined(__USER_MODE_DEBUG))
    u32_t expect_rwin;
#endif
#endif
    MM_INIT_TCP_LOCK_HANDLE();

    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
        /* TX side: abort posted buffers. If a remote-RST indication was
         * deferred as 'not safe', complete them with CONNECTION_RESET. */
        mm_acquire_tcp_lock(pdev, tx_con);
        lm_tcp_abort_bufs(pdev, tcp, tx_con, (tx_con->u.tx.flags & TCP_CON_RST_IND_NOT_SAFE)? LM_STATUS_CONNECTION_RESET : LM_STATUS_UPLOAD_IN_PROGRESS);
        has_rst |= (tx_con->u.tx.flags & TCP_CON_RST_IND_NOT_SAFE) ? 1 : 0;
        tx_con->u.tx.flags &= ~(TCP_CON_RST_IND_NOT_SAFE);
        mm_release_tcp_lock(pdev, tx_con);

        /* RX side: abort posted buffers, consume any pending FIN/RST
         * indication flags, and collect buffered received data so it can
         * be returned with the upload completion. (has_fin is currently
         * collected but not consumed afterwards.) */
        mm_acquire_tcp_lock(pdev, rx_con);
        lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_UPLOAD_IN_PROGRESS);
        has_fin = rx_con->u.rx.flags & TCP_CON_FIN_IND_PENDING ? 1 : 0;
        has_rst |= (rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING) ? 1 : 0;
        rx_con->u.rx.flags &= ~(TCP_CON_FIN_IND_PENDING | TCP_CON_RST_IND_PENDING);
        lm_status = lm_tcp_rx_get_buffered_data_from_terminate(pdev, tcp,
                                                               &(tcp->sp_request->ret_data.tcp_upload_data.frag_list),
                                                               &(tcp->sp_request->ret_data.tcp_upload_data.ret_buf_ctx)
                                                               );
        mm_release_tcp_lock(pdev, rx_con);

        /* A pending RST is indicated now, outside the connection locks. */
        if ( has_rst ) {
            mm_tcp_indicate_rst_received(pdev, tcp);
        }
    }

    MM_ACQUIRE_TOE_LOCK(pdev);
    DbgBreakIf(!(tcp->sp_flags & SP_TCP_QRY_REQ_POSTED));
    tcp->sp_flags |= SP_TCP_QRY_REQ_COMP;

    /* Snapshot the delegated TCP state from the queried chip context. */
    lm_tcp_get_delegated(pdev, tcp, &tcp->sp_req_data.virt_addr->toe_ctx);

    tcp->sp_request = NULL;
    sp_req->status = lm_status;
    sp_req->type = SP_REQUEST_TERMINATE_OFFLOAD;

    /* Detach the connection from its path state and update the
     * currently-established statistics. */
    DbgBreakIf(tcp->path->num_dependents == 0);
    tcp->path->num_dependents--;
    if (TOE_CONNECTION_TYPE == tcp->ulp_type )
    {
        if( IP_VERSION_IPV4 == tcp->path->path_const.ip_version )
        {
            --pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
        }
        else if( IP_VERSION_IPV6 == tcp->path->path_const.ip_version )
        {
            --pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
        }
    }

    /* If this was the last dependent of a path waiting to be uploaded,
     * complete the path upload after this request. */
    if (tcp->path->hdr.status == STATE_STATUS_UPLOAD_PENDING &&
        tcp->path->num_dependents == 0) {
        path = tcp->path;
    }
    tcp->path = NULL;
#if 0
    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
#if (DBG && !defined(_VBD_CMD_) && !defined(__USER_MODE_DEBUG))
        expect_rwin = (u32_t) S32_SUB(
            tcp->tcp_delegated.recv_win_seq,
            tcp->tcp_delegated.recv_next);
        if(sp_req->ret_data.tcp_upload_data.frag_list)
        {
            expect_rwin += (u32_t)sp_req->ret_data.tcp_upload_data.frag_list->size;
        }
        if((tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED) ||
           (tcp->rx_con->flags & TCP_REMOTE_RST_RECEIVED))
        {
            DbgBreakIf(
                (expect_rwin != tcp->tcp_cached.initial_rcv_wnd) &&
                (expect_rwin != tcp->tcp_cached.initial_rcv_wnd - 1));
        }
        else
        {
            DbgBreakIf(expect_rwin != tcp->tcp_cached.initial_rcv_wnd);
        }
#endif
    }
#endif
    mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
    if (path) {
        DbgMessage(pdev, INFORMl4sp, "_lm_tcp_comp_upload_request: last tcp dependent of pending path %p\n", path);
        _lm_tcp_comp_upload_path_request(pdev, path);
    }
    MM_RELEASE_TOE_LOCK(pdev);
}
/* Iterate the offloaded-connection list, returning the next connection
 * (after 'tcp_state', or from the head when NULL) whose path matches
 * 'path_state'; NULL when none remains. */
lm_tcp_state_t * lm_tcp_get_next_path_dependent(
    struct _lm_device_t *pdev,
    void                *path_state,
    lm_tcp_state_t      *tcp_state)
{
    /* Resume the walk after the given state, or start at the list head. */
    if (tcp_state == NULL) {
        tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
    } else {
        tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
    }

    for ( ; tcp_state != NULL;
            tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link)) {
        if (tcp_state->path == (lm_path_state_t *)path_state) {
            return tcp_state;
        }
    }
    return NULL;
}
/* Iterate the offloaded-connection list, returning the next connection
 * (after 'tcp_state', or from the head when NULL) whose path points at
 * 'neigh_state'; NULL when none remains. */
lm_tcp_state_t * lm_tcp_get_next_neigh_dependent(
    struct _lm_device_t *pdev,
    void                *neigh_state,
    lm_tcp_state_t      *tcp_state)
{
    /* Resume the walk after the given state, or start at the list head. */
    if (tcp_state == NULL) {
        tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
    } else {
        tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
    }

    for ( ; tcp_state != NULL;
            tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link)) {
        /* A connection with no path cannot match any neighbor. */
        if (tcp_state->path && (tcp_state->path->neigh == (lm_neigh_state_t *)neigh_state)) {
            return tcp_state;
        }
    }
    return NULL;
}
/* Completion of an UPDATE ramrod (neigh / path / tcp / path-relink):
 * completes the slow-path request with success. For a TCP update on a
 * connection whose doorbell data ignores window updates, the SWS window
 * value is re-posted so the FW picks up the window after the update. */
void lm_tcp_update_ramrod_complete(lm_device_t * pdev, lm_tcp_state_t * tcp)
{
lm_tcp_slow_path_request_t *sp_req;
MM_INIT_TCP_LOCK_HANDLE();
DbgMessage(pdev, INFORMl4sp, "###lm_tcp_update_ramrod_complete cid=%d \n", tcp->cid);
MM_ACQUIRE_TOE_LOCK(pdev);
/* An update may only complete on a NORMAL/ABORTED connection with an
 * outstanding UPDATE-type request. */
DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
(tcp->hdr.status != STATE_STATUS_ABORTED));
DbgBreakIf(tcp->sp_request == NULL);
DbgBreakIf((tcp->sp_request->type != SP_REQUEST_UPDATE_NEIGH) &&
(tcp->sp_request->type != SP_REQUEST_UPDATE_PATH) &&
(tcp->sp_request->type != SP_REQUEST_UPDATE_TCP) &&
(tcp->sp_request->type != SP_REQUEST_UPDATE_PATH_RELINK));
sp_req = tcp->sp_request;
DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
sp_req->status = LM_STATUS_SUCCESS;
tcp->sp_request = NULL;
mm_acquire_tcp_lock(pdev, tcp->rx_con);
/* TCP update with window-update doorbells ignored: push the FW window
 * value via an SWS post so the updated parameters take effect. */
if ((sp_req->type == SP_REQUEST_UPDATE_TCP) && (GET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES)))
{
lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp->rx_con->dpc_info.dpc_fw_wnd_after_dec, TCP_RX_POST_SWS_SET);
}
mm_release_tcp_lock(pdev, tcp->rx_con);
mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
MM_RELEASE_TOE_LOCK(pdev);
}
/* Completion of a QUERY ramrod. Only the upload flow is supported: when
 * the connection is pending upload, finish the upload; any other state is
 * unimplemented. */
void lm_tcp_query_ramrod_complete(
    IN struct _lm_device_t * pdev,
    IN lm_tcp_state_t      * tcp
    )
{
    DbgMessage(pdev, VERBOSEl4, "## lm_tcp_query_ramrod_comp\n");
    /* A QUERY completion requires a matching outstanding QUERY request. */
    DbgBreakIf(! tcp->sp_request );
    DbgBreakIf(tcp->sp_request->type != SP_REQUEST_QUERY);

    if (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) {
        DbgBreakMsg("Vladz: Not implemented yet!\n");
        return;
    }
    _lm_tcp_comp_upload_tcp_request(pdev, tcp);
}
/* Debug dump of all offloaded TCP connections: prints a line for every
 * connection that is not in NORMAL state, still has an uncompleted
 * slow-path request, or has pending TX/RX bytes, followed by per-status
 * connection counters. */
void lm_tcp_internal_query(
    IN struct _lm_device_t * pdev)
{
    lm_tcp_state_t *tcp;
    u32_t per_status[STATE_STATUS_ERR+1] = {0};
    u32_t st, total, idx;

    DbgMessage(pdev, FATAL, "## lm_tcp_debug_query START version %d.%d.%d\n",
               LM_DRIVER_MAJOR_VER, LM_DRIVER_MINOR_VER, LM_DRIVER_FIX_NUM);

    total = d_list_entry_cnt(&pdev->toe_info.state_blk.tcp_list);

    for (tcp = (lm_tcp_state_t *)d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
         tcp != NULL;
         tcp = (lm_tcp_state_t *)d_list_next_entry((d_list_entry_t*)tcp)) {
        st = tcp->hdr.status;
        per_status[st]++;
        if (st != STATE_STATUS_NORMAL) {
            DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has status=%d (!= normal)\n",
                       tcp, tcp->cid, st);
        }
        if (tcp->sp_request) {
            DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has slow path request of type %d, not completed by FW (sp comp flags=0x%x\n",
                       tcp, tcp->cid, tcp->sp_request->type, tcp->sp_flags);
        }
        if (tcp->tx_con->bytes_post_cnt != tcp->tx_con->bytes_comp_cnt) {
            DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has TX pending bytes (%d). (con->flags=0x%x)\n",
                       tcp, tcp->cid,
                       S64_SUB(tcp->tx_con->bytes_post_cnt, tcp->tx_con->bytes_comp_cnt),
                       tcp->tx_con->flags);
        }
        if (tcp->rx_con->bytes_post_cnt != tcp->rx_con->bytes_comp_cnt) {
            DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has RX pending bytes (%d). (con->flags=0x%x)\n",
                       tcp, tcp->cid,
                       S64_SUB(tcp->rx_con->bytes_post_cnt, tcp->rx_con->bytes_comp_cnt),
                       tcp->rx_con->flags);
        }
    }

    DbgMessage(pdev, FATAL, "# num offloaded connections=%d\n", total);
    for (idx = 0; idx < STATE_STATUS_ERR+1; idx++) {
        if (per_status[idx]) {
            DbgMessage(pdev, FATAL, "# num connections in status %d=%d\n", idx, per_status[idx]);
        }
    }
    DbgMessage(pdev, FATAL, "## lm_tcp_debug_query END\n");
}
/* Handle a FW close/upload notification: records the close flag under the
 * global TOE lock, then forwards the upload request to the miniport and
 * counts it in the statistics. */
void lm_tcp_upld_close_received_complete(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    l4_upload_reason_t    upload_reason)
{
    DbgMessage(pdev, INFORMl4sp , "###lm_tcp_drv_upl_received_complete cid=%d \n", tcp->cid);

    MM_ACQUIRE_TOE_LOCK(pdev);
    tcp->tcp_state_calc.con_upld_close_flag = TRUE;
    MM_RELEASE_TOE_LOCK(pdev);

    lm_tcp_process_retrieve_indication_cqe(pdev, tcp, upload_reason);
    pdev->toe_info.stats.total_close_upld_requested++;
}
/* TX-side slow-path completion dispatcher, invoked from the DPC with the
 * connection's snapshot of completion flags. Handles a remote RST received
 * on TX and the TX half of ramrod completions. A ramrod request is fully
 * completed toward the miniport only after BOTH halves finished
 * (SP_REQUEST_COMPLETED_TX/RX); the second half to finish performs the
 * completion and returns the ramrod credit via lm_sq_complete(). */
void lm_tcp_tx_complete_tcp_sp(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
IN lm_tcp_con_t * con)
{
u8_t complete_ramrod;
u32_t sp_type,sp_flags,flags,snapshot_flags;
lm_tcp_slow_path_request_t * request = NULL;
/* NOTE(review): snapshot_flags/sp_flags/flags are written but never read
 * afterwards - presumably kept for debugger visibility; confirm. */
snapshot_flags = con->dpc_info.snapshot_flags;
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RESET_RECV) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RESET_RECV;
lm_tcp_tx_rst_received_complete(pdev, con->tcp_state);
}
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RAMROD_CMP) {
/* Ramrod completion: clear all remaining snapshot flags and dispatch
 * on the outstanding request type (read under the TOE lock). */
con->dpc_info.snapshot_flags = 0;
complete_ramrod = FALSE;
MM_ACQUIRE_TOE_LOCK(pdev);
sp_type = tcp->sp_request->type;
MM_RELEASE_TOE_LOCK(pdev);
switch(sp_type) {
case SP_REQUEST_ABORTIVE_DISCONNECT:
lm_tcp_tx_abortive_disconnect_ramrod_complete(pdev, tcp);
break;
case SP_REQUEST_INVALIDATE:
lm_tcp_tx_invalidate_ramrod_complete(pdev, tcp);
break;
case SP_REQUEST_TERMINATE1_OFFLOAD:
lm_tcp_tx_terminate_ramrod_complete(pdev, tcp);
break;
case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
case SP_REQUEST_PENDING_TX_RST:
lm_tcp_tx_empty_ramrod_complete(pdev, tcp, sp_type);
break;
default:
DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
DbgBreak();
}
/* Mark the TX half completed; if RX completed already, this context
 * performs the full request completion. */
MM_ACQUIRE_TOE_LOCK(pdev);
DbgBreakIf(sp_type != tcp->sp_request->type);
tcp->sp_flags |= SP_REQUEST_COMPLETED_TX;
if ( tcp->sp_flags & SP_REQUEST_COMPLETED_RX ) {
complete_ramrod = TRUE;
tcp->sp_flags &= ~ ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX );
}
sp_flags = tcp->sp_flags;
flags = tcp->tx_con->flags;
MM_RELEASE_TOE_LOCK(pdev);
if (complete_ramrod) {
request = tcp->sp_request;
DbgBreakIf(request == NULL);
/* Complete the request toward the miniport and return the ramrod
 * credit to the slow-path queue. */
switch(sp_type) {
case SP_REQUEST_ABORTIVE_DISCONNECT:
DbgBreakIf(request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
lm_tcp_comp_abortive_disconnect_request(pdev, tcp, request);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RESET_SEND, tcp->ulp_type, tcp->cid);
break;
case SP_REQUEST_INVALIDATE:
DbgBreakIf(request->type != SP_REQUEST_INVALIDATE);
lm_tcp_comp_invalidate_request(pdev, tcp, request);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INVALIDATE, tcp->ulp_type, tcp->cid);
break;
case SP_REQUEST_TERMINATE1_OFFLOAD:
DbgBreakIf(request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
lm_tcp_terminate_ramrod_complete(pdev, tcp);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_TERMINATE, tcp->ulp_type, tcp->cid);
break;
case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
case SP_REQUEST_PENDING_TX_RST:
lm_tcp_comp_empty_ramrod_request(pdev, tcp);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_EMPTY_RAMROD, tcp->ulp_type, tcp->cid);
break;
default:
DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
DbgBreak();
}
}
}
}
/* lm_tcp_rx_complete_tcp_sp
 * Completes the slow-path events recorded for this connection's RX side
 * during CQE processing: FIN / RST receive indications, upload-request
 * indications and, last, a ramrod completion. Each event is consumed by
 * clearing its bit from con->dpc_info.snapshot_flags.
 * The TOE lock is taken only around the tcp->sp_request / tcp->sp_flags
 * bookkeeping that is shared with the TX-side completion path. */
void lm_tcp_rx_complete_tcp_sp(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
IN lm_tcp_con_t * con
)
{
u8_t complete_ramrod;
u32_t sp_type,sp_flags,flags,snapshot_flags;
lm_tcp_slow_path_request_t * request = NULL;
u32_t cid;
u8_t ulp_type;
/* NOTE(review): snapshot_flags / sp_flags / flags are captured but never
 * read again in this function - presumably kept for debugger visibility;
 * confirm before removing. */
snapshot_flags = con->dpc_info.snapshot_flags;
/* Remote FIN received (third argument 0: not part of an upload). */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_FIN_RECV) {
lm_tcp_rx_fin_received_complete(pdev, tcp, 0);
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_FIN_RECV;
}
/* Remote FIN received together with an upload request (third argument 1). */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_FIN_RECV_UPL) {
lm_tcp_rx_fin_received_complete(pdev, tcp, 1);
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_FIN_RECV_UPL;
}
DbgMessage(pdev, INFORMl4rx, "lm_tcp_rx_complete_tcp_sp tcp=%p cid=%d \n", tcp, tcp->cid);
/* Remote RST received. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RESET_RECV) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RESET_RECV;
lm_tcp_rx_rst_received_complete(pdev, tcp);
}
/* Upload-request indications. When any of the "primary" reasons below is
 * pending, the isle-limit reasons are dropped (cleared without action);
 * otherwise the isle-limit reasons are handled in the else-if branches. */
if (con->dpc_info.snapshot_flags & (LM_TCP_DPC_URG | LM_TCP_DPC_RT_TO | LM_TCP_DPC_KA_TO | LM_TCP_DPC_DBT_RE | LM_TCP_DPC_OPT_ERR | LM_TCP_DPC_UPLD_CLOSE)) {
con->dpc_info.snapshot_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
/* Urgent data seen - request upload of the connection. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_URG) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_URG;
lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_URG);
}
/* Retransmit timeout - request upload. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RT_TO) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RT_TO;
lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_RETRANSMIT_TIMEOUT);
}
/* Keep-alive timeout - completed via the "upload close" path. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_KA_TO) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_KA_TO;
lm_tcp_upld_close_received_complete(pdev, tcp, L4_UPLOAD_REASON_KEEP_ALIVE_TIMEOUT);
}
/* Doubt-reachability - request upload and count it. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_DBT_RE) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_DBT_RE;
lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
pdev->toe_info.stats.total_dbt_upld_requested++;
}
/* TCP option error - request upload and count it. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_OPT_ERR) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_OPT_ERR;
lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
pdev->toe_info.stats.total_opt_upld_requested++;
}
/* FW asked to close/upload the connection. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_UPLD_CLOSE) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_UPLD_CLOSE;
lm_tcp_upld_close_received_complete(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
}
/* Isle grew too big - request upload (also clears too-many-isles). */
} else if (con->dpc_info.snapshot_flags & LM_TCP_DPC_TOO_BIG_ISLE) {
con->dpc_info.snapshot_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
pdev->toe_info.stats.total_big_isle_upld_requesed++;
/* Too many out-of-order isles - request upload. */
} else if (con->dpc_info.snapshot_flags & LM_TCP_DPC_TOO_MANY_ISLES) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_TOO_MANY_ISLES;
lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
pdev->toe_info.stats.total_many_isles_upld_requesed++;
}
/* Ramrod completion - must be the last remaining snapshot flag. */
if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RAMROD_CMP) {
con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RAMROD_CMP;
DbgBreakIf(con->dpc_info.snapshot_flags != 0);
/* Cache cid/ulp_type up front: the completion callbacks below may
 * release the tcp state, so tcp must not be touched afterwards in
 * the RX-only cases. */
cid = tcp->cid;
ulp_type = tcp->ulp_type;
/* RX-only ramrods: completed entirely here, no TX-side rendezvous. */
switch (tcp->sp_request->type) {
case SP_REQUEST_UPDATE_NEIGH:
case SP_REQUEST_UPDATE_PATH:
case SP_REQUEST_UPDATE_TCP:
case SP_REQUEST_UPDATE_PATH_RELINK:
lm_tcp_update_ramrod_complete(pdev, tcp);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_UPDATE, ulp_type, cid);
return;
case SP_REQUEST_QUERY:
lm_tcp_query_ramrod_complete(pdev, tcp);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_QUERY, ulp_type, cid);
return;
case SP_REQUEST_TERMINATE_OFFLOAD:
lm_tcp_searcher_ramrod_complete(pdev, tcp);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_SEARCHER_DELETE, ulp_type, cid);
return;
/* An initiate-offload completing on this path is completed with a
 * license-failure status (the offload was not accepted). */
case SP_REQUEST_INITIATE_OFFLOAD:
lm_tcp_comp_initiate_offload_request(pdev, tcp, TOE_INITIATE_OFFLOAD_RAMROD_DATA_LICENSE_FAILURE);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD, tcp->ulp_type, cid);
return;
}
/* The remaining ramrod types complete on both the RX and TX paths.
 * The two sides rendezvous via tcp->sp_flags under the TOE lock; the
 * side that arrives last performs the final request completion. */
complete_ramrod = FALSE;
MM_ACQUIRE_TOE_LOCK(pdev);
sp_type = tcp->sp_request->type;
MM_RELEASE_TOE_LOCK(pdev);
/* Per-connection RX-side processing for this ramrod type. */
switch(sp_type) {
case SP_REQUEST_ABORTIVE_DISCONNECT:
lm_tcp_rx_abortive_disconnect_ramrod_complete(pdev, tcp);
break;
case SP_REQUEST_INVALIDATE:
lm_tcp_rx_invalidate_ramrod_complete(pdev, tcp);
break;
case SP_REQUEST_TERMINATE1_OFFLOAD:
lm_tcp_rx_terminate_ramrod_complete(pdev, tcp);
break;
case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
case SP_REQUEST_PENDING_TX_RST:
lm_tcp_rx_empty_ramrod_complete(pdev,tcp, sp_type);
break;
default:
DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
DbgBreak();
}
/* Mark the RX side done; if TX already completed, this side finishes
 * the request. Both bits are cleared before releasing the lock so the
 * request can be reused. */
MM_ACQUIRE_TOE_LOCK(pdev);
DbgBreakIf(sp_type != tcp->sp_request->type);
tcp->sp_flags |= SP_REQUEST_COMPLETED_RX;
if ( tcp->sp_flags & SP_REQUEST_COMPLETED_TX ) {
complete_ramrod = TRUE;
tcp->sp_flags &= ~ ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX );
}
sp_flags = tcp->sp_flags;
flags = tcp->rx_con->flags;
MM_RELEASE_TOE_LOCK(pdev);
/* Last side in: complete the slow-path request and release the SQ slot. */
if (complete_ramrod) {
request = tcp->sp_request;
DbgBreakIf(request == NULL);
switch(sp_type) {
case SP_REQUEST_ABORTIVE_DISCONNECT:
DbgBreakIf(request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
lm_tcp_comp_abortive_disconnect_request(pdev, tcp, request);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RESET_SEND, tcp->ulp_type, tcp->cid);
break;
case SP_REQUEST_INVALIDATE:
DbgBreakIf(request->type != SP_REQUEST_INVALIDATE);
lm_tcp_comp_invalidate_request(pdev, tcp, request);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INVALIDATE, tcp->ulp_type, tcp->cid);
break;
case SP_REQUEST_TERMINATE1_OFFLOAD:
DbgBreakIf(request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
lm_tcp_terminate_ramrod_complete(pdev, tcp);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_TERMINATE, tcp->ulp_type, tcp->cid);
break;
case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
case SP_REQUEST_PENDING_TX_RST:
lm_tcp_comp_empty_ramrod_request(pdev, tcp);
lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_EMPTY_RAMROD, tcp->ulp_type, tcp->cid);
break;
default:
DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
DbgBreak();
}
}
}
}
#define MSL 4
/* Derives the L4 (miniport-visible) TCP connection state from the
 * delegated send parameters, the recorded FIN request/reception/completion
 * timestamps and the reset / upload-close flags.
 *
 * fin_was_sent - nonzero when our FIN has actually been transmitted.
 * Returns the computed l4_tcp_con_state_t value. */
l4_tcp_con_state_t lm_tcp_calc_state (
    lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    u8_t fin_was_sent
    )
{
    enum {
        NO_CLOSE = 0,
        ACTIVE_CLOSE,
        PASSIVE_CLOSE,
        PASSIVE_BY_ACTIVE_CLOSE
    } closing_type;

    u32_t snd_max             = tcp->tcp_delegated.send_max;
    u32_t snd_una             = tcp->tcp_delegated.send_una;
    u8_t  all_sent_acked      = (snd_una == snd_max);
    u64_t fin_completed_time  = tcp->tcp_state_calc.fin_completed_time;
    u64_t fin_reception_time  = tcp->tcp_state_calc.fin_reception_time;
    u64_t fin_request_time    = tcp->tcp_state_calc.fin_request_time;
    u64_t tw_entry_time;

    /* A reset or an upload-on-close overrides everything else. */
    if (tcp->tcp_state_calc.con_rst_flag || tcp->tcp_state_calc.con_upld_close_flag) {
        return L4_TCP_CON_STATE_CLOSED;
    }

    /* Classify the close sequence from the FIN timestamps:
     * - remote FIN seen first (or no local FIN requested) -> passive close
     * - remote FIN seen after a local FIN request         -> passive-by-active
     * - only a local FIN requested                        -> active close
     * - no FIN activity at all                            -> no close */
    if (fin_reception_time != 0) {
        if ((fin_request_time == 0) || (fin_reception_time < fin_request_time)) {
            closing_type = PASSIVE_CLOSE;
        } else {
            closing_type = PASSIVE_BY_ACTIVE_CLOSE;
        }
    } else {
        closing_type = (fin_request_time > 0) ? ACTIVE_CLOSE : NO_CLOSE;
    }

    switch (closing_type) {
    case ACTIVE_CLOSE:
        if (!fin_was_sent) {
            /* FIN requested but not yet on the wire: still established. */
            break;
        }
        return all_sent_acked ? L4_TCP_CON_STATE_FIN_WAIT2
                              : L4_TCP_CON_STATE_FIN_WAIT1;

    case PASSIVE_BY_ACTIVE_CLOSE:
        if (!fin_was_sent) {
            return L4_TCP_CON_STATE_CLOSE_WAIT;
        }
        if (!all_sent_acked) {
            return L4_TCP_CON_STATE_CLOSING;
        }
        /* TIME-WAIT starts at the later of FIN-completed / FIN-received;
         * the connection is fully closed after 2*MSL. */
        tw_entry_time = (fin_completed_time > fin_reception_time) ?
            fin_completed_time : fin_reception_time;
        if (mm_get_current_time(pdev) - tw_entry_time >
            2 * pdev->ofld_info.l4_params.ticks_per_second * MSL) {
            return L4_TCP_CON_STATE_CLOSED;
        }
        return L4_TCP_CON_STATE_TIME_WAIT;

    case PASSIVE_CLOSE:
        if (!fin_was_sent) {
            return L4_TCP_CON_STATE_CLOSE_WAIT;
        }
        return all_sent_acked ? L4_TCP_CON_STATE_CLOSED
                              : L4_TCP_CON_STATE_LAST_ACK;

    case NO_CLOSE:
    default:
        break;
    }

    return L4_TCP_CON_STATE_ESTABLISHED;
}
/* lm_tcp_clear_grqs
 * Returns all generic receive buffers held by every RSS GRQ back to the
 * OS and resets the GRQ state, as part of tearing down / resetting the
 * TOE. Outside of a reset flow, all offload state lists are expected to
 * be empty by the time this is called. */
void lm_tcp_clear_grqs(lm_device_t * pdev)
{
lm_tcp_grq_t * grq;
u8_t idx;
DbgBreakIf(!(pdev->params.ofld_cap & LM_OFFLOAD_CHIMNEY));
/* Unless a reset is in progress, no tcp/path/neigh state may remain. */
if (!lm_reset_is_inprogress(pdev)){
DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.tcp_list));
DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.path_list));
DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.neigh_list));
}
/* Sanity: the 32-bit intmem writes below assume 4-byte FW fields. */
if (IS_PFDEV(pdev)) {
DbgBreakIf(USTORM_TOE_GRQ_CONS_PTR_LO_SIZE != 4);
DbgBreakIf(USTORM_TOE_GRQ_CONS_PTR_HI_SIZE != 4);
}
/* Pass 1: under each GRQ lock, stop compensate-on-alloc so concurrent
 * buffer-allocation callbacks no longer refill these GRQs. */
LM_TOE_FOREACH_RSS_IDX(pdev, idx)
{
grq = &pdev->toe_info.grqs[idx];
MM_ACQUIRE_TOE_GRQ_LOCK(pdev, idx);
grq->grq_compensate_on_alloc = FALSE;
MM_RELEASE_TOE_GRQ_LOCK(pdev, idx);
}
/* Pass 2: zero the FW GRQ consumer pointers (PF only), then return all
 * aux and active generic buffers and reset the active BD chain. */
LM_TOE_FOREACH_RSS_IDX(pdev, idx)
{
if (IS_PFDEV(pdev)) {
LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,idx), PORT_ID(pdev)), 0, BAR_USTRORM_INTMEM);
LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,idx), PORT_ID(pdev)), 0, BAR_USTRORM_INTMEM);
}
grq = &pdev->toe_info.grqs[idx];
if (!d_list_is_empty(&grq->aux_gen_list)) {
mm_tcp_return_list_of_gen_bufs(pdev, &grq->aux_gen_list, 0, NON_EXISTENT_SB_IDX);
d_list_clear(&grq->aux_gen_list);
}
if (!d_list_is_empty(&grq->active_gen_list)) {
mm_tcp_return_list_of_gen_bufs(pdev, &grq->active_gen_list, 0, NON_EXISTENT_SB_IDX);
d_list_clear(&grq->active_gen_list);
lm_bd_chain_reset(pdev, &grq->bd_chain);
}
}
}
/* lm_tcp_update_rss
 * Propagates a new RSS indirection table to the TOE firmware.
 * The caller's table (table_size entries) is replicated across the full
 * TOE_INDIRECTION_TABLE_SIZE firmware table; a TOE_RSS_UPDATE ramrod is
 * posted only when at least one entry actually changed.
 *
 * chain_indirection_table - caller-chain indices, one per hash bucket.
 * table_size              - number of entries in the caller's table;
 *                           must be nonzero and fit in the FW table.
 * enable                  - nonzero when RSS is being enabled (raises the
 *                           GRQ high-BD threshold on all RSS chains).
 *
 * Returns LM_STATUS_PENDING when a ramrod was posted, LM_STATUS_SUCCESS
 * when no update was needed (or L4 RSS is disabled), or the failure
 * status from lm_command_post. */
lm_status_t lm_tcp_update_rss(struct _lm_device_t * pdev, u8_t * chain_indirection_table,
                              u32_t table_size, u8_t enable)
{
    struct toe_rss_update_ramrod_data *data = pdev->toe_info.rss_update_data;
    lm_status_t lm_status   = LM_STATUS_SUCCESS;
    u8_t        value       = 0;
    u8_t        send_ramrod = 0;
    u8_t        rss_idx     = 0;
    u16_t       bitmap      = 0;
    u8_t        i,j;

    /* Nothing to do when L4 RSS is off or the ramrod data was never allocated. */
    if (pdev->params.l4_enable_rss == L4_RSS_DISABLED || data == NULL)
    {
        return LM_STATUS_SUCCESS;
    }

    /* table_size is used as a divisor and replication unit below: reject 0
     * (division by zero) and oversized tables (the expansion loop would run
     * zero times, leaving a stale FW table). */
    if ((table_size == 0) || (table_size > TOE_INDIRECTION_TABLE_SIZE))
    {
        DbgBreak();
        return LM_STATUS_INVALID_PARAMETER;
    }

    DbgBreakIf(pdev->params.l4_enable_rss != L4_RSS_DYNAMIC);

    if (enable)
    {
        /* With enough GRQ pages, raise the high threshold on every RSS
         * chain so each GRQ keeps more BDs posted. */
        if (pdev->params.l4_grq_page_cnt > 2)
        {
            LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx)
            {
                pdev->toe_info.grqs[rss_idx].high_bds_threshold = 2 * 512;
            }
        }
    }
    else
    {
        pdev->toe_info.grqs[LM_TOE_BASE_RSS_ID(pdev)].high_bds_threshold = 0;
    }

    /* Replicate the caller's table across the FW-sized table, translating
     * each chain index to its FW RSS id; remember whether anything changed. */
    for (j = 0; j < TOE_INDIRECTION_TABLE_SIZE/table_size; j++)
    {
        for (i = 0; i < table_size; i++)
        {
            value = LM_TOE_FW_RSS_ID(pdev,chain_indirection_table[i]);
            if (pdev->toe_info.indirection_table[(j*table_size)+i] != value) {
                pdev->toe_info.indirection_table[(j*table_size)+i] = value;
                send_ramrod = TRUE;
            }
        }
    }

    if (send_ramrod)
    {
        /* Reset the update-completion accounting, then count one completion
         * beyond the per-RSS-chain ones - presumably for the base chain's
         * ramrod itself; confirm against the ETH RSS-update flow. */
        pdev->params.update_comp_cnt = 0;
        pdev->params.update_suspend_cnt = 0;
        pdev->params.update_toe_comp_cnt = 0;
        pdev->params.update_comp_cnt++;
        pdev->params.update_suspend_cnt++;

        /* Bitmap of FW RSS ids that participate in the update. */
        LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx)
        {
            bitmap |= (1<<LM_TOE_FW_RSS_ID(pdev,rss_idx));
        }

        mm_memcpy(data->indirection_table, pdev->toe_info.indirection_table, sizeof(data->indirection_table));
        data->toe_rss_bitmap = bitmap;

        pdev->params.update_comp_cnt += pdev->params.l4_rss_chain_cnt;
        pdev->params.update_suspend_cnt += pdev->params.l4_rss_chain_cnt;
        pdev->params.update_toe_comp_cnt = pdev->params.l4_rss_chain_cnt;

        lm_status = lm_command_post(pdev,
                                    LM_TOE_FW_RSS_ID(pdev, LM_TOE_BASE_RSS_ID(pdev)),
                                    RAMROD_OPCODE_TOE_RSS_UPDATE,
                                    CMD_PRIORITY_MEDIUM,
                                    TOE_CONNECTION_TYPE,
                                    pdev->toe_info.rss_update_data_phys.as_u64);
        if (lm_status == LM_STATUS_SUCCESS)
        {
            /* Completion arrives asynchronously via the ramrod. */
            lm_status = LM_STATUS_PENDING;
        }
    }

    return lm_status;
}
/* lm_tcp_rx_gen_bufs_alloc_cb
 * Buffer-allocation callback: for every RSS GRQ that is waiting for
 * compensation, refill it up to the low threshold (under the GRQ lock)
 * and, if any BDs were added, publish the new producer to the firmware. */
void lm_tcp_rx_gen_bufs_alloc_cb(lm_device_t * pdev)
{
    u8_t rss_idx;

    LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx)
    {
        lm_tcp_grq_t * grq = &pdev->toe_info.grqs[rss_idx];

        MM_ACQUIRE_TOE_GRQ_LOCK(pdev, rss_idx);
        if (grq->grq_compensate_on_alloc &&
            lm_tcp_rx_fill_grq(pdev, rss_idx, NULL, FILL_GRQ_LOW_THRESHOLD)) {
            DbgMessage(pdev, INFORMl4rx, "lm_toe_service_rx_intr: Updating GRQ producer\n");
            /* Let FW see the newly posted GRQ BDs. */
            LM_INTMEM_WRITE16(pdev,
                              USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,rss_idx), PORT_ID(pdev)),
                              lm_bd_chain_prod_idx(&grq->bd_chain),
                              BAR_USTRORM_INTMEM);
        }
        MM_RELEASE_TOE_GRQ_LOCK(pdev, rss_idx);
    }
}
/* lm_tcp_update_isles_cnts
 * Applies (possibly negative) deltas to the archipelago counters, tracks
 * their high-water marks and toggles the l4_decrease_archipelago flag
 * according to the configured generic-buffer limits. */
void lm_tcp_update_isles_cnts(struct _lm_device_t * pdev, s16_t number_of_isles, s32_t number_of_gen_bufs)
{
    lm_toe_isles_t * archipelago = &pdev->toe_info.archipelago;

    archipelago->number_of_isles   += number_of_isles;
    archipelago->gen_bufs_in_isles += number_of_gen_bufs;

    /* High-water marks. */
    if (archipelago->max_number_of_isles < archipelago->number_of_isles) {
        archipelago->max_number_of_isles = archipelago->number_of_isles;
    }
    if (archipelago->max_gen_bufs_in_isles < archipelago->gen_bufs_in_isles) {
        archipelago->max_gen_bufs_in_isles = archipelago->gen_bufs_in_isles;
    }

    if (pdev->params.l4_max_gen_bufs_in_archipelago &&
        (archipelago->gen_bufs_in_isles > (s32_t)pdev->params.l4_max_gen_bufs_in_archipelago)) {
        /* Over the configured limit: optionally break in, and start
         * shrinking the archipelago if the policy asks for it. */
        if (pdev->params.l4_limit_isles & L4_LI_NOTIFY) {
            DbgBreak();
        }
        if (pdev->params.l4_limit_isles & L4_LI_MAX_GEN_BUFS_IN_ARCHIPELAGO) {
            archipelago->l4_decrease_archipelago = TRUE;
        }
    } else if (archipelago->l4_decrease_archipelago &&
               (archipelago->gen_bufs_in_isles <= (s32_t)pdev->params.l4_valid_gen_bufs_in_archipelago)) {
        /* Back under the valid level: stop shrinking. */
        archipelago->l4_decrease_archipelago = FALSE;
    }
}
/* Sets the configured number of memory blocks per TOE connection. */
void lm_tcp_init_num_of_blocks_per_connection(
struct _lm_device_t *pdev,
u8_t num)
{
pdev->params.l4_num_of_blocks_per_connection = num;
}
/* Returns the configured number of memory blocks per TOE connection. */
u8_t lm_tcp_get_num_of_blocks_per_connection(
struct _lm_device_t *pdev)
{
return pdev->params.l4_num_of_blocks_per_connection;
}
/* Iterates the offloaded neighbor list: pass NULL to get the first
 * entry, or a previous entry to get its successor (NULL at the end). */
lm_neigh_state_t * lm_tcp_get_next_neigh(
    struct _lm_device_t *pdev,
    lm_neigh_state_t * neigh_state)
{
    if (neigh_state == NULL) {
        return (lm_neigh_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.neigh_list);
    }
    return (lm_neigh_state_t *) d_list_next_entry(&neigh_state->hdr.link);
}
/* Iterates the offloaded path list: pass path_state == NULL for the
 * first entry, or a previous entry for its successor. When neigh_state
 * is non-NULL, only paths attached to that neighbor are returned
 * (others are skipped). Returns NULL at the end of the list. */
lm_path_state_t * lm_tcp_get_next_path(
    struct _lm_device_t *pdev,
    lm_neigh_state_t * neigh_state,
    lm_path_state_t * path_state)
{
    lm_path_state_t * next;

    if (path_state == NULL) {
        next = (lm_path_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.path_list);
    } else {
        next = (lm_path_state_t *) d_list_next_entry(&path_state->hdr.link);
    }

    if (neigh_state == NULL) {
        return next;
    }

    /* Filter: advance until a path for the requested neighbor is found. */
    while ((next != NULL) && (next->neigh != neigh_state)) {
        next = (lm_path_state_t *) d_list_next_entry(&next->hdr.link);
    }
    return next;
}
/* Iterates the offloaded TCP list: pass NULL to get the first entry, or
 * a previous entry to get its successor (NULL at the end). */
lm_tcp_state_t * lm_tcp_get_next_tcp(
    struct _lm_device_t *pdev,
    lm_tcp_state_t * tcp_state)
{
    return (tcp_state == NULL) ?
        (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list) :
        (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
}
/* Returns the CAM byte of a path's source IP: the low byte of the IPv4
 * address, or the low byte of the first IPv6 address word. */
u8_t lm_tcp_get_src_ip_cam_byte(
    IN struct _lm_device_t * pdev,
    IN lm_path_state_t * path)
{
    DbgBreakIf(!(pdev && path));

    return (path->path_const.ip_version == IP_VERSION_IPV4) ?
        (u8_t)(path->path_const.u.ipv4.src_ip & 0xFF) :
        (u8_t)(path->path_const.u.ipv6.src_ip[0] & 0xFF);
}
/* Searches the offloaded TCP list (starting after prev_tcp, or from the
 * head when prev_tcp is NULL) for a connection whose low port bytes and
 * source-IP CAM byte match. A connection without a path matches on the
 * port bytes alone. Returns the match, or NULL when none is found. */
lm_tcp_state_t* lm_tcp_find_offloaded_tcp_tuple(struct _lm_device_t * pdev, u8_t src_ip_byte, u8_t src_tcp_b, u8_t dst_tcp_b, lm_tcp_state_t * prev_tcp)
{
    lm_tcp_state_t * tcp;

    for (tcp = lm_tcp_get_next_tcp(pdev, prev_tcp);
         tcp != NULL;
         tcp = lm_tcp_get_next_tcp(pdev, tcp)) {
        /* Compare the low byte of each port first - cheapest check. */
        if (((u8_t)(tcp->tcp_const.src_port & 0xFF) != src_tcp_b) ||
            ((u8_t)(tcp->tcp_const.dst_port & 0xFF) != dst_tcp_b)) {
            continue;
        }
        if ((tcp->path == NULL) ||
            (lm_tcp_get_src_ip_cam_byte(pdev, tcp->path) == src_ip_byte)) {
            return tcp;
        }
    }
    return NULL;
}
/* Returns a pointer into the shared integrity pattern buffer for the
 * given connection/pattern index, 'offset' bytes past the connection's
 * current position (wrapped modulo the pattern size). *pattern_size is
 * clamped so the returned span stays inside the buffer's slack area
 * (pattern_buf_size - pattern_size). */
u8_t * lm_tcp_get_pattern(struct _lm_device_t * pdev,
                          lm_tcp_state_t * tcp,
                          u8_t pattern_idx,
                          u32_t offset,
                          u32_t * pattern_size)
{
    u32_t max_span = pdev->toe_info.integrity_info.pattern_buf_size -
                     pdev->toe_info.integrity_info.pattern_size;
    u32_t start    = (tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] + offset) %
                     pdev->toe_info.integrity_info.pattern_size;

    if (*pattern_size > max_span) {
        *pattern_size = max_span;
    }
    return pdev->toe_info.integrity_info.pattern_buf + start;
}
/* Advances the connection's current position in the integrity pattern
 * buffer by 'offset' bytes, wrapping modulo the pattern size. */
void lm_tcp_set_pattern_offset(struct _lm_device_t * pdev,
                               lm_tcp_state_t * tcp,
                               u8_t pattern_idx,
                               u32_t offset)
{
    tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] += offset;
    tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] %=
        pdev->toe_info.integrity_info.pattern_size;
}
/* Scans the integrity pattern buffer for the first offset (within one
 * pattern period) at which sub_buf matches byte-for-byte. The comparison
 * may read up to sub_buf_size - 1 bytes past the period boundary -
 * presumably the buffer's slack area repeats the pattern (see
 * lm_tcp_get_pattern's clamp); confirm. Returns the matching offset, or
 * 0xFFFFFFFF when no match exists. An empty sub_buf matches at offset 0. */
u32_t lm_tcp_find_pattern_offset(struct _lm_device_t * pdev, u8_t * sub_buf, u32_t sub_buf_size)
{
    u32_t candidate;
    u32_t pos;

    for (candidate = 0; candidate < pdev->toe_info.integrity_info.pattern_size; candidate++) {
        u8_t match = TRUE;
        for (pos = 0; pos < sub_buf_size; pos++) {
            if (pdev->toe_info.integrity_info.pattern_buf[candidate + pos] != sub_buf[pos]) {
                match = FALSE;
                break;
            }
        }
        if (match) {
            return candidate;
        }
    }
    return 0xFFFFFFFF;
}