#ifndef _MM_L4IF_H
#define _MM_L4IF_H
#if defined(DOS)
#include "sync.h"
/* DOS build: single-threaded environment with one global lock pair.
 * Every per-connection / per-index lock macro collapses to the global
 * LOCK()/UNLOCK() from sync.h; all parameters are accepted but ignored.
 * The lock-handle initializer expands to nothing (no handle needed). */
#define MM_INIT_TCP_LOCK_HANDLE()
#define mm_acquire_tcp_lock(_pdev, con) LOCK()
#define mm_release_tcp_lock(_pdev, con) UNLOCK()
#define MM_ACQUIRE_TOE_LOCK(_pdev) LOCK()
#define MM_RELEASE_TOE_LOCK(_pdev) UNLOCK()
#define MM_ACQUIRE_TOE_GRQ_LOCK(_pdev, idx) LOCK()
#define MM_RELEASE_TOE_GRQ_LOCK(_pdev, idx) UNLOCK()
#define MM_ACQUIRE_TOE_GRQ_LOCK_DPC(_pdev, idx) LOCK()
#define MM_RELEASE_TOE_GRQ_LOCK_DPC(_pdev, idx) UNLOCK()
#elif defined(__LINUX) || defined(__SunOS)
/* Linux / Solaris build: the per-connection tcp lock and the TOE locks
 * are real functions implemented by the OS-specific mm layer.
 * Note the upper-case "macro-style" names are deliberately kept so call
 * sites are identical across all platform branches. */

/* Acquire the lock protecting a single tcp connection (tcp_con). */
void
mm_acquire_tcp_lock(
struct _lm_device_t *pdev,
lm_tcp_con_t *tcp_con);

/* Release the lock taken by mm_acquire_tcp_lock(). */
void
mm_release_tcp_lock(
struct _lm_device_t *pdev,
lm_tcp_con_t *tcp_con);

/* No per-call lock handle is needed on these platforms. */
#define MM_INIT_TCP_LOCK_HANDLE()

/* Global TOE lock (device-wide). */
void MM_ACQUIRE_TOE_LOCK(struct _lm_device_t *_pdev);
void MM_RELEASE_TOE_LOCK(struct _lm_device_t *_pdev);
/* Per-index generic-receive-queue (GRQ) lock. */
void MM_ACQUIRE_TOE_GRQ_LOCK(struct _lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK(struct _lm_device_t *_pdev, u8_t idx);
/* GRQ lock variants for DPC (deferred/elevated) context. */
void MM_ACQUIRE_TOE_GRQ_LOCK_DPC(struct _lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK_DPC(struct _lm_device_t *_pdev, u8_t idx);
#elif defined(_VBD_) || defined(_VBD_CMD_)
/* Windows VBD build: optionally uses queued (per-caller handle) spin locks. */
#if USE_QUEUED_SLOCK
/* Queued-lock variants: the caller supplies an opaque handle buffer that the
 * queued-slock implementation uses while the lock is held. */
void
mm_acquire_tcp_q_lock(
lm_device_t *pdev,
lm_tcp_con_t *tcp_con,
void *ql_hdl);
void
mm_release_tcp_q_lock(
lm_device_t *pdev,
lm_tcp_con_t *tcp_con,
void *ql_hdl);
/* Size (bytes) reserved for the opaque queued-lock handle.
 * NOTE(review): magic constant — presumably sizeof the OS queued-lock
 * structure; confirm against the mm_*_tcp_q_lock implementation. */
#define SIZEOF_QL_HDL 24
/* Declares the on-stack handle buffer used by the acquire/release macros
 * below; must appear in any scope that calls mm_acquire/release_tcp_lock.
 * NOTE(review): `__ql_hdl` is a reserved identifier (leading double
 * underscore, C11 7.1.3); renaming would require touching all users. */
#define MM_INIT_TCP_LOCK_HANDLE() u8_t __ql_hdl[SIZEOF_QL_HDL] = {0}
#define mm_acquire_tcp_lock(pdev,tcp_con) mm_acquire_tcp_q_lock((pdev),(tcp_con),__ql_hdl)
#define mm_release_tcp_lock(pdev,tcp_con) mm_release_tcp_q_lock((pdev),(tcp_con),__ql_hdl)
#else
/* Plain (non-queued) lock variants: no handle required. */
#define MM_INIT_TCP_LOCK_HANDLE()
void
mm_acquire_tcp_lock(
lm_device_t *pdev,
lm_tcp_con_t *tcp_con);
void
mm_release_tcp_lock(
lm_device_t *pdev,
lm_tcp_con_t *tcp_con);
#endif
/* Global TOE lock and per-index GRQ locks (plus DPC-context variants),
 * implemented as functions by the VBD mm layer. */
void MM_ACQUIRE_TOE_LOCK(lm_device_t *_pdev);
void MM_RELEASE_TOE_LOCK(lm_device_t *_pdev);
void MM_ACQUIRE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_ACQUIRE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);
#elif defined(__USER_MODE_DEBUG)
/* User-mode debug build: locking is a no-op; each acquire/release only
 * emits a debug trace so lock ordering can be followed in the log. */
#define MM_INIT_TCP_LOCK_HANDLE()

/* Trace-only stand-in for the per-connection tcp lock acquire. */
__inline static void mm_acquire_tcp_lock(
struct _lm_device_t *pdev,
lm_tcp_con_t *tcp_con)
{
DbgMessage(pdev, INFORMl4, "Acquiring tcp lock for con %p\n", tcp_con);
}
/* Trace-only stand-in for the per-connection tcp lock release. */
__inline static void mm_release_tcp_lock(
struct _lm_device_t *pdev,
lm_tcp_con_t *tcp_con)
{
DbgMessage(pdev, INFORMl4, "Releasing tcp lock for con %p\n", tcp_con);
}
/* BUGFIX: these macros declared the parameter `_pdev` but expanded to the
 * bare name `pdev`, so they compiled only when the call-site argument was
 * a variable literally named `pdev`; the bodies now use the parameter.
 * The trailing semicolons were also dropped — call sites must already end
 * with `;` because the other platform branches implement these names as
 * functions, so the old form expanded to a stray empty statement. */
#define MM_ACQUIRE_TOE_LOCK(_pdev) DbgMessage(_pdev, INFORMl4, "Acquiring global toe lock\n")
#define MM_RELEASE_TOE_LOCK(_pdev) DbgMessage(_pdev, INFORMl4, "Releasing global toe lock\n")
#define MM_ACQUIRE_TOE_GRQ_LOCK(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Acquiring global toe grq lock\n")
#define MM_RELEASE_TOE_GRQ_LOCK(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Releasing global toe grq lock\n")
#define MM_ACQUIRE_TOE_GRQ_LOCK_DPC(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Acquiring global toe grq lock\n")
#define MM_RELEASE_TOE_GRQ_LOCK_DPC(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Releasing global toe grq lock\n")
#elif defined (NDISMONO)
/* NDIS-monolithic build: same function-based locking interface as the
 * Linux/VBD branches; implementations live in the NDIS mm layer. */
#define MM_INIT_TCP_LOCK_HANDLE()
/* Acquire / release the lock protecting a single tcp connection. */
void
mm_acquire_tcp_lock(
lm_device_t *pdev,
lm_tcp_con_t *tcp_con);
void
mm_release_tcp_lock(
lm_device_t *pdev,
lm_tcp_con_t *tcp_con);
/* Global TOE lock, per-index GRQ locks, and DPC-context GRQ variants. */
void MM_ACQUIRE_TOE_LOCK(lm_device_t *_pdev);
void MM_RELEASE_TOE_LOCK(lm_device_t *_pdev);
void MM_ACQUIRE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_ACQUIRE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);
#endif
/* ---------------------------------------------------------------------------
 * Platform-independent mm_* callbacks implemented by the OS-specific layer.
 * ------------------------------------------------------------------------- */

/* Copy `nbytes` from a generic buffer (physical address `gen_buf_phys` +
 * `gen_buf_offset`) into `tcp_buf` at `tcp_buf_offset` using DMAE.
 * Returns a u32_t — presumably the number of bytes copied; confirm with
 * the platform implementation. */
u32_t mm_tcp_rx_peninsula_to_rq_copy_dmae(
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp,
lm_address_t gen_buf_phys,
u32_t gen_buf_offset,
lm_tcp_buffer_t * tcp_buf,
u32_t tcp_buf_offset,
u32_t nbytes
);

/* Complete a slow-path request (`sp_request`) back to the OS layer for
 * connection `tcp`. */
void mm_tcp_comp_slow_path_request(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp,
lm_tcp_slow_path_request_t *sp_request);

/* Complete the buffers in `buf_list` on connection `tcp` / `tcp_con`
 * with completion status `lm_status`. */
void mm_tcp_complete_bufs(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp,
lm_tcp_con_t *tcp_con,
s_list_t *buf_list,
lm_status_t lm_status
);

/* Query whether buffers are currently being indicated on `con`;
 * returns a u8_t (boolean-style flag). */
u8_t mm_tcp_indicating_bufs(
lm_tcp_con_t * con
);

/* Abort (fail) all posted buffers on `con` with the given `status`. */
void mm_tcp_abort_bufs (
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
IN lm_tcp_con_t * con,
IN lm_status_t status
);
/* Indicate to the OS layer that a FIN was received on connection `tcp`. */
void mm_tcp_indicate_fin_received(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp
);

/* Indicate to the OS layer that a RST was received on connection `tcp`. */
void mm_tcp_indicate_rst_received(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp
);

/* Notify the OS layer that a graceful disconnect on `tcp` has completed
 * with the given `status`. */
void mm_tcp_graceful_disconnect_done(
IN struct _lm_device_t * pdev,
IN lm_tcp_state_t * tcp,
IN lm_status_t status
);

/* Indicate received generic (unposted) data on `tcp` to the OS layer. */
void mm_tcp_rx_indicate_gen (
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp
);

/* Ask the OS layer for up to `nbufs` generic buffers, appended to
 * `gb_list`; `sb_idx` selects the status block / queue index (see
 * NON_EXISTENT_SB_IDX below). Returns the number obtained — presumably;
 * confirm with the platform implementation. */
u32_t mm_tcp_get_gen_bufs(
struct _lm_device_t * pdev,
d_list_t * gb_list,
u32_t nbufs,
u8_t sb_idx
);
#define MM_TCP_RGB_COMPENSATE_GRQS 0x01
#define MM_TCP_RGB_COLLECT_GEN_BUFS 0x02
#define MM_TCP_RGB_USE_ALL_GEN_BUFS 0x80
#define NON_EXISTENT_SB_IDX 0xFF
/* Return a single generic buffer to the OS pool for GRQ `grq_idx`;
 * `flags` is a combination of the MM_TCP_RGB_* bits above. */
void mm_tcp_return_gen_bufs(
struct _lm_device_t * pdev,
lm_tcp_gen_buf_t * gen_buf,
u32_t flags,
u8_t grq_idx
);

/* Return a whole list of generic buffers to the OS pool for GRQ `grq_idx`. */
void mm_tcp_return_list_of_gen_bufs(
struct _lm_device_t * pdev,
d_list_t * gen_buf_list,
u32_t flags,
u8_t grq_idx
);

/* Copy `nbytes` from the virtual buffer `mem_buf` into `tcp_buf` at
 * `tcp_buf_offset`. Returns a u32_t — presumably the bytes copied;
 * confirm with the platform implementation. */
u32_t mm_tcp_copy_to_tcp_buf(
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp_state,
lm_tcp_buffer_t * tcp_buf,
u8_t * mem_buf,
u32_t tcp_buf_offset,
u32_t nbytes
);

/* Indicate to the OS layer that connection `tcp_state` should be
 * retrieved (uploaded) for the given `upload_reason`. */
void
mm_tcp_indicate_retrieve_indication(
struct _lm_device_t *pdev,
lm_tcp_state_t *tcp_state,
l4_upload_reason_t upload_reason);

/* Recompute the number of generic buffers required after an MSS and/or
 * initial receive window change (old vs. new values supplied). */
void mm_tcp_update_required_gen_bufs(
struct _lm_device_t * pdev,
u32_t new_mss,
u32_t old_mss,
u32_t new_initial_rcv_wnd,
u32_t old_initial_rcv_wnd);

/* Complete an upload request for a path state object. */
void mm_tcp_complete_path_upload_request(
struct _lm_device_t * pdev,
lm_path_state_t * path);

/* Complete an upload request for a neighbor state object. */
void mm_tcp_complete_neigh_upload_request(
struct _lm_device_t * pdev,
lm_neigh_state_t * neigh
);

/* Post an "empty" (data-less) slow-path request of `request_type` on
 * connection `tcp`; returns an lm_status_t result code. */
lm_status_t mm_tcp_post_empty_slow_path_request(
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp,
u32_t request_type);

/* Delete the tcp state object `tcp` (OS-layer teardown hook). */
void mm_tcp_del_tcp_state(
struct _lm_device_t * pdev,
lm_tcp_state_t * tcp);
#endif /* _MM_L4IF_H */