#ifndef _BD_CHAIN_H
#define _BD_CHAIN_H

/* Total number of BD (buffer descriptor) slots that fit in one page. */
#define BD_PER_PAGE(bd_size) (LM_PAGE_SIZE/(bd_size))

/* Number of BD slots at the tail of each page reserved for the "next page"
 * pointer in chain mode: enough bd_size slots to hold one lm_bd_chain_next,
 * rounded up. In PBL mode no slots are reserved (0). */
#define NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode) ((is_chain_mode)? (1 + ((sizeof(lm_bd_chain_next)-1) / (bd_size))): 0)

/* BD slots per page that are actually usable for data, i.e. all slots minus
 * the ones consumed by the next-page pointer. */
#define USABLE_BDS_PER_PAGE(bd_size,is_chain_mode) ((u32_t) (BD_PER_PAGE(bd_size)-NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode)))
/* Number of BDs currently free (available for producing) on the chain. */
__inline static u16_t lm_bd_chain_avail_bds(lm_bd_chain_t* bd_chain)
{
    const u16_t avail = bd_chain->bd_left;
    return avail;
}
/* Current producer index of the chain. */
__inline static u16_t lm_bd_chain_prod_idx(lm_bd_chain_t* bd_chain)
{
    const u16_t prod = bd_chain->prod_idx;
    return prod;
}
/* Current consumer index of the chain. */
__inline static u16_t lm_bd_chain_cons_idx(lm_bd_chain_t* bd_chain)
{
    const u16_t cons = bd_chain->cons_idx;
    return cons;
}
/* Number of data BDs usable on each page (excludes next-pointer slots). */
__inline static u16_t lm_bd_chain_usable_bds_per_page(lm_bd_chain_t* bd_chain)
{
    const u16_t usable = bd_chain->usable_bds_per_page;
    return usable;
}
/* Number of pages backing this BD chain. */
__inline static u16_t lm_bd_chain_page_cnt(lm_bd_chain_t* bd_chain)
{
    const u16_t pages = bd_chain->page_cnt;
    return pages;
}
/* Total BD slots per page (including next-pointer slots). */
__inline static u16_t lm_bd_chain_bds_per_page(lm_bd_chain_t* bd_chain)
{
    const u16_t per_page = bd_chain->bds_per_page;
    return per_page;
}
/* Mask for the in-page BD offset (bds_per_page - 1; bds_per_page is a
 * power of two, see the log2_align checks in the setup paths). */
__inline static u16_t lm_bd_chain_bds_per_page_mask(lm_bd_chain_t* bd_chain)
{
    const u16_t mask = bd_chain->bds_per_page_mask;
    return mask;
}
/* Number of BD slots to skip at the end of each page (next-pointer slots). */
__inline static u16_t lm_bd_chain_bds_skip_eop(lm_bd_chain_t* bd_chain)
{
    const u16_t skip = bd_chain->bds_skip_eop;
    return skip;
}
/* Nonzero when no free BDs remain on the chain (bd_left is zero). */
__inline static u8_t lm_bd_chain_is_empty(lm_bd_chain_t* bd_chain)
{
    return (0 == bd_chain->bd_left);
}
/* Nonzero when every BD on the chain is free (bd_left equals capacity),
 * i.e. nothing is currently outstanding. */
__inline static u8_t lm_bd_chain_is_full(lm_bd_chain_t* bd_chain)
{
    return (bd_chain->capacity == bd_chain->bd_left);
}
/* Returns the physical address of page 'page_idx' of the chain.
 * The index wraps modulo page_cnt. In PBL mode the address comes straight
 * from the physical-address table; in chain mode it is derived by stepping
 * LM_PAGE_SIZE at a time from the chain base (the chain-mode pages are
 * addressed as one contiguous physical run here). */
__inline static lm_address_t lm_bd_chain_phys_addr(lm_bd_chain_t* bd_chain, u8_t page_idx)
{
    lm_address_t page_phys = bd_chain->bd_chain_phy;
    u8_t i;

    page_idx = page_idx % bd_chain->page_cnt;

    if (!bd_chain->b_is_chain_mode)
    {
        page_phys = bd_chain->pbl_phys_addr_table[page_idx];
    }
    else
    {
        for (i = 0; i < page_idx; i++)
        {
            LM_INC64(&page_phys, LM_PAGE_SIZE);
        }
    }

    return page_phys;
}
/* Writes the "next page" pointer BD at the tail of every page of a
 * chain-mode BD chain, linking page i to page i+1 and the last page back
 * to the first (forming a ring). Each next-BD stores the next page's
 * physical address (little-endian halves) for the hardware, and stashes
 * the next page's VIRTUAL address in its 'reserved' field for the driver
 * to follow (see lm_bd_advance_page / lm_bd_chain_incr_bd).
 * Chain mode only; assumes the pages are virtually and physically
 * contiguous starting at bd_chain_virt/bd_chain_phy. */
__inline static void lm_bd_chain_set_next_ptrs(lm_bd_chain_t * bd_chain)
{
    lm_address_t start_mem_phy;
    lm_address_t mem_phy;
    lm_bd_chain_next * next_bd;
    u8_t *start_mem_virt;
    u8_t *mem_virt;
    u16_t idx;

    mem_virt = bd_chain->bd_chain_virt;
    mem_phy = bd_chain->bd_chain_phy;

    /* Virtual and physical base must share the same in-page offset. */
    DbgBreakIf(
    ((u32_t) PTR_SUB(mem_virt, 0) & LM_PAGE_MASK) !=
    (mem_phy.as_u32.low & LM_PAGE_MASK));
    DbgBreakIf(!bd_chain->b_is_chain_mode);

    /* lm_bd_chain_next must be layout-compatible with every HW next-page
     * BD type it is cast to/from elsewhere in the driver. */
    ASSERT_STATIC(OFFSETOF(struct eth_tx_next_bd, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_tx_next_bd, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_tx_next_bd, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_bd_next_page, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_bd_next_page, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_bd_next_page, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_cqe_next_page, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_cqe_next_page, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct eth_rx_cqe_next_page, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;
    ASSERT_STATIC(OFFSETOF(struct toe_page_addr_bd, addr_hi) == OFFSETOF(lm_bd_chain_next, addr_hi)) ;
    ASSERT_STATIC(OFFSETOF(struct toe_page_addr_bd, addr_lo) == OFFSETOF(lm_bd_chain_next, addr_lo)) ;
    ASSERT_STATIC(OFFSETOF(struct toe_page_addr_bd, reserved)== OFFSETOF(lm_bd_chain_next, reserved) ) ;

    /* Remember the first page so the last page can link back to it. */
    start_mem_phy = mem_phy;
    start_mem_virt = mem_virt;

    /* Link page idx -> page idx+1 for all pages except the last. */
    for(idx = 0; idx < bd_chain->page_cnt-1; idx++)
    {
        if CHK_NULL(mem_virt)
        {
            DbgBreakIfAll(!mem_virt) ;
            return ;
        }
        LM_INC64(&mem_phy, LM_PAGE_SIZE);
        /* Next-BD lives right after the last usable data BD of this page. */
        next_bd = (lm_bd_chain_next *)(mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
        next_bd->addr_hi = mm_cpu_to_le32(mem_phy.as_u32.high);
        next_bd->addr_lo = mm_cpu_to_le32(mem_phy.as_u32.low);
        /* Driver-only shadow: virtual address of the next page. */
        *((u8_t **) next_bd->reserved) = mem_virt + LM_PAGE_SIZE;
        mem_virt += LM_PAGE_SIZE;
    }

    /* Close the ring: last page points back to the first. */
    next_bd = (lm_bd_chain_next *)(mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
    next_bd->addr_hi = mm_cpu_to_le32(start_mem_phy.as_u32.high);
    next_bd->addr_lo = mm_cpu_to_le32(start_mem_phy.as_u32.low);
    *((u8_t **) next_bd->reserved) = start_mem_virt;
}
unsigned long log2_align(unsigned long n);
/* Appends one page to a chain-mode BD chain.
 * First page: initializes all chain bookkeeping and links the page to
 * itself (single-page ring). Subsequent pages: walks the ring via the
 * per-page virtual shadow pointers to the current last page, splices the
 * new page in after it, and points the new page back at the first page.
 * PBL mode is not supported here (DbgBreak).
 * NOTE(review): bd_left is reset to the full new capacity, so this is only
 * safe while no BDs are outstanding — presumably called at init time only;
 * confirm against callers. Returns LM_STATUS_SUCCESS. */
__inline static lm_status_t lm_bd_chain_add_page(
    struct _lm_device_t *pdev,
    lm_bd_chain_t* bd_chain,
    void *mem_virt,
    lm_address_t mem_phys,
    u8_t bd_size,
    u8_t is_chain_mode)
{
    lm_bd_chain_next * next_bd;
    UNREFERENCED_PARAMETER_(pdev);

    /* Indices are u16; the enlarged chain must still be addressable. */
    DbgBreakIf((bd_chain->page_cnt + 1) * BD_PER_PAGE(bd_size) > 0xffff);

    if (is_chain_mode)
    {
        if (bd_chain->page_cnt) {
            u16_t page_index;
            /* All pages of one chain must use the same BD size. */
            DbgBreakIf(bd_chain->bd_size != bd_size);
            /* Walk to the next-BD of the current last page. */
            next_bd = (lm_bd_chain_next *)((u8_t*)bd_chain->bd_chain_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
            for (page_index = 0; page_index < bd_chain->page_cnt - 1; page_index++) {
                next_bd = (lm_bd_chain_next *)((u8_t*)(*(void **)(next_bd->reserved)) + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
            }
            /* Last page now points at the new page... */
            next_bd->addr_hi = mm_cpu_to_le32(mem_phys.as_u32.high);
            next_bd->addr_lo = mm_cpu_to_le32(mem_phys.as_u32.low);
            *((u8_t **) next_bd->reserved) = mem_virt;
            /* ...and the new page closes the ring back to the first page. */
            next_bd = (lm_bd_chain_next *)((u8_t*)mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
            next_bd->addr_hi = mm_cpu_to_le32(bd_chain->bd_chain_phy.as_u32.high);
            next_bd->addr_lo = mm_cpu_to_le32(bd_chain->bd_chain_phy.as_u32.low);
            *((u8_t **) next_bd->reserved) = bd_chain->bd_chain_virt;
        } else {
            /* First page: set up the chain state from scratch. */
            bd_chain->bd_chain_phy = mem_phys;
            bd_chain->bd_chain_virt = mem_virt;
            bd_chain->bd_size = bd_size;
            bd_chain->bds_skip_eop = NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode);
            bd_chain->usable_bds_per_page = USABLE_BDS_PER_PAGE(bd_size,is_chain_mode);
            bd_chain->bds_per_page = BD_PER_PAGE(bd_size);
            bd_chain->b_is_chain_mode = TRUE;
            bd_chain->num_bd_to_sub = 0;
            /* In chain mode the "mask" is usable_bds_per_page itself
             * (e.g. 0xFF); see the wrap tests in produce/consume. */
            bd_chain->usable_bds_mask = bd_chain->usable_bds_per_page;
            /* Index math below relies on bds_per_page being a power of 2. */
            DbgBreakIf(bd_chain->bds_per_page != log2_align((u32_t)bd_chain->bds_per_page));
            bd_chain->bds_per_page_mask = bd_chain->bds_per_page - 1;
            bd_chain->cons_idx = 0;
            bd_chain->prod_idx = 0;
            bd_chain->next_bd = bd_chain->bd_chain_virt;
            /* Single-page ring: the page links to itself. */
            next_bd = (lm_bd_chain_next *)((u8_t*)mem_virt + (bd_chain->bd_size) * (bd_chain->usable_bds_per_page));
            next_bd->addr_hi = mm_cpu_to_le32(mem_phys.as_u32.high);
            next_bd->addr_lo = mm_cpu_to_le32(mem_phys.as_u32.low);
            *((u8_t **) next_bd->reserved) = mem_virt;
        }
    }
    else
    {
        /* Adding pages to a PBL-mode chain is not supported. */
        DbgBreak();
    }

    bd_chain->page_cnt++;
    bd_chain->capacity = bd_chain->page_cnt * bd_chain->usable_bds_per_page;
    bd_chain->bd_left = bd_chain->capacity;

    return LM_STATUS_SUCCESS;
}
/* Initializes a BD chain over 'page_cnt' contiguous pages starting at
 * mem_virt/mem_phys. Computes per-page layout from bd_size, resets the
 * producer/consumer indices, and (chain mode only) writes the next-page
 * pointer BDs. With is_full set, the chain starts with prod_idx at the
 * end and no free BDs — used for chains the hardware produces into.
 * NOTE(review): in PBL mode (!is_chain_mode) this leaves b_is_chain_mode,
 * num_bd_to_sub, usable_bds_mask and the PBL tables unset — callers must
 * use lm_bd_chain_pbl_setup, which fills those in afterwards.
 * Returns LM_STATUS_SUCCESS. */
__inline static lm_status_t lm_bd_chain_setup(
    struct _lm_device_t *pdev,
    lm_bd_chain_t* bd_chain,
    void *mem_virt,
    lm_address_t mem_phys,
    u16_t page_cnt,
    u8_t bd_size,
    u8_t is_full,
    u8_t is_chain_mode)
{
    /* Indices are u16; the whole chain must be addressable. */
    DbgBreakIf(page_cnt * BD_PER_PAGE(bd_size) > 0xffff);

    UNREFERENCED_PARAMETER_(pdev);

    bd_chain->bd_chain_phy = mem_phys;
    bd_chain->bd_chain_virt = mem_virt;
    bd_chain->bd_size = bd_size;
    bd_chain->bds_skip_eop = NUM_BDS_USED_FOR_NEXT_PTR(bd_size,is_chain_mode);
    bd_chain->usable_bds_per_page = USABLE_BDS_PER_PAGE(bd_size,is_chain_mode);
    bd_chain->bds_per_page = BD_PER_PAGE(bd_size);

    /* Index math below relies on bds_per_page being a power of 2. */
    DbgBreakIf(bd_chain->bds_per_page != log2_align((u32_t)bd_chain->bds_per_page));
    bd_chain->bds_per_page_mask = bd_chain->bds_per_page - 1;

#ifdef __SunOS
    /* Two-step multiply to avoid a compiler issue on SunOS builds. */
    bd_chain->capacity = page_cnt;
    bd_chain->capacity *= bd_chain->usable_bds_per_page;
#else
    bd_chain->capacity = page_cnt * bd_chain->usable_bds_per_page;
#endif
    bd_chain->page_cnt = page_cnt;
    bd_chain->next_bd = bd_chain->bd_chain_virt;
    bd_chain->cons_idx = 0;

    if(is_full) {
        /* Start "full": producer at the end, nothing left to produce. */
        bd_chain->prod_idx = page_cnt * bd_chain->bds_per_page;
        bd_chain->bd_left = 0;
    } else {
        bd_chain->prod_idx = 0;
        bd_chain->bd_left = bd_chain->capacity;
    }

    if(is_chain_mode)
    {
        bd_chain->b_is_chain_mode = TRUE;
        bd_chain->num_bd_to_sub = 0;
        /* In chain mode the "mask" is usable_bds_per_page itself
         * (e.g. 0xFF); see the wrap tests in produce/consume. */
        bd_chain->usable_bds_mask = bd_chain->usable_bds_per_page;
        lm_bd_chain_set_next_ptrs(bd_chain);
    }

    return LM_STATUS_SUCCESS;
}
/* Fills the PBL (page base list) tables for a buffer of 'pbl_entries'
 * pages starting at buf_base_virt/buf_base_phy: one physical address per
 * entry (stored half-swapped and little-endian on big-endian hosts, for
 * the hardware) and one virtual address per entry for the driver.
 * Returns LM_STATUS_INVALID_PARAMETER on NULL tables/base or zero
 * entries, otherwise LM_STATUS_SUCCESS. */
__inline static lm_status_t lm_bd_chain_pbl_set_ptrs(
    IN void *buf_base_virt,
    IN lm_address_t buf_base_phy,
    IN lm_address_t *pbl_phys_table,
    IN void *pbl_virt_table,
    IN u32_t pbl_entries
    )
{
    u32_t entry;

    if (CHK_NULL(buf_base_virt) ||
        CHK_NULL(pbl_phys_table) ||
        CHK_NULL(pbl_virt_table) ||
        (pbl_entries == 0))
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    for (entry = 0; entry < pbl_entries; entry++)
    {
#ifdef BIG_ENDIAN
        /* HW expects the halves swapped and little-endian. */
        pbl_phys_table[entry].as_u32.low = mm_cpu_to_le32(buf_base_phy.as_u32.high);
        pbl_phys_table[entry].as_u32.high = mm_cpu_to_le32(buf_base_phy.as_u32.low);
#else
        pbl_phys_table[entry].as_u64 = buf_base_phy.as_u64;
#endif
        /* Driver-side shadow table of page virtual addresses. */
        ((void **)pbl_virt_table)[entry] = buf_base_virt;

        /* Advance both addresses to the next page. */
        LM_INC64(&buf_base_phy, LM_PAGE_SIZE);
        buf_base_virt = (u8_t *)buf_base_virt + LM_PAGE_SIZE;
    }

    return LM_STATUS_SUCCESS;
}
/* Initializes a PBL-mode BD chain: runs the common chain setup (with
 * chain mode off), records the PBL tables and PBL-specific index state,
 * then populates both tables via lm_bd_chain_pbl_set_ptrs.
 * Returns the first failing status, or LM_STATUS_SUCCESS. */
__inline static lm_status_t lm_bd_chain_pbl_setup(
    struct _lm_device_t *pdev,
    lm_bd_chain_t* bd_chain,
    void *mem_virt,
    lm_address_t mem_phys,
    void *pbl_virt_table,
    lm_address_t *pbl_phys_table,
    u16_t page_cnt,
    u8_t bd_size,
    u8_t is_full)
{
    lm_status_t lm_status = lm_bd_chain_setup(pdev,
                                              bd_chain,
                                              mem_virt,
                                              mem_phys,
                                              page_cnt,
                                              bd_size,
                                              is_full,
                                              FALSE /* is_chain_mode */);
    if (lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }

    /* PBL-specific bookkeeping (chain setup leaves these untouched). */
    bd_chain->pbl_phys_addr_table = pbl_phys_table;
    bd_chain->pbl_virt_addr_table = pbl_virt_table;
    bd_chain->b_is_chain_mode     = FALSE;
    bd_chain->num_bd_to_sub       = 1;
    /* PBL mode uses a conventional low-bit mask. */
    bd_chain->usable_bds_mask     = bd_chain->usable_bds_per_page - 1;
    /* Start on the last page so the first advance wraps to page 0. */
    bd_chain->pbe_idx             = page_cnt - 1;

    return lm_bd_chain_pbl_set_ptrs(mem_virt,
                                    mem_phys,
                                    bd_chain->pbl_phys_addr_table,
                                    bd_chain->pbl_virt_addr_table,
                                    page_cnt);
}
/* Zeroes all pages of an already-configured chain and re-runs the
 * appropriate setup path (chain or PBL) with the chain's existing
 * parameters, leaving it empty (is_full == FALSE). */
__inline static void lm_bd_chain_reset(struct _lm_device_t * pdev, lm_bd_chain_t * bd_chain)
{
    DbgBreakIf(!bd_chain->bd_chain_virt);
    mm_memset(bd_chain->bd_chain_virt, 0, bd_chain->page_cnt * LM_PAGE_SIZE);

    if (!bd_chain->b_is_chain_mode)
    {
        lm_bd_chain_pbl_setup(pdev,
                              bd_chain,
                              bd_chain->bd_chain_virt,
                              bd_chain->bd_chain_phy,
                              bd_chain->pbl_virt_addr_table,
                              bd_chain->pbl_phys_addr_table,
                              bd_chain->page_cnt,
                              bd_chain->bd_size,
                              FALSE /* is_full */);
    }
    else
    {
        lm_bd_chain_setup(pdev,
                          bd_chain,
                          bd_chain->bd_chain_virt,
                          bd_chain->bd_chain_phy,
                          bd_chain->page_cnt,
                          bd_chain->bd_size,
                          FALSE /* is_full */,
                          bd_chain->b_is_chain_mode);
    }
}
/* Advances an external cursor (bd_idx / virt / phys) by one BD, hopping
 * over the next-pointer BDs and onto the next page when the end of the
 * usable area is reached. Chain mode only (DbgBreak otherwise). */
__inline static void lm_bd_chain_incr_bd(
    lm_bd_chain_t * bd_chain,
    lm_address_t * phys,
    void ** virt,
    u16_t * bd_idx)
{
    (*bd_idx)++;
    *virt = ((char *)*virt) + bd_chain->bd_size;

    /* End-of-page test: in chain mode usable_bds_per_page acts as a
     * low-bit mask (e.g. 0xFF for 256 BDs/page with one next-BD), so this
     * matches exactly when the index sits on the last usable BD of a page.
     * NOTE(review): assumes usable_bds_per_page has that all-ones form —
     * holds when bds_skip_eop == 1 and bds_per_page is a power of two. */
    if((*bd_idx & bd_chain->usable_bds_per_page) == bd_chain->usable_bds_per_page) {
        if (bd_chain->b_is_chain_mode) {
            /* *virt now points at the next-BD; follow its shadow virtual
             * pointer and pick up the next page's physical address. */
            lm_bd_chain_next *next_bd = (lm_bd_chain_next *)(*virt);
            (*bd_idx) += bd_chain->bds_skip_eop;
            *virt = *(void **)(next_bd->reserved);
            phys->as_u32.high = next_bd->addr_hi;
            phys->as_u32.low = next_bd->addr_lo;
        } else {
            DbgBreak();
        }
    }
}
/* Moves the chain's next_bd cursor onto the first BD of the next page.
 * Chain mode: follows the shadow virtual pointer in the current page's
 * next-BD and bumps the caller's index past the skipped slots.
 * PBL mode: steps (with wrap) to the next entry of the virtual page
 * table; the index is not adjusted since no slots are skipped. */
__inline static void lm_bd_advance_page(lm_bd_chain_t* bd_chain, u16_t *idx_to_inc)
{
    if (bd_chain->b_is_chain_mode)
    {
        lm_bd_chain_next *next = (lm_bd_chain_next *)bd_chain->next_bd;

        bd_chain->next_bd = *(void **)(next->reserved);
        *idx_to_inc += bd_chain->bds_skip_eop;
    }
    else
    {
        if (++bd_chain->pbe_idx == bd_chain->page_cnt)
        {
            bd_chain->pbe_idx = 0;
        }
        bd_chain->next_bd = ((void **)bd_chain->pbl_virt_addr_table)[bd_chain->pbe_idx];
    }
}
/* Returns 'nbds' BDs to the free pool after the consumer has processed
 * them; the free count must never exceed the chain's capacity. */
__inline static void lm_bd_chain_bds_consumed(lm_bd_chain_t* bd_chain, u16_t nbds)
{
    bd_chain->bd_left = (u16_t)(bd_chain->bd_left + nbds);
    DbgBreakIfFastPath(bd_chain->bd_left > bd_chain->capacity);
}
/* TOE variant of produce: returns the next BD to fill and advances the
 * producer. Differs from lm_bd_chain_produce_bd in that the page-boundary
 * check runs BEFORE handing out the BD (using num_bd_to_sub /
 * usable_bds_mask, which differ between chain and PBL mode). */
__inline static void *lm_toe_bd_chain_produce_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t prod_idx = 0;

    /* Caller must have checked availability. */
    DbgBreakIf(!bd_chain->bd_left);

    /* Bias the index per mode, then test for the last usable BD of the
     * page; if we're there, hop next_bd onto the next page first. */
    prod_idx = bd_chain->prod_idx - bd_chain->num_bd_to_sub;
    if((prod_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->prod_idx);
    }

    ret_bd = bd_chain->next_bd;
    bd_chain->bd_left--;
    bd_chain->prod_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    return ret_bd;
}
/* Returns the next BD to fill and advances the producer. The page-boundary
 * check runs AFTER handing out the BD, so next_bd is already positioned on
 * the next page for the following call (cf. the TOE variant, which checks
 * before). */
__inline static void *lm_bd_chain_produce_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t prod_idx = 0;

    /* Caller must have checked availability. */
    DbgBreakIfFastPath(!bd_chain->bd_left);

    ret_bd = bd_chain->next_bd;
    bd_chain->bd_left--;
    bd_chain->prod_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    /* Bias the index per mode; if we just consumed the last usable BD of
     * the page, hop next_bd onto the next page now. */
    prod_idx = bd_chain->prod_idx - bd_chain->num_bd_to_sub;
    if((prod_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->prod_idx);
    }

    return ret_bd;
}
/* TOE variant of consume: returns the next BD to process and advances the
 * consumer. As with the TOE produce variant, the page-boundary check runs
 * BEFORE handing out the BD. */
__inline static void *lm_toe_bd_chain_consume_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t cons_idx = 0;

    /* Nothing to consume when all BDs are free. */
    DbgBreakIf(bd_chain->bd_left == bd_chain->capacity);

    /* Bias the index per mode; hop to the next page if we're at the last
     * usable BD of the current one. */
    cons_idx = bd_chain->cons_idx - bd_chain->num_bd_to_sub;
    if((cons_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->cons_idx);
    }

    ret_bd = bd_chain->next_bd;
    bd_chain->bd_left++;
    bd_chain->cons_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    return ret_bd;
}
/* Returns the next BD to process and advances the consumer. The
 * page-boundary check runs AFTER handing out the BD, positioning next_bd
 * for the following call (cf. the TOE variant). */
__inline static void *lm_bd_chain_consume_bd(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t cons_idx = 0;

    /* Nothing to consume when all BDs are free. */
    DbgBreakIfFastPath(bd_chain->bd_left == bd_chain->capacity);

    ret_bd = bd_chain->next_bd;
    bd_chain->bd_left++;
    bd_chain->cons_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    /* Bias the index per mode; if we just consumed the last usable BD of
     * the page, hop next_bd onto the next page now. */
    cons_idx = bd_chain->cons_idx - bd_chain->num_bd_to_sub;
    if((cons_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        lm_bd_advance_page(bd_chain, &bd_chain->cons_idx);
    }

    return ret_bd;
}
/* Like lm_toe_bd_chain_consume_bd, but never returns a BD from a new
 * page in the same call: when the cursor sits at a page boundary, it
 * advances onto the next page and returns NULL instead, so the caller
 * only ever receives BDs contiguous within one page. */
__inline static void *lm_bd_chain_consume_bd_contiguous(lm_bd_chain_t* bd_chain)
{
    void *ret_bd = NULL;
    u16_t cons_idx = 0;

    /* Nothing to consume when all BDs are free. */
    DbgBreakIf(bd_chain->bd_left == bd_chain->capacity);

    cons_idx = bd_chain->cons_idx - bd_chain->num_bd_to_sub;
    if((cons_idx & bd_chain->usable_bds_mask) == bd_chain->usable_bds_mask) {
        /* Page boundary: advance but report it to the caller via NULL. */
        lm_bd_advance_page(bd_chain, &bd_chain->cons_idx);

        return NULL;
    }

    ret_bd = bd_chain->next_bd;
    bd_chain->bd_left++;
    bd_chain->cons_idx++;
    bd_chain->next_bd += bd_chain->bd_size;

    return ret_bd;
}
/* TOE variant: accounts for 'nbds' BDs produced in one batch, bumping
 * prod_idx by nbds plus the next-pointer slots of every page boundary the
 * batch crosses. Unlike lm_bd_chain_bds_produced, it compensates when
 * prod_idx starts exactly on a page boundary (the boundary was already
 * accounted for) and sanity-checks the resulting in-page offset. */
__inline static void lm_toe_bd_chain_bds_produced(lm_bd_chain_t* bd_chain, u16_t nbds)
{
    u16_t nbds_mod_usable_bds;
    u8_t next_bds = 0;

    DbgBreakIfFastPath(bd_chain->bd_left < nbds);
    bd_chain->bd_left -= nbds;

    /* Remainder of the batch after whole pages. Was computed with a
     * repeated-subtraction loop; '%' is equivalent (usable_bds_per_page
     * is nonzero) and O(1), matching the '/' used just below. */
    nbds_mod_usable_bds = nbds % bd_chain->usable_bds_per_page;

    /* One page boundary per whole page's worth of BDs... */
    next_bds += nbds / bd_chain->usable_bds_per_page;
    /* ...minus one if prod_idx already sits on a boundary (its skip was
     * counted when the previous batch crossed it)... */
    if(next_bds && ((bd_chain->prod_idx & bd_chain->bds_per_page_mask) == 0)) {
        next_bds--;
    }
    /* ...plus one if the remainder pushes past the current page's
     * usable area. */
    if((bd_chain->prod_idx & bd_chain->bds_per_page_mask) + nbds_mod_usable_bds > bd_chain->usable_bds_per_page) {
        next_bds++;
    }

    bd_chain->prod_idx += nbds + next_bds * bd_chain->bds_skip_eop;

    /* prod_idx must land inside (or at the end of) a page's usable area. */
    DbgBreakIfFastPath((bd_chain->prod_idx & bd_chain->bds_per_page_mask) > bd_chain->usable_bds_per_page);
    DbgBreakIfFastPath((bd_chain->prod_idx & bd_chain->bds_per_page_mask) == 0);
}
/* Accounts for 'nbds' BDs produced in one batch, bumping prod_idx by
 * nbds plus the next-pointer slots of every page boundary the batch
 * crosses (cf. the TOE variant, which also handles a producer already
 * sitting on a boundary). */
__inline static void lm_bd_chain_bds_produced(lm_bd_chain_t* bd_chain, u16_t nbds)
{
    u16_t nbds_mod_usable_bds;
    u8_t next_bds = 0;

    DbgBreakIfFastPath(bd_chain->bd_left < nbds);
    bd_chain->bd_left -= nbds;

    /* Remainder of the batch after whole pages. Was computed with a
     * repeated-subtraction loop; '%' is equivalent (usable_bds_per_page
     * is nonzero) and O(1), matching the '/' used just below. */
    nbds_mod_usable_bds = nbds % bd_chain->usable_bds_per_page;

    /* One page boundary per whole page's worth of BDs, plus one if the
     * remainder pushes past the current page's usable area. */
    next_bds += nbds / bd_chain->usable_bds_per_page;
    if((bd_chain->prod_idx & bd_chain->bds_per_page_mask) + nbds_mod_usable_bds > bd_chain->usable_bds_per_page) {
        next_bds++;
    }

    bd_chain->prod_idx += nbds + next_bds * bd_chain->bds_skip_eop;
}
/* Accounts for a single BD produced: takes one from the free pool and
 * advances prod_idx, skipping the next-pointer slots when this BD ends
 * the page's usable area. */
__inline static void lm_bd_chain_bd_produced(lm_bd_chain_t* bd_chain)
{
    DbgBreakIfFastPath(bd_chain->bd_left < 1);
    bd_chain->bd_left--;

    /* At the last usable slot of the page? Hop over the next-BD slots. */
    if((bd_chain->prod_idx & bd_chain->bds_per_page_mask) >= bd_chain->usable_bds_per_page) {
        bd_chain->prod_idx += bd_chain->bds_skip_eop;
    }
    bd_chain->prod_idx++;
}
/* Compares the configuration fields of two chains — the struct span from
 * 'page_cnt' up to (not including) 'reserved' — with mm_memcmp and
 * returns its result (per the b_ret name, presumably nonzero when the
 * chains match — confirm against mm_memcmp's contract). */
__inline static u8_t lm_bd_chains_are_consistent( lm_bd_chain_t* bd_chain,
                                                  lm_bd_chain_t* bd_chain2 )
{
    const u32_t start_off = OFFSETOF(lm_bd_chain_t, page_cnt);
    const u32_t cmp_size  = OFFSETOF(lm_bd_chain_t, reserved) - OFFSETOF(lm_bd_chain_t, page_cnt) ;

    /* The compared span must have positive size. */
    ASSERT_STATIC( OFFSETOF(lm_bd_chain_t, page_cnt) < OFFSETOF(lm_bd_chain_t, reserved)) ;

    return mm_memcmp( (u8_t*)bd_chain  + start_off,
                      (u8_t*)bd_chain2 + start_off,
                      cmp_size );
}
#endif