#include "bnxrcv.h"
#define BNX_RECV_INIT_FAIL_THRESH 1
#ifndef NUM_RX_CHAIN
#error NUM_RX_CHAIN is not defined.
#else
#if NUM_RX_CHAIN < 0
#error Invalid NUM_RX_CHAIN definition.
#elif NUM_RX_CHAIN > 1
#warning NUM_RX_CHAIN is greater than 1.
#endif
#endif
/*
 * DMA attributes for receive packet buffers.  Fields are the positional
 * members of ddi_dma_attr(9S); comments name each one.
 */
static ddi_dma_attr_t bnx_rx_jmb_dma_attrib = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0,				/* dma_attr_addr_lo */
	0xffffffffffffffff,		/* dma_attr_addr_hi */
	0x0ffffff,			/* dma_attr_count_max */
	BNX_DMA_ALIGNMENT,		/* dma_attr_align */
	0xffffffff,			/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0x00ffffff,			/* dma_attr_maxxfer */
	0xffffffff,			/* dma_attr_seg */
	BNX_RECV_MAX_FRAGS,		/* dma_attr_sgllen */
	BNX_MIN_BYTES_PER_FRAGMENT,	/* dma_attr_granular */
	0,				/* dma_attr_flags */
};
/*
 * Allocate a DMA-able receive buffer for one rx packet descriptor and
 * record its physical address in the lm_packet.
 *
 * On success returns 0 with dma_handle, dma_acc_handle, mem_virt and
 * mem_phy populated.  On failure returns -1 with every partially
 * acquired resource released (goto-unwind cleanup).
 */
static int
bnx_rxbuffer_alloc(um_device_t *const umdevice, um_rxpacket_t *const umpacket)
{
	lm_packet_t *lmpacket = &(umpacket->lmpacket);
	ddi_dma_cookie_t cookie;
	uint_t cookie_cnt;
	size_t bufsize;
	size_t actual_len;

	if (ddi_dma_alloc_handle(umdevice->os_param.dip,
	    &bnx_rx_jmb_dma_attrib, DDI_DMA_DONTWAIT, (void *)0,
	    &(umpacket->dma_handle)) != DDI_SUCCESS) {
		return (-1);
	}

	bufsize = lmpacket->u1.rx.buf_size;

	if (ddi_dma_mem_alloc(umpacket->dma_handle, bufsize,
	    &bnxAccessAttribBUF, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    (void *)0, (caddr_t *)&lmpacket->u1.rx.mem_virt, &actual_len,
	    &umpacket->dma_acc_handle) != DDI_SUCCESS) {
		goto fail_mem;
	}

	if (ddi_dma_addr_bind_handle(umpacket->dma_handle, NULL,
	    (caddr_t)lmpacket->u1.rx.mem_virt, bufsize,
	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &cookie_cnt) != DDI_DMA_MAPPED) {
		goto fail_bind;
	}

	/* Single-cookie binding: publish the device-visible address. */
	lmpacket->u1.rx.mem_phy.as_u64 = cookie.dmac_laddress;

	return (0);

fail_bind:
	ddi_dma_mem_free(&(umpacket->dma_acc_handle));
fail_mem:
	ddi_dma_free_handle(&(umpacket->dma_handle));
	return (-1);
}
/*
 * Release the DMA resources attached to one rx packet descriptor:
 * unbind the handle, free the buffer memory, then free the handle.
 * Clears the packet's buffer bookkeeping fields as it goes.
 */
static void
bnx_rxbuffer_free(um_device_t *const umdevice, um_rxpacket_t *const umpacket)
{
	lm_packet_t *pkt = &(umpacket->lmpacket);

	pkt->u1.rx.mem_phy.as_u64 = 0;
	pkt->u1.rx.buf_size = 0;

	(void) ddi_dma_unbind_handle(umpacket->dma_handle);

	pkt->u1.rx.mem_virt = NULL;
	ddi_dma_mem_free(&umpacket->dma_acc_handle);

	ddi_dma_free_handle(&(umpacket->dma_handle));
}
/*
 * Prepare one receive ring for use: clear the processing flag, move
 * every descriptor from the LM layer's free queue onto this ring's
 * buffer queue (buffq), and start with an empty wait queue (waitq).
 */
static void
bnx_recv_ring_init(um_device_t *const umdevice, const unsigned int ringidx)
{
	um_recv_qinfo *qinfo = &_RX_QINFO(umdevice, ringidx);
	lm_rx_chain_t *chain = &umdevice->lm_dev.rx_info.chain[ringidx];
	s_list_t *from = &(chain->free_descq);
	s_list_t *to = &(qinfo->buffq);
	um_rxpacket_t *pkt;

	qinfo->processing = B_FALSE;

	s_list_init(to, NULL, NULL, 0);

	/* Drain the LM free descriptor queue into our buffer queue. */
	while ((pkt = (um_rxpacket_t *)s_list_pop_head(from)) != NULL) {
		s_list_push_tail(to, &(pkt->lmpacket.link));
	}

	s_list_init(&(qinfo->waitq), NULL, NULL, 0);
}
/*
 * Attach DMA buffers to as many descriptors as possible, moving each
 * successfully provisioned descriptor from this ring's buffer queue to
 * the LM free descriptor queue.  On the first allocation failure the
 * descriptor is returned to the buffer queue and the loop stops.
 */
static void
bnx_recv_ring_fill(um_device_t *const umdevice, const unsigned int ringidx)
{
	um_recv_qinfo *qinfo = &(_RX_QINFO(umdevice, ringidx));
	s_list_t *from = &(qinfo->buffq);
	s_list_t *to = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);
	um_rxpacket_t *pkt;

	while ((pkt = (um_rxpacket_t *)s_list_pop_head(from)) != NULL) {
		if (bnx_rxbuffer_alloc(umdevice, pkt) != 0) {
			/* Out of resources; put it back and stop. */
			s_list_push_head(from, &pkt->lmpacket.link);
			break;
		}
		s_list_push_tail(to, &pkt->lmpacket.link);
	}
}
/*
 * Drain completed receive packets from this ring's wait queue and hand
 * them to the MAC layer.
 *
 * Packets shorter than rx_copy_threshold are copied into a freshly
 * allocated mblk (so their DMA buffer is recycled immediately); when the
 * adapter stripped a VLAN tag that must be preserved, the tag is
 * re-inserted during the copy.  Buffers that cannot be sent up are
 * recycled onto the chain's free descriptor queue and counted in
 * recv_discards.
 *
 * Called with rcv_mutex held; the lock is dropped only around mac_rx().
 * recvinfo->processing guards against re-entering this ring while the
 * lock is dropped.
 */
static void
bnx_recv_ring_recv(um_device_t *const umdevice, const unsigned int ringidx)
{
	mblk_t *head = NULL;
	mblk_t *tail = NULL;
	s_list_t *srcq;
	s_list_t *recvq;
	s_list_t *freeq;
	boolean_t dcopy;
	boolean_t lm_rcvq_empty;
	lm_packet_t *lmpacket;
	um_rxpacket_t *umpacket;
	um_recv_qinfo *recvinfo;

	recvinfo = &(_RX_QINFO(umdevice, ringidx));
	recvinfo->processing = B_TRUE;

	srcq = &(recvinfo->waitq);
	freeq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);
	recvq = &(umdevice->lm_dev.rx_info.chain[ringidx].active_descq);

	/* Remember whether the hardware still holds any posted buffers. */
	if (s_list_entry_cnt(recvq)) {
		lm_rcvq_empty = B_FALSE;
	} else {
		lm_rcvq_empty = B_TRUE;
	}

	while (1) {
		mblk_t *mp = NULL;
		unsigned int pktlen;
		int ofld_flags;

		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		lmpacket = &(umpacket->lmpacket);

		/* Recycle packets the hardware flagged as bad. */
		if (lmpacket->status != LM_STATUS_SUCCESS) {
			s_list_push_tail(freeq, &(lmpacket->link));
			continue;
		}

		pktlen = lmpacket->size;

		(void) ddi_dma_sync(umpacket->dma_handle, 0,
		    pktlen + L2RX_FRAME_HDR_LEN, DDI_DMA_SYNC_FORKERNEL);

		dcopy = B_FALSE;

		if (pktlen < umdevice->rx_copy_threshold) {
			lm_device_t *lmdevice;
			lmdevice = &(umdevice->lm_dev);

			if ((lmdevice->params.keep_vlan_tag == 0) &&
			    (lmpacket->u1.rx.flags &
			    LM_RX_FLAG_VALID_VLAN_TAG)) {
				/*
				 * The adapter stripped the VLAN tag; rebuild
				 * the frame with the tag re-inserted.
				 * +6 = 4 tag bytes (TPID + TCI) plus 2 bytes
				 * of IP-header alignment padding.
				 */
				mp = allocb(pktlen + 6, BPRI_MED);
				if (mp != NULL) {
					uint8_t *dataptr;
					const uint16_t tpid = htons(0x8100);
					uint16_t vlan_tag;

					vlan_tag =
					    htons(lmpacket->u1.rx.vlan_tag);
					dataptr = lmpacket->u1.rx.mem_virt +
					    L2RX_FRAME_HDR_LEN;

					/* 2-byte pad aligns the IP header. */
					mp->b_rptr += 2;
					/* dst + src MAC addresses. */
					bcopy(dataptr, mp->b_rptr, 12);
					/* Re-inserted 802.1Q tag. */
					bcopy(&tpid, mp->b_rptr + 12, 2);
					bcopy(&vlan_tag, mp->b_rptr + 14, 2);
					/* EtherType + payload. */
					dataptr = dataptr + 12;
					bcopy(dataptr, mp->b_rptr + 16,
					    pktlen - 12);
					mp->b_wptr = mp->b_rptr + pktlen + 4;
					dcopy = B_TRUE;
					goto sendup;
				}
			} else {
				/* +2 bytes of IP-header alignment padding. */
				mp = allocb(pktlen + 2, BPRI_MED);
				if (mp != NULL) {
					uint8_t *dataptr;

					dataptr = lmpacket->u1.rx.mem_virt +
					    L2RX_FRAME_HDR_LEN;

					mp->b_rptr += 2;
					bcopy(dataptr, mp->b_rptr, pktlen);
					mp->b_wptr = mp->b_rptr + pktlen;
					dcopy = B_TRUE;
					goto sendup;
				}
			}

			/* allocb() failed; drop and recycle the buffer. */
			umdevice->recv_discards++;
			s_list_push_tail(freeq, &(lmpacket->link));
			continue;
		}

		/*
		 * Keep one buffer perpetually posted so the hardware is
		 * never completely starved: if the rx queue is empty and
		 * this is the last waiting packet, drop it.
		 */
		if (lm_rcvq_empty == B_TRUE && !(s_list_entry_cnt(srcq))) {
			umdevice->recv_discards++;
			s_list_push_tail(freeq, &(lmpacket->link));
			continue;
		}

		/*
		 * BUG FIX: no mblk was built for this packet (it was at or
		 * above the copy threshold), so mp is still NULL here.  The
		 * previous code fell through to sendup with mp == NULL,
		 * which dereferenced NULL in mac_hcksum_set() and chained a
		 * NULL message into the list passed to mac_rx().  Recycle
		 * the buffer instead.  NOTE(review): a zero-copy send-up
		 * path (e.g. desballoc of the DMA buffer) appears to be
		 * missing here; until one exists these packets are dropped.
		 */
		if (mp == NULL) {
			umdevice->recv_discards++;
			s_list_push_tail(freeq, &(lmpacket->link));
			continue;
		}

sendup:
		/* Advertise hardware-verified checksums to the stack. */
		ofld_flags = 0;
		if ((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_IP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD)) {
			ofld_flags |= HCK_IPV4_HDRCKSUM_OK;
		}

		if (((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_TCP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_TCP_CKSUM_IS_GOOD)) ||
		    ((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_UDP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_UDP_CKSUM_IS_GOOD))) {
			ofld_flags |= HCK_FULLCKSUM_OK;
		}

		if (ofld_flags != 0) {
			mac_hcksum_set(mp, 0, 0, 0, 0, ofld_flags);
		}

		/* The data was copied out, so the buffer can be reposted. */
		if (dcopy == B_TRUE) {
			s_list_push_tail(freeq, &(lmpacket->link));
		}

		/* Append to the chain destined for mac_rx(). */
		if (head == NULL) {
			head = mp;
			tail = mp;
		} else {
			tail->b_next = mp;
			tail = mp;
		}
		tail->b_next = NULL;
	}

	if (head) {
		/* Drop the lock across the upcall to avoid deadlock. */
		mutex_exit(&umdevice->os_param.rcv_mutex);

		mac_rx(umdevice->os_param.macp,
		    umdevice->os_param.rx_resc_handle[ringidx], head);

		mutex_enter(&umdevice->os_param.rcv_mutex);
	}

	recvinfo->processing = B_FALSE;
}
/*
 * Flush this ring's wait queue: every pending descriptor is moved back
 * onto the LM free descriptor queue without being delivered.
 */
static void
bnx_recv_ring_dump(um_device_t *const umdevice, const unsigned int ringidx)
{
	s_list_t *from = &(_RX_QINFO(umdevice, ringidx).waitq);
	s_list_t *to = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);
	um_rxpacket_t *pkt;

	while ((pkt = (um_rxpacket_t *)s_list_pop_head(from)) != NULL) {
		s_list_push_tail(to, &(pkt->lmpacket.link));
	}
}
/*
 * Release the DMA buffers of every descriptor on the LM free queue and
 * park the now-bufferless descriptors on this ring's buffer queue.
 */
static void
bnx_recv_ring_free(um_device_t *const umdevice, const unsigned int ringidx)
{
	s_list_t *from = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);
	s_list_t *to = &(_RX_QINFO(umdevice, ringidx).buffq);
	um_rxpacket_t *pkt;

	while ((pkt = (um_rxpacket_t *)s_list_pop_head(from)) != NULL) {
		bnx_rxbuffer_free(umdevice, pkt);
		s_list_push_tail(to, &pkt->lmpacket.link);
	}
}
/*
 * Final teardown for one ring: empty the buffer queue.  The descriptor
 * memory itself is owned and reclaimed by the LM layer, so the popped
 * entries are simply dropped here.
 */
static void
bnx_recv_ring_fini(um_device_t *const umdevice, const unsigned int ringidx)
{
	um_recv_qinfo *qinfo = &(_RX_QINFO(umdevice, ringidx));
	s_list_t *bufq = &(qinfo->buffq);

	while (s_list_pop_head(bufq) != NULL)
		;
}
/*
 * Initialize and populate every receive ring with DMA buffers.
 *
 * Logs a notice for any ring that received fewer buffers than
 * requested.  Returns 0 on success, or -1 if fewer than
 * BNX_RECV_INIT_FAIL_THRESH buffers were allocated in total.
 */
int
bnx_rxpkts_init(um_device_t *const umdevice)
{
	lm_device_t *lmdevice = &(umdevice->lm_dev);
	int alloccnt = 0;
	int i;

	for (i = RX_CHAIN_IDX0; i < NUM_RX_CHAIN; i++) {
		int post_count;

		bnx_recv_ring_init(umdevice, i);
		bnx_recv_ring_fill(umdevice, i);

		post_count =
		    s_list_entry_cnt(&lmdevice->rx_info.chain[i].free_descq);
		if (post_count != lmdevice->params.l2_rx_desc_cnt[i]) {
			cmn_err(CE_NOTE,
			    "!%s: %d rx buffers requested. %d allocated.\n",
			    umdevice->dev_name,
			    lmdevice->params.l2_rx_desc_cnt[i],
			    post_count);
		}

		alloccnt += post_count;
	}

	if (alloccnt < BNX_RECV_INIT_FAIL_THRESH) {
		cmn_err(CE_WARN,
		    "%s: Failed to allocate minimum number of RX buffers.\n",
		    umdevice->dev_name);
#if BNX_RECV_INIT_FAIL_THRESH > 1
#warning Need to implement code to free previously allocated rx buffers in bnx_rxpkts_init error path.
#endif
		return (-1);
	}

	return (0);
}
/*
 * Interrupt-time entry point: deliver waiting packets on every ring
 * that is not already being processed.
 */
void
bnx_rxpkts_intr(um_device_t *const umdevice)
{
	int idx;

	for (idx = RX_CHAIN_IDX0; idx < NUM_RX_CHAIN; idx++) {
		um_recv_qinfo *qinfo = &(_RX_QINFO(umdevice, idx));

		if (qinfo->processing == B_FALSE) {
			bnx_recv_ring_recv(umdevice, idx);
		}
	}
}
/*
 * Replenish and repost receive buffers on every ring that is not
 * currently being processed by the receive path.
 */
void
bnx_rxpkts_post(um_device_t *const umdevice)
{
	int idx;

	for (idx = RX_CHAIN_IDX0; idx < NUM_RX_CHAIN; idx++) {
		um_recv_qinfo *qinfo = &(_RX_QINFO(umdevice, idx));

		if (qinfo->processing == B_FALSE) {
			bnx_recv_ring_fill(umdevice, idx);
			(void) lm_post_buffers(&(umdevice->lm_dev), idx, NULL);
		}
	}
}
/*
 * Reclaim all in-flight receive packets: dump each ring's wait queue
 * and abort the LM rx chain.  Rings are walked in reverse order.
 */
void
bnx_rxpkts_recycle(um_device_t *const umdevice)
{
	int idx;

	for (idx = NUM_RX_CHAIN - 1; idx >= RX_CHAIN_IDX0; idx--) {
		bnx_recv_ring_dump(umdevice, idx);
		lm_abort(&(umdevice->lm_dev), ABORT_OP_RX_CHAIN, idx);
	}
}
/*
 * Full receive-path teardown, in reverse ring order: flush pending
 * packets, abort the LM chain, free all DMA buffers, then finish the
 * per-ring queues.
 */
void
bnx_rxpkts_fini(um_device_t *const umdevice)
{
	int idx;

	for (idx = NUM_RX_CHAIN - 1; idx >= RX_CHAIN_IDX0; idx--) {
		bnx_recv_ring_dump(umdevice, idx);
		lm_abort(&(umdevice->lm_dev), ABORT_OP_RX_CHAIN, idx);
		bnx_recv_ring_free(umdevice, idx);
		bnx_recv_ring_fini(umdevice, idx);
	}
}