#include "bge_impl.h"
#define U32TOPTR(x) ((void *)(uintptr_t)(uint32_t)(x))
#define PTRTOU32(x) ((uint32_t)(uintptr_t)(void *)(x))
#define BGE_DBG BGE_DBG_RECV
static void bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp);

/*
 * Return a recycled receive buffer to the chip: advance the buffer
 * ring's refill index and post it to the ring's mailbox register.
 * Buffers are handed back strictly in ring order, so the per-buffer
 * software state is not needed here.
 */
static void
bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp)
{
	uint64_t refill_slot;

	_NOTE(ARGUNUSED(srbdp))

	refill_slot = brp->rf_next;
	brp->rf_next = NEXT(refill_slot, brp->desc.nslots);
	bge_mbx_put(bgep, brp->chip_mbx_reg, refill_slot);
}
static mblk_t *bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p,
    recv_ring_t *rrp);

/*
 * Accept one packet described by the hardware receive buffer descriptor
 * *hw_rbd_p: validate the descriptor, copy the frame out of the DMA
 * buffer into a freshly allocated mblk, and recycle the buffer back to
 * the chip.
 *
 * Returns the new mblk on success.  Returns NULL either when the packet
 * is dropped (descriptor sanity failure, allocb failure — the buffer is
 * still recycled via the "refill" path) or when a fatal chip/DMA fault
 * is detected (the "error"/FMA paths — no recycle, chip marked in error).
 */
static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p, recv_ring_t *rrp)
{
	bge_rbd_t hw_rbd;	/* local snapshot of the hardware RBD */
	buff_ring_t *brp;	/* buffer ring the packet arrived on */
	sw_rbd_t *srbdp;	/* software state for that buffer */
	uchar_t *dp;
	mblk_t *mp;
	uint_t len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t pflags;

	mp = NULL;
	hw_rbd = *hw_rbd_p;

	/*
	 * Map the descriptor's ring flags onto the buffer ring it refers
	 * to.  Both ring flags at once (or any unknown combination) means
	 * the descriptor is corrupt: treat that as a fatal chip error.
	 */
	switch (hw_rbd.flags & (RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING)) {
	case RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING:
	default:
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring flags!"));
		goto error;

	case RBD_FLAG_JUMBO_RING:
		brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
		break;

#if (BGE_BUFF_RINGS_USED > 2)
	case RBD_FLAG_MINI_RING:
		brp = &bgep->buff[BGE_MINI_BUFF_RING];
		break;
#endif
	case 0:
		brp = &bgep->buff[BGE_STD_BUFF_RING];
		break;
	}

	/* An out-of-range buffer index is likewise a fatal fault. */
	if (hw_rbd.index >= brp->desc.nslots) {
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring index!"));
		goto error;
	}

	srbdp = &brp->sw_rbds[hw_rbd.index];

	/*
	 * Sanity checks against our software state.  Any mismatch drops
	 * the packet but still recycles the buffer back to the chip.
	 */
	if (hw_rbd.opaque != srbdp->pbuf.token) {
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "bad ring token"));
		goto refill;
	}

	if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "unterminated packet"));
		goto refill;
	}

	if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "errored packet"));
		goto refill;
	}

	len = hw_rbd.len;

	/*
	 * Bound the frame length.  When ASF is enabled and the chip has
	 * stripped the VLAN tag, the limits are adjusted by VLAN_TAGSZ.
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
	else
#endif
		maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;
	if (len > maxsize) {
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "oversize packet"));
		goto refill;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
	else
#endif
		minsize = ETHERMIN + ETHERFCSL;
	if (len < minsize) {
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "undersize packet"));
		goto refill;
	}

	/*
	 * Allocate the mblk, leaving BGE_HEADROOM in front, plus room
	 * to re-insert the VLAN tag if the chip stripped it (ASF case).
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
	} else {
#endif
		mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
	}
#endif
	if (mp == NULL) {
		/* Nothing we can do but drop the packet */
		goto refill;
	}

	/* Sync the received data before copying it out of the buffer. */
	DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
	if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
		/*
		 * Fatal DMA fault: release the mblk we just allocated
		 * (it was leaked on this path before) and flag the chip
		 * as errored so the factotum can reset-and-recover.
		 */
		freemsg(mp);
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}

	/*
	 * Copy the packet out of the DMA buffer.  In the ASF+VLAN case
	 * the chip stripped the tag, so rebuild an ether_vlan_header
	 * from the descriptor's TCI and splice the payload in after it.
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		struct ether_vlan_header *ehp;

		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
		ehp = (void *)dp;
		ehp->ether_tpid = ntohs(ETHERTYPE_VLAN);
		ehp->ether_tci = ntohs(hw_rbd.vlan_tci);
		bcopy(((uchar_t *)(DMA_VPTR(srbdp->pbuf))) + 2 * ETHERADDRL,
		    dp + 2 * ETHERADDRL + VLAN_TAGSZ,
		    len - 2 * ETHERADDRL);
	} else {
#endif
		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
	}

	/* Trim the trailing Ethernet FCS; hw_rbd.len includes it. */
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
	} else
#endif
		mp->b_wptr = dp + len - ETHERFCSL;

	/*
	 * Diagnostic only: the first 16 bytes repeating in two 8-byte
	 * halves suggests the chip replayed data — dump it for analysis.
	 */
	if (bcmp(dp, dp+8, 8) == 0)
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

	/* Advertise the hardware-computed checksums to the MAC layer. */
	pflags = 0;
	if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
		pflags |= HCK_FULLCKSUM;
	if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
		pflags |= HCK_IPV4_HDRCKSUM_OK;
	if (pflags != 0)
		mac_hcksum_set(mp, 0, 0, 0, hw_rbd.tcp_udp_cksum, pflags);

	/* Update per-ring statistics */
	rrp->rx_pkts++;
	rrp->rx_bytes += len;

refill:
	/*
	 * Replace the buffer in the ring it came from, whether or not
	 * we managed to pass the packet upstream.
	 */
	bge_refill(bgep, brp, srbdp);
	return (mp);

error:
	/*
	 * The ring descriptors themselves (not merely the packet data)
	 * appear corrupted: mark the chip errored and raise an FMA
	 * ereport so recovery can be attempted.
	 */
	bgep->bge_chip_state = BGE_CHIP_ERROR;
	bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
	return (NULL);
}
static mblk_t *bge_receive_ring(bge_t *bgep, recv_ring_t *rrp);

/*
 * Drain one receive return ring (caller holds rx_lock): accept packets
 * from rx_next up to the chip's producer index, capped at
 * BGE_MAXPKT_RCVED per call, and return them as a b_next-linked chain.
 * Acknowledges consumed slots to the chip via the ring's mailbox.
 */
static mblk_t *
bge_receive_ring(bge_t *bgep, recv_ring_t *rrp)
{
	bge_rbd_t *ring_base;
	uint64_t next_slot;
	mblk_t *chain_head = NULL;
	mblk_t **chain_tail = &chain_head;
	mblk_t *pkt;
	int npkts = 0;

	ASSERT(mutex_owned(rrp->rx_lock));

	/*
	 * Sync the descriptor area, then validate the producer index the
	 * chip reported before trusting it as a loop bound.
	 */
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
	if (*rrp->prod_index_p >= rrp->desc.nslots) {
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		return (NULL);
	}

	if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
		/* DMA fault: acknowledge everything and report the error. */
		rrp->rx_next = *rrp->prod_index_p;
		bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}

	/*
	 * Build the b_next chain until we either catch up with the
	 * producer index or hit the per-call packet limit.
	 */
	ring_base = DMA_VPTR(rrp->desc);
	next_slot = rrp->rx_next;
	while (next_slot != *rrp->prod_index_p &&
	    npkts < BGE_MAXPKT_RCVED) {
		pkt = bge_receive_packet(bgep, &ring_base[next_slot], rrp);
		if (pkt != NULL) {
			*chain_tail = pkt;
			chain_tail = &pkt->b_next;
			npkts++;
		}
		rrp->rx_next = next_slot = NEXT(next_slot, rrp->desc.nslots);
	}

	/* Tell the chip how far we have consumed. */
	bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		bgep->bge_chip_state = BGE_CHIP_ERROR;

	return (chain_head);
}
/*
 * MAC polling entry point: drain the given receive ring (arg) of up to
 * roughly bytes_to_pickup bytes of packets and return them as a
 * b_next-linked chain.  Takes the ring's rx_lock itself; acknowledges
 * consumed slots to the chip via the ring's mailbox.
 */
mblk_t *
bge_poll_ring(void *arg, int bytes_to_pickup)
{
	recv_ring_t *rrp = arg;
	bge_t *bgep = rrp->bgep;
	bge_rbd_t *ring_base;
	uint64_t next_slot;
	mblk_t *chain_head = NULL;
	mblk_t **chain_tail = &chain_head;
	mblk_t *pkt;
	size_t picked_up = 0;

	mutex_enter(rrp->rx_lock);

	/*
	 * Sync the descriptor area, then validate the producer index the
	 * chip reported before trusting it as a loop bound.
	 */
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
	if (*rrp->prod_index_p >= rrp->desc.nslots) {
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		mutex_exit(rrp->rx_lock);
		return (NULL);
	}

	if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
		/* DMA fault: acknowledge everything and report the error. */
		rrp->rx_next = *rrp->prod_index_p;
		bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		mutex_exit(rrp->rx_lock);
		return (NULL);
	}

	/*
	 * Build the b_next chain until we either catch up with the
	 * producer index or exceed the caller's byte budget (the last
	 * packet accepted may overshoot the budget).
	 */
	ring_base = DMA_VPTR(rrp->desc);
	next_slot = rrp->rx_next;
	while (next_slot != *rrp->prod_index_p &&
	    picked_up <= bytes_to_pickup) {
		pkt = bge_receive_packet(bgep, &ring_base[next_slot], rrp);
		if (pkt != NULL) {
			*chain_tail = pkt;
			picked_up += msgdsize(pkt);
			chain_tail = &pkt->b_next;
		}
		rrp->rx_next = next_slot = NEXT(next_slot, rrp->desc.nslots);
	}

	/* Tell the chip how far we have consumed. */
	bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		bgep->bge_chip_state = BGE_CHIP_ERROR;

	mutex_exit(rrp->rx_lock);
	return (chain_head);
}
void bge_receive(bge_t *bgep, bge_status_t *bsp);

/*
 * Interrupt-path receive handler: walk every configured receive ring,
 * drain those with new packets, and hand each resulting chain to the
 * MAC layer.  Rings currently in polling mode, or whose lock is busy,
 * are skipped and left for the poller / next interrupt.
 */
void
bge_receive(bge_t *bgep, bge_status_t *bsp)
{
	recv_ring_t *ring;
	uint64_t i;
	mblk_t *chain;

	for (i = 0; i < bgep->chipid.rx_rings; i++) {
		ring = &bgep->recv[i];

		ASSERT(ring->prod_index_p == RECV_INDEX_P(bsp, i));

		if (*ring->prod_index_p == ring->rx_next)
			continue;	/* nothing new on this ring */
		if (ring->poll_flag)
			continue;	/* ring is being polled; leave it */

		if (mutex_tryenter(ring->rx_lock) == 0)
			continue;	/* already being drained elsewhere */
		chain = bge_receive_ring(bgep, ring);
		mutex_exit(ring->rx_lock);

		if (chain != NULL)
			mac_rx_ring(bgep->mh, ring->ring_handle, chain,
			    ring->ring_gen_num);
	}
}