#include "ixgbe_sw.h"
static int ixgbe_tx_copy(ixgbe_tx_ring_t *, tx_control_block_t **,
link_list_t *, const void *, size_t);
static int ixgbe_tx_bind(ixgbe_tx_ring_t *, tx_control_block_t **,
link_list_t *, uint8_t *, size_t);
static uint_t ixgbe_tcb_done(tx_control_block_t *);
static int ixgbe_tx_fill_ring(ixgbe_tx_ring_t *, link_list_t *,
ixgbe_tx_context_t *, size_t);
static void ixgbe_save_desc(tx_control_block_t *, uint64_t, size_t);
static tx_control_block_t *ixgbe_get_free_list(ixgbe_tx_ring_t *,
link_list_t *);
static int ixgbe_get_context(mblk_t *, ixgbe_tx_context_t *);
static boolean_t ixgbe_check_context(ixgbe_tx_ring_t *,
ixgbe_tx_context_t *);
static void ixgbe_fill_context(struct ixgbe_adv_tx_context_desc *,
ixgbe_tx_context_t *);
/*
 * ixgbe_ring_tx - Transmit one message (mblk chain) on the given TX ring.
 *
 * 'arg' is the ixgbe_tx_ring_t for this ring; 'orig_mp' is a single
 * message (b_next must be NULL).  Returns NULL when the packet has been
 * consumed (transmitted or dropped), or returns orig_mp back to the MAC
 * layer when the send must be retried later (ring resources exhausted).
 *
 * Each fragment is either copied into a tcb's preallocated buffer (small
 * fragments, <= tx_copy_thresh) or DMA-bound in place (larger ones).  All
 * tcbs used for this packet are gathered on a local pending_list; on
 * success ixgbe_tx_fill_ring() moves them to the ring's work list, on
 * failure ixgbe_put_free_list() releases them wholesale.
 */
mblk_t *
ixgbe_ring_tx(void *arg, mblk_t *orig_mp)
{
	ixgbe_tx_ring_t *tx_ring = (ixgbe_tx_ring_t *)arg;
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	mblk_t *mp = orig_mp;
	mblk_t *pull_mp = NULL;	/* non-NULL after msgpullup() fallback */
	tx_control_block_t *tcb;
	size_t mbsize, offset, len;
	uint32_t desc_total;
	uint32_t copy_thresh;
	int desc_num;
	ixgbe_tx_context_t tx_context, *ctx = NULL;
	link_list_t pending_list;
	boolean_t limit_retry = B_FALSE;

	ASSERT(mp->b_next == NULL);

	/* Drop the packet when the device cannot currently transmit. */
	if ((ixgbe->ixgbe_state & IXGBE_SUSPENDED) ||
	    (ixgbe->ixgbe_state & IXGBE_ERROR) ||
	    (ixgbe->ixgbe_state & IXGBE_OVERTEMP) ||
	    !(ixgbe->ixgbe_state & IXGBE_STARTED) ||
	    ixgbe->link_state != LINK_STATE_UP) {
		freemsg(mp);
		return (NULL);
	}

	copy_thresh = ixgbe->tx_copy_thresh;
	mbsize = msgsize(mp);

	if (ixgbe->tx_hcksum_enable) {
		/*
		 * Parse the headers for checksum/LSO offload context.
		 * A negative return means the headers were unusable.
		 */
		ctx = &tx_context;
		if (ixgbe_get_context(mp, ctx) < 0) {
			freemsg(mp);
			return (NULL);
		}

		/*
		 * Enforce size limits: LSO payload is bounded by
		 * IXGBE_LSO_MAXLEN, everything else by the max frame
		 * size (FCS excluded).
		 */
		if ((ctx->lso_flag &&
		    ((mbsize - ctx->mac_hdr_len) > IXGBE_LSO_MAXLEN)) ||
		    (!ctx->lso_flag &&
		    (mbsize > (ixgbe->max_frame_size - ETHERFCSL)))) {
			freemsg(mp);
			IXGBE_DEBUGLOG_0(ixgbe, "ixgbe_tx: packet oversize");
			return (NULL);
		}
	}

retry:
	/* Try to reclaim descriptors when we are running low. */
	if (tx_ring->tbd_free < ixgbe->tx_recycle_thresh) {
		tx_ring->tx_recycle(tx_ring);
	}

	/*
	 * Still below the overload threshold: push back on the MAC
	 * layer and ask for a retransmit once space frees up.
	 */
	if (tx_ring->tbd_free < ixgbe->tx_overload_thresh) {
		tx_ring->reschedule = B_TRUE;
		tx_ring->stat_overload++;
		if (pull_mp != NULL)
			freemsg(pull_mp);
		return (orig_mp);
	}

	LINK_LIST_INIT(&pending_list);
	tcb = NULL;
	desc_num = 0;
	desc_total = 0;
	offset = 0;

	/*
	 * For LSO, copy the entire MAC+IP+L4 header into a single copy
	 * tcb first, walking the mblk chain as needed.  'offset' records
	 * how far into the current mblk the header copy consumed so the
	 * main loop below resumes at the right byte.
	 */
	if ((ctx != NULL) && ctx->lso_flag) {
		size_t hdr_len;

		hdr_len = ctx->ip_hdr_len + ctx->mac_hdr_len + ctx->l4_hdr_len;

		for (len = hdr_len; mp != NULL && len > 0; mp = mp->b_cont) {
			size_t mlen = MBLKL(mp);
			size_t amt = MIN(mlen, len);
			int ret;

			ret = ixgbe_tx_copy(tx_ring, &tcb, &pending_list,
			    mp->b_rptr, amt);
			if (ret != 0) {
				if (ret > 0)
					tx_ring->stat_lso_header_fail++;
				goto tx_failure;
			}

			len -= amt;

			if (amt < mlen) {
				/* Header ends mid-mblk; stay on this mblk. */
				offset = amt;
				break;
			}
		}
		ASSERT0(len);

		/* Close out the header tcb as its own descriptor. */
		desc_total += ixgbe_tcb_done(tcb);
		tcb = NULL;
	}

	/*
	 * Main loop: copy or DMA-bind each remaining fragment, stopping
	 * early if the packet would exceed the per-packet descriptor
	 * limit.
	 */
	while (mp != NULL && desc_total < IXGBE_TX_DESC_LIMIT) {
		uint8_t *rptr = mp->b_rptr + offset;
		int ret;

		len = MBLKL(mp) - offset;
		offset = 0;

		if (len > copy_thresh) {
			ret = ixgbe_tx_bind(tx_ring, &tcb, &pending_list, rptr,
			    len);
		} else {
			ret = ixgbe_tx_copy(tx_ring, &tcb, &pending_list, rptr,
			    len);
		}

		if (ret < 0)
			goto tx_failure;

		desc_total += ret;
		mp = mp->b_cont;
	}

	/* Finalize the last tcb in use. */
	desc_total += ixgbe_tcb_done(tcb);

	/*
	 * Too many descriptors for one packet: coalesce the whole
	 * message into one contiguous mblk with msgpullup() and retry
	 * exactly once (limit_retry guards against looping forever).
	 */
	if (desc_total >= IXGBE_TX_DESC_LIMIT) {
		VERIFY0(limit_retry);
		tx_ring->stat_break_tbd_limit++;

		/* Release everything gathered so far and start over. */
		ixgbe_put_free_list(tx_ring, &pending_list);
		desc_total = 0;
		offset = 0;

		pull_mp = msgpullup(orig_mp, -1);
		if (pull_mp == NULL) {
			tx_ring->reschedule = B_TRUE;
			return (orig_mp);
		}

		mp = pull_mp;
		limit_retry = B_TRUE;
		goto retry;
	}

	/*
	 * We need desc_total + 1 free descriptors (+1 for a possible
	 * context descriptor).  Attempt a recycle first, lock-free.
	 */
	if (tx_ring->tbd_free <= (desc_total + 1)) {
		tx_ring->tx_recycle(tx_ring);
	}

	mutex_enter(&tx_ring->tx_lock);
	/* Re-check under the TX lock; fail the send if still short. */
	if (tx_ring->tbd_free <= (desc_total + 1)) {
		tx_ring->stat_fail_no_tbd++;
		mutex_exit(&tx_ring->tx_lock);
		goto tx_failure;
	}

	/*
	 * Attach the message to the last tcb so it is only freed after
	 * the hardware has completed all of this packet's descriptors.
	 */
	tcb->mp = (pull_mp != NULL) ? pull_mp : orig_mp;

	desc_num = ixgbe_tx_fill_ring(tx_ring, &pending_list, ctx,
	    mbsize);
	ASSERT((desc_num == desc_total) || (desc_num == (desc_total + 1)));

	tx_ring->stat_obytes += mbsize;
	tx_ring->stat_opackets++;

	mutex_exit(&tx_ring->tx_lock);

	/* The pulled-up copy was sent; the original is now redundant. */
	if (pull_mp != NULL) {
		freemsg(orig_mp);
	}

	return (NULL);

tx_failure:
	/*
	 * Send failed: free any pullup copy, return all tcbs to the free
	 * list, and hand orig_mp back to MAC for a later retry.
	 */
	if (pull_mp) {
		freemsg(pull_mp);
	}

	ixgbe_put_free_list(tx_ring, &pending_list);
	tx_ring->reschedule = B_TRUE;

	return (orig_mp);
}
/*
 * ixgbe_tx_copy - Append 'len' bytes from 'buf' into a copy-mode tcb.
 *
 * Reuses *tcbp when it is a copy tcb with room; otherwise allocates a
 * fresh tcb from the free list (parking it on pending_list) and retires
 * the previous one via ixgbe_tcb_done().  Returns the number of
 * descriptors finalized by that retirement (usually 0), or -1 when no
 * free tcb is available.
 */
static int
ixgbe_tx_copy(ixgbe_tx_ring_t *tx_ring, tx_control_block_t **tcbp,
    link_list_t *pending_list, const void *buf, size_t len)
{
	tx_control_block_t *cur = *tcbp;
	uint32_t ndesc = 0;
	boolean_t need_new;

	/*
	 * A fresh tcb is needed when there is no current one, when the
	 * current one is in DMA-bind mode, or when its copy buffer
	 * cannot hold another 'len' bytes.
	 */
	need_new = (cur == NULL || cur->tx_type == USE_DMA ||
	    cur->tx_buf.len + len > cur->tx_buf.size);

	if (need_new) {
		tx_control_block_t *fresh;

		fresh = ixgbe_get_free_list(tx_ring, pending_list);
		if (fresh == NULL)
			return (-1);
		fresh->tx_type = USE_COPY;

		/* Finalize descriptors for the tcb being retired. */
		if (cur != NULL)
			ndesc += ixgbe_tcb_done(cur);
		*tcbp = cur = fresh;
	}

	ASSERT3S(cur->tx_type, ==, USE_COPY);

	if (len > 0) {
		dma_buffer_t *dbuf = &cur->tx_buf;

		bcopy(buf, dbuf->address + dbuf->len, len);
		dbuf->len += len;
		cur->frag_num++;
	}

	return (ndesc);
}
/*
 * ixgbe_tx_bind - DMA-bind 'len' bytes at 'buf' using a fresh tcb.
 *
 * Each binding always gets its own control block (taken from the free
 * list and parked on pending_list).  On success the previous *tcbp is
 * retired via ixgbe_tcb_done() and replaced by the new tcb; the number
 * of descriptors that retirement produced is returned.  Returns -1 when
 * no tcb is available or the DDI bind fails (the already-listed tcb is
 * then released by the caller's pending_list cleanup).
 */
static int
ixgbe_tx_bind(ixgbe_tx_ring_t *tx_ring, tx_control_block_t **tcbp,
    link_list_t *pending_list, uint8_t *buf, size_t len)
{
	tx_control_block_t *bind_tcb;
	uint_t ndesc = 0;
	int rv;

	bind_tcb = ixgbe_get_free_list(tx_ring, pending_list);
	if (bind_tcb == NULL)
		return (-1);

	rv = ddi_dma_addr_bind_handle(bind_tcb->tx_dma_handle, NULL,
	    (caddr_t)buf, len,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    0, NULL, NULL);
	if (rv != DDI_DMA_MAPPED) {
		/* bind_tcb stays on pending_list; caller frees it. */
		tx_ring->stat_fail_dma_bind++;
		return (-1);
	}

	bind_tcb->frag_num++;
	bind_tcb->tx_type = USE_DMA;

	/* Close out the previous tcb and hand the new one back. */
	if (*tcbp != NULL)
		ndesc += ixgbe_tcb_done(*tcbp);
	*tcbp = bind_tcb;

	return (ndesc);
}
/*
 * ixgbe_tcb_done - Finish populating a tcb's software descriptors.
 *
 * For a DMA tcb, record one descriptor per DMA cookie; for a copy tcb,
 * sync the buffer for the device and record a single descriptor.
 * Returns how many descriptors were recorded.
 */
static uint_t
ixgbe_tcb_done(tx_control_block_t *tcb)
{
	uint_t count = 0;

	switch (tcb->tx_type) {
	case USE_DMA: {
		const ddi_dma_cookie_t *cookie;

		cookie = ddi_dma_cookie_iter(tcb->tx_dma_handle, NULL);
		while (cookie != NULL) {
			ixgbe_save_desc(tcb, cookie->dmac_laddress,
			    cookie->dmac_size);
			count++;
			cookie = ddi_dma_cookie_iter(tcb->tx_dma_handle,
			    cookie);
		}
		break;
	}
	case USE_COPY: {
		dma_buffer_t *cbuf = &tcb->tx_buf;

		/* Make the copied bytes visible to the hardware. */
		DMA_SYNC(cbuf, DDI_DMA_SYNC_FORDEV);
		ixgbe_save_desc(tcb, cbuf->dma_address, cbuf->len);
		count++;
		break;
	}
	default:
		panic("invalid tcb type");
	}

	return (count);
}
/*
 * ixgbe_get_context - Parse the packet headers and fill the TX context
 * (checksum and LSO parameters) for this message.
 *
 * Returns 0 on success (including when no offload was requested), -1
 * when the checksum flags required for LSO are missing, and -2 for an
 * unsupported ethertype.
 *
 * Header bytes are located by walking the mblk chain: 'size' tracks how
 * many bytes of the packet the mblks visited so far cover, and each
 * "while (size <= offset)" loop advances to the mblk containing byte
 * 'offset'; 'pos' then points at that byte inside the current mblk.
 * NOTE(review): this addressing assumes no multi-byte header field of
 * interest straddles an mblk boundary — confirm against callers.
 */
static int
ixgbe_get_context(mblk_t *mp, ixgbe_tx_context_t *ctx)
{
	uint32_t start;
	uint32_t hckflags;
	uint32_t lsoflags;
	uint32_t lsocksum;
	uint32_t mss;
	uint32_t len;
	uint32_t size;
	uint32_t offset;
	unsigned char *pos;
	ushort_t etype;
	uint32_t mac_hdr_len;
	uint32_t l4_proto;
	uint32_t l4_hdr_len;

	ASSERT(mp != NULL);

	mac_hcksum_get(mp, &start, NULL, NULL, NULL, &hckflags);
	bzero(ctx, sizeof (ixgbe_tx_context_t));

	/* No checksum offload requested: nothing more to do. */
	if (hckflags == 0) {
		return (0);
	}

	ctx->hcksum_flags = hckflags;

	mac_lso_get(mp, &mss, &lsoflags);
	ctx->mss = mss;
	ctx->lso_flag = (lsoflags == HW_LSO);

	etype = 0;
	mac_hdr_len = 0;
	l4_proto = 0;

	/* Locate the ethertype, skipping over a VLAN tag if present. */
	size = len = MBLKL(mp);
	offset = offsetof(struct ether_header, ether_type);
	while (size <= offset) {
		mp = mp->b_cont;
		ASSERT(mp != NULL);
		len = MBLKL(mp);
		size += len;
	}
	pos = mp->b_rptr + offset + len - size;

	etype = ntohs(*(ushort_t *)(uintptr_t)pos);
	if (etype == ETHERTYPE_VLAN) {
		/* Tagged frame: re-read the ethertype past the tag. */
		offset = offsetof(struct ether_vlan_header, ether_type);
		while (size <= offset) {
			mp = mp->b_cont;
			ASSERT(mp != NULL);
			len = MBLKL(mp);
			size += len;
		}
		pos = mp->b_rptr + offset + len - size;

		etype = ntohs(*(ushort_t *)(uintptr_t)pos);
		mac_hdr_len = sizeof (struct ether_vlan_header);
	} else {
		mac_hdr_len = sizeof (struct ether_header);
	}

	lsocksum = HCK_PARTIALCKSUM;
	ctx->l3_proto = etype;
	switch (etype) {
	case ETHERTYPE_IP:
		if (ctx->lso_flag) {
			/*
			 * For LSO, zero the IPv4 total length field in
			 * the header copy...
			 */
			offset = offsetof(ipha_t, ipha_length) + mac_hdr_len;
			while (size <= offset) {
				mp = mp->b_cont;
				ASSERT(mp != NULL);
				len = MBLKL(mp);
				size += len;
			}
			pos = mp->b_rptr + offset + len - size;
			*((uint16_t *)(uintptr_t)(pos)) = 0;

			/* ...and the IPv4 header checksum field. */
			offset = offsetof(ipha_t, ipha_hdr_checksum) +
			    mac_hdr_len;
			while (size <= offset) {
				mp = mp->b_cont;
				ASSERT(mp != NULL);
				len = MBLKL(mp);
				size += len;
			}
			pos = mp->b_rptr + offset + len - size;
			*((uint16_t *)(uintptr_t)(pos)) = 0;

			/* IPv4 LSO additionally needs IP header cksum. */
			lsocksum |= HCK_IPV4_HDRCKSUM;
		}

		/* Extract the L4 protocol number. */
		offset = offsetof(ipha_t, ipha_protocol) + mac_hdr_len;
		while (size <= offset) {
			mp = mp->b_cont;
			ASSERT(mp != NULL);
			len = MBLKL(mp);
			size += len;
		}
		pos = mp->b_rptr + offset + len - size;
		l4_proto = *(uint8_t *)pos;
		break;
	case ETHERTYPE_IPV6:
		if (ctx->lso_flag) {
			/* Zero the IPv6 payload length field for LSO. */
			offset = offsetof(ip6_t, ip6_plen) + mac_hdr_len;
			while (size <= offset) {
				mp = mp->b_cont;
				ASSERT(mp != NULL);
				len = MBLKL(mp);
				size += len;
			}
			pos = mp->b_rptr + offset + len - size;
			*((uint16_t *)(uintptr_t)(pos)) = 0;
		}

		/* Extract the next-header (L4 protocol) field. */
		offset = offsetof(ip6_t, ip6_nxt) + mac_hdr_len;
		while (size <= offset) {
			mp = mp->b_cont;
			ASSERT(mp != NULL);
			len = MBLKL(mp);
			size += len;
		}
		pos = mp->b_rptr + offset + len - size;
		l4_proto = *(uint8_t *)pos;
		break;
	default:
		/* Only IPv4 and IPv6 are supported for TX offload. */
		IXGBE_DEBUGLOG_0(NULL, "Ether type error with tx hcksum");
		return (-2);
	}

	if (ctx->lso_flag) {
		/*
		 * LSO requires that the matching checksum offload flags
		 * were requested as well; reject the packet otherwise.
		 */
		if ((ctx->hcksum_flags & lsocksum) != lsocksum) {
			IXGBE_DEBUGLOG_2(NULL, "ixgbe_tx: h/w checksum flags "
			    "are not set for LSO, found 0x%x, needed bits 0x%x",
			    ctx->hcksum_flags, lsocksum);
			return (-1);
		}

		/*
		 * Read the TCP header length.  'start' (from
		 * mac_hcksum_get) is the checksum start offset, used
		 * here as the L3 header length.
		 */
		offset = mac_hdr_len + start;
		while (size <= offset) {
			mp = mp->b_cont;
			ASSERT(mp != NULL);
			len = MBLKL(mp);
			size += len;
		}
		pos = mp->b_rptr + offset + len - size;
		l4_hdr_len = TCP_HDR_LENGTH((tcph_t *)pos);
	} else {
		l4_hdr_len = 0;
	}

	ctx->mac_hdr_len = mac_hdr_len;
	ctx->ip_hdr_len = start;
	ctx->l4_proto = l4_proto;
	ctx->l4_hdr_len = l4_hdr_len;

	return (0);
}
/*
 * ixgbe_check_context - Decide whether a new context descriptor must be
 * written for this packet's offload parameters.
 *
 * Compares the packet's context against the one most recently loaded on
 * this ring (tx_ring->tx_context).  Returns B_TRUE when any relevant
 * field differs, B_FALSE when the loaded context can be reused.
 */
static boolean_t
ixgbe_check_context(ixgbe_tx_ring_t *tx_ring, ixgbe_tx_context_t *ctx)
{
	ixgbe_tx_context_t *prev;

	if (ctx == NULL)
		return (B_FALSE);

	prev = &tx_ring->tx_context;

	/* Any change in the offload parameters needs a new descriptor. */
	if (ctx->hcksum_flags != prev->hcksum_flags)
		return (B_TRUE);
	if (ctx->l4_proto != prev->l4_proto)
		return (B_TRUE);
	if (ctx->l3_proto != prev->l3_proto)
		return (B_TRUE);
	if (ctx->mac_hdr_len != prev->mac_hdr_len)
		return (B_TRUE);
	if (ctx->ip_hdr_len != prev->ip_hdr_len)
		return (B_TRUE);
	if (ctx->lso_flag != prev->lso_flag)
		return (B_TRUE);

	/* MSS and L4 header length only matter when doing LSO. */
	if (ctx->lso_flag &&
	    (ctx->mss != prev->mss || ctx->l4_hdr_len != prev->l4_hdr_len))
		return (B_TRUE);

	return (B_FALSE);
}
/*
 * ixgbe_fill_context - Populate an advanced TX context descriptor from
 * the parsed offload context.
 */
static void
ixgbe_fill_context(struct ixgbe_adv_tx_context_desc *ctx_tbd,
    ixgbe_tx_context_t *ctx)
{
	uint32_t tucmd;

	/* IP header length in the low bits, MAC header length shifted. */
	ctx_tbd->vlan_macip_lens = ctx->ip_hdr_len |
	    (ctx->mac_hdr_len << IXGBE_ADVTXD_MACLEN_SHIFT);

	tucmd = IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	if (ctx->lso_flag || (ctx->hcksum_flags & HCK_IPV4_HDRCKSUM)) {
		tucmd |= (ctx->l3_proto == ETHERTYPE_IP) ?
		    IXGBE_ADVTXD_TUCMD_IPV4 : IXGBE_ADVTXD_TUCMD_IPV6;
	}

	if (ctx->lso_flag || (ctx->hcksum_flags & HCK_PARTIALCKSUM)) {
		switch (ctx->l4_proto) {
		case IPPROTO_TCP:
			tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_UDP:
			/*
			 * NOTE(review): no bit is OR-ed in for UDP,
			 * matching the original; presumably the UDP L4T
			 * encoding is zero — confirm against the
			 * hardware definitions.
			 */
			break;
		default:
			IXGBE_DEBUGLOG_0(NULL, "L4 type error with tx hcksum");
			break;
		}
	}

	ctx_tbd->type_tucmd_mlhl = tucmd;
	ctx_tbd->seqnum_seed = 0;

	/* MSS and L4 header length are only meaningful for LSO. */
	if (ctx->lso_flag) {
		ctx_tbd->mss_l4len_idx =
		    (ctx->l4_hdr_len << IXGBE_ADVTXD_L4LEN_SHIFT) |
		    (ctx->mss << IXGBE_ADVTXD_MSS_SHIFT);
	} else {
		ctx_tbd->mss_l4len_idx = 0;
	}
}
/*
 * ixgbe_tx_fill_ring - Copy the descriptors saved in the pending tcbs
 * onto the hardware TX ring and notify the hardware.
 *
 * Writes an optional context descriptor first (only when the offload
 * context differs from the one currently loaded on this ring), then one
 * data descriptor per saved sw_desc_t.  Returns the total number of
 * descriptors written.  The caller must hold tx_ring->tx_lock and must
 * already have verified that enough free descriptors are available.
 */
static int
ixgbe_tx_fill_ring(ixgbe_tx_ring_t *tx_ring, link_list_t *pending_list,
    ixgbe_tx_context_t *ctx, size_t mbsize)
{
	struct ixgbe_hw *hw = &tx_ring->ixgbe->hw;
	boolean_t load_context;
	uint32_t index, tcb_index, desc_num;
	union ixgbe_adv_tx_desc *tbd, *first_tbd;
	tx_control_block_t *tcb, *first_tcb;
	uint32_t hcksum_flags;
	int i;

	ASSERT(mutex_owned(&tx_ring->tx_lock));

	tbd = NULL;
	first_tbd = NULL;
	first_tcb = NULL;
	desc_num = 0;
	hcksum_flags = 0;
	load_context = B_FALSE;

	index = tx_ring->tbd_tail;
	tcb_index = tx_ring->tbd_tail;

	if (ctx != NULL) {
		hcksum_flags = ctx->hcksum_flags;

		/*
		 * Only emit a context descriptor when the offload
		 * parameters changed since the last packet on this ring.
		 */
		load_context = ixgbe_check_context(tx_ring, ctx);

		if (load_context) {
			tbd = &tx_ring->tbd_ring[index];

			ixgbe_fill_context(
			    (struct ixgbe_adv_tx_context_desc *)tbd, ctx);

			index = NEXT_INDEX(index, 1, tx_ring->ring_size);
			desc_num++;

			/* Remember the context now loaded in hardware. */
			tx_ring->tx_context = *ctx;
		}
	}

	first_tbd = &tx_ring->tbd_ring[index];

	/*
	 * Drain the pending list: write one data descriptor per saved
	 * address/length pair, and park each tcb on the work list at the
	 * ring slot of its first descriptor.
	 */
	tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
	first_tcb = tcb;
	while (tcb != NULL) {
		for (i = 0; i < tcb->desc_num; i++) {
			tbd = &tx_ring->tbd_ring[index];

			tbd->read.buffer_addr = tcb->desc[i].address;
			tbd->read.cmd_type_len = tcb->desc[i].length;

			tbd->read.cmd_type_len |= IXGBE_ADVTXD_DCMD_DEXT
			    | IXGBE_ADVTXD_DTYP_DATA;

			tbd->read.olinfo_status = 0;

			index = NEXT_INDEX(index, 1, tx_ring->ring_size);
			desc_num++;
		}

		ASSERT(tx_ring->work_list[tcb_index] == NULL);
		tx_ring->work_list[tcb_index] = tcb;

		tcb_index = index;
		tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
	}

	/*
	 * Account the context descriptor (if one was written) to the
	 * first tcb so the recycle path frees the right number of
	 * descriptors; also record the packet's last descriptor index.
	 */
	if (load_context) {
		first_tcb->desc_num++;
	}
	first_tcb->last_index = PREV_INDEX(index, 1, tx_ring->ring_size);

	/*
	 * The first data descriptor carries the per-packet flags:
	 * frame CRC insertion, LSO enable, payload length, and checksum
	 * offload options.
	 */
	ASSERT(first_tbd != NULL);
	first_tbd->read.cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (ctx != NULL && ctx->lso_flag) {
			first_tbd->read.cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			/* PAYLEN for LSO excludes all headers. */
			first_tbd->read.olinfo_status |=
			    (mbsize - ctx->mac_hdr_len - ctx->ip_hdr_len
			    - ctx->l4_hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT;
		}
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
	case ixgbe_mac_E610:
		if (ctx != NULL && ctx->lso_flag) {
			first_tbd->read.cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			first_tbd->read.olinfo_status |=
			    (mbsize - ctx->mac_hdr_len - ctx->ip_hdr_len
			    - ctx->l4_hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT;
		} else {
			/* Non-LSO: report the full message size. */
			first_tbd->read.olinfo_status |=
			    (mbsize << IXGBE_ADVTXD_PAYLEN_SHIFT);
		}
		break;

	default:
		break;
	}

	if (hcksum_flags != 0) {
		if (hcksum_flags & HCK_IPV4_HDRCKSUM)
			first_tbd->read.olinfo_status |=
			    IXGBE_ADVTXD_POPTS_IXSM;
		if (hcksum_flags & HCK_PARTIALCKSUM)
			first_tbd->read.olinfo_status |=
			    IXGBE_ADVTXD_POPTS_TXSM;
	}

	/*
	 * The last descriptor ends the packet (EOP) and requests status
	 * writeback (RS) so the recycle path can see completion.
	 */
	ASSERT(tbd != NULL);
	tbd->read.cmd_type_len |=
	    IXGBE_ADVTXD_DCMD_EOP | IXGBE_ADVTXD_DCMD_RS;

	/* Flush the descriptors to memory before ringing the doorbell. */
	DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORDEV);

	i = ixgbe_atomic_reserve(&tx_ring->tbd_free, desc_num);
	ASSERT(i >= 0);

	/* Advance the software tail and kick the hardware TDT register. */
	tx_ring->tbd_tail = index;
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), index);

	if (ixgbe_check_acc_handle(tx_ring->ixgbe->osdep.reg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(tx_ring->ixgbe->dip,
		    DDI_SERVICE_DEGRADED);
		atomic_or_32(&tx_ring->ixgbe->ixgbe_state, IXGBE_ERROR);
	}

	return (desc_num);
}
/*
 * ixgbe_save_desc - Record one address/length pair in the tcb's software
 * descriptor array and bump its descriptor count.
 */
static void
ixgbe_save_desc(tx_control_block_t *tcb, uint64_t address, size_t length)
{
	sw_desc_t *slot = &tcb->desc[tcb->desc_num++];

	slot->address = address;
	slot->length = length;
}
/*
 * ixgbe_tx_recycle_legacy - Reclaim completed TX descriptors by checking
 * the Descriptor Done (DD) status bit of each packet's last descriptor.
 *
 * Completed tcbs are collected and returned to the free list, and MAC is
 * woken up when a previously blocked ring has enough free descriptors.
 * Returns the number of descriptors recycled.
 */
uint32_t
ixgbe_tx_recycle_legacy(ixgbe_tx_ring_t *tx_ring)
{
	uint32_t index, last_index, prev_index;
	int desc_num;
	boolean_t desc_done;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	mutex_enter(&tx_ring->recycle_lock);

	ASSERT(tx_ring->tbd_free <= tx_ring->ring_size);

	/* Ring completely free: nothing to recycle. */
	if (tx_ring->tbd_free == tx_ring->ring_size) {
		tx_ring->recycle_fail = 0;
		tx_ring->stall_watchdog = 0;
		if (tx_ring->reschedule) {
			tx_ring->reschedule = B_FALSE;
			mac_tx_ring_update(ixgbe->mac_hdl,
			    tx_ring->ring_handle);
		}
		mutex_exit(&tx_ring->recycle_lock);
		return (0);
	}

	/* Pull the latest status written back by the hardware. */
	DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(tx_ring->tbd_area.dma_handle) != DDI_FM_OK) {
		mutex_exit(&tx_ring->recycle_lock);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (0);
	}

	LINK_LIST_INIT(&pending_list);
	desc_num = 0;
	index = tx_ring->tbd_head;	/* first descriptor to examine */

	tcb = tx_ring->work_list[index];
	ASSERT(tcb != NULL);

	while (tcb != NULL) {
		/*
		 * last_index is the last descriptor of this tcb's
		 * packet; the sentinel MAX_TX_RING_SIZE marks a tcb
		 * whose packet has not been fully posted yet.
		 */
		last_index = tcb->last_index;
		if (last_index == MAX_TX_RING_SIZE)
			break;

		/* DD on the last descriptor => whole packet completed. */
		desc_done = tx_ring->tbd_ring[last_index].wb.status &
		    IXGBE_TXD_STAT_DD;
		if (desc_done) {
			/*
			 * Collect every tcb belonging to this packet (a
			 * packet may span several tcbs) until the one
			 * ending at last_index has been consumed.
			 */
			while (tcb != NULL) {
				tx_ring->work_list[index] = NULL;
				LIST_PUSH_TAIL(&pending_list, &tcb->link);
				desc_num += tcb->desc_num;
				index = NEXT_INDEX(index, tcb->desc_num,
				    tx_ring->ring_size);
				tcb = tx_ring->work_list[index];
				prev_index = PREV_INDEX(index, 1,
				    tx_ring->ring_size);
				if (prev_index == last_index)
					break;
			}
		} else {
			break;
		}
	}

	/* Nothing completed: count a failure for the stall watchdog. */
	if (desc_num == 0) {
		tx_ring->recycle_fail++;
		mutex_exit(&tx_ring->recycle_lock);
		return (0);
	}

	tx_ring->recycle_fail = 0;
	tx_ring->stall_watchdog = 0;

	/* Advance the head and publish the freed descriptor count. */
	tx_ring->tbd_head = index;
	atomic_add_32(&tx_ring->tbd_free, desc_num);

	/* Wake MAC if transmit was previously blocked on this ring. */
	if ((tx_ring->tbd_free >= ixgbe->tx_resched_thresh) &&
	    (tx_ring->reschedule)) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl,
		    tx_ring->ring_handle);
	}
	mutex_exit(&tx_ring->recycle_lock);

	/* Free the tcbs outside the lock to keep hold time short. */
	ixgbe_put_free_list(tx_ring, &pending_list);

	return (desc_num);
}
/*
 * ixgbe_tx_recycle_head_wb - Reclaim completed TX descriptors using the
 * head write-back word instead of per-descriptor DD bits.
 *
 * The hardware writes its current head index to host memory
 * (*tx_ring->tbd_head_wb); every descriptor between the software head
 * and that value has completed.  Returns the number of descriptors
 * recycled.
 */
uint32_t
ixgbe_tx_recycle_head_wb(ixgbe_tx_ring_t *tx_ring)
{
	uint32_t index;
	uint32_t head_wb;
	int desc_num;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	mutex_enter(&tx_ring->recycle_lock);

	ASSERT(tx_ring->tbd_free <= tx_ring->ring_size);

	/* Ring completely free: nothing to recycle. */
	if (tx_ring->tbd_free == tx_ring->ring_size) {
		tx_ring->recycle_fail = 0;
		tx_ring->stall_watchdog = 0;
		if (tx_ring->reschedule) {
			tx_ring->reschedule = B_FALSE;
			mac_tx_ring_update(ixgbe->mac_hdl,
			    tx_ring->ring_handle);
		}
		mutex_exit(&tx_ring->recycle_lock);
		return (0);
	}

	/*
	 * Sync only the head write-back word, which lives right after
	 * the descriptor ring in the same DMA area.
	 */
	(void) ddi_dma_sync(tx_ring->tbd_area.dma_handle,
	    sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size,
	    sizeof (uint32_t),
	    DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(tx_ring->tbd_area.dma_handle) != DDI_FM_OK) {
		mutex_exit(&tx_ring->recycle_lock);
		ddi_fm_service_impact(ixgbe->dip,
		    DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (0);
	}

	LINK_LIST_INIT(&pending_list);
	desc_num = 0;
	index = tx_ring->tbd_head;	/* next descriptor to recycle */

	head_wb = *tx_ring->tbd_head_wb;	/* hardware's head index */
	while (index != head_wb) {
		tcb = tx_ring->work_list[index];
		ASSERT(tcb != NULL);

		/*
		 * Stop if the hardware head landed inside this tcb's
		 * descriptor span - that packet is still in flight.
		 */
		if (OFFSET(index, head_wb, tx_ring->ring_size) <
		    tcb->desc_num) {
			break;
		}

		/* Whole tcb completed: queue it for release. */
		tx_ring->work_list[index] = NULL;
		LIST_PUSH_TAIL(&pending_list, &tcb->link);

		index = NEXT_INDEX(index, tcb->desc_num, tx_ring->ring_size);
		desc_num += tcb->desc_num;
	}

	/* Nothing completed: count a failure for the stall watchdog. */
	if (desc_num == 0) {
		tx_ring->recycle_fail++;
		mutex_exit(&tx_ring->recycle_lock);
		return (0);
	}

	tx_ring->recycle_fail = 0;
	tx_ring->stall_watchdog = 0;

	/* Advance the head and publish the freed descriptor count. */
	tx_ring->tbd_head = index;
	atomic_add_32(&tx_ring->tbd_free, desc_num);

	/* Wake MAC if transmit was previously blocked on this ring. */
	if ((tx_ring->tbd_free >= ixgbe->tx_resched_thresh) &&
	    (tx_ring->reschedule)) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl,
		    tx_ring->ring_handle);
	}
	mutex_exit(&tx_ring->recycle_lock);

	/* Free the tcbs outside the lock to keep hold time short. */
	ixgbe_put_free_list(tx_ring, &pending_list);

	return (desc_num);
}
/*
 * ixgbe_free_tcb - Release a tcb's per-packet resources and reset it to
 * a reusable state.  Safe to call with NULL.
 */
void
ixgbe_free_tcb(tx_control_block_t *tcb)
{
	if (tcb == NULL)
		return;

	if (tcb->tx_type == USE_COPY) {
		/* Reclaim the copy buffer by resetting its fill level. */
		tcb->tx_buf.len = 0;
	} else if (tcb->tx_type == USE_DMA) {
		/* Drop the DMA binding set up by ixgbe_tx_bind(). */
		(void) ddi_dma_unbind_handle(tcb->tx_dma_handle);
	}

	/* Free the attached message, if this tcb owned one. */
	if (tcb->mp != NULL) {
		freemsg(tcb->mp);
		tcb->mp = NULL;
	}

	/* Reset bookkeeping for the next use. */
	tcb->tx_type = USE_NONE;
	tcb->last_index = MAX_TX_RING_SIZE;
	tcb->frag_num = 0;
	tcb->desc_num = 0;
}
/*
 * ixgbe_get_free_list - Take one tcb from the ring's free list and push
 * it onto the caller's list.  Returns NULL when the free list is empty.
 */
static tx_control_block_t *
ixgbe_get_free_list(ixgbe_tx_ring_t *tx_ring, link_list_t *list)
{
	tx_control_block_t *tcb;
	uint32_t head;

	/* Reserve one entry; fail fast if the free list is exhausted. */
	if (ixgbe_atomic_reserve(&tx_ring->tcb_free, 1) < 0) {
		tx_ring->stat_fail_no_tcb++;
		return (NULL);
	}

	/* Detach the tcb at the head of the free list. */
	mutex_enter(&tx_ring->tcb_head_lock);
	head = tx_ring->tcb_head;
	tcb = tx_ring->free_list[head];
	ASSERT(tcb != NULL);
	tx_ring->free_list[head] = NULL;
	tx_ring->tcb_head = NEXT_INDEX(head, 1,
	    tx_ring->free_list_size);
	mutex_exit(&tx_ring->tcb_head_lock);

	/* Track the allocation on the caller's list. */
	LIST_PUSH_TAIL(list, &tcb->link);
	return (tcb);
}
/*
 * ixgbe_put_free_list - Tear down every tcb on pending_list and return
 * them all to the ring's free list.
 */
void
ixgbe_put_free_list(ixgbe_tx_ring_t *tx_ring, link_list_t *pending_list)
{
	tx_control_block_t *tcb;
	uint32_t slot;
	int returned = 0;

	/* First pass: release each tcb's resources in place. */
	for (tcb = (tx_control_block_t *)LIST_GET_HEAD(pending_list);
	    tcb != NULL;
	    tcb = (tx_control_block_t *)LIST_GET_NEXT(pending_list, tcb)) {
		ixgbe_free_tcb(tcb);
	}

	/* Second pass: append everything onto the ring's free list. */
	mutex_enter(&tx_ring->tcb_tail_lock);
	slot = tx_ring->tcb_tail;
	while ((tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list)) !=
	    NULL) {
		ASSERT(tx_ring->free_list[slot] == NULL);
		tx_ring->free_list[slot] = tcb;
		returned++;
		slot = NEXT_INDEX(slot, 1, tx_ring->free_list_size);
	}
	tx_ring->tcb_tail = slot;
	atomic_add_32(&tx_ring->tcb_free, returned);
	mutex_exit(&tx_ring->tcb_tail_lock);
}