tbd: matched lines for the identifier tbd across four FreeBSD network drivers (bnxt, bwi, et, fxp); the hits appear grouped by driver below.
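bnxt: in these hits tbd points at a hardware descriptor, a struct tx_bd_long entry in the ring's backing store, written by the iflib encap path once per DMA segment.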
tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
tbd->len = htole16(pi->ipi_segs[seg].ds_len);
tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
tbd->flags_type = htole16(flags_type);
tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
tbd->len = htole16(pi->ipi_segs[seg].ds_len);
tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
tbd->flags_type = htole16(flags_type);
struct tx_bd_long *tbd;
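The first descriptor of a chain also packs ((nsegs + need_hi) << 24) together with the producer index into its opaque word, so the completion path can locate and size the chain. Below is a minimal model of the per-segment write, not the driver's code: every name prefixed model_ is a stand-in, and the byte-order conversions the driver performs are noted in comments.

#include <stdint.h>

struct model_tx_bd_long {	/* stand-in for struct tx_bd_long */
	uint16_t flags_type;
	uint16_t len;
	uint32_t opaque;
	uint64_t addr;
};

/* Write the descriptor at producer index pidx into the ring store. */
static void
model_write_txbd(void *ring_vaddr, uint32_t pidx, uint64_t paddr,
    uint16_t len, uint16_t flags_type)
{
	struct model_tx_bd_long *tbd;

	tbd = &((struct model_tx_bd_long *)ring_vaddr)[pidx];
	tbd->len = len;			/* htole16() in the driver */
	tbd->addr = paddr;		/* htole64() in the driver */
	tbd->flags_type = flags_type;	/* htole16() in the driver */
}

bwi: from here on tbd is software bookkeeping, a struct bwi_txbuf_data holding a slot array (tbd_buf), the next slot to fill (tbd_idx), and an in-flight count (tbd_used).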
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
idx = tbd->tbd_idx;
while (tbd->tbd_buf[idx].tb_mbuf == NULL &&
tbd->tbd_used + BWI_TX_NSPRDESC < BWI_TX_NDESC &&
tbd->tbd_used++;
tbd->tbd_idx = idx;
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
idx = tbd->tbd_idx;
KASSERT(tbd->tbd_buf[idx].tb_mbuf == NULL, ("slot %d not empty", idx));
tbd->tbd_used++;
tbd->tbd_idx = (idx + 1) % BWI_TX_NDESC;
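These hits are the producer side: the start loop keeps queueing while the current slot's tb_mbuf is NULL and BWI_TX_NSPRDESC descriptors remain spare, and the encap path asserts the slot is empty before claiming it. A self-contained model of the slot claim, with stand-in names and sizes:

#define MODEL_NDESC	128	/* stands in for BWI_TX_NDESC */
#define MODEL_NSPRDESC	2	/* stands in for BWI_TX_NSPRDESC */

struct model_txbuf { void *tb_mbuf; };

struct model_txbuf_data {
	struct model_txbuf tbd_buf[MODEL_NDESC];
	int tbd_idx;	/* next slot the producer will fill */
	int tbd_used;	/* slots currently owned by the hardware */
};

/* Claim the next slot for mbuf; returns the slot index, or -1 if full. */
static int
model_claim_tx_slot(struct model_txbuf_data *tbd, void *mbuf)
{
	int idx = tbd->tbd_idx;

	if (tbd->tbd_buf[idx].tb_mbuf != NULL ||
	    tbd->tbd_used + MODEL_NSPRDESC >= MODEL_NDESC)
		return (-1);
	tbd->tbd_buf[idx].tb_mbuf = mbuf;
	tbd->tbd_used++;
	tbd->tbd_idx = (idx + 1) % MODEL_NDESC;
	return (idx);
}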
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i];
&tbd->tbd_buf[j].tb_dmap);
tbd->tbd_buf[k].tb_dmap);
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i];
struct bwi_txbuf *tb = &tbd->tbd_buf[j];
struct bwi_txbuf_data *tbd;
tbd = &sc->sc_tx_bdata[ring_idx];
tbd->tbd_idx = 0;
tbd->tbd_used = 0;
struct bwi_txbuf_data *tbd;
tbd = &sc->sc_tx_bdata[ring_idx];
struct bwi_txbuf *tb = &tbd->tbd_buf[i];
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
struct bwi_txbuf *tb = &tbd->tbd_buf[idx];
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
struct bwi_txbuf *tb = &tbd->tbd_buf[idx];
struct bwi_txbuf_data *tbd;
tbd = &sc->sc_tx_bdata[ring_idx];
KASSERT(tbd->tbd_used > 0, ("tbd_used %d", tbd->tbd_used));
tbd->tbd_used--;
tb = &tbd->tbd_buf[buf_idx];
if (tbd->tbd_used == 0)
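The remaining bwi hits are ring init (tbd_idx and tbd_used zeroed), DMA-map setup and teardown, and the completion path, which asserts tbd_used > 0 before releasing a slot and treats tbd_used == 0 as its cue that the ring has drained. Continuing the model above:

/* Completion side of the same model: release one finished slot. */
static void
model_complete_tx_slot(struct model_txbuf_data *tbd, int buf_idx)
{
	struct model_txbuf *tb = &tbd->tbd_buf[buf_idx];

	/* The driver KASSERTs tbd_used > 0 here. */
	tb->tb_mbuf = NULL;	/* the driver frees the mbuf first */
	tbd->tbd_used--;
	/* tbd_used == 0 lets the caller stop its watchdog. */
}

et: here tbd is a struct et_txbuf_data (sc_tx_data), a slot array whose entries carry both an mbuf pointer (tb_mbuf) and a DMA map (tb_dmap), plus a consumer index (tbd_start_index), a wrap flag (tbd_start_wrap), and a used count (tbd_used).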
tbd = &sc->sc_tx_data;
&tbd->tbd_buf[i].tb_dmap);
struct et_txbuf_data *tbd;
tbd = &sc->sc_tx_data;
if (tbd->tbd_buf[i].tb_dmap) {
tbd->tbd_buf[i].tb_dmap);
tbd->tbd_buf[i].tb_dmap = NULL;
struct et_txbuf_data *tbd;
tbd = &sc->sc_tx_data;
if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
if (tbd->tbd_used > 0)
struct et_txbuf_data *tbd;
tbd = &sc->sc_tx_data;
tb = &tbd->tbd_buf[i];
struct et_txbuf_data *tbd;
tbd = &sc->sc_tx_data;
tbd->tbd_start_index = 0;
tbd->tbd_start_wrap = 0;
tbd->tbd_used = 0;
struct et_txbuf_data *tbd;
tbd = &sc->sc_tx_data;
map = tbd->tbd_buf[first_idx].tb_dmap;
if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
tbd->tbd_buf[last_idx].tb_dmap = map;
tbd->tbd_buf[last_idx].tb_mbuf = m;
tbd->tbd_used += nsegs;
MPASS(tbd->tbd_used <= ET_TX_NDESC);
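Two details stand out in the encap hits: the fullness checks keep ET_NSEG_SPARE descriptors in reserve (and refuse a chain once tbd_used + nsegs would exceed ET_TX_NDESC - 1), and the DMA map loaded at first_idx is swapped into last_idx, where the mbuf is also recorded, so teardown finds map and mbuf in one slot. A model of that swap, stand-in types only:

struct model_et_txbuf { void *tb_mbuf; void *tb_dmap; };

/* Record a loaded chain of nsegs descriptors ending at last_idx. */
static void
model_et_record_chain(struct model_et_txbuf *buf, int first_idx,
    int last_idx, void *m, int nsegs, int *used)
{
	void *map = buf[first_idx].tb_dmap;

	buf[first_idx].tb_dmap = buf[last_idx].tb_dmap;
	buf[last_idx].tb_dmap = map;
	buf[last_idx].tb_mbuf = m;
	*used += nsegs;
}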
struct et_txbuf_data *tbd;
tbd = &sc->sc_tx_data;
if (tbd->tbd_used == 0)
while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
MPASS(tbd->tbd_start_index < ET_TX_NDESC);
tb = &tbd->tbd_buf[tbd->tbd_start_index];
if (++tbd->tbd_start_index == ET_TX_NDESC) {
tbd->tbd_start_index = 0;
tbd->tbd_start_wrap ^= 1;
MPASS(tbd->tbd_used > 0);
tbd->tbd_used--;
if (tbd->tbd_used == 0)
if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
struct et_txbuf_data *tbd;
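The completion walk advances tbd_start_index toward the end index the hardware reports, flipping tbd_start_wrap at each wraparound so that equal indices with equal wrap bits mean empty rather than full. A model of the walk, reusing model_et_txbuf from the previous sketch:

#define MODEL_ET_NDESC	256	/* stands in for ET_TX_NDESC */

static void
model_et_txeof(struct model_et_txbuf *buf, int *start, int *wrap,
    int *used, int end, int end_wrap)
{
	while (*start != end || *wrap != end_wrap) {
		buf[*start].tb_mbuf = NULL; /* unload map, free mbuf here */
		if (++(*start) == MODEL_ET_NDESC) {
			*start = 0;
			*wrap ^= 1;
		}
		(*used)--;	/* the driver MPASSes *used > 0 first */
	}
}

fxp: in the last group tbd is not a pointer but the inline array of hardware buffer descriptors inside a transmit command block, declared as struct fxp_tbd tbd[FXP_NTXSEG + 1] and reached through the tx_cb_u union.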
cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
cbp->tbd[nseg].tb_size |= htole32(0x8000);
cbp->tbd[1].tb_size |= htole32(tcp_payload << 16);
txp->tx_cb->tbd[0].tb_addr = 0;
txp[i].tx_cb->tbd[0].tb_addr = 0;
htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
struct fxp_tbd tbd[FXP_NTXSEG + 1];
#define tbd tx_cb_u.tbd
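Each descriptor is a little-endian (tb_addr, tb_size) pair, one per DMA segment. As the hits suggest, the tbd[i + 1] variant is used when an offload (IPCB) block overlays the start of the array, that mode ORs 0x8000 into the last entry's size word, and the TSO path packs the TCP payload length into the upper half of tbd[1].tb_size. A minimal model of the array fill; the mode-dependent details are reduced to an off parameter and a flag comment:

#include <stdint.h>

struct model_fxp_tbd {	/* stand-in for struct fxp_tbd */
	uint32_t tb_addr;
	uint32_t tb_size;
};

#define MODEL_NTXSEG	32	/* stands in for FXP_NTXSEG */

/*
 * Fill the command block's descriptor array; off is 1 when the start
 * of the array holds an offload block (the tbd[i + 1] hits above).
 */
static void
model_fxp_fill_tbd(struct model_fxp_tbd tbd[MODEL_NTXSEG + 1], int off,
    const uint64_t *addrs, const uint32_t *lens, int nseg)
{
	int i;

	for (i = 0; i < nseg; i++) {
		tbd[i + off].tb_addr = (uint32_t)addrs[i]; /* htole32() */
		tbd[i + off].tb_size = lens[i];		   /* htole32() */
	}
	/* Mirrors the tbd[nseg].tb_size |= 0x8000 hit (off == 1 case). */
	tbd[nseg - 1 + off].tb_size |= 0x8000;
}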