tx_ring
ACX_TX_RING_SIZE, (caddr_t *)&rd->tx_ring, BUS_DMA_NOWAIT);
rd->tx_ring, ACX_TX_RING_SIZE, NULL, BUS_DMA_WAITOK);
bd->tx_buf[i].tb_desc1 = &rd->tx_ring[i * 2];
bd->tx_buf[i].tb_desc2 = &rd->tx_ring[(i * 2) + 1];
if (rd->tx_ring != NULL) {
bzero(&rd->tx_ring[i], sizeof(struct acx_host_desc));
rd->tx_ring[i].h_ctrl = htole16(DESC_CTRL_HOSTOWN);
rd->tx_ring[i].h_next_desc = htole32(rd->tx_ring_paddr);
rd->tx_ring[i].h_next_desc = htole32(paddr);
bzero(rd->tx_ring, ACX_TX_RING_SIZE);
struct acx_host_desc *tx_ring;
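The acx fragments above chain each host descriptor to the bus address of its successor, mark it host-owned via DESC_CTRL_HOSTOWN, and wrap the last entry back to the ring base (they also show a pair of adjacent descriptors per tx buffer, which this sketch leaves out). A minimal standalone sketch of that pattern follows; the ring depth, the DESC_CTRL_HOSTOWN value, and the reduced descriptor layout are assumptions, not the driver's real definitions.

#include <endian.h>	/* htole16()/htole32(); <sys/endian.h> on the BSDs */
#include <stdint.h>
#include <string.h>

#define ACX_TX_DESC_COUNT	16	/* assumed ring depth */
#define DESC_CTRL_HOSTOWN	0x80	/* assumed bit value */

struct acx_host_desc {			/* reduced stand-in layout */
	uint32_t	h_next_desc;	/* bus address of the next descriptor */
	uint16_t	h_ctrl;
	uint16_t	h_pad;
};

/*
 * Point every descriptor at the bus address of its successor and wrap
 * the last one back to the ring base, so the device can walk the chain
 * indefinitely.  ring_paddr is the bus address the ring was loaded at.
 */
static void
tx_ring_init(struct acx_host_desc *ring, uint32_t ring_paddr)
{
	int i;

	memset(ring, 0, sizeof(ring[0]) * ACX_TX_DESC_COUNT);
	for (i = 0; i < ACX_TX_DESC_COUNT; i++) {
		ring[i].h_ctrl = htole16(DESC_CTRL_HOSTOWN);
		if (i == ACX_TX_DESC_COUNT - 1)
			ring[i].h_next_desc = htole32(ring_paddr);
		else
			ring[i].h_next_desc = htole32(ring_paddr +
			    (i + 1) * sizeof(ring[0]));
	}
}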
qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_data_ring);
qwx_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_comp_ring);
ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
ret = qwx_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
srng = &sc->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
dp->tx_ring[i].tcl_data_ring.ring_id);
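qwx_dp_srng_setup() and qwx_dp_srng_cleanup() appear in matched pairs per TCL ring, which implies the usual unwind-on-partial-failure shape. A generic standalone sketch of that shape, with stub functions standing in for the real srng setup and cleanup calls:

#include <stdio.h>

#define NRINGS 3	/* stands in for the TCL ring count */

/* Stubs standing in for qwx_dp_srng_setup()/qwx_dp_srng_cleanup(). */
static int
ring_setup(int i)
{
	printf("setup %d\n", i);
	return 0;
}

static void
ring_cleanup(int i)
{
	printf("cleanup %d\n", i);
}

/*
 * Bring up one ring per TCL index; on the first failure, tear down
 * everything already set up, in reverse order, before returning.
 */
static int
setup_all_rings(void)
{
	int i, ret = 0;

	for (i = 0; i < NRINGS; i++) {
		ret = ring_setup(i);
		if (ret != 0)
			break;
	}
	if (ret != 0) {
		while (--i >= 0)
			ring_cleanup(i);
	}
	return ret;
}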
qwx_dp_tx_ring_free_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
if (tx_ring->data == NULL)
struct qwx_tx_data *tx_data = &tx_ring->data[i];
free(tx_ring->data, M_DEVBUF,
tx_ring->data = NULL;
qwx_dp_tx_ring_alloc_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
if (tx_ring->data == NULL)
struct qwx_tx_data *tx_data = &tx_ring->data[i];
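The alloc/free pair above leans on two OpenBSD idioms: mallocarray(9), which overflow-checks the nmemb * size multiplication, and the three-argument free(9), which wants the original allocation size back. A sketch of the pairing over the fragments' own types; the allocation flags are an assumption, and the per-buffer dmamap teardown visible in the tx_data loop above is omitted.

int
tx_ring_alloc_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
{
	/* Overflow-checked nmemb * size; the flags here are assumed. */
	tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
	    sizeof(struct qwx_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (tx_ring->data == NULL)
		return ENOMEM;
	return 0;
}

void
tx_ring_free_tx_data(struct qwx_softc *sc, struct dp_tx_ring *tx_ring)
{
	if (tx_ring->data == NULL)
		return;
	/* free(9) takes the same size the allocation was made with. */
	free(tx_ring->data, M_DEVBUF,
	    sc->hw_params.tx_ring_size * sizeof(struct qwx_tx_data));
	tx_ring->data = NULL;
}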
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
ret = qwx_dp_tx_ring_alloc_tx_data(sc, &dp->tx_ring[i]);
dp->tx_ring[i].cur = 0;
dp->tx_ring[i].queued = 0;
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
if (!dp->tx_ring[i].tx_status) {
spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
idr_for_each(&dp->tx_ring[i].txbuf_idr,
idr_destroy(&dp->tx_ring[i].txbuf_idr);
spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
qwx_dp_tx_ring_free_tx_data(sc, &dp->tx_ring[i]);
free(dp->tx_ring[i].tx_status, M_DEVBUF,
dp->tx_ring[i].tx_status = NULL;
struct dp_tx_ring *tx_ring)
tx_data = &tx_ring->data[msdu_id];
if (tx_ring->queued > 0)
tx_ring->queued--;
qwx_dp_tx_htt_tx_complete_buf(struct qwx_softc *sc, struct dp_tx_ring *tx_ring,
qwx_dp_tx_free_txbuf(sc, ts->msdu_id, tx_ring);
uint8_t mac_id, uint32_t msdu_id, struct dp_tx_ring *tx_ring)
qwx_dp_tx_htt_tx_complete_buf(sc, tx_ring, &ts);
qwx_dp_tx_free_txbuf(sc, msdu_id, tx_ring);
qwx_dp_tx_complete_msdu(struct qwx_softc *sc, struct dp_tx_ring *tx_ring,
struct qwx_tx_data *tx_data = &tx_ring->data[msdu_id];
if (tx_ring->queued > 0)
tx_ring->queued--;
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
while ((QWX_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
tx_ring->tx_status_tail) &&
memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc,
tx_ring->tx_status_head =
QWX_TX_COMPL_NEXT(tx_ring->tx_status_head);
(QWX_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
tx_ring->tx_status_tail))) {
while (QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail) !=
tx_ring->tx_status_head) {
tx_ring->tx_status_tail =
QWX_TX_COMPL_NEXT(tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
(void *)tx_status, mac_id, msdu_id, tx_ring);
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
spin_unlock(&tx_ring->tx_idr_lock);
qwx_dp_tx_complete_msdu(sc, tx_ring, msdu_id, &ts);
if (tx_ring->queued < sc->hw_params.tx_ring_size - 1) {
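The head/tail arithmetic above is a classic bounded ring: initializing head = 0 and tail = DP_TX_COMP_RING_SIZE - 1 makes "empty" mean "tail is one behind head", the fill loop stops when advancing head would land on tail, and the drain loop walks tail up toward head. A standalone sketch of exactly that protocol; COMPL_NEXT mirrors the QWX_TX_COMPL_NEXT macro, and the descriptor type and ring depth are stand-ins.

#include <stdint.h>
#include <string.h>

#define DP_TX_COMP_RING_SIZE 32		/* assumed depth */
#define COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)

struct hal_wbm_release_ring { uint32_t info[8]; };	/* stand-in */

struct comp_ring {
	struct hal_wbm_release_ring status[DP_TX_COMP_RING_SIZE];
	uint32_t head;	/* next slot the interrupt handler writes */
	uint32_t tail;	/* last slot already consumed */
};

/* Empty when head is one ahead of tail, matching head = 0, tail = SIZE - 1. */
static void
comp_ring_init(struct comp_ring *r)
{
	r->head = 0;
	r->tail = DP_TX_COMP_RING_SIZE - 1;
}

/* Copy one descriptor in; full when advancing head would land on tail. */
static int
comp_ring_put(struct comp_ring *r, const struct hal_wbm_release_ring *desc)
{
	if (COMPL_NEXT(r->head) == r->tail)
		return -1;		/* full, caller must drain first */
	memcpy(&r->status[r->head], desc, sizeof(*desc));
	r->head = COMPL_NEXT(r->head);
	return 0;
}

/* Drain: advance tail toward head, handing each entry to the caller. */
static const struct hal_wbm_release_ring *
comp_ring_get(struct comp_ring *r)
{
	if (COMPL_NEXT(r->tail) == r->head)
		return NULL;		/* empty */
	r->tail = COMPL_NEXT(r->tail);
	return &r->status[r->tail];
}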
struct dp_tx_ring *tx_ring;
tx_ring = &dp->tx_ring[ti.ring_id];
if (tx_ring->queued >= sc->hw_params.tx_ring_size) {
msdu_id = tx_ring->cur;
tx_data = &tx_ring->data[msdu_id];
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tx_ring->queued++;
tx_ring->cur = (tx_ring->cur + 1) % sc->hw_params.tx_ring_size;
if (tx_ring->queued >= sc->hw_params.tx_ring_size - 1)
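On the submit side, cur doubles as the msdu_id handed to the hardware and queued throttles the interface queue: stop one slot before exhaustion, restart (in the completion path above) once queued drops back below tx_ring_size - 1. A compact, hypothetical helper over the dp_tx_ring fields shown in the fragments; the thresholds are the ones visible above.

static int
tx_ring_enqueue(struct dp_tx_ring *tx_ring, int ring_size, int *stop)
{
	int msdu_id;

	if (tx_ring->queued >= ring_size)
		return -1;	/* no free slot, caller drops or defers */
	msdu_id = tx_ring->cur;
	tx_ring->queued++;
	tx_ring->cur = (tx_ring->cur + 1) % ring_size;
	/* Ask the caller to pause the ifq one slot before exhaustion. */
	*stop = (tx_ring->queued >= ring_size - 1);
	return msdu_id;
}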
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct dp_tx_ring *tx_ring)
tx_data = &tx_ring->data[msdu_id];
if (tx_ring->queued > 0)
tx_ring->queued--;
qwz_dp_tx_htt_tx_complete_buf(struct qwz_softc *sc, struct dp_tx_ring *tx_ring,
qwz_dp_tx_free_txbuf(sc, ts->msdu_id, tx_ring);
uint8_t mac_id, uint32_t msdu_id, struct dp_tx_ring *tx_ring)
qwz_dp_tx_htt_tx_complete_buf(sc, tx_ring, &ts);
qwz_dp_tx_free_txbuf(sc, msdu_id, tx_ring);
qwz_dp_tx_complete_msdu(struct qwz_softc *sc, struct dp_tx_ring *tx_ring,
struct qwz_tx_data *tx_data = &tx_ring->data[msdu_id];
if (tx_ring->queued > 0)
tx_ring->queued--;
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
while ((QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
tx_ring->tx_status_tail) &&
memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc,
tx_ring->tx_status_head =
QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head);
(QWZ_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
tx_ring->tx_status_tail))) {
while (QWZ_TX_COMPL_NEXT(tx_ring->tx_status_tail) !=
tx_ring->tx_status_head) {
tx_ring->tx_status_tail =
QWZ_TX_COMPL_NEXT(tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
(void *)tx_status, mac_id, msdu_id, tx_ring);
spin_lock(&tx_ring->tx_idr_lock);
msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
spin_unlock(&tx_ring->tx_idr_lock);
spin_unlock(&tx_ring->tx_idr_lock);
qwz_dp_tx_complete_msdu(sc, tx_ring, msdu_id, &ts);
if (tx_ring->queued < sc->hw_params.tx_ring_size - 1) {
struct dp_tx_ring *tx_ring;
tx_ring = &dp->tx_ring[ti.ring_id];
if (tx_ring->queued >= sc->hw_params.tx_ring_size) {
msdu_id = tx_ring->cur;
tx_data = &tx_ring->data[msdu_id];
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
tx_ring->queued++;
tx_ring->cur = (tx_ring->cur + 1) % sc->hw_params.tx_ring_size;
if (tx_ring->queued >= sc->hw_params.tx_ring_size - 1)
qwz_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_data_ring);
qwz_dp_srng_cleanup(sc, &dp->tx_ring[i].tcl_comp_ring);
ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_data_ring,
ret = qwz_dp_srng_setup(sc, &dp->tx_ring[i].tcl_comp_ring,
srng = &sc->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
qwz_dp_tx_ring_free_tx_data(struct qwz_softc *sc, struct dp_tx_ring *tx_ring)
if (tx_ring->data == NULL)
struct qwz_tx_data *tx_data = &tx_ring->data[i];
free(tx_ring->data, M_DEVBUF,
tx_ring->data = NULL;
qwz_dp_tx_ring_alloc_tx_data(struct qwz_softc *sc, struct dp_tx_ring *tx_ring)
tx_ring->data = mallocarray(sc->hw_params.tx_ring_size,
if (tx_ring->data == NULL)
struct qwz_tx_data *tx_data = &tx_ring->data[i];
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
ret = qwz_dp_tx_ring_alloc_tx_data(sc, &dp->tx_ring[i]);
dp->tx_ring[i].cur = 0;
dp->tx_ring[i].queued = 0;
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
dp->tx_ring[i].tx_status = malloc(size, M_DEVBUF,
if (!dp->tx_ring[i].tx_status) {
spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
idr_for_each(&dp->tx_ring[i].txbuf_idr,
idr_destroy(&dp->tx_ring[i].txbuf_idr);
spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
qwz_dp_tx_ring_free_tx_data(sc, &dp->tx_ring[i]);
free(dp->tx_ring[i].tx_status, M_DEVBUF,
dp->tx_ring[i].tx_status = NULL;
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
tx->tx_ring.vaddr = BNXT_DMA_KVA(tx->tx_ring_mem);
tx->tx_ring.paddr = BNXT_DMA_DVA(tx->tx_ring_mem);
&tx->tx_ring, cp->ring.phys_id,
bnxt_write_tx_doorbell(sc, &tx->tx_ring, 0);
tx->tx_slots = mallocarray(tx->tx_ring.ring_size, sizeof(*bs),
for (i = 0; i < tx->tx_ring.ring_size; i++) {
bnxt_free_slots(sc, tx->tx_slots, i, tx->tx_ring.ring_size);
&tx->tx_ring);
bnxt_free_slots(sc, tx->tx_slots, tx->tx_ring.ring_size,
tx->tx_ring.ring_size);
&tx->tx_ring);
free += tx->tx_ring.ring_size;
if (idx == tx->tx_ring.ring_size)
if (idx == tx->tx_ring.ring_size)
if (idx == tx->tx_ring.ring_size)
if (++tx->tx_prod >= tx->tx_ring.ring_size)
bnxt_write_tx_doorbell(sc, &tx->tx_ring, idx);
struct bnxt_ring tx_ring;
if (idx >= tx->tx_ring.ring_size)
idx -= tx->tx_ring.ring_size;
if (++tx->tx_cons >= tx->tx_ring.ring_size)
tx->tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
tx->tx_ring.id = BNXT_TX_RING_ID_BASE + bq->q_index;
tx->tx_ring.doorbell = tx->tx_ring.id * 0x80;
tx->tx_ring.ring_size = PAGE_SIZE / sizeof(struct tx_bd_short);
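Two details in the bnxt fragments are worth spelling out: sizing the ring as PAGE_SIZE / sizeof(struct tx_bd_short) keeps the whole descriptor array inside one page, and the producer/consumer indices wrap with a compare-and-reset rather than a modulo. A standalone sketch of the wrap; the descriptor layout is a stand-in, not the real tx_bd_short.

#include <stdint.h>

#define PAGE_SIZE 4096			/* for the sketch */

struct tx_bd_short {			/* stand-in descriptor layout */
	uint16_t	flags_type;
	uint16_t	len;
	uint32_t	opaque;
	uint64_t	addr;
};

struct tx_state {
	uint32_t	tx_prod;	/* driver-owned producer index */
	uint32_t	tx_cons;	/* advanced as completions arrive */
	uint32_t	ring_size;	/* PAGE_SIZE / sizeof(struct tx_bd_short) */
};

/* Compare-and-reset wrap: correct for any size, not just powers of two. */
static uint32_t
tx_next_prod(struct tx_state *tx)
{
	if (++tx->tx_prod >= tx->ring_size)
		tx->tx_prod = 0;
	return tx->tx_prod;	/* the index to ring the doorbell with */
}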
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
tx_ring->tr_ready_index = 0;
tx_ring->tr_ready_wrap = 0;
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
first_idx = tx_ring->tr_ready_index;
td = &tx_ring->tr_desc[idx];
KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
tx_ring->tr_ready_index = 0;
tx_ring->tr_ready_wrap ^= 1;
td = &tx_ring->tr_desc[first_idx];
bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
if (tx_ring->tr_ready_wrap)
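tr_ready_wrap is the interesting bit here: with bare indices, producer == consumer is ambiguous between "full" and "empty", so the driver flips a wrap flag each time tr_ready_index rolls over and folds it into the position written to the chip. A standalone sketch; the ring depth and the register bit position are assumed placeholders.

#include <stdint.h>

#define ET_TX_NDESC		512		/* assumed ring depth */
#define ET_TX_READY_WRAP	(1U << 16)	/* assumed register bit */

struct txdesc_ring {
	uint32_t	tr_ready_index;	/* next free descriptor */
	uint32_t	tr_ready_wrap;	/* flips on every rollover */
};

/*
 * Advance the ready index, toggling the wrap flag on rollover, and
 * build the value the chip expects: index qualified by the wrap bit.
 */
static uint32_t
tx_ready_pos(struct txdesc_ring *r)
{
	uint32_t pos;

	if (++r->tr_ready_index == ET_TX_NDESC) {
		r->tr_ready_index = 0;
		r->tr_ready_wrap ^= 1;
	}
	pos = r->tr_ready_index;
	if (r->tr_ready_wrap)
		pos |= ET_TX_READY_WRAP;
	return pos;
}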
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
bus_dmamap_sync(sc->sc_dmat, tx_ring->tr_dmap, 0,
tx_ring->tr_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
(void **)&tx_ring->tr_desc, &tx_ring->tr_paddr, &tx_ring->tr_dmap,
&tx_ring->tr_seg);
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
et_dma_mem_destroy(sc, tx_ring->tr_desc, tx_ring->tr_dmap);
struct tx_ring *txr = &sc->tx_rings[i];
struct tx_ring *txr;
sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
struct tx_ring *txr = sc->tx_rings;
ngbe_free_transmit_buffers(struct tx_ring *txr)
int ngbe_encap(struct tx_ring *, struct mbuf *);
ngbe_allocate_transmit_buffers(struct tx_ring *txr)
ngbe_setup_transmit_ring(struct tx_ring *txr)
struct tx_ring *txr = sc->tx_rings;
struct tx_ring *txr;
struct tx_ring *txr = nq->txr;
int ngbe_tx_ctx_setup(struct tx_ring *, struct mbuf *,
void ngbe_txeof(struct tx_ring *);
ngbe_encap(struct tx_ring *txr, struct mbuf *m)
ngbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *m, uint32_t *cmd_type_len,
ngbe_txeof(struct tx_ring *txr)
struct tx_ring *txr = ifq->ifq_softc;
void ngbe_free_transmit_buffers(struct tx_ring *);
struct tx_ring *txr = sc->tx_rings;
int ngbe_allocate_transmit_buffers(struct tx_ring *);
int ngbe_setup_transmit_ring(struct tx_ring *);
struct tx_ring *txr;
struct tx_ring *tx_rings;
tx_ring = &sc->tx_ring[qid];
data = &tx_ring->tx_data[tx_ring->cur];
txd = &tx_ring->desc[tx_ring->cur];
bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
tx_ring->cur = (tx_ring->cur + 1) % RTWN_TX_LIST_COUNT;
tx_ring->queued++;
if (tx_ring->queued >= (RTWN_TX_LIST_COUNT - 1))
struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
tx_data = &tx_ring->tx_data[i];
tx_desc = &tx_ring->desc[i];
tx_ring->queued--;
if (tx_ring->queued < (RTWN_TX_LIST_COUNT - 1))
struct rtwn_tx_ring tx_ring[RTWN_NTXQUEUES];
sc->tx_ring[RTWN_BK_QUEUE].map->dm_segs[0].ds_addr);
sc->tx_ring[RTWN_BE_QUEUE].map->dm_segs[0].ds_addr);
sc->tx_ring[RTWN_VI_QUEUE].map->dm_segs[0].ds_addr);
sc->tx_ring[RTWN_VO_QUEUE].map->dm_segs[0].ds_addr);
sc->tx_ring[RTWN_BEACON_QUEUE].map->dm_segs[0].ds_addr);
sc->tx_ring[RTWN_MGNT_QUEUE].map->dm_segs[0].ds_addr);
sc->tx_ring[RTWN_HIGH_QUEUE].map->dm_segs[0].ds_addr);
struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
BUS_DMA_NOWAIT, &tx_ring->map);
&tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
(caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
desc->nextdescaddr = htole32(tx_ring->map->dm_segs[0].ds_addr
tx_data = &tx_ring->tx_data[i];
struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
struct r92c_tx_desc_pci *desc = &tx_ring->desc[i];
struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
bus_dmamap_sync(sc->sc_dmat, tx_ring->map, 0, MCLBYTES,
tx_ring->queued = 0;
tx_ring->cur = 0;
struct rtwn_tx_ring *tx_ring = &sc->tx_ring[qid];
if (tx_ring->map != NULL) {
if (tx_ring->desc != NULL) {
bus_dmamap_unload(sc->sc_dmat, tx_ring->map);
bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc,
bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
tx_data = &tx_ring->tx_data[i];
tx_ring->queued = 0;
tx_ring->cur = 0;
struct rtwn_tx_ring *tx_ring;
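The rtwn fragments walk the full bus_dma(9) setup chain: bus_dmamap_create(), bus_dmamem_alloc() with BUS_DMA_ZERO, bus_dmamem_map() to get a kernel virtual address, then bus_dmamap_load(); the bus_dmamem_free() fragment shows the failure path unwinding in reverse. A sketch of the whole sequence over the fragments' types, with explicit error unwinding; the size argument, the PAGE_SIZE alignment, and the label names are illustrative.

int
tx_ring_dma_alloc(struct rtwn_softc *sc, struct rtwn_tx_ring *tx_ring,
    bus_size_t size)
{
	int error;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &tx_ring->map);
	if (error != 0)
		return error;
	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &tx_ring->seg, 1, &tx_ring->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto destroy;
	error = bus_dmamem_map(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs,
	    size, (caddr_t *)&tx_ring->desc, BUS_DMA_NOWAIT);
	if (error != 0)
		goto dmafree;
	error = bus_dmamap_load(sc->sc_dmat, tx_ring->map, tx_ring->desc,
	    size, NULL, BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;
	return 0;
unmap:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)tx_ring->desc, size);
dmafree:
	bus_dmamem_free(sc->sc_dmat, &tx_ring->seg, tx_ring->nsegs);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tx_ring->map);
	return error;
}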