Symbol: tx_ring
sys/dev/al_eth/al_eth.c:1144: al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
sys/dev/al_eth/al_eth.c:1148: int qid = tx_ring->ring_id;
sys/dev/al_eth/al_eth.c:1150: total_done = al_eth_comp_tx_get(tx_ring->dma_q);
sys/dev/al_eth/al_eth.c:1151: device_printf_dbg(tx_ring->dev,
sys/dev/al_eth/al_eth.c:1153: next_to_clean = tx_ring->next_to_clean;
sys/dev/al_eth/al_eth.c:1159: tx_info = &tx_ring->tx_buffer_info[next_to_clean];
sys/dev/al_eth/al_eth.c:1168: device_printf_dbg(tx_ring->dev,
sys/dev/al_eth/al_eth.c:1172: bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);
sys/dev/al_eth/al_eth.c:1176: next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
sys/dev/al_eth/al_eth.c:1179: tx_ring->next_to_clean = next_to_clean;
sys/dev/al_eth/al_eth.c:1181: device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
sys/dev/al_eth/al_eth.c:1192: al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
sys/dev/al_eth/al_eth.c:1219: struct al_eth_meta_data *meta = &tx_ring->hal_meta;
sys/dev/al_eth/al_eth.c:1290: al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
sys/dev/al_eth/al_eth.c:1302: if (unlikely(tx_ring->stall) != 0) {
sys/dev/al_eth/al_eth.c:1304: if (al_udma_available_get(tx_ring->dma_q) >=
sys/dev/al_eth/al_eth.c:1307: tx_ring->stall = 0;
sys/dev/al_eth/al_eth.c:1313: device_printf(tx_ring->dev,
sys/dev/al_eth/al_eth.c:1315: tx_ring->ring_id);
sys/dev/al_eth/al_eth.c:1318: device_printf_dbg(tx_ring->dev,
sys/dev/al_eth/al_eth.c:1319: "queue %d is ready!\n", tx_ring->ring_id);
sys/dev/al_eth/al_eth.c:1323: next_to_use = tx_ring->next_to_use;
sys/dev/al_eth/al_eth.c:1324: tx_info = &tx_ring->tx_buffer_info[next_to_use];
sys/dev/al_eth/al_eth.c:1329: device_printf(tx_ring->dev, "mbuf is NULL\n");
sys/dev/al_eth/al_eth.c:1336: error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
sys/dev/al_eth/al_eth.c:1347: device_printf(tx_ring->dev,
sys/dev/al_eth/al_eth.c:1354: device_printf(tx_ring->dev,
sys/dev/al_eth/al_eth.c:1359: device_printf(tx_ring->dev,
sys/dev/al_eth/al_eth.c:1367: al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);
sys/dev/al_eth/al_eth.c:1380: tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);
sys/dev/al_eth/al_eth.c:1389: if (unlikely(al_udma_available_get(tx_ring->dma_q) <
sys/dev/al_eth/al_eth.c:1391: tx_ring->stall = 1;
sys/dev/al_eth/al_eth.c:1392: device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
sys/dev/al_eth/al_eth.c:1393: tx_ring->ring_id);
sys/dev/al_eth/al_eth.c:1397: tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);
sys/dev/al_eth/al_eth.c:1400: al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
sys/dev/al_eth/al_eth.c:1410: struct al_eth_ring *tx_ring = arg;
sys/dev/al_eth/al_eth.c:1413: tx_ring->cmpl_is_running = 1;
sys/dev/al_eth/al_eth.c:1417: al_eth_tx_do_cleanup(tx_ring);
sys/dev/al_eth/al_eth.c:1420: tx_ring->cmpl_is_running = 0;
sys/dev/al_eth/al_eth.c:1424: al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
sys/dev/al_eth/al_eth.c:1430: struct al_eth_ring *tx_ring = arg;
sys/dev/al_eth/al_eth.c:1434: device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
sys/dev/al_eth/al_eth.c:1435: tx_ring->ring_id);
sys/dev/al_eth/al_eth.c:1441: if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0))
sys/dev/al_eth/al_eth.c:1442: taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
sys/dev/al_eth/al_eth.c:1686: struct al_eth_ring *tx_ring = arg;
sys/dev/al_eth/al_eth.c:1690: tx_ring->enqueue_is_running = 1;
sys/dev/al_eth/al_eth.c:1695: mtx_lock(&tx_ring->br_mtx);
sys/dev/al_eth/al_eth.c:1696: mbuf = drbr_dequeue(NULL, tx_ring->br);
sys/dev/al_eth/al_eth.c:1697: mtx_unlock(&tx_ring->br_mtx);
sys/dev/al_eth/al_eth.c:1702: al_eth_xmit_mbuf(tx_ring, mbuf);
sys/dev/al_eth/al_eth.c:1706: tx_ring->enqueue_is_running = 0;
sys/dev/al_eth/al_eth.c:1709: mtx_lock(&tx_ring->br_mtx);
sys/dev/al_eth/al_eth.c:1710: mbuf = drbr_dequeue(NULL, tx_ring->br);
sys/dev/al_eth/al_eth.c:1711: mtx_unlock(&tx_ring->br_mtx);
sys/dev/al_eth/al_eth.c:1714: al_eth_xmit_mbuf(tx_ring, mbuf);
sys/dev/al_eth/al_eth.c:1723: struct al_eth_ring *tx_ring;
sys/dev/al_eth/al_eth.c:1738: tx_ring = &adapter->tx_ring[i];
sys/dev/al_eth/al_eth.c:1743: ret = drbr_enqueue(ifp, tx_ring->br, m);
sys/dev/al_eth/al_eth.c:1749: if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
sys/dev/al_eth/al_eth.c:1750: taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
sys/dev/al_eth/al_eth.c:1967: al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
sys/dev/al_eth/al_eth.c:2128: adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
sys/dev/al_eth/al_eth.c:2278: struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
sys/dev/al_eth/al_eth.c:2279: device_t dev = tx_ring->dev;
sys/dev/al_eth/al_eth.c:2280: struct al_udma_q_params *q_params = &tx_ring->q_params;
sys/dev/al_eth/al_eth.c:2287: size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
sys/dev/al_eth/al_eth.c:2289: tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
sys/dev/al_eth/al_eth.c:2290: tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
sys/dev/al_eth/al_eth.c:2291: q_params->size = tx_ring->hw_count;
sys/dev/al_eth/al_eth.c:2296: (void**)&q_params->desc_base, tx_ring->descs_size);
sys/dev/al_eth/al_eth.c:2309: mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
sys/dev/al_eth/al_eth.c:2310: tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
sys/dev/al_eth/al_eth.c:2311: &tx_ring->br_mtx);
sys/dev/al_eth/al_eth.c:2314: TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
sys/dev/al_eth/al_eth.c:2315: tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
sys/dev/al_eth/al_eth.c:2316: taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
sys/dev/al_eth/al_eth.c:2317: taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
sys/dev/al_eth/al_eth.c:2319: TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
sys/dev/al_eth/al_eth.c:2320: tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
sys/dev/al_eth/al_eth.c:2321: taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
sys/dev/al_eth/al_eth.c:2322: taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
sys/dev/al_eth/al_eth.c:2337: &tx_ring->dma_buf_tag);
sys/dev/al_eth/al_eth.c:2345: for (size = 0; size < tx_ring->sw_count; size++) {
sys/dev/al_eth/al_eth.c:2346: ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
sys/dev/al_eth/al_eth.c:2347: &tx_ring->tx_buffer_info[size].dma_map);
sys/dev/al_eth/al_eth.c:2359: tx_ring->next_to_use = 0;
sys/dev/al_eth/al_eth.c:2360: tx_ring->next_to_clean = 0;
sys/dev/al_eth/al_eth.c:2375: struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
sys/dev/al_eth/al_eth.c:2376: struct al_udma_q_params *q_params = &tx_ring->q_params;
sys/dev/al_eth/al_eth.c:2380: while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
sys/dev/al_eth/al_eth.c:2381: taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
sys/dev/al_eth/al_eth.c:2383: taskqueue_free(tx_ring->cmpl_tq);
sys/dev/al_eth/al_eth.c:2384: while (taskqueue_cancel(tx_ring->enqueue_tq,
sys/dev/al_eth/al_eth.c:2385: &tx_ring->enqueue_task, NULL)) {
sys/dev/al_eth/al_eth.c:2386: taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
sys/dev/al_eth/al_eth.c:2389: taskqueue_free(tx_ring->enqueue_tq);
sys/dev/al_eth/al_eth.c:2391: if (tx_ring->br != NULL) {
sys/dev/al_eth/al_eth.c:2392: drbr_flush(adapter->netdev, tx_ring->br);
sys/dev/al_eth/al_eth.c:2393: buf_ring_free(tx_ring->br, M_DEVBUF);
sys/dev/al_eth/al_eth.c:2396: for (size = 0; size < tx_ring->sw_count; size++) {
sys/dev/al_eth/al_eth.c:2397: m_freem(tx_ring->tx_buffer_info[size].m);
sys/dev/al_eth/al_eth.c:2398: tx_ring->tx_buffer_info[size].m = NULL;
sys/dev/al_eth/al_eth.c:2400: bus_dmamap_unload(tx_ring->dma_buf_tag,
sys/dev/al_eth/al_eth.c:2401: tx_ring->tx_buffer_info[size].dma_map);
sys/dev/al_eth/al_eth.c:2402: bus_dmamap_destroy(tx_ring->dma_buf_tag,
sys/dev/al_eth/al_eth.c:2403: tx_ring->tx_buffer_info[size].dma_map);
sys/dev/al_eth/al_eth.c:2405: bus_dma_tag_destroy(tx_ring->dma_buf_tag);
sys/dev/al_eth/al_eth.c:2407: free(tx_ring->tx_buffer_info, M_IFAL);
sys/dev/al_eth/al_eth.c:2408: tx_ring->tx_buffer_info = NULL;
sys/dev/al_eth/al_eth.c:2410: mtx_destroy(&tx_ring->br_mtx);
sys/dev/al_eth/al_eth.c:2434: if (adapter->tx_ring[i].q_params.desc_base)
sys/dev/al_eth/al_eth.c:3049: q_params = &adapter->tx_ring[qid].q_params;
sys/dev/al_eth/al_eth.c:985: struct al_eth_ring *ring = &adapter->tx_ring[i];
sys/dev/al_eth/al_eth.h:150: #define AL_ETH_TX_RING_IDX_NEXT(tx_ring, idx) (((idx) + 1) & (AL_ETH_DEFAULT_TX_SW_DESCS - 1))
sys/dev/al_eth/al_eth.h:274: struct al_eth_ring tx_ring[AL_ETH_NUM_QUEUES];
sys/dev/axgbe/if_axgbe_pci.c:1687: struct xgbe_ring *tx_ring;
sys/dev/axgbe/if_axgbe_pci.c:1701: tx_ring = (struct xgbe_ring*)malloc(ntxqs *
sys/dev/axgbe/if_axgbe_pci.c:1704: if (tx_ring == NULL) {
sys/dev/axgbe/if_axgbe_pci.c:1709: channel->tx_ring = tx_ring;
sys/dev/axgbe/if_axgbe_pci.c:1711: for (j = 0; j < ntxqs; j++, tx_ring++) {
sys/dev/axgbe/if_axgbe_pci.c:1712: tx_ring->rdata =
sys/dev/axgbe/if_axgbe_pci.c:1717: tx_ring->rdesc = (struct xgbe_ring_desc *)va[i*ntxqs + j];
sys/dev/axgbe/if_axgbe_pci.c:1718: tx_ring->rdesc_paddr = pa[i*ntxqs + j];
sys/dev/axgbe/if_axgbe_pci.c:1719: tx_ring->rdesc_count = scctx->isc_ntxd[j];
sys/dev/axgbe/if_axgbe_pci.c:1720: spin_lock_init(&tx_ring->lock);
sys/dev/axgbe/if_axgbe_pci.c:1734: tx_ring = channel->tx_ring;
sys/dev/axgbe/if_axgbe_pci.c:1735: for (k = 0; k < ntxqs ; k++, tx_ring++) {
sys/dev/axgbe/if_axgbe_pci.c:1736: if (tx_ring && tx_ring->rdata)
sys/dev/axgbe/if_axgbe_pci.c:1737: free(tx_ring->rdata, M_AXGBE);
sys/dev/axgbe/if_axgbe_pci.c:1739: free(channel->tx_ring, M_AXGBE);
sys/dev/axgbe/if_axgbe_pci.c:1741: channel->tx_ring = NULL;
sys/dev/axgbe/if_axgbe_pci.c:1829: struct xgbe_ring *tx_ring;
sys/dev/axgbe/if_axgbe_pci.c:1837: tx_ring = channel->tx_ring;
sys/dev/axgbe/if_axgbe_pci.c:1838: for (j = 0; j < sctx->isc_ntxqs ; j++, tx_ring++) {
sys/dev/axgbe/if_axgbe_pci.c:1839: if (tx_ring && tx_ring->rdata)
sys/dev/axgbe/if_axgbe_pci.c:1840: free(tx_ring->rdata, M_AXGBE);
sys/dev/axgbe/if_axgbe_pci.c:1842: free(channel->tx_ring, M_AXGBE);
sys/dev/axgbe/if_axgbe_pci.c:1843: channel->tx_ring = NULL;
sys/dev/axgbe/if_axgbe_pci.c:2061: if (channel->tx_ring && channel->rx_ring)
sys/dev/axgbe/if_axgbe_pci.c:2063: else if (channel->tx_ring)
sys/dev/axgbe/if_axgbe_pci.c:2081: if (channel->tx_ring && channel->rx_ring)
sys/dev/axgbe/if_axgbe_pci.c:2083: else if (channel->tx_ring)
sys/dev/axgbe/if_axgbe_pci.c:808: channel->tx_ring = NULL;
sys/dev/axgbe/xgbe-desc.c:135: ring = channel->tx_ring;
sys/dev/axgbe/xgbe-dev.c:1302: struct xgbe_ring *ring = channel->tx_ring;
sys/dev/axgbe/xgbe-dev.c:181: if (pdata->channel[i]->tx_ring)
sys/dev/axgbe/xgbe-dev.c:199: if (!pdata->channel[i]->tx_ring)
sys/dev/axgbe/xgbe-dev.c:2538: if (!pdata->channel[i]->tx_ring)
sys/dev/axgbe/xgbe-dev.c:2571: if (!pdata->channel[i]->tx_ring)
sys/dev/axgbe/xgbe-dev.c:2663: if (!pdata->channel[i]->tx_ring)
sys/dev/axgbe/xgbe-dev.c:2687: if (!pdata->channel[i]->tx_ring)
sys/dev/axgbe/xgbe-dev.c:297: if (!pdata->channel[i]->tx_ring)
sys/dev/axgbe/xgbe-dev.c:641: if (channel->tx_ring) {
sys/dev/axgbe/xgbe-txrx.c:186: ring = channel->tx_ring;
sys/dev/axgbe/xgbe-txrx.c:374: struct xgbe_ring *ring = channel->tx_ring;
sys/dev/axgbe/xgbe-txrx.c:392: struct xgbe_ring *ring = channel->tx_ring;
sys/dev/axgbe/xgbe.h:540: struct xgbe_ring *tx_ring;
sys/dev/bnxt/bnxt_en/bnxt_txrx.c:213: struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
sys/dev/bnxt/bnxt_en/bnxt_txrx.c:216: softc->db_ops.bnxt_db_tx(tx_ring, pidx);
sys/dev/e1000/em_txrx.c:140: struct tx_ring *txr = &que->txr;
sys/dev/e1000/em_txrx.c:247: struct tx_ring *txr = &que->txr;
sys/dev/e1000/em_txrx.c:347: struct tx_ring *txr = &que->txr;
sys/dev/e1000/em_txrx.c:476: struct tx_ring *txr = &que->txr;
sys/dev/e1000/em_txrx.c:487: struct tx_ring *txr = &que->txr;
sys/dev/e1000/em_txrx.c:94: struct tx_ring *txr;
sys/dev/e1000/if_em.c:1569: struct tx_ring *txr = &tx_que->txr;
sys/dev/e1000/if_em.c:1653: struct tx_ring *txr, struct rx_ring *rxr)
sys/dev/e1000/if_em.c:1814: struct tx_ring *txr = &sc->tx_queues[0].txr;
sys/dev/e1000/if_em.c:1914: struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
sys/dev/e1000/if_em.c:2934: struct tx_ring *txr = &sc->tx_queues->txr;
sys/dev/e1000/if_em.c:3515: struct tx_ring *txr = &que->txr;
sys/dev/e1000/if_em.c:3598: struct tx_ring *txr = &tx_que->txr;
sys/dev/e1000/if_em.c:3626: struct tx_ring *txr;
sys/dev/e1000/if_em.c:3641: offp = (caddr_t)txr + offsetof(struct tx_ring, csum_flags);
sys/dev/e1000/if_em.c:465: struct tx_ring *, struct rx_ring *);
sys/dev/e1000/if_em.c:5002: struct tx_ring *txr = &tx_que->txr;
sys/dev/e1000/if_em.c:5719: struct tx_ring *txr = &sc->tx_queues->txr;
sys/dev/e1000/if_em.c:855: struct tx_ring *txr = &tx_que->txr;
sys/dev/e1000/if_em.h:458: struct tx_ring txr;
sys/dev/e1000/igb_txrx.c:149: igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
sys/dev/e1000/igb_txrx.c:238: struct tx_ring *txr = &que->txr;
sys/dev/e1000/igb_txrx.c:304: struct tx_ring *txr = &que->txr;
sys/dev/e1000/igb_txrx.c:315: struct tx_ring *txr = &que->txr;
sys/dev/e1000/igb_txrx.c:54: static int igb_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
sys/dev/e1000/igb_txrx.c:56: static int igb_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
sys/dev/e1000/igb_txrx.c:83: igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
sys/dev/ena/ena.c:1504: struct ena_ring *tx_ring = &adapter->tx_ring[qid];
sys/dev/ena/ena.c:1506: ENA_RING_MTX_LOCK(tx_ring);
sys/dev/ena/ena.c:1507: for (int i = 0; i < tx_ring->ring_size; i++) {
sys/dev/ena/ena.c:1508: struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
sys/dev/ena/ena.c:1531: ENA_RING_MTX_UNLOCK(tx_ring);
sys/dev/ena/ena.c:1611: ring = &adapter->tx_ring[i];
sys/dev/ena/ena.c:2101: struct ena_ring *tx_ring;
sys/dev/ena/ena.c:2110: tx_ring = &adapter->tx_ring[i];
sys/dev/ena/ena.c:2111: counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
sys/dev/ena/ena.c:2147: adapter->tx_ring[i].ring_size = new_tx_size;
sys/dev/ena/ena.c:2206: cur_tx_ring_size = adapter->tx_ring[0].ring_size;
sys/dev/ena/ena.c:3173: struct ena_ring *tx_ring)
sys/dev/ena/ena.c:3179: rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id);
sys/dev/ena/ena.c:3184: tx_ring->qid);
sys/dev/ena/ena.c:3191: tx_ring->qid);
sys/dev/ena/ena.c:3198: struct ena_ring *tx_ring)
sys/dev/ena/ena.c:3213: for (i = 0; i < tx_ring->ring_size; i++) {
sys/dev/ena/ena.c:3214: tx_buf = &tx_ring->tx_buffer_info[i];
sys/dev/ena/ena.c:3223: if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
sys/dev/ena/ena.c:3232: tx_ring->qid);
sys/dev/ena/ena.c:3243: tx_ring->tx_last_cleanup_ticks);
sys/dev/ena/ena.c:3249: tx_ring->qid, i, time_since_last_cleanup,
sys/dev/ena/ena.c:3272: cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending));
sys/dev/ena/ena.c:3273: cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running)));
sys/dev/ena/ena.c:3275: reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring);
sys/dev/ena/ena.c:3281: counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx);
sys/dev/ena/ena.c:3295: struct ena_ring *tx_ring;
sys/dev/ena/ena.c:3314: tx_ring = &adapter->tx_ring[i];
sys/dev/ena/ena.c:3317: rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
sys/dev/ena/ena.c:399: txr = &adapter->tx_ring[i];
sys/dev/ena/ena.c:413: que->tx_ring = txr;
sys/dev/ena/ena.c:431: txr = &adapter->tx_ring[i];
sys/dev/ena/ena.c:474: struct ena_ring *txr = &adapter->tx_ring[qid];
sys/dev/ena/ena.c:569: validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc)
sys/dev/ena/ena.c:571: struct ena_adapter *adapter = tx_ring->adapter;
sys/dev/ena/ena.c:579: req_id, tx_ring->qid);
sys/dev/ena/ena.c:583: req_id, tx_ring->qid);
sys/dev/ena/ena.c:584: counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
sys/dev/ena/ena.c:595: ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
sys/dev/ena/ena.c:597: struct ena_adapter *adapter = tx_ring->adapter;
sys/dev/ena/ena.c:606: for (i = 0; i < tx_ring->ring_size; ++i) {
sys/dev/ena/ena.c:607: tx_info = &tx_ring->tx_buffer_info[i];
sys/dev/ena/ena.c:640: struct ena_ring *tx_ring = que->tx_ring;
sys/dev/ena/ena.c:650: size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
sys/dev/ena/ena.c:652: tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
sys/dev/ena/ena.c:653: if (unlikely(tx_ring->tx_buffer_info == NULL))
sys/dev/ena/ena.c:656: size = sizeof(uint16_t) * tx_ring->ring_size;
sys/dev/ena/ena.c:657: tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
sys/dev/ena/ena.c:658: if (unlikely(tx_ring->free_tx_ids == NULL))
sys/dev/ena/ena.c:661: size = tx_ring->tx_max_header_size;
sys/dev/ena/ena.c:662: tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
sys/dev/ena/ena.c:664: if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
sys/dev/ena/ena.c:668: for (i = 0; i < tx_ring->ring_size; i++)
sys/dev/ena/ena.c:669: tx_ring->free_tx_ids[i] = i;
sys/dev/ena/ena.c:672: ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
sys/dev/ena/ena.c:673: sizeof(tx_ring->tx_stats));
sys/dev/ena/ena.c:675: tx_ring->next_to_use = 0;
sys/dev/ena/ena.c:676: tx_ring->next_to_clean = 0;
sys/dev/ena/ena.c:677: tx_ring->acum_pkts = 0;
sys/dev/ena/ena.c:680: ENA_RING_MTX_LOCK(tx_ring);
sys/dev/ena/ena.c:681: drbr_flush(adapter->ifp, tx_ring->br);
sys/dev/ena/ena.c:682: ENA_RING_MTX_UNLOCK(tx_ring);
sys/dev/ena/ena.c:685: for (i = 0; i < tx_ring->ring_size; i++) {
sys/dev/ena/ena.c:687: &tx_ring->tx_buffer_info[i].dmamap);
sys/dev/ena/ena.c:696: map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
sys/dev/ena/ena.c:712: TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
sys/dev/ena/ena.c:713: tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
sys/dev/ena/ena.c:714: taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
sys/dev/ena/ena.c:715: if (unlikely(tx_ring->enqueue_tq == NULL)) {
sys/dev/ena/ena.c:718: i = tx_ring->ring_size;
sys/dev/ena/ena.c:722: tx_ring->running = true;
sys/dev/ena/ena.c:732: taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
sys/dev/ena/ena.c:738: ena_release_all_tx_dmamap(tx_ring);
sys/dev/ena/ena.c:740: free(tx_ring->free_tx_ids, M_DEVBUF);
sys/dev/ena/ena.c:741: tx_ring->free_tx_ids = NULL;
sys/dev/ena/ena.c:743: free(tx_ring->tx_buffer_info, M_DEVBUF);
sys/dev/ena/ena.c:744: tx_ring->tx_buffer_info = NULL;
sys/dev/ena/ena.c:759: struct ena_ring *tx_ring = &adapter->tx_ring[qid];
sys/dev/ena/ena.c:765: while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
sys/dev/ena/ena.c:766: taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
sys/dev/ena/ena.c:768: taskqueue_free(tx_ring->enqueue_tq);
sys/dev/ena/ena.c:770: ENA_RING_MTX_LOCK(tx_ring);
sys/dev/ena/ena.c:772: drbr_flush(adapter->ifp, tx_ring->br);
sys/dev/ena/ena.c:775: for (int i = 0; i < tx_ring->ring_size; i++) {
sys/dev/ena/ena.c:777: tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
sys/dev/ena/ena.c:779: tx_ring->tx_buffer_info[i].dmamap);
sys/dev/ena/ena.c:781: tx_ring->tx_buffer_info[i].dmamap);
sys/dev/ena/ena.c:785: nm_info = &tx_ring->tx_buffer_info[i].nm_info;
sys/dev/ena/ena.c:801: m_freem(tx_ring->tx_buffer_info[i].mbuf);
sys/dev/ena/ena.c:802: tx_ring->tx_buffer_info[i].mbuf = NULL;
sys/dev/ena/ena.c:804: ENA_RING_MTX_UNLOCK(tx_ring);
sys/dev/ena/ena.c:807: free(tx_ring->tx_buffer_info, M_DEVBUF);
sys/dev/ena/ena.c:808: tx_ring->tx_buffer_info = NULL;
sys/dev/ena/ena.c:810: free(tx_ring->free_tx_ids, M_DEVBUF);
sys/dev/ena/ena.c:811: tx_ring->free_tx_ids = NULL;
sys/dev/ena/ena.c:813: free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
sys/dev/ena/ena.c:814: tx_ring->push_buf_intermediate_buf = NULL;
sys/dev/ena/ena.h:222: struct ena_ring *tx_ring;
sys/dev/ena/ena.h:486: struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
sys/dev/ena/ena.h:588: int validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc);
sys/dev/ena/ena.h:628: ena_ring_tx_doorbell(struct ena_ring *tx_ring)
sys/dev/ena/ena.h:630: ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
sys/dev/ena/ena.h:631: counter_u64_add(tx_ring->tx_stats.doorbells, 1);
sys/dev/ena/ena.h:632: tx_ring->acum_pkts = 0;
sys/dev/ena/ena_datapath.c:1003: rc = ena_tx_map_mbuf(tx_ring, tx_info, *mbuf, &push_hdr, &header_len);
sys/dev/ena/ena_datapath.c:101: txc = ena_tx_cleanup(tx_ring);
sys/dev/ena/ena_datapath.c:1018: if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
sys/dev/ena/ena_datapath.c:1019: ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx)) {
sys/dev/ena/ena_datapath.c:1022: tx_ring->que->id);
sys/dev/ena/ena_datapath.c:1023: ena_ring_tx_doorbell(tx_ring);
sys/dev/ena/ena_datapath.c:1031: tx_ring->que->id);
sys/dev/ena/ena_datapath.c:1037: counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
sys/dev/ena/ena_datapath.c:1042: counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
sys/dev/ena/ena_datapath.c:1043: counter_u64_add_protected(tx_ring->tx_stats.bytes,
sys/dev/ena/ena_datapath.c:1055: tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
sys/dev/ena/ena_datapath.c:1056: tx_ring->ring_size);
sys/dev/ena/ena_datapath.c:1062: if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
sys/dev/ena/ena_datapath.c:1064: ena_log_io(pdev, DBG, "Stop queue %d\n", tx_ring->que->id);
sys/dev/ena/ena_datapath.c:1066: tx_ring->running = false;
sys/dev/ena/ena_datapath.c:1067: counter_u64_add(tx_ring->tx_stats.queue_stop, 1);
sys/dev/ena/ena_datapath.c:1078: if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
sys/dev/ena/ena_datapath.c:1080: tx_ring->running = true;
sys/dev/ena/ena_datapath.c:1081: counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
sys/dev/ena/ena_datapath.c:1098: ena_start_xmit(struct ena_ring *tx_ring)
sys/dev/ena/ena_datapath.c:1101: struct ena_adapter *adapter = tx_ring->adapter;
sys/dev/ena/ena_datapath.c:1104: ENA_RING_MTX_ASSERT(tx_ring);
sys/dev/ena/ena_datapath.c:1112: while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
sys/dev/ena/ena_datapath.c:1117: if (unlikely(!tx_ring->running)) {
sys/dev/ena/ena_datapath.c:1118: drbr_putback(adapter->ifp, tx_ring->br, mbuf);
sys/dev/ena/ena_datapath.c:1122: if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
sys/dev/ena/ena_datapath.c:1124: drbr_putback(adapter->ifp, tx_ring->br, mbuf);
sys/dev/ena/ena_datapath.c:1126: drbr_putback(adapter->ifp, tx_ring->br, mbuf);
sys/dev/ena/ena_datapath.c:1129: drbr_advance(adapter->ifp, tx_ring->br);
sys/dev/ena/ena_datapath.c:1135: drbr_advance(adapter->ifp, tx_ring->br);
sys/dev/ena/ena_datapath.c:114: counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
sys/dev/ena/ena_datapath.c:1140: tx_ring->acum_pkts++;
sys/dev/ena/ena_datapath.c:1145: if (likely(tx_ring->acum_pkts != 0)) {
sys/dev/ena/ena_datapath.c:1147: ena_ring_tx_doorbell(tx_ring);
sys/dev/ena/ena_datapath.c:1150: if (unlikely(!tx_ring->running))
sys/dev/ena/ena_datapath.c:1151: taskqueue_enqueue(tx_ring->que->cleanup_tq,
sys/dev/ena/ena_datapath.c:1152: &tx_ring->que->cleanup_task);
sys/dev/ena/ena_datapath.c:116: atomic_store_8(&tx_ring->cleanup_running, 0);
sys/dev/ena/ena_datapath.c:122: struct ena_ring *tx_ring = (struct ena_ring *)arg;
sys/dev/ena/ena_datapath.c:123: if_t ifp = tx_ring->adapter->ifp;
sys/dev/ena/ena_datapath.c:125: while (!drbr_empty(ifp, tx_ring->br) && tx_ring->running &&
sys/dev/ena/ena_datapath.c:127: ENA_RING_MTX_LOCK(tx_ring);
sys/dev/ena/ena_datapath.c:128: ena_start_xmit(tx_ring);
sys/dev/ena/ena_datapath.c:129: ENA_RING_MTX_UNLOCK(tx_ring);
sys/dev/ena/ena_datapath.c:137: struct ena_ring *tx_ring;
sys/dev/ena/ena_datapath.c:164: tx_ring = &adapter->tx_ring[i];
sys/dev/ena/ena_datapath.c:167: is_drbr_empty = drbr_empty(ifp, tx_ring->br);
sys/dev/ena/ena_datapath.c:168: ret = drbr_enqueue(ifp, tx_ring->br, m);
sys/dev/ena/ena_datapath.c:170: taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
sys/dev/ena/ena_datapath.c:174: if (is_drbr_empty && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
sys/dev/ena/ena_datapath.c:175: ena_start_xmit(tx_ring);
sys/dev/ena/ena_datapath.c:176: ENA_RING_MTX_UNLOCK(tx_ring);
sys/dev/ena/ena_datapath.c:178: taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
sys/dev/ena/ena_datapath.c:188: struct ena_ring *tx_ring = adapter->tx_ring;
sys/dev/ena/ena_datapath.c:191: for (i = 0; i < adapter->num_io_queues; ++i, ++tx_ring)
sys/dev/ena/ena_datapath.c:192: if (!drbr_empty(ifp, tx_ring->br)) {
sys/dev/ena/ena_datapath.c:193: ENA_RING_MTX_LOCK(tx_ring);
sys/dev/ena/ena_datapath.c:194: drbr_flush(ifp, tx_ring->br);
sys/dev/ena/ena_datapath.c:195: ENA_RING_MTX_UNLOCK(tx_ring);
sys/dev/ena/ena_datapath.c:206: ena_get_tx_req_id(struct ena_ring *tx_ring, struct ena_com_io_cq *io_cq,
sys/dev/ena/ena_datapath.c:209: struct ena_adapter *adapter = tx_ring->adapter;
sys/dev/ena/ena_datapath.c:215: rc = validate_tx_req_id(tx_ring, *req_id, rc);
sys/dev/ena/ena_datapath.c:217: if (unlikely(tx_ring->tx_buffer_info[*req_id].mbuf == NULL)) {
sys/dev/ena/ena_datapath.c:220: *req_id, tx_ring->qid);
sys/dev/ena/ena_datapath.c:240: ena_tx_cleanup(struct ena_ring *tx_ring)
sys/dev/ena/ena_datapath.c:254: adapter = tx_ring->que->adapter;
sys/dev/ena/ena_datapath.c:255: ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
sys/dev/ena/ena_datapath.c:257: next_to_clean = tx_ring->next_to_clean;
sys/dev/ena/ena_datapath.c:260: if (netmap_tx_irq(adapter->ifp, tx_ring->qid) != NM_IRQ_PASS)
sys/dev/ena/ena_datapath.c:268: rc = ena_get_tx_req_id(tx_ring, io_cq, &req_id);
sys/dev/ena/ena_datapath.c:272: tx_info = &tx_ring->tx_buffer_info[req_id];
sys/dev/ena/ena_datapath.c:284: tx_ring->qid, mbuf);
sys/dev/ena/ena_datapath.c:290: tx_ring->free_tx_ids[next_to_clean] = req_id;
sys/dev/ena/ena_datapath.c:292: tx_ring->ring_size);
sys/dev/ena/ena_datapath.c:297: tx_ring->next_to_clean = next_to_clean;
sys/dev/ena/ena_datapath.c:308: tx_ring->qid, work_done);
sys/dev/ena/ena_datapath.c:312: tx_ring->next_to_clean = next_to_clean;
sys/dev/ena/ena_datapath.c:323: above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
sys/dev/ena/ena_datapath.c:325: if (unlikely(!tx_ring->running && above_thresh)) {
sys/dev/ena/ena_datapath.c:326: ENA_RING_MTX_LOCK(tx_ring);
sys/dev/ena/ena_datapath.c:328: tx_ring->ena_com_io_sq, ENA_TX_RESUME_THRESH);
sys/dev/ena/ena_datapath.c:329: if (!tx_ring->running && above_thresh) {
sys/dev/ena/ena_datapath.c:330: tx_ring->running = true;
sys/dev/ena/ena_datapath.c:331: counter_u64_add(tx_ring->tx_stats.queue_wakeup, 1);
sys/dev/ena/ena_datapath.c:332: taskqueue_enqueue(tx_ring->enqueue_tq,
sys/dev/ena/ena_datapath.c:333: &tx_ring->enqueue_task);
sys/dev/ena/ena_datapath.c:335: ENA_RING_MTX_UNLOCK(tx_ring);
sys/dev/ena/ena_datapath.c:338: tx_ring->tx_last_cleanup_ticks = ticks;
sys/dev/ena/ena_datapath.c:47: static inline int ena_get_tx_req_id(struct ena_ring *tx_ring,
sys/dev/ena/ena_datapath.c:56: static int ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
sys/dev/ena/ena_datapath.c:71: struct ena_ring *tx_ring;
sys/dev/ena/ena_datapath.c:78: tx_ring = que->tx_ring;
sys/dev/ena/ena_datapath.c:816: ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
sys/dev/ena/ena_datapath.c:822: adapter = tx_ring->adapter;
sys/dev/ena/ena_datapath.c:830: ((*mbuf)->m_pkthdr.len < tx_ring->tx_max_header_size))
sys/dev/ena/ena_datapath.c:833: counter_u64_add(tx_ring->tx_stats.collapse, 1);
sys/dev/ena/ena_datapath.c:838: counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
sys/dev/ena/ena_datapath.c:84: atomic_store_8(&tx_ring->cleanup_running, 1);
sys/dev/ena/ena_datapath.c:849: ena_tx_map_mbuf(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info,
sys/dev/ena/ena_datapath.c:852: struct ena_adapter *adapter = tx_ring->adapter;
sys/dev/ena/ena_datapath.c:876: if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
sys/dev/ena/ena_datapath.c:888: tx_ring->tx_max_header_size);
sys/dev/ena/ena_datapath.c:899: tx_ring->push_buf_intermediate_buf);
sys/dev/ena/ena_datapath.c:900: *push_hdr = tx_ring->push_buf_intermediate_buf;
sys/dev/ena/ena_datapath.c:902: counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
sys/dev/ena/ena_datapath.c:910: if (mbuf->m_pkthdr.len <= tx_ring->tx_max_header_size) {
sys/dev/ena/ena_datapath.c:913: offset = tx_ring->tx_max_header_size;
sys/dev/ena/ena_datapath.c:958: counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
sys/dev/ena/ena_datapath.c:96: atomic_store_8(&tx_ring->first_interrupt, 1);
sys/dev/ena/ena_datapath.c:964: ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
sys/dev/ena/ena_datapath.c:980: ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
sys/dev/ena/ena_datapath.c:981: adapter = tx_ring->que->adapter;
sys/dev/ena/ena_datapath.c:986: rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
sys/dev/ena/ena_datapath.c:995: next_to_use = tx_ring->next_to_use;
sys/dev/ena/ena_datapath.c:996: req_id = tx_ring->free_tx_ids[next_to_use];
sys/dev/ena/ena_datapath.c:997: tx_info = &tx_ring->tx_buffer_info[req_id];
sys/dev/ena/ena_netmap.c:329: ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];
sys/dev/ena/ena_netmap.c:349: struct ena_ring *tx_ring = ctx->ring;
sys/dev/ena/ena_netmap.c:371: tx_ring->acum_pkts++;
sys/dev/ena/ena_netmap.c:377: ena_ring_tx_doorbell(tx_ring);
sys/dev/ena/ena_netmap.c:391: struct ena_ring *tx_ring;
sys/dev/ena/ena_netmap.c:406: tx_ring = ctx->ring;
sys/dev/ena/ena_netmap.c:408: req_id = tx_ring->free_tx_ids[ctx->nt];
sys/dev/ena/ena_netmap.c:409: tx_info = &tx_ring->tx_buffer_info[req_id];
sys/dev/ena/ena_netmap.c:430: if (tx_ring->acum_pkts == ENA_DB_THRESHOLD ||
sys/dev/ena/ena_netmap.c:432: ena_ring_tx_doorbell(tx_ring);
sys/dev/ena/ena_netmap.c:438: "Tx ring[%d] is out of space\n", tx_ring->que->id);
sys/dev/ena/ena_netmap.c:445: counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
sys/dev/ena/ena_netmap.c:452: counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
sys/dev/ena/ena_netmap.c:453: counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
sys/dev/ena/ena_netmap.c:556: struct ena_ring *tx_ring;
sys/dev/ena/ena_netmap.c:570: tx_ring = ctx->ring;
sys/dev/ena/ena_netmap.c:583: if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
sys/dev/ena/ena_netmap.c:595: tx_ring->tx_max_header_size);
sys/dev/ena/ena_netmap.c:612: tx_ring->push_buf_intermediate_buf);
sys/dev/ena/ena_netmap.c:619: *push_hdr = tx_ring->push_buf_intermediate_buf;
sys/dev/ena/ena_netmap.c:620: counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);
sys/dev/ena/ena_netmap.c:799: struct ena_ring *tx_ring = ctx->ring;
sys/dev/ena/ena_netmap.c:805: ctx->nt = tx_ring->next_to_clean;
sys/dev/ena/ena_netmap.c:813: rc = validate_tx_req_id(tx_ring, req_id, rc);
sys/dev/ena/ena_netmap.c:824: tx_ring->next_to_clean = ctx->nt;
sys/dev/ena/ena_netmap.c:825: ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
sys/dev/ena/ena_sysctl.c:239: struct ena_ring *tx_ring;
sys/dev/ena/ena_sysctl.c:267: tx_ring = adapter->tx_ring;
sys/dev/ena/ena_sysctl.c:309: for (i = 0; i < adapter->num_io_queues; ++i, ++tx_ring, ++rx_ring) {
sys/dev/ena/ena_sysctl.c:331: tx_stats = &tx_ring->tx_stats;
sys/dev/et/if_et.c:1036: struct et_txdesc_ring *tx_ring;
sys/dev/et/if_et.c:1110: tx_ring = &sc->sc_tx_ring;
sys/dev/et/if_et.c:1111: et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
sys/dev/et/if_et.c:1112: tx_ring->tr_dmap, &tx_ring->tr_paddr);
sys/dev/et/if_et.c:1369: struct et_txdesc_ring *tx_ring;
sys/dev/et/if_et.c:1420: tx_ring = &sc->sc_tx_ring;
sys/dev/et/if_et.c:1421: bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
sys/dev/et/if_et.c:1423: tx_ready_pos = tx_ring->tr_ready_index &
sys/dev/et/if_et.c:1425: if (tx_ring->tr_ready_wrap)
sys/dev/et/if_et.c:1669: struct et_txdesc_ring *tx_ring;
sys/dev/et/if_et.c:1673: tx_ring = &sc->sc_tx_ring;
sys/dev/et/if_et.c:1674: bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
sys/dev/et/if_et.c:1675: bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
sys/dev/et/if_et.c:1797: struct et_txdesc_ring *tx_ring;
sys/dev/et/if_et.c:1810: tx_ring = &sc->sc_tx_ring;
sys/dev/et/if_et.c:1811: CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
sys/dev/et/if_et.c:1812: CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
sys/dev/et/if_et.c:1825: tx_ring->tr_ready_index = 0;
sys/dev/et/if_et.c:1826: tx_ring->tr_ready_wrap = 0;
sys/dev/et/if_et.c:2141: struct et_txdesc_ring *tx_ring;
sys/dev/et/if_et.c:2150: tx_ring = &sc->sc_tx_ring;
sys/dev/et/if_et.c:2151: MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
sys/dev/et/if_et.c:2153: first_idx = tx_ring->tr_ready_index;
sys/dev/et/if_et.c:2203: td = &tx_ring->tr_desc[idx];
sys/dev/et/if_et.c:2214: MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
sys/dev/et/if_et.c:2215: if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
sys/dev/et/if_et.c:2216: tx_ring->tr_ready_index = 0;
sys/dev/et/if_et.c:2217: tx_ring->tr_ready_wrap ^= 1;
sys/dev/et/if_et.c:2220: td = &tx_ring->tr_desc[first_idx];
sys/dev/et/if_et.c:2238: struct et_txdesc_ring *tx_ring;
sys/dev/et/if_et.c:2248: tx_ring = &sc->sc_tx_ring;
sys/dev/et/if_et.c:2257: bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
sys/dev/et/if_et.c:857: struct et_txdesc_ring *tx_ring;
sys/dev/et/if_et.c:876: tx_ring = &sc->sc_tx_ring;
sys/dev/et/if_et.c:878: &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
sys/dev/et/if_et.c:879: &tx_ring->tr_paddr, "TX ring");
sys/dev/iavf/iavf_iflib.h:201: struct tx_ring txr;
sys/dev/iavf/iavf_lib.c:764: struct tx_ring *txr = &que->txr;
sys/dev/iavf/iavf_txrx_iflib.c:105: iavf_is_tx_desc_done(struct tx_ring *txr, int idx)
sys/dev/iavf/iavf_txrx_iflib.c:259: iavf_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
sys/dev/iavf/iavf_txrx_iflib.c:325: struct tx_ring *txr = &que->txr;
sys/dev/iavf/iavf_txrx_iflib.c:413: struct tx_ring *txr = &vsi->tx_queues[txqid].txr;
sys/dev/iavf/iavf_txrx_iflib.c:430: struct tx_ring *txr = &que->txr;
sys/dev/iavf/iavf_txrx_iflib.c:452: struct tx_ring *txr = &que->txr;
sys/dev/iavf/iavf_txrx_iflib.c:479: struct tx_ring *txr = &que->txr;
sys/dev/iavf/iavf_txrx_iflib.c:515: struct tx_ring *txr = &tx_que->txr;
sys/dev/iavf/iavf_vc_iflib.c:60: struct tx_ring *txr;
sys/dev/iavf/if_iavf_iflib.c:1094: struct tx_ring *txr = &que->txr;
sys/dev/iavf/if_iavf_iflib.c:1845: struct tx_ring *txr = &tx_que->txr;
sys/dev/iavf/if_iavf_iflib.c:1876: struct tx_ring *txr = &tx_que->txr;
sys/dev/iavf/if_iavf_iflib.c:1939: struct tx_ring *txr;
sys/dev/iavf/if_iavf_iflib.c:985: struct tx_ring *txr = &que->txr;
sys/dev/igc/if_igc.c:1050: struct tx_ring *txr = &sc->tx_queues[0].txr;
sys/dev/igc/if_igc.c:1124: struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
sys/dev/igc/if_igc.c:146: struct tx_ring *, struct rx_ring *);
sys/dev/igc/if_igc.c:2024: struct tx_ring *txr = &que->txr;
sys/dev/igc/if_igc.c:2105: struct tx_ring *txr = &tx_que->txr;
sys/dev/igc/if_igc.c:2137: struct tx_ring *txr;
sys/dev/igc/if_igc.c:2718: struct tx_ring *txr = &tx_que->txr;
sys/dev/igc/if_igc.c:3279: struct tx_ring *txr = &sc->tx_queues->txr;
sys/dev/igc/if_igc.c:397: struct tx_ring *txr = &tx_que->txr;
sys/dev/igc/if_igc.c:850: struct tx_ring *txr = &tx_que->txr;
sys/dev/igc/if_igc.c:908: struct tx_ring *txr, struct rx_ring *rxr)
sys/dev/igc/if_igc.h:282: struct tx_ring txr;
sys/dev/igc/igc_txrx.c:124: igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
sys/dev/igc/igc_txrx.c:186: igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
sys/dev/igc/igc_txrx.c:270: struct tx_ring *txr = &que->txr;
sys/dev/igc/igc_txrx.c:332: struct tx_ring *txr = &que->txr;
sys/dev/igc/igc_txrx.c:343: struct tx_ring *txr = &que->txr;
sys/dev/igc/igc_txrx.c:56: static int igc_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
sys/dev/igc/igc_txrx.c:58: static int igc_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
sys/dev/igc/igc_txrx.c:83: struct tx_ring *txr;
sys/dev/ixgbe/if_fdir.c:155: ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
sys/dev/ixgbe/if_fdir.c:80: ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
sys/dev/ixgbe/if_ix.c:2038: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixgbe/if_ix.c:2202: struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
sys/dev/ixgbe/if_ix.c:2229: struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
sys/dev/ixgbe/if_ix.c:3863: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixgbe/if_ix.c:4061: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixgbe/if_ix.c:504: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/if_ix.c:605: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixgbe/if_ix.c:865: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/if_ixv.c:1250: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/if_ixv.c:1856: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixgbe/if_ixv.c:269: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/if_ixv.c:367: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/ix_txrx.c:178: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/ix_txrx.c:258: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/ix_txrx.c:272: struct tx_ring *txr = &que->txr;
sys/dev/ixgbe/ixgbe.h:343: struct tx_ring txr;
sys/dev/ixgbe/ixgbe_fdir.h:55: void ixgbe_atr(struct tx_ring *, struct mbuf *);
sys/dev/ixl/if_ixl.c:1240: struct tx_ring *txr = &que->txr;
sys/dev/ixl/if_ixl.c:1323: struct tx_ring *txr = &que->txr;
sys/dev/ixl/ixl.h:383: struct tx_ring txr;
sys/dev/ixl/ixl_pf_iflib.c:46: struct tx_ring *txr = &que->txr;
sys/dev/ixl/ixl_pf_iflib.c:517: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixl/ixl_pf_iflib.c:698: struct tx_ring *txr = &que->txr;
sys/dev/ixl/ixl_txrx.c:149: ixl_is_tx_desc_done(struct tx_ring *txr, int idx)
sys/dev/ixl/ixl_txrx.c:280: ixl_tso_setup(struct tx_ring *txr, if_pkt_info_t pi)
sys/dev/ixl/ixl_txrx.c:343: struct tx_ring *txr = &que->txr;
sys/dev/ixl/ixl_txrx.c:417: struct tx_ring *txr = &vsi->tx_queues[txqid].txr;
sys/dev/ixl/ixl_txrx.c:437: struct tx_ring *txr = &que->txr;
sys/dev/ixl/ixl_txrx.c:455: struct tx_ring *txr = &que->txr;
sys/dev/ixl/ixl_txrx.c:467: struct tx_ring *txr = &que->txr;
sys/dev/ixl/ixl_txrx.c:488: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixl/ixl_txrx.c:792: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixl/ixl_txrx.c:816: struct tx_ring *txr = &tx_que->txr;
sys/dev/ixl/ixl_txrx.c:906: struct tx_ring *txr;
sys/dev/mlx4/mlx4_en/en.h:594: struct mlx4_en_tx_ring **tx_ring;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1266: struct mlx4_en_tx_ring *tx_ring;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1360: tx_ring = priv->tx_ring[i];
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1362: err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1374: for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1375: *((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1435: mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1538: mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1544: mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1580: ring = priv->tx_ring[i];
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1619: priv->tx_ring[i]->bytes = 0;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1620: priv->tx_ring[i]->packets = 0;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1621: priv->tx_ring[i]->tx_csum = 0;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1622: priv->tx_ring[i]->oversized_packets = 0;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1676: if (priv->tx_ring && priv->tx_ring[i])
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1677: mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1717: if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1741: if (priv->tx_ring[i])
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1742: mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:1809: kfree(priv->tx_ring);
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2184: priv->tx_ring = kcalloc(MAX_TX_RINGS,
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2186: if (!priv->tx_ring) {
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2362: tx_size == priv->tx_ring[0]->size)
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2731: struct mlx4_en_tx_ring *tx_ring;
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2858: tx_ring = priv->tx_ring[i];
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2864: CTLFLAG_RD, &tx_ring->packets, 0, "TX packets");
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2866: CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes");
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2868: CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets");
sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c:2870: CTLFLAG_RD, &tx_ring->defrag_attempts, 0,
sys/dev/mlx4/mlx4_en/mlx4_en_port.c:176: const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];
sys/dev/mlx4/mlx4_en/mlx4_en_port.c:226: ring = priv->tx_ring[i];
sys/dev/mlx4/mlx4_en/mlx4_en_port.c:451: const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];
sys/dev/mlx4/mlx4_en/mlx4_en_port.c:458: priv->port_stats.oversized_packets += priv->tx_ring[i]->oversized_packets;
sys/dev/mlx4/mlx4_en/mlx4_en_tx.c:352: struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
sys/dev/mlx4/mlx4_en/mlx4_en_tx.c:427: struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
sys/dev/mlx4/mlx4_en/mlx4_en_tx.c:440: struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
sys/dev/mlx4/mlx4_en/mlx4_en_tx.c:466: struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
sys/dev/mlx4/mlx4_en/mlx4_en_tx.c:642: struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
sys/dev/mlx4/mlx4_en/mlx4_en_tx.c:933: struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
sys/dev/mlx4/mlx4_en/mlx4_en_tx.c:974: ring = priv->tx_ring[i];
sys/dev/mlx4/mlx4_ib/mlx4_ib.h:419: struct mlx4_ib_tun_tx_buf *tx_ring;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1383: sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1384: kfree(sqp->tx_ring[wire_tx_ix].ah);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1385: sqp->tx_ring[wire_tx_ix].ah = ah;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1387: sqp->tx_ring[wire_tx_ix].buf.map,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1394: sqp->tx_ring[wire_tx_ix].buf.map,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1398: list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1421: sqp->tx_ring[wire_tx_ix].ah = NULL;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1574: tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1577: if (!tun_qp->tx_ring) {
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1606: tun_qp->tx_ring[i].buf.addr =
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1608: if (!tun_qp->tx_ring[i].buf.addr)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1610: tun_qp->tx_ring[i].buf.map =
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1612: tun_qp->tx_ring[i].buf.addr,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1616: tun_qp->tx_ring[i].buf.map)) {
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1617: kfree(tun_qp->tx_ring[i].buf.addr);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1620: tun_qp->tx_ring[i].ah = NULL;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1632: ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1634: kfree(tun_qp->tx_ring[i].buf.addr);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1636: kfree(tun_qp->tx_ring);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1637: tun_qp->tx_ring = NULL;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1678: ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1680: kfree(tun_qp->tx_ring[i].buf.addr);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1681: if (tun_qp->tx_ring[i].ah)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1682: ib_destroy_ah(tun_qp->tx_ring[i].ah, 0);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1684: kfree(tun_qp->tx_ring);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1714: ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1716: tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1731: ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1733: tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1870: kfree(sqp->tx_ring[wc.wr_id &
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1872: sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1900: kfree(sqp->tx_ring[wc.wr_id &
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:1902: sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:558: tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:559: if (tun_qp->tx_ring[tun_tx_ix].ah)
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:560: ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:561: tun_qp->tx_ring[tun_tx_ix].ah = ah;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:563: tun_qp->tx_ring[tun_tx_ix].buf.map,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:607: tun_qp->tx_ring[tun_tx_ix].buf.map,
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:611: list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
sys/dev/mlx4/mlx4_ib/mlx4_ib_mad.c:633: tun_qp->tx_ring[tun_tx_ix].ah = NULL;
sys/dev/neta/if_mvneta.c:1622: return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
sys/dev/neta/if_mvneta.c:1631: mtx_lock(&sc->tx_ring[q].ring_mtx);
sys/dev/neta/if_mvneta.c:1640: mtx_unlock(&sc->tx_ring[q].ring_mtx);
sys/dev/neta/if_mvnetavar.h:216: KASSERT(mtx_owned(&(sc)->tx_ring[(q)].ring_mtx),\
sys/dev/neta/if_mvnetavar.h:290: struct mvneta_tx_ring tx_ring[MVNETA_TX_QNUM_MAX];
sys/dev/neta/if_mvnetavar.h:319: (&(sc)->tx_ring[(q)])
sys/dev/netmap/netmap_kern.h:1810: struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l];
sys/dev/netmap/netmap_kloop.c:737: bool tx_ring = (i < num_tx_rings);
sys/dev/netmap/netmap_kloop.c:758: ((tx_ring && direct_tx) ||
sys/dev/netmap/netmap_kloop.c:759: (!tx_ring && direct_rx)) ? NULL :
sys/dev/netmap/netmap_kloop.c:762: (tx_ring ? direct_tx : direct_rx);
sys/dev/netmap/netmap_kloop.c:771: if (tx_ring && direct_tx) {
sys/dev/netmap/netmap_kloop.c:778: } else if (!tx_ring && direct_rx) {
sys/dev/nfe/if_nfe.c:2836: struct nfe_tx_ring *tx_ring;
sys/dev/nfe/if_nfe.c:2888: tx_ring = &sc->txq;
sys/dev/nfe/if_nfe.c:2890: tdata = &tx_ring->data[i];
sys/dev/nfe/if_nfe.c:2892: bus_dmamap_sync(tx_ring->tx_data_tag,
sys/dev/nfe/if_nfe.c:2894: bus_dmamap_unload(tx_ring->tx_data_tag,
sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c:84: int tx_ring = i - tx_rx_gap;
sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c:86: if (!test_bit(tx_ring, &orphan->tx_mask)) {
sys/dev/qat/qat_common/adf_transport.c:579: struct adf_etr_ring_data *tx_ring;
sys/dev/qat/qat_common/adf_transport.c:625: tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
sys/dev/qat/qat_common/adf_transport.c:626: ring->inflights = tx_ring->inflights;
sys/dev/qlxgb/qla_hw.c:126: if (ha->hw.dma_buf.flags.tx_ring) {
sys/dev/qlxgb/qla_hw.c:127: qla_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
sys/dev/qlxgb/qla_hw.c:128: ha->hw.dma_buf.flags.tx_ring = 0;
sys/dev/qlxgb/qla_hw.c:153: ha->hw.dma_buf.tx_ring.alignment = 8;
sys/dev/qlxgb/qla_hw.c:154: ha->hw.dma_buf.tx_ring.size =
sys/dev/qlxgb/qla_hw.c:157: if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
sys/dev/qlxgb/qla_hw.c:161: ha->hw.dma_buf.flags.tx_ring = 1;
sys/dev/qlxgb/qla_hw.c:164: __func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
sys/dev/qlxgb/qla_hw.c:165: ha->hw.dma_buf.tx_ring.dma_b));
sys/dev/qlxgb/qla_hw.c:278: hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;
sys/dev/qlxgb/qla_hw.c:332: qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);
sys/dev/qlxgb/qla_hw.h:397: uint32_t tx_ring :1,
sys/dev/qlxgb/qla_hw.h:403: qla_dma_t tx_ring;
sys/dev/qlxgbe/ql_def.h:192: qla_tx_ring_t tx_ring[NUM_TX_RINGS];
sys/dev/qlxgbe/ql_hw.c:1241: if (ha->hw.dma_buf.flags.tx_ring) {
sys/dev/qlxgbe/ql_hw.c:1242: ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
sys/dev/qlxgbe/ql_hw.c:1243: ha->hw.dma_buf.flags.tx_ring = 0;
sys/dev/qlxgbe/ql_hw.c:1273: hw->dma_buf.tx_ring.alignment = 8;
sys/dev/qlxgbe/ql_hw.c:1274: hw->dma_buf.tx_ring.size = size + PAGE_SIZE;
sys/dev/qlxgbe/ql_hw.c:1276: if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
sys/dev/qlxgbe/ql_hw.c:1281: vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
sys/dev/qlxgbe/ql_hw.c:1282: paddr = hw->dma_buf.tx_ring.dma_addr;
sys/dev/qlxgbe/ql_hw.c:1304: ha->hw.dma_buf.flags.tx_ring = 1;
sys/dev/qlxgbe/ql_hw.c:1307: __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
sys/dev/qlxgbe/ql_hw.c:1308: hw->dma_buf.tx_ring.dma_b));
sys/dev/qlxgbe/ql_hw.c:2551: if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
sys/dev/qlxgbe/ql_hw.c:2555: ha->tx_ring[txr_idx].tx_buf[j].m_head));
sys/dev/qlxgbe/ql_hw.c:3389: tcntxt->tx_ring[0].paddr =
sys/dev/qlxgbe/ql_hw.c:3391: tcntxt->tx_ring[0].tx_consumer =
sys/dev/qlxgbe/ql_hw.c:3393: tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);
sys/dev/qlxgbe/ql_hw.c:3395: tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
sys/dev/qlxgbe/ql_hw.c:3396: tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);
sys/dev/qlxgbe/ql_hw.c:3418: hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
sys/dev/qlxgbe/ql_hw.c:3419: hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;
sys/dev/qlxgbe/ql_hw.c:3743: txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];
sys/dev/qlxgbe/ql_hw.c:871: CTLFLAG_RD, &ha->tx_ring[i].count,
sys/dev/qlxgbe/ql_hw.c:877: CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count,
sys/dev/qlxgbe/ql_hw.h:1115: q80_rq_tx_ring_t tx_ring[MAX_TCNTXT_RINGS];
sys/dev/qlxgbe/ql_hw.h:1132: q80_rsp_tx_ring_t tx_ring[MAX_TCNTXT_RINGS];
sys/dev/qlxgbe/ql_hw.h:1462: uint32_t tx_ring :1,
sys/dev/qlxgbe/ql_hw.h:1468: qla_dma_t tx_ring;
sys/dev/qlxgbe/ql_ioctl.c:521: bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);
sys/dev/qlxgbe/ql_os.c:1267: if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) ||
sys/dev/qlxgbe/ql_os.c:1271: ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
sys/dev/qlxgbe/ql_os.c:1275: ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
sys/dev/qlxgbe/ql_os.c:1284: map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
sys/dev/qlxgbe/ql_os.c:1343: ha->tx_ring[txr_idx].count++;
sys/dev/qlxgbe/ql_os.c:1345: ha->tx_ring[txr_idx].iscsi_pkt_count++;
sys/dev/qlxgbe/ql_os.c:1346: ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
sys/dev/qlxgbe/ql_os.c:1704: bzero((void *)ha->tx_ring[i].tx_buf,
sys/dev/qlxgbe/ql_os.c:1710: txb = &ha->tx_ring[j].tx_buf[i];
sys/dev/qlxgbe/ql_os.c:1766: qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
sys/dev/qlxgbe/ql_os.c:1775: bzero((void *)ha->tx_ring[i].tx_buf,
sys/dev/qlxge/qls_def.h:299: qla_tx_ring_t tx_ring[MAX_TX_RINGS];
sys/dev/qlxge/qls_hw.c:1135: txr = &ha->tx_ring[wid];
sys/dev/qlxge/qls_hw.c:1332: if (ha->tx_ring[r_idx].flags.wq_dma) {
sys/dev/qlxge/qls_hw.c:1333: qls_free_dmabuf(ha, &ha->tx_ring[r_idx].wq_dma);
sys/dev/qlxge/qls_hw.c:1334: ha->tx_ring[r_idx].flags.wq_dma = 0;
sys/dev/qlxge/qls_hw.c:1337: if (ha->tx_ring[r_idx].flags.privb_dma) {
sys/dev/qlxge/qls_hw.c:1338: qls_free_dmabuf(ha, &ha->tx_ring[r_idx].privb_dma);
sys/dev/qlxge/qls_hw.c:1339: ha->tx_ring[r_idx].flags.privb_dma = 0;
sys/dev/qlxge/qls_hw.c:1354: txb = &ha->tx_ring[i].tx_buf[j];
sys/dev/qlxge/qls_hw.c:1379: ha->tx_ring[ridx].wq_dma.alignment = 8;
sys/dev/qlxge/qls_hw.c:1380: ha->tx_ring[ridx].wq_dma.size =
sys/dev/qlxge/qls_hw.c:1383: ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].wq_dma);
sys/dev/qlxge/qls_hw.c:1389: ha->tx_ring[ridx].flags.wq_dma = 1;
sys/dev/qlxge/qls_hw.c:1391: ha->tx_ring[ridx].privb_dma.alignment = 8;
sys/dev/qlxge/qls_hw.c:1392: ha->tx_ring[ridx].privb_dma.size = QLA_TX_PRIVATE_BSIZE;
sys/dev/qlxge/qls_hw.c:1394: ret = qls_alloc_dmabuf(ha, &ha->tx_ring[ridx].privb_dma);
sys/dev/qlxge/qls_hw.c:1401: ha->tx_ring[ridx].flags.privb_dma = 1;
sys/dev/qlxge/qls_hw.c:1403: ha->tx_ring[ridx].wq_vaddr = ha->tx_ring[ridx].wq_dma.dma_b;
sys/dev/qlxge/qls_hw.c:1404: ha->tx_ring[ridx].wq_paddr = ha->tx_ring[ridx].wq_dma.dma_addr;
sys/dev/qlxge/qls_hw.c:1406: v_addr = ha->tx_ring[ridx].privb_dma.dma_b;
sys/dev/qlxge/qls_hw.c:1407: p_addr = ha->tx_ring[ridx].privb_dma.dma_addr;
sys/dev/qlxge/qls_hw.c:1409: ha->tx_ring[ridx].wq_icb_vaddr = v_addr;
sys/dev/qlxge/qls_hw.c:1410: ha->tx_ring[ridx].wq_icb_paddr = p_addr;
sys/dev/qlxge/qls_hw.c:1412: ha->tx_ring[ridx].txr_cons_vaddr =
sys/dev/qlxge/qls_hw.c:1414: ha->tx_ring[ridx].txr_cons_paddr = p_addr + (PAGE_SIZE >> 1);
sys/dev/qlxge/qls_hw.c:1419: txb = ha->tx_ring[ridx].tx_buf;
sys/dev/qlxge/qls_hw.c:1466: txb = &ha->tx_ring[i].tx_buf[j];
sys/dev/qlxge/qls_hw.c:639: txr_done = ha->tx_ring[txr_idx].txr_done;
sys/dev/qlxge/qls_hw.c:640: txr_next = ha->tx_ring[txr_idx].txr_next;
sys/dev/qlxge/qls_hw.c:643: ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS;
sys/dev/qlxge/qls_hw.c:645: ha->tx_ring[txr_idx].txr_free = txr_done - txr_next;
sys/dev/qlxge/qls_hw.c:647: ha->tx_ring[txr_idx].txr_free = NUM_TX_DESCRIPTORS +
sys/dev/qlxge/qls_hw.c:651: if (ha->tx_ring[txr_idx].txr_free <= QLA_TX_MIN_FREE)
sys/dev/qlxge/qls_hw.c:685: if (ha->tx_ring[txr_idx].txr_free <= (NUM_TX_DESCRIPTORS >> 2)) {
sys/dev/qlxge/qls_hw.c:689: ha->tx_ring[txr_idx].txr_free);
sys/dev/qlxge/qls_hw.c:694: tx_mac = (q81_tx_mac_t *)&ha->tx_ring[txr_idx].wq_vaddr[txr_next];
sys/dev/qlxge/qls_hw.c:705: ha->tx_ring[txr_idx].tx_tso_frames++;
sys/dev/qlxge/qls_hw.c:707: ha->tx_ring[txr_idx].tx_frames++;
sys/dev/qlxge/qls_hw.c:717: ha->tx_ring[txr_idx].tx_vlan_frames++;
sys/dev/qlxge/qls_hw.c:740: ha->tx_ring[txr_idx].tx_buf[txr_next].oal_paddr;
sys/dev/qlxge/qls_hw.c:745: tx_desc = ha->tx_ring[txr_idx].tx_buf[txr_next].oal_vaddr;
sys/dev/qlxge/qls_hw.c:761: ha->tx_ring[txr_idx].txr_next = txr_next;
sys/dev/qlxge/qls_hw.c:763: ha->tx_ring[txr_idx].txr_free--;
sys/dev/qlxge/qls_hw.c:930: ha->tx_ring[0].wq_db_offset));
sys/dev/qlxge/qls_hw.h:920: (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_INDEX_REG), idx)
sys/dev/qlxge/qls_hw.h:923: (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_INDEX_REG))
sys/dev/qlxge/qls_hw.h:926: (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_VALID_REG),\
sys/dev/qlxge/qls_hw.h:930: (ha->tx_ring[wq_idx].wq_db_offset + Q81_WRKQ_VALID_REG),\
sys/dev/qlxge/qls_isr.c:54: txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];
sys/dev/qlxge/qls_isr.c:66: ha->tx_ring[txr_idx].txr_done++;
sys/dev/qlxge/qls_isr.c:68: if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
sys/dev/qlxge/qls_isr.c:69: ha->tx_ring[txr_idx].txr_done = 0;
sys/dev/qlxge/qls_os.c:1123: tx_idx = ha->tx_ring[txr_idx].txr_next;
sys/dev/qlxge/qls_os.c:1125: map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;
sys/dev/qlxge/qls_os.c:1183: ha->tx_ring[txr_idx].count++;
sys/dev/qlxge/qls_os.c:1184: ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
sys/dev/qlxge/qls_os.c:1185: ha->tx_ring[txr_idx].tx_buf[tx_idx].map = map;
sys/dev/qlxge/qls_os.c:1248: qls_flush_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]);
sys/dev/qlxge/qls_os.c:166: (void *)ha->tx_ring[i].tx_frames));
sys/dev/qlxge/qls_os.c:171: (void *)ha->tx_ring[i].tx_tso_frames));
sys/dev/qlxge/qls_os.c:176: (void *)ha->tx_ring[i].tx_vlan_frames));
sys/dev/qlxge/qls_os.c:181: ha->tx_ring[i].txr_free);
sys/dev/qlxge/qls_os.c:186: ha->tx_ring[i].txr_next);
sys/dev/qlxge/qls_os.c:191: ha->tx_ring[i].txr_done);
sys/dev/qlxge/qls_os.c:196: *(ha->tx_ring[i].txr_cons_vaddr));
sys/dev/rtwn/pci/rtwn_pci_attach.c
267
struct rtwn_tx_ring *tx_ring = &pc->tx_ring[qid];
sys/dev/rtwn/pci/rtwn_pci_attach.c
274
size, 1, size, 0, NULL, NULL, &tx_ring->desc_dmat);
sys/dev/rtwn/pci/rtwn_pci_attach.c
280
error = bus_dmamem_alloc(tx_ring->desc_dmat, &tx_ring->desc,
sys/dev/rtwn/pci/rtwn_pci_attach.c
281
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &tx_ring->desc_map);
sys/dev/rtwn/pci/rtwn_pci_attach.c
286
error = bus_dmamap_load(tx_ring->desc_dmat, tx_ring->desc_map,
sys/dev/rtwn/pci/rtwn_pci_attach.c
287
tx_ring->desc, size, rtwn_pci_dma_map_addr, &tx_ring->paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
293
bus_dmamap_sync(tx_ring->desc_dmat, tx_ring->desc_map,
sys/dev/rtwn/pci/rtwn_pci_attach.c
298
MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &tx_ring->data_dmat);
sys/dev/rtwn/pci/rtwn_pci_attach.c
305
struct rtwn_tx_data *tx_data = &tx_ring->tx_data[i];
sys/dev/rtwn/pci/rtwn_pci_attach.c
306
void *tx_desc = (uint8_t *)tx_ring->desc + sc->txdesc_len * i;
sys/dev/rtwn/pci/rtwn_pci_attach.c
307
uint32_t next_desc_addr = tx_ring->paddr +
sys/dev/rtwn/pci/rtwn_pci_attach.c
312
error = bus_dmamap_create(tx_ring->data_dmat, 0, &tx_data->map);
sys/dev/rtwn/pci/rtwn_pci_attach.c
332
struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
sys/dev/rtwn/pci/rtwn_pci_attach.c
369
struct rtwn_tx_ring *ring = &pc->tx_ring[RTWN_PCI_BEACON_QUEUE];
sys/dev/rtwn/pci/rtwn_pci_attach.c
416
struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
sys/dev/rtwn/pci/rtwn_pci_attach.c
438
struct rtwn_tx_ring *tx_ring = &pc->tx_ring[qid];
sys/dev/rtwn/pci/rtwn_pci_attach.c
442
if (tx_ring->desc_dmat != NULL) {
sys/dev/rtwn/pci/rtwn_pci_attach.c
443
if (tx_ring->desc != NULL) {
sys/dev/rtwn/pci/rtwn_pci_attach.c
444
bus_dmamap_sync(tx_ring->desc_dmat,
sys/dev/rtwn/pci/rtwn_pci_attach.c
445
tx_ring->desc_map, BUS_DMASYNC_POSTWRITE);
sys/dev/rtwn/pci/rtwn_pci_attach.c
446
bus_dmamap_unload(tx_ring->desc_dmat,
sys/dev/rtwn/pci/rtwn_pci_attach.c
447
tx_ring->desc_map);
sys/dev/rtwn/pci/rtwn_pci_attach.c
448
bus_dmamem_free(tx_ring->desc_dmat, tx_ring->desc,
sys/dev/rtwn/pci/rtwn_pci_attach.c
449
tx_ring->desc_map);
sys/dev/rtwn/pci/rtwn_pci_attach.c
451
bus_dma_tag_destroy(tx_ring->desc_dmat);
sys/dev/rtwn/pci/rtwn_pci_attach.c
455
tx_data = &tx_ring->tx_data[i];
sys/dev/rtwn/pci/rtwn_pci_attach.c
458
bus_dmamap_sync(tx_ring->data_dmat, tx_data->map,
sys/dev/rtwn/pci/rtwn_pci_attach.c
460
bus_dmamap_unload(tx_ring->data_dmat, tx_data->map);
sys/dev/rtwn/pci/rtwn_pci_attach.c
465
if (tx_ring->data_dmat != NULL) {
sys/dev/rtwn/pci/rtwn_pci_attach.c
466
bus_dma_tag_destroy(tx_ring->data_dmat);
sys/dev/rtwn/pci/rtwn_pci_attach.c
467
tx_ring->data_dmat = NULL;
sys/dev/rtwn/pci/rtwn_pci_attach.c
471
tx_ring->queued = 0;
sys/dev/rtwn/pci/rtwn_pci_attach.c
472
tx_ring->last = tx_ring->cur = 0;
sys/dev/rtwn/pci/rtwn_pci_attach.c
520
__func__, (uintmax_t)pc->tx_ring[RTWN_PCI_BK_QUEUE].paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
521
(uintmax_t)pc->tx_ring[RTWN_PCI_BE_QUEUE].paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
522
(uintmax_t)pc->tx_ring[RTWN_PCI_VI_QUEUE].paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
523
(uintmax_t)pc->tx_ring[RTWN_PCI_VO_QUEUE].paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
524
(uintmax_t)pc->tx_ring[RTWN_PCI_BEACON_QUEUE].paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
525
(uintmax_t)pc->tx_ring[RTWN_PCI_MGNT_QUEUE].paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
526
(uintmax_t)pc->tx_ring[RTWN_PCI_HIGH_QUEUE].paddr,
sys/dev/rtwn/pci/rtwn_pci_attach.c
534
pc->tx_ring[RTWN_PCI_BK_QUEUE].paddr);
sys/dev/rtwn/pci/rtwn_pci_attach.c
536
pc->tx_ring[RTWN_PCI_BE_QUEUE].paddr);
sys/dev/rtwn/pci/rtwn_pci_attach.c
538
pc->tx_ring[RTWN_PCI_VI_QUEUE].paddr);
sys/dev/rtwn/pci/rtwn_pci_attach.c
540
pc->tx_ring[RTWN_PCI_VO_QUEUE].paddr);
sys/dev/rtwn/pci/rtwn_pci_attach.c
542
pc->tx_ring[RTWN_PCI_BEACON_QUEUE].paddr);
sys/dev/rtwn/pci/rtwn_pci_attach.c
544
pc->tx_ring[RTWN_PCI_MGNT_QUEUE].paddr);
sys/dev/rtwn/pci/rtwn_pci_attach.c
546
pc->tx_ring[RTWN_PCI_HIGH_QUEUE].paddr);
sys/dev/rtwn/pci/rtwn_pci_rx.c
276
struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
sys/dev/rtwn/pci/rtwn_pci_tx.c
113
ring = &pc->tx_ring[qid];
sys/dev/rtwn/pci/rtwn_pci_tx.c
211
ring = &pc->tx_ring[RTWN_PCI_BEACON_QUEUE];
sys/dev/rtwn/pci/rtwn_pci_var.h
115
struct rtwn_tx_ring tx_ring[RTWN_PCI_NTXQUEUES];
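
The rtwn_pci_attach.c lines show the usual bus_dma(9) teardown order for a descriptor ring: sync, unload, free the DMA memory, then destroy the tag. A hedged sketch of that order follows; the structure is a simplified stand-in for struct rtwn_tx_ring, and only the field names visible in the listing are kept:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct toy_ring {
	bus_dma_tag_t	desc_dmat;
	bus_dmamap_t	desc_map;
	void		*desc;
};

static void
toy_ring_free(struct toy_ring *ring)
{
	if (ring->desc_dmat != NULL) {
		if (ring->desc != NULL) {
			/* Same sync op as the listing before tearing down. */
			bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
			bus_dmamem_free(ring->desc_dmat, ring->desc,
			    ring->desc_map);
			ring->desc = NULL;
		}
		bus_dma_tag_destroy(ring->desc_dmat);
		ring->desc_dmat = NULL;
	}
}

The ordering matters: the map must be unloaded before the memory backing it is freed, and the tag can only be destroyed once nothing allocated from it remains.
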
sys/dev/sfxge/sfxge.c
94
#define SFXGE_PARAM_TX_RING SFXGE_PARAM(tx_ring)
sys/dev/sfxge/sfxge.c
97
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
sys/dev/thunderbolt/nhi.c
1059
txd = &r->tx_ring[r->tx_ci].tx;
sys/dev/thunderbolt/nhi.c
421
r->tx_ring = (union nhi_ring_desc *)(ring);
sys/dev/thunderbolt/nhi.c
430
"TX %p [0x%jx]\n", ringnum, r->tx_ring, r->tx_ring_busaddr,
sys/dev/thunderbolt/nhi.c
787
desc = &r->tx_ring[r->tx_pi].tx;
sys/dev/thunderbolt/nhi_var.h
81
union nhi_ring_desc *tx_ring;
sys/dev/xen/netback/netback.c
1432
txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
sys/dev/xen/netback/netback.c
1475
xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
sys/dev/xen/netback/netback.c
1495
if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
sys/dev/xen/netback/netback.c
1496
netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
sys/dev/xen/netback/netback.c
1507
RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
sys/dev/xen/netback/netback.c
1509
(netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx);
sys/dev/xen/netback/netback.c
1525
xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring,
sys/dev/xen/netback/netback.c
1527
xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring,
sys/dev/xen/netback/netback.c
1543
xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
sys/dev/xen/netback/netback.c
1544
xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
sys/dev/xen/netback/netback.c
1555
while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
sys/dev/xen/netback/netback.c
1556
netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
sys/dev/xen/netback/netback.c
1564
xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
sys/dev/xen/netback/netback.c
1565
xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
sys/dev/xen/netback/netback.c
157
const netif_tx_back_ring_t *tx_ring,
sys/dev/xen/netback/netback.c
319
netif_tx_back_ring_t tx_ring;
sys/dev/xen/netback/netback.c
545
&xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;
sys/dev/xen/netback/netback.c
729
BACK_RING_INIT(&ring->back_ring.tx_ring,
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
277
struct ipoib_cm_tx_buf *tx_ring;
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
368
struct ipoib_tx_buf *tx_ring;
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1010
p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1011
if (!p->tx_ring) {
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1016
memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1057
kfree(p->tx_ring);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1078
if (p->tx_ring) {
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1095
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1108
kfree(p->tx_ring);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
648
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
693
tx_req = &tx->tx_ring[wr_id];
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
348
tx_req = &priv->tx_ring[wr_id];
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
525
tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
807
tx_req = &priv->tx_ring[priv->tx_tail &
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
802
priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
803
if (!priv->tx_ring) {
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
808
memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
818
kfree(priv->tx_ring);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
869
kfree(priv->tx_ring);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
872
priv->tx_ring = NULL;
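
The ipoib lines index tx_ring with a free-running counter masked by ipoib_sendq_size - 1, which only works when the send queue size is a power of two. A small sketch of that indexing scheme with illustrative names (not the ipoib structures):

#include <stdint.h>

#define SENDQ_SIZE 128			/* must be a power of two */

struct toy_tx_buf {
	void *payload;			/* placeholder for the posted buffer */
};

struct toy_priv {
	struct toy_tx_buf tx_ring[SENDQ_SIZE];
	uint64_t tx_head;		/* incremented on post */
	uint64_t tx_tail;		/* incremented on completion */
};

/* Slot for the next post; completion applies the same mask to tx_tail. */
static struct toy_tx_buf *
toy_next_tx_slot(struct toy_priv *priv)
{
	return (&priv->tx_ring[priv->tx_head & (SENDQ_SIZE - 1)]);
}

/* Outstanding (posted but not yet completed) entries. */
static uint64_t
toy_tx_outstanding(const struct toy_priv *priv)
{
	return (priv->tx_head - priv->tx_tail);
}
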
sys/ofed/drivers/infiniband/ulp/sdp/sdp.h
150
#define tx_ring_posted(ssk) (ring_posted(ssk->tx_ring) + \
sys/ofed/drivers/infiniband/ulp/sdp/sdp.h
151
(ssk->tx_ring.rdma_inflight ? ssk->tx_ring.rdma_inflight->busy : 0))
sys/ofed/drivers/infiniband/ulp/sdp/sdp.h
153
#define tx_ring_posted(ssk) ring_posted(ssk->tx_ring)
sys/ofed/drivers/infiniband/ulp/sdp/sdp.h
280
#define tx_credits(ssk) (atomic_read(&ssk->tx_ring.credits))
sys/ofed/drivers/infiniband/ulp/sdp/sdp.h
360
struct sdp_tx_ring tx_ring;
sys/ofed/drivers/infiniband/ulp/sdp/sdp.h
459
ib_req_notify_cq(ssk->tx_ring.cq, IB_CQ_NEXT_COMP);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_bcopy.c
124
unsigned long mseq = ring_head(ssk->tx_ring);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_cma.c
118
qp_init_attr.send_cq = ssk->tx_ring.cq;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_cma.c
179
atomic_set(&ssk->tx_ring.credits, ssk->max_bufs);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_cma.c
211
atomic_set(&ssk->tx_ring.credits, ssk->max_bufs);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
398
ssk->tx_ring.rdma_inflight = NULL;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
402
ssk->tx_ring.buffer = NULL;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_proc.c
147
tx_queue = sdp_sk(sk)->write_seq - sdp_sk(sk)->tx_ring.una_seq;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
421
atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head(ssk->tx_ring) +
sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
610
if (!ssk->rx_ring.cq || !ssk->tx_ring.cq)
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
123
tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
159
atomic_inc(&ssk->tx_ring.head);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
160
atomic_dec(&ssk->tx_ring.credits);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
172
struct sdp_tx_ring *tx_ring = &ssk->tx_ring;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
174
if (unlikely(mseq != ring_tail(*tx_ring))) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
176
mseq, ring_tail(*tx_ring));
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
181
tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
191
atomic_inc(&tx_ring->tail);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
243
if (!ssk->tx_ring.rdma_inflight) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
248
if (!ssk->tx_ring.rdma_inflight->busy) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
257
ssk->tx_ring.rdma_inflight->busy = 0;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
288
if (!ssk->tx_ring.cq) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
294
n = ib_poll_cq(ssk->tx_ring.cq, SDP_NUM_WC, ibwc);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
319
ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
340
callout_reset(&ssk->tx_ring.timer, SDP_TX_POLL_TIMEOUT,
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
344
if (ssk->tx_ring.rdma_inflight && ssk->tx_ring.rdma_inflight->busy) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
357
if (!callout_active(&ssk->tx_ring.timer))
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
359
callout_deactivate(&ssk->tx_ring.timer);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
382
mb = sdp_send_completion(ssk, ring_tail(ssk->tx_ring));
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
433
callout_init_rw(&ssk->tx_ring.timer, &ssk->lock, 0);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
435
atomic_set(&ssk->tx_ring.head, 1);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
436
atomic_set(&ssk->tx_ring.tail, 1);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
438
ssk->tx_ring.buffer = malloc(sizeof(*ssk->tx_ring.buffer) * SDP_TX_SIZE,
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
448
ssk->tx_ring.cq = tx_cq;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
449
ssk->tx_ring.poll_cnt = 0;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
455
free(ssk->tx_ring.buffer, M_SDP);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
456
ssk->tx_ring.buffer = NULL;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
466
callout_stop(&ssk->tx_ring.timer);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
469
callout_drain(&ssk->tx_ring.timer);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
472
if (ssk->tx_ring.buffer) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
474
free(ssk->tx_ring.buffer, M_SDP);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
475
ssk->tx_ring.buffer = NULL;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
478
if (ssk->tx_ring.cq) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
479
ib_destroy_cq(ssk->tx_ring.cq);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
480
ssk->tx_ring.cq = NULL;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
483
WARN_ON(ring_head(ssk->tx_ring) != ring_tail(ssk->tx_ring));
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
54
if (!callout_pending(&ssk->tx_ring.timer))
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
55
callout_reset(&ssk->tx_ring.timer, SDP_TX_POLL_TIMEOUT,
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
59
if (force || (++ssk->tx_ring.poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0)
sys/ofed/drivers/infiniband/ulp/sdp/sdp_tx.c
87
mseq = ring_head(ssk->tx_ring);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_zcopy.c
251
if (!ssk->tx_ring.rdma_inflight->busy) {
sys/ofed/drivers/infiniband/ulp/sdp/sdp_zcopy.c
271
!ssk->tx_ring.rdma_inflight->busy);
sys/ofed/drivers/infiniband/ulp/sdp/sdp_zcopy.c
533
ssk->tx_ring.rdma_inflight = rx_sa;
sys/ofed/drivers/infiniband/ulp/sdp/sdp_zcopy.c
606
ssk->tx_ring.rdma_inflight = NULL;
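
The sdp_tx.c lines arm a callout on the tx_ring to poll the send CQ and tear it down with callout_stop()/callout_drain() when the ring goes away. A sketch of that arm/drain pattern using the standard callout(9) calls; the structure, timeout value, and handler body are illustrative only:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/callout.h>

#define TOY_TX_POLL_TIMEOUT	(hz / 4)	/* assumed poll interval */

struct toy_tx_ring {
	struct callout	timer;
};

static void toy_poll_timeout(void *arg);

static void
toy_ring_init(struct toy_tx_ring *ring, struct rwlock *lock)
{
	/* Handler runs with the supplied lock held, as in sdp_tx.c. */
	callout_init_rw(&ring->timer, lock, 0);
}

static void
toy_poll_arm(struct toy_tx_ring *ring)
{
	/* Only re-arm if the previous shot has fired or was never armed. */
	if (!callout_pending(&ring->timer))
		callout_reset(&ring->timer, TOY_TX_POLL_TIMEOUT,
		    toy_poll_timeout, ring);
}

static void
toy_poll_timeout(void *arg)
{
	struct toy_tx_ring *ring = arg;

	callout_deactivate(&ring->timer);
	/* ... poll the completion queue here and re-arm if still busy ... */
}

static void
toy_ring_destroy(struct toy_tx_ring *ring)
{
	callout_stop(&ring->timer);
	/* Drain with the lock dropped so a running handler can finish. */
	callout_drain(&ring->timer);
}
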
usr.sbin/virtual_oss/virtual_oss/int.h
183
struct virtual_ring tx_ring[2];
usr.sbin/virtual_oss/virtual_oss/main.c
101
size = vring_total_read_len(&pvc->tx_ring[0]);
usr.sbin/virtual_oss/virtual_oss/main.c
1037
vring_get_write(&pvc->tx_ring[1], &buf_ptr, &buf_len);
usr.sbin/virtual_oss/virtual_oss/main.c
106
size += vring_total_read_len(&pvc->tx_ring[1]);
usr.sbin/virtual_oss/virtual_oss/main.c
1073
vring_inc_write(&pvc->tx_ring[1], buf_len);
usr.sbin/virtual_oss/virtual_oss/main.c
1232
data.val = vring_total_read_len(&pvc->tx_ring[1]);
usr.sbin/virtual_oss/virtual_oss/main.c
1378
vring_reset(&pvc->tx_ring[1]);
usr.sbin/virtual_oss/virtual_oss/main.c
274
vring_free(&pvc->tx_ring[0]);
usr.sbin/virtual_oss/virtual_oss/main.c
275
vring_free(&pvc->tx_ring[1]);
usr.sbin/virtual_oss/virtual_oss/main.c
359
vring_free(&pvc->tx_ring[0]);
usr.sbin/virtual_oss/virtual_oss/main.c
360
vring_free(&pvc->tx_ring[1]);
usr.sbin/virtual_oss/virtual_oss/main.c
430
if (vring_alloc(&pvc->tx_ring[0], bufsize_internal))
usr.sbin/virtual_oss/virtual_oss/main.c
432
if (vring_alloc(&pvc->tx_ring[1], bufsize))
usr.sbin/virtual_oss/virtual_oss/main.c
447
vring_free(&pvc->tx_ring[1]);
usr.sbin/virtual_oss/virtual_oss/main.c
449
vring_free(&pvc->tx_ring[0]);
usr.sbin/virtual_oss/virtual_oss/main.c
899
vring_get_read(&pvc->tx_ring[1], &src_ptr, &src_len);
usr.sbin/virtual_oss/virtual_oss/main.c
900
vring_get_write(&pvc->tx_ring[0], &dst_ptr, &dst_len);
usr.sbin/virtual_oss/virtual_oss/main.c
920
vring_inc_read(&pvc->tx_ring[1], src_len);
usr.sbin/virtual_oss/virtual_oss/main.c
921
vring_inc_write(&pvc->tx_ring[0], dst_len);
usr.sbin/virtual_oss/virtual_oss/main.c
939
vring_get_read(&pvc->tx_ring[1], &src_ptr, &src_len);
usr.sbin/virtual_oss/virtual_oss/main.c
940
vring_get_write(&pvc->tx_ring[0], &dst_ptr, &dst_len);
usr.sbin/virtual_oss/virtual_oss/main.c
993
vring_inc_read(&pvc->tx_ring[1], src_len);
usr.sbin/virtual_oss/virtual_oss/main.c
994
vring_inc_write(&pvc->tx_ring[0], dst_len);
usr.sbin/virtual_oss/virtual_oss/virtual_oss.c
542
if (vclient_read_linear(pvc, &pvc->tx_ring[0],
usr.sbin/virtual_oss/virtual_oss/virtual_oss.c
595
if (vclient_read_linear(pvc, &pvc->tx_ring[0],
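
The virtual_oss lines pair two rings per client, tx_ring[1] receiving data from the client and tx_ring[0] holding the internal copy, with main.c draining one into the other. The sketch below shows that drain-one-ring-into-the-other shape with a minimal hypothetical byte ring; it does not use virtual_oss's real vring_* API:

#include <stddef.h>
#include <stdint.h>

struct toy_ring {
	uint8_t	buf[4096];	/* capacity must stay a power of two */
	size_t	rd;		/* free-running read offset */
	size_t	wr;		/* free-running write offset */
};

/* Bytes currently buffered. */
static size_t
toy_ring_len(const struct toy_ring *r)
{
	return (r->wr - r->rd);
}

/* Move as much as fits from the client-facing ring to the internal one. */
static void
toy_drain(struct toy_ring *client, struct toy_ring *internal)
{
	size_t n = toy_ring_len(client);
	size_t room = sizeof(internal->buf) - toy_ring_len(internal);

	if (n > room)
		n = room;
	for (size_t i = 0; i < n; i++)
		internal->buf[(internal->wr + i) % sizeof(internal->buf)] =
		    client->buf[(client->rd + i) % sizeof(client->buf)];
	client->rd += n;
	internal->wr += n;
}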