drivers/atm/fore200e.c:1088: struct host_rxq* rxq = &fore200e->host_rxq;
drivers/atm/fore200e.c:1095: entry = &rxq->host_entry[ rxq->head ];
drivers/atm/fore200e.c:1126: FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
drivers/atm/fore200e.c:2113: struct host_rxq* rxq = &fore200e->host_rxq;
drivers/atm/fore200e.c:2121: &rxq->status,
drivers/atm/fore200e.c:2130: &rxq->rpd,
drivers/atm/fore200e.c:2135: fore200e_dma_chunk_free(fore200e, &rxq->status);
drivers/atm/fore200e.c:2145: rxq->host_entry[ i ].status =
drivers/atm/fore200e.c:2146: FORE200E_INDEX(rxq->status.align_addr, enum status, i);
drivers/atm/fore200e.c:2147: rxq->host_entry[ i ].rpd =
drivers/atm/fore200e.c:2148: FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
drivers/atm/fore200e.c:2149: rxq->host_entry[ i ].rpd_dma =
drivers/atm/fore200e.c:2150: FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
drivers/atm/fore200e.c:2151: rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
drivers/atm/fore200e.c:2153: *rxq->host_entry[ i ].status = STATUS_FREE;
drivers/atm/fore200e.c:2155: fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
drivers/atm/fore200e.c:2158: fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
drivers/atm/fore200e.c:2163: rxq->head = 0;
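
The fore200e hits above show the classic fixed-ring idiom: host_entry[] is a static array, rxq->head is the only cursor, and FORE200E_NEXT_ENTRY() (line 1126) advances it modulo QUEUE_SIZE_RX. A minimal sketch of that advance, with RING_NEXT_ENTRY and struct host_ring as hypothetical stand-ins; the driver's real macro lives in fore200e.h and may be written differently:

/* Hypothetical stand-in for the driver's ring-advance macro:
 * wrap the cursor when it reaches the end of a fixed-size ring. */
#define RING_NEXT_ENTRY(index, size)	((index) = ((index) + 1) % (size))

struct host_ring {
	unsigned int head;	/* next entry to service */
	unsigned int size;	/* number of entries in the ring */
};

/* Consume one entry and move the cursor, wrapping at ring->size. */
static void ring_advance(struct host_ring *ring)
{
	RING_NEXT_ENTRY(ring->head, ring->size);
}

The same wrap appears below as (cr_tia + 1) % rxq->count in btintel_pcie and as a compare-and-reset in alx.
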
drivers/bluetooth/btintel_pcie.c:1389: struct rxq *rxq;
drivers/bluetooth/btintel_pcie.c:1404: rxq = &data->rxq;
drivers/bluetooth/btintel_pcie.c:1410: urbd1 = &rxq->urbd1s[cr_tia];
drivers/bluetooth/btintel_pcie.c:1413: buf = &rxq->bufs[urbd1->frbd_tag];
drivers/bluetooth/btintel_pcie.c:1427: cr_tia = (cr_tia + 1) % rxq->count;
drivers/bluetooth/btintel_pcie.c:1639: ci->addr_frbdq = data->rxq.frbds_p_addr;
drivers/bluetooth/btintel_pcie.c:1640: ci->num_frbdq = data->rxq.count;
drivers/bluetooth/btintel_pcie.c:1642: ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
drivers/bluetooth/btintel_pcie.c:1643: ci->num_urbdq1 = data->rxq.count;
drivers/bluetooth/btintel_pcie.c:1697: struct rxq *rxq)
drivers/bluetooth/btintel_pcie.c:1700: dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
drivers/bluetooth/btintel_pcie.c:1701: rxq->buf_v_addr, rxq->buf_p_addr);
drivers/bluetooth/btintel_pcie.c:1702: kfree(rxq->bufs);
drivers/bluetooth/btintel_pcie.c:1706: struct rxq *rxq)
drivers/bluetooth/btintel_pcie.c:1712: rxq->bufs = kmalloc_objs(*buf, rxq->count);
drivers/bluetooth/btintel_pcie.c:1713: if (!rxq->bufs)
drivers/bluetooth/btintel_pcie.c:1719: rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
drivers/bluetooth/btintel_pcie.c:1720: rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
drivers/bluetooth/btintel_pcie.c:1721: &rxq->buf_p_addr,
drivers/bluetooth/btintel_pcie.c:1723: if (!rxq->buf_v_addr) {
drivers/bluetooth/btintel_pcie.c:1724: kfree(rxq->bufs);
drivers/bluetooth/btintel_pcie.c:1731: for (i = 0; i < rxq->count; i++) {
drivers/bluetooth/btintel_pcie.c:1732: buf = &rxq->bufs[i];
drivers/bluetooth/btintel_pcie.c:1733: buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
drivers/bluetooth/btintel_pcie.c:1734: buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
drivers/bluetooth/btintel_pcie.c:1763: btintel_pcie_free_rxq_bufs(data, &data->rxq);
drivers/bluetooth/btintel_pcie.c:1819: data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
drivers/bluetooth/btintel_pcie.c:1836: data->rxq.frbds_p_addr = p_addr;
drivers/bluetooth/btintel_pcie.c:1837: data->rxq.frbds = v_addr;
drivers/bluetooth/btintel_pcie.c:1843: data->rxq.urbd1s_p_addr = p_addr;
drivers/bluetooth/btintel_pcie.c:1844: data->rxq.urbd1s = v_addr;
drivers/bluetooth/btintel_pcie.c:1855: err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
drivers/bluetooth/btintel_pcie.c:440: static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
drivers/bluetooth/btintel_pcie.c:446: buf = &rxq->bufs[frbd_index];
drivers/bluetooth/btintel_pcie.c:448: frbd = &rxq->frbds[frbd_index];
drivers/bluetooth/btintel_pcie.c:459: struct rxq *rxq = &data->rxq;
drivers/bluetooth/btintel_pcie.c:463: if (frbd_index > rxq->count)
drivers/bluetooth/btintel_pcie.c:469: btintel_pcie_prepare_rx(rxq, frbd_index);
drivers/bluetooth/btintel_pcie.c:471: frbd_index = (frbd_index + 1) % rxq->count;
drivers/bluetooth/btintel_pcie.c:484: struct rxq *rxq = &data->rxq;
drivers/bluetooth/btintel_pcie.c:490: for (i = 0; i < rxq->count - 3; i++) {
drivers/bluetooth/btintel_pcie.h:338: rxq:6,
drivers/bluetooth/btintel_pcie.h:515: struct rxq rxq;
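
btintel_pcie sizes its rx data area as one coherent DMA allocation of rxq->count * BTINTEL_PCIE_BUFFER_SIZE bytes and then slices it into per-descriptor slots (lines 1719-1734 above). A minimal sketch of that carve-up, assuming hypothetical names (struct data_buf, setup_rx_bufs, BUF_SIZE) in place of the driver's own:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define BUF_SIZE 4096	/* assumed slot size; the driver uses
			 * BTINTEL_PCIE_BUFFER_SIZE */

struct data_buf {		/* hypothetical, mirrors the bufs[] entries */
	dma_addr_t data_p_addr;	/* device-visible address of this slot */
	void *data;		/* CPU-visible address of this slot */
};

/* Allocate one coherent region and point each buffer at its slot. */
static int setup_rx_bufs(struct device *dev, struct data_buf *bufs,
			 int count, void **v_addr, dma_addr_t *p_addr)
{
	int i;

	*v_addr = dma_alloc_coherent(dev, count * BUF_SIZE, p_addr, GFP_KERNEL);
	if (!*v_addr)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		bufs[i].data_p_addr = *p_addr + i * BUF_SIZE;
		bufs[i].data = *v_addr + i * BUF_SIZE;
	}
	return 0;
}

One allocation instead of count small ones keeps the slots physically contiguous and makes teardown a single dma_free_coherent(), exactly as the hits at lines 1700-1702 suggest.
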
drivers/infiniband/hw/cxgb4/cm.c:140: static struct sk_buff_head rxq;
drivers/infiniband/hw/cxgb4/cm.c:4308: while ((skb = skb_dequeue(&rxq))) {
drivers/infiniband/hw/cxgb4/cm.c:4362: skb_queue_tail(&rxq, skb);
drivers/infiniband/hw/cxgb4/cm.c:4463: skb_queue_head_init(&rxq);
drivers/infiniband/hw/hfi1/chip.c:8455: struct hfi1_netdev_rxq *rxq = container_of(napi,
drivers/infiniband/hw/hfi1/chip.c:8457: struct hfi1_ctxtdata *rcd = rxq->rcd;
drivers/infiniband/hw/hfi1/driver.c:1640: struct hfi1_netdev_rxq *rxq = container_of(napi,
drivers/infiniband/hw/hfi1/driver.c:1688: skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
drivers/infiniband/hw/hfi1/ipoib.h:161: struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
drivers/infiniband/hw/hfi1/ipoib_rx.c:46: struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
drivers/infiniband/hw/hfi1/ipoib_rx.c:49: struct napi_struct *napi = &rxq->napi;
drivers/infiniband/hw/hfi1/netdev.h:54: struct hfi1_netdev_rxq *rxq;
drivers/infiniband/hw/hfi1/netdev.h:73: return dd->netdev_rx->rxq[ctxt].rcd;
drivers/infiniband/hw/hfi1/netdev_rx.c:194: rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
drivers/infiniband/hw/hfi1/netdev_rx.c:197: if (!rx->rxq) {
drivers/infiniband/hw/hfi1/netdev_rx.c:203: struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
drivers/infiniband/hw/hfi1/netdev_rx.c:205: rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:209: hfi1_rcd_get(rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:210: rxq->rx = rx;
drivers/infiniband/hw/hfi1/netdev_rx.c:211: rxq->rcd->napi = &rxq->napi;
drivers/infiniband/hw/hfi1/netdev_rx.c:213: i, rxq->rcd->ctxt);
drivers/infiniband/hw/hfi1/netdev_rx.c:218: set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
drivers/infiniband/hw/hfi1/netdev_rx.c:219: netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi);
drivers/infiniband/hw/hfi1/netdev_rx.c:220: rc = msix_netdev_request_rcd_irq(rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:230: struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
drivers/infiniband/hw/hfi1/netdev_rx.c:232: if (rxq->rcd) {
drivers/infiniband/hw/hfi1/netdev_rx.c:233: hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:234: hfi1_rcd_put(rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:235: rxq->rcd = NULL;
drivers/infiniband/hw/hfi1/netdev_rx.c:238: kfree(rx->rxq);
drivers/infiniband/hw/hfi1/netdev_rx.c:239: rx->rxq = NULL;
drivers/infiniband/hw/hfi1/netdev_rx.c:250: struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
drivers/infiniband/hw/hfi1/netdev_rx.c:252: netif_napi_del(&rxq->napi);
drivers/infiniband/hw/hfi1/netdev_rx.c:253: hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:254: hfi1_rcd_put(rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:255: rxq->rcd = NULL;
drivers/infiniband/hw/hfi1/netdev_rx.c:258: kfree(rx->rxq);
drivers/infiniband/hw/hfi1/netdev_rx.c:259: rx->rxq = NULL;
drivers/infiniband/hw/hfi1/netdev_rx.c:268: struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
drivers/infiniband/hw/hfi1/netdev_rx.c:271: rxq->rcd->ctxt);
drivers/infiniband/hw/hfi1/netdev_rx.c:272: napi_enable(&rxq->napi);
drivers/infiniband/hw/hfi1/netdev_rx.c:275: rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:286: struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
drivers/infiniband/hw/hfi1/netdev_rx.c:289: rxq->rcd->ctxt);
drivers/infiniband/hw/hfi1/netdev_rx.c:294: rxq->rcd);
drivers/infiniband/hw/hfi1/netdev_rx.c:295: napi_synchronize(&rxq->napi);
drivers/infiniband/hw/hfi1/netdev_rx.c:296: napi_disable(&rxq->napi);
drivers/infiniband/hw/hfi1/vnic.h:92: struct hfi1_vnic_rx_queue rxq[HFI1_NUM_VNIC_CTXT];
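
The hfi1 hits at chip.c:8455 and driver.c:1640 both recover the per-queue context from the napi_struct embedded in it, which works only because the napi member lives inside the rxq handed to netif_napi_add() (netdev_rx.c:219). A minimal sketch of the container_of() idiom, with my_rxq and my_rxq_poll as hypothetical names:

#include <linux/netdevice.h>

struct my_rxq {			/* hypothetical per-queue context */
	struct napi_struct napi;	/* embedded; passed to netif_napi_add() */
	int idx;
};

/* NAPI poll callback: map the napi pointer back to its container. */
static int my_rxq_poll(struct napi_struct *napi, int budget)
{
	struct my_rxq *rxq = container_of(napi, struct my_rxq, napi);

	/* ... process up to budget packets for queue rxq->idx ... */
	return 0;
}

No back-pointer field is needed: container_of() is pure pointer arithmetic on the member offset, so the per-queue state is always one subtraction away from the napi the core hands back.
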
drivers/infiniband/hw/hfi1/vnic_main.c:292: static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
drivers/infiniband/hw/hfi1/vnic_main.c:295: struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
drivers/infiniband/hw/hfi1/vnic_main.c:303: vinfo->stats[rxq->idx].rx_oversize++;
drivers/infiniband/hw/hfi1/vnic_main.c:305: vinfo->stats[rxq->idx].rx_runt++;
drivers/infiniband/hw/hfi1/vnic_main.c:336: struct hfi1_vnic_rx_queue *rxq;
drivers/infiniband/hw/hfi1/vnic_main.c:370: rxq = &vinfo->rxq[q_idx];
drivers/infiniband/hw/hfi1/vnic_main.c:389: rc = hfi1_vnic_decap_skb(rxq, skb);
drivers/infiniband/hw/hfi1/vnic_main.c:392: hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
drivers/infiniband/hw/hfi1/vnic_main.c:399: skb->protocol = eth_type_trans(skb, rxq->netdev);
drivers/infiniband/hw/hfi1/vnic_main.c:401: napi_gro_receive(&rxq->napi, skb);
drivers/infiniband/hw/hfi1/vnic_main.c:599: struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
drivers/infiniband/hw/hfi1/vnic_main.c:601: rxq->idx = i;
drivers/infiniband/hw/hfi1/vnic_main.c:602: rxq->vinfo = vinfo;
drivers/infiniband/hw/hfi1/vnic_main.c:603: rxq->netdev = netdev;
drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c:288: struct sk_buff *skb, struct sk_buff_head *rxq,
drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c:297: spin_lock_irqsave(&rxq->lock, flags);
drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c:298: skb_queue_splice_tail_init(rxq, &received);
drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c:299: spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/dsa/mv88e6xxx/hwtstamp.c:251: struct sk_buff_head *rxq)
drivers/net/dsa/mv88e6xxx/hwtstamp.c:262: spin_lock_irqsave(&rxq->lock, flags);
drivers/net/dsa/mv88e6xxx/hwtstamp.c:263: skb_queue_splice_tail_init(rxq, &received);
drivers/net/dsa/mv88e6xxx/hwtstamp.c:264: spin_unlock_irqrestore(&rxq->lock, flags);
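
hellcreek and mv88e6xxx drain their PTP rx queues identically: take the queue lock, splice every pending skb onto a private list, drop the lock, then walk the private list lock-free. A minimal sketch of that splice-under-lock pattern; the timestamp-matching step is elided:

#include <linux/skbuff.h>

/* Drain rxq under its lock, then process the entries without it. */
static void drain_rxq(struct sk_buff_head *rxq)
{
	struct sk_buff_head received;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&received);

	spin_lock_irqsave(&rxq->lock, flags);
	skb_queue_splice_tail_init(rxq, &received);	/* moves all, reinits rxq */
	spin_unlock_irqrestore(&rxq->lock, flags);

	while ((skb = __skb_dequeue(&received)) != NULL) {
		/* ... match skb against a pending hardware timestamp ... */
		kfree_skb(skb);
	}
}

Splicing keeps the critical section O(1) regardless of queue depth, which matters when the lock is shared with an interrupt-context producer.
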
drivers/net/ethernet/alacritech/slic.h:543: struct slic_rx_queue rxq;
drivers/net/ethernet/alacritech/slicoss.c:120: static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
drivers/net/ethernet/alacritech/slicoss.c:122: return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
drivers/net/ethernet/alacritech/slicoss.c:396: struct slic_rx_queue *rxq = &sdev->rxq;
drivers/net/ethernet/alacritech/slicoss.c:405: while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
drivers/net/ethernet/alacritech/slicoss.c:437: buff = &rxq->rxbuffs[rxq->put_idx];
drivers/net/ethernet/alacritech/slicoss.c:446: rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
drivers/net/ethernet/alacritech/slicoss.c:549: struct slic_rx_queue *rxq = &sdev->rxq;
drivers/net/ethernet/alacritech/slicoss.c:559: while (todo && (rxq->done_idx != rxq->put_idx)) {
drivers/net/ethernet/alacritech/slicoss.c:560: buff = &rxq->rxbuffs[rxq->done_idx];
drivers/net/ethernet/alacritech/slicoss.c:613: rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
drivers/net/ethernet/alacritech/slicoss.c:918: struct slic_rx_queue *rxq = &sdev->rxq;
drivers/net/ethernet/alacritech/slicoss.c:921: rxq->len = SLIC_NUM_RX_LES;
drivers/net/ethernet/alacritech/slicoss.c:922: rxq->done_idx = 0;
drivers/net/ethernet/alacritech/slicoss.c:923: rxq->put_idx = 0;
drivers/net/ethernet/alacritech/slicoss.c:925: buff = kzalloc_objs(*buff, rxq->len);
drivers/net/ethernet/alacritech/slicoss.c:929: rxq->rxbuffs = buff;
drivers/net/ethernet/alacritech/slicoss.c:937: struct slic_rx_queue *rxq = &sdev->rxq;
drivers/net/ethernet/alacritech/slicoss.c:942: for (i = 0; i < rxq->len; i++) {
drivers/net/ethernet/alacritech/slicoss.c:943: buff = &rxq->rxbuffs[i];
drivers/net/ethernet/alacritech/slicoss.c:954: kfree(rxq->rxbuffs);
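
slicoss tracks its rx ring with a producer cursor (put_idx) and a consumer cursor (done_idx) and derives the free-slot count from the pair (line 122 above). A minimal sketch of one common formulation of that arithmetic, which keeps a single slot in reserve so that full and empty are distinguishable; the driver's slic_get_free_queue_descs() may differ in detail:

/* Free slots in a ring where put_idx chases done_idx, wrapping at len.
 * One slot is held back so a full ring never looks identical to an
 * empty one (put_idx == done_idx always means empty). */
static unsigned int ring_free_descs(unsigned int put_idx,
				    unsigned int done_idx,
				    unsigned int len)
{
	if (put_idx >= done_idx)
		return len - (put_idx - done_idx) - 1;
	return done_idx - put_idx - 1;
}

Example: len = 8, put_idx = 6, done_idx = 2 gives 8 - 4 - 1 = 3 free slots.
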
drivers/net/ethernet/atheros/alx/alx.h:96: struct alx_rx_queue *rxq;
drivers/net/ethernet/atheros/alx/hw.c:378: u32 rxq, txq, val;
drivers/net/ethernet/atheros/alx/hw.c:381: rxq = alx_read_mem32(hw, ALX_RXQ0);
drivers/net/ethernet/atheros/alx/hw.c:382: alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
drivers/net/ethernet/atheros/alx/hw.c:609: u32 mac, txq, rxq;
drivers/net/ethernet/atheros/alx/hw.c:611: rxq = alx_read_mem32(hw, ALX_RXQ0);
drivers/net/ethernet/atheros/alx/hw.c:612: alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
drivers/net/ethernet/atheros/alx/main.c:127: cur_buf = &rxq->bufs[cur];
drivers/net/ethernet/atheros/alx/main.c:134: rxq->write_idx = cur;
drivers/net/ethernet/atheros/alx/main.c:216: static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
drivers/net/ethernet/atheros/alx/main.c:225: alx = netdev_priv(rxq->netdev);
drivers/net/ethernet/atheros/alx/main.c:228: rrd = &rxq->rrd[rxq->rrd_read_idx];
drivers/net/ethernet/atheros/alx/main.c:234: RRD_SI) != rxq->read_idx ||
drivers/net/ethernet/atheros/alx/main.c:241: rxb = &rxq->bufs[rxq->read_idx];
drivers/net/ethernet/atheros/alx/main.c:242: dma_unmap_single(rxq->dev,
drivers/net/ethernet/atheros/alx/main.c:260: skb->protocol = eth_type_trans(skb, rxq->netdev);
drivers/net/ethernet/atheros/alx/main.c:277: napi_gro_receive(&rxq->np->napi, skb);
drivers/net/ethernet/atheros/alx/main.c:281: if (++rxq->read_idx == rxq->count)
drivers/net/ethernet/atheros/alx/main.c:282: rxq->read_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:283: if (++rxq->rrd_read_idx == rxq->count)
drivers/net/ethernet/atheros/alx/main.c:284: rxq->rrd_read_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:307: if (np->rxq)
drivers/net/ethernet/atheros/alx/main.c:308: work = alx_clean_rx_irq(np->rxq, budget);
drivers/net/ethernet/atheros/alx/main.c:467: if (np->rxq) {
drivers/net/ethernet/atheros/alx/main.c:468: np->rxq->read_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:469: np->rxq->write_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:470: np->rxq->rrd_read_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:471: alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
drivers/net/ethernet/atheros/alx/main.c:472: alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
drivers/net/ethernet/atheros/alx/main.c:506: static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
drivers/net/ethernet/atheros/alx/main.c:511: if (!rxq->bufs)
drivers/net/ethernet/atheros/alx/main.c:514: for (i = 0; i < rxq->count; i++) {
drivers/net/ethernet/atheros/alx/main.c:515: cur_buf = rxq->bufs + i;
drivers/net/ethernet/atheros/alx/main.c:517: dma_unmap_single(rxq->dev,
drivers/net/ethernet/atheros/alx/main.c:528: rxq->write_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:529: rxq->read_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:530: rxq->rrd_read_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:541: if (alx->qnapi[0] && alx->qnapi[0]->rxq)
drivers/net/ethernet/atheros/alx/main.c:542: alx_free_rxring_buf(alx->qnapi[0]->rxq);
drivers/net/ethernet/atheros/alx/main.c:630: static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
drivers/net/ethernet/atheros/alx/main.c:633: rxq->bufs = kzalloc_objs(struct alx_buffer, rxq->count);
drivers/net/ethernet/atheros/alx/main.c:634: if (!rxq->bufs)
drivers/net/ethernet/atheros/alx/main.c:637: rxq->rrd = alx->descmem.virt + offset;
drivers/net/ethernet/atheros/alx/main.c:638: rxq->rrd_dma = alx->descmem.dma + offset;
drivers/net/ethernet/atheros/alx/main.c:639: offset += sizeof(struct alx_rrd) * rxq->count;
drivers/net/ethernet/atheros/alx/main.c:641: rxq->rfd = alx->descmem.virt + offset;
drivers/net/ethernet/atheros/alx/main.c:642: rxq->rfd_dma = alx->descmem.dma + offset;
drivers/net/ethernet/atheros/alx/main.c:643: offset += sizeof(struct alx_rfd) * rxq->count;
drivers/net/ethernet/atheros/alx/main.c:680: offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
drivers/net/ethernet/atheros/alx/main.c:699: if (alx->qnapi[0] && alx->qnapi[0]->rxq)
drivers/net/ethernet/atheros/alx/main.c:700: kfree(alx->qnapi[0]->rxq->bufs);
drivers/net/ethernet/atheros/alx/main.c:721: kfree(np->rxq);
drivers/net/ethernet/atheros/alx/main.c:73: struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
drivers/net/ethernet/atheros/alx/main.c:741: struct alx_rx_queue *rxq;
drivers/net/ethernet/atheros/alx/main.c:778: rxq = kzalloc_obj(*rxq);
drivers/net/ethernet/atheros/alx/main.c:779: if (!rxq)
drivers/net/ethernet/atheros/alx/main.c:782: np->rxq = rxq;
drivers/net/ethernet/atheros/alx/main.c:783: rxq->np = alx->qnapi[0];
drivers/net/ethernet/atheros/alx/main.c:784: rxq->queue_idx = 0;
drivers/net/ethernet/atheros/alx/main.c:785: rxq->count = alx->rx_ringsz;
drivers/net/ethernet/atheros/alx/main.c:786: rxq->netdev = alx->dev;
drivers/net/ethernet/atheros/alx/main.c:787: rxq->dev = &alx->hw.pdev->dev;
drivers/net/ethernet/atheros/alx/main.c:79: next = cur = rxq->write_idx;
drivers/net/ethernet/atheros/alx/main.c:82: cur_buf = &rxq->bufs[cur];
drivers/net/ethernet/atheros/alx/main.c:84: while (!cur_buf->skb && next != rxq->read_idx) {
drivers/net/ethernet/atheros/alx/main.c:85: struct alx_rfd *rfd = &rxq->rfd[cur];
drivers/net/ethernet/atheros/alx/main.c:867: if (np->txq && np->rxq)
drivers/net/ethernet/atheros/alx/main.c:873: else if (np->rxq)
drivers/net/ethernet/atheros/alx/main.c:875: np->rxq->queue_idx);
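
alx_clean_rx_irq() walks two rings in lockstep, the return descriptor ring via rrd_read_idx and the buffer ring via read_idx, each wrapping independently at rxq->count (lines 281-284 above). A minimal sketch of that compare-and-reset wrap, with struct rx_cursors as a hypothetical subset of alx_rx_queue:

struct rx_cursors {		/* hypothetical subset of alx_rx_queue */
	unsigned int read_idx;		/* next buffer to reclaim */
	unsigned int rrd_read_idx;	/* next completion to inspect */
	unsigned int count;		/* ring size shared by both rings */
};

/* Advance both cursors after consuming one completed packet. */
static void rx_cursors_advance(struct rx_cursors *c)
{
	if (++c->read_idx == c->count)
		c->read_idx = 0;
	if (++c->rrd_read_idx == c->count)
		c->rrd_read_idx = 0;
}

Compare-and-reset avoids the division implied by a % operator on non-power-of-two ring sizes, which is why many drivers prefer it over the modulo form seen earlier.
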
drivers/net/ethernet/atheros/atl1c/atl1c_main.c:1228: u32 mac, txq, rxq;
drivers/net/ethernet/atheros/atl1c/atl1c_main.c:1235: AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
drivers/net/ethernet/atheros/atl1c/atl1c_main.c:1239: rxq |= RXQ_CTRL_EN;
drivers/net/ethernet/atheros/atl1c/atl1c_main.c:1253: AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
drivers/net/ethernet/broadcom/bcmsysport.c:2447: u32 txq, rxq;
drivers/net/ethernet/broadcom/bcmsysport.c:2469: if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
drivers/net/ethernet/broadcom/bcmsysport.c:2470: rxq = 1;
drivers/net/ethernet/broadcom/bcmsysport.c:2476: dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
drivers/net/ethernet/broadcom/bcmsysport.c:2609: priv->irq0, priv->irq1, txq, rxq);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h:531: u16 rxq;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:1914: struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:1920: if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:1926: cur_query_entry->index = vfq_stat_id(vf, rxq);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1597: q->sb_idx = setup_q->rxq.vf_sb;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1600: init_p->rx.hc_rate = setup_q->rxq.hc_rate;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1601: init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1602: bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1606: bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1610: setup_p->gen_params.mtu = setup_q->rxq.mtu;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1613: rxq_params->drop_flags = setup_q->rxq.drop_flags;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1614: rxq_params->dscr_map = setup_q->rxq.rxq_addr;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1615: rxq_params->sge_map = setup_q->rxq.sge_addr;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1616: rxq_params->rcq_map = setup_q->rxq.rcq_addr;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1617: rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1618: rxq_params->buf_sz = setup_q->rxq.buf_sz;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1619: rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1620: rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1621: rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1623: setup_q->rxq.cache_line_log;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:1624: rxq_params->sb_cq_index = setup_q->rxq.sb_index;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:636: req->rxq.rcq_addr = fp->rx_comp_mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:637: req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:638: req->rxq.rxq_addr = fp->rx_desc_mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:639: req->rxq.sge_addr = fp->rx_sge_mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:640: req->rxq.vf_sb = fp_idx;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:641: req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:642: req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:643: req->rxq.mtu = bp->dev->mtu;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:644: req->rxq.buf_sz = fp->rx_buf_size;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:645: req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:646: req->rxq.tpa_agg_sz = tpa_agg_size;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:647: req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:648: req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:650: req->rxq.flags = flags;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:651: req->rxq.drop_flags = 0;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:652: req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c:653: req->rxq.stat_id = -1; /* No stats at the moment */
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h:273: } rxq;
drivers/net/ethernet/broadcom/bnxt/bnxt.c:15709: new_fltr->base.rxq = rxq_index;
drivers/net/ethernet/broadcom/bnxt/bnxt.c:15757: if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
drivers/net/ethernet/broadcom/bnxt/bnxt.c:6279: u16 rxq = fltr->base.rxq;
drivers/net/ethernet/broadcom/bnxt/bnxt.c:6304: req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
drivers/net/ethernet/broadcom/bnxt/bnxt.c:6310: req->dst_id = cpu_to_le16(rxq);
drivers/net/ethernet/broadcom/bnxt/bnxt.c:6338: vnic = &bp->vnic_info[fltr->base.rxq + 1];
drivers/net/ethernet/broadcom/bnxt/bnxt.h:1436: u16 rxq;
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:1133: fs->ring_cookie = fltr_base->rxq;
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:1221: fs->ring_cookie = fltr->base.rxq;
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:1302: fltr->base.rxq = ring;
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:1482: new_fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie);
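
bnxt stores the steering target in fltr->base.rxq and translates it to and from the ethtool flow-spec ring cookie (lines 1133, 1221 and 1482 above). A minimal sketch of both directions; fill_ring_cookie() and ring_from_cookie() are hypothetical wrappers around the ethtool_get_flow_spec_ring() helper the driver itself calls:

#include <linux/ethtool.h>

/* Report a filter's rx queue back to an ethtool -n query. */
static void fill_ring_cookie(struct ethtool_rx_flow_spec *fs, u16 rxq)
{
	fs->ring_cookie = rxq;
}

/* Extract the target queue from an ethtool -N insert request.
 * The helper masks off the VF bits of the 64-bit cookie. */
static u32 ring_from_cookie(const struct ethtool_rx_flow_spec *fs)
{
	return ethtool_get_flow_spec_ring(fs->ring_cookie);
}

The cookie is a u64 that multiplexes ring and VF identifiers, so drivers should go through the helper rather than truncating it directly.
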
drivers/net/ethernet/brocade/bna/bna.h:184: (q0) = rxp->rxq.single.only; \
drivers/net/ethernet/brocade/bna/bna.h:188: (q0) = rxp->rxq.slr.large; \
drivers/net/ethernet/brocade/bna/bna.h:189: (q1) = rxp->rxq.slr.small; \
drivers/net/ethernet/brocade/bna/bna.h:192: (q0) = rxp->rxq.hds.data; \
drivers/net/ethernet/brocade/bna/bna.h:193: (q1) = rxp->rxq.hds.hdr; \
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1764: struct bna_rxq *rxq = NULL;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1766: rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1767: list_del(&rxq->qe);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1770: return rxq;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1774: bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1776: list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1836: rxp->rxq.single.only = q0;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1837: rxp->rxq.single.reserved = NULL;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1840: rxp->rxq.slr.large = q0;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1841: rxp->rxq.slr.small = q1;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1844: rxp->rxq.hds.data = q0;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1845: rxp->rxq.hds.hdr = q1;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1853: bna_rxq_qpt_setup(struct bna_rxq *rxq,
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1866: rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1867: rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1868: rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1869: rxq->qpt.page_count = page_count;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1870: rxq->qpt.page_size = page_size;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1872: rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1873: rxq->rcb->sw_q = page_mem->kva;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1878: for (i = 0; i < rxq->qpt.page_count; i++) {
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1879: rxq->rcb->sw_qpt[i] = kva;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1883: ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:1885: ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:2038: rx_mod->rxq = (struct bna_rxq *)
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:2073: rxq_ptr = &rx_mod->rxq[index];
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:2390: q0->rcb->rxq = q0;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:2416: q1->rcb->rxq = q1;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:2512: rxp->rxq.slr.large = NULL;
drivers/net/ethernet/brocade/bna/bna_tx_rx.c:2513: rxp->rxq.slr.small = NULL;
drivers/net/ethernet/brocade/bna/bna_types.h:562: struct bna_rxq *rxq;
drivers/net/ethernet/brocade/bna/bna_types.h:699: union bna_rxq_u rxq;
drivers/net/ethernet/brocade/bna/bna_types.h:847: struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
drivers/net/ethernet/brocade/bna/bnad.c:2393: rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
drivers/net/ethernet/brocade/bna/bnad.c:2395: rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
drivers/net/ethernet/brocade/bna/bnad.c:2398: rcb[1]->rxq) {
drivers/net/ethernet/brocade/bna/bnad.c:2401: ccb->rcb[1]->rxq->rx_packets;
drivers/net/ethernet/brocade/bna/bnad.c:2404: ccb->rcb[1]->rxq->rx_bytes;
drivers/net/ethernet/brocade/bna/bnad.c:267: order = get_order(rcb->rxq->buffer_size);
drivers/net/ethernet/brocade/bna/bnad.c:273: unmap_q->map_size = rcb->rxq->buffer_size;
drivers/net/ethernet/brocade/bna/bnad.c:275: if (rcb->rxq->multi_buffer) {
drivers/net/ethernet/brocade/bna/bnad.c:277: unmap_q->map_size = rcb->rxq->buffer_size;
drivers/net/ethernet/brocade/bna/bnad.c:282: (rcb->rxq->buffer_size > 2048) ?
drivers/net/ethernet/brocade/bna/bnad.c:372: rcb->rxq->rxbuf_alloc_failed++;
drivers/net/ethernet/brocade/bna/bnad.c:381: rcb->rxq->rxbuf_map_failed++;
drivers/net/ethernet/brocade/bna/bnad.c:423: buff_sz = rcb->rxq->buffer_size;
drivers/net/ethernet/brocade/bna/bnad.c:435: rcb->rxq->rxbuf_alloc_failed++;
drivers/net/ethernet/brocade/bna/bnad.c:444: rcb->rxq->rxbuf_map_failed++;
drivers/net/ethernet/brocade/bna/bnad.c:685: rcb->rxq->rx_packets_with_error++;
drivers/net/ethernet/brocade/bna/bnad.c:695: rcb->rxq->rx_packets++;
drivers/net/ethernet/brocade/bna/bnad.c:696: rcb->rxq->rx_bytes += totlen;
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:644: bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:691: bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:716: bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:742: rcb[0]->rxq) {
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:745: buf[bi++] = rcb->rxq->rx_packets;
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:746: buf[bi++] = rcb->rxq->rx_bytes;
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:747: buf[bi++] = rcb->rxq->
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:749: buf[bi++] = rcb->rxq->
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:751: buf[bi++] = rcb->rxq->rxbuf_map_failed;
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:757: rcb[1]->rxq) {
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:760: buf[bi++] = rcb->rxq->rx_packets;
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:761: buf[bi++] = rcb->rxq->rx_bytes;
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:762: buf[bi++] = rcb->rxq->
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:764: buf[bi++] = rcb->rxq->
drivers/net/ethernet/brocade/bna/bnad_ethtool.c:766: buf[bi++] = rcb->rxq->rxbuf_map_failed;
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c:1075: lio->rxq = lio->linfo.rxpciq[0].s.q_no;
drivers/net/ethernet/cavium/liquidio/lio_main.c:3635: lio->rxq = lio->linfo.rxpciq[0].s.q_no;
drivers/net/ethernet/cavium/liquidio/lio_main.c:3646: lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:2154: lio->rxq = lio->linfo.rxpciq[0].s.q_no;
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c:2157: lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
drivers/net/ethernet/cavium/liquidio/octeon_network.h:110: int rxq;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c:3413: struct sge_rspq *rxq = (struct sge_rspq *)q; \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c:3414: QDESC_GET(rxq, rxq->desc, type, label); \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h:253: static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq,
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h:258: entry->qid = rxq->cntxt_id;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h:259: entry->desc_size = rxq->iqe_len;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h:260: entry->num_desc = rxq->size;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h:261: entry->data_size = rxq->size * rxq->iqe_len;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h:262: memcpy(entry->data, rxq->desc, entry->data_size);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h:1987: unsigned int rxqi, unsigned int rxq, unsigned int tc,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:862: const struct sge_eth_rxq *rxq;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:866: rxq = &adapter->sge.ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c:873: rss[i] = rxq[*queues].rspq.abs_id;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:100: rxq->stats.imm++;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:102: rxq->stats.an++;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:104: rxq->stats.pkts++;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:78: struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c:95: rxq->stats.nomem++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:2533: struct sge_ofld_rxq *rxq;
drivers/net/ethernet/chelsio/cxgb4/sge.c:2574: rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
drivers/net/ethernet/chelsio/cxgb4/sge.c:2587: flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3430: static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
drivers/net/ethernet/chelsio/cxgb4/sge.c:3433: struct adapter *adapter = rxq->rspq.adap;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3439: skb = napi_get_frags(&rxq->rspq.napi);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3442: rxq->stats.rx_drops++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3453: skb_record_rx_queue(skb, rxq->rspq.idx);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3458: if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
drivers/net/ethernet/chelsio/cxgb4/sge.c:3464: rxq->stats.vlan_ex++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3466: ret = napi_gro_frags(&rxq->rspq.napi);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3468: rxq->stats.lro_pkts++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3470: rxq->stats.lro_merged++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3471: rxq->stats.pkts++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3472: rxq->stats.rx_cso++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3525: struct sge_eth_rxq *rxq, struct sk_buff *skb)
drivers/net/ethernet/chelsio/cxgb4/sge.c:3534: rxq->stats.rx_drops++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3668: struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3703: rxq->stats.bad_rx_pkts++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3714: do_gro(rxq, si, pkt, tnl_hdr_len);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3721: rxq->stats.rx_drops++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3727: ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3748: rxq->stats.pkts++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3756: rxq->stats.rx_cso++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3767: rxq->stats.rx_cso++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3794: rxq->stats.vlan_ex++;
drivers/net/ethernet/chelsio/cxgb4/sge.c:3881: struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3903: free_rx_bufs(q->adap, &rxq->fl, 1);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3912: rsd = &rxq->fl.sdesc[rxq->fl.cidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c:3920: unmap_rx_buf(q->adap, &rxq->fl);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3942: restore_rx_bufs(&si, &rxq->fl, frags);
drivers/net/ethernet/chelsio/cxgb4/sge.c:3959: if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
drivers/net/ethernet/chelsio/cxgb4/sge.c:3960: __refill_fl(q->adap, &rxq->fl);
drivers/net/ethernet/chelsio/cxgb4/sge.c:4242: struct sge_eth_rxq *rxq;
drivers/net/ethernet/chelsio/cxgb4/sge.c:4250: rxq = container_of(fl, struct sge_eth_rxq, fl);
drivers/net/ethernet/chelsio/cxgb4/sge.c:4251: if (napi_schedule(&rxq->rspq.napi))
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:7576: unsigned int rxqi, unsigned int rxq, unsigned int tc,
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:7588: FW_PFVF_CMD_NIQ_V(rxq));
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1236: struct sge_eth_rxq *rxq;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1239: rxq = &adapter->sge.ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1241: t4vf_sge_intr_msix(0, &rxq->rspq);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1242: rxq++;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1810: const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1814: for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1817: stats->rx_csum += rxq->stats.rx_cso;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1818: stats->vlan_ex += rxq->stats.vlan_ex;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1820: stats->lro_pkts += rxq->stats.lro_pkts;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:1821: stats->lro_merged += rxq->stats.lro_merged;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2059: #define R(s, v) S3("u", s, rxq[qs].v)
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2062: const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2068: (rxq[qs].rspq.netdev
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2069: ? rxq[qs].rspq.netdev->name
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2072: (rxq[qs].rspq.netdev
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2074: netdev_priv(rxq[qs].rspq.netdev))->port_id
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2084: S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2086: adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2198: #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2202: const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2208: (rxq[qs].rspq.netdev
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2209: ? rxq[qs].rspq.netdev->name
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2729: struct sge_eth_rxq *rxq = &s->ethrxq[qs];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2732: init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:2733: rxq->fl.size = 72;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:383: int rxq, msi, err;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:397: for_each_ethrxq(s, rxq) {
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:401: &s->ethrxq[rxq].rspq);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:409: while (--rxq >= 0)
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:410: free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:421: int rxq, msi;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:425: for_each_ethrxq(s, rxq)
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:427: &s->ethrxq[rxq].rspq);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:452: int rxq;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:455: for_each_ethrxq(s, rxq)
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:456: qenable(&s->ethrxq[rxq].rspq);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:477: int rxq;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:479: for_each_ethrxq(s, rxq)
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:480: napi_disable(&s->ethrxq[rxq].rspq.napi);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:632: struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:636: for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:637: err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:639: &rxq->fl, t4vf_ethrx_handler);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:649: rxq->rspq.idx = qs;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:650: memset(&rxq->stats, 0, sizeof(rxq->stats));
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:663: struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:667: for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:668: IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:680: rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:681: EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:705: struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:710: rss[qs] = rxq[qs].rspq.abs_id;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:737: rxq[0].rspq.abs_id;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1564: static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1567: struct adapter *adapter = rxq->rspq.adapter;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1573: skb = napi_get_frags(&rxq->rspq.napi);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1576: rxq->stats.rx_drops++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1585: skb_record_rx_queue(skb, rxq->rspq.idx);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1591: rxq->stats.vlan_ex++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1593: ret = napi_gro_frags(&rxq->rspq.napi);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1596: rxq->stats.lro_pkts++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1598: rxq->stats.lro_merged++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1599: rxq->stats.pkts++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1600: rxq->stats.rx_cso++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1618: struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1630: do_gro(rxq, gl, pkt);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1640: rxq->stats.rx_drops++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1647: rxq->stats.pkts++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1653: rxq->stats.rx_cso++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1658: rxq->stats.rx_cso++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1664: rxq->stats.vlan_ex++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1756: struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1793: free_rx_bufs(rspq->adapter, &rxq->fl,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1806: BUG_ON(rxq->fl.avail == 0);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1807: sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1815: unmap_rx_buf(rspq->adapter, &rxq->fl);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1839: restore_rx_bufs(&gl, &rxq->fl, frag);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1869: fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:1870: __refill_fl(rspq->adapter, &rxq->fl);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:2094: struct sge_eth_rxq *rxq;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:2096: rxq = container_of(fl, struct sge_eth_rxq, fl);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:2097: if (napi_schedule(&rxq->rspq.napi))
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:2564: struct sge_eth_rxq *rxq = s->ethrxq;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:2570: for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:2571: if (rxq->rspq.desc)
drivers/net/ethernet/chelsio/cxgb4vf/sge.c:2572: free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
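
cxgb4 and cxgb4vf implement do_gro() the same way: borrow the per-NAPI frags skb, attach the received page fragments without copying, record queue metadata, and feed napi_gro_frags() (sge.c lines 3430-3472 and 1564-1600 above). A minimal sketch of that frags path for a single fragment, with my_rx_stats as an assumed stats container:

#include <linux/netdevice.h>

struct my_rx_stats { unsigned long rx_drops, pkts; };	/* assumed */

/* Deliver an already-DMA-mapped page fragment through GRO. */
static void deliver_gro(struct napi_struct *napi, struct page *page,
			unsigned int off, unsigned int len,
			struct my_rx_stats *stats)
{
	struct sk_buff *skb = napi_get_frags(napi);	/* reusable per-NAPI skb */

	if (unlikely(!skb)) {
		stats->rx_drops++;
		return;
	}

	/* Attach the payload as a page fragment; no data copy. */
	skb_add_rx_frag(skb, 0, page, off, len, len);

	napi_gro_frags(napi);	/* consumes skb; may merge into a flow */
	stats->pkts++;
}

napi_get_frags() hands back the same skb until GRO consumes it, so a failed allocation is the only drop path on this side.
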
drivers/net/ethernet/emulex/benet/be_cmds.c:1409: struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
drivers/net/ethernet/emulex/benet/be_cmds.c:1414: struct be_dma_mem *q_mem = &rxq->dma_mem;
drivers/net/ethernet/emulex/benet/be_cmds.c:1441: rxq->id = le16_to_cpu(resp->id);
drivers/net/ethernet/emulex/benet/be_cmds.c:1442: rxq->created = true;
drivers/net/ethernet/emulex/benet/be_cmds.h:2401: int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
drivers/net/ethernet/emulex/benet/be_main.c:2284: struct be_queue_info *rxq = &rxo->q;
drivers/net/ethernet/emulex/benet/be_main.c:2285: u32 frag_idx = rxq->tail;
drivers/net/ethernet/emulex/benet/be_main.c:2301: queue_tail_inc(rxq);
drivers/net/ethernet/emulex/benet/be_main.c:2302: atomic_dec(&rxq->used);
drivers/net/ethernet/emulex/benet/be_main.c:2597: struct be_queue_info *rxq = &rxo->q;
drivers/net/ethernet/emulex/benet/be_main.c:2604: page_info = &rxo->page_info_tbl[rxq->head];
drivers/net/ethernet/emulex/benet/be_main.c:2629: rxd = queue_head_node(rxq);
drivers/net/ethernet/emulex/benet/be_main.c:2645: queue_head_inc(rxq);
drivers/net/ethernet/emulex/benet/be_main.c:2646: page_info = &rxo->page_info_tbl[rxq->head];
drivers/net/ethernet/emulex/benet/be_main.c:2658: atomic_add(posted, &rxq->used);
drivers/net/ethernet/emulex/benet/be_main.c:2663: be_rxq_notify(adapter, rxq->id, notify);
drivers/net/ethernet/emulex/benet/be_main.c:2666: } else if (atomic_read(&rxq->used) == 0) {
drivers/net/ethernet/emulex/benet/be_main.c:2818: struct be_queue_info *rxq = &rxo->q;
drivers/net/ethernet/emulex/benet/be_main.c:2821: while (atomic_read(&rxq->used) > 0) {
drivers/net/ethernet/emulex/benet/be_main.c:2826: BUG_ON(atomic_read(&rxq->used));
drivers/net/ethernet/emulex/benet/be_main.c:2827: rxq->tail = 0;
drivers/net/ethernet/emulex/benet/be_main.c:2828: rxq->head = 0;
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c:56: xdp_buff->rxq = &ch->xdp_rxq;
drivers/net/ethernet/freescale/enetc/enetc.c:1877: xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq);
drivers/net/ethernet/freescale/enetc/enetc.c:3480: err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
drivers/net/ethernet/freescale/enetc/enetc.c:3485: err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, MEM_TYPE_PAGE_SHARED,
drivers/net/ethernet/freescale/enetc/enetc.c:3488: xdp_rxq_info_unreg(&bdr->xdp.rxq);
drivers/net/ethernet/freescale/enetc/enetc.c:3532: xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
drivers/net/ethernet/freescale/enetc/enetc.c:3533: xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
drivers/net/ethernet/freescale/enetc/enetc.h:122: struct xdp_rxq_info rxq;
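
enetc registers one xdp_rxq_info per ring, attaches a memory model, and unregisters on failure (lines 3480-3488 above); fec follows the same shape with either MEM_TYPE_PAGE_POOL or MEM_TYPE_XSK_BUFF_POOL depending on whether an XSK pool is bound (fec_main.c:3827-3829 below). A minimal sketch of that lifecycle for the page-pool case; ring_xdp_reg() is a hypothetical helper:

#include <net/xdp.h>

/* Register the ring with the XDP core, then attach its page pool.
 * Mirrors the reg / reg_mem_model / unreg-on-error sequence above. */
static int ring_xdp_reg(struct xdp_rxq_info *xrq, struct net_device *ndev,
			u32 queue_idx, void *page_pool)
{
	int err;

	err = xdp_rxq_info_reg(xrq, ndev, queue_idx, 0);
	if (err)
		return err;

	err = xdp_rxq_info_reg_mem_model(xrq, MEM_TYPE_PAGE_POOL, page_pool);
	if (err)
		xdp_rxq_info_unreg(xrq);	/* undo the first step */

	return err;
}

Teardown runs in reverse, unreg_mem_model before unreg, exactly as the enetc hits at 3532-3533 show.
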
drivers/net/ethernet/freescale/fec_main.c
1001
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
1005
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
1008
rxq->bd.cur = rxq->bd.base;
drivers/net/ethernet/freescale/fec_main.c
1079
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
1083
rxq = fep->rx_queue[i];
drivers/net/ethernet/freescale/fec_main.c
1084
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
drivers/net/ethernet/freescale/fec_main.c
1751
static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
drivers/net/ethernet/freescale/fec_main.c
1757
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
drivers/net/ethernet/freescale/fec_main.c
1761
rxq->rx_buf[index].page = new_page;
drivers/net/ethernet/freescale/fec_main.c
1768
static int fec_enet_update_cbd_zc(struct fec_enet_priv_rx_q *rxq,
drivers/net/ethernet/freescale/fec_main.c
1774
new_xdp = xsk_buff_alloc(rxq->xsk_pool);
drivers/net/ethernet/freescale/fec_main.c
1778
rxq->rx_buf[index].xdp = new_xdp;
drivers/net/ethernet/freescale/fec_main.c
1837
struct fec_enet_priv_rx_q *rxq,
drivers/net/ethernet/freescale/fec_main.c
1848
page_pool_recycle_direct(rxq->page_pool, page);
drivers/net/ethernet/freescale/fec_main.c
1884
skb_record_rx_queue(skb, rxq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
1897
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
drivers/net/ethernet/freescale/fec_main.c
1900
struct bufdesc *bdp = rxq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
1938
index = fec_enet_get_bd_index(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
1939
page = rxq->rx_buf[index].page;
drivers/net/ethernet/freescale/fec_main.c
1941
if (fec_enet_update_cbd(rxq, bdp, index)) {
drivers/net/ethernet/freescale/fec_main.c
1961
skb = fec_build_skb(fep, rxq, bdp, page, pkt_len - sub_len);
drivers/net/ethernet/freescale/fec_main.c
1988
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
1994
writel(0, rxq->bd.reg_desc_active);
drivers/net/ethernet/freescale/fec_main.c
1996
rxq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
2001
static void fec_xdp_drop(struct fec_enet_priv_rx_q *rxq,
drivers/net/ethernet/freescale/fec_main.c
2006
page_pool_put_page(rxq->page_pool, page, sync, true);
drivers/net/ethernet/freescale/fec_main.c
2022
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
drivers/net/ethernet/freescale/fec_main.c
2024
struct bufdesc *bdp = rxq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
2049
xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
drivers/net/ethernet/freescale/fec_main.c
2068
index = fec_enet_get_bd_index(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
2069
page = rxq->rx_buf[index].page;
drivers/net/ethernet/freescale/fec_main.c
2072
if (fec_enet_update_cbd(rxq, bdp, index)) {
drivers/net/ethernet/freescale/fec_main.c
2096
rxq->stats[RX_XDP_PASS]++;
drivers/net/ethernet/freescale/fec_main.c
2101
skb = fec_build_skb(fep, rxq, bdp, page, pkt_len);
drivers/net/ethernet/freescale/fec_main.c
2109
rxq->stats[RX_XDP_REDIRECT]++;
drivers/net/ethernet/freescale/fec_main.c
2112
fec_xdp_drop(rxq, &xdp, sync);
drivers/net/ethernet/freescale/fec_main.c
2119
rxq->stats[RX_XDP_TX]++;
drivers/net/ethernet/freescale/fec_main.c
2122
rxq->stats[RX_XDP_TX_ERRORS]++;
drivers/net/ethernet/freescale/fec_main.c
2123
fec_xdp_drop(rxq, &xdp, sync);
drivers/net/ethernet/freescale/fec_main.c
2137
rxq->stats[RX_XDP_DROP]++;
drivers/net/ethernet/freescale/fec_main.c
2138
fec_xdp_drop(rxq, &xdp, sync);
drivers/net/ethernet/freescale/fec_main.c
2163
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
2169
writel(0, rxq->bd.reg_desc_active);
drivers/net/ethernet/freescale/fec_main.c
2172
rxq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
2273
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
drivers/net/ethernet/freescale/fec_main.c
2275
struct bufdesc *bdp = rxq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
2301
index = fec_enet_get_bd_index(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
2302
xsk = rxq->rx_buf[index].xdp;
drivers/net/ethernet/freescale/fec_main.c
2304
if (fec_enet_update_cbd_zc(rxq, bdp, index))
drivers/net/ethernet/freescale/fec_main.c
2333
if (fec_enet_update_cbd_zc(rxq, bdp, index)) {
drivers/net/ethernet/freescale/fec_main.c
2357
rxq->stats[RX_XDP_PASS]++;
drivers/net/ethernet/freescale/fec_main.c
2368
rxq->stats[RX_XDP_TX]++;
drivers/net/ethernet/freescale/fec_main.c
2371
rxq->stats[RX_XDP_TX_ERRORS]++;
drivers/net/ethernet/freescale/fec_main.c
2379
rxq->stats[RX_XDP_REDIRECT]++;
drivers/net/ethernet/freescale/fec_main.c
2385
rxq->stats[RX_XDP_DROP]++;
drivers/net/ethernet/freescale/fec_main.c
2399
rxq->stats[RX_XDP_DROP]++;
drivers/net/ethernet/freescale/fec_main.c
2425
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
2431
writel(0, rxq->bd.reg_desc_active);
drivers/net/ethernet/freescale/fec_main.c
2434
rxq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
2442
if (rxq->xsk_pool && xsk_uses_need_wakeup(rxq->xsk_pool)) {
drivers/net/ethernet/freescale/fec_main.c
2444
xsk_set_rx_need_wakeup(rxq->xsk_pool);
drivers/net/ethernet/freescale/fec_main.c
2446
xsk_clear_rx_need_wakeup(rxq->xsk_pool);
drivers/net/ethernet/freescale/fec_main.c
2460
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
drivers/net/ethernet/freescale/fec_main.c
2463
if (rxq->xsk_pool)
drivers/net/ethernet/freescale/fec_main.c
3496
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
3500
rxq = fep->rx_queue[i];
drivers/net/ethernet/freescale/fec_main.c
3503
xdp_stats[j] += rxq->stats[j];
drivers/net/ethernet/freescale/fec_main.c
3513
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
3517
rxq = fep->rx_queue[i];
drivers/net/ethernet/freescale/fec_main.c
3519
if (!rxq->page_pool)
drivers/net/ethernet/freescale/fec_main.c
3522
page_pool_get_stats(rxq->page_pool, &stats);
drivers/net/ethernet/freescale/fec_main.c
3587
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
3597
rxq = fep->rx_queue[i];
drivers/net/ethernet/freescale/fec_main.c
3599
rxq->stats[j] = 0;
drivers/net/ethernet/freescale/fec_main.c
3815
struct fec_enet_priv_rx_q *rxq)
drivers/net/ethernet/freescale/fec_main.c
3821
err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->id, 0);
drivers/net/ethernet/freescale/fec_main.c
3827
allocator = rxq->xsk_pool ? NULL : rxq->page_pool;
drivers/net/ethernet/freescale/fec_main.c
3828
type = rxq->xsk_pool ? MEM_TYPE_XSK_BUFF_POOL : MEM_TYPE_PAGE_POOL;
drivers/net/ethernet/freescale/fec_main.c
3829
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, type, allocator);
drivers/net/ethernet/freescale/fec_main.c
3832
xdp_rxq_info_unreg(&rxq->xdp_rxq);
drivers/net/ethernet/freescale/fec_main.c
3837
if (rxq->xsk_pool)
drivers/net/ethernet/freescale/fec_main.c
3838
xsk_pool_set_rxq_info(rxq->xsk_pool, &rxq->xdp_rxq);
drivers/net/ethernet/freescale/fec_main.c
3843
static void fec_xdp_rxq_info_unreg(struct fec_enet_priv_rx_q *rxq)
drivers/net/ethernet/freescale/fec_main.c
3845
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) {
drivers/net/ethernet/freescale/fec_main.c
3846
xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq);
drivers/net/ethernet/freescale/fec_main.c
3847
xdp_rxq_info_unreg(&rxq->xdp_rxq);
drivers/net/ethernet/freescale/fec_main.c
3851
static void fec_free_rxq_buffers(struct fec_enet_priv_rx_q *rxq)
drivers/net/ethernet/freescale/fec_main.c
3853
bool xsk = !!rxq->xsk_pool;
drivers/net/ethernet/freescale/fec_main.c
3856
for (i = 0; i < rxq->bd.ring_size; i++) {
drivers/net/ethernet/freescale/fec_main.c
3857
union fec_rx_buffer *buf = &rxq->rx_buf[i];
drivers/net/ethernet/freescale/fec_main.c
3865
page_pool_put_full_page(rxq->page_pool,
drivers/net/ethernet/freescale/fec_main.c
3868
rxq->rx_buf[i].buf_p = NULL;
drivers/net/ethernet/freescale/fec_main.c
3872
page_pool_destroy(rxq->page_pool);
drivers/net/ethernet/freescale/fec_main.c
3873
rxq->page_pool = NULL;
drivers/net/ethernet/freescale/fec_main.c
3882
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
3887
rxq = fep->rx_queue[q];
drivers/net/ethernet/freescale/fec_main.c
3889
fec_xdp_rxq_info_unreg(rxq);
drivers/net/ethernet/freescale/fec_main.c
3890
fec_free_rxq_buffers(rxq);
drivers/net/ethernet/freescale/fec_main.c
3893
rxq->stats[i] = 0;
drivers/net/ethernet/freescale/fec_main.c
3995
struct fec_enet_priv_rx_q *rxq)
drivers/net/ethernet/freescale/fec_main.c
3997
struct bufdesc *bdp = rxq->bd.base;
drivers/net/ethernet/freescale/fec_main.c
4002
err = fec_enet_create_page_pool(fep, rxq);
drivers/net/ethernet/freescale/fec_main.c
4005
__func__, rxq->bd.qid, err);
drivers/net/ethernet/freescale/fec_main.c
4022
for (i = 0; i < rxq->bd.ring_size; i++) {
drivers/net/ethernet/freescale/fec_main.c
4023
page = page_pool_dev_alloc_pages(rxq->page_pool);
drivers/net/ethernet/freescale/fec_main.c
4031
rxq->rx_buf[i].page = page;
drivers/net/ethernet/freescale/fec_main.c
4032
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
4038
fec_free_rxq_buffers(rxq);
drivers/net/ethernet/freescale/fec_main.c
4044
struct fec_enet_priv_rx_q *rxq)
drivers/net/ethernet/freescale/fec_main.c
4046
union fec_rx_buffer *buf = &rxq->rx_buf[0];
drivers/net/ethernet/freescale/fec_main.c
4047
struct bufdesc *bdp = rxq->bd.base;
drivers/net/ethernet/freescale/fec_main.c
4051
for (i = 0; i < rxq->bd.ring_size; i++) {
drivers/net/ethernet/freescale/fec_main.c
4052
buf[i].xdp = xsk_buff_alloc(rxq->xsk_pool);
drivers/net/ethernet/freescale/fec_main.c
4058
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
4061
for (; i < rxq->bd.ring_size; i++) {
drivers/net/ethernet/freescale/fec_main.c
4064
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
4074
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
4077
rxq = fep->rx_queue[queue];
drivers/net/ethernet/freescale/fec_main.c
4078
if (rxq->xsk_pool) {
drivers/net/ethernet/freescale/fec_main.c
4082
fec_alloc_rxq_buffers_zc(fep, rxq);
drivers/net/ethernet/freescale/fec_main.c
4084
err = fec_alloc_rxq_buffers_pp(fep, rxq);
drivers/net/ethernet/freescale/fec_main.c
4089
err = fec_xdp_rxq_info_reg(fep, rxq);
drivers/net/ethernet/freescale/fec_main.c
4412
static void fec_free_rxq(struct fec_enet_priv_rx_q *rxq)
drivers/net/ethernet/freescale/fec_main.c
4414
fec_xdp_rxq_info_unreg(rxq);
drivers/net/ethernet/freescale/fec_main.c
4415
fec_free_rxq_buffers(rxq);
drivers/net/ethernet/freescale/fec_main.c
4416
kfree(rxq);
drivers/net/ethernet/freescale/fec_main.c
4424
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
4428
rxq = kzalloc_obj(*rxq);
drivers/net/ethernet/freescale/fec_main.c
4429
if (!rxq)
drivers/net/ethernet/freescale/fec_main.c
4433
rxq->bd = old_rxq->bd;
drivers/net/ethernet/freescale/fec_main.c
4434
rxq->id = queue;
drivers/net/ethernet/freescale/fec_main.c
4435
rxq->xsk_pool = pool;
drivers/net/ethernet/freescale/fec_main.c
4436
buf = &rxq->rx_buf[0];
drivers/net/ethernet/freescale/fec_main.c
4438
for (i = 0; i < rxq->bd.ring_size; i++) {
drivers/net/ethernet/freescale/fec_main.c
4447
if (fec_xdp_rxq_info_reg(fep, rxq))
drivers/net/ethernet/freescale/fec_main.c
4450
return rxq;
drivers/net/ethernet/freescale/fec_main.c
4456
kfree(rxq);
drivers/net/ethernet/freescale/fec_main.c
4465
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
4469
rxq = kzalloc_obj(*rxq);
drivers/net/ethernet/freescale/fec_main.c
4470
if (!rxq)
drivers/net/ethernet/freescale/fec_main.c
4473
rxq->bd = old_rxq->bd;
drivers/net/ethernet/freescale/fec_main.c
4474
rxq->id = queue;
drivers/net/ethernet/freescale/fec_main.c
4476
if (fec_enet_create_page_pool(fep, rxq))
drivers/net/ethernet/freescale/fec_main.c
4479
buf = &rxq->rx_buf[0];
drivers/net/ethernet/freescale/fec_main.c
4480
for (; i < rxq->bd.ring_size; i++) {
drivers/net/ethernet/freescale/fec_main.c
4481
buf[i].page = page_pool_dev_alloc_pages(rxq->page_pool);
drivers/net/ethernet/freescale/fec_main.c
4486
if (fec_xdp_rxq_info_reg(fep, rxq))
drivers/net/ethernet/freescale/fec_main.c
4489
return rxq;
drivers/net/ethernet/freescale/fec_main.c
4493
page_pool_put_full_page(rxq->page_pool,
drivers/net/ethernet/freescale/fec_main.c
4496
page_pool_destroy(rxq->page_pool);
drivers/net/ethernet/freescale/fec_main.c
4498
kfree(rxq);
drivers/net/ethernet/freescale/fec_main.c
4503
static void fec_init_rxq_bd_buffers(struct fec_enet_priv_rx_q *rxq, bool xsk)
drivers/net/ethernet/freescale/fec_main.c
4505
union fec_rx_buffer *buf = &rxq->rx_buf[0];
drivers/net/ethernet/freescale/fec_main.c
4506
struct bufdesc *bdp = rxq->bd.base;
drivers/net/ethernet/freescale/fec_main.c
4509
for (int i = 0; i < rxq->bd.ring_size; i++) {
drivers/net/ethernet/freescale/fec_main.c
4518
bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
drivers/net/ethernet/freescale/fec_main.c
4528
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
4535
rxq = pool ? fec_alloc_new_rxq_xsk(fep, queue, pool) :
drivers/net/ethernet/freescale/fec_main.c
4537
if (!rxq) {
drivers/net/ethernet/freescale/fec_main.c
4544
fep->rx_queue[queue] = rxq;
drivers/net/ethernet/freescale/fec_main.c
4545
fec_init_rxq_bd_buffers(rxq, !!pool);
drivers/net/ethernet/freescale/fec_main.c
4574
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
drivers/net/ethernet/freescale/fec_main.c
4577
rxq->xsk_pool = pool;
drivers/net/ethernet/freescale/fec_main.c
4600
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue];
drivers/net/ethernet/freescale/fec_main.c
4603
rxq->xsk_pool = NULL;
drivers/net/ethernet/freescale/fec_main.c
471
struct fec_enet_priv_rx_q *rxq)
drivers/net/ethernet/freescale/fec_main.c
477
.pool_size = rxq->bd.ring_size,
drivers/net/ethernet/freescale/fec_main.c
4819
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
4827
rxq = fep->rx_queue[queue];
drivers/net/ethernet/freescale/fec_main.c
4828
if (!rxq->xsk_pool)
drivers/net/ethernet/freescale/fec_main.c
486
rxq->page_pool = page_pool_create(&pp_params);
drivers/net/ethernet/freescale/fec_main.c
487
if (IS_ERR(rxq->page_pool)) {
drivers/net/ethernet/freescale/fec_main.c
488
err = PTR_ERR(rxq->page_pool);
drivers/net/ethernet/freescale/fec_main.c
489
rxq->page_pool = NULL;
drivers/net/ethernet/freescale/fec_main.c
4969
struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
drivers/net/ethernet/freescale/fec_main.c
4970
unsigned size = dsize * rxq->bd.ring_size;
drivers/net/ethernet/freescale/fec_main.c
4972
rxq->bd.qid = i;
drivers/net/ethernet/freescale/fec_main.c
4973
rxq->bd.base = cbd_base;
drivers/net/ethernet/freescale/fec_main.c
4974
rxq->bd.cur = cbd_base;
drivers/net/ethernet/freescale/fec_main.c
4975
rxq->bd.dma = bd_dma;
drivers/net/ethernet/freescale/fec_main.c
4976
rxq->bd.dsize = dsize;
drivers/net/ethernet/freescale/fec_main.c
4977
rxq->bd.dsize_log2 = dsize_log2;
drivers/net/ethernet/freescale/fec_main.c
4978
rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
drivers/net/ethernet/freescale/fec_main.c
4981
rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
drivers/net/ethernet/freescale/fec_main.c
977
struct fec_enet_priv_rx_q *rxq;
drivers/net/ethernet/freescale/fec_main.c
984
rxq = fep->rx_queue[q];
drivers/net/ethernet/freescale/fec_main.c
985
bdp = rxq->bd.base;
drivers/net/ethernet/freescale/fec_main.c
987
for (i = 0; i < rxq->bd.ring_size; i++) {
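The fec_main.c hits at lines 486-489 show the standard page_pool_create() error idiom: the function returns an ERR_PTR(), never NULL, so failure is detected with IS_ERR()/PTR_ERR() and the stale pointer is cleared before the error is propagated. A minimal sketch of that idiom, assuming a hypothetical example_rxq wrapper (the page_pool header name varies across kernel versions; newer trees use net/page_pool/helpers.h):

    #include <linux/dma-mapping.h>
    #include <linux/err.h>
    #include <linux/numa.h>
    #include <net/page_pool/helpers.h>

    /* Hypothetical wrapper type, for illustration only. */
    struct example_rxq {
        struct page_pool *page_pool;
    };

    static int example_create_pool(struct example_rxq *rxq,
                                   unsigned int ring_size, struct device *dev)
    {
        struct page_pool_params pp_params = {
            .order     = 0,            /* one page per rx buffer */
            .pool_size = ring_size,    /* as in the .pool_size hit at line 477 */
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,
        };

        rxq->page_pool = page_pool_create(&pp_params);
        if (IS_ERR(rxq->page_pool)) {
            int err = PTR_ERR(rxq->page_pool);

            rxq->page_pool = NULL;     /* never leave an ERR_PTR behind */
            return err;
        }
        return 0;
    }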
drivers/net/ethernet/freescale/gianfar.c
1201
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
drivers/net/ethernet/freescale/gianfar.c
1210
addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/freescale/gianfar.c
1211
if (unlikely(dma_mapping_error(rxq->dev, addr))) {
drivers/net/ethernet/freescale/gianfar.c
2339
static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
drivers/net/ethernet/freescale/gianfar.c
2343
u16 nta = rxq->next_to_alloc;
drivers/net/ethernet/freescale/gianfar.c
2345
new_rxb = &rxq->rx_buff[nta];
drivers/net/ethernet/freescale/gianfar.c
2349
rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
drivers/net/ethernet/freescale/gianfar.c
2355
dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
drivers/net/ethernet/freescale/gianfar.h
1319
static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
drivers/net/ethernet/freescale/gianfar.h
1321
if (rxq->next_to_clean > rxq->next_to_use)
drivers/net/ethernet/freescale/gianfar.h
1322
return rxq->next_to_clean - rxq->next_to_use - 1;
drivers/net/ethernet/freescale/gianfar.h
1324
return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
drivers/net/ethernet/freescale/gianfar.h
1327
static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
drivers/net/ethernet/freescale/gianfar.h
1333
i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
drivers/net/ethernet/freescale/gianfar.h
1334
bdp = &rxq->rx_bd_base[i];
drivers/net/ethernet/freescale/gianfar.h
1335
bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
drivers/net/ethernet/freescale/gianfar.h
1336
bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
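The gianfar.h hits at lines 1321-1324 compute how many receive descriptors are free from the consumer (next_to_clean) and producer (next_to_use) indices, keeping one slot permanently empty so a full ring is distinguishable from an empty one. A stand-alone userspace model of that arithmetic:

    #include <stdio.h>

    /* Userspace model of gfar_rxbd_unused(): the free count is the gap
     * between consumer and producer, minus the one reserved slot,
     * computed modulo the ring size when the producer has wrapped. */
    struct rxq_model {
        unsigned int next_to_clean;   /* consumer index */
        unsigned int next_to_use;     /* producer index */
        unsigned int ring_size;
    };

    static unsigned int rxbd_unused(const struct rxq_model *q)
    {
        if (q->next_to_clean > q->next_to_use)
            return q->next_to_clean - q->next_to_use - 1;
        return q->ring_size + q->next_to_clean - q->next_to_use - 1;
    }

    int main(void)
    {
        struct rxq_model q = {
            .next_to_clean = 2, .next_to_use = 250, .ring_size = 256,
        };

        /* producer wrapped: 256 + 2 - 250 - 1 = 7 slots free */
        printf("%u\n", rxbd_unused(&q));
        return 0;
    }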
drivers/net/ethernet/fungible/funeth/funeth_main.c
300
if (irq->txq || irq->rxq) /* skip those in use */
drivers/net/ethernet/fungible/funeth/funeth_main.c
657
if (p->rxq) {
drivers/net/ethernet/fungible/funeth/funeth_main.c
658
prefetch(p->rxq->next_cqe_info);
drivers/net/ethernet/fungible/funeth/funeth_main.c
659
p->rxq->irq_cnt++;
drivers/net/ethernet/fungible/funeth/funeth_main.c
678
} else if (p->rxq) {
drivers/net/ethernet/fungible/funeth/funeth_main.c
680
qidx = p->rxq->qidx;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
513
struct funeth_rxq *q = irq->rxq;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
744
irq->rxq = q;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
778
irq->rxq = NULL;
drivers/net/ethernet/fungible/funeth/funeth_trace.h
75
TP_PROTO(const struct funeth_rxq *rxq,
drivers/net/ethernet/fungible/funeth/funeth_trace.h
81
TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),
drivers/net/ethernet/fungible/funeth/funeth_trace.h
90
__string(devname, rxq->netdev->name)
drivers/net/ethernet/fungible/funeth/funeth_trace.h
94
__entry->qidx = rxq->qidx;
drivers/net/ethernet/fungible/funeth/funeth_trace.h
95
__entry->cq_head = rxq->cq_head;
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
219
struct funeth_rxq *rxq;
drivers/net/ethernet/hisilicon/hisi_femac.c
121
struct hisi_femac_queue rxq;
drivers/net/ethernet/hisilicon/hisi_femac.c
212
struct hisi_femac_queue *rxq = &priv->rxq;
drivers/net/ethernet/hisilicon/hisi_femac.c
218
pos = rxq->head;
drivers/net/ethernet/hisilicon/hisi_femac.c
220
if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
drivers/net/ethernet/hisilicon/hisi_femac.c
222
if (unlikely(rxq->skb[pos])) {
drivers/net/ethernet/hisilicon/hisi_femac.c
224
pos, rxq->skb[pos]);
drivers/net/ethernet/hisilicon/hisi_femac.c
237
rxq->dma_phys[pos] = addr;
drivers/net/ethernet/hisilicon/hisi_femac.c
238
rxq->skb[pos] = skb;
drivers/net/ethernet/hisilicon/hisi_femac.c
240
pos = (pos + 1) % rxq->num;
drivers/net/ethernet/hisilicon/hisi_femac.c
242
rxq->head = pos;
drivers/net/ethernet/hisilicon/hisi_femac.c
248
struct hisi_femac_queue *rxq = &priv->rxq;
drivers/net/ethernet/hisilicon/hisi_femac.c
253
pos = rxq->tail;
drivers/net/ethernet/hisilicon/hisi_femac.c
264
skb = rxq->skb[pos];
drivers/net/ethernet/hisilicon/hisi_femac.c
269
rxq->skb[pos] = NULL;
drivers/net/ethernet/hisilicon/hisi_femac.c
271
addr = rxq->dma_phys[pos];
drivers/net/ethernet/hisilicon/hisi_femac.c
288
pos = (pos + 1) % rxq->num;
drivers/net/ethernet/hisilicon/hisi_femac.c
292
rxq->tail = pos;
drivers/net/ethernet/hisilicon/hisi_femac.c
376
ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
drivers/net/ethernet/hisilicon/hisi_femac.c
388
struct hisi_femac_queue *rxq = &priv->rxq;
drivers/net/ethernet/hisilicon/hisi_femac.c
393
pos = rxq->tail;
drivers/net/ethernet/hisilicon/hisi_femac.c
394
while (pos != rxq->head) {
drivers/net/ethernet/hisilicon/hisi_femac.c
395
skb = rxq->skb[pos];
drivers/net/ethernet/hisilicon/hisi_femac.c
398
pos, rxq->head);
drivers/net/ethernet/hisilicon/hisi_femac.c
402
dma_addr = rxq->dma_phys[pos];
drivers/net/ethernet/hisilicon/hisi_femac.c
407
rxq->skb[pos] = NULL;
drivers/net/ethernet/hisilicon/hisi_femac.c
408
pos = (pos + 1) % rxq->num;
drivers/net/ethernet/hisilicon/hisi_femac.c
410
rxq->tail = pos;
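In the hisi_femac hits, the refill path advances head and the reclaim path advances tail, with CIRC_SPACE() gating how many buffers may be posted. A userspace model of that producer/consumer ring, assuming the ring size is a power of two (which CIRC_SPACE() requires); the macro body matches the one in linux/circ_buf.h:

    #include <stdio.h>

    /* One slot is always left unused so that head == tail means empty. */
    #define CIRC_SPACE(head, tail, size) \
        (((tail) - ((head) + 1)) & ((size) - 1))

    int main(void)
    {
        unsigned int head = 0, tail = 0, num = 64;
        unsigned int filled = 0;

        /* produce until the ring is full, wrapping like the driver does */
        while (CIRC_SPACE(head, tail, num)) {
            head = (head + 1) % num;
            filled++;
        }
        printf("filled %u of %u slots\n", filled, num);  /* 63 of 64 */
        return 0;
    }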
drivers/net/ethernet/huawei/hinic/hinic_main.c
83
static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_main.c
87
hinic_rxq_get_stats(rxq, &rx_stats);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
102
struct net_device *netdev = rxq->netdev;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
115
rxq->rxq_stats.csum_errors++;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
127
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
130
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
138
skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
142
addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
163
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
165
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
170
dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
180
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
183
rx_unmap_skb(rxq, dma_addr);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
193
static int rx_alloc_pkts(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
195
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
204
free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
211
skb = rx_alloc_skb(rxq, &dma_addr);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
217
rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
220
rx_free_skb(rxq, skb, dma_addr);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
224
hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
226
hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
233
hinic_rq_update(rxq->rq, prod_idx);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
243
static void free_all_rx_skbs(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
245
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
258
rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
drivers/net/ethernet/huawei/hinic/hinic_rx.c
271
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
281
rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
286
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
288
rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
drivers/net/ethernet/huawei/hinic/hinic_rx.c
354
static int rxq_recv(struct hinic_rxq *rxq, int budget)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
356
struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
357
struct net_device *netdev = rxq->netdev;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
359
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
379
rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
389
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
391
rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
drivers/net/ethernet/huawei/hinic/hinic_rx.c
393
rx_csum(rxq, status, skb);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
403
num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
drivers/net/ethernet/huawei/hinic/hinic_rx.c
422
skb->protocol = eth_type_trans(skb, rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
424
napi_gro_receive(&rxq->napi, skb);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
435
(u16)(pkt_len >> rxq->rx_buff_shift) +
drivers/net/ethernet/huawei/hinic/hinic_rx.c
436
((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
445
free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
447
rx_alloc_pkts(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
449
u64_stats_update_begin(&rxq->rxq_stats.syncp);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
450
rxq->rxq_stats.pkts += pkts;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
451
rxq->rxq_stats.bytes += rx_bytes;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
452
u64_stats_update_end(&rxq->rxq_stats.syncp);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
459
struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
460
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
461
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
464
pkts = rxq_recv(rxq, budget);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
478
static void rx_add_napi(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
480
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
482
netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
484
napi_enable(&rxq->napi);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
487
static void rx_del_napi(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
489
napi_disable(&rxq->napi);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
490
netif_napi_del(&rxq->napi);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
495
struct hinic_rxq *rxq = (struct hinic_rxq *)data;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
496
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
500
nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
506
nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
509
napi_schedule(&rxq->napi);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
513
static int rx_request_irq(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
515
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
519
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
525
rx_add_napi(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
53
static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
540
netif_err(nic_dev, drv, rxq->netdev,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
545
err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
55
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
557
free_irq(rq->irq, rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
559
rx_del_napi(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
563
static void rx_free_irq(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
565
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
568
free_irq(rq->irq, rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
569
rx_del_napi(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
580
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
586
rxq->netdev = netdev;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
587
rxq->rq = rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
588
rxq->buf_len = HINIC_RX_BUF_SZ;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
589
rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
591
rxq_stats_init(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
593
rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
595
if (!rxq->irq_name)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
598
pkts = rx_alloc_pkts(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
604
err = rx_request_irq(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
614
free_all_rx_skbs(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
615
devm_kfree(&netdev->dev, rxq->irq_name);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
623
void hinic_clean_rxq(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
625
struct net_device *netdev = rxq->netdev;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
627
rx_free_irq(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
629
free_all_rx_skbs(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
630
devm_kfree(&netdev->dev, rxq->irq_name);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
71
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
73
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
91
static void rxq_stats_init(struct hinic_rxq *rxq)
drivers/net/ethernet/huawei/hinic/hinic_rx.c
93
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
96
hinic_rxq_clean_stats(rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
99
static void rx_csum(struct hinic_rxq *rxq, u32 status,
drivers/net/ethernet/huawei/hinic/hinic_rx.h
44
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
drivers/net/ethernet/huawei/hinic/hinic_rx.h
46
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_rx.h
49
void hinic_clean_rxq(struct hinic_rxq *rxq);
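The hinic stats hits pair u64_stats_update_begin()/u64_stats_update_end() on the datapath with a fetch_begin/fetch_retry loop on the reader side, so 64-bit counters read consistently even on 32-bit hosts where the sync object is a real seqcount. A minimal sketch of that pattern with hypothetical example_* names:

    #include <linux/u64_stats_sync.h>

    struct example_rxq_stats {
        struct u64_stats_sync syncp;
        u64 pkts;
        u64 bytes;
    };

    /* Writer: only ever called from the queue's own datapath context. */
    static void example_rx_account(struct example_rxq_stats *s,
                                   u64 pkts, u64 bytes)
    {
        u64_stats_update_begin(&s->syncp);
        s->pkts += pkts;
        s->bytes += bytes;
        u64_stats_update_end(&s->syncp);
    }

    /* Reader: retries if an update raced with the snapshot. */
    static void example_rx_read(struct example_rxq_stats *s,
                                u64 *pkts, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&s->syncp);
            *pkts = s->pkts;
            *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
    }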
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
184
struct hinic3_rxq *rxq;
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
188
rxq = container_of(dim, struct hinic3_rxq, dim);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
19
struct hinic3_rxq *rxq = irq_cfg->rxq;
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
192
hinic3_update_queue_coal(rxq->netdev, rxq->q_id,
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
216
irq_cfg->rxq = &nic_dev->rxqs[q_id];
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
231
INIT_WORK(&irq_cfg->rxq->dim.work, hinic3_rx_dim_work);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
232
irq_cfg->rxq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
26
dim_update_sample(irq_cfg->total_events, rxq->rxq_stats.packets,
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
264
disable_work_sync(&irq_cfg->rxq->dim.work);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
27
rxq->rxq_stats.bytes, &sample);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
28
net_dim(&rxq->dim, &sample);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
289
disable_work_sync(&irq_cfg->rxq->dim.work);
drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
46
work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
786
struct hinic3_rxq *rxq;
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
818
rxq = &nic_dev->rxqs[i];
drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
819
rxq_stats = &rxq->rxq_stats;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
75
struct hinic3_rxq *rxq;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
118
static void rq_associate_cqes(struct hinic3_rxq *rxq)
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
125
qpages = &rxq->rq->wq.qpages;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
127
for (i = 0; i < rxq->q_depth; i++) {
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
129
cqe_dma = rxq->cqe_start_paddr +
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
146
static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
148
u32 i, free_wqebbs = rxq->delta - 1;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
154
rx_info = &rxq->rx_info[rxq->next_to_update];
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
156
err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
157
rxq->buf_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
163
rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
164
rxq->buf_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
165
rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
169
hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
170
rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
171
rxq->delta -= i;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
172
rxq->next_to_alloc = rxq->next_to_update;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
213
static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
224
page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
225
rxq->buf_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
230
page_pool_put_full_page(rxq->page_pool, page, false);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
236
rx_info->page_offset, size, rxq->buf_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
240
static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
249
sw_ci = rxq->cons_idx & rxq->q_mask;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
251
rx_info = &rxq->rx_info[sw_ci];
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
252
sw_ci = (sw_ci + 1) & rxq->q_mask;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
253
if (unlikely(temp_pkt_len > rxq->buf_len)) {
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
254
size = rxq->buf_len;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
255
temp_pkt_len -= rxq->buf_len;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
260
hinic3_add_rx_frag(rxq, rx_info, skb, size);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
268
static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
272
sge_num = pkt_len >> rxq->buf_len_shift;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
273
sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
278
static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
284
skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
288
sge_num = hinic3_get_sge_num(rxq, pkt_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
291
packaging_skb(rxq, skb, sge_num, pkt_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
293
rxq->cons_idx += sge_num;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
294
rxq->delta += sge_num;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
323
static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
330
struct net_device *netdev = rxq->netdev;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
374
static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
377
struct net_device *netdev = rxq->netdev;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
382
skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
391
hinic3_rx_csum(rxq, offload_type, status, skb);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
397
skb_record_rx_queue(skb, rxq->q_id);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
401
napi_gro_flush(&rxq->irq_cfg->napi, false);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
404
napi_gro_receive(&rxq->irq_cfg->napi, skb);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
501
struct hinic3_rxq *rxq;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
506
rxq = &nic_dev->rxqs[q_id];
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
510
rxq->irq_id = msix_entry->vector;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
511
rxq->msix_entry_idx = msix_entry->entry;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
512
rxq->next_to_update = 0;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
513
rxq->next_to_alloc = rqres->next_to_alloc;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
514
rxq->q_depth = rq_depth;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
515
rxq->delta = rxq->q_depth;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
516
rxq->q_mask = rxq->q_depth - 1;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
517
rxq->cons_idx = 0;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
519
rxq->cqe_arr = rqres->cqe_start_vaddr;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
520
rxq->cqe_start_paddr = rqres->cqe_start_paddr;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
521
rxq->rx_info = rqres->rx_info;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
522
rxq->page_pool = rqres->page_pool;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
524
rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
526
rq_associate_cqes(rxq);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
528
pkts = hinic3_rx_fill_buffers(rxq);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
53
static void hinic3_rxq_stats_init(struct hinic3_rxq *rxq)
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
538
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
540
struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
548
sw_ci = rxq->cons_idx & rxq->q_mask;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
549
rx_cqe = rxq->cqe_arr + sw_ci;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
55
struct hinic3_rxq_stats *rxq_stats = &rxq->rxq_stats;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
559
if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
565
num_wqe += hinic3_get_sge_num(rxq, pkt_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
573
if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
574
hinic3_rx_fill_buffers(rxq);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
66
struct hinic3_rxq *rxq;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
74
rxq = &nic_dev->rxqs[q_id];
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
75
rxq->netdev = netdev;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
76
rxq->dev = &pdev->dev;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
77
rxq->q_id = q_id;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
78
rxq->buf_len = nic_dev->rx_buf_len;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
79
rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
80
rxq->q_depth = nic_dev->q_params.rq_depth;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
81
rxq->q_mask = nic_dev->q_params.rq_depth - 1;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
83
hinic3_rxq_stats_init(rxq);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
123
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
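hinic3 sizes its ring as a power of two and derives q_mask = q_depth - 1, so free-running counters such as cons_idx are reduced to a slot index with a cheap AND instead of a modulo, and the counters themselves never need an explicit wrap. A tiny userspace model:

    #include <stdio.h>

    int main(void)
    {
        unsigned int q_depth = 1024;              /* must be a power of two */
        unsigned int q_mask  = q_depth - 1;
        unsigned int cons_idx = 1030;             /* free-running counter */
        unsigned int sw_ci = cons_idx & q_mask;   /* ring slot 6 */

        printf("slot %u\n", sw_ci);
        return 0;
    }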
drivers/net/ethernet/ibm/ibmveth.h
41
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
drivers/net/ethernet/ibm/ibmveth.h
42
plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2395
qpi->rxq.vsi_id != qci->vsi_id ||
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2396
qpi->rxq.queue_id != vsi_queue_id) {
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2411
&qpi->rxq) ||
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
412
vqpi->rxq.vsi_id = vqci->vsi_id;
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
413
vqpi->rxq.queue_id = i;
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
414
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
415
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
416
vqpi->rxq.max_pkt_size = max_frame;
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
417
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
419
vqpi->rxq.rxdid = adapter->rxdid;
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
421
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
423
vqpi->rxq.flags = rx_flags;
drivers/net/ethernet/intel/ice/ice_base.c
1270
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
drivers/net/ethernet/intel/ice/ice_base.c
1281
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
drivers/net/ethernet/intel/ice/ice_base.h
26
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
drivers/net/ethernet/intel/ice/ice_lib.c
1919
u16 txq = 0, rxq = 0;
drivers/net/ethernet/intel/ice/ice_lib.c
1946
ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
drivers/net/ethernet/intel/ice/ice_lib.c
1948
rxq++;
drivers/net/ethernet/intel/ice/ice_lib.c
2606
u32 rxq = 0;
drivers/net/ethernet/intel/ice/ice_lib.c
2626
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
drivers/net/ethernet/intel/ice/ice_lib.c
2627
rxq++;
drivers/net/ethernet/intel/ice/virt/queues.c
781
if (!qci->qpair[i].rxq.crc_disable)
drivers/net/ethernet/intel/ice/virt/queues.c
792
qpi->rxq.vsi_id != qci->vsi_id ||
drivers/net/ethernet/intel/ice/virt/queues.c
793
qpi->rxq.queue_id != qpi->txq.queue_id ||
drivers/net/ethernet/intel/ice/virt/queues.c
796
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
drivers/net/ethernet/intel/ice/virt/queues.c
801
q_idx = qpi->rxq.queue_id;
drivers/net/ethernet/intel/ice/virt/queues.c
828
if (qpi->rxq.ring_len > 0) {
drivers/net/ethernet/intel/ice/virt/queues.c
833
ring->dma = qpi->rxq.dma_ring_addr;
drivers/net/ethernet/intel/ice/virt/queues.c
834
ring->count = qpi->rxq.ring_len;
drivers/net/ethernet/intel/ice/virt/queues.c
836
if (qpi->rxq.crc_disable)
drivers/net/ethernet/intel/ice/virt/queues.c
841
if (qpi->rxq.databuffer_size != 0 &&
drivers/net/ethernet/intel/ice/virt/queues.c
842
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
drivers/net/ethernet/intel/ice/virt/queues.c
843
qpi->rxq.databuffer_size < 1024))
drivers/net/ethernet/intel/ice/virt/queues.c
846
ring->rx_buf_len = qpi->rxq.databuffer_size;
drivers/net/ethernet/intel/ice/virt/queues.c
848
if (qpi->rxq.max_pkt_size > max_frame_size ||
drivers/net/ethernet/intel/ice/virt/queues.c
849
qpi->rxq.max_pkt_size < 64)
drivers/net/ethernet/intel/ice/virt/queues.c
852
vsi->max_frame = qpi->rxq.max_pkt_size;
drivers/net/ethernet/intel/ice/virt/queues.c
873
rxdid = qpi->rxq.rxdid;
drivers/net/ethernet/intel/ice/virt/queues.c
883
(qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
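The ice virtchnl handler range-checks every VF-supplied rxq parameter before programming hardware (queues.c lines 841-849). A hypothetical stand-alone version of those bounds checks, mirroring the quoted expressions rather than any named kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    static bool rxq_params_valid(unsigned int databuffer_size,
                                 unsigned int max_pkt_size,
                                 unsigned int max_frame_size)
    {
        /* buffer size, when set, must fit hardware limits */
        if (databuffer_size != 0 &&
            (databuffer_size > ((16 * 1024) - 128) || databuffer_size < 1024))
            return false;
        /* frame size must be at least a minimum Ethernet frame */
        if (max_pkt_size > max_frame_size || max_pkt_size < 64)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", rxq_params_valid(2048, 1518, 9728)); /* 1: accepted */
        printf("%d\n", rxq_params_valid(512, 1518, 9728));  /* 0: buf too small */
        return 0;
    }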
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1134
struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1138
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1140
rxq = rxq_grp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1142
if (!rxq)
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1146
start = u64_stats_fetch_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1148
stats = &rxq->q_stats;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1153
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1270
struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1273
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1275
rxq = rxq_grp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1276
if (!rxq)
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1279
idpf_add_queue_stats(&data, rxq, qtype);
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1310
return rsrc->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
drivers/net/ethernet/intel/idpf/idpf_ptp.c
340
struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/idpf_ptp.c
345
rxq = grp->singleq.rxqs[i];
drivers/net/ethernet/intel/idpf/idpf_ptp.c
346
if (rxq)
drivers/net/ethernet/intel/idpf/idpf_ptp.c
347
WRITE_ONCE(rxq->cached_phc_time, systime);
drivers/net/ethernet/intel/idpf/idpf_ptp.c
351
rxq = &grp->splitq.rxq_sets[i]->rxq;
drivers/net/ethernet/intel/idpf/idpf_ptp.c
352
if (rxq)
drivers/net/ethernet/intel/idpf/idpf_ptp.c
353
WRITE_ONCE(rxq->cached_phc_time, systime);
drivers/net/ethernet/intel/idpf/idpf_ptp.c
710
rx_queue = &grp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
1007
struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
1009
rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
1011
idpf_rx_singleq_extract_fields(rxq, xdp->desc, &fields);
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
1012
__idpf_rx_singleq_process_skb_fields(rxq, skb, xdp->desc,
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
1128
struct idpf_rx_queue *rxq = q_vec->rx[i];
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
1131
pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
649
static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
657
if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
703
u64_stats_update_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
704
u64_stats_inc(&rxq->q_stats.hw_csum_err);
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
705
u64_stats_update_end(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
873
static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val)
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
875
rxq->next_to_use = val;
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
877
if (unlikely(!rxq->tail))
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
881
writel(val, rxq->tail);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1046
idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1047
idpf_rx_desc_rel(q->rxq, rsrc->dev, rsrc->rxq_model);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1142
qs->qs[num++].rxq = qv->rx[i];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1896
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3184
idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3190
if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded))
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3209
static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3216
if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3250
u64_stats_update_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3251
u64_stats_inc(&rxq->q_stats.hw_csum_err);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3252
u64_stats_update_end(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3300
static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3353
u64_stats_update_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3354
u64_stats_inc(&rxq->q_stats.rsc_pkts);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3355
u64_stats_update_end(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3367
idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3377
cached_time = READ_ONCE(rxq->cached_phc_time);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3400
__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3409
decoded = rxq->rx_ptype_lkup[rx_ptype];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3412
idpf_rx_hash(rxq, skb, rx_desc, decoded);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3414
if (idpf_queue_has(PTP, rxq))
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3415
idpf_rx_hwtstamp(rxq, rx_desc, skb);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3419
return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3422
idpf_rx_csum(rxq, skb, csum_bits, decoded);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3431
struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3433
rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3435
return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3526
static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3530
u16 ntc = rxq->next_to_clean;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3534
libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3535
rxq->xdpsqs, rxq->num_xdp_txq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3536
libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3551
rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3556
if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3564
IDPF_RX_BUMP_NTC(rxq, ntc);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3565
u64_stats_update_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3566
u64_stats_inc(&rxq->q_stats.bad_descs);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3567
u64_stats_update_end(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3577
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3581
rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3610
u64_stats_update_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3611
u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3612
u64_stats_update_end(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3625
IDPF_RX_BUMP_NTC(rxq, ntc);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3631
idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3636
rxq->next_to_clean = ntc;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3637
libeth_xdp_save_buff(&rxq->xdp, xdp);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3639
u64_stats_update_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3640
u64_stats_add(&rxq->q_stats.packets, rs.packets);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3641
u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3642
u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3643
u64_stats_update_end(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
3996
struct idpf_rx_queue *rxq = q_vector->rx[i];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4000
start = u64_stats_fetch_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4001
pkts = u64_stats_read(&rxq->q_stats.packets);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4002
bts = u64_stats_read(&rxq->q_stats.bytes);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4003
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
drivers/net/ethernet/intel/idpf/idpf_txrx.c
413
static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
416
.fqes = rxq->rx_buf,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
417
.pp = rxq->pp,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
420
if (!rxq->rx_buf)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
423
for (u32 i = 0; i < rxq->desc_count; i++)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
424
idpf_rx_page_rel(&rxq->rx_buf[i]);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
427
rxq->rx_buf = NULL;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
428
rxq->pp = NULL;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4295
struct idpf_rx_queue *rxq = q_vec->rx[i];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4298
pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4299
idpf_xskrq_poll(rxq, budget_per_q) :
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4300
idpf_rx_splitq_clean(rxq, budget_per_q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
439
static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4398
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
442
if (!rxq)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
445
if (!idpf_queue_has(XSK, rxq))
drivers/net/ethernet/intel/idpf/idpf_txrx.c
446
libeth_xdp_return_stash(&rxq->xdp);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
449
idpf_rx_buf_rel_all(rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
451
idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
453
rxq->next_to_alloc = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
454
rxq->next_to_clean = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
455
rxq->next_to_use = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
456
if (!rxq->desc_ring)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
459
dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
460
rxq->desc_ring = NULL;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
516
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
676
static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
678
if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
drivers/net/ethernet/intel/idpf/idpf_txrx.c
684
idpf_rx_buf_rel_all(rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
695
static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
698
.count = rxq->desc_count,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
701
.nid = idpf_q_vector_to_mem(rxq->q_vector),
drivers/net/ethernet/intel/idpf/idpf_txrx.c
705
ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
709
rxq->pp = fq.pp;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
710
rxq->rx_buf = fq.fqes;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
711
rxq->truesize = fq.truesize;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
712
rxq->rx_buf_size = fq.buf_len;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
714
return idpf_rx_buf_alloc_singleq(rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
844
struct idpf_rx_queue *rxq)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
848
rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
851
rxq->size = ALIGN(rxq->size, 4096);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
852
rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
853
&rxq->dma, GFP_KERNEL);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
854
if (!rxq->desc_ring) {
drivers/net/ethernet/intel/idpf/idpf_txrx.c
856
rxq->size);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
860
rxq->next_to_alloc = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
861
rxq->next_to_clean = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
862
rxq->next_to_use = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
863
idpf_queue_set(GEN_CHK, rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
865
idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
924
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
977
err = idpf_rx_desc_alloc(vport, q->rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
981
err = idpf_xdp_rxq_info_init(q->rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
986
err = idpf_rx_bufs_init_singleq(q->rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.h
1125
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
drivers/net/ethernet/intel/idpf/idpf_txrx.h
88
#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
drivers/net/ethernet/intel/idpf/idpf_txrx.h
897
struct idpf_rx_queue rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.h
90
#define IDPF_RX_BUMP_NTC(rxq, ntc) \
drivers/net/ethernet/intel/idpf/idpf_txrx.h
92
if (unlikely(++(ntc) == (rxq)->desc_count)) { \
drivers/net/ethernet/intel/idpf/idpf_txrx.h
94
idpf_queue_change(GEN_CHK, rxq); \
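The IDPF_RX_BUMP_NTC macro (idpf_txrx.h lines 90-94) toggles a software generation flag each time next_to_clean wraps; a completed descriptor is recognized by its gen bit matching that flag, so the driver never has to read a hardware head register. A userspace model of the wrap-and-toggle:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring_model {
        unsigned int ntc;        /* next_to_clean */
        unsigned int desc_count;
        bool gen_chk;            /* expected generation of valid descriptors */
    };

    static void bump_ntc(struct ring_model *r)
    {
        if (++r->ntc == r->desc_count) {
            r->ntc = 0;
            r->gen_chk = !r->gen_chk;  /* models idpf_queue_change(GEN_CHK, rxq) */
        }
    }

    int main(void)
    {
        struct ring_model r = {
            .ntc = 510, .desc_count = 512, .gen_chk = true,
        };

        for (int i = 0; i < 3; i++)
            bump_ntc(&r);
        printf("ntc=%u gen=%d\n", r.ntc, r.gen_chk);  /* ntc=1 gen=0 */
        return 0;
    }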
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2045
idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq,
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2083
goto rxq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2093
rxq:
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2098
qs->qs[k++].rxq =
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2099
&rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2101
qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2179
qid = q->rxq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2260
qs->qs[k++].rxq =
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2261
&rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2263
qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2348
qid = q->rxq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2350
if (idpf_queue_has(NOIRQ, q->rxq))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2353
vec = q->rxq->q_vector;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2442
qs->qs[k++].rxq =
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2443
&rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2445
qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3942
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
134
struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/xdp.c
103
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
drivers/net/ethernet/intel/idpf/xdp.c
106
rxq->xdpsqs = NULL;
drivers/net/ethernet/intel/idpf/xdp.c
107
rxq->num_xdp_txq = 0;
drivers/net/ethernet/intel/idpf/xdp.c
11
int (*fn)(struct idpf_rx_queue *rxq, void *arg),
drivers/net/ethernet/intel/idpf/xdp.c
110
if (!idpf_queue_has(XSK, rxq))
drivers/net/ethernet/intel/idpf/xdp.c
111
xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
drivers/net/ethernet/intel/idpf/xdp.c
113
xdp_rxq_info_unreg(&rxq->xdp_rxq);
drivers/net/ethernet/intel/idpf/xdp.c
118
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
drivers/net/ethernet/intel/idpf/xdp.c
120
__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
drivers/net/ethernet/intel/idpf/xdp.c
129
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
drivers/net/ethernet/intel/idpf/xdp.c
137
old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
drivers/net/ethernet/intel/idpf/xdp.c
33
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/xdp.c
389
const struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/xdp.c
392
rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
drivers/net/ethernet/intel/idpf/xdp.c
396
pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
drivers/net/ethernet/intel/idpf/xdp.c
397
if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
drivers/net/ethernet/intel/idpf/xdp.c
410
const struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/xdp.c
414
rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
drivers/net/ethernet/intel/idpf/xdp.c
416
if (!idpf_queue_has(PTP, rxq))
drivers/net/ethernet/intel/idpf/xdp.c
424
cached_time = READ_ONCE(rxq->cached_phc_time);
drivers/net/ethernet/intel/idpf/xdp.c
46
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
drivers/net/ethernet/intel/idpf/xdp.c
48
const struct idpf_vport *vport = rxq->q_vector->vport;
drivers/net/ethernet/intel/idpf/xdp.c
54
if (idpf_queue_has(XSK, rxq))
drivers/net/ethernet/intel/idpf/xdp.c
55
frag_size = rxq->bufq_sets[0].bufq.truesize;
drivers/net/ethernet/intel/idpf/xdp.c
57
err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
drivers/net/ethernet/intel/idpf/xdp.c
58
rxq->q_vector->napi.napi_id,
drivers/net/ethernet/intel/idpf/xdp.c
66
if (idpf_queue_has(XSK, rxq)) {
drivers/net/ethernet/intel/idpf/xdp.c
67
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
drivers/net/ethernet/intel/idpf/xdp.c
69
rxq->pool);
drivers/net/ethernet/intel/idpf/xdp.c
75
pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
drivers/net/ethernet/intel/idpf/xdp.c
76
xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
drivers/net/ethernet/intel/idpf/xdp.c
82
rxq->xdpsqs = &vport->txqs[rsrc->xdp_txq_offset];
drivers/net/ethernet/intel/idpf/xdp.c
83
rxq->num_xdp_txq = vport->num_xdp_txq;
drivers/net/ethernet/intel/idpf/xdp.c
88
xdp_rxq_info_unreg(&rxq->xdp_rxq);
drivers/net/ethernet/intel/idpf/xdp.c
93
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
drivers/net/ethernet/intel/idpf/xdp.c
95
return __idpf_xdp_rxq_info_init(rxq, NULL);
drivers/net/ethernet/intel/idpf/xdp.h
11
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
drivers/net/ethernet/intel/idpf/xdp.h
13
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
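The idpf xdp.c hits walk the core XDP rxq-info lifecycle: register the queue against the netdev, attach a memory model, and unregister on teardown or error. A minimal sketch using the generic net/xdp.h API (idpf itself goes through __xdp_rxq_info_reg and libeth helpers; example_xdp_rxq_init is a hypothetical wrapper):

    #include <net/xdp.h>

    static int example_xdp_rxq_init(struct xdp_rxq_info *xdp_rxq,
                                    struct net_device *netdev, u32 idx,
                                    unsigned int napi_id,
                                    struct page_pool *pp)
    {
        int err;

        err = xdp_rxq_info_reg(xdp_rxq, netdev, idx, napi_id);
        if (err)
            return err;

        /* tell the XDP core where returned buffers go */
        err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
        if (err)
            xdp_rxq_info_unreg(xdp_rxq);   /* undo the registration */

        return err;
    }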
drivers/net/ethernet/intel/idpf/xsk.c
128
struct idpf_rx_queue *rxq;
drivers/net/ethernet/intel/idpf/xsk.c
13
struct idpf_rx_queue *rxq)
drivers/net/ethernet/intel/idpf/xsk.c
133
rxq = q;
drivers/net/ethernet/intel/idpf/xsk.c
134
if (!idpf_queue_has_clear(XSK, rxq))
drivers/net/ethernet/intel/idpf/xsk.c
137
rxq->pool = NULL;
drivers/net/ethernet/intel/idpf/xsk.c
17
pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
drivers/net/ethernet/intel/idpf/xsk.c
21
rxq->pool = pool;
drivers/net/ethernet/intel/idpf/xsk.c
23
idpf_queue_set(XSK, rxq);
drivers/net/ethernet/intel/idpf/xsk.c
38
qid = grp->splitq.rxq_sets[0]->rxq.idx;
drivers/net/ethernet/intel/idpf/xsk.c
467
int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
drivers/net/ethernet/intel/idpf/xsk.c
472
u32 ntc = rxq->next_to_clean;
drivers/net/ethernet/intel/idpf/xsk.c
475
u32 cnt = rxq->desc_count;
drivers/net/ethernet/intel/idpf/xsk.c
477
wake = xsk_uses_need_wakeup(rxq->pool);
drivers/net/ethernet/intel/idpf/xsk.c
479
xsk_clear_rx_need_wakeup(rxq->pool);
drivers/net/ethernet/intel/idpf/xsk.c
481
gen = idpf_queue_has(GEN_CHK, rxq);
drivers/net/ethernet/intel/idpf/xsk.c
483
libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
drivers/net/ethernet/intel/idpf/xsk.c
484
rxq->xdpsqs, rxq->num_xdp_txq);
drivers/net/ethernet/intel/idpf/xsk.c
485
xdp = rxq->xsk;
drivers/net/ethernet/intel/idpf/xsk.c
493
rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
drivers/net/ethernet/intel/idpf/xsk.c
504
bufq = &rxq->bufq_sets[bufq_id].bufq;
drivers/net/ethernet/intel/idpf/xsk.c
520
idpf_queue_change(GEN_CHK, rxq);
drivers/net/ethernet/intel/idpf/xsk.c
526
fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
drivers/net/ethernet/intel/idpf/xsk.c
535
rxq->next_to_clean = ntc;
drivers/net/ethernet/intel/idpf/xsk.c
536
rxq->xsk = xdp;
drivers/net/ethernet/intel/idpf/xsk.c
540
u64_stats_update_begin(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/xsk.c
541
u64_stats_add(&rxq->q_stats.packets, rs.packets);
drivers/net/ethernet/intel/idpf/xsk.c
542
u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
drivers/net/ethernet/intel/idpf/xsk.c
543
u64_stats_update_end(&rxq->stats_sync);
drivers/net/ethernet/intel/idpf/xsk.c
549
xsk_set_rx_need_wakeup(rxq->pool);
drivers/net/ethernet/intel/idpf/xsk.h
27
int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget);
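The idpf xsk.c poll loop clears the AF_XDP need_wakeup flag while it is actively running and sets it again when buffers run dry, so userspace knows when a syscall kick is required. A sketch of that handshake, with hypothetical example_* helpers around the real net/xdp_sock_drv.h calls:

    #include <net/xdp_sock_drv.h>

    /* Entering the poll loop: the driver is running, no wakeup needed. */
    static void example_xsk_poll_prologue(struct xsk_buff_pool *pool)
    {
        if (xsk_uses_need_wakeup(pool))
            xsk_clear_rx_need_wakeup(pool);
    }

    /* Leaving the poll loop: if buffers were exhausted, ask userspace
     * to wake the kernel once it has refilled the fill queue. */
    static void example_xsk_poll_epilogue(struct xsk_buff_pool *pool,
                                          bool starved)
    {
        if (starved && xsk_uses_need_wakeup(pool))
            xsk_set_rx_need_wakeup(pool);
    }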
drivers/net/ethernet/intel/igc/igc_main.c
7053
if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
drivers/net/ethernet/intel/igc/igc_main.c
7065
struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev);
drivers/net/ethernet/intel/libeth/xdp.c
304
if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
drivers/net/ethernet/marvell/mv643xx_eth.c
1937
struct rx_queue *rxq = mp->rxq + index;
drivers/net/ethernet/marvell/mv643xx_eth.c
1942
rxq->index = index;
drivers/net/ethernet/marvell/mv643xx_eth.c
1944
rxq->rx_ring_size = mp->rx_ring_size;
drivers/net/ethernet/marvell/mv643xx_eth.c
1946
rxq->rx_desc_count = 0;
drivers/net/ethernet/marvell/mv643xx_eth.c
1947
rxq->rx_curr_desc = 0;
drivers/net/ethernet/marvell/mv643xx_eth.c
1948
rxq->rx_used_desc = 0;
drivers/net/ethernet/marvell/mv643xx_eth.c
1950
size = rxq->rx_ring_size * sizeof(struct rx_desc);
drivers/net/ethernet/marvell/mv643xx_eth.c
1953
rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
drivers/net/ethernet/marvell/mv643xx_eth.c
1955
rxq->rx_desc_dma = mp->rx_desc_sram_addr;
drivers/net/ethernet/marvell/mv643xx_eth.c
1957
rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
drivers/net/ethernet/marvell/mv643xx_eth.c
1958
size, &rxq->rx_desc_dma,
drivers/net/ethernet/marvell/mv643xx_eth.c
1962
if (rxq->rx_desc_area == NULL) {
drivers/net/ethernet/marvell/mv643xx_eth.c
1967
memset(rxq->rx_desc_area, 0, size);
drivers/net/ethernet/marvell/mv643xx_eth.c
1969
rxq->rx_desc_area_size = size;
drivers/net/ethernet/marvell/mv643xx_eth.c
1970
rxq->rx_skb = kzalloc_objs(*rxq->rx_skb, rxq->rx_ring_size);
drivers/net/ethernet/marvell/mv643xx_eth.c
1971
if (rxq->rx_skb == NULL)
drivers/net/ethernet/marvell/mv643xx_eth.c
1974
rx_desc = rxq->rx_desc_area;
drivers/net/ethernet/marvell/mv643xx_eth.c
1975
for (i = 0; i < rxq->rx_ring_size; i++) {
drivers/net/ethernet/marvell/mv643xx_eth.c
1979
if (nexti == rxq->rx_ring_size)
drivers/net/ethernet/marvell/mv643xx_eth.c
1982
rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
drivers/net/ethernet/marvell/mv643xx_eth.c
1991
iounmap(rxq->rx_desc_area);
drivers/net/ethernet/marvell/mv643xx_eth.c
1994
rxq->rx_desc_area,
drivers/net/ethernet/marvell/mv643xx_eth.c
1995
rxq->rx_desc_dma);
drivers/net/ethernet/marvell/mv643xx_eth.c
2001
static void rxq_deinit(struct rx_queue *rxq)
drivers/net/ethernet/marvell/mv643xx_eth.c
2003
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
drivers/net/ethernet/marvell/mv643xx_eth.c
2006
rxq_disable(rxq);
drivers/net/ethernet/marvell/mv643xx_eth.c
2008
for (i = 0; i < rxq->rx_ring_size; i++) {
drivers/net/ethernet/marvell/mv643xx_eth.c
2009
if (rxq->rx_skb[i]) {
drivers/net/ethernet/marvell/mv643xx_eth.c
2010
dev_consume_skb_any(rxq->rx_skb[i]);
drivers/net/ethernet/marvell/mv643xx_eth.c
2011
rxq->rx_desc_count--;
drivers/net/ethernet/marvell/mv643xx_eth.c
2015
if (rxq->rx_desc_count) {
drivers/net/ethernet/marvell/mv643xx_eth.c
2017
rxq->rx_desc_count);
drivers/net/ethernet/marvell/mv643xx_eth.c
2020
if (rxq->index == 0 &&
drivers/net/ethernet/marvell/mv643xx_eth.c
2021
rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
drivers/net/ethernet/marvell/mv643xx_eth.c
2022
iounmap(rxq->rx_desc_area);
drivers/net/ethernet/marvell/mv643xx_eth.c
2024
dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
drivers/net/ethernet/marvell/mv643xx_eth.c
2025
rxq->rx_desc_area, rxq->rx_desc_dma);
drivers/net/ethernet/marvell/mv643xx_eth.c
2027
kfree(rxq->rx_skb);
drivers/net/ethernet/marvell/mv643xx_eth.c
2289
work_done += rxq_process(mp->rxq + queue, work_tbd);
drivers/net/ethernet/marvell/mv643xx_eth.c
2291
work_done += rxq_refill(mp->rxq + queue, work_tbd);
drivers/net/ethernet/marvell/mv643xx_eth.c
2379
struct rx_queue *rxq = mp->rxq + i;
drivers/net/ethernet/marvell/mv643xx_eth.c
2382
addr = (u32)rxq->rx_desc_dma;
drivers/net/ethernet/marvell/mv643xx_eth.c
2383
addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
drivers/net/ethernet/marvell/mv643xx_eth.c
2386
rxq_enable(rxq);
drivers/net/ethernet/marvell/mv643xx_eth.c
2445
rxq_deinit(mp->rxq + i);
drivers/net/ethernet/marvell/mv643xx_eth.c
2449
rxq_refill(mp->rxq + i, INT_MAX);
drivers/net/ethernet/marvell/mv643xx_eth.c
2479
rxq_deinit(mp->rxq + i);
drivers/net/ethernet/marvell/mv643xx_eth.c
2493
rxq_disable(mp->rxq + i);
drivers/net/ethernet/marvell/mv643xx_eth.c
2537
rxq_deinit(mp->rxq + i);
drivers/net/ethernet/marvell/mv643xx_eth.c
399
struct rx_queue rxq[8];
drivers/net/ethernet/marvell/mv643xx_eth.c
441
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
drivers/net/ethernet/marvell/mv643xx_eth.c
443
return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
drivers/net/ethernet/marvell/mv643xx_eth.c
451
static void rxq_enable(struct rx_queue *rxq)
drivers/net/ethernet/marvell/mv643xx_eth.c
453
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
drivers/net/ethernet/marvell/mv643xx_eth.c
454
wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
drivers/net/ethernet/marvell/mv643xx_eth.c
457
static void rxq_disable(struct rx_queue *rxq)
drivers/net/ethernet/marvell/mv643xx_eth.c
459
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
drivers/net/ethernet/marvell/mv643xx_eth.c
460
u8 mask = 1 << rxq->index;
drivers/net/ethernet/marvell/mv643xx_eth.c
506
static int rxq_process(struct rx_queue *rxq, int budget)
drivers/net/ethernet/marvell/mv643xx_eth.c
508
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
drivers/net/ethernet/marvell/mv643xx_eth.c
513
while (rx < budget && rxq->rx_desc_count) {
drivers/net/ethernet/marvell/mv643xx_eth.c
519
rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
drivers/net/ethernet/marvell/mv643xx_eth.c
526
skb = rxq->rx_skb[rxq->rx_curr_desc];
drivers/net/ethernet/marvell/mv643xx_eth.c
527
rxq->rx_skb[rxq->rx_curr_desc] = NULL;
drivers/net/ethernet/marvell/mv643xx_eth.c
529
rxq->rx_curr_desc++;
drivers/net/ethernet/marvell/mv643xx_eth.c
530
if (rxq->rx_curr_desc == rxq->rx_ring_size)
drivers/net/ethernet/marvell/mv643xx_eth.c
531
rxq->rx_curr_desc = 0;
drivers/net/ethernet/marvell/mv643xx_eth.c
535
rxq->rx_desc_count--;
drivers/net/ethernet/marvell/mv643xx_eth.c
538
mp->work_rx_refill |= 1 << rxq->index;
drivers/net/ethernet/marvell/mv643xx_eth.c
593
mp->work_rx &= ~(1 << rxq->index);
drivers/net/ethernet/marvell/mv643xx_eth.c
598
static int rxq_refill(struct rx_queue *rxq, int budget)
drivers/net/ethernet/marvell/mv643xx_eth.c
600
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
drivers/net/ethernet/marvell/mv643xx_eth.c
604
while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
drivers/net/ethernet/marvell/mv643xx_eth.c
621
rxq->rx_desc_count++;
drivers/net/ethernet/marvell/mv643xx_eth.c
623
rx = rxq->rx_used_desc++;
drivers/net/ethernet/marvell/mv643xx_eth.c
624
if (rxq->rx_used_desc == rxq->rx_ring_size)
drivers/net/ethernet/marvell/mv643xx_eth.c
625
rxq->rx_used_desc = 0;
drivers/net/ethernet/marvell/mv643xx_eth.c
627
rx_desc = rxq->rx_desc_area + rx;
drivers/net/ethernet/marvell/mv643xx_eth.c
634
rxq->rx_skb[rx] = skb;
drivers/net/ethernet/marvell/mv643xx_eth.c
648
mp->work_rx_refill &= ~(1 << rxq->index);
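
The mv643xx_eth hits above center on one idiom: each struct rx_queue records its own index, so rxq_to_mp() can walk back from a queue pointer to the enclosing mv643xx_eth_private via container_of(), and per-queue enable/disable is just 1 << rxq->index written to RXQ_COMMAND. A minimal standalone model of the container_of() round trip (userspace C; the struct is reduced to what the pattern needs, and the variable-index offsetof relies on the GCC-style builtin, as the kernel's own container_of() does):

/* Standalone model of the mv643xx_eth rxq_to_mp() pattern: each rx_queue
 * stores its own index, so container_of() can recover the parent private
 * struct from a pointer to any element of the embedded rxq[] array.
 * Names mirror the driver, but this is a userspace sketch, not driver code. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rx_queue {
	int index;
};

struct mv643xx_eth_private {
	struct rx_queue rxq[8];
};

static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	/* rxq[rxq->index] names the very element rxq points at, so the
	 * subtraction inside container_of() lands on the parent struct
	 * (variable array index in offsetof is a GCC/clang extension). */
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

int main(void)
{
	struct mv643xx_eth_private mp;

	for (int i = 0; i < 8; i++)
		mp.rxq[i].index = i;
	printf("%d\n", rxq_to_mp(&mp.rxq[5]) == &mp); /* prints 1 */
	return 0;
}
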
drivers/net/ethernet/marvell/mvneta.c
1003
struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
1008
val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
drivers/net/ethernet/marvell/mvneta.c
1013
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
1018
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
1022
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
drivers/net/ethernet/marvell/mvneta.c
1024
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
1029
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
1033
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
drivers/net/ethernet/marvell/mvneta.c
1035
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
1040
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
1044
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
drivers/net/ethernet/marvell/mvneta.c
1048
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
1053
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
1057
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
drivers/net/ethernet/marvell/mvneta.c
1061
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
109
#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
drivers/net/ethernet/marvell/mvneta.c
1269
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
drivers/net/ethernet/marvell/mvneta.c
1271
if (rxq->descs)
drivers/net/ethernet/marvell/mvneta.c
134
#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
drivers/net/ethernet/marvell/mvneta.c
1498
int rxq, txq;
drivers/net/ethernet/marvell/mvneta.c
1500
for (rxq = 0; rxq < rxq_number; rxq++)
drivers/net/ethernet/marvell/mvneta.c
1501
if ((rxq % max_cpu) == cpu)
drivers/net/ethernet/marvell/mvneta.c
1502
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
drivers/net/ethernet/marvell/mvneta.c
1686
struct mvneta_rx_queue *rxq, u32 value)
drivers/net/ethernet/marvell/mvneta.c
1688
mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
drivers/net/ethernet/marvell/mvneta.c
1696
struct mvneta_rx_queue *rxq, u32 value)
drivers/net/ethernet/marvell/mvneta.c
1704
mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
1724
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
1729
i = rx_desc - rxq->descs;
drivers/net/ethernet/marvell/mvneta.c
1730
rxq->buf_virt_addr[i] = virt_addr;
drivers/net/ethernet/marvell/mvneta.c
1937
struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
1943
page = page_pool_alloc_pages(rxq->page_pool,
drivers/net/ethernet/marvell/mvneta.c
1949
mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
drivers/net/ethernet/marvell/mvneta.c
1987
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
1991
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
1993
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
drivers/net/ethernet/marvell/mvneta.c
1998
mvneta_rxq_next_desc_get(rxq);
drivers/net/ethernet/marvell/mvneta.c
2010
for (i = 0; i < rxq->size; i++) {
drivers/net/ethernet/marvell/mvneta.c
2011
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
drivers/net/ethernet/marvell/mvneta.c
2012
void *data = rxq->buf_virt_addr[i];
drivers/net/ethernet/marvell/mvneta.c
2016
page_pool_put_full_page(rxq->page_pool, data, false);
drivers/net/ethernet/marvell/mvneta.c
2018
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
drivers/net/ethernet/marvell/mvneta.c
2019
xdp_rxq_info_unreg(&rxq->xdp_rxq);
drivers/net/ethernet/marvell/mvneta.c
2020
page_pool_destroy(rxq->page_pool);
drivers/net/ethernet/marvell/mvneta.c
2021
rxq->page_pool = NULL;
drivers/net/ethernet/marvell/mvneta.c
2041
int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
2044
int curr_desc = rxq->first_to_refill;
drivers/net/ethernet/marvell/mvneta.c
2047
for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
drivers/net/ethernet/marvell/mvneta.c
2048
rx_desc = rxq->descs + curr_desc;
drivers/net/ethernet/marvell/mvneta.c
2050
if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
drivers/net/ethernet/marvell/mvneta.c
2054
rxq->id, i, rxq->refill_num);
drivers/net/ethernet/marvell/mvneta.c
2063
curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
drivers/net/ethernet/marvell/mvneta.c
2065
rxq->refill_num -= i;
drivers/net/ethernet/marvell/mvneta.c
2066
rxq->first_to_refill = curr_desc;
drivers/net/ethernet/marvell/mvneta.c
2072
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
2082
page_pool_put_full_page(rxq->page_pool,
drivers/net/ethernet/marvell/mvneta.c
2086
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
drivers/net/ethernet/marvell/mvneta.c
2257
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
2281
mvneta_xdp_put_buff(pp, rxq, xdp, sync);
drivers/net/ethernet/marvell/mvneta.c
2292
mvneta_xdp_put_buff(pp, rxq, xdp, sync);
drivers/net/ethernet/marvell/mvneta.c
2301
mvneta_xdp_put_buff(pp, rxq, xdp, sync);
drivers/net/ethernet/marvell/mvneta.c
2316
struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
2334
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
drivers/net/ethernet/marvell/mvneta.c
2351
struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
2367
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
drivers/net/ethernet/marvell/mvneta.c
2389
page_pool_put_full_page(rxq->page_pool, page, true);
drivers/net/ethernet/marvell/mvneta.c
2429
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
2438
xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
drivers/net/ethernet/marvell/mvneta.c
2442
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
2448
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
drivers/net/ethernet/marvell/mvneta.c
2453
index = rx_desc - rxq->descs;
drivers/net/ethernet/marvell/mvneta.c
2454
page = (struct page *)rxq->buf_virt_addr[index];
drivers/net/ethernet/marvell/mvneta.c
2458
rxq->refill_num++;
drivers/net/ethernet/marvell/mvneta.c
2471
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
drivers/net/ethernet/marvell/mvneta.c
2476
page_pool_put_full_page(rxq->page_pool, page,
drivers/net/ethernet/marvell/mvneta.c
2481
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
drivers/net/ethernet/marvell/mvneta.c
2490
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
drivers/net/ethernet/marvell/mvneta.c
2495
mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
drivers/net/ethernet/marvell/mvneta.c
2498
skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
drivers/net/ethernet/marvell/mvneta.c
2502
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
drivers/net/ethernet/marvell/mvneta.c
2522
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
drivers/net/ethernet/marvell/mvneta.c
2531
refill = mvneta_rx_refill_queue(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
2534
mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
drivers/net/ethernet/marvell/mvneta.c
2542
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
2550
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
2559
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
drivers/net/ethernet/marvell/mvneta.c
2667
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
drivers/net/ethernet/marvell/mvneta.c
3357
struct mvneta_rx_queue *rxq, int size)
drivers/net/ethernet/marvell/mvneta.c
3372
rxq->page_pool = page_pool_create(&pp_params);
drivers/net/ethernet/marvell/mvneta.c
3373
if (IS_ERR(rxq->page_pool)) {
drivers/net/ethernet/marvell/mvneta.c
3374
err = PTR_ERR(rxq->page_pool);
drivers/net/ethernet/marvell/mvneta.c
3375
rxq->page_pool = NULL;
drivers/net/ethernet/marvell/mvneta.c
3379
err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
drivers/net/ethernet/marvell/mvneta.c
3384
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
drivers/net/ethernet/marvell/mvneta.c
3385
rxq->page_pool);
drivers/net/ethernet/marvell/mvneta.c
3392
xdp_rxq_info_unreg(&rxq->xdp_rxq);
drivers/net/ethernet/marvell/mvneta.c
3394
page_pool_destroy(rxq->page_pool);
drivers/net/ethernet/marvell/mvneta.c
3395
rxq->page_pool = NULL;
drivers/net/ethernet/marvell/mvneta.c
3400
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
3405
err = mvneta_create_page_pool(pp, rxq, num);
drivers/net/ethernet/marvell/mvneta.c
3410
memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
drivers/net/ethernet/marvell/mvneta.c
3411
if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
drivers/net/ethernet/marvell/mvneta.c
3415
__func__, rxq->id, i, num);
drivers/net/ethernet/marvell/mvneta.c
3423
mvneta_rxq_non_occup_desc_add(pp, rxq, i);
drivers/net/ethernet/marvell/mvneta.c
3450
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
3452
rxq->size = pp->rx_ring_size;
drivers/net/ethernet/marvell/mvneta.c
3455
rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
drivers/net/ethernet/marvell/mvneta.c
3456
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
drivers/net/ethernet/marvell/mvneta.c
3457
&rxq->descs_phys, GFP_KERNEL);
drivers/net/ethernet/marvell/mvneta.c
3458
if (!rxq->descs)
drivers/net/ethernet/marvell/mvneta.c
3461
rxq->last_desc = rxq->size - 1;
drivers/net/ethernet/marvell/mvneta.c
3467
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
3470
mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
drivers/net/ethernet/marvell/mvneta.c
3471
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
drivers/net/ethernet/marvell/mvneta.c
3474
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
drivers/net/ethernet/marvell/mvneta.c
3475
mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
drivers/net/ethernet/marvell/mvneta.c
3479
mvneta_rxq_offset_set(pp, rxq, 0);
drivers/net/ethernet/marvell/mvneta.c
3480
mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
drivers/net/ethernet/marvell/mvneta.c
3483
mvneta_rxq_bm_disable(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
3484
mvneta_rxq_fill(pp, rxq, rxq->size);
drivers/net/ethernet/marvell/mvneta.c
3487
mvneta_rxq_offset_set(pp, rxq,
drivers/net/ethernet/marvell/mvneta.c
3490
mvneta_rxq_bm_enable(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
3492
mvneta_rxq_long_pool_set(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
3493
mvneta_rxq_short_pool_set(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
3494
mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
drivers/net/ethernet/marvell/mvneta.c
3500
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
3505
ret = mvneta_rxq_sw_init(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
3509
mvneta_rxq_hw_init(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
3516
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
3518
mvneta_rxq_drop_pkts(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
3520
if (rxq->descs)
drivers/net/ethernet/marvell/mvneta.c
3522
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
drivers/net/ethernet/marvell/mvneta.c
3523
rxq->descs,
drivers/net/ethernet/marvell/mvneta.c
3524
rxq->descs_phys);
drivers/net/ethernet/marvell/mvneta.c
3526
rxq->descs = NULL;
drivers/net/ethernet/marvell/mvneta.c
3527
rxq->last_desc = 0;
drivers/net/ethernet/marvell/mvneta.c
3528
rxq->next_desc_to_proc = 0;
drivers/net/ethernet/marvell/mvneta.c
3529
rxq->descs_phys = 0;
drivers/net/ethernet/marvell/mvneta.c
3530
rxq->first_to_refill = 0;
drivers/net/ethernet/marvell/mvneta.c
3531
rxq->refill_num = 0;
drivers/net/ethernet/marvell/mvneta.c
4392
int rxq;
drivers/net/ethernet/marvell/mvneta.c
4394
for (rxq = 0; rxq < rxq_number; rxq++)
drivers/net/ethernet/marvell/mvneta.c
4395
if ((rxq % max_cpu) == cpu)
drivers/net/ethernet/marvell/mvneta.c
4396
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
drivers/net/ethernet/marvell/mvneta.c
4736
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
drivers/net/ethernet/marvell/mvneta.c
4737
rxq->time_coal = c->rx_coalesce_usecs;
drivers/net/ethernet/marvell/mvneta.c
4738
rxq->pkts_coal = c->rx_max_coalesced_frames;
drivers/net/ethernet/marvell/mvneta.c
4739
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
drivers/net/ethernet/marvell/mvneta.c
4740
mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
drivers/net/ethernet/marvell/mvneta.c
5167
static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
drivers/net/ethernet/marvell/mvneta.c
5172
val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
drivers/net/ethernet/marvell/mvneta.c
5244
int rxq, txq, tc, ret;
drivers/net/ethernet/marvell/mvneta.c
5269
for (rxq = mqprio->qopt.offset[tc];
drivers/net/ethernet/marvell/mvneta.c
5270
rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
drivers/net/ethernet/marvell/mvneta.c
5271
rxq++) {
drivers/net/ethernet/marvell/mvneta.c
5272
if (rxq >= rxq_number)
drivers/net/ethernet/marvell/mvneta.c
5275
mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
drivers/net/ethernet/marvell/mvneta.c
5392
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
drivers/net/ethernet/marvell/mvneta.c
5393
rxq->id = queue;
drivers/net/ethernet/marvell/mvneta.c
5394
rxq->size = pp->rx_ring_size;
drivers/net/ethernet/marvell/mvneta.c
5395
rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
drivers/net/ethernet/marvell/mvneta.c
5396
rxq->time_coal = MVNETA_RX_COAL_USEC;
drivers/net/ethernet/marvell/mvneta.c
5397
rxq->buf_virt_addr
drivers/net/ethernet/marvell/mvneta.c
5399
rxq->size,
drivers/net/ethernet/marvell/mvneta.c
5400
sizeof(*rxq->buf_virt_addr),
drivers/net/ethernet/marvell/mvneta.c
5402
if (!rxq->buf_virt_addr)
drivers/net/ethernet/marvell/mvneta.c
5820
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
drivers/net/ethernet/marvell/mvneta.c
5822
mvneta_rxq_drop_pkts(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
5872
struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
drivers/net/ethernet/marvell/mvneta.c
5874
rxq->next_desc_to_proc = 0;
drivers/net/ethernet/marvell/mvneta.c
5875
mvneta_rxq_hw_init(pp, rxq);
drivers/net/ethernet/marvell/mvneta.c
857
struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
864
mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
drivers/net/ethernet/marvell/mvneta.c
870
mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
drivers/net/ethernet/marvell/mvneta.c
876
struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
880
val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
drivers/net/ethernet/marvell/mvneta.c
888
struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
896
mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
916
mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvneta.c
922
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
drivers/net/ethernet/marvell/mvneta.c
924
int rx_desc = rxq->next_desc_to_proc;
drivers/net/ethernet/marvell/mvneta.c
926
rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
drivers/net/ethernet/marvell/mvneta.c
927
prefetch(rxq->descs + rxq->next_desc_to_proc);
drivers/net/ethernet/marvell/mvneta.c
928
return rxq->descs + rx_desc;
drivers/net/ethernet/marvell/mvneta.c
946
struct mvneta_rx_queue *rxq,
drivers/net/ethernet/marvell/mvneta.c
951
val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
drivers/net/ethernet/marvell/mvneta.c
956
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
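
The mvneta_rxq_next_desc_get() hits show the driver's ring walk: return the slot at next_desc_to_proc, advance that index with wraparound (MVNETA_QUEUE_NEXT_DESC), and prefetch the slot that will be handed out next. A standalone sketch of the walk, with illustrative types and ring size (the prefetch is noted as a comment, since it has no userspace equivalent here):

/* Userspace model of mvneta's next-descriptor walk: the current slot is
 * returned, and next_desc_to_proc advances with wraparound past last_desc. */
#include <stdio.h>

struct rx_desc { int filled; };

struct rx_ring {
	struct rx_desc descs[4];
	int last_desc;          /* ring size - 1 */
	int next_desc_to_proc;
};

/* Mirrors MVNETA_QUEUE_NEXT_DESC(q, index): wrap to 0 past last_desc. */
static int queue_next_desc(struct rx_ring *q, int index)
{
	return index < q->last_desc ? index + 1 : 0;
}

static struct rx_desc *next_desc_get(struct rx_ring *q)
{
	int cur = q->next_desc_to_proc;

	q->next_desc_to_proc = queue_next_desc(q, cur);
	/* the driver also prefetch()es descs[next_desc_to_proc] here */
	return &q->descs[cur];
}

int main(void)
{
	struct rx_ring q = { .last_desc = 3 };

	for (int i = 0; i < 6; i++)
		printf("%td ", next_desc_get(&q) - q.descs); /* 0 1 2 3 0 1 */
	printf("\n");
	return 0;
}
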
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
177
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
180
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
261
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
47
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1456
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1464
cpu = rxq / nrxqs;
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
1472
return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1206
int rxq;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1229
for (rxq = 0; rxq < port->nrxqs; rxq++)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1230
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1242
for (rxq = 0; rxq < port->nrxqs; rxq++)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1243
mvpp2_rxq_short_pool_set(port, rxq,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2429
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2431
int rx_desc = rxq->next_desc_to_proc;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2433
rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2434
prefetch(rxq->descs + rxq->next_desc_to_proc);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2435
return rxq->descs + rx_desc;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2708
struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2712
mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2724
struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2728
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2729
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2731
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2733
rxq->pkts_coal);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2776
struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2779
u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2782
rxq->time_coal =
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2786
val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2789
mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2943
struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2950
rxq->size = port->rx_ring_size;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2953
rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2954
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2955
&rxq->descs_dma, GFP_KERNEL);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2956
if (!rxq->descs)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2959
rxq->last_desc = rxq->size - 1;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2962
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2966
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2968
rxq_dma = rxq->descs_dma;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2970
rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2972
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2977
mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2980
mvpp2_rx_pkts_coal_set(port, rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2981
mvpp2_rx_time_coal_set(port, rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2984
mvpp2_set_rxq_free_tresh(port, rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2987
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2990
err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2994
err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2999
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3001
priv->page_pool[rxq->logic_rxq]);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3005
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3007
priv->page_pool[rxq->logic_rxq +
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3016
xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3018
xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3020
xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3023
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3024
rxq->descs, rxq->descs_dma);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3030
struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3034
rx_received = mvpp2_rxq_received(port, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3039
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3050
mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3055
struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3059
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3060
xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3062
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3063
xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3065
mvpp2_rxq_drop_pkts(port, rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3067
if (rxq->descs)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3069
rxq->size * MVPP2_DESC_ALIGNED_SIZE,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3070
rxq->descs,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3071
rxq->descs_dma);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3073
rxq->descs = NULL;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3074
rxq->last_desc = 0;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3075
rxq->next_desc_to_proc = 0;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3076
rxq->descs_dma = 0;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3081
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3083
mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3896
int rx_todo, struct mvpp2_rx_queue *rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3910
rx_received = mvpp2_rxq_received(port, rxq->id);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3915
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3977
xdp_rxq = &rxq->xdp_rxq_short;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3979
xdp_rxq = &rxq->xdp_rxq_long;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4077
mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4540
struct mvpp2_rx_queue *rxq;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4542
rxq = mvpp2_get_rx_queue(port, cause_rx);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4543
if (!rxq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4546
count = mvpp2_rx(port, napi, budget, rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
4554
cause_rx &= ~(1 << rxq->logic_rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5408
struct mvpp2_rx_queue *rxq = port->rxqs[queue];
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5410
rxq->time_coal = c->rx_coalesce_usecs;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5411
rxq->pkts_coal = c->rx_max_coalesced_frames;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5412
mvpp2_rx_pkts_coal_set(port, rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
5413
mvpp2_rx_time_coal_set(port, rxq);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6061
struct mvpp2_rx_queue *rxq;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6064
rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6065
if (!rxq) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6070
rxq->id = port->first_rxq + queue;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6071
rxq->port = port->id;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6072
rxq->logic_rxq = queue;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6074
port->rxqs[queue] = rxq;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6081
struct mvpp2_rx_queue *rxq = port->rxqs[queue];
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6083
rxq->size = port->rx_ring_size;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6084
rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6085
rxq->time_coal = MVPP2_RX_COAL_USEC;
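
The mvpp2_rx_time_coal_set() hits sketch a clamp-and-reconvert pattern: the requested microsecond value is converted to clock cycles for MVPP2_ISR_RX_THRESHOLD_REG, and if the result would overflow the register field, rxq->time_coal is rewritten to the largest representable value so a later ethtool read reports what the hardware will actually do. A minimal model, assuming an illustrative clock frequency and field mask (the real MVPP2 constants differ):

#include <stdio.h>
#include <stdint.h>

#define ISR_RX_THRESHOLD_MASK 0xfffff0 /* hypothetical register field mask */

static uint32_t usec_to_cycles(uint32_t usec, uint64_t clk_hz)
{
	return (uint32_t)((uint64_t)usec * clk_hz / 1000000ULL);
}

static uint32_t cycles_to_usec(uint32_t cycles, uint64_t clk_hz)
{
	return (uint32_t)((uint64_t)cycles * 1000000ULL / clk_hz);
}

int main(void)
{
	uint64_t freq = 250000000;   /* 250 MHz, illustrative only */
	uint32_t time_coal = 100000; /* requested usecs */
	uint32_t val = usec_to_cycles(time_coal, freq);

	if (val > ISR_RX_THRESHOLD_MASK) {
		/* clamp the stored usec value, then re-evaluate to get the
		 * actual register value, as the driver does */
		time_coal = cycles_to_usec(ISR_RX_THRESHOLD_MASK, freq);
		val = usec_to_cycles(time_coal, freq);
	}
	printf("time_coal=%u usec -> %u cycles\n", time_coal, val);
	return 0;
}
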
drivers/net/ethernet/marvell/sky2.c
1327
unsigned rxq = rxqaddr[sky2->port];
drivers/net/ethernet/marvell/sky2.c
1331
sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
drivers/net/ethernet/marvell/sky2.c
1334
if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
drivers/net/ethernet/marvell/sky2.c
1335
== sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
drivers/net/ethernet/marvell/sky2.c
1340
sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
drivers/net/ethernet/marvell/sky2.c
1343
sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
drivers/net/ethernet/marvell/sky2.c
1480
static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
drivers/net/ethernet/marvell/sky2.c
1482
sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
drivers/net/ethernet/marvell/sky2.c
1522
unsigned rxq = rxqaddr[sky2->port];
drivers/net/ethernet/marvell/sky2.c
1526
sky2_qset(hw, rxq);
drivers/net/ethernet/marvell/sky2.c
1530
sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
drivers/net/ethernet/marvell/sky2.c
1537
sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
drivers/net/ethernet/marvell/sky2.c
1539
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
drivers/net/ethernet/marvell/sky2.c
1568
sky2_rx_update(sky2, rxq);
drivers/net/ethernet/marvell/sky2.c
2932
unsigned rxq = rxqaddr[port];
drivers/net/ethernet/marvell/sky2.c
2935
u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
drivers/net/ethernet/marvell/sky2.c
2936
u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
drivers/net/ethernet/marvell/sky2.c
2948
fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
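
sky2 addresses its queues differently from the descriptor-ring drivers above: rxqaddr[port] yields a fixed per-port queue base, and macros such as Q_ADDR() and RB_ADDR() fold that base and a register offset into a single MMIO offset. A toy version of the address math (all block bases and register offsets here are made up, not the chip's):

#include <stdio.h>

enum { Q_R1 = 0x000, Q_R2 = 0x080 };        /* hypothetical queue bases */
static const unsigned int rxqaddr[] = { Q_R1, Q_R2 };

#define Q_ADDR(q, off)  (0x0400 + (q) + (off)) /* BMU register block  */
#define RB_ADDR(q, off) (0x0800 + (q) + (off)) /* RAM buffer block    */

enum { Q_CSR = 0x00, RB_CTRL = 0x00 };      /* illustrative offsets */

int main(void)
{
	int port = 1;
	unsigned int rxq = rxqaddr[port];

	printf("CSR of port %d rx queue at 0x%04x\n", port, Q_ADDR(rxq, Q_CSR));
	printf("RAM buffer ctrl at 0x%04x\n", RB_ADDR(rxq, RB_CTRL));
	return 0;
}
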
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
704
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
716
rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
77
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
244
if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
80
if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
346
c = resync->priv->channels.c[priv_rx->rxq];
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
54
u32 rxq;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
571
c = priv->channels.c[priv_rx->rxq];
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
625
int rxq = sk_rx_queue_get(sk);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
627
if (unlikely(rxq == -1))
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
628
rxq = 0;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
630
return rxq;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
642
int rxq, err;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
676
rxq = mlx5e_ktls_sk_get_rxq(sk);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
677
priv_rx->rxq = rxq;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
680
priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
684
err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
696
err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
436
arfs_rule->rxq, arfs_rule->flow_id,
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
448
priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
527
priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
596
dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
600
priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
603
__func__, arfs_rule->filter_id, arfs_rule->rxq,
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
613
struct mlx5_flow_handle *rule, u16 rxq)
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
619
dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
622
priv->channel_stats[rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
624
"Failed to modify aRFS rule destination to rq=%d\n", rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
646
priv->channel_stats[arfs_rule->rxq]->rq.arfs_add++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
649
arfs_rule->rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
658
u16 rxq, u32 flow_id)
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
666
priv->channel_stats[rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
671
rule->rxq = rxq;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
762
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
768
priv->channel_stats[arfs_rule->rxq]->rq.arfs_request_out++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
769
arfs_rule->rxq = rxq_index;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
90
int rxq;
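
In the ktls_rx hits, mlx5e_ktls_sk_get_rxq() shows the fallback used before indexing per-queue state (channel stats, the direct TIR): sk_rx_queue_get() returns -1 when the socket has not recorded an RX queue, and the driver substitutes queue 0. A trivial model with the socket helper stubbed out:

#include <stdio.h>

/* Stand-in for sk_rx_queue_get(sk); -1 means the socket has no
 * recorded RX queue yet. */
static int sk_rx_queue_get_stub(int recorded)
{
	return recorded;
}

static int ktls_sk_get_rxq(int recorded)
{
	int rxq = sk_rx_queue_get_stub(recorded);

	if (rxq == -1) /* unlikely() in the driver */
		rxq = 0;
	return rxq;
}

int main(void)
{
	printf("%d %d\n", ktls_sk_get_rxq(-1), ktls_sk_get_rxq(5)); /* 0 5 */
	return 0;
}
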
drivers/net/ethernet/micrel/ks8851_common.c
227
static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
drivers/net/ethernet/micrel/ks8851_common.c
287
__skb_queue_tail(rxq, skb);
drivers/net/ethernet/micrel/ks8851_common.c
314
struct sk_buff_head rxq;
drivers/net/ethernet/micrel/ks8851_common.c
356
__skb_queue_head_init(&rxq);
drivers/net/ethernet/micrel/ks8851_common.c
357
ks8851_rx_pkts(ks, &rxq);
drivers/net/ethernet/micrel/ks8851_common.c
382
while ((skb = __skb_dequeue(&rxq)))
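
The ks8851 hits show a collect-then-drain shape: ks8851_rx_pkts() appends received skbs to a local sk_buff_head with __skb_queue_tail() while the chip is being serviced, and the caller drains the list with __skb_dequeue() afterwards, keeping the hand-off to the stack out of the register-access section. A userspace model, with a tiny singly-linked queue standing in for sk_buff_head (an assumed simplification):

#include <stdio.h>
#include <stdlib.h>

struct pkt { int id; struct pkt *next; };
struct pkt_head { struct pkt *head, *tail; };

static void queue_tail(struct pkt_head *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

static struct pkt *dequeue(struct pkt_head *q)
{
	struct pkt *p = q->head;

	if (p) {
		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
	}
	return p;
}

int main(void)
{
	struct pkt_head rxq = { 0 };
	struct pkt *p;

	for (int i = 0; i < 3; i++) {   /* "ks8851_rx_pkts(ks, &rxq)" */
		p = malloc(sizeof(*p));
		p->id = i;
		queue_tail(&rxq, p);
	}
	while ((p = dequeue(&rxq))) {   /* hand off after collection */
		printf("netif_rx(pkt %d)\n", p->id);
		free(p);
	}
	return 0;
}
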
drivers/net/ethernet/microsoft/mana/hw_channel.c
108
mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
drivers/net/ethernet/microsoft/mana/hw_channel.c
143
hwc->rxq->gdma_wq->id = val;
drivers/net/ethernet/microsoft/mana/hw_channel.c
171
hwc->rxq->msg_buf->gpa_mkey = val;
drivers/net/ethernet/microsoft/mana/hw_channel.c
246
struct hwc_wq *hwc_rxq = hwc->rxq;
drivers/net/ethernet/microsoft/mana/hw_channel.c
633
struct hwc_wq *hwc_rxq = hwc->rxq;
drivers/net/ethernet/microsoft/mana/hw_channel.c
664
struct gdma_queue *rq = hwc->rxq->gdma_wq;
drivers/net/ethernet/microsoft/mana/hw_channel.c
723
hwc->cq, &hwc->rxq);
drivers/net/ethernet/microsoft/mana/hw_channel.c
820
if (hwc->rxq)
drivers/net/ethernet/microsoft/mana/hw_channel.c
821
mana_hwc_destroy_wq(hwc, hwc->rxq);
drivers/net/ethernet/microsoft/mana/hw_channel.c
90
mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
drivers/net/ethernet/microsoft/mana/mana_bpf.c
107
rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
drivers/net/ethernet/microsoft/mana/mana_bpf.c
108
if (!rxq->xdp_rc) {
drivers/net/ethernet/microsoft/mana/mana_bpf.c
109
rxq->xdp_flush = true;
drivers/net/ethernet/microsoft/mana/mana_bpf.c
80
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
drivers/net/ethernet/microsoft/mana/mana_bpf.c
88
prog = rcu_dereference(rxq->bpf_prog);
drivers/net/ethernet/microsoft/mana/mana_bpf.c
93
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
drivers/net/ethernet/microsoft/mana/mana_bpf.c
98
rx_stats = &rxq->stats;
drivers/net/ethernet/microsoft/mana/mana_en.c
1671
static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
drivers/net/ethernet/microsoft/mana/mana_en.c
1677
init_completion(&rxq->fence_event);
drivers/net/ethernet/microsoft/mana/mana_en.c
1681
req.wq_obj_handle = rxq->rxobj;
drivers/net/ethernet/microsoft/mana/mana_en.c
1687
rxq->rxq_idx, err);
drivers/net/ethernet/microsoft/mana/mana_en.c
1694
rxq->rxq_idx, err, resp.hdr.status);
drivers/net/ethernet/microsoft/mana/mana_en.c
1701
if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
drivers/net/ethernet/microsoft/mana/mana_en.c
1703
rxq->rxq_idx);
drivers/net/ethernet/microsoft/mana/mana_en.c
1713
struct mana_rxq *rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
1717
rxq = apc->rxqs[rxq_idx];
drivers/net/ethernet/microsoft/mana/mana_en.c
1718
err = mana_fence_rq(apc, rxq);
drivers/net/ethernet/microsoft/mana/mana_en.c
1880
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
drivers/net/ethernet/microsoft/mana/mana_en.c
1886
curr_index = rxq->buf_index++;
drivers/net/ethernet/microsoft/mana/mana_en.c
1887
if (rxq->buf_index == rxq->num_rx_buf)
drivers/net/ethernet/microsoft/mana/mana_en.c
1888
rxq->buf_index = 0;
drivers/net/ethernet/microsoft/mana/mana_en.c
1890
recv_buf_oob = &rxq->rx_oobs[curr_index];
drivers/net/ethernet/microsoft/mana/mana_en.c
1892
err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
drivers/net/ethernet/microsoft/mana/mana_en.c
1900
static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
drivers/net/ethernet/microsoft/mana/mana_en.c
1903
struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
drivers/net/ethernet/microsoft/mana/mana_en.c
1918
skb_reserve(skb, rxq->headroom);
drivers/net/ethernet/microsoft/mana/mana_en.c
1925
struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
drivers/net/ethernet/microsoft/mana/mana_en.c
1927
struct mana_stats_rx *rx_stats = &rxq->stats;
drivers/net/ethernet/microsoft/mana/mana_en.c
1928
struct net_device *ndev = rxq->ndev;
drivers/net/ethernet/microsoft/mana/mana_en.c
1930
u16 rxq_idx = rxq->rxq_idx;
drivers/net/ethernet/microsoft/mana/mana_en.c
1937
rxq->rx_cq.work_done++;
drivers/net/ethernet/microsoft/mana/mana_en.c
1938
napi = &rxq->rx_cq.napi;
drivers/net/ethernet/microsoft/mana/mana_en.c
1945
act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
drivers/net/ethernet/microsoft/mana/mana_en.c
1947
if (act == XDP_REDIRECT && !rxq->xdp_rc)
drivers/net/ethernet/microsoft/mana/mana_en.c
1953
skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
drivers/net/ethernet/microsoft/mana/mana_en.c
2012
if (rxq->frag_count == 1)
drivers/net/ethernet/microsoft/mana/mana_en.c
2013
page_pool_recycle_direct(rxq->page_pool,
drivers/net/ethernet/microsoft/mana/mana_en.c
2016
page_pool_free_va(rxq->page_pool, buf_va, true);
drivers/net/ethernet/microsoft/mana/mana_en.c
2018
WARN_ON_ONCE(rxq->xdp_save_va);
drivers/net/ethernet/microsoft/mana/mana_en.c
2020
rxq->xdp_save_va = buf_va;
drivers/net/ethernet/microsoft/mana/mana_en.c
2028
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
drivers/net/ethernet/microsoft/mana/mana_en.c
2039
if (rxq->frag_count == 1) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2041
if (rxq->xdp_save_va) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2042
va = rxq->xdp_save_va;
drivers/net/ethernet/microsoft/mana/mana_en.c
2044
rxq->xdp_save_va = NULL;
drivers/net/ethernet/microsoft/mana/mana_en.c
2046
page = page_pool_dev_alloc_pages(rxq->page_pool);
drivers/net/ethernet/microsoft/mana/mana_en.c
2054
*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
drivers/net/ethernet/microsoft/mana/mana_en.c
2057
mana_put_rx_page(rxq, page, *from_pool);
drivers/net/ethernet/microsoft/mana/mana_en.c
2064
page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
drivers/net/ethernet/microsoft/mana/mana_en.c
2065
rxq->alloc_size);
drivers/net/ethernet/microsoft/mana/mana_en.c
2070
*da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
drivers/net/ethernet/microsoft/mana/mana_en.c
2077
static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
drivers/net/ethernet/microsoft/mana/mana_en.c
2085
va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
drivers/net/ethernet/microsoft/mana/mana_en.c
2088
if (!rxoob->from_pool || rxq->frag_count == 1)
drivers/net/ethernet/microsoft/mana/mana_en.c
2089
dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
drivers/net/ethernet/microsoft/mana/mana_en.c
2099
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
drivers/net/ethernet/microsoft/mana/mana_en.c
2103
struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
drivers/net/ethernet/microsoft/mana/mana_en.c
2104
struct net_device *ndev = rxq->ndev;
drivers/net/ethernet/microsoft/mana/mana_en.c
2120
rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
drivers/net/ethernet/microsoft/mana/mana_en.c
2130
complete(&rxq->fence_event);
drivers/net/ethernet/microsoft/mana/mana_en.c
2145
rxq->gdma_id, cq->gdma_id, rxq->rxobj);
drivers/net/ethernet/microsoft/mana/mana_en.c
2149
curr = rxq->buf_index;
drivers/net/ethernet/microsoft/mana/mana_en.c
2150
rxbuf_oob = &rxq->rx_oobs[curr];
drivers/net/ethernet/microsoft/mana/mana_en.c
2153
mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
drivers/net/ethernet/microsoft/mana/mana_en.c
2158
mana_rx_skb(old_buf, old_fp, oob, rxq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2161
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
drivers/net/ethernet/microsoft/mana/mana_en.c
2163
mana_post_pkt_rxq(rxq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2169
struct mana_rxq *rxq = cq->rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2182
rxq->xdp_flush = false;
drivers/net/ethernet/microsoft/mana/mana_en.c
2189
if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
drivers/net/ethernet/microsoft/mana/mana_en.c
2192
mana_process_rx_cqe(rxq, cq, &comp[i]);
drivers/net/ethernet/microsoft/mana/mana_en.c
2196
struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
drivers/net/ethernet/microsoft/mana/mana_en.c
2198
mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2201
if (rxq->xdp_flush)
drivers/net/ethernet/microsoft/mana/mana_en.c
2462
struct mana_rxq *rxq, bool napi_initialized)
drivers/net/ethernet/microsoft/mana/mana_en.c
2472
if (!rxq)
drivers/net/ethernet/microsoft/mana/mana_en.c
2475
debugfs_remove_recursive(rxq->mana_rx_debugfs);
drivers/net/ethernet/microsoft/mana/mana_en.c
2476
rxq->mana_rx_debugfs = NULL;
drivers/net/ethernet/microsoft/mana/mana_en.c
2478
napi = &rxq->rx_cq.napi;
drivers/net/ethernet/microsoft/mana/mana_en.c
2486
xdp_rxq_info_unreg(&rxq->xdp_rxq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2488
mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
drivers/net/ethernet/microsoft/mana/mana_en.c
2490
mana_deinit_cq(apc, &rxq->rx_cq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2492
if (rxq->xdp_save_va)
drivers/net/ethernet/microsoft/mana/mana_en.c
2493
put_page(virt_to_head_page(rxq->xdp_save_va));
drivers/net/ethernet/microsoft/mana/mana_en.c
2495
for (i = 0; i < rxq->num_rx_buf; i++) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2496
rx_oob = &rxq->rx_oobs[i];
drivers/net/ethernet/microsoft/mana/mana_en.c
2503
if (rxq->frag_count == 1 || !rx_oob->from_pool) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2506
mana_put_rx_page(rxq, page, rx_oob->from_pool);
drivers/net/ethernet/microsoft/mana/mana_en.c
2508
page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
drivers/net/ethernet/microsoft/mana/mana_en.c
2514
page_pool_destroy(rxq->page_pool);
drivers/net/ethernet/microsoft/mana/mana_en.c
2516
if (rxq->gdma_rq)
drivers/net/ethernet/microsoft/mana/mana_en.c
2517
mana_gd_destroy_queue(gc, rxq->gdma_rq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2519
kfree(rxq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2523
struct mana_rxq *rxq, struct device *dev)
drivers/net/ethernet/microsoft/mana/mana_en.c
2525
struct mana_port_context *mpc = netdev_priv(rxq->ndev);
drivers/net/ethernet/microsoft/mana/mana_en.c
2531
va = mana_get_rxbuf_pre(rxq, &da);
drivers/net/ethernet/microsoft/mana/mana_en.c
2533
va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
drivers/net/ethernet/microsoft/mana/mana_en.c
2542
rx_oob->sgl[0].size = rxq->datasize;
drivers/net/ethernet/microsoft/mana/mana_en.c
2552
struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
drivers/net/ethernet/microsoft/mana/mana_en.c
2560
WARN_ON(rxq->datasize == 0);
drivers/net/ethernet/microsoft/mana/mana_en.c
2565
for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2566
rx_oob = &rxq->rx_oobs[buf_idx];
drivers/net/ethernet/microsoft/mana/mana_en.c
2571
ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
drivers/net/ethernet/microsoft/mana/mana_en.c
2591
static int mana_push_wqe(struct mana_rxq *rxq)
drivers/net/ethernet/microsoft/mana/mana_en.c
2597
for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2598
rx_oob = &rxq->rx_oobs[buf_idx];
drivers/net/ethernet/microsoft/mana/mana_en.c
2600
err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
drivers/net/ethernet/microsoft/mana/mana_en.c
2609
static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
drivers/net/ethernet/microsoft/mana/mana_en.c
2611
struct mana_port_context *mpc = netdev_priv(rxq->ndev);
drivers/net/ethernet/microsoft/mana/mana_en.c
2615
pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
drivers/net/ethernet/microsoft/mana/mana_en.c
2617
pprm.napi = &rxq->rx_cq.napi;
drivers/net/ethernet/microsoft/mana/mana_en.c
2618
pprm.netdev = rxq->ndev;
drivers/net/ethernet/microsoft/mana/mana_en.c
2619
pprm.order = get_order(rxq->alloc_size);
drivers/net/ethernet/microsoft/mana/mana_en.c
2620
pprm.queue_idx = rxq->rxq_idx;
drivers/net/ethernet/microsoft/mana/mana_en.c
2626
if (rxq->frag_count > 1) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2632
rxq->page_pool = page_pool_create(&pprm);
drivers/net/ethernet/microsoft/mana/mana_en.c
2634
if (IS_ERR(rxq->page_pool)) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2635
ret = PTR_ERR(rxq->page_pool);
drivers/net/ethernet/microsoft/mana/mana_en.c
2636
rxq->page_pool = NULL;
drivers/net/ethernet/microsoft/mana/mana_en.c
2654
struct mana_rxq *rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2659
rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size);
drivers/net/ethernet/microsoft/mana/mana_en.c
2660
if (!rxq)
drivers/net/ethernet/microsoft/mana/mana_en.c
2663
rxq->ndev = ndev;
drivers/net/ethernet/microsoft/mana/mana_en.c
2664
rxq->num_rx_buf = apc->rx_queue_size;
drivers/net/ethernet/microsoft/mana/mana_en.c
2665
rxq->rxq_idx = rxq_idx;
drivers/net/ethernet/microsoft/mana/mana_en.c
2666
rxq->rxobj = INVALID_MANA_HANDLE;
drivers/net/ethernet/microsoft/mana/mana_en.c
2668
mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
drivers/net/ethernet/microsoft/mana/mana_en.c
2669
&rxq->headroom, &rxq->frag_count);
drivers/net/ethernet/microsoft/mana/mana_en.c
2671
err = mana_create_page_pool(rxq, gc);
drivers/net/ethernet/microsoft/mana/mana_en.c
2677
err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
drivers/net/ethernet/microsoft/mana/mana_en.c
2689
err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2694
cq = &rxq->rx_cq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2696
cq->rxq = rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2711
wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
drivers/net/ethernet/microsoft/mana/mana_en.c
2712
wq_spec.queue_size = rxq->gdma_rq->queue_size;
drivers/net/ethernet/microsoft/mana/mana_en.c
2720
&wq_spec, &cq_spec, &rxq->rxobj);
drivers/net/ethernet/microsoft/mana/mana_en.c
2724
rxq->gdma_rq->id = wq_spec.queue_index;
drivers/net/ethernet/microsoft/mana/mana_en.c
2727
rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
drivers/net/ethernet/microsoft/mana/mana_en.c
2730
rxq->gdma_id = rxq->gdma_rq->id;
drivers/net/ethernet/microsoft/mana/mana_en.c
2733
err = mana_push_wqe(rxq);
drivers/net/ethernet/microsoft/mana/mana_en.c
2746
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
drivers/net/ethernet/microsoft/mana/mana_en.c
2748
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
drivers/net/ethernet/microsoft/mana/mana_en.c
2749
rxq->page_pool));
drivers/net/ethernet/microsoft/mana/mana_en.c
2756
return rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2760
mana_destroy_rxq(apc, rxq, false);
drivers/net/ethernet/microsoft/mana/mana_en.c
2770
struct mana_rxq *rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2773
rxq = apc->rxqs[idx];
drivers/net/ethernet/microsoft/mana/mana_en.c
2776
rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
drivers/net/ethernet/microsoft/mana/mana_en.c
2777
debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
drivers/net/ethernet/microsoft/mana/mana_en.c
2778
debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
drivers/net/ethernet/microsoft/mana/mana_en.c
2779
debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
drivers/net/ethernet/microsoft/mana/mana_en.c
2780
debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
drivers/net/ethernet/microsoft/mana/mana_en.c
2781
&rxq->rx_cq.gdma_cq->head);
drivers/net/ethernet/microsoft/mana/mana_en.c
2782
debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
drivers/net/ethernet/microsoft/mana/mana_en.c
2783
&rxq->rx_cq.gdma_cq->tail);
drivers/net/ethernet/microsoft/mana/mana_en.c
2784
debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
drivers/net/ethernet/microsoft/mana/mana_en.c
2785
debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
drivers/net/ethernet/microsoft/mana/mana_en.c
2786
debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
drivers/net/ethernet/microsoft/mana/mana_en.c
2794
struct mana_rxq *rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2799
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
drivers/net/ethernet/microsoft/mana/mana_en.c
2800
if (!rxq) {
drivers/net/ethernet/microsoft/mana/mana_en.c
2806
u64_stats_init(&rxq->stats.syncp);
drivers/net/ethernet/microsoft/mana/mana_en.c
2808
apc->rxqs[i] = rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2821
struct mana_rxq *rxq;
drivers/net/ethernet/microsoft/mana/mana_en.c
2825
rxq = apc->rxqs[rxq_idx];
drivers/net/ethernet/microsoft/mana/mana_en.c
2826
if (!rxq)
drivers/net/ethernet/microsoft/mana/mana_en.c
2829
mana_destroy_rxq(apc, rxq, true);
drivers/net/ethernet/microsoft/mana/mana_en.c
62
static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
drivers/net/ethernet/microsoft/mana/mana_en.c
66
page_pool_put_full_page(rxq->page_pool, page, false);
drivers/net/ethernet/microsoft/mana/mana_en.c
702
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
drivers/net/ethernet/microsoft/mana/mana_en.c
704
struct net_device *ndev = rxq->ndev;
drivers/net/ethernet/microsoft/mana/mana_en.c
716
if (mpc->rxbpre_datasize != rxq->datasize) {
drivers/net/ethernet/microsoft/mana/mana_en.c
718
mpc->rxbpre_datasize, rxq->datasize);
drivers/net/ethernet/microsoft/mana/mana_en.c
722
if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
drivers/net/ethernet/microsoft/mana/mana_en.c
724
mpc->rxbpre_alloc_size, rxq->alloc_size);
drivers/net/ethernet/microsoft/mana/mana_en.c
728
if (mpc->rxbpre_headroom != rxq->headroom) {
drivers/net/ethernet/microsoft/mana/mana_en.c
730
mpc->rxbpre_headroom, rxq->headroom);
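
Two sizing rules are visible in the mana_create_page_pool() hits: the pool holds rx_queue_size / frag_count + 1 pages, since several RX fragments can share one page when frag_count > 1, and each page's order comes from get_order(alloc_size). A standalone check of that arithmetic (get_order() reimplemented for userspace; a 4 KiB base page is assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Userspace stand-in for the kernel's get_order(): smallest order such
 * that (1 << order) pages cover size bytes. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned int rx_queue_size = 512, frag_count = 2; /* illustrative */
	unsigned long alloc_size = 8192;

	printf("pool_size=%u order=%d\n",
	       rx_queue_size / frag_count + 1, get_order(alloc_size));
	return 0; /* pool_size=257 order=1 */
}
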
drivers/net/ethernet/pensando/ionic/ionic_dev.h
256
struct ionic_rxq_desc *rxq;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1041
struct ionic_qcq *rxq;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1061
lif->kern_pid, NULL, &rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1065
rxq->q.features = features;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1067
ionic_link_qcq_interrupts(lif->adminqcq, rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1068
ionic_debugfs_add_qcq(lif, rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1070
lif->hwstamp_rxq = rxq;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1073
err = ionic_lif_rxq_init(lif, rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1078
ionic_rx_fill(&rxq->q, NULL);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1079
err = ionic_qcq_enable(rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1088
ionic_lif_qcq_deinit(lif, rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1091
ionic_debugfs_del_qcq(rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1092
ionic_qcq_free(lif, rxq);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1093
devm_kfree(lif->ionic->dev, rxq);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
473
struct ionic_queue *rxq,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
487
xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
491
page_pool_dma_sync_for_cpu(rxq->page_pool, buf_info->page,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
522
page_pool_dma_sync_for_cpu(rxq->page_pool, bi->page,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
543
ionic_rx_put_buf_direct(rxq, buf_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
554
txq = rxq->partner;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
577
ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
587
ionic_xdp_rx_unlink_bufs(rxq, buf_info, nbufs);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
588
rxq->xdp_flush = true;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
599
ionic_rx_put_buf_direct(rxq, buf_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
629
q->index, comp->status, comp->len, q->rxq[q->head_idx].len);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
815
desc = &q->rxq[q->head_idx];
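
The ionic hits index a flat descriptor array directly: desc = &q->rxq[q->head_idx]. A sketch of filling slots through such a head index follows; the power-of-two ring and the masked advance are assumptions of this model, chosen to show the shape rather than quote the driver:

#include <stdio.h>
#include <stdint.h>

struct rxq_desc { uint64_t addr; uint16_t len; };

struct queue {
	struct rxq_desc rxq[8];  /* num_descs must be a power of two here */
	unsigned int head_idx;
	unsigned int num_descs;
};

static struct rxq_desc *fill_next(struct queue *q, uint64_t addr, uint16_t len)
{
	struct rxq_desc *desc = &q->rxq[q->head_idx];

	desc->addr = addr;
	desc->len = len;
	/* masked advance: assumption of this sketch, not quoted code */
	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
	return desc;
}

int main(void)
{
	struct queue q = { .num_descs = 8 };

	for (int i = 0; i < 10; i++)
		fill_next(&q, 0x1000 + i, 2048);
	printf("head_idx=%u\n", q.head_idx); /* 2: wrapped once */
	return 0;
}
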
drivers/net/ethernet/qlogic/qede/qede.h
470
struct qede_rx_queue *rxq;
drivers/net/ethernet/qlogic/qede/qede.h
535
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
drivers/net/ethernet/qlogic/qede/qede.h
574
bool qede_has_rx_work(struct qede_rx_queue *rxq);
drivers/net/ethernet/qlogic/qede/qede.h
576
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
drivers/net/ethernet/qlogic/qede/qede.h
577
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1550
struct qede_rx_queue *rxq = NULL;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1558
rxq = edev->fp_array[i].rxq;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1563
if (!rxq) {
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1573
if (!qede_has_rx_work(rxq)) {
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1579
cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1582
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1583
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1589
rxq->rx_headroom);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1599
qede_recycle_rx_bd_ring(rxq, 1);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1600
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1605
qede_recycle_rx_bd_ring(rxq, 1);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1606
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1614
qede_update_rx_prod(edev, rxq);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
2183
fp->rxq->handle);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
2241
rx_handle = fp->rxq->handle;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
286
struct qede_rx_queue *rxq, u8 **buf)
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
291
ethtool_sprintf(buf, "%d: %s", rxq->rxq_id,
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
312
qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
367
static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
372
**buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
393
qede_get_ethtool_stats_rxq(fp->rxq, &buf);
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
773
rx_handle = fp->rxq->handle;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
859
fp->rxq->handle);
drivers/net/ethernet/qlogic/qede/qede_filter.c
599
rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1027
qede_reuse_page(rxq, &tpa_info->buffer);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1082
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1091
xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1105
rxq->xdp_no_pass++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1110
if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
1111
qede_recycle_rx_bd_ring(rxq, 1);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1123
dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1124
rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1129
dma_sync_single_for_device(rxq->dev,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1131
*len, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1136
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1140
if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
1141
qede_recycle_rx_bd_ring(rxq, 1);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1147
dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1148
rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1155
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1164
qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1171
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1185
u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
drivers/net/ethernet/qlogic/qede/qede_fp.c
1196
if (unlikely(qede_alloc_rx_buffer(rxq, true)))
drivers/net/ethernet/qlogic/qede/qede_fp.c
1202
bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1203
bd = &rxq->sw_rx_ring[bd_cons_idx];
drivers/net/ethernet/qlogic/qede/qede_fp.c
1204
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1206
dma_unmap_page(rxq->dev, bd->mapping,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1210
rxq->rx_headroom, cur_size, PAGE_SIZE);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1226
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1232
qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1235
qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1246
struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_fp.c
1248
struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1259
cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1273
return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1278
bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1279
bd = &rxq->sw_rx_ring[bd_cons_idx];
drivers/net/ethernet/qlogic/qede/qede_fp.c
1283
pad = fp_cqe->placement_offset + rxq->rx_headroom;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1287
if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1298
rxq->rx_ip_frags++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1300
rxq->rx_hw_errors++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1306
skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1308
rxq->rx_alloc_errors++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1309
qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1317
u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1321
qede_recycle_rx_bd_ring(rxq, unmapped_frags);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1331
skb_record_rx_queue(skb, rxq->rxq_id);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1335
qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
drivers/net/ethernet/qlogic/qede/qede_fp.c
1342
struct qede_rx_queue *rxq = fp->rxq;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1347
hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1348
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1359
rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1360
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1361
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1365
rxq->rcv_pkts += rcv_pkts;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1368
while (rxq->num_rx_buffers - rxq->filled_buffers)
drivers/net/ethernet/qlogic/qede/qede_fp.c
1369
if (qede_alloc_rx_buffer(rxq, false))
drivers/net/ethernet/qlogic/qede/qede_fp.c
1373
qede_update_rx_prod(edev, rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
1395
if (qede_has_rx_work(fp->rxq))
drivers/net/ethernet/qlogic/qede/qede_fp.c
1440
qede_has_rx_work(fp->rxq)) ?
drivers/net/ethernet/qlogic/qede/qede_fp.c
28
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
drivers/net/ethernet/qlogic/qede/qede_fp.c
39
if (allow_lazy && likely(rxq->filled_buffers > 12)) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
40
rxq->filled_buffers--;
drivers/net/ethernet/qlogic/qede/qede_fp.c
503
bool qede_has_rx_work(struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_fp.c
51
mapping = dma_map_page(rxq->dev, data, 0,
drivers/net/ethernet/qlogic/qede/qede_fp.c
510
hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
drivers/net/ethernet/qlogic/qede/qede_fp.c
511
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
516
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_fp.c
518
qed_chain_consume(&rxq->rx_bd_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
519
rxq->sw_rx_cons++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
52
PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
525
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
528
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
53
if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
532
curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
drivers/net/ethernet/qlogic/qede/qede_fp.c
539
rxq->rx_headroom);
drivers/net/ethernet/qlogic/qede/qede_fp.c
541
rxq->sw_rx_prod++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
548
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
drivers/net/ethernet/qlogic/qede/qede_fp.c
553
curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
drivers/net/ethernet/qlogic/qede/qede_fp.c
554
qede_reuse_page(rxq, curr_cons);
drivers/net/ethernet/qlogic/qede/qede_fp.c
555
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
559
static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
563
curr_cons->page_offset += rxq->rx_buf_seg_size;
drivers/net/ethernet/qlogic/qede/qede_fp.c
566
if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
570
curr_cons->page_offset -= rxq->rx_buf_seg_size;
drivers/net/ethernet/qlogic/qede/qede_fp.c
575
dma_unmap_page(rxq->dev, curr_cons->mapping,
drivers/net/ethernet/qlogic/qede/qede_fp.c
576
PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
58
sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
drivers/net/ethernet/qlogic/qede/qede_fp.c
583
qede_reuse_page(rxq, curr_cons);
drivers/net/ethernet/qlogic/qede/qede_fp.c
589
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_fp.c
591
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
592
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
605
internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
drivers/net/ethernet/qlogic/qede/qede_fp.c
64
rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
drivers/net/ethernet/qlogic/qede/qede_fp.c
640
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
666
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
669
struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
drivers/net/ethernet/qlogic/qede/qede_fp.c
671
struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
drivers/net/ethernet/qlogic/qede/qede_fp.c
68
rxq->rx_headroom);
drivers/net/ethernet/qlogic/qede/qede_fp.c
680
current_bd->page_offset + rxq->rx_headroom,
drivers/net/ethernet/qlogic/qede/qede_fp.c
683
if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
691
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
694
skb->truesize += rxq->rx_buf_seg_size;
drivers/net/ethernet/qlogic/qede/qede_fp.c
70
rxq->sw_rx_prod++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
701
qede_recycle_rx_bd_ring(rxq, 1);
drivers/net/ethernet/qlogic/qede/qede_fp.c
71
rxq->filled_buffers++;
drivers/net/ethernet/qlogic/qede/qede_fp.c
741
qede_build_skb(struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
748
skb = build_skb(buf, rxq->rx_buf_seg_size);
drivers/net/ethernet/qlogic/qede/qede_fp.c
761
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
767
skb = qede_build_skb(rxq, bd, len, pad);
drivers/net/ethernet/qlogic/qede/qede_fp.c
768
bd->page_offset += rxq->rx_buf_seg_size;
drivers/net/ethernet/qlogic/qede/qede_fp.c
771
if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
774
bd->page_offset -= rxq->rx_buf_seg_size;
drivers/net/ethernet/qlogic/qede/qede_fp.c
781
qede_reuse_page(rxq, bd);
drivers/net/ethernet/qlogic/qede/qede_fp.c
785
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
792
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
810
qede_reuse_page(rxq, bd);
drivers/net/ethernet/qlogic/qede/qede_fp.c
814
skb = qede_build_skb(rxq, bd, len, pad);
drivers/net/ethernet/qlogic/qede/qede_fp.c
816
if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
827
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
833
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
836
struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
drivers/net/ethernet/qlogic/qede/qede_fp.c
840
sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
drivers/net/ethernet/qlogic/qede/qede_fp.c
841
pad = cqe->placement_offset + rxq->rx_headroom;
drivers/net/ethernet/qlogic/qede/qede_fp.c
843
tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
drivers/net/ethernet/qlogic/qede/qede_fp.c
857
qede_rx_bd_ring_consume(rxq);
drivers/net/ethernet/qlogic/qede/qede_fp.c
879
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
drivers/net/ethernet/qlogic/qede/qede_fp.c
954
skb_record_rx_queue(skb, fp->rxq->rxq_id);
drivers/net/ethernet/qlogic/qede/qede_fp.c
955
qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
drivers/net/ethernet/qlogic/qede/qede_fp.c
959
struct qede_rx_queue *rxq,
drivers/net/ethernet/qlogic/qede/qede_fp.c
965
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
drivers/net/ethernet/qlogic/qede/qede_fp.c
977
struct qede_rx_queue *rxq = fp->rxq;
drivers/net/ethernet/qlogic/qede/qede_fp.c
982
tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
drivers/net/ethernet/qlogic/qede/qede_fp.c
986
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
drivers/net/ethernet/qlogic/qede/qede_fp.c
987
PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_fp.c
990
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
drivers/net/ethernet/qlogic/qede/qede_main.c
1014
fp->rxq = kzalloc_obj(*fp->rxq);
drivers/net/ethernet/qlogic/qede/qede_main.c
1015
if (!fp->rxq)
drivers/net/ethernet/qlogic/qede/qede_main.c
1506
struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_main.c
1510
for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
drivers/net/ethernet/qlogic/qede/qede_main.c
1514
rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
drivers/net/ethernet/qlogic/qede/qede_main.c
1518
rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
drivers/net/ethernet/qlogic/qede/qede_main.c
1525
static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_main.c
1528
qede_free_rx_buffers(edev, rxq);
drivers/net/ethernet/qlogic/qede/qede_main.c
1531
kfree(rxq->sw_rx_ring);
drivers/net/ethernet/qlogic/qede/qede_main.c
1534
edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
drivers/net/ethernet/qlogic/qede/qede_main.c
1535
edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_main.c
1538
static void qede_set_tpa_param(struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_main.c
1543
struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
drivers/net/ethernet/qlogic/qede/qede_main.c
1550
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
drivers/net/ethernet/qlogic/qede/qede_main.c
1559
rxq->num_rx_buffers = edev->q_num_rx_buffers;
drivers/net/ethernet/qlogic/qede/qede_main.c
1561
rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
drivers/net/ethernet/qlogic/qede/qede_main.c
1563
rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
drivers/net/ethernet/qlogic/qede/qede_main.c
1564
size = rxq->rx_headroom +
drivers/net/ethernet/qlogic/qede/qede_main.c
1568
if (rxq->rx_buf_size + size > PAGE_SIZE)
drivers/net/ethernet/qlogic/qede/qede_main.c
1569
rxq->rx_buf_size = PAGE_SIZE - size;
drivers/net/ethernet/qlogic/qede/qede_main.c
1575
size = size + rxq->rx_buf_size;
drivers/net/ethernet/qlogic/qede/qede_main.c
1576
rxq->rx_buf_seg_size = roundup_pow_of_two(size);
drivers/net/ethernet/qlogic/qede/qede_main.c
1578
rxq->rx_buf_seg_size = PAGE_SIZE;
drivers/net/ethernet/qlogic/qede/qede_main.c
1583
size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
drivers/net/ethernet/qlogic/qede/qede_main.c
1584
rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
drivers/net/ethernet/qlogic/qede/qede_main.c
1585
if (!rxq->sw_rx_ring) {
drivers/net/ethernet/qlogic/qede/qede_main.c
1596
rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
drivers/net/ethernet/qlogic/qede/qede_main.c
1605
rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
drivers/net/ethernet/qlogic/qede/qede_main.c
1610
rxq->filled_buffers = 0;
drivers/net/ethernet/qlogic/qede/qede_main.c
1611
for (i = 0; i < rxq->num_rx_buffers; i++) {
drivers/net/ethernet/qlogic/qede/qede_main.c
1612
rc = qede_alloc_rx_buffer(rxq, false);
drivers/net/ethernet/qlogic/qede/qede_main.c
1622
qede_set_tpa_param(rxq);
drivers/net/ethernet/qlogic/qede/qede_main.c
1683
qede_free_mem_rxq(edev, fp->rxq);
drivers/net/ethernet/qlogic/qede/qede_main.c
1708
rc = qede_alloc_mem_rxq(edev, fp->rxq);
drivers/net/ethernet/qlogic/qede/qede_main.c
1840
fp->rxq->rxq_id = rxq_index++;
drivers/net/ethernet/qlogic/qede/qede_main.c
1844
fp->rxq->data_direction = DMA_BIDIRECTIONAL;
drivers/net/ethernet/qlogic/qede/qede_main.c
1846
fp->rxq->data_direction = DMA_FROM_DEVICE;
drivers/net/ethernet/qlogic/qede/qede_main.c
1847
fp->rxq->dev = &edev->pdev->dev;
drivers/net/ethernet/qlogic/qede/qede_main.c
1850
WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
drivers/net/ethernet/qlogic/qede/qede_main.c
1851
fp->rxq->rxq_id, 0) < 0);
drivers/net/ethernet/qlogic/qede/qede_main.c
1853
if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
drivers/net/ethernet/qlogic/qede/qede_main.c
2141
rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
drivers/net/ethernet/qlogic/qede/qede_main.c
2154
bpf_prog_put(fp->rxq->xdp_prog);
drivers/net/ethernet/qlogic/qede/qede_main.c
2265
struct qede_rx_queue *rxq = fp->rxq;
drivers/net/ethernet/qlogic/qede/qede_main.c
2270
q_params.queue_id = rxq->rxq_id;
drivers/net/ethernet/qlogic/qede/qede_main.c
2276
qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_main.c
2277
page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
drivers/net/ethernet/qlogic/qede/qede_main.c
2280
rxq->rx_buf_size,
drivers/net/ethernet/qlogic/qede/qede_main.c
2281
rxq->rx_bd_ring.p_phys_addr,
drivers/net/ethernet/qlogic/qede/qede_main.c
2291
rxq->hw_rxq_prod_addr = ret_params.p_prod;
drivers/net/ethernet/qlogic/qede/qede_main.c
2292
rxq->handle = ret_params.p_handle;
drivers/net/ethernet/qlogic/qede/qede_main.c
2295
rxq->hw_cons_ptr = val;
drivers/net/ethernet/qlogic/qede/qede_main.c
2297
qede_update_rx_prod(edev, rxq);
drivers/net/ethernet/qlogic/qede/qede_main.c
2306
fp->rxq->xdp_prog = edev->xdp_prog;
drivers/net/ethernet/qlogic/qede/qede_main.c
2862
if (qede_has_rx_work(fp->rxq))
drivers/net/ethernet/qlogic/qede/qede_main.c
2869
if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
drivers/net/ethernet/qlogic/qede/qede_main.c
2870
qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
drivers/net/ethernet/qlogic/qede/qede_main.c
946
if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
drivers/net/ethernet/qlogic/qede/qede_main.c
947
xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
drivers/net/ethernet/qlogic/qede/qede_main.c
948
kfree(fp->rxq);
drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
455
struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1142
(priv->rxq[queue_num])->irq_no,
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1144
dev->name, priv->rxq[queue_num]);
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1442
for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1443
priv->rxq[qnum]->dirty_rx++) {
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1444
unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1447
p = priv->rxq[qnum]->dma_rx + entry;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1449
if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1457
priv->rxq[qnum]->rx_skbuff[entry] = skb;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1458
priv->rxq[qnum]->rx_skbuff_dma[entry] =
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1463
priv->rxq[qnum]->rx_skbuff_dma[entry];
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1486
unsigned int entry = priv->rxq[qnum]->cur_rx;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1497
p = priv->rxq[qnum]->dma_rx + entry;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1504
next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1505
prefetch(priv->rxq[qnum]->dma_rx + next_entry);
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1520
skb = priv->rxq[qnum]->rx_skbuff[entry];
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1528
priv->rxq[qnum]->rx_skbuff[entry] = NULL;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1670
struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1671
struct sxgbe_priv_data *priv = rxq->priv_ptr;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1674
status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1678
priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1689
rxq->queue_no, priv->rx_tc);
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
326
priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
584
priv->rxq[queue_num], rx_rsize);
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
593
priv->rxq[queue_num]->priv_ptr = priv;
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
607
free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
659
free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
682
priv->rxq[queue_num] = devm_kmalloc(priv->device,
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
684
if (!priv->rxq[queue_num])
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
968
(priv->rxq[queue_num])->dma_rx_phy,
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
133
priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
134
if (priv->rxq[i]->irq_no <= 0) {
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
154
irq_dispose_mapping(priv->rxq[i]->irq_no);
drivers/net/ethernet/sfc/ptp.c
1182
while ((skb = skb_dequeue(&ptp->rxq))) {
drivers/net/ethernet/sfc/ptp.c
1194
skb_queue_head(&ptp->rxq, skb);
drivers/net/ethernet/sfc/ptp.c
1472
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
drivers/net/ethernet/sfc/ptp.c
1580
skb_queue_head_init(&ptp->rxq);
drivers/net/ethernet/sfc/ptp.c
1687
skb_queue_purge(&efx->ptp_data->rxq);
drivers/net/ethernet/sfc/ptp.c
1779
skb_queue_tail(&ptp->rxq, skb);
drivers/net/ethernet/sfc/ptp.c
296
struct sk_buff_head rxq;
drivers/net/ethernet/sfc/siena/ptp.c
1231
while ((skb = skb_dequeue(&ptp->rxq))) {
drivers/net/ethernet/sfc/siena/ptp.c
1246
skb_queue_head(&ptp->rxq, skb);
drivers/net/ethernet/sfc/siena/ptp.c
1362
efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
drivers/net/ethernet/sfc/siena/ptp.c
1459
skb_queue_head_init(&ptp->rxq);
drivers/net/ethernet/sfc/siena/ptp.c
1568
skb_queue_purge(&efx->ptp_data->rxq);
drivers/net/ethernet/sfc/siena/ptp.c
1693
skb_queue_tail(&ptp->rxq, skb);
drivers/net/ethernet/sfc/siena/ptp.c
288
struct sk_buff_head rxq;
drivers/net/ethernet/sfc/siena/siena_sriov.c
766
unsigned vf_rxq = req->u.mac_filter.rxq;
drivers/net/ethernet/sfc/siena/vfdi.h
195
u32 rxq;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5166
bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7759
u32 rxq;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7949
rxq = priv->plat->rx_queues_to_use;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7952
priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
512
struct xdp_rxq_info *rxq;
drivers/net/ethernet/ti/am65-cpsw-nuss.c
525
rxq = &common->ports[port].xdp_rxq[id];
drivers/net/ethernet/ti/am65-cpsw-nuss.c
527
if (xdp_rxq_info_is_reg(rxq))
drivers/net/ethernet/ti/am65-cpsw-nuss.c
528
xdp_rxq_info_unreg(rxq);
drivers/net/ethernet/ti/am65-cpsw-nuss.c
570
struct xdp_rxq_info *rxq;
drivers/net/ethernet/ti/am65-cpsw-nuss.c
593
rxq = &common->ports[port].xdp_rxq[id];
drivers/net/ethernet/ti/am65-cpsw-nuss.c
594
ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
drivers/net/ethernet/ti/am65-cpsw-nuss.c
599
ret = xdp_rxq_info_reg_mem_model(rxq,
drivers/net/ethernet/ti/cpsw_priv.c
1165
struct xdp_rxq_info *rxq;
drivers/net/ethernet/ti/cpsw_priv.c
1170
rxq = &priv->xdp_rxq[ch];
drivers/net/ethernet/ti/cpsw_priv.c
1172
ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
drivers/net/ethernet/ti/cpsw_priv.c
1176
ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
drivers/net/ethernet/ti/cpsw_priv.c
1178
xdp_rxq_info_unreg(rxq);
drivers/net/ethernet/ti/cpsw_priv.c
1185
struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
drivers/net/ethernet/ti/cpsw_priv.c
1187
if (!xdp_rxq_info_is_reg(rxq))
drivers/net/ethernet/ti/cpsw_priv.c
1190
xdp_rxq_info_unreg(rxq);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
586
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
drivers/net/ethernet/ti/icssg/icssg_prueth.c
588
if (xdp_rxq_info_is_reg(rxq))
drivers/net/ethernet/ti/icssg/icssg_prueth.c
589
xdp_rxq_info_unreg(rxq);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
594
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
drivers/net/ethernet/ti/icssg/icssg_prueth.c
599
ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
604
ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
607
xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
609
ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
drivers/net/ethernet/xscale/ixp4xx_eth.c
1057
err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
drivers/net/ethernet/xscale/ixp4xx_eth.c
1086
qmgr_release_queue(port->plat->rxq);
drivers/net/ethernet/xscale/ixp4xx_eth.c
1097
qmgr_release_queue(port->plat->rxq);
drivers/net/ethernet/xscale/ixp4xx_eth.c
1256
msg.byte5 = port->plat->rxq | 0x80;
drivers/net/ethernet/xscale/ixp4xx_eth.c
1257
msg.byte7 = port->plat->rxq << 4;
drivers/net/ethernet/xscale/ixp4xx_eth.c
1325
qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
drivers/net/ethernet/xscale/ixp4xx_eth.c
1346
qmgr_disable_irq(port->plat->rxq);
drivers/net/ethernet/xscale/ixp4xx_eth.c
1362
while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
drivers/net/ethernet/xscale/ixp4xx_eth.c
1473
plat->rxq = queue_spec.args[0];
drivers/net/ethernet/xscale/ixp4xx_eth.c
166
u8 rxq; /* configurable, currently 0 - 31 only */
drivers/net/ethernet/xscale/ixp4xx_eth.c
701
qmgr_disable_irq(port->plat->rxq);
drivers/net/ethernet/xscale/ixp4xx_eth.c
709
unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
drivers/net/ethernet/xscale/ixp4xx_eth.c
725
if ((n = queue_get_desc(rxq, port, 0)) < 0) {
drivers/net/ethernet/xscale/ixp4xx_eth.c
730
qmgr_enable_irq(rxq);
drivers/net/ethernet/xscale/ixp4xx_eth.c
731
if (!qmgr_stat_below_low_watermark(rxq) &&
drivers/net/ethernet/xscale/ixp4xx_eth.c
736
qmgr_disable_irq(rxq);
drivers/net/netdevsim/netdev.c
132
int rxq;
drivers/net/netdevsim/netdev.c
154
rxq = skb_get_queue_mapping(skb);
drivers/net/netdevsim/netdev.c
155
if (rxq >= peer_dev->num_rx_queues)
drivers/net/netdevsim/netdev.c
156
rxq = rxq % peer_dev->num_rx_queues;
drivers/net/netdevsim/netdev.c
157
rq = peer_ns->rq[rxq];
drivers/net/tap.c
199
__u32 rxq;
drivers/net/tap.c
208
rxq = skb_get_hash(skb);
drivers/net/tap.c
209
if (rxq) {
drivers/net/tap.c
210
queue = rcu_dereference(tap->taps[rxq % numvtaps]);
drivers/net/tap.c
215
rxq = skb_get_rx_queue(skb);
drivers/net/tap.c
217
while (unlikely(rxq >= numvtaps))
drivers/net/tap.c
218
rxq -= numvtaps;
drivers/net/tap.c
220
queue = rcu_dereference(tap->taps[rxq]);
drivers/net/usb/lan78xx.c
3477
temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
drivers/net/usb/lan78xx.c
3480
while (!skb_queue_empty(&dev->rxq) ||
drivers/net/usb/lan78xx.c
4026
state = defer_bh(dev, skb, &dev->rxq, state);
drivers/net/usb/lan78xx.c
4040
spin_lock_irqsave(&dev->rxq.lock, lockflags);
drivers/net/usb/lan78xx.c
4049
lan78xx_queue_skb(&dev->rxq, skb, rx_start);
drivers/net/usb/lan78xx.c
4073
spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
drivers/net/usb/lan78xx.c
425
struct sk_buff_head rxq;
drivers/net/usb/lan78xx.c
4445
unlink_urbs(dev, &dev->rxq);
drivers/net/usb/lan78xx.c
4654
skb_queue_head_init(&dev->rxq);
drivers/net/usb/usbnet.c
1148
unlink_urbs(dev, &dev->rxq);
drivers/net/usb/usbnet.c
1215
unlink_urbs(dev, &dev->rxq);
drivers/net/usb/usbnet.c
1555
for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
drivers/net/usb/usbnet.c
1620
if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
drivers/net/usb/usbnet.c
1631
int temp = dev->rxq.qlen;
drivers/net/usb/usbnet.c
1636
if (temp != dev->rxq.qlen)
drivers/net/usb/usbnet.c
1639
temp, dev->rxq.qlen);
drivers/net/usb/usbnet.c
1640
if (dev->rxq.qlen < RX_QLEN(dev))
drivers/net/usb/usbnet.c
1780
skb_queue_head_init (&dev->rxq);
drivers/net/usb/usbnet.c
528
spin_lock_irqsave(&dev->rxq.lock, lockflags);
drivers/net/usb/usbnet.c
556
__usbnet_queue_skb(&dev->rxq, skb, rx_start);
drivers/net/usb/usbnet.c
562
spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
drivers/net/usb/usbnet.c
677
state = defer_bh(dev, skb, &dev->rxq, state);
drivers/net/usb/usbnet.c
783
(void) unlink_urbs (dev, &dev->rxq);
drivers/net/usb/usbnet.c
815
unlink_urbs(dev, &dev->rxq);
drivers/net/usb/usbnet.c
818
wait_skb_queue_empty(&dev->rxq);
drivers/net/veth.c
355
int ret, rxq;
drivers/net/veth.c
365
rxq = skb_get_queue_mapping(skb);
drivers/net/veth.c
366
if (rxq < rcv->real_num_rx_queues) {
drivers/net/veth.c
367
rq = &rcv_priv->rq[rxq];
drivers/net/veth.c
391
txq = netdev_get_tx_queue(dev, rxq);
drivers/net/veth.c
654
xdp->rxq = &rq->xdp_rxq;
drivers/net/veth.c
666
xdp->rxq->mem.type = frame->mem_type;
drivers/net/veth.c
678
xdp->rxq->mem.type = frame->mem_type;
drivers/net/veth.c
827
xdp->rxq->mem = rq->xdp_mem;
drivers/net/veth.c
839
xdp->rxq->mem = rq->xdp_mem;
drivers/net/virtio_net.c
658
static int rxq2vq(int rxq)
drivers/net/virtio_net.c
660
return rxq * 2;
drivers/net/virtio_net.c
6683
if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
drivers/net/virtio_net.c
6686
vi = netdev_priv(xdp->rxq->dev);
drivers/net/wan/ixp4xx_hss.c
1125
qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
drivers/net/wan/ixp4xx_hss.c
1161
qmgr_disable_irq(port->rxq);
drivers/net/wan/ixp4xx_hss.c
1169
while (queue_get_desc(port->rxq, port, 0) >= 0)
drivers/net/wan/ixp4xx_hss.c
1451
port->rxq = queue_spec.args[0];
drivers/net/wan/ixp4xx_hss.c
264
unsigned int rxq;
drivers/net/wan/ixp4xx_hss.c
656
qmgr_disable_irq(port->rxq);
drivers/net/wan/ixp4xx_hss.c
664
unsigned int rxq = port->rxq;
drivers/net/wan/ixp4xx_hss.c
681
n = queue_get_desc(rxq, port, 0);
drivers/net/wan/ixp4xx_hss.c
688
qmgr_enable_irq(rxq);
drivers/net/wan/ixp4xx_hss.c
689
if (!qmgr_stat_empty(rxq) &&
drivers/net/wan/ixp4xx_hss.c
696
qmgr_disable_irq(rxq);
drivers/net/wan/ixp4xx_hss.c
932
err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0,
drivers/net/wan/ixp4xx_hss.c
958
qmgr_release_queue(port->rxq);
drivers/net/wan/ixp4xx_hss.c
969
qmgr_release_queue(port->rxq);
drivers/net/wireless/ath/ath6kl/htc_mbox.c
1910
struct list_head *rxq,
drivers/net/wireless/ath/ath6kl/htc_mbox.c
1919
n_scat_pkt = get_queue_depth(rxq);
drivers/net/wireless/ath/ath6kl/htc_mbox.c
1922
if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
drivers/net/wireless/ath/ath6kl/htc_mbox.c
1935
__func__, get_queue_depth(rxq), n_scat_pkt);
drivers/net/wireless/ath/ath6kl/htc_mbox.c
1942
get_queue_depth(rxq), n_scat_pkt);
drivers/net/wireless/ath/ath6kl/htc_mbox.c
1952
packet = list_first_entry(rxq, struct htc_packet, list);
drivers/net/wireless/ath/ath6kl/htc_mbox.c
1959
list_add(&packet->list, rxq);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2646
struct ipw2100_bd_queue *rxq = &priv->rx_queue;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2659
if (r >= rxq->entries) {
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2664
i = (rxq->next + 1) % rxq->entries;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2736
rxq->drv[i].status.info.field = 0;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2738
i = (i + 1) % rxq->entries;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2743
rxq->next = (i ? i : rxq->entries) - 1;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
2746
IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, rxq->next);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11741
if (priv->rxq) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11742
ipw_rx_queue_free(priv, priv->rxq);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11743
priv->rxq = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3382
struct ipw_rx_queue *rxq)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3387
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3389
INIT_LIST_HEAD(&rxq->rx_free);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3390
INIT_LIST_HEAD(&rxq->rx_used);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3396
if (rxq->pool[i].skb != NULL) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3398
rxq->pool[i].dma_addr,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3400
dev_kfree_skb_irq(rxq->pool[i].skb);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3401
rxq->pool[i].skb = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3403
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3408
rxq->read = rxq->write = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3409
rxq->free_count = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3410
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3474
if (!priv->rxq)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3475
priv->rxq = ipw_rx_queue_alloc(priv);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3477
ipw_rx_queue_reset(priv, priv->rxq);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3478
if (!priv->rxq) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3597
ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3608
if (priv->rxq) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3609
ipw_rx_queue_free(priv, priv->rxq);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3610
priv->rxq = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5093
struct ipw_rx_queue *rxq = priv->rxq;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5099
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5100
write = rxq->write;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5101
while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5102
element = rxq->rx_free.next;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5106
ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5108
rxq->queue[rxq->write] = rxb;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5109
rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5110
rxq->free_count--;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5112
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5116
if (rxq->free_count <= RX_LOW_WATERMARK)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5120
if (write != rxq->write)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5121
ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5133
struct ipw_rx_queue *rxq = priv->rxq;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5138
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5139
while (!list_empty(&rxq->rx_used)) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5140
element = rxq->rx_used.next;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5157
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5158
rxq->free_count++;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5160
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5179
static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5183
if (!rxq)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5187
if (rxq->pool[i].skb != NULL) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5189
rxq->pool[i].dma_addr,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5191
dev_kfree_skb(rxq->pool[i].skb);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5195
kfree(rxq);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5200
struct ipw_rx_queue *rxq;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5203
rxq = kzalloc_obj(*rxq);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5204
if (unlikely(!rxq)) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5208
spin_lock_init(&rxq->lock);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5209
INIT_LIST_HEAD(&rxq->rx_free);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5210
INIT_LIST_HEAD(&rxq->rx_used);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5214
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5218
rxq->read = rxq->write = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5219
rxq->free_count = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5221
return rxq;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8217
i = priv->rxq->read;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8219
if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8223
rxb = priv->rxq->queue[i];
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8228
priv->rxq->queue[i] = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8385
list_add_tail(&rxb->list, &priv->rxq->rx_used);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8392
priv->rxq->read = i;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
8398
priv->rxq->read = i;
drivers/net/wireless/intel/ipw2x00/ipw2200.h
1188
struct ipw_rx_queue *rxq;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1003
if (rxq->free_count <= RX_LOW_WATERMARK &&
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1007
priority, rxq->free_count);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1025
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1027
if (list_empty(&rxq->rx_used)) {
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1028
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1036
element = rxq->rx_used.next;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1042
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1043
rxq->free_count++;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1046
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1051
il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1055
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1056
INIT_LIST_HEAD(&rxq->rx_free);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1057
INIT_LIST_HEAD(&rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1062
if (rxq->pool[i].page != NULL) {
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1064
rxq->pool[i].page_dma,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1067
__il_free_pages(il, rxq->pool[i].page);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1068
rxq->pool[i].page = NULL;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1070
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1075
rxq->read = rxq->write = 0;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1076
rxq->write_actual = 0;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1077
rxq->free_count = 0;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1078
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1108
il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1112
if (rxq->pool[i].page != NULL) {
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1114
rxq->pool[i].page_dma,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1117
__il_free_pages(il, rxq->pool[i].page);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1118
rxq->pool[i].page = NULL;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1122
dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1123
rxq->bd_dma);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1125
rxq->rb_stts, rxq->rb_stts_dma);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1126
rxq->bd = NULL;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1127
rxq->rb_stts = NULL;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1142
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1152
r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1153
i = rxq->read;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1156
total_empty = r - rxq->write_actual;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1167
rxb = rxq->queue[i];
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1174
rxq->queue[i] = NULL;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1216
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1226
list_add_tail(&rxb->list, &rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1228
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1229
rxq->free_count++;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1232
list_add_tail(&rxb->list, &rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1234
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1242
rxq->read = i;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1250
rxq->read = i;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
1420
il_rx_queue_update_write_ptr(il, &il->rxq);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
3786
if (il->rxq.bd)
drivers/net/wireless/intel/iwlegacy/3945-mac.c
3787
il3945_rx_queue_free(il, &il->rxq);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
929
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
934
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
935
while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
drivers/net/wireless/intel/iwlegacy/3945-mac.c
937
element = rxq->rx_free.next;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
942
rxq->bd[rxq->write] =
drivers/net/wireless/intel/iwlegacy/3945-mac.c
944
rxq->queue[rxq->write] = rxb;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
945
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
946
rxq->free_count--;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
948
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
951
if (rxq->free_count <= RX_LOW_WATERMARK)
drivers/net/wireless/intel/iwlegacy/3945-mac.c
956
if (rxq->write_actual != (rxq->write & ~0x7) ||
drivers/net/wireless/intel/iwlegacy/3945-mac.c
957
abs(rxq->write - rxq->read) > 7) {
drivers/net/wireless/intel/iwlegacy/3945-mac.c
958
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
959
rxq->need_update = 1;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
960
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
961
il_rx_queue_update_write_ptr(il, rxq);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
976
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
985
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
986
if (list_empty(&rxq->rx_used)) {
drivers/net/wireless/intel/iwlegacy/3945-mac.c
987
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
990
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
992
if (rxq->free_count > RX_LOW_WATERMARK)
drivers/net/wireless/intel/iwlegacy/3945.c
778
il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
drivers/net/wireless/intel/iwlegacy/3945.c
780
il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
drivers/net/wireless/intel/iwlegacy/3945.c
781
il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
drivers/net/wireless/intel/iwlegacy/3945.c
957
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/3945.c
967
if (!rxq->bd) {
drivers/net/wireless/intel/iwlegacy/3945.c
974
il3945_rx_queue_reset(il, rxq);
drivers/net/wireless/intel/iwlegacy/3945.c
978
il3945_rx_init(il, rxq);
drivers/net/wireless/intel/iwlegacy/3945.c
985
il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
drivers/net/wireless/intel/iwlegacy/3945.h
177
void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
101
__il_free_pages(il, rxq->pool[i].page);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
102
rxq->pool[i].page = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
104
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
108
rxq->queue[i] = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
112
rxq->read = rxq->write = 0;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
113
rxq->write_actual = 0;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
114
rxq->free_count = 0;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
115
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
119
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
137
il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
drivers/net/wireless/intel/iwlegacy/4965-mac.c
140
il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
184
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
197
if (!rxq->bd) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
204
il4965_rx_queue_reset(il, rxq);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
208
il4965_rx_init(il, rxq);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
212
rxq->need_update = 1;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
213
il_rx_queue_update_write_ptr(il, rxq);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
253
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
258
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
259
while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
261
rxb = rxq->queue[rxq->write];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
265
element = rxq->rx_free.next;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
270
rxq->bd[rxq->write] =
drivers/net/wireless/intel/iwlegacy/4965-mac.c
272
rxq->queue[rxq->write] = rxb;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
273
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
274
rxq->free_count--;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
276
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
279
if (rxq->free_count <= RX_LOW_WATERMARK)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
284
if (rxq->write_actual != (rxq->write & ~0x7)) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
285
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
286
rxq->need_update = 1;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
287
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
288
il_rx_queue_update_write_ptr(il, rxq);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
303
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
312
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
313
if (list_empty(&rxq->rx_used)) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
314
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
317
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
319
if (rxq->free_count > RX_LOW_WATERMARK)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
332
if (rxq->free_count <= RX_LOW_WATERMARK &&
drivers/net/wireless/intel/iwlegacy/4965-mac.c
338
rxq->free_count);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
354
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
356
if (list_empty(&rxq->rx_used)) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
357
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
365
element = rxq->rx_used.next;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
373
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
374
rxq->free_count++;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
377
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
407
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
411
if (rxq->pool[i].page != NULL) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
413
rxq->pool[i].page_dma,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
416
__il_free_pages(il, rxq->pool[i].page);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
417
rxq->pool[i].page = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
421
dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4211
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
422
rxq->bd_dma);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4221
r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4222
i = rxq->read;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4229
total_empty = r - rxq->write_actual;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4237
rxb = rxq->queue[i];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
424
rxq->rb_stts, rxq->rb_stts_dma);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4244
rxq->queue[i] = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
425
rxq->bd = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
426
rxq->rb_stts = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4286
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4297
list_add_tail(&rxb->list, &rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4299
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4300
rxq->free_count++;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4303
list_add_tail(&rxb->list, &rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4305
spin_unlock_irqrestore(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4313
rxq->read = i;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4321
rxq->read = i;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4467
il_rx_queue_update_write_ptr(il, &il->rxq);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
6739
if (il->rxq.bd)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
6740
il4965_rx_queue_free(il, &il->rxq);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
85
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
89
spin_lock_irqsave(&rxq->lock, flags);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
90
INIT_LIST_HEAD(&rxq->rx_free);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
91
INIT_LIST_HEAD(&rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
96
if (rxq->pool[i].page != NULL) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
98
rxq->pool[i].page_dma,
drivers/net/wireless/intel/iwlegacy/4965.h
40
void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
drivers/net/wireless/intel/iwlegacy/4965.h
41
int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
drivers/net/wireless/intel/iwlegacy/4965.h
51
void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
drivers/net/wireless/intel/iwlegacy/common.c
2597
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/common.c
2601
spin_lock_init(&rxq->lock);
drivers/net/wireless/intel/iwlegacy/common.c
2602
INIT_LIST_HEAD(&rxq->rx_free);
drivers/net/wireless/intel/iwlegacy/common.c
2603
INIT_LIST_HEAD(&rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/common.c
2606
rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
drivers/net/wireless/intel/iwlegacy/common.c
2608
if (!rxq->bd)
drivers/net/wireless/intel/iwlegacy/common.c
2611
rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
drivers/net/wireless/intel/iwlegacy/common.c
2612
&rxq->rb_stts_dma, GFP_KERNEL);
drivers/net/wireless/intel/iwlegacy/common.c
2613
if (!rxq->rb_stts)
drivers/net/wireless/intel/iwlegacy/common.c
2618
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
drivers/net/wireless/intel/iwlegacy/common.c
2622
rxq->read = rxq->write = 0;
drivers/net/wireless/intel/iwlegacy/common.c
2623
rxq->write_actual = 0;
drivers/net/wireless/intel/iwlegacy/common.c
2624
rxq->free_count = 0;
drivers/net/wireless/intel/iwlegacy/common.c
2625
rxq->need_update = 0;
drivers/net/wireless/intel/iwlegacy/common.c
2629
dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
drivers/net/wireless/intel/iwlegacy/common.c
2630
rxq->bd_dma);
drivers/net/wireless/intel/iwlegacy/common.h
1265
struct il_rx_queue rxq;
drivers/net/wireless/intel/iwlegacy/debug.c
864
struct il_rx_queue *rxq = &il->rxq;
drivers/net/wireless/intel/iwlegacy/debug.c
869
pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
drivers/net/wireless/intel/iwlegacy/debug.c
870
pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
drivers/net/wireless/intel/iwlegacy/debug.c
873
rxq->free_count);
drivers/net/wireless/intel/iwlegacy/debug.c
874
if (rxq->rb_stts) {
drivers/net/wireless/intel/iwlegacy/debug.c
877
le16_to_cpu(rxq->rb_stts->
drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
449
__le32 rxq;
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
179
cpu_to_le64(trans_pcie->rxq->bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
247
cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
255
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
216
rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
217
rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
218
rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
196
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
199
__le16 *rb_stts = rxq->rb_stts;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
203
struct iwl_rb_status *rb_stts = rxq->rb_stts;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
410
struct iwl_rxq *rxq;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
573
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
576
struct iwl_rxq *rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1007
struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1015
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1018
rxq->id, ret, budget);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1026
napi_complete_done(&rxq->napi, ret);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1034
struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1042
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1043
IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1047
int irq_line = rxq->id;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1051
rxq->id == 1)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1058
napi_complete_done(&rxq->napi, ret);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1069
if (unlikely(!trans_pcie->rxq))
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1073
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1075
if (rxq && rxq->napi.poll)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1076
napi_synchronize(&rxq->napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1087
if (!trans_pcie->rxq) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1092
def_rxq = trans_pcie->rxq;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1110
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1112
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1118
rxq->read = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1119
rxq->write = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1120
rxq->write_actual = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1121
memset(rxq->rb_stts, 0,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1126
iwl_pcie_rx_init_rxb_lists(rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1128
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1130
if (!rxq->napi.poll) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1136
netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1138
napi_enable(&rxq->napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1178
iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1180
iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1182
spin_lock_bh(&trans_pcie->rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1183
iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1184
spin_unlock_bh(&trans_pcie->rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
120
static int iwl_rxq_space(const struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1212
if (!trans_pcie->rxq) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
123
WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1231
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1233
iwl_pcie_free_rxq_dma(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1235
if (rxq->napi.poll) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1236
napi_disable(&rxq->napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1237
netif_napi_del(&rxq->napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1242
kfree(trans_pcie->rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1248
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1252
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1264
struct iwl_rxq *rxq, bool emergency)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1271
list_add_tail(&rxb->list, &rxq->rx_used);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1277
rxq->used_count++;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1284
if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1287
iwl_pcie_rx_move_to_allocator(rxq, rba);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1295
struct iwl_rxq *rxq,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
131
return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1328
rxq->id, offset);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1333
FH_RSCSR_RXQ_POS != rxq->id,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1335
rxq->id,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1341
rxq->id, offset,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1377
if (rxq->id == IWL_DEFAULT_RX_QUEUE)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1378
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1381
iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1382
&rxcb, rxq->id);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1434
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1436
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1437
rxq->free_count++;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1440
iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1444
struct iwl_rxq *rxq, int i,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1455
rxb = rxq->queue[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1456
rxq->queue[i] = NULL;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1461
struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1466
struct iwl_rx_completion_desc *cd = rxq->used_bd;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1471
__le32 *cd = rxq->used_bd;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1501
struct iwl_rxq *rxq;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1505
if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1508
rxq = &trans_pcie->rxq[queue];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1511
spin_lock(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1514
r = iwl_get_closed_rb_stts(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1515
i = rxq->read;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1518
r &= (rxq->queue_size - 1);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1522
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1533
if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1535
iwl_pcie_rx_move_to_allocator(rxq, rba);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1542
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1544
rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1548
if (unlikely(join || rxq->next_rb_is_fragment)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1549
rxq->next_rb_is_fragment = join;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1562
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1563
rxq->free_count++;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1565
iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1568
i = (i + 1) & (rxq->queue_size - 1);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1577
if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1578
iwl_pcie_rx_allocator_get(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1580
if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1582
iwl_pcie_rx_move_to_allocator(rxq, rba);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1587
if (rb_pending_alloc < rxq->queue_size / 3) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1594
rxq->read = i;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1595
spin_unlock(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1596
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1597
iwl_pcie_rxq_restock(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1604
rxq->read = i;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1605
spin_unlock(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1620
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1622
iwl_pcie_rxq_restock(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1644
struct iwl_rxq *rxq;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1651
if (!trans_pcie->rxq) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1659
rxq = &trans_pcie->rxq[entry->entry];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
1664
if (!napi_schedule(&rxq->napi))
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
168
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
172
lockdep_assert_held(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
188
rxq->need_update = true;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
193
rxq->write_actual = round_down(rxq->write, 8);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
195
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
197
iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
198
HBUS_TARG_WRPTR_RX_Q(rxq->id));
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
200
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2003
iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
201
rxq->write_actual);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
210
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2100
if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2102
__napi_schedule(&trans_pcie->rxq[0].napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
212
if (!rxq->need_update)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
214
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
215
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
216
rxq->need_update = false;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
217
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
222
struct iwl_rxq *rxq,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
226
struct iwl_rx_transfer_desc *bd = rxq->bd;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
230
bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
231
bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2316
if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2318
__napi_schedule(&trans_pcie->rxq[0].napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2326
if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2328
__napi_schedule(&trans_pcie->rxq[1].napi);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
233
__le64 *bd = rxq->bd;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
235
bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
239
(u32)rxb->vid, rxq->id, rxq->write);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
2421
iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
246
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
262
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
263
while (rxq->free_count) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
265
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
272
iwl_pcie_restock_bd(trans, rxq, rxb);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
273
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
274
rxq->free_count--;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
276
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
282
if (rxq->write_actual != (rxq->write & ~0x7)) {
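Aside: rx.c lines 193 and 282 above show the write pointer being pushed to hardware only in multiples of 8 — round_down(rxq->write, 8) and rxq->write & ~0x7 compute the same aligned value, so the comparison at line 282 detects whether a doorbell update is actually due. A small self-checking sketch of that equivalence (the macro mirrors the kernel's power-of-two round_down; everything else is illustrative):

/* Sketch: round_down(w, 8) == (w & ~0x7) for power-of-two alignment. */
#include <stdio.h>

/* Kernel-style round_down, valid when y is a power of two. */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	for (unsigned int w = 0; w < 64; w++)
		if (round_down(w, 8) != (w & ~0x7u))
			printf("mismatch at %u\n", w);
	printf("round_down(w, 8) == (w & ~0x7) for all w tested\n");
	return 0;
}

Coalescing doorbells to 8-entry granularity trades a little latency for far fewer MMIO writes on the hot path.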
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
283
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
284
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
285
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
293
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
308
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
309
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
310
__le32 *bd = (__le32 *)rxq->bd;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
312
rxb = rxq->queue[rxq->write];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
316
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
322
bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
323
rxq->queue[rxq->write] = rxb;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
324
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
325
rxq->free_count--;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
327
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
331
if (rxq->write_actual != (rxq->write & ~0x7)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
332
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
333
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
334
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
350
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
353
iwl_pcie_rxmq_restock(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
355
iwl_pcie_rxsq_restock(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
431
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
440
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
441
if (list_empty(&rxq->rx_used)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
442
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
445
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
451
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
453
if (list_empty(&rxq->rx_used)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
454
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
458
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
461
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
473
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
474
list_add(&rxb->list, &rxq->rx_used);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
475
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
480
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
482
list_add_tail(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
483
rxq->free_count++;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
485
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
615
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
621
lockdep_assert_held(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
641
list_move(&rxb->list, &rxq->rx_free);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
645
rxq->used_count -= RX_CLAIM_REQ_ALLOC;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
646
rxq->free_count += RX_CLAIM_REQ_ALLOC;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
680
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
684
if (rxq->bd)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
686
free_size * rxq->queue_size,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
687
rxq->bd, rxq->bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
688
rxq->bd_dma = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
689
rxq->bd = NULL;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
691
rxq->rb_stts_dma = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
692
rxq->rb_stts = NULL;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
694
if (rxq->used_bd)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
697
rxq->queue_size,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
698
rxq->used_bd, rxq->used_bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
699
rxq->used_bd_dma = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
700
rxq->used_bd = NULL;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
715
struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
723
spin_lock_init(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
725
rxq->queue_size = iwl_trans_get_num_rbds(trans);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
727
rxq->queue_size = RX_QUEUE_SIZE;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
735
rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
736
&rxq->bd_dma, GFP_KERNEL);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
737
if (!rxq->bd)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
741
rxq->used_bd = dma_alloc_coherent(dev,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
743
rxq->queue_size,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
744
&rxq->used_bd_dma,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
746
if (!rxq->used_bd)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
750
rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
751
rxq->rb_stts_dma =
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
752
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
758
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
760
iwl_pcie_free_rxq_dma(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
773
if (WARN_ON(trans_pcie->rxq))
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
776
trans_pcie->rxq = kzalloc_objs(struct iwl_rxq, trans->info.num_rxqs);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
782
if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
805
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
807
rxq->id = i;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
808
ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
827
kfree(trans_pcie->rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
828
trans_pcie->rxq = NULL;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
833
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
868
(u32)(rxq->bd_dma >> 8));
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
872
rxq->rb_stts_dma >> 4);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
936
trans_pcie->rxq[i].bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
940
trans_pcie->rxq[i].used_bd_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
944
trans_pcie->rxq[i].rb_stts_dma);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
988
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
990
lockdep_assert_held(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
992
INIT_LIST_HEAD(&rxq->rx_free);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
993
INIT_LIST_HEAD(&rxq->rx_used);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
994
rxq->free_count = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
995
rxq->used_count = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2523
if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2526
data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2527
data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2528
data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2838
if (!trans_pcie->rxq)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2846
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2848
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2853
rxq->read);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2855
rxq->write);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2857
rxq->write_actual);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2859
rxq->need_update);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2861
rxq->free_count);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2862
if (rxq->rb_stts) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2863
u32 r = iwl_get_closed_rb_stts(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
2870
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3282
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3285
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3287
r = iwl_get_closed_rb_stts(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3289
for (i = rxq->read, j = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3292
struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3309
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3580
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3582
spin_lock_bh(&rxq->lock);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3583
num_rbs = iwl_get_closed_rb_stts(trans, rxq);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3584
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3585
spin_unlock_bh(&rxq->lock);
drivers/net/wireless/marvell/mwl8k.c
1168
struct mwl8k_rx_queue *rxq = priv->rxq + index;
drivers/net/wireless/marvell/mwl8k.c
1172
rxq->rxd_count = 0;
drivers/net/wireless/marvell/mwl8k.c
1173
rxq->head = 0;
drivers/net/wireless/marvell/mwl8k.c
1174
rxq->tail = 0;
drivers/net/wireless/marvell/mwl8k.c
1178
rxq->rxd = dma_alloc_coherent(&priv->pdev->dev, size, &rxq->rxd_dma,
drivers/net/wireless/marvell/mwl8k.c
1180
if (rxq->rxd == NULL) {
drivers/net/wireless/marvell/mwl8k.c
1185
rxq->buf = kzalloc_objs(*rxq->buf, MWL8K_RX_DESCS);
drivers/net/wireless/marvell/mwl8k.c
1186
if (rxq->buf == NULL) {
drivers/net/wireless/marvell/mwl8k.c
1187
dma_free_coherent(&priv->pdev->dev, size, rxq->rxd,
drivers/net/wireless/marvell/mwl8k.c
1188
rxq->rxd_dma);
drivers/net/wireless/marvell/mwl8k.c
1199
rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size);
drivers/net/wireless/marvell/mwl8k.c
1204
next_dma_addr = rxq->rxd_dma + (nexti * desc_size);
drivers/net/wireless/marvell/mwl8k.c
1215
struct mwl8k_rx_queue *rxq = priv->rxq + index;
drivers/net/wireless/marvell/mwl8k.c
1218
while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
drivers/net/wireless/marvell/mwl8k.c
1235
rxq->rxd_count++;
drivers/net/wireless/marvell/mwl8k.c
1236
rx = rxq->tail++;
drivers/net/wireless/marvell/mwl8k.c
1237
if (rxq->tail == MWL8K_RX_DESCS)
drivers/net/wireless/marvell/mwl8k.c
1238
rxq->tail = 0;
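Aside: the mwl8k refill loop above (mwl8k.c:1218-1238) advances its tail index with a compare-and-reset wrap rather than a mask, a style that works whether or not the descriptor count is a power of two, and bounds the loop with a decrementing limit. A hypothetical standalone version of that loop shape (the descriptor count here is illustrative, not the driver's):

/* Sketch of a bounded refill loop with compare-and-reset index wrap,
 * mirroring the mwl8k pattern above. Names and sizes are hypothetical. */
#include <stdio.h>

#define NUM_DESCS 48 /* illustrative; need not be a power of two */

int main(void)
{
	unsigned int count = 0, tail = 0, limit = 100;

	while (count < NUM_DESCS && limit--) {
		unsigned int slot = tail++;

		if (tail == NUM_DESCS) /* wrap without a mask */
			tail = 0;
		count++;
		(void)slot; /* a real driver would fill descriptor 'slot' here */
	}
	printf("filled %u descriptors, tail now %u\n", count, tail);
	return 0;
}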
drivers/net/wireless/marvell/mwl8k.c
1239
rxq->buf[rx].skb = skb;
drivers/net/wireless/marvell/mwl8k.c
1240
dma_unmap_addr_set(&rxq->buf[rx], dma, addr);
drivers/net/wireless/marvell/mwl8k.c
1242
rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
drivers/net/wireless/marvell/mwl8k.c
1255
struct mwl8k_rx_queue *rxq = priv->rxq + index;
drivers/net/wireless/marvell/mwl8k.c
1258
if (rxq->rxd == NULL)
drivers/net/wireless/marvell/mwl8k.c
1262
if (rxq->buf[i].skb != NULL) {
drivers/net/wireless/marvell/mwl8k.c
1264
dma_unmap_addr(&rxq->buf[i], dma),
drivers/net/wireless/marvell/mwl8k.c
1266
dma_unmap_addr_set(&rxq->buf[i], dma, 0);
drivers/net/wireless/marvell/mwl8k.c
1268
kfree_skb(rxq->buf[i].skb);
drivers/net/wireless/marvell/mwl8k.c
1269
rxq->buf[i].skb = NULL;
drivers/net/wireless/marvell/mwl8k.c
1273
kfree(rxq->buf);
drivers/net/wireless/marvell/mwl8k.c
1274
rxq->buf = NULL;
drivers/net/wireless/marvell/mwl8k.c
1277
MWL8K_RX_DESCS * priv->rxd_ops->rxd_size, rxq->rxd,
drivers/net/wireless/marvell/mwl8k.c
1278
rxq->rxd_dma);
drivers/net/wireless/marvell/mwl8k.c
1279
rxq->rxd = NULL;
drivers/net/wireless/marvell/mwl8k.c
1332
struct mwl8k_rx_queue *rxq = priv->rxq + index;
drivers/net/wireless/marvell/mwl8k.c
1336
while (rxq->rxd_count && limit--) {
drivers/net/wireless/marvell/mwl8k.c
1344
skb = rxq->buf[rxq->head].skb;
drivers/net/wireless/marvell/mwl8k.c
1348
rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
drivers/net/wireless/marvell/mwl8k.c
1355
rxq->buf[rxq->head].skb = NULL;
drivers/net/wireless/marvell/mwl8k.c
1358
dma_unmap_addr(&rxq->buf[rxq->head], dma),
drivers/net/wireless/marvell/mwl8k.c
1360
dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
drivers/net/wireless/marvell/mwl8k.c
1362
rxq->head++;
drivers/net/wireless/marvell/mwl8k.c
1363
if (rxq->head == MWL8K_RX_DESCS)
drivers/net/wireless/marvell/mwl8k.c
1364
rxq->head = 0;
drivers/net/wireless/marvell/mwl8k.c
1366
rxq->rxd_count--;
drivers/net/wireless/marvell/mwl8k.c
247
struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
drivers/net/wireless/marvell/mwl8k.c
2484
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
drivers/net/wireless/marvell/mwl8k.c
2581
iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
drivers/net/wireless/marvell/mwl8k.c
2584
iowrite32(priv->rxq[0].rxd_dma, priv->sram + off);
drivers/net/wireless/marvell/mwl8k.c
2650
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1009
t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1053
t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1118
md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1139
t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1278
if (md_ctrl->rxq[i].worker) {
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1279
destroy_workqueue(md_ctrl->rxq[i].worker);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1280
md_ctrl->rxq[i].worker = NULL;
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1325
md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1326
INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1328
md_ctrl->rxq[i].worker =
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
1332
if (!md_ctrl->rxq[i].worker)
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
601
queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
655
flush_work(&md_ctrl->rxq[i].cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
704
cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
707
md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
740
if (md_ctrl->rxq[i].tr_done)
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
742
md_ctrl->rxq[i].tr_done->gpd_addr,
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
778
struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
784
spin_lock_irqsave(&rxq->ring_lock, flags);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
785
t7xx_cldma_q_reset(rxq);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
786
list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
797
list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
801
ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
807
spin_unlock_irqrestore(&rxq->ring_lock, flags);
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
998
t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
drivers/net/wwan/t7xx/t7xx_hif_cldma.h
102
struct cldma_queue rxq[CLDMA_RXQ_NUM];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
227
rx_q = &dpmaif_ctrl->rxq[rx_idx];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
267
rx_q = &dpmaif_ctrl->rxq[i];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
294
rx_q = &dpmaif_ctrl->rxq[i];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
303
struct dpmaif_rx_queue *rxq;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
314
rxq = &dpmaif_ctrl->rxq[i];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
315
rxq->que_started = true;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
316
rxq->index = i;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
317
rxq->budget = rxq->bat_req->bat_size_cnt - 1;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
319
hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
320
hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
321
hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
322
hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
323
hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
324
hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
356
ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
360
ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
372
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
375
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
432
struct dpmaif_rx_queue *rxq;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
442
rxq = &dpmaif_ctrl->rxq[que_cnt];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
443
rxq->que_started = true;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h
178
struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1011
static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1013
rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1014
rxq->pit_rd_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1015
rxq->pit_wr_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1016
rxq->pit_release_rd_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1017
rxq->expect_pit_seq = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1018
rxq->pit_remain_release_cnt = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1019
memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1021
rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1022
rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1023
&rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1024
if (!rxq->pit_base)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1027
rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1028
atomic_inc(&rxq->bat_req->refcnt);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1030
rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1031
atomic_inc(&rxq->bat_frag->refcnt);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1035
static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1037
if (!rxq->dpmaif_ctrl)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1040
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1041
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1043
if (rxq->pit_base)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1044
dma_free_coherent(rxq->dpmaif_ctrl->dev,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1045
rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1046
rxq->pit_base, rxq->pit_bus_addr);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1068
struct dpmaif_rx_queue *rxq;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1078
rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1080
t7xx_dpmaif_bat_release_and_add(rxq);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1081
t7xx_dpmaif_frag_bat_release_and_add(rxq);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1121
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1124
timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1131
rxq->que_started = false;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1135
static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1139
rxq->que_started = false;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1142
cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1143
rxq->pit_wr_idx, DPMAIF_READ);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1146
dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1151
memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1152
memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1153
bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1154
memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1156
rxq->pit_rd_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1157
rxq->pit_wr_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1158
rxq->pit_release_rd_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1159
rxq->expect_pit_seq = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1160
rxq->pit_remain_release_cnt = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1161
rxq->bat_req->bat_release_rd_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1162
rxq->bat_req->bat_wr_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1163
rxq->bat_frag->bat_release_rd_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1164
rxq->bat_frag->bat_wr_idx = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
1172
t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
236
static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
239
struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
243
if (!rxq->que_started)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
246
if (rel_entry_num >= rxq->pit_size_cnt) {
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
247
dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
251
old_rel_idx = rxq->pit_release_rd_idx;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
253
hw_wr_idx = rxq->pit_wr_idx;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
254
if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
255
new_rel_idx -= rxq->pit_size_cnt;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
257
ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
259
dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
263
rxq->pit_release_rd_idx = new_rel_idx;
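Aside: the PIT release path above (t7xx_hif_dpmaif_rx.c:251-263) advances a release index by a batch count and wraps it by subtraction, but only when the hardware write index has already lapped the old release index. A sketch of that advance, with hypothetical names:

/* Sketch of the PIT release-index advance above: move the release
 * pointer forward by rel_entry_num slots, wrapping by subtraction when
 * it runs past the ring size. All names are hypothetical. */
#include <stdio.h>

static unsigned int advance_release_idx(unsigned int old_rel_idx,
					unsigned int rel_entry_num,
					unsigned int hw_wr_idx,
					unsigned int pit_size_cnt)
{
	unsigned int new_rel_idx = old_rel_idx + rel_entry_num;

	/* HW writer already wrapped past us: wrap the release index too. */
	if (hw_wr_idx < old_rel_idx && new_rel_idx >= pit_size_cnt)
		new_rel_idx -= pit_size_cnt;
	return new_rel_idx;
}

int main(void)
{
	/* release idx 250 + 10 entries on a 256-slot ring, HW writer at 5 */
	printf("%u\n", advance_release_idx(250, 10, 5, 256)); /* prints 4 */
	return 0;
}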
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
276
static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
279
struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
393
static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
399
struct device *dev = rxq->dpmaif_ctrl->dev;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
404
page_info = rxq->bat_frag->bat_skb;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
430
static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
437
ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
441
ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
443
dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
447
t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
451
static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
453
struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
467
static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
470
unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
477
rxq->expect_pit_seq++;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
478
if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
479
rxq->expect_pit_seq = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
505
static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
509
struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
514
if (!rxq->que_started || !rel_entry_num)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
518
bat = rxq->bat_frag;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
519
hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
521
bat = rxq->bat_req;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
522
hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
561
static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
565
if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
568
ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
572
rxq->pit_remain_release_cnt = 0;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
576
static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
581
bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
585
ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
587
dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
591
ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
593
dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
598
static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
603
bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
607
ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
609
dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
613
return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
616
static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
628
static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
633
struct device *dev = rxq->dpmaif_ctrl->dev;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
639
bat_skb = rxq->bat_req->bat_skb;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
664
static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
671
ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
675
ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
677
dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
681
t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
685
static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
687
struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
692
ret = t7xx_dpmaif_pit_release_and_add(rxq);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
694
dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
699
static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
702
struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
720
dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
723
static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
727
struct device *dev = rxq->dpmaif_ctrl->dev;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
731
pit_len = rxq->pit_size_cnt;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
732
skb_info = &rxq->rx_data_info;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
733
cur_pit = rxq->pit_rd_idx;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
742
pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
743
if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
744
dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
752
dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
755
t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
759
ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
763
ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
767
dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
773
t7xx_dpmaif_rx_skb(rxq, skb_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
785
rxq->pit_rd_idx = cur_pit;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
786
rxq->pit_remain_release_cnt++;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
789
ret = t7xx_dpmaifq_rx_notify_hw(rxq);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
796
ret = t7xx_dpmaifq_rx_notify_hw(rxq);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
804
static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
808
if (!rxq->que_started)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
811
hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
812
pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
814
rxq->pit_wr_idx = hw_wr_idx;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
822
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
826
cnt = t7xx_dpmaifq_poll_pit(rxq);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
830
ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
832
dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
839
struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
84
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
840
struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
843
atomic_set(&rxq->rx_processing, 1);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
847
if (!rxq->que_started) {
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
848
atomic_set(&rxq->rx_processing, 0);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
849
pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
85
struct dpmaif_bat_request *bat_req = rxq->bat_req;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
850
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
854
if (!rxq->sleep_lock_pending)
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
860
rxq->sleep_lock_pending = true;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
865
rxq->sleep_lock_pending = false;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
868
int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
879
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
88
if (!rxq->que_started) {
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
882
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
883
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
884
t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
885
pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
886
atomic_set(&rxq->rx_processing, 0);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
888
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
89
dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
896
struct dpmaif_rx_queue *rxq;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
906
rxq = &dpmaif_ctrl->rxq[qno];
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
907
ctrl = rxq->dpmaif_ctrl;
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
919
napi_schedule(&rxq->napi);
drivers/net/wwan/t7xx/t7xx_netdev.c
271
ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
drivers/net/wwan/wwan_core.c
1061
spin_lock_irqsave(&port->rxq.lock, flags);
drivers/net/wwan/wwan_core.c
1062
skb_queue_walk(&port->rxq, skb)
drivers/net/wwan/wwan_core.c
1064
spin_unlock_irqrestore(&port->rxq.lock, flags);
drivers/net/wwan/wwan_core.c
653
skb_queue_head_init(&port->rxq);
drivers/net/wwan/wwan_core.c
694
skb_queue_purge(&port->rxq);
drivers/net/wwan/wwan_core.c
716
skb_queue_tail(&port->rxq, skb);
drivers/net/wwan/wwan_core.c
771
skb_queue_purge(&port->rxq);
drivers/net/wwan/wwan_core.c
800
return skb_queue_empty(&port->rxq) && port->ops;
drivers/net/wwan/wwan_core.c
877
skb = skb_dequeue(&port->rxq);
drivers/net/wwan/wwan_core.c
890
skb_queue_head(&port->rxq, skb);
drivers/net/wwan/wwan_core.c
92
struct sk_buff_head rxq;
drivers/net/xen-netfront.c
1228
struct sk_buff_head *rxq)
drivers/net/xen-netfront.c
1234
while ((skb = __skb_dequeue(rxq)) != NULL) {
drivers/net/xen-netfront.c
1273
struct sk_buff_head rxq;
drivers/net/xen-netfront.c
1281
skb_queue_head_init(&rxq);
drivers/net/xen-netfront.c
1349
__skb_queue_tail(&rxq, skb);
drivers/net/xen-netfront.c
1360
work_done -= handle_incoming_queue(queue, &rxq);
drivers/target/iscsi/cxgbit/cxgbit.h
196
struct sk_buff_head rxq;
drivers/target/iscsi/cxgbit/cxgbit_cm.c
1356
skb_queue_head_init(&csk->rxq);
drivers/target/iscsi/cxgbit/cxgbit_cm.c
1665
spin_lock_bh(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_cm.c
1666
__skb_queue_tail(&csk->rxq, skb);
drivers/target/iscsi/cxgbit/cxgbit_cm.c
1667
spin_unlock_bh(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_cm.c
782
__skb_queue_purge(&csk->rxq);
drivers/target/iscsi/cxgbit/cxgbit_main.c
108
spin_lock_bh(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_main.c
109
__skb_queue_tail(&csk->rxq, skb);
drivers/target/iscsi/cxgbit/cxgbit_main.c
110
if (skb_queue_len(&csk->rxq) == 1)
drivers/target/iscsi/cxgbit/cxgbit_main.c
112
spin_unlock_bh(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_main.c
344
spin_lock(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_main.c
345
__skb_queue_tail(&csk->rxq, skb);
drivers/target/iscsi/cxgbit/cxgbit_main.c
346
if (skb_queue_len(&csk->rxq) == 1)
drivers/target/iscsi/cxgbit/cxgbit_main.c
348
spin_unlock(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_main.c
573
spin_lock(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_main.c
574
__skb_queue_tail(&csk->rxq, skb);
drivers/target/iscsi/cxgbit/cxgbit_main.c
575
if (skb_queue_len(&csk->rxq) == 1)
drivers/target/iscsi/cxgbit/cxgbit_main.c
577
spin_unlock(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_target.c
1594
static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
drivers/target/iscsi/cxgbit/cxgbit_target.c
1596
spin_lock_bh(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_target.c
1597
if (skb_queue_len(&csk->rxq)) {
drivers/target/iscsi/cxgbit/cxgbit_target.c
1598
skb_queue_splice_init(&csk->rxq, rxq);
drivers/target/iscsi/cxgbit/cxgbit_target.c
1599
spin_unlock_bh(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_target.c
1602
spin_unlock_bh(&csk->rxq.lock);
drivers/target/iscsi/cxgbit/cxgbit_target.c
1609
struct sk_buff_head rxq;
drivers/target/iscsi/cxgbit/cxgbit_target.c
1611
skb_queue_head_init(&rxq);
drivers/target/iscsi/cxgbit/cxgbit_target.c
1613
wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
drivers/target/iscsi/cxgbit/cxgbit_target.c
1618
while ((skb = __skb_dequeue(&rxq))) {
drivers/target/iscsi/cxgbit/cxgbit_target.c
1625
__skb_queue_purge(&rxq);
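Aside: the cxgbit receive path above (cxgbit_target.c:1594-1625) uses the splice-then-process idiom — take the queue lock just long enough to move the entire pending list onto a local head (skb_queue_splice_init), then dequeue and process entries with no lock held. A userspace sketch of the same idiom using a pthread mutex and a plain linked list (names are illustrative):

/* Sketch of splice-then-process: hold the lock only to steal the whole
 * pending list, then walk it unlocked. Names are hypothetical. */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int v; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending; /* filled by a (hypothetical) producer */

static struct node *splice_pending(void)
{
	struct node *local;

	pthread_mutex_lock(&lock);
	local = pending;  /* splice: steal the whole list at once */
	pending = NULL;
	pthread_mutex_unlock(&lock);
	return local;
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { &a, 2 };

	pending = &b;
	for (struct node *n = splice_pending(); n; n = n->next)
		printf("processing %d\n", n->v); /* no lock held here */
	return 0;
}

Keeping the critical section to a single pointer swap minimizes contention with the interrupt-context producers seen in cxgbit_main.c above.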
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
199
struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
216
if (!txq->ready || !rxq->ready)
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
240
err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
241
&rxq->head, GFP_ATOMIC);
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
248
write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
259
vdpasim_net_complete(rxq, write);
drivers/vhost/net.c
1214
msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
drivers/vhost/net.c
131
struct vhost_net_buf rxq;
drivers/vhost/net.c
1345
n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
drivers/vhost/net.c
1370
vhost_net_buf_init(&n->vqs[i].rxq);
drivers/vhost/net.c
1448
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
drivers/vhost/net.c
154
static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
drivers/vhost/net.c
156
if (rxq->tail != rxq->head)
drivers/vhost/net.c
157
return rxq->queue[rxq->head];
drivers/vhost/net.c
162
static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
drivers/vhost/net.c
164
return rxq->tail - rxq->head;
drivers/vhost/net.c
167
static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
drivers/vhost/net.c
169
return rxq->tail == rxq->head;
drivers/vhost/net.c
172
static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
drivers/vhost/net.c
174
void *ret = vhost_net_buf_get_ptr(rxq);
drivers/vhost/net.c
175
++rxq->head;
drivers/vhost/net.c
181
struct vhost_net_buf *rxq = &nvq->rxq;
drivers/vhost/net.c
183
rxq->head = 0;
drivers/vhost/net.c
184
rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
drivers/vhost/net.c
186
return rxq->tail;
drivers/vhost/net.c
191
struct vhost_net_buf *rxq = &nvq->rxq;
drivers/vhost/net.c
193
if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
drivers/vhost/net.c
194
ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
drivers/vhost/net.c
195
vhost_net_buf_get_size(rxq),
drivers/vhost/net.c
197
rxq->head = rxq->tail = 0;
drivers/vhost/net.c
214
struct vhost_net_buf *rxq = &nvq->rxq;
drivers/vhost/net.c
216
if (!vhost_net_buf_is_empty(rxq))
drivers/vhost/net.c
223
return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
drivers/vhost/net.c
226
static void vhost_net_buf_init(struct vhost_net_buf *rxq)
drivers/vhost/net.c
228
rxq->head = rxq->tail = 0;
drivers/vhost/net.c
319
vhost_net_buf_init(&n->vqs[i].rxq);
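Aside: the vhost_net_buf helpers above (net.c:154-228) implement a flat batch buffer — a batch of pointers is pulled in at once (via ptr_ring_consume_batched), then consumed one head increment at a time, with head == tail meaning empty. A standalone sketch of that accounting (the batch producer here is hypothetical):

/* Sketch of the vhost_net_buf head/tail accounting above. The batch
 * fill stands in for ptr_ring_consume_batched(); names are illustrative. */
#include <stdio.h>

struct net_buf {
	void *queue[16];
	int head, tail;
};

static int buf_is_empty(const struct net_buf *b) { return b->tail == b->head; }
static int buf_size(const struct net_buf *b)     { return b->tail - b->head; }

static void *buf_consume(struct net_buf *b)
{
	return buf_is_empty(b) ? NULL : b->queue[b->head++];
}

int main(void)
{
	struct net_buf b = { .head = 0, .tail = 0 };
	int x = 1, y = 2;

	/* "produce" a batch, as the ring consume would */
	b.queue[b.tail++] = &x;
	b.queue[b.tail++] = &y;
	while (!buf_is_empty(&b)) {
		int *p = buf_consume(&b);

		printf("got %d (remaining %d)\n", *p, buf_size(&b));
	}
	return 0;
}

Resetting head = tail = 0 after a full drain, as vhost_net_buf_init() does above, keeps the indices small and the queue array reusable.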
include/linux/avf/virtchnl.h
422
struct virtchnl_rxq_info rxq;
include/linux/netdevice.h
4190
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
include/linux/netdevice.h
4192
unsigned int txq, unsigned int rxq);
include/linux/usb/usbnet.h
54
struct sk_buff_head rxq;
include/net/libeth/xdp.h
116
static inline u32 libeth_xdpsq_num(u32 rxq, u32 txq, u32 max)
include/net/libeth/xdp.h
118
return min(max(nr_cpu_ids, rxq), max - txq);
include/net/libeth/xdp.h
1182
struct xdp_rxq_info *rxq)
include/net/libeth/xdp.h
1189
dst->base.rxq = rxq;
include/net/libeth/xdp.h
1286
xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
include/net/libeth/xdp.h
1711
container_of_const((xdp)->base.rxq, type, member)
include/net/mana/hw_channel.h
191
struct hwc_wq *rxq;
include/net/mana/mana.h
282
struct mana_rxq *rxq;
include/net/mana/mana.h
585
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
include/net/netdev_queues.h
185
void netdev_queue_config(struct net_device *dev, int rxq,
include/net/netdev_rx_queue.h
46
__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
include/net/netdev_rx_queue.h
48
return dev->_rx + rxq;
include/net/netdev_rx_queue.h
61
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
include/net/page_pool/memory_provider.h
18
struct netdev_rx_queue *rxq);
include/net/page_pool/memory_provider.h
19
void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
include/net/xdp.h
145
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
include/net/xdp.h
147
xdp->rxq = rxq;
include/net/xdp.h
434
if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
include/net/xdp.h
443
xdp_frame->mem_type = xdp->rxq->mem.type;
include/net/xdp.h
699
if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
include/net/xdp.h
91
struct xdp_rxq_info *rxq;
include/net/xdp_sock_drv.h
372
struct xdp_rxq_info *rxq)
include/net/xdp_sock_drv.h
81
struct xdp_rxq_info *rxq)
include/net/xdp_sock_drv.h
83
xp_set_rxq_info(pool, rxq);
include/net/xsk_buff_pool.h
136
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
include/trace/events/xdp.h
334
const struct xdp_rxq_info *rxq),
include/trace/events/xdp.h
336
TP_ARGS(xa, rxq),
include/trace/events/xdp.h
343
__field(const struct xdp_rxq_info *, rxq)
include/trace/events/xdp.h
352
__entry->rxq = rxq;
include/trace/events/xdp.h
353
__entry->ifindex = rxq->dev->ifindex;
io_uring/zcrx.c
1066
struct netdev_rx_queue *rxq)
io_uring/zcrx.c
1071
type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
io_uring/zcrx.c
1080
static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
io_uring/zcrx.c
1082
struct pp_memory_provider_params *p = &rxq->mp_params;
kernel/bpf/cpumap.c
187
struct xdp_rxq_info rxq = {};
kernel/bpf/cpumap.c
191
xdp.rxq = &rxq;
kernel/bpf/cpumap.c
198
rxq.dev = xdpf->dev_rx;
kernel/bpf/cpumap.c
199
rxq.mem.type = xdpf->mem_type;
kernel/bpf/devmap.c
342
struct xdp_rxq_info rxq = { .dev = rx_dev };
kernel/bpf/devmap.c
353
xdp.rxq = &rxq;
net/bpf/test_run.c
102
struct xdp_rxq_info rxq;
net/bpf/test_run.c
1240
xdp->rxq = &rxqueue->xdp_rxq;
net/bpf/test_run.c
1263
dev_put(xdp->rxq->dev);
net/bpf/test_run.c
140
xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
net/bpf/test_run.c
145
frm->mem_type = new_ctx->rxq->mem.type;
net/bpf/test_run.c
187
xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
net/bpf/test_run.c
188
xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
net/bpf/test_run.c
189
xdp->rxq.mem.id = pp->xdp_mem_id;
net/bpf/test_run.c
190
xdp->dev = orig_ctx->rxq->dev;
net/bpf/test_run.c
238
head->frame->mem_type = head->orig_ctx.rxq->mem.type;
net/core/dev.c
12318
struct netdev_rx_queue *rxq = &dev->_rx[i];
net/core/dev.c
12319
struct pp_memory_provider_params *p = &rxq->mp_params;
net/core/dev.c
12322
p->mp_ops->uninstall(rxq->mp_params.mp_priv, rxq);
net/core/dev.c
3242
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
net/core/dev.c
3246
if (rxq < 1 || rxq > dev->num_rx_queues)
net/core/dev.c
3253
rxq);
net/core/dev.c
3258
dev->real_num_rx_queues = rxq;
net/core/dev.c
3273
unsigned int txq, unsigned int rxq)
net/core/dev.c
3279
rxq < 1 || rxq > dev->num_rx_queues)
net/core/dev.c
3285
if (rxq > dev->real_num_rx_queues) {
net/core/dev.c
3286
err = netif_set_real_num_rx_queues(dev, rxq);
net/core/dev.c
3295
if (rxq < dev->real_num_rx_queues)
net/core/dev.c
3296
WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
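Aside: dev.c lines 3242-3296 above show the validation and ordering rules for resizing the RX queue count — the requested value must stay within [1, num_rx_queues], and when both TX and RX counts change, the RX count is grown before other changes and shrunk back on the error path. A trivial sketch of just the bounds check (hypothetical standalone form, -22 standing in for -EINVAL):

/* Sketch of the bounds check at dev.c:3246 above. Hypothetical names. */
#include <stdio.h>

static int set_real_num_rx_queues(unsigned int *real, unsigned int num,
				  unsigned int rxq)
{
	if (rxq < 1 || rxq > num)
		return -22; /* -EINVAL */
	*real = rxq;
	return 0;
}

int main(void)
{
	unsigned int real = 1;

	printf("%d\n", set_real_num_rx_queues(&real, 8, 4)); /* 0 */
	printf("%d\n", set_real_num_rx_queues(&real, 8, 9)); /* -22 */
	return 0;
}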
net/core/dev.c
7275
struct netdev_rx_queue *rxq;
net/core/dev.c
7284
rxq = __netif_get_rx_queue(dev, queue_index);
net/core/dev.c
7285
rxq->napi = napi;
net/core/devmem.c
126
struct netdev_rx_queue *rxq;
net/core/devmem.c
140
xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
net/core/devmem.c
146
rxq_idx = get_netdev_rx_queue_index(rxq);
net/core/devmem.c
162
struct netdev_rx_queue *rxq;
net/core/devmem.c
170
rxq = __netif_get_rx_queue(dev, rxq_idx);
net/core/devmem.c
171
err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
net/core/devmem.c
497
struct netdev_rx_queue *rxq)
net/core/devmem.c
500
int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;
net/core/devmem.c
506
struct netdev_rx_queue *rxq)
net/core/devmem.c
513
if (bound_rxq == rxq) {
net/core/filter.c
10349
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
net/core/filter.c
10351
offsetof(struct xdp_buff, rxq));
net/core/filter.c
10359
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
net/core/filter.c
10361
offsetof(struct xdp_buff, rxq));
net/core/filter.c
4159
struct xdp_rxq_info *rxq = xdp->rxq;
net/core/filter.c
4162
if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
net/core/filter.c
4165
tailroom = rxq->frag_size - skb_frag_size(frag) -
net/core/filter.c
4166
skb_frag_off(frag) % rxq->frag_size;
net/core/filter.c
4174
if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
net/core/filter.c
4201
enum xdp_mem_type mem_type = xdp->rxq->mem.type;
net/core/filter.c
4397
master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
net/core/filter.c
4399
if (slave && slave != xdp->rxq->dev) {
net/core/filter.c
6395
return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
net/core/filter.c
6400
return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
net/core/filter.c
6533
struct net_device *dev = xdp->rxq->dev;
net/core/filter.c
7184
struct net_device *dev = ctx->rxq->dev;
net/core/filter.c
7208
struct net_device *dev = ctx->rxq->dev;
net/core/filter.c
7232
struct net_device *dev = ctx->rxq->dev;
net/core/net-sysfs.c
2103
int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
net/core/net-sysfs.c
2117
rxq = real_rx;
net/core/net-sysfs.c
2128
net_rx_queue_update_kobjects(dev, rxq, 0);
net/core/netdev-genl.c
394
struct netdev_rx_queue *rxq;
net/core/netdev-genl.c
409
rxq = __netif_get_rx_queue(netdev, q_idx);
net/core/netdev-genl.c
410
if (nla_put_napi_id(rsp, rxq->napi))
net/core/netdev-genl.c
413
params = &rxq->mp_params;
net/core/netdev-genl.c
415
params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
net/core/netdev-genl.c
418
if (rxq->pool)
net/core/netdev_rx_queue.c
16
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);
net/core/netdev_rx_queue.c
18
return !!rxq->mp_params.mp_ops;
net/core/netdev_rx_queue.c
27
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
net/core/netdev_rx_queue.c
52
err = page_pool_check_memory_provider(dev, rxq);
net/core/netdev_rx_queue.c
117
struct netdev_rx_queue *rxq;
net/core/netdev_rx_queue.c
146
rxq = __netif_get_rx_queue(dev, rxq_idx);
net/core/netdev_rx_queue.c
147
if (rxq->mp_params.mp_ops) {
net/core/netdev_rx_queue.c
152
if (rxq->pool) {
net/core/netdev_rx_queue.c
159
rxq->mp_params = *p;
net/core/netdev_rx_queue.c
171
memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
net/core/netdev_rx_queue.c
190
struct netdev_rx_queue *rxq;
net/core/netdev_rx_queue.c
196
rxq = __netif_get_rx_queue(dev, ifq_idx);
net/core/netdev_rx_queue.c
202
!rxq->mp_params.mp_ops)
net/core/netdev_rx_queue.c
205
if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
net/core/netdev_rx_queue.c
206
rxq->mp_params.mp_priv != old_p->mp_priv))
net/core/netdev_rx_queue.c
210
memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
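
Read together, the netdev_rx_queue.c fragments describe the memory-provider lifecycle on a queue: refuse a queue that already has mp_ops (147) or a live page pool (152), publish the params (159), and clear them on close (171) or after validating ownership (205-210). A condensed, hypothetical rendering of install/uninstall; the real code also restarts the queue and takes locks, which is elided here:

#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>

static int my_mp_install(struct net_device *dev, unsigned int rxq_idx,
			 const struct pp_memory_provider_params *p)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);

	if (rxq->mp_params.mp_ops)	/* provider already bound */
		return -EEXIST;
	if (rxq->pool)			/* queue has a live page pool */
		return -EBUSY;

	rxq->mp_params = *p;		/* publish, then restart the queue */
	return 0;
}

static void my_mp_uninstall(struct net_device *dev, unsigned int rxq_idx)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);

	memset(&rxq->mp_params, 0, sizeof(rxq->mp_params));
}
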
net/core/page_pool.c
198
struct netdev_rx_queue *rxq;
net/core/page_pool.c
279
rxq = __netif_get_rx_queue(pool->slow.netdev,
net/core/page_pool.c
281
pool->mp_priv = rxq->mp_params.mp_priv;
net/core/page_pool.c
282
pool->mp_ops = rxq->mp_params.mp_ops;
net/core/page_pool_priv.h
44
struct netdev_rx_queue *rxq);
net/core/page_pool_priv.h
54
struct netdev_rx_queue *rxq)
net/core/page_pool_user.c
356
struct netdev_rx_queue *rxq)
net/core/page_pool_user.c
358
void *binding = rxq->mp_params.mp_priv;
net/core/page_pool_user.c
370
if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
net/core/xdp.c
545
__xdp_return(netmem, xdp->rxq->mem.type, true, NULL);
net/core/xdp.c
559
xdp->rxq->mem.type, true, xdp);
net/core/xdp.c
562
__xdp_return(virt_to_netmem(xdp->data), xdp->rxq->mem.type, true, xdp);
net/core/xdp.c
635
const struct xdp_rxq_info *rxq = xdp->rxq;
net/core/xdp.c
657
if (rxq->mem.type == MEM_TYPE_PAGE_POOL)
net/core/xdp.c
660
skb_record_rx_queue(skb, rxq->queue_index);
net/core/xdp.c
670
skb->protocol = eth_type_trans(skb, rxq->dev);
net/core/xdp.c
741
const struct xdp_rxq_info *rxq = xdp->rxq;
net/core/xdp.c
775
skb_record_rx_queue(skb, rxq->queue_index);
net/core/xdp.c
786
skb->protocol = eth_type_trans(skb, rxq->dev);
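
Both skb-construction paths in xdp.c finish the same way: stamp the originating queue index into the skb and let eth_type_trans() resolve the protocol against the receiving device. A free-standing sketch of those finishing steps as a driver might perform them by hand (my_xdp_to_skb is hypothetical; the real helpers live in net/core/xdp.c and also handle frags and metadata):

#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <net/xdp.h>

static struct sk_buff *my_xdp_to_skb(struct xdp_buff *xdp)
{
	const struct xdp_rxq_info *rxq = xdp->rxq;
	struct sk_buff *skb;

	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (!skb)
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	/* steer stack-side processing to the originating queue */
	skb_record_rx_queue(skb, rxq->queue_index);
	skb->protocol = eth_type_trans(skb, rxq->dev);
	return skb;
}
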
net/netfilter/nf_conntrack_bpf.c
298
nfct = __bpf_nf_ct_alloc_entry(dev_net(ctx->rxq->dev), bpf_tuple, tuple__sz,
net/netfilter/nf_conntrack_bpf.c
332
caller_net = dev_net(ctx->rxq->dev);
net/netfilter/nf_flow_table_bpf.c
94
tuplehash = bpf_xdp_flow_tuple_lookup(xdp->rxq->dev, &tuple, proto);
net/xdp/xsk.c
338
if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
net/xdp/xsk.c
381
if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
net/xdp/xsk.c
1444
struct netdev_rx_queue *rxq;
net/xdp/xsk.c
1446
rxq = __netif_get_rx_queue(dev, qid);
net/xdp/xsk.c
1447
if (rxq->napi)
net/xdp/xsk.c
1448
__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
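
Two xsk.c details are worth noting: on every receive (338) a frame is accepted only if it arrived on exactly the (device, queue) pair the socket was bound to, and at bind time (1444-1448) the socket is stamped once with the NAPI ID of its RX queue so busy polling can find it. The receive-side check, restated as a sketch (my_xsk_owns_frame is a hypothetical name; the field names match the fragments above):

#include <net/xdp.h>
#include <net/xdp_sock.h>

static bool my_xsk_owns_frame(const struct xdp_sock *xs,
			      const struct xdp_buff *xdp)
{
	/* mirrors the test at xsk.c:338 */
	return xs->dev == xdp->rxq->dev &&
	       xs->queue_id == xdp->rxq->queue_index;
}
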
net/xdp/xsk_buff_pool.c
119
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
net/xdp/xsk_buff_pool.c
124
pool->heads[i].xdp.rxq = rxq;
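
xp_set_rxq_info() copies the queue's xdp_rxq_info pointer into every pre-allocated buffer head, so frames handed to XDP from this pool already know their receive queue. Drivers usually reach it through the xsk_pool_set_rxq_info() wrapper; a hypothetical driver ring (struct my_ring) illustrates the call site:

#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

struct my_ring {
	struct xdp_rxq_info xdp_rxq;	/* registered at ring setup */
};

static void my_ring_attach_pool(struct my_ring *ring,
				struct xsk_buff_pool *pool)
{
	/* sets pool->heads[i].xdp.rxq = &ring->xdp_rxq for every i */
	xsk_pool_set_rxq_info(pool, &ring->xdp_rxq);
}
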
net/xfrm/xfrm_state_bpf.c
68
struct net *net = dev_net(xdp->rxq->dev);
tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
29
struct xdp_rxq_info *rxq;
tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
49
meta.ifindex = xdp->rxq->dev->ifindex;
tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
56
test_result_fentry = xdp->rxq->dev->ifindex;
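
Unlike an XDP program, the fentry probe in test_xdp_bpf2bpf.c sees the raw struct xdp_buff and can chase xdp->rxq->dev->ifindex directly through BTF, with no ctx rewriting involved. A minimal version of that probe (the attach target name is illustrative and chosen at load time):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/xdp_prog_func")
int BPF_PROG(trace_xdp_entry, struct xdp_buff *xdp)
{
	/* direct BTF pointer walk, as in the selftest above */
	int ifindex = xdp->rxq->dev->ifindex;

	bpf_printk("xdp entry on ifindex %d", ifindex);
	return 0;
}

char _license[] SEC("license") = "GPL";
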
tools/testing/selftests/bpf/progs/type_cast.c
29
dev = kctx->rxq->dev;
tools/testing/selftests/bpf/xdp_hw_metadata.c
73
int rxq;
tools/testing/selftests/bpf/xdp_hw_metadata.c
444
static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id)
tools/testing/selftests/bpf/xdp_hw_metadata.c
447
struct pollfd fds[rxq + 1];
tools/testing/selftests/bpf/xdp_hw_metadata.c
455
for (i = 0; i < rxq; i++) {
tools/testing/selftests/bpf/xdp_hw_metadata.c
461
fds[rxq].fd = server_fd;
tools/testing/selftests/bpf/xdp_hw_metadata.c
462
fds[rxq].events = POLLIN;
tools/testing/selftests/bpf/xdp_hw_metadata.c
463
fds[rxq].revents = 0;
tools/testing/selftests/bpf/xdp_hw_metadata.c
468
for (i = 0; i < rxq; i++) {
tools/testing/selftests/bpf/xdp_hw_metadata.c
474
ret = poll(fds, rxq + 1, 1000);
tools/testing/selftests/bpf/xdp_hw_metadata.c
483
if (fds[rxq].revents)
tools/testing/selftests/bpf/xdp_hw_metadata.c
486
for (i = 0; i < rxq; i++) {
tools/testing/selftests/bpf/xdp_hw_metadata.c
626
for (i = 0; i < rxq; i++)
tools/testing/selftests/bpf/xdp_hw_metadata.c
752
rxq = rxq_num(ifname);
tools/testing/selftests/bpf/xdp_hw_metadata.c
753
printf("rxq: %d\n", rxq);
tools/testing/selftests/bpf/xdp_hw_metadata.c
755
if (launch_time_queue >= rxq || launch_time_queue < 0)
tools/testing/selftests/bpf/xdp_hw_metadata.c
766
if (i < rxq)
tools/testing/selftests/bpf/xdp_hw_metadata.c
828
rx_xsk = malloc(sizeof(struct xsk) * rxq);
tools/testing/selftests/bpf/xdp_hw_metadata.c
832
for (i = 0; i < rxq; i++) {
tools/testing/selftests/bpf/xdp_hw_metadata.c
864
for (i = 0; i < rxq; i++) {
tools/testing/selftests/bpf/xdp_hw_metadata.c
882
ret = verify_metadata(rx_xsk, rxq, server_fd, clock_id);
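
Finally, the xdp_hw_metadata tool sizes its pollfd array from the queue count it obtained via rxq_num(ifname): one slot per per-queue XDP socket, plus a trailing slot for the TCP control socket. The loop's shape, condensed into a sketch (xsk_fds stands in for the per-socket fds the tool derives from rx_xsk):

#include <poll.h>

static int wait_for_traffic(const int *xsk_fds, int rxq, int server_fd)
{
	struct pollfd fds[rxq + 1];
	int i;

	for (i = 0; i < rxq; i++) {
		fds[i].fd = xsk_fds[i];
		fds[i].events = POLLIN;
		fds[i].revents = 0;
	}
	/* the extra slot watches the control connection */
	fds[rxq].fd = server_fd;
	fds[rxq].events = POLLIN;
	fds[rxq].revents = 0;

	/* 1s timeout, matching the tool */
	return poll(fds, rxq + 1, 1000);
}
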