arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
329
pko_mem_queue_qos.s.qid = queue;
arch/mips/cavium-octeon/executive/cvmx-pko.c
83
config.s.qid = base_queue + queue;
arch/mips/include/asm/octeon/cvmx-pko-defs.h
1023
uint64_t qid:8;
arch/mips/include/asm/octeon/cvmx-pko-defs.h
1025
uint64_t qid:8;
arch/mips/include/asm/octeon/cvmx-pko-defs.h
1047
uint64_t qid:8;
arch/mips/include/asm/octeon/cvmx-pko-defs.h
1049
uint64_t qid:8;
arch/mips/include/asm/octeon/cvmx-pko-defs.h
1199
uint64_t qid:7;
arch/mips/include/asm/octeon/cvmx-pko-defs.h
1201
uint64_t qid:7;
arch/s390/include/asm/ap.h
149
static inline struct ap_queue_status ap_tapq(ap_qid_t qid,
arch/s390/include/asm/ap.h
162
: [qid] "d" (qid)
arch/s390/include/asm/ap.h
177
static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, int tbit,
arch/s390/include/asm/ap.h
181
qid |= 1UL << 23; /* set T bit*/
arch/s390/include/asm/ap.h
182
return ap_tapq(qid, info);
arch/s390/include/asm/ap.h
192
static inline struct ap_queue_status ap_rapq(ap_qid_t qid, int fbit)
arch/s390/include/asm/ap.h
194
unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */
arch/s390/include/asm/ap.h
217
static inline struct ap_queue_status ap_zapq(ap_qid_t qid, int fbit)
arch/s390/include/asm/ap.h
219
unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */
arch/s390/include/asm/ap.h
317
static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
arch/s390/include/asm/ap.h
321
unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */
arch/s390/include/asm/ap.h
366
static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
arch/s390/include/asm/ap.h
369
unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22);
arch/s390/include/asm/ap.h
397
static inline struct ap_queue_status ap_bapq(ap_qid_t qid)
arch/s390/include/asm/ap.h
399
unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */
arch/s390/include/asm/ap.h
423
static inline struct ap_queue_status ap_aapq(ap_qid_t qid, unsigned int sec_idx)
arch/s390/include/asm/ap.h
425
unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */
arch/s390/include/asm/ap.h
453
static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
arch/s390/include/asm/ap.h
457
unsigned long reg0 = qid | 0x40000000UL; /* 0x4... is last msg part */
arch/s390/include/asm/ap.h
509
static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
arch/s390/include/asm/ap.h
516
unsigned long reg0 = resgr0 && *resgr0 ? *resgr0 : qid | 0x80000000UL;
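
Note: the arch/s390/include/asm/ap.h hits above all follow one pattern: each helper builds a general-register word from the AP queue id in the low bits, a function code in bits 24 and up (1 = RAPQ, 2 = ZAPQ, 3 = AQIC, 5 = QACT, 7 = BAPQ, 8 = AAPQ), plus option bits such as the TAPQ T bit (bit 23). A minimal standalone sketch of that composition follows; the helper name and the plain uint16_t standing in for ap_qid_t are assumptions, not kernel code.

	#include <stdint.h>

	/* Sketch only: mirrors the "qid | (fc << 24)" pattern in the lines above. */
	static inline uint64_t ap_compose_reg0(uint16_t qid, uint64_t fc, int tbit)
	{
		uint64_t reg0 = qid;

		if (tbit)
			reg0 |= (uint64_t)1 << 23;	/* TAPQ T bit: request extra queue info */
		reg0 |= fc << 24;			/* function code selects the operation */
		return reg0;
	}
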
arch/s390/include/uapi/asm/zcrypt.h
222
unsigned int qid:16;
arch/s390/include/uapi/asm/zcrypt.h
342
unsigned int qid:14;
drivers/accel/habanalabs/common/command_submission.c
1476
static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
drivers/accel/habanalabs/common/command_submission.c
1481
if (qid == hdev->stream_master_qid_arr[i])
drivers/block/ublk_drv.c
1132
int qid)
drivers/block/ublk_drv.c
1134
return dev->queues[qid];
drivers/block/virtio_blk.c
354
int qid = vq->index;
drivers/block/virtio_blk.c
359
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
drivers/block/virtio_blk.c
362
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
drivers/block/virtio_blk.c
374
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
drivers/block/virtio_blk.c
433
int qid = hctx->queue_num;
drivers/block/virtio_blk.c
442
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
drivers/block/virtio_blk.c
443
err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
drivers/block/virtio_blk.c
445
virtqueue_kick(vblk->vqs[qid].vq);
drivers/block/virtio_blk.c
451
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
drivers/block/virtio_blk.c
456
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
drivers/block/virtio_blk.c
458
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
drivers/block/virtio_blk.c
461
virtqueue_notify(vblk->vqs[qid].vq);
drivers/block/xen-blkfront.c
897
int qid = hctx->queue_num;
drivers/block/xen-blkfront.c
901
rinfo = get_rinfo(info, qid);
drivers/clk/zynqmp/clkc.c
208
qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
drivers/clk/zynqmp/clkc.c
233
qdata.qid = PM_QID_CLOCK_GET_NAME;
drivers/clk/zynqmp/clkc.c
269
qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
drivers/clk/zynqmp/clkc.c
322
qdata.qid = PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS;
drivers/clk/zynqmp/clkc.c
366
qdata.qid = PM_QID_CLOCK_GET_PARENTS;
drivers/clk/zynqmp/clkc.c
392
qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
drivers/clk/zynqmp/divider.c
228
qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
drivers/crypto/chelsio/chcr_algo.c
1180
wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
drivers/crypto/chelsio/chcr_algo.c
1214
unsigned short qid,
drivers/crypto/chelsio/chcr_algo.c
1314
wrparam.qid = qid;
drivers/crypto/chelsio/chcr_algo.c
2348
unsigned short qid,
drivers/crypto/chelsio/chcr_algo.c
2473
chcr_add_aead_dst_ent(req, phys_cpl, qid);
drivers/crypto/chelsio/chcr_algo.c
2620
unsigned short qid)
drivers/crypto/chelsio/chcr_algo.c
2637
dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
drivers/crypto/chelsio/chcr_algo.c
2666
unsigned short qid)
drivers/crypto/chelsio/chcr_algo.c
2681
dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
drivers/crypto/chelsio/chcr_algo.c
2949
unsigned short qid,
drivers/crypto/chelsio/chcr_algo.c
3026
chcr_add_aead_dst_ent(req, phys_cpl, qid);
drivers/crypto/chelsio/chcr_algo.c
3046
unsigned short qid,
drivers/crypto/chelsio/chcr_algo.c
3146
chcr_add_aead_dst_ent(req, phys_cpl, qid);
drivers/crypto/chelsio/chcr_algo.c
354
static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
drivers/crypto/chelsio/chcr_algo.c
371
phys_cpl->rss_hdr_int.qid = htons(qid);
drivers/crypto/chelsio/chcr_algo.c
738
unsigned int qid, fid, portno;
drivers/crypto/chelsio/chcr_algo.c
741
qid = u_ctx->lldi.rxq_ids[rxqidx];
drivers/crypto/chelsio/chcr_algo.c
754
chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
drivers/crypto/chelsio/chcr_algo.c
848
chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
drivers/crypto/chelsio/chcr_algo.h
197
#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
drivers/crypto/chelsio/chcr_algo.h
200
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
drivers/crypto/chelsio/chcr_algo.h
205
#define FILL_ULPTX_CMD_DEST(cid, qid) \
drivers/crypto/chelsio/chcr_algo.h
211
ULP_TXPKT_FID_V(qid))
drivers/crypto/chelsio/chcr_algo.h
260
unsigned short qid;
drivers/crypto/chelsio/chcr_crypto.h
335
unsigned short qid);
drivers/crypto/chelsio/chcr_crypto.h
345
unsigned short qid);
drivers/dma/amd/qdma/qdma.c
261
enum dma_transfer_direction dir, u16 qid)
drivers/dma/amd/qdma/qdma.c
271
ret = qdma_prog_context(qdev, type, QDMA_CTXT_READ, qid, data);
drivers/dma/amd/qdma/qdma.c
277
qdma_err(qdev, "queue %d already in use", qid);
drivers/dma/amd/qdma/qdma.c
311
queue->qid, NULL);
drivers/dma/amd/qdma/qdma.c
347
enum dma_transfer_direction dir, u16 qid)
drivers/dma/amd/qdma/qdma.c
360
ret = qdma_prog_context(qdev, type, QDMA_CTXT_WRITE, qid, ctxt);
drivers/dma/amd/qdma/qdma.c
362
qdma_err(qdev, "Failed setup SW desc ctxt for queue: %d", qid);
drivers/dma/amd/qdma/qdma.c
466
q->qid = i;
drivers/dma/amd/qdma/qdma.c
585
ret = qdma_setup_queue_context(qdev, &desc, queue->dir, queue->qid);
drivers/dma/amd/qdma/qdma.c
617
queue->issued_vdesc->pidx, CHAN_STR(queue), queue->qid);
drivers/dma/amd/qdma/qdma.c
622
queue->pidx, CHAN_STR(queue), queue->qid);
drivers/dma/amd/qdma/qdma.c
840
u16 qid;
drivers/dma/amd/qdma/qdma.c
855
qid = FIELD_GET(QDMA_INTR_MASK_QID, intr_ent);
drivers/dma/amd/qdma/qdma.c
860
q += qid;
drivers/dma/amd/qdma/qdma.h
217
u16 qid;
drivers/firmware/xilinx/zynqmp-debug.c
272
qdata.qid = pm_api_arg[0];
drivers/firmware/xilinx/zynqmp-debug.c
281
switch (qdata.qid) {
drivers/firmware/xilinx/zynqmp.c
657
qdata.qid, qdata.arg1,
drivers/firmware/xilinx/zynqmp.c
667
if (qdata.qid == PM_QID_CLOCK_GET_NAME ||
drivers/firmware/xilinx/zynqmp.c
668
qdata.qid == PM_QID_PINCTRL_GET_FUNCTION_NAME)
drivers/firmware/xilinx/zynqmp.c
678
ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid,
drivers/firmware/xilinx/zynqmp.c
686
return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
685
struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
690
queue = xa_load(&uq_mgr->userq_xa, qid);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
770
u32 qid;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
870
r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
894
xa_erase(&uq_mgr->userq_xa, qid);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
902
queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
916
args->out.queue_id = qid;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
116
struct amdgpu_usermode_queue *amdgpu_userq_get(struct amdgpu_userq_mgr *uq_mgr, u32 qid);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1385
unsigned int *qid,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1390
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1391
int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1393
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1395
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1398
unsigned int qid);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1400
unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1415
unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
104
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1068
unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1074
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1076
pr_debug("amdkfd: No queue %d exists for operation\n", qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
114
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
248
unsigned int qid)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
258
q_properties->queue_id = qid;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
312
unsigned int *qid,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
34
struct process_queue_manager *pqm, unsigned int qid)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
358
*qid = q_data->q_id;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
360
retval = find_available_queue_slot(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
39
if ((pqn->q && pqn->q->properties.queue_id == qid) ||
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
40
(pqn->kq && pqn->kq->queue->properties.queue_id == qid))
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
402
retval = init_user_queue(pqm, dev, &q, properties, *qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
423
retval = init_user_queue(pqm, dev, &q, properties, *qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
48
unsigned int qid)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
485
clear_bit(*qid, pqm->queue_slot_bitmap);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
492
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
50
if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
504
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
53
if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
54
pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
553
clear_bit(qid, pqm->queue_slot_bitmap);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
564
unsigned int qid, struct queue_properties *p)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
569
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
571
pr_debug("No queue %d exists for update operation\n", qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
62
unsigned int *qid)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
622
unsigned int qid, struct mqd_update_info *minfo)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
627
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
629
pr_debug("No queue %d exists for update operation\n", qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
664
unsigned int qid)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
668
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
673
unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
680
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
683
qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
78
*qid = found;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
792
unsigned int qid,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
798
pqn = get_queue_by_qid(pqm, qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
800
pr_debug("amdkfd: No queue %d exists for operation\n", qid);
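
Note: the amdkfd process-queue-manager hits above allocate and release qids out of a per-process bitmap: find_available_queue_slot() claims the first clear bit (or rejects a requested qid already set via __test_and_set_bit) and returns it through *qid, and teardown clears the bit again. A rough standalone sketch of that allocate/free pattern follows, with hypothetical names and a plain array in place of the kernel bitmap helpers.

	#include <stdbool.h>

	#define MAX_QUEUES 1024			/* stand-in for the per-process queue limit */

	static bool queue_slot[MAX_QUEUES];	/* stand-in for queue_slot_bitmap */

	/* Return 0 and write the first free slot to *qid, or -1 if all are in use. */
	static int alloc_qid(unsigned int *qid)
	{
		for (unsigned int i = 0; i < MAX_QUEUES; i++) {
			if (!queue_slot[i]) {
				queue_slot[i] = true;
				*qid = i;
				return 0;
			}
		}
		return -1;
	}

	static void free_qid(unsigned int qid)
	{
		if (qid < MAX_QUEUES)
			queue_slot[qid] = false;
	}
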
drivers/gpu/drm/panthor/panthor_sched.c
3485
u64 drm_client_id, u32 gid, u32 qid)
drivers/gpu/drm/panthor/panthor_sched.c
3577
queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
drivers/infiniband/hw/bng_re/bng_roce_hsi.h
5604
__le32 qid;
drivers/infiniband/hw/bng_re/bng_roce_hsi.h
5676
__le32 qid;
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
289
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
292
return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
drivers/infiniband/hw/bnxt_re/roce_hsi.h
4178
__le32 qid;
drivers/infiniband/hw/bnxt_re/roce_hsi.h
4250
__le32 qid;
drivers/infiniband/hw/cxgb4/cm.c
1881
__func__, ep->com.qp->wq.sq.qid, ep,
drivers/infiniband/hw/cxgb4/cm.c
3036
ep->com.qp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/cq.c
195
CQE_QPID_V(wq->sq.qid));
drivers/infiniband/hw/cxgb4/cq.c
229
CQE_QPID_V(wq->sq.qid));
drivers/infiniband/hw/cxgb4/cq.c
425
WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
drivers/infiniband/hw/cxgb4/cq.c
454
(CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
drivers/infiniband/hw/cxgb4/device.c
107
le.qid = wq->sq.qid;
drivers/infiniband/hw/cxgb4/device.c
113
le.qid = wq->rq.qid;
drivers/infiniband/hw/cxgb4/device.c
1186
u32 qid = be32_to_cpu(rc->pldbuflen_qid);
drivers/infiniband/hw/cxgb4/device.c
1187
c4iw_ev_handler(dev, qid);
drivers/infiniband/hw/cxgb4/device.c
1378
qp->wq.sq.qid,
drivers/infiniband/hw/cxgb4/device.c
1383
pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/device.c
1391
qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
1397
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
drivers/infiniband/hw/cxgb4/device.c
152
lep->qid, lep->opcode,
drivers/infiniband/hw/cxgb4/device.c
250
if (id != qp->wq.sq.qid)
drivers/infiniband/hw/cxgb4/device.c
271
qp->wq.sq.qid, qp->srq ? "srq" : "rq",
drivers/infiniband/hw/cxgb4/device.c
272
qp->srq ? qp->srq->idx : qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
292
qp->wq.sq.qid, qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
306
qp->wq.sq.qid, qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
479
dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
drivers/infiniband/hw/cxgb4/device.c
480
dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
drivers/infiniband/hw/cxgb4/device.c
526
dev->rdev.stats.qid.max = 0;
drivers/infiniband/hw/cxgb4/device.c
527
dev->rdev.stats.qid.fail = 0;
drivers/infiniband/hw/cxgb4/device.c
757
if (!(entry->qid & rdev->qpmask)) {
drivers/infiniband/hw/cxgb4/device.c
759
entry->qid);
drivers/infiniband/hw/cxgb4/device.c
761
rdev->stats.qid.cur -= rdev->qpmask + 1;
drivers/infiniband/hw/cxgb4/device.c
848
rdev->stats.qid.total = rdev->lldi.vr->qp.size;
drivers/infiniband/hw/cxgb4/ev.c
211
CQE_STATUS(err_cqe), qhp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/ev.c
222
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
drivers/infiniband/hw/cxgb4/ev.c
228
chp = xa_load(&dev->cqs, qid);
drivers/infiniband/hw/cxgb4/ev.c
238
pr_debug("unknown cqid 0x%x\n", qid);
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
1043
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
1045
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
1048
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
105
u32 qid;
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
1054
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
129
struct c4iw_stat qid;
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
165
u16 qid;
drivers/infiniband/hw/cxgb4/qp.c
1049
CQE_QPID_V(qhp->wq.sq.qid));
drivers/infiniband/hw/cxgb4/qp.c
1564
pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
drivers/infiniband/hw/cxgb4/qp.c
159
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
167
c4iw_put_qpid(rdev, wq->rq.qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
1704
pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
drivers/infiniband/hw/cxgb4/qp.c
1724
qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
drivers/infiniband/hw/cxgb4/qp.c
1761
qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
drivers/infiniband/hw/cxgb4/qp.c
177
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
drivers/infiniband/hw/cxgb4/qp.c
1806
wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/qp.c
1807
wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/qp.c
1812
wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
drivers/infiniband/hw/cxgb4/qp.c
1827
qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
drivers/infiniband/hw/cxgb4/qp.c
184
ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
drivers/infiniband/hw/cxgb4/qp.c
1851
qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
drivers/infiniband/hw/cxgb4/qp.c
2036
qhp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/qp.c
2094
__xa_erase(&rhp->qps, qhp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/qp.c
2104
pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/qp.c
213
wq->sq.qid = c4iw_get_qpid(rdev, uctx);
drivers/infiniband/hw/cxgb4/qp.c
214
if (!wq->sq.qid)
drivers/infiniband/hw/cxgb4/qp.c
218
wq->rq.qid = c4iw_get_qpid(rdev, uctx);
drivers/infiniband/hw/cxgb4/qp.c
219
if (!wq->rq.qid) {
drivers/infiniband/hw/cxgb4/qp.c
2216
ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
drivers/infiniband/hw/cxgb4/qp.c
2257
uresp.sqid = qhp->wq.sq.qid;
drivers/infiniband/hw/cxgb4/qp.c
2261
uresp.rqid = qhp->wq.rq.qid;
drivers/infiniband/hw/cxgb4/qp.c
2347
qhp->ibqp.qp_num = qhp->wq.sq.qid;
drivers/infiniband/hw/cxgb4/qp.c
2352
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
drivers/infiniband/hw/cxgb4/qp.c
2353
attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
drivers/infiniband/hw/cxgb4/qp.c
2369
xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
drivers/infiniband/hw/cxgb4/qp.c
2519
res->u.srq.eqid = cpu_to_be32(wq->qid);
drivers/infiniband/hw/cxgb4/qp.c
2529
c4iw_put_qpid(rdev, wq->qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
2545
wq->qid = c4iw_get_qpid(rdev, uctx);
drivers/infiniband/hw/cxgb4/qp.c
2546
if (!wq->qid)
drivers/infiniband/hw/cxgb4/qp.c
2573
wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
drivers/infiniband/hw/cxgb4/qp.c
2583
pci_name(rdev->lldi.pdev), wq->qid);
drivers/infiniband/hw/cxgb4/qp.c
2612
res->u.srq.eqid = cpu_to_be32(wq->qid);
drivers/infiniband/hw/cxgb4/qp.c
2636
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
drivers/infiniband/hw/cxgb4/qp.c
2642
__func__, srq->idx, wq->qid, srq->pdid, wq->queue,
drivers/infiniband/hw/cxgb4/qp.c
2660
c4iw_put_qpid(rdev, wq->qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
2770
uresp.srqid = srq->wq.qid;
drivers/infiniband/hw/cxgb4/qp.c
279
wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
drivers/infiniband/hw/cxgb4/qp.c
2801
__func__, srq->wq.qid, srq->idx, srq->wq.size,
drivers/infiniband/hw/cxgb4/qp.c
2832
pr_debug("%s id %d\n", __func__, srq->wq.qid);
drivers/infiniband/hw/cxgb4/qp.c
284
wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
drivers/infiniband/hw/cxgb4/qp.c
294
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
drivers/infiniband/hw/cxgb4/qp.c
345
res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
drivers/infiniband/hw/cxgb4/qp.c
374
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
drivers/infiniband/hw/cxgb4/qp.c
379
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
drivers/infiniband/hw/cxgb4/qp.c
384
wq->sq.qid, wq->rq.qid, wq->db,
drivers/infiniband/hw/cxgb4/qp.c
405
c4iw_put_qpid(rdev, wq->rq.qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
407
c4iw_put_qpid(rdev, wq->sq.qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
998
CQE_QPID_V(qhp->wq.sq.qid));
drivers/infiniband/hw/cxgb4/resource.c
111
u32 qid;
drivers/infiniband/hw/cxgb4/resource.c
119
qid = entry->qid;
drivers/infiniband/hw/cxgb4/resource.c
122
qid = c4iw_get_resource(&rdev->resource.qid_table);
drivers/infiniband/hw/cxgb4/resource.c
123
if (!qid)
drivers/infiniband/hw/cxgb4/resource.c
126
rdev->stats.qid.cur += rdev->qpmask + 1;
drivers/infiniband/hw/cxgb4/resource.c
128
for (i = qid+1; i & rdev->qpmask; i++) {
drivers/infiniband/hw/cxgb4/resource.c
132
entry->qid = i;
drivers/infiniband/hw/cxgb4/resource.c
143
entry->qid = qid;
drivers/infiniband/hw/cxgb4/resource.c
145
for (i = qid+1; i & rdev->qpmask; i++) {
drivers/infiniband/hw/cxgb4/resource.c
149
entry->qid = i;
drivers/infiniband/hw/cxgb4/resource.c
155
pr_debug("qid 0x%x\n", qid);
drivers/infiniband/hw/cxgb4/resource.c
157
if (rdev->stats.qid.cur > rdev->stats.qid.max)
drivers/infiniband/hw/cxgb4/resource.c
158
rdev->stats.qid.max = rdev->stats.qid.cur;
drivers/infiniband/hw/cxgb4/resource.c
160
return qid;
drivers/infiniband/hw/cxgb4/resource.c
163
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
drivers/infiniband/hw/cxgb4/resource.c
171
pr_debug("qid 0x%x\n", qid);
drivers/infiniband/hw/cxgb4/resource.c
172
entry->qid = qid;
drivers/infiniband/hw/cxgb4/resource.c
181
u32 qid;
drivers/infiniband/hw/cxgb4/resource.c
189
qid = entry->qid;
drivers/infiniband/hw/cxgb4/resource.c
192
qid = c4iw_get_resource(&rdev->resource.qid_table);
drivers/infiniband/hw/cxgb4/resource.c
193
if (!qid) {
drivers/infiniband/hw/cxgb4/resource.c
195
rdev->stats.qid.fail++;
drivers/infiniband/hw/cxgb4/resource.c
200
rdev->stats.qid.cur += rdev->qpmask + 1;
drivers/infiniband/hw/cxgb4/resource.c
202
for (i = qid+1; i & rdev->qpmask; i++) {
drivers/infiniband/hw/cxgb4/resource.c
206
entry->qid = i;
drivers/infiniband/hw/cxgb4/resource.c
217
entry->qid = qid;
drivers/infiniband/hw/cxgb4/resource.c
219
for (i = qid + 1; i & rdev->qpmask; i++) {
drivers/infiniband/hw/cxgb4/resource.c
223
entry->qid = i;
drivers/infiniband/hw/cxgb4/resource.c
229
pr_debug("qid 0x%x\n", qid);
drivers/infiniband/hw/cxgb4/resource.c
231
if (rdev->stats.qid.cur > rdev->stats.qid.max)
drivers/infiniband/hw/cxgb4/resource.c
232
rdev->stats.qid.max = rdev->stats.qid.cur;
drivers/infiniband/hw/cxgb4/resource.c
234
return qid;
drivers/infiniband/hw/cxgb4/resource.c
237
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
drivers/infiniband/hw/cxgb4/resource.c
245
pr_debug("qid 0x%x\n", qid);
drivers/infiniband/hw/cxgb4/resource.c
246
entry->qid = qid;
drivers/infiniband/hw/cxgb4/restrack.c
42
if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
drivers/infiniband/hw/cxgb4/restrack.c
70
if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
drivers/infiniband/hw/cxgb4/t4.h
343
u32 qid;
drivers/infiniband/hw/cxgb4/t4.h
370
u32 qid;
drivers/infiniband/hw/cxgb4/t4.h
407
u32 qid;
drivers/infiniband/hw/cxgb4/t4.h
51
__be16 qid;
drivers/infiniband/hw/cxgb4/t4.h
622
writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
drivers/infiniband/hw/cxgb4/t4.h
647
writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
drivers/infiniband/hw/ionic/ionic_admin.c
101
u32 qtf, qid;
drivers/infiniband/hw/ionic/ionic_admin.c
130
qid = ionic_v1_cqe_qtf_qid(qtf);
drivers/infiniband/hw/ionic/ionic_admin.c
139
if (unlikely(qid != aq->aqid)) {
drivers/infiniband/hw/ionic/ionic_admin.c
141
"bad cqe qid %u\n", qid);
drivers/infiniband/hw/ionic/ionic_admin.c
149
aq->q.cons, qid);
drivers/infiniband/hw/ionic/ionic_admin.c
473
u32 qid, u32 cid, u16 opcode)
drivers/infiniband/hw/ionic/ionic_admin.c
480
.qid_ver = cpu_to_le32(qid),
drivers/infiniband/hw/ionic/ionic_admin.c
899
u32 evt, qid;
drivers/infiniband/hw/ionic/ionic_admin.c
915
qid = ionic_v1_eqe_evt_qid(evt);
drivers/infiniband/hw/ionic/ionic_admin.c
919
ionic_cq_event(dev, qid, code);
drivers/infiniband/hw/ionic/ionic_admin.c
923
ionic_qp_event(dev, qid, code);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2449
int prod, qtf, qid, type;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2461
qid = ionic_v1_cqe_qtf_qid(qtf);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2464
if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN)
drivers/infiniband/hw/ionic/ionic_datapath.c
478
u32 qtf, qid;
drivers/infiniband/hw/ionic/ionic_datapath.c
510
qid = ionic_v1_cqe_qtf_qid(qtf);
drivers/infiniband/hw/ionic/ionic_datapath.c
520
qp = xa_load(&dev->qp_tbl, qid);
drivers/infiniband/hw/ionic/ionic_datapath.c
522
ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid);
drivers/infiniband/hw/ionic/ionic_hw_stats.c
85
dma_addr_t dma, size_t len, int qid, int op)
drivers/infiniband/hw/ionic/ionic_hw_stats.c
95
.id_ver = cpu_to_le32(qid),
drivers/infiniband/hw/ionic/ionic_queue.h
218
static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
drivers/infiniband/hw/ionic/ionic_queue.h
220
q->dbell = IONIC_DBELL_QID(qid);
drivers/infiniband/hw/ionic/ionic_res.h
146
static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift)
drivers/infiniband/hw/ionic/ionic_res.h
148
u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
drivers/infiniband/hw/ionic/ionic_res.h
149
u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
drivers/infiniband/hw/ionic/ionic_res.h
150
u32 idx_bits = qid & (BIT(qgrp_shift) - 1);
drivers/infiniband/hw/mana/mana_ib.h
567
u32 qid, bool is_sq)
drivers/infiniband/hw/mana/mana_ib.h
573
qid |= MANA_SENDQ_MASK;
drivers/infiniband/hw/mana/mana_ib.h
576
qp = xa_load(&mdev->qp_table_wq, qid);
drivers/iommu/riscv/iommu.c
111
_q->qid = RISCV_IOMMU_INTR_ ## name; \
drivers/iommu/riscv/iommu.c
121
#define Q_IPSR(q) BIT((q)->qid)
drivers/iommu/riscv/iommu.c
184
dev_err(iommu->dev, "queue #%u allocation failed\n", queue->qid);
drivers/iommu/riscv/iommu.c
192
queue->qid, logsz + 1);
drivers/iommu/riscv/iommu.c
224
const unsigned int irq = iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)];
drivers/iommu/riscv/iommu.c
245
if (queue->qid == RISCV_IOMMU_INTR_CQ)
drivers/iommu/riscv/iommu.c
272
dev_err(iommu->dev, "queue #%u failed to start\n", queue->qid);
drivers/iommu/riscv/iommu.c
294
free_irq(iommu->irqs[riscv_iommu_queue_vec(iommu, queue->qid)], queue);
drivers/iommu/riscv/iommu.c
302
queue->qid, csr);
drivers/iommu/riscv/iommu.c
470
queue->qid,
drivers/iommu/riscv/iommu.c
554
queue->qid,
drivers/iommu/riscv/iommu.h
33
u8 qid; /* queue identifier, same as RISCV_IOMMU_INTR_XX */
drivers/net/bonding/bond_options.c
1705
u16 qid;
drivers/net/bonding/bond_options.c
1716
if (sscanf(++delim, "%hd\n", &qid) != 1)
drivers/net/bonding/bond_options.c
1721
qid > bond->dev->real_num_tx_queues)
drivers/net/bonding/bond_options.c
1737
else if (qid && qid == slave->queue_id) {
drivers/net/bonding/bond_options.c
1746
WRITE_ONCE(update_slave->queue_id, qid);
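
Note: the bonding hits above parse a "slave:qid" style option string (the sscanf of the text after the delimiter), bound-check the value against the device's real_num_tx_queues, and store it in the matching slave's queue_id. A tiny standalone parsing sketch follows; the buffer contents and the exact format string here are illustrative assumptions, not the bonding option syntax verbatim.

	#include <stdio.h>

	int main(void)
	{
		char buf[] = "eth0:3";		/* example "<slave ifname>:<qid>" string */
		char ifname[16];
		unsigned short qid;

		/* Read the interface name up to ':' and the queue id after it. */
		if (sscanf(buf, "%15[^:]:%hu", ifname, &qid) == 2)
			printf("slave %s -> tx queue id %hu\n", ifname, qid);
		return 0;
	}
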
drivers/net/ethernet/airoha/airoha_eth.c
1929
int i, qid;
drivers/net/ethernet/airoha/airoha_eth.c
1933
qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
drivers/net/ethernet/airoha/airoha_eth.c
1937
qid / AIROHA_NUM_QOS_QUEUES) |
drivers/net/ethernet/airoha/airoha_eth.c
1939
qid % AIROHA_NUM_QOS_QUEUES) |
drivers/net/ethernet/airoha/airoha_eth.c
1964
q = &qdma->q_tx[qid];
drivers/net/ethernet/airoha/airoha_eth.c
1970
txq = netdev_get_tx_queue(dev, qid);
drivers/net/ethernet/airoha/airoha_eth.c
2027
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
drivers/net/ethernet/airoha/airoha_eth.c
2536
opt->qid = AIROHA_NUM_TX_RING + channel;
drivers/net/ethernet/airoha/airoha_eth.c
2760
opt->qid = AIROHA_NUM_TX_RING + channel;
drivers/net/ethernet/airoha/airoha_eth.c
543
int qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
576
airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
drivers/net/ethernet/airoha/airoha_eth.c
609
int qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
651
skb_record_rx_queue(q->skb, qid);
drivers/net/ethernet/airoha/airoha_eth.c
720
int i, qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
721
int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
drivers/net/ethernet/airoha/airoha_eth.c
725
if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
drivers/net/ethernet/airoha/airoha_eth.c
729
BIT(qid % RX_DONE_HIGH_OFFSET));
drivers/net/ethernet/airoha/airoha_eth.c
750
int qid = q - &qdma->q_rx[0], thr;
drivers/net/ethernet/airoha/airoha_eth.c
777
airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
drivers/net/ethernet/airoha/airoha_eth.c
778
airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
drivers/net/ethernet/airoha/airoha_eth.c
783
airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
drivers/net/ethernet/airoha/airoha_eth.c
785
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
drivers/net/ethernet/airoha/airoha_eth.c
787
airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
drivers/net/ethernet/airoha/airoha_eth.c
798
int qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
822
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
drivers/net/ethernet/airoha/airoha_eth.c
866
u32 qid, val = irq_q->q[head];
drivers/net/ethernet/airoha/airoha_eth.c
881
qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
drivers/net/ethernet/airoha/airoha_eth.c
882
if (qid >= ARRAY_SIZE(qdma->q_tx))
drivers/net/ethernet/airoha/airoha_eth.c
885
q = &qdma->q_tx[qid];
drivers/net/ethernet/airoha/airoha_eth.c
954
int i, qid = q - &qdma->q_tx[0];
drivers/net/ethernet/airoha/airoha_eth.c
981
airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
drivers/net/ethernet/airoha/airoha_eth.c
984
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
drivers/net/ethernet/airoha/airoha_eth.c
985
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
drivers/net/ethernet/airoha/airoha_eth.c
987
airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
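
Note: in the airoha_eth.c transmit path above, the flat qid taken from skb_get_queue_mapping() is folded into the TX ring array and then split into a QoS channel (the quotient) and a per-channel QoS queue (the remainder). A small sketch of that split follows; the constant value and names are placeholders, not the driver's definitions.

	#include <stdio.h>

	#define NUM_QOS_QUEUES	8	/* placeholder for AIROHA_NUM_QOS_QUEUES */

	int main(void)
	{
		unsigned int qid = 13;	/* example flat TX queue index */

		printf("qid %u -> channel %u, qos queue %u\n",
		       qid, qid / NUM_QOS_QUEUES, qid % NUM_QOS_QUEUES);
		return 0;
	}
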
drivers/net/ethernet/airoha/airoha_npu.c
567
static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
drivers/net/ethernet/airoha/airoha_npu.c
571
return REG_TX_BASE(qid + 2);
drivers/net/ethernet/airoha/airoha_npu.c
573
return REG_RX_BASE(qid);
drivers/net/ethernet/amazon/ena/ena_com.c
1248
u16 qid;
drivers/net/ethernet/amazon/ena/ena_com.c
1252
qid = rss->host_rss_ind_tbl[i];
drivers/net/ethernet/amazon/ena/ena_com.c
1253
if (qid >= ENA_TOTAL_NUM_QUEUES)
drivers/net/ethernet/amazon/ena/ena_com.c
1256
io_sq = &ena_dev->io_sq_queues[qid];
drivers/net/ethernet/amazon/ena/ena_com.c
1385
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
drivers/net/ethernet/amazon/ena/ena_com.c
1389
if (qid >= ENA_TOTAL_NUM_QUEUES) {
drivers/net/ethernet/amazon/ena/ena_com.c
1390
netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
drivers/net/ethernet/amazon/ena/ena_com.c
1395
*io_sq = &ena_dev->io_sq_queues[qid];
drivers/net/ethernet/amazon/ena/ena_com.c
1396
*io_cq = &ena_dev->io_cq_queues[qid];
drivers/net/ethernet/amazon/ena/ena_com.c
2057
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
drivers/net/ethernet/amazon/ena/ena_com.c
2059
ctx->qid, ENA_TOTAL_NUM_QUEUES);
drivers/net/ethernet/amazon/ena/ena_com.c
2063
io_sq = &ena_dev->io_sq_queues[ctx->qid];
drivers/net/ethernet/amazon/ena/ena_com.c
2064
io_cq = &ena_dev->io_cq_queues[ctx->qid];
drivers/net/ethernet/amazon/ena/ena_com.c
2072
io_cq->qid = ctx->qid;
drivers/net/ethernet/amazon/ena/ena_com.c
2078
io_sq->qid = ctx->qid;
drivers/net/ethernet/amazon/ena/ena_com.c
2110
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
drivers/net/ethernet/amazon/ena/ena_com.c
2115
if (qid >= ENA_TOTAL_NUM_QUEUES) {
drivers/net/ethernet/amazon/ena/ena_com.c
2117
qid, ENA_TOTAL_NUM_QUEUES);
drivers/net/ethernet/amazon/ena/ena_com.c
2121
io_sq = &ena_dev->io_sq_queues[qid];
drivers/net/ethernet/amazon/ena/ena_com.c
2122
io_cq = &ena_dev->io_cq_queues[qid];
drivers/net/ethernet/amazon/ena/ena_com.h
1036
return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
drivers/net/ethernet/amazon/ena/ena_com.h
1046
return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
drivers/net/ethernet/amazon/ena/ena_com.h
131
u16 qid;
drivers/net/ethernet/amazon/ena/ena_com.h
172
u16 qid;
drivers/net/ethernet/amazon/ena/ena_com.h
409
u16 qid;
drivers/net/ethernet/amazon/ena/ena_com.h
532
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
drivers/net/ethernet/amazon/ena/ena_com.h
542
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
drivers/net/ethernet/amazon/ena/ena_eth_com.c
255
count, io_cq->qid, cdesc->req_id);
drivers/net/ethernet/amazon/ena/ena_eth_com.c
274
io_cq->qid, *first_cdesc_idx, count);
drivers/net/ethernet/amazon/ena/ena_eth_com.c
569
"Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
drivers/net/ethernet/amazon/ena/ena_eth_com.c
597
"[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
drivers/net/ethernet/amazon/ena/ena_eth_com.c
635
"[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
drivers/net/ethernet/amazon/ena/ena_eth_com.c
67
"Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
drivers/net/ethernet/amazon/ena/ena_eth_com.h
146
"Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
drivers/net/ethernet/amazon/ena/ena_eth_com.h
158
"Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
drivers/net/ethernet/amazon/ena/ena_eth_com.h
164
"Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
drivers/net/ethernet/amazon/ena/ena_netdev.c
1205
"%s qid %d\n", __func__, rx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1232
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
drivers/net/ethernet/amazon/ena/ena_netdev.c
1282
skb_record_rx_queue(skb, rx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
130
ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
167
struct ena_ring *ring, u16 qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
169
ring->qid = qid;
drivers/net/ethernet/amazon/ena/ena_netdev.c
173
ring->napi = &adapter->ena_napi[qid].napi;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1790
napi->qid = i;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1889
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
1900
tx_ring = &adapter->tx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
1901
msix_vector = ENA_IO_IRQ_IDX(qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1902
ena_qid = ENA_IO_TXQ_IDX(qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1907
ctx.qid = ena_qid;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1917
qid, rc);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1927
qid, rc);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1957
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
1968
rx_ring = &adapter->rx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
1969
msix_vector = ENA_IO_IRQ_IDX(qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1970
ena_qid = ENA_IO_RXQ_IDX(qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1974
ctx.qid = ena_qid;
drivers/net/ethernet/amazon/ena/ena_netdev.c
1985
qid, rc);
drivers/net/ethernet/amazon/ena/ena_netdev.c
1995
qid, rc);
drivers/net/ethernet/amazon/ena/ena_netdev.c
234
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
236
struct ena_ring *tx_ring = &adapter->tx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
237
struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
drivers/net/ethernet/amazon/ena/ena_netdev.c
2612
int qid, rc;
drivers/net/ethernet/amazon/ena/ena_netdev.c
2616
qid = skb_get_queue_mapping(skb);
drivers/net/ethernet/amazon/ena/ena_netdev.c
2617
tx_ring = &adapter->tx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
2618
txq = netdev_get_tx_queue(dev, qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
2663
__func__, qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
301
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
303
struct ena_ring *tx_ring = &adapter->tx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
3407
rx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
3449
tx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
3481
tx_ring->qid, i, time_since_last_napi, napi_scheduled);
drivers/net/ethernet/amazon/ena/ena_netdev.c
3510
int qid, budget, rc;
drivers/net/ethernet/amazon/ena/ena_netdev.c
3529
qid = adapter->last_monitored_tx_qid;
drivers/net/ethernet/amazon/ena/ena_netdev.c
3532
qid = (qid + 1) % io_queue_count;
drivers/net/ethernet/amazon/ena/ena_netdev.c
3534
tx_ring = &adapter->tx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
3535
rx_ring = &adapter->rx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
3541
rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
drivers/net/ethernet/amazon/ena/ena_netdev.c
3549
adapter->last_monitored_tx_qid = qid;
drivers/net/ethernet/amazon/ena/ena_netdev.c
368
u32 qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
370
struct ena_ring *rx_ring = &adapter->rx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
371
struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
drivers/net/ethernet/amazon/ena/ena_netdev.c
426
u32 qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
428
struct ena_ring *rx_ring = &adapter->rx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
587
rx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
596
rx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
608
rx_ring->qid, i, num);
drivers/net/ethernet/amazon/ena/ena_netdev.c
621
u32 qid)
drivers/net/ethernet/amazon/ena/ena_netdev.c
623
struct ena_ring *rx_ring = &adapter->rx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_netdev.c
701
is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
712
tx_ring->qid, i);
drivers/net/ethernet/amazon/ena/ena_netdev.c
717
tx_ring->qid, i);
drivers/net/ethernet/amazon/ena/ena_netdev.c
730
tx_ring->qid));
drivers/net/ethernet/amazon/ena/ena_netdev.c
782
is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
drivers/net/ethernet/amazon/ena/ena_netdev.c
788
req_id, ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
819
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_netdev.c
850
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
drivers/net/ethernet/amazon/ena/ena_netdev.c
870
tx_ring->qid, tx_pkts);
drivers/net/ethernet/amazon/ena/ena_netdev.c
962
"Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
drivers/net/ethernet/amazon/ena/ena_netdev.h
131
u32 qid;
drivers/net/ethernet/amazon/ena/ena_netdev.h
253
u16 qid;
drivers/net/ethernet/amazon/ena/ena_xdp.c
126
int qid, i, nxmit = 0;
drivers/net/ethernet/amazon/ena/ena_xdp.c
138
qid = smp_processor_id() % adapter->xdp_num_queues;
drivers/net/ethernet/amazon/ena/ena_xdp.c
139
qid += adapter->xdp_first_ring;
drivers/net/ethernet/amazon/ena/ena_xdp.c
140
tx_ring = &adapter->tx_ring[qid];
drivers/net/ethernet/amazon/ena/ena_xdp.c
200
rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
drivers/net/ethernet/amazon/ena/ena_xdp.c
203
rx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_xdp.c
207
rx_ring->qid, rc);
drivers/net/ethernet/amazon/ena/ena_xdp.c
216
rx_ring->qid, rc);
drivers/net/ethernet/amazon/ena/ena_xdp.c
228
rx_ring->qid);
drivers/net/ethernet/amazon/ena/ena_xdp.c
412
"tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id);
drivers/net/ethernet/amazon/ena/ena_xdp.c
420
tx_ring->qid, tx_pkts);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1951
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1954
u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2245
int qid, rc = 0;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2256
for (qid = 0; qid < vf_rxq_count(vf); qid++) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2257
qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2258
qstate.params.update_tpa.sge_map = sge_addr[qid];
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2260
vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2261
U64_LO(sge_addr[qid]));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2265
U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2266
vf->abs_vfid, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
235
struct bnx2x_virtf *vf, int qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
241
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
245
q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
268
bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
275
int qid)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
287
q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
307
if (bnx2x_vfq(vf, qid, cxt)) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
308
bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
309
bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
335
int qid, bool drv_only, int type)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
348
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
351
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
353
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
379
struct bnx2x_virtf *vf, int qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
394
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
399
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
403
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
436
int qid, bool drv_only)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
447
rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
461
bnx2x_vf_mac_vlan_config(bp, vf, qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
473
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
478
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
480
rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
489
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
494
int qid)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
498
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
501
if ((qid == LEADING_IDX) &&
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
503
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
507
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
511
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
518
if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
522
qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
533
BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
586
static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
591
struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
612
int qid, unsigned long accept_flags)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
618
bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
620
vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
624
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
628
DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
631
if (qid == LEADING_IDX) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
632
rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
638
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
643
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
648
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
660
rc = bnx2x_vf_queue_destroy(bp, vf, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
666
vf->abs_vfid, qid, rc);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
453
int qid, bool drv_only);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
455
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
458
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
464
int qid, unsigned long accept_flags);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1948
int qid = mbx->msg->req.q_op.vf_qid;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1952
vf->abs_vfid, qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1954
rc = bnx2x_vf_queue_teardown(bp, vf, qid);
drivers/net/ethernet/brocade/bna/bfi.h
27
u8 qid;
drivers/net/ethernet/brocade/bna/bfi.h
36
#define bfi_mhdr_2_qid(_mh) ((_mh)->mtag.h2i.qid)
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1254
int qid = skb_get_queue_mapping(skb);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1255
struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1271
qid += nic->xdp_tx_queues;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1275
if (qid >= MAX_SND_QUEUES_PER_QS) {
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1276
tmp = qid / MAX_SND_QUEUES_PER_QS;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1285
qid = qid % MAX_SND_QUEUES_PER_QS;
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1288
sq = &snic->qs->sq[qid];
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1290
!nicvf_sq_append_skb(snic, sq, skb, qid)) {
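
Note: the thunder/nicvf transmit-path hits above map a netdev queue index onto a hardware send queue: an index at or past MAX_SND_QUEUES_PER_QS selects a queue set by the quotient, and the remainder picks the SQ inside that set. A standalone sketch of that div/mod mapping follows, with placeholder names and values.

	#include <stdio.h>

	#define SND_QUEUES_PER_QS 8	/* placeholder for MAX_SND_QUEUES_PER_QS */

	int main(void)
	{
		unsigned int qid = 19;	/* example global send-queue index */
		unsigned int qset = qid / SND_QUEUES_PER_QS;	/* which queue set the index falls into */

		qid = qid % SND_QUEUES_PER_QS;			/* SQ within that queue set */
		printf("qset %u, local sq %u\n", qset, qid);
		return 0;
	}
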
drivers/net/ethernet/cavium/thunder/nicvf_main.c
1302
"Transmit ring full, stopping SQ%d\n", qid);
drivers/net/ethernet/chelsio/cxgb/sge.c
1698
unsigned int qid, struct net_device *dev)
drivers/net/ethernet/chelsio/cxgb/sge.c
1701
struct cmdQ *q = &sge->cmdQ[qid];
drivers/net/ethernet/chelsio/cxgb/sge.c
1735
if (sge->tx_sched && !qid && skb->dev) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1770
if (qid)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1420
unsigned int qid)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1427
struct sge_qset *qs = txq_to_qset(q, qid);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1429
set_bit(qid, &qs->txq_stopped);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1433
test_and_clear_bit(qid, &qs->txq_stopped))
drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
1167
__u8 qid:4;
drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
1171
__u8 qid:4;
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
336
u32 qid;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1045
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1050
QUENUMSELECT_V(qid));
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1058
struct cudbg_error *cudbg_err, int qid)
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1066
qsize = cudbg_cim_obq_size(padap, qid);
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
1072
no_of_read_words = t4_read_cim_obq(padap, qid,
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
974
struct cudbg_error *cudbg_err, int qid)
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
988
no_of_read_words = t4_read_cim_ibq(padap, qid,
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
173
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
246
entry->qid = txq->cntxt_id;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
258
entry->qid = rxq->cntxt_id;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
270
entry->qid = flq->cntxt_id;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1874
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1916
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1918
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1959
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
396
unsigned int qid = (uintptr_t)inode->i_private & 7;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
397
struct adapter *adap = inode->i_private - qid;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
403
ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
423
unsigned int qid = (uintptr_t)inode->i_private & 7;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
424
struct adapter *adap = inode->i_private - qid;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
430
ret = t4_read_cim_obq(adap, qid, (u32 *)p->data, 6 * CIM_OBQ_SIZE * 4);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2202
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2204
u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2220
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2227
ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2246
QID_V(qid) | val);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2343
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2350
qid,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2581
u16 qid = (dropped_db >> 15) & 0x1ffff;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2587
ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2591
"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
578
unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
581
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
907
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
909
qid -= p->ingr_start;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
910
return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
428
u32 qoffset, qcount, tot_qcount, qid, hwqid;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
451
qid = qoffset + j;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
453
eosw_txq = &tc_port_mqprio->eosw_txq[qid];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
249
void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
523
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
530
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4/sched.c
222
unsigned int qid;
drivers/net/ethernet/chelsio/cxgb4/sched.c
233
qid = txq->q.cntxt_id;
drivers/net/ethernet/chelsio/cxgb4/sched.c
241
qe->cntxt_id = qid;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2426
u32 qid;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2434
qid = skb_get_queue_mapping(skb) - pi->nqsets;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2435
eosw_txq = &tc_port_mqprio->eosw_txq[qid];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2465
u16 qid = skb_get_queue_mapping(skb);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2470
if (unlikely(qid >= pi->nqsets))
drivers/net/ethernet/chelsio/cxgb4/sge.c
4161
unsigned int qid = ntohl(rc->pldbuflen_qid);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4163
qid -= adap->sge.ingr_start;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4164
napi_schedule(&adap->sge.ingr_map[qid]->napi);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4342
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4349
ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
6489
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
6495
FW_FILTER_WR_NOREPLY_V(qid < 0));
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
6497
if (qid >= 0)
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
6499
cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9254
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9285
bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9286
bar2_qid = qid & qpp_mask;
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9779
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9785
if (qid > 5 || (n & 3))
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9788
addr = qid * nwords;
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9821
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9828
if ((qid > (cim_num_obq - 1)) || (n & 3))
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
9832
QUENUMSELECT_V(qid));
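
Note: the t4_bar2_sge_qregs hits above (and the matching t4vf_hw.c lines later in this list) locate a queue's doorbell by shifting the qid down by the queues-per-page shift to pick a BAR2 page, and masking the same low bits off to get the qid used within that page. A standalone sketch follows, assuming qpp_mask is simply the low qpp_shift bits; names are illustrative.

	#include <stdint.h>

	static void bar2_split(uint32_t qid, unsigned int qpp_shift,
			       unsigned int page_shift,
			       uint64_t *page_offset, uint32_t *bar2_qid)
	{
		uint32_t qpp_mask = (1u << qpp_shift) - 1;	/* assumption: low qpp_shift bits */

		*page_offset = (uint64_t)(qid >> qpp_shift) << page_shift;	/* which BAR2 page */
		*bar2_qid = qid & qpp_mask;					/* qid within that page */
	}
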
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
150
__be32 qid;
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
236
__be16 qid;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
533
unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
546
eq_idx = EQ_IDX(s, qid);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
549
"Egress Update QID %d out of range\n", qid);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
555
"Egress Update QID %d TXQ=NULL\n", qid);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
559
if (unlikely(tq->abs_id != qid)) {
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
562
qid, tq->abs_id);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1944
unsigned int qid, iq_idx;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1976
qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1977
iq_idx = IQ_IDX(s, qid);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1980
"Ingress QID %d out of range\n", qid);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1986
"Ingress QID %d RSPQ=NULL\n", qid);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1989
if (unlikely(rspq->abs_id != qid)) {
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1992
qid, rspq->abs_id);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2176
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2183
ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
374
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
767
unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
798
bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
799
bar2_qid = qid & qpp_mask;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
575
int qid = q->q.cntxt_id;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
608
wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
drivers/net/ethernet/emulex/benet/be.h
964
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
drivers/net/ethernet/emulex/benet/be_main.c
206
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
drivers/net/ethernet/emulex/benet/be_main.c
213
val |= qid & DB_RQ_RING_ID_MASK;
drivers/net/ethernet/emulex/benet/be_main.c
235
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
drivers/net/ethernet/emulex/benet/be_main.c
241
val |= qid & DB_EQ_RING_ID_MASK;
drivers/net/ethernet/emulex/benet/be_main.c
242
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
drivers/net/ethernet/emulex/benet/be_main.c
257
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
drivers/net/ethernet/emulex/benet/be_main.c
261
val |= qid & DB_CQ_RING_ID_MASK;
drivers/net/ethernet/emulex/benet/be_main.c
262
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3650
struct dpni_queue_id qid = {0};
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3669
&queue, &qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3673
fq->tx_fqid[j] = qid.fqid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3900
struct dpni_queue_id qid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3904
DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3910
fq->fqid = qid.fqid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3953
struct dpni_queue_id qid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3959
&queue, &qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3964
fq->tx_fqid[i] = qid.fqid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3968
fq->tx_qdbin = qid.qdbin;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3972
&queue, &qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
3978
fq->fqid = qid.fqid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4001
struct dpni_queue_id qid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4006
DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4012
fq->fqid = qid.fqid;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
846
int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
847
int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
173
static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
175
struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
182
ch = priv->channel[qid];
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
222
u16 qid)
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
251
ch = priv->channel[qid];
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
286
err2 = dpaa2_xsk_disable_pool(dev, qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
290
err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq,
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
303
int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
305
return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) :
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
306
dpaa2_xsk_disable_pool(dev, qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
309
int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
312
struct dpaa2_eth_channel *ch = priv->channel[qid];
drivers/net/ethernet/freescale/dpaa2/dpni.c
1574
struct dpni_queue_id *qid)
drivers/net/ethernet/freescale/dpaa2/dpni.c
1607
qid->fqid = le32_to_cpu(rsp_params->fqid);
drivers/net/ethernet/freescale/dpaa2/dpni.c
1608
qid->qdbin = le16_to_cpu(rsp_params->qdbin);
drivers/net/ethernet/freescale/dpaa2/dpni.h
850
struct dpni_queue_id *qid);
drivers/net/ethernet/freescale/fec.h
503
int qid;
drivers/net/ethernet/freescale/fec_main.c
1530
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
1884
skb_record_rx_queue(skb, rxq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
2250
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
4005
__func__, rxq->bd.qid, err);
drivers/net/ethernet/freescale/fec_main.c
4734
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
4972
rxq->bd.qid = i;
drivers/net/ethernet/freescale/fec_main.c
4988
txq->bd.qid = i;
drivers/net/ethernet/freescale/fec_main.c
548
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
682
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
760
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
drivers/net/ethernet/freescale/fec_main.c
823
estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
drivers/net/ethernet/fungible/funcore/fun_queue.c
405
struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
drivers/net/ethernet/fungible/funcore/fun_queue.c
416
funq->qid = qid;
drivers/net/ethernet/fungible/funcore/fun_queue.c
420
funq->cqid = 2 * qid;
drivers/net/ethernet/fungible/funcore/fun_queue.c
421
if (funq->qid) {
drivers/net/ethernet/fungible/funcore/fun_queue.c
431
funq->cqid = qid;
drivers/net/ethernet/fungible/funcore/fun_queue.c
432
funq->sqid = qid;
drivers/net/ethernet/fungible/funcore/fun_queue.c
518
funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid);
drivers/net/ethernet/fungible/funcore/fun_queue.h
154
struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
drivers/net/ethernet/fungible/funcore/fun_queue.h
85
u16 qid;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
346
unsigned int qid = skb_get_queue_mapping(skb);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
347
struct funeth_txq *q = fp->txqs[qid];
drivers/net/ethernet/google/gve/gve_main.c
1172
static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
drivers/net/ethernet/google/gve/gve_main.c
1179
rx = &priv->rx[qid];
drivers/net/ethernet/google/gve/gve_main.c
1186
priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
drivers/net/ethernet/google/gve/gve_main.c
1190
struct xsk_buff_pool *pool, u16 qid)
drivers/net/ethernet/google/gve/gve_main.c
1196
rx = &priv->rx[qid];
drivers/net/ethernet/google/gve/gve_main.c
1200
gve_unreg_xsk_pool(priv, qid);
drivers/net/ethernet/google/gve/gve_main.c
1206
tx_qid = gve_xdp_tx_queue_id(priv, qid);
drivers/net/ethernet/google/gve/gve_main.c
1229
static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
drivers/net/ethernet/google/gve/gve_main.c
1231
if (!test_bit(qid, priv->xsk_pools))
drivers/net/ethernet/google/gve/gve_main.c
1234
return xsk_get_pool_from_qid(priv->dev, qid);
drivers/net/ethernet/google/gve/gve_main.c
1590
u16 qid)
drivers/net/ethernet/google/gve/gve_main.c
1595
if (qid >= priv->rx_cfg.num_queues) {
drivers/net/ethernet/google/gve/gve_main.c
1596
dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
drivers/net/ethernet/google/gve/gve_main.c
1610
set_bit(qid, priv->xsk_pools);
drivers/net/ethernet/google/gve/gve_main.c
1616
err = gve_reg_xsk_pool(priv, dev, pool, qid);
drivers/net/ethernet/google/gve/gve_main.c
1629
gve_unreg_xsk_pool(priv, qid);
drivers/net/ethernet/google/gve/gve_main.c
1631
clear_bit(qid, priv->xsk_pools);
drivers/net/ethernet/google/gve/gve_main.c
1639
u16 qid)
drivers/net/ethernet/google/gve/gve_main.c
1648
if (qid >= priv->rx_cfg.num_queues)
drivers/net/ethernet/google/gve/gve_main.c
1651
clear_bit(qid, priv->xsk_pools);
drivers/net/ethernet/google/gve/gve_main.c
1653
pool = xsk_get_pool_from_qid(dev, qid);
drivers/net/ethernet/google/gve/gve_main.c
1669
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
drivers/net/ethernet/google/gve/gve_main.c
1672
tx_qid = gve_xdp_tx_queue_id(priv, qid);
drivers/net/ethernet/google/gve/gve_main.c
1676
gve_unreg_xsk_pool(priv, qid);
drivers/net/ethernet/google/gve/gve_main.c
1682
if (gve_rx_work_pending(&priv->rx[qid]))
drivers/net/ethernet/google/gve/gve_tx.c
832
int i, err = 0, qid;
drivers/net/ethernet/google/gve/gve_tx.c
840
qid = gve_xdp_tx_queue_id(priv,
drivers/net/ethernet/google/gve/gve_tx.c
843
tx = &priv->tx[qid];
drivers/net/ethernet/google/gve/gve_tx_dqo.c
1575
int i, err = 0, qid;
drivers/net/ethernet/google/gve/gve_tx_dqo.c
1580
qid = gve_xdp_tx_queue_id(priv,
drivers/net/ethernet/google/gve/gve_tx_dqo.c
1583
tx = &priv->tx[qid];
drivers/net/ethernet/hisilicon/hns/hnae.h
192
__u8 qid;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
165
static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
172
PPE_CFG_QID_MODE_DEF_QID_S, qid);
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
264
u16 qid, u32 j)
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
271
rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
283
u16 qid;
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
301
qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
302
req->rss_qid_l[j] = qid & 0xff;
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
303
hclge_comm_append_rss_msb_info(req, qid, j);
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
646
if (data->qid >= h->kinfo.num_tqps) {
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
647
dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
651
seq_printf(s, "Queue %u rx bd info:\n", data->qid);
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
656
ring = &priv->ring[data->qid + data->handle->kinfo.num_tqps];
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
691
if (data->qid >= h->kinfo.num_tqps) {
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
692
dev_err(&h->pdev->dev, "queue%u is not in use\n", data->qid);
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
696
seq_printf(s, "Queue %u tx bd info:\n", data->qid);
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
701
ring = &priv->ring[data->qid];
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
856
data[i].qid = i;
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
20
u16 qid;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
29
return nic_dev->hwdev->func_to_io.global_qpn + sq->qid;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
59
return nic_dev->hwdev->func_to_io.global_qpn + rq->qid;
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
1344
u16 i = 0, j = 0, qid = 0;
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
1347
for (qid = 0; qid < nic_dev->num_qps; qid++) {
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
1351
hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
1360
for (qid = 0; qid < nic_dev->num_qps; qid++) {
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
1364
hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
308
qp->sq.qid = q_id;
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
318
qp->rq.qid = q_id;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
103
u16 qid;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
84
u16 qid;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
661
u16 qid;
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
664
for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
665
err = hinic3_stop_sq(&nic_dev->txqs[qid]);
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
666
netdev_tx_reset_subqueue(netdev, qid);
drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
668
netdev_err(netdev, "Failed to stop sq%u\n", qid);
drivers/net/ethernet/intel/i40e/i40e_main.c
3424
int qid = ring->queue_index;
drivers/net/ethernet/intel/i40e/i40e_main.c
3427
qid -= ring->vsi->alloc_queue_pairs;
drivers/net/ethernet/intel/i40e/i40e_main.c
3429
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
drivers/net/ethernet/intel/i40e/i40e_main.c
3432
return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
277
u16 qid)
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
282
return (vsi && (qid < vsi->alloc_queue_pairs));
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
925
u16 qid = i40e_vc_get_pf_queue_id(vf,
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
928
reg = qid;
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
929
qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
931
reg |= qid << 16;
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
953
u16 vsi_id, qid;
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
969
qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
971
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
104
set_bit(qid, vsi->af_xdp_zc_qps);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
109
err = i40e_queue_pair_disable(vsi, qid);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
113
err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
117
err = i40e_queue_pair_enable(vsi, qid);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
122
err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
138
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
drivers/net/ethernet/intel/i40e/i40e_xsk.c
145
pool = xsk_get_pool_from_qid(netdev, qid);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
152
err = i40e_queue_pair_disable(vsi, qid);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
157
clear_bit(qid, vsi->af_xdp_zc_qps);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
161
err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
164
err = i40e_queue_pair_enable(vsi, qid);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
184
u16 qid)
drivers/net/ethernet/intel/i40e/i40e_xsk.c
186
return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
drivers/net/ethernet/intel/i40e/i40e_xsk.c
187
i40e_xsk_pool_disable(vsi, qid);
drivers/net/ethernet/intel/i40e/i40e_xsk.c
84
u16 qid)
drivers/net/ethernet/intel/i40e/i40e_xsk.c
93
if (qid >= vsi->num_queue_pairs)
drivers/net/ethernet/intel/i40e/i40e_xsk.c
96
if (qid >= netdev->real_num_rx_queues ||
drivers/net/ethernet/intel/i40e/i40e_xsk.c
97
qid >= netdev->real_num_tx_queues)
drivers/net/ethernet/intel/i40e/i40e_xsk.h
25
u16 qid);
drivers/net/ethernet/intel/ice/ice.h
793
u16 qid)
drivers/net/ethernet/intel/ice/ice.h
795
struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
drivers/net/ethernet/intel/ice/ice.h
812
u16 qid = ring->q_index;
drivers/net/ethernet/intel/ice/ice.h
814
WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
drivers/net/ethernet/intel/ice/ice.h
831
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
drivers/net/ethernet/intel/ice/ice.h
835
ring = vsi->rx_rings[qid]->xdp_ring;
drivers/net/ethernet/intel/ice/ice.h
839
WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
drivers/net/ethernet/intel/ice/ice_lag.c
521
u16 qid, count = 0;
drivers/net/ethernet/intel/ice/ice_lag.c
542
qid = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
drivers/net/ethernet/intel/ice/ice_lag.c
543
qbuf->queue_info[count].q_handle = cpu_to_le16(qid);
drivers/net/ethernet/intel/ice/ice_main.c
2671
static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
drivers/net/ethernet/intel/ice/ice_main.c
2677
return vsi->xdp_rings[qid % vsi->num_xdp_txq];
drivers/net/ethernet/intel/ice/ice_main.c
2679
q_vector = vsi->rx_rings[qid]->q_vector;
drivers/net/ethernet/intel/ice/ice_xsk.c
124
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
drivers/net/ethernet/intel/ice/ice_xsk.c
126
struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
drivers/net/ethernet/intel/ice/ice_xsk.c
145
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
drivers/net/ethernet/intel/ice/ice_xsk.c
152
if (qid >= vsi->netdev->real_num_rx_queues ||
drivers/net/ethernet/intel/ice/ice_xsk.c
153
qid >= vsi->netdev->real_num_tx_queues)
drivers/net/ethernet/intel/ice/ice_xsk.c
197
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
drivers/net/ethernet/intel/ice/ice_xsk.c
199
struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
drivers/net/ethernet/intel/ice/ice_xsk.c
203
if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
drivers/net/ethernet/intel/ice/ice_xsk.c
213
ret = ice_qp_dis(vsi, qid);
drivers/net/ethernet/intel/ice/ice_xsk.c
224
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
drivers/net/ethernet/intel/ice/ice_xsk.c
225
ice_xsk_pool_disable(vsi, qid);
drivers/net/ethernet/intel/ice/ice_xsk.c
229
ret = ice_qp_ena(vsi, qid);
drivers/net/ethernet/intel/ice/ice_xsk.c
231
napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
drivers/net/ethernet/intel/ice/ice_xsk.c
78
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
drivers/net/ethernet/intel/ice/ice_xsk.c
83
int q, _qid = qid;
drivers/net/ethernet/intel/ice/ice_xsk.c
92
_qid = qid;
drivers/net/ethernet/intel/ice/ice_xsk.h
14
u16 qid);
drivers/net/ethernet/intel/ice/ice_xsk.h
27
u16 qid);
drivers/net/ethernet/intel/ice/ice_xsk.h
43
u16 __always_unused qid)
drivers/net/ethernet/intel/ice/ice_xsk.h
88
u16 qid) { }
drivers/net/ethernet/intel/ice/virt/queues.c
40
static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
drivers/net/ethernet/intel/ice/virt/queues.c
43
return qid < vsi->alloc_txq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1188
const struct idpf_queue_set *qs, u32 qid)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1194
q_vector = idpf_find_rxq_vec(vport, qid);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1199
qid, ERR_PTR(err));
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1230
qid, ERR_PTR(err));
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1237
qid, ERR_PTR(err));
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1244
netif_start_subqueue(vport->netdev, qid);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1250
const struct idpf_queue_set *qs, u32 qid)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1255
q_vector = idpf_find_rxq_vec(vport, qid);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1256
netif_stop_subqueue(vport->netdev, qid);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1264
qid, ERR_PTR(err));
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1284
int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1286
struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1289
if (idpf_find_txq_vec(vport, qid) != q_vector)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1296
return en ? idpf_qp_enable(vport, qs, qid) :
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1297
idpf_qp_disable(vport, qs, qid);
drivers/net/ethernet/intel/idpf/idpf_txrx.h
1113
int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2172
u32 qid;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2179
qid = q->rxq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2182
qid = q->txq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2185
qid = q->bufq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2188
qid = q->complq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2194
qc[i].start_queue_id = cpu_to_le32(qid);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2342
u32 qid, v_idx, itr_idx;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2348
qid = q->rxq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2364
qid = q->txq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2387
vqv[i].queue_id = cpu_to_le32(qid);
drivers/net/ethernet/intel/idpf/xsk.c
31
u32 qid = U32_MAX;
drivers/net/ethernet/intel/idpf/xsk.c
38
qid = grp->splitq.rxq_sets[0]->rxq.idx;
drivers/net/ethernet/intel/idpf/xsk.c
45
pool = xsk_get_pool_from_qid(vport->netdev, qid);
drivers/net/ethernet/intel/idpf/xsk.c
557
u32 qid = bpf->xsk.queue_id;
drivers/net/ethernet/intel/idpf/xsk.c
566
LIBETH_RX_BUF_STRIDE, qid,
drivers/net/ethernet/intel/idpf/xsk.c
575
ret = idpf_qp_switch(vport, qid, false);
drivers/net/ethernet/intel/idpf/xsk.c
579
netdev_name(vport->netdev), qid,
drivers/net/ethernet/intel/idpf/xsk.c
58
u32 qid;
drivers/net/ethernet/intel/idpf/xsk.c
585
ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
drivers/net/ethernet/intel/idpf/xsk.c
589
netdev_name(vport->netdev), qid,
drivers/net/ethernet/intel/idpf/xsk.c
597
ret = idpf_qp_switch(vport, qid, true);
drivers/net/ethernet/intel/idpf/xsk.c
601
netdev_name(vport->netdev), qid,
drivers/net/ethernet/intel/idpf/xsk.c
609
libeth_xsk_setup_pool(vport->netdev, qid, false);
drivers/net/ethernet/intel/idpf/xsk.c
614
int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
drivers/net/ethernet/intel/idpf/xsk.c
629
q_vector = idpf_find_rxq_vec(vport, qid);
drivers/net/ethernet/intel/idpf/xsk.c
633
libeth_xsk_wakeup(&q_vector->csd, qid);
drivers/net/ethernet/intel/idpf/xsk.c
65
qid = txq->idx - vport->dflt_qv_rsrc.xdp_txq_offset;
drivers/net/ethernet/intel/idpf/xsk.c
67
pool = xsk_get_pool_from_qid(vport->netdev, qid);
drivers/net/ethernet/intel/idpf/xsk.c
83
u32 qid;
drivers/net/ethernet/intel/idpf/xsk.c
90
qid = complq->txq_grp->txqs[0]->idx -
drivers/net/ethernet/intel/idpf/xsk.c
93
pool = xsk_get_pool_from_qid(vport->netdev, qid);
drivers/net/ethernet/intel/idpf/xsk.h
31
int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
drivers/net/ethernet/intel/igb/igb.h
863
u16 qid);
drivers/net/ethernet/intel/igb/igb.h
870
int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
drivers/net/ethernet/intel/igb/igb_xsk.c
106
if (qid >= adapter->num_rx_queues)
drivers/net/ethernet/intel/igb/igb_xsk.c
109
if (qid >= netdev->real_num_rx_queues ||
drivers/net/ethernet/intel/igb/igb_xsk.c
110
qid >= netdev->real_num_tx_queues)
drivers/net/ethernet/intel/igb/igb_xsk.c
117
rx_ring = adapter->rx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
120
igb_txrx_ring_disable(adapter, qid);
drivers/net/ethernet/intel/igb/igb_xsk.c
125
igb_txrx_ring_enable(adapter, qid);
drivers/net/ethernet/intel/igb/igb_xsk.c
127
err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
drivers/net/ethernet/intel/igb/igb_xsk.c
139
static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
drivers/net/ethernet/intel/igb/igb_xsk.c
146
pool = xsk_get_pool_from_qid(adapter->netdev, qid);
drivers/net/ethernet/intel/igb/igb_xsk.c
150
rx_ring = adapter->rx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
153
igb_txrx_ring_disable(adapter, qid);
drivers/net/ethernet/intel/igb/igb_xsk.c
162
igb_txrx_ring_enable(adapter, qid);
drivers/net/ethernet/intel/igb/igb_xsk.c
170
u16 qid)
drivers/net/ethernet/intel/igb/igb_xsk.c
172
return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
drivers/net/ethernet/intel/igb/igb_xsk.c
173
igb_xsk_pool_disable(adapter, qid);
drivers/net/ethernet/intel/igb/igb_xsk.c
34
static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
drivers/net/ethernet/intel/igb/igb_xsk.c
36
struct igb_ring *tx_ring = adapter->tx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
37
struct igb_ring *rx_ring = adapter->rx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
537
int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
drivers/net/ethernet/intel/igb/igb_xsk.c
550
if (qid >= adapter->num_tx_queues)
drivers/net/ethernet/intel/igb/igb_xsk.c
553
ring = adapter->tx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
57
static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
drivers/net/ethernet/intel/igb/igb_xsk.c
571
ring = adapter->rx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
59
struct igb_ring *tx_ring = adapter->tx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
60
struct igb_ring *rx_ring = adapter->rx_ring[qid];
drivers/net/ethernet/intel/igb/igb_xsk.c
86
int qid = ring->queue_index;
drivers/net/ethernet/intel/igb/igb_xsk.c
89
pool = xsk_get_pool_from_qid(adapter->netdev, qid);
drivers/net/ethernet/intel/igb/igb_xsk.c
99
u16 qid)
drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
37
u16 qid);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
15
int qid = ring->ring_idx;
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
17
if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
20
return xsk_get_pool_from_qid(adapter->netdev, qid);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
25
u16 qid)
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
31
if (qid >= adapter->num_rx_queues)
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
34
if (qid >= netdev->real_num_rx_queues ||
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
35
qid >= netdev->real_num_tx_queues)
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
46
ixgbe_txrx_ring_disable(adapter, qid);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
48
set_bit(qid, adapter->af_xdp_zc_qps);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
51
ixgbe_txrx_ring_enable(adapter, qid);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
511
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
522
if (qid >= adapter->num_xdp_queues)
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
525
ring = adapter->xdp_ring[qid];
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
54
err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
56
clear_bit(qid, adapter->af_xdp_zc_qps);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
65
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
70
pool = xsk_get_pool_from_qid(adapter->netdev, qid);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
78
ixgbe_txrx_ring_disable(adapter, qid);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
80
clear_bit(qid, adapter->af_xdp_zc_qps);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
84
ixgbe_txrx_ring_enable(adapter, qid);
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
91
u16 qid)
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
93
return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
94
ixgbe_xsk_pool_disable(adapter, qid);
drivers/net/ethernet/intel/libeth/xsk.c
223
void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
drivers/net/ethernet/intel/libeth/xsk.c
231
if (unlikely(qid >= nr_cpu_ids))
drivers/net/ethernet/intel/libeth/xsk.c
232
qid %= nr_cpu_ids;
drivers/net/ethernet/intel/libeth/xsk.c
234
if (qid != raw_smp_processor_id() && cpu_online(qid))
drivers/net/ethernet/intel/libeth/xsk.c
235
smp_call_function_single_async(qid, csd);
drivers/net/ethernet/intel/libeth/xsk.c
256
int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
drivers/net/ethernet/intel/libeth/xsk.c
260
pool = xsk_get_pool_from_qid(dev, qid);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
14
int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
15
int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
16
int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1229
int qid, ret, err;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1291
qid = otx2_qos_get_qid(pfvf);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1292
if (qid < 0) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1299
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1303
ceil, quantum, qid, static_cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1344
return pfvf->hw.tx_queues + qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1369
u16 qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1417
qid = node->qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1429
otx2_qos_disable_sq(pfvf, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1432
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1435
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1440
qid, static_cfg);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1460
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1507
static void otx2_reset_qdisc(struct net_device *dev, u16 qid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1509
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1521
int qid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1528
pfvf->qos.qid_to_sqmap[qid] = tmp->schq;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1537
u16 qid, moved_qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1550
qid = node->qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1555
otx2_qos_disable_sq(pfvf, node->qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1558
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1577
if (moved_qid == 0 || moved_qid == qid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1589
node->qid = OTX2_QOS_QID_INNER;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1598
otx2_cfg_smq(pfvf, node, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1600
otx2_qos_enable_sq(pfvf, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1601
__set_bit(qid, pfvf->qos.qos_sq_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1602
node->qid = qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1616
u16 qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1629
qid = node->qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1641
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1643
otx2_qos_disable_sq(pfvf, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1645
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1667
WRITE_ONCE(parent->qid, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1668
__set_bit(qid, pfvf->qos.qos_sq_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1751
htb->qid = res;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
1767
htb->qid = res;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
181
if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
279
if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
280
__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
422
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
473
WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
506
u32 quantum, u16 qid, bool static_cfg)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
518
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
528
__set_bit(qid, pfvf->qos.qos_sq_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
553
*otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
559
if (node->qid == qid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
595
u16 qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
603
qid = READ_ONCE(node->qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
604
if (qid == OTX2_QOS_QID_INNER) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
608
res = pfvf->hw.tx_queues + qid;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
932
u16 qid)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
934
if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
935
otx2_qos_disable_sq(pfvf, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
937
pfvf->qos.qid_to_sqmap[qid] = node->schq;
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
939
otx2_qos_enable_sq(pfvf, qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
948
if (node->qid == OTX2_QOS_QID_INNER)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
956
otx2_qos_enadis_sq(pfvf, tmp, node->qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
969
if (tmp->qid == OTX2_QOS_QID_INNER)
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
975
otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
98
if (node->qid == OTX2_QOS_QID_NONE) {
drivers/net/ethernet/marvell/octeontx2/nic/qos.h
66
u16 qid;
drivers/net/ethernet/marvell/prestera/prestera.h
179
int (*send_req)(struct prestera_device *dev, int qid, void *in_msg,
drivers/net/ethernet/marvell/prestera/prestera_pci.c
242
static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
244
return fw->evt_queue[qid].len;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
247
static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
249
u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
250
u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
252
return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
256
u8 qid, u32 idx)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
258
u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
260
prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
263
static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
265
return fw->evt_queue[qid].addr;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
268
static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
270
u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
273
val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
274
prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
279
u8 qid, void *buf, size_t len)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
281
u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
282
u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
288
idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
291
prestera_fw_evtq_rd_set(fw, qid, idx);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
298
int qid;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
300
for (qid = 0; qid < fw->evt_qnum; qid++) {
drivers/net/ethernet/marvell/prestera/prestera_pci.c
301
if (prestera_fw_evtq_avail(fw, qid) >= 4)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
302
return qid;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
321
u8 qid;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
328
while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
drivers/net/ethernet/marvell/prestera/prestera_pci.c
332
len = prestera_fw_evtq_read32(fw, qid);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
333
idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
335
WARN_ON(prestera_fw_evtq_avail(fw, qid) < len);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
338
prestera_fw_evtq_rd_set(fw, qid, idx + len);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
342
prestera_fw_evtq_read_buf(fw, qid, msg, len);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
361
static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
363
mutex_lock(&fw->cmd_queue[qid].cmd_mtx);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
366
static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
368
mutex_unlock(&fw->cmd_queue[qid].cmd_mtx);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
371
static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
373
return fw->cmd_queue[qid].len;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
376
static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
378
return fw->cmd_queue[qid].addr;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
381
static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid,
drivers/net/ethernet/marvell/prestera/prestera_pci.c
392
if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid))
drivers/net/ethernet/marvell/prestera/prestera_pci.c
396
err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 0, 30);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
402
prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
404
memcpy_toio(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
406
prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
drivers/net/ethernet/marvell/prestera/prestera_pci.c
410
err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid),
drivers/net/ethernet/marvell/prestera/prestera_pci.c
417
ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
426
prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
429
prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
drivers/net/ethernet/marvell/prestera/prestera_pci.c
434
static int prestera_fw_send_req(struct prestera_device *dev, int qid,
drivers/net/ethernet/marvell/prestera/prestera_pci.c
443
prestera_fw_cmdq_lock(fw, qid);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
444
ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size,
drivers/net/ethernet/marvell/prestera/prestera_pci.c
446
prestera_fw_cmdq_unlock(fw, qid);
drivers/net/ethernet/marvell/prestera/prestera_pci.c
455
u8 qid;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
478
for (qid = 0; qid < fw->cmd_qnum; qid++) {
drivers/net/ethernet/marvell/prestera/prestera_pci.c
479
u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
480
struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid];
drivers/net/ethernet/marvell/prestera/prestera_pci.c
482
cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
493
for (qid = 0; qid < fw->evt_qnum; qid++) {
drivers/net/ethernet/marvell/prestera/prestera_pci.c
494
u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid));
drivers/net/ethernet/marvell/prestera/prestera_pci.c
495
struct prestera_fw_evtq *evtq = &fw->evt_queue[qid];
drivers/net/ethernet/marvell/prestera/prestera_pci.c
497
evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid));
drivers/net/ethernet/mediatek/mtk_eth_soc.c
1473
FIELD_PREP(TX_DMA_PQID, info->qid);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
1524
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
1568
.qid = skb_get_queue_mapping(skb),
drivers/net/ethernet/mediatek/mtk_eth_soc.c
1632
txd_info.qid = queue;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
1981
.qid = mac->id,
drivers/net/ethernet/mediatek/mtk_eth_soc.c
2031
txd_info.qid = mac->id;
drivers/net/ethernet/mediatek/mtk_eth_soc.h
1134
u16 qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
100
node->qid = MLX5E_QOS_QID_INNER;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
134
if (node->qid != MLX5E_QOS_QID_INNER) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
135
__clear_bit(node->qid, htb->qos_used_qids);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
150
u16 qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
160
qid = READ_ONCE(node->qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
161
if (qid == MLX5E_QOS_QID_INNER) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
165
res = mlx5e_qid_from_qos(&htb->priv->channels, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
17
u16 qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
292
int qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
298
qid = mlx5e_htb_find_unused_qos_qid(htb);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
299
if (qid < 0) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
301
return qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
308
node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
328
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
334
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
338
return mlx5e_qid_from_qos(&priv->channels, node->qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
349
u16 qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
369
child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
390
qid = node->qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
392
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
395
mlx5e_deactivate_qos_sq(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
396
mlx5e_close_qos_sq(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
40
if (node->qid == MLX5E_QOS_QID_INNER)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
407
err = mlx5e_open_qos_sq(priv, &priv->channels, child->qid, child->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
413
mlx5e_activate_qos_sq(priv, child->qid, child->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
42
err = callback(data, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
420
child->qid = MLX5E_QOS_QID_INNER;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
431
static struct mlx5e_qos_node *mlx5e_htb_node_find_by_qid(struct mlx5e_htb *htb, u16 qid)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
437
if (node->qid == qid)
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
449
u16 qid, moved_qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
460
qid = node->qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
465
mlx5e_qid_from_qos(&priv->channels, qid));
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
466
mlx5e_deactivate_qos_sq(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
467
mlx5e_close_qos_sq(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
482
mlx5e_reactivate_qos_sq(priv, qid, txq);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
487
if (moved_qid < qid) {
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
489
WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
490
qid, moved_qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
492
mlx5e_reactivate_qos_sq(priv, qid, txq);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
496
WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
497
qos_dbg(htb->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
501
moved_qid, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
504
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
517
__set_bit(qid, htb->qos_used_qids);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
518
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
521
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
525
node->classid, moved_qid, qid, err);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
527
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
547
u16 qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
570
qid = node->qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
572
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
575
mlx5e_deactivate_qos_sq(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
576
mlx5e_close_qos_sq(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
580
mlx5e_reset_qdisc(htb->netdev, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
591
WRITE_ONCE(node->qid, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
603
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
609
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
70
mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
81
node->qid = qid;
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
82
__set_bit(qid, htb->qos_used_qids);
drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
14
typedef int (*mlx5e_fp_htb_enumerate)(void *data, u16 qid, u32 hw_id);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
113
qid = node_qid / params->num_channels;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
139
rcu_assign_pointer(qos_sqs[qid], sq);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
161
u16 qid;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
165
qid = mlx5e_qid_from_qos(&priv->channels, node_qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
170
mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
172
priv->txq2sq[qid] = sq;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
173
priv->txq2sq_stats[qid] = sq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
187
void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
192
sq = mlx5e_get_qos_sq(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
196
qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
199
txq_ix = mlx5e_qid_from_qos(&priv->channels, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
211
void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
221
ix = qid % params->num_channels;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
222
qid /= params->num_channels;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
225
sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
341
u16 qid = params->num_channels * i + c->ix;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
348
qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
351
txq_ix = mlx5e_qid_from_qos(&c->priv->channels, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
367
void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
369
qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
374
void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
376
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
38
u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
428
htb_qopt->qid = res;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
447
htb_qopt->qid = res;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
49
return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
54
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
61
ix = qid % params->num_channels;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
62
qid /= params->num_channels;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
66
return mlx5e_state_dereference(priv, qos_sqs[qid]);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
76
int txq_ix, ix, qid, err = 0;
drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
24
void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
25
void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
26
void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
27
void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
38
u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
219
int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
224
if (unlikely(qid >= params->num_channels))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
227
return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) :
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
228
mlx5e_xsk_disable_pool(priv, qid);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
25
int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
10
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
19
if (unlikely(qid >= params->num_channels))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
22
c = priv->channels.c[qid];
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
11
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2475
int i, qid;
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2477
for (qid = 0; qid < max_qos_sqs; qid++)
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2479
ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2486
int i, qid;
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2492
for (qid = 0; qid < max_qos_sqs; qid++) {
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2493
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
drivers/net/ethernet/microsoft/mana/gdma_main.c
330
enum gdma_queue_type q_type, u32 qid,
drivers/net/ethernet/microsoft/mana/gdma_main.c
338
e.eq.id = qid;
drivers/net/ethernet/microsoft/mana/gdma_main.c
346
e.cq.id = qid;
drivers/net/ethernet/microsoft/mana/gdma_main.c
354
e.rq.id = qid;
drivers/net/ethernet/microsoft/mana/gdma_main.c
362
e.sq.id = qid;
drivers/net/ethernet/netronome/nfp/abm/ctrl.c
134
unsigned int qid;
drivers/net/ethernet/netronome/nfp/abm/ctrl.c
136
qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
drivers/net/ethernet/netronome/nfp/abm/ctrl.c
138
return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
drivers/net/ethernet/netronome/nfp/abm/ctrl.c
54
unsigned int qid;
drivers/net/ethernet/netronome/nfp/abm/ctrl.c
58
qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
drivers/net/ethernet/netronome/nfp/abm/ctrl.c
60
sym_offset = qid * stride + offset;
drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
13
unsigned int qid)
drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h
15
return dp->xdp_prog && dp->xsk_pools[qid];
drivers/net/ethernet/pensando/ionic/ionic_if.h
2048
__le32 qid;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1180
u32 qid;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1189
qid = lif->hwstamp_rxq->q.index;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1190
ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1198
err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
drivers/net/ethernet/qlogic/qed/qed_l2.c
2060
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
drivers/net/ethernet/qlogic/qed/qed_l2.c
2061
rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
drivers/net/ethernet/qlogic/qed/qed_l2.c
2824
params->qid = QED_RFS_NTUPLE_QID_RSS;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1538
u8 qid;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1549
qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1555
qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1557
return qid;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1573
u8 qid, stats_id;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1637
qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1641
qid);
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1642
p_ll2_conn->queue_id = qid;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1651
qid);
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1658
p_ll2_conn->input.rx_conn_type, qid, stats_id);
drivers/net/ethernet/qlogic/qed/qed_ll2.c
1664
TSTORM_LL2_RX_PRODS, qid);
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2260
u8 qid = p_ll2_conn->queue_id;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2265
CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2279
u8 qid = p_ll2_conn->queue_id;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2284
CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
drivers/net/ethernet/qlogic/qed/qed_ll2.c
69
u8 ll2_queue_type, u8 qid)
drivers/net/ethernet/qlogic/qed/qed_ll2.c
81
stats_id = qid;
drivers/net/ethernet/qlogic/qed/qed_main.c
2912
u16 qid, struct qed_sb_info_dbg *sb_dbg)
drivers/net/ethernet/qlogic/qed/qed_main.c
2914
struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1009
qid = p_params->req_rx_queue[i];
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1010
if (qid < min_vf_qzone || qid > max_vf_qzone) {
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1013
qid,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1019
qid = p_params->req_tx_queue[i];
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1020
if (qid > max_vf_qzone) {
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1023
qid, p_params->rel_vf_id, max_vf_qzone);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1028
if (qid < min_vf_qzone)
drivers/net/ethernet/qlogic/qed/qed_sriov.c
1032
p_params->rel_vf_id, qid, i);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
201
u16 qid,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
2076
if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
drivers/net/ethernet/qlogic/qed/qed_sriov.c
2079
p_vf->relative_vf_id, p_qid_tlv->qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
2083
return p_qid_tlv->qid;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
213
p_qcid = &p_vf->vf_queues[qid].cids[i];
drivers/net/ethernet/qlogic/qed/qed_sriov.c
2691
u16 qid = req->rx_qid + i;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
2693
handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3425
u16 coal = 0, qid, i;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3432
qid = req->qid;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3436
if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3440
p_vf->abs_vf_id, qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3444
p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3449
if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3453
p_vf->abs_vf_id, qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3457
p_queue = &p_vf->vf_queues[qid];
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3494
u16 qid;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3500
qid = req->qid;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3502
if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3506
vf->abs_vf_id, qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3510
if (!qed_iov_validate_txq(p_hwfn, vf, qid,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3514
vf->abs_vf_id, qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3521
vf->abs_vf_id, rx_coal, tx_coal, qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3524
p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3531
vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
3538
struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
drivers/net/ethernet/qlogic/qed/qed_sriov.c
847
int qid;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
849
for (qid = 0; qid < vf->num_rxqs; qid++) {
drivers/net/ethernet/qlogic/qed/qed_sriov.c
850
qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
drivers/net/ethernet/qlogic/qed/qed_sriov.c
878
int qid = 0;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
889
for (qid = 0; qid < num_rx_queues; qid++) {
drivers/net/ethernet/qlogic/qed/qed_sriov.c
891
vf->igu_sbs[qid] = p_block->igu_sb_id;
drivers/net/ethernet/qlogic/qed/qed_sriov.c
893
SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
drivers/net/ethernet/qlogic/qed/qed_sriov.c
986
u16 qid, num_irqs;
drivers/net/ethernet/qlogic/qed/qed_vf.c
1349
req->qid = p_cid->rel.queue_id;
drivers/net/ethernet/qlogic/qed/qed_vf.c
1413
req->qid = p_cid->rel.queue_id;
drivers/net/ethernet/qlogic/qed/qed_vf.c
1418
rx_coal, tx_coal, req->qid);
drivers/net/ethernet/qlogic/qed/qed_vf.c
153
p_qid_tlv->qid = p_cid->qid_usage_idx;
drivers/net/ethernet/qlogic/qed/qed_vf.c
846
u16 qid = p_cid->rel.queue_id;
drivers/net/ethernet/qlogic/qed/qed_vf.c
852
req->tx_qid = qid;
drivers/net/ethernet/qlogic/qed/qed_vf.c
882
u8 cid = p_iov->acquire_resp.resc.cid[qid];
drivers/net/ethernet/qlogic/qed/qed_vf.c
891
qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
drivers/net/ethernet/qlogic/qed/qed_vf.h
233
u8 qid;
drivers/net/ethernet/qlogic/qed/qed_vf.h
482
u16 qid;
drivers/net/ethernet/qlogic/qed/qed_vf.h
488
u16 qid;
drivers/net/ethernet/qlogic/qede/qede_filter.c
102
params.qid = rxq_id;
drivers/net/ethernet/sfc/falcon/farch.c
1093
int qid;
drivers/net/ethernet/sfc/falcon/farch.c
1095
qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
drivers/net/ethernet/sfc/falcon/farch.c
1096
if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
drivers/net/ethernet/sfc/falcon/farch.c
1097
tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
drivers/net/ethernet/sfc/falcon/farch.c
1098
qid % EF4_TXQ_TYPES);
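A minimal standalone sketch of the channel/type decode used by the farch flush-event handlers above: the flat queue number from the event is split with division and modulo. The constants below are made-up stand-ins for EF4_TXQ_TYPES and the TX channel count, not the driver's values.

    /* Sketch (hypothetical constants): decode a flat TX queue id into a
     * (channel, per-channel queue type) pair, qid = channel * TXQ_TYPES + type. */
    #include <stdio.h>

    #define TXQ_TYPES      4   /* queues per channel (assumed) */
    #define N_TX_CHANNELS  8   /* TX-capable channels (assumed) */

    int main(void)
    {
        unsigned int qid = 13; /* e.g. taken from the flush-done event */

        if (qid < TXQ_TYPES * N_TX_CHANNELS) {
            unsigned int channel = qid / TXQ_TYPES;
            unsigned int type    = qid % TXQ_TYPES;

            printf("qid %u -> channel %u, queue type %u\n",
                   qid, channel, type);
        }
        return 0;
    }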
drivers/net/ethernet/sfc/falcon/farch.c
1115
int qid;
drivers/net/ethernet/sfc/falcon/farch.c
1118
qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
drivers/net/ethernet/sfc/falcon/farch.c
1120
if (qid >= efx->n_channels)
drivers/net/ethernet/sfc/falcon/farch.c
1122
channel = ef4_get_channel(efx, qid);
drivers/net/ethernet/sfc/falcon/farch.c
1129
"RXQ %d flush retry\n", qid);
drivers/net/ethernet/sfc/siena/farch.c
1087
int qid;
drivers/net/ethernet/sfc/siena/farch.c
1089
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
drivers/net/ethernet/sfc/siena/farch.c
1090
if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
drivers/net/ethernet/sfc/siena/farch.c
1091
channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
drivers/net/ethernet/sfc/siena/farch.c
1092
tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
drivers/net/ethernet/sfc/siena/farch.c
1108
int qid;
drivers/net/ethernet/sfc/siena/farch.c
1111
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
drivers/net/ethernet/sfc/siena/farch.c
1113
if (qid >= efx->n_channels)
drivers/net/ethernet/sfc/siena/farch.c
1115
channel = efx_get_channel(efx, qid);
drivers/net/ethernet/sfc/siena/farch.c
1122
"RXQ %d flush retry\n", qid);
drivers/net/ethernet/sfc/siena/siena_sriov.c
1406
unsigned qid, seq, type, data;
drivers/net/ethernet/sfc/siena/siena_sriov.c
1408
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
drivers/net/ethernet/sfc/siena/siena_sriov.c
1418
qid, seq, type, data);
drivers/net/ethernet/sfc/siena/siena_sriov.c
1420
if (map_vi_index(efx, qid, &vf, NULL))
drivers/net/ethernet/sfc/siena/siena_sriov.c
1494
unsigned queue, qid;
drivers/net/ethernet/sfc/siena/siena_sriov.c
1497
if (map_vi_index(efx, queue, &vf, &qid))
drivers/net/ethernet/sfc/siena/siena_sriov.c
1500
if (!test_bit(qid, vf->txq_mask))
drivers/net/ethernet/sfc/siena/siena_sriov.c
1503
__clear_bit(qid, vf->txq_mask);
drivers/net/ethernet/sfc/siena/siena_sriov.c
1513
unsigned ev_failed, queue, qid;
drivers/net/ethernet/sfc/siena/siena_sriov.c
1518
if (map_vi_index(efx, queue, &vf, &qid))
drivers/net/ethernet/sfc/siena/siena_sriov.c
1520
if (!test_bit(qid, vf->rxq_mask))
drivers/net/ethernet/sfc/siena/siena_sriov.c
1524
set_bit(qid, vf->rxq_retry_mask);
drivers/net/ethernet/sfc/siena/siena_sriov.c
1527
__clear_bit(qid, vf->rxq_mask);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1370
int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1373
struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid];
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1376
if (emac->xsk_qid != qid) {
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1377
netdev_err(ndev, "XSK queue %d not registered\n", qid);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1381
if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1382
netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1387
netdev_err(ndev, "XSK pool not registered for queue %d\n", qid);
drivers/net/ethernet/ti/icssg/icssg_prueth.c
1392
netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid);
drivers/net/ethernet/ti/icssg/icssg_prueth.h
519
int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags);
drivers/net/ethernet/ti/icssm/icssm_prueth.c
1209
u16 qid;
drivers/net/ethernet/ti/icssm/icssm_prueth.c
1211
qid = icssm_prueth_get_tx_queue_id(emac->prueth, skb);
drivers/net/ethernet/ti/icssm/icssm_prueth.c
1212
ret = icssm_prueth_tx_enqueue(emac, skb, qid);
drivers/net/hyperv/netvsc_trace.h
139
__field( u16, qid )
drivers/net/hyperv/netvsc_trace.h
146
__entry->qid = chan->offermsg.offer.sub_channel_index;
drivers/net/hyperv/netvsc_trace.h
152
__get_str(name), __entry->qid,
drivers/net/hyperv/netvsc_trace.h
164
__field( u16, qid )
drivers/net/hyperv/netvsc_trace.h
169
__entry->qid = chan->offermsg.offer.sub_channel_index;
drivers/net/hyperv/netvsc_trace.h
173
__get_str(name), __entry->qid,
drivers/net/virtio_net.c
1599
static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
drivers/net/virtio_net.c
1607
if (qid >= vi->curr_queue_pairs)
drivers/net/virtio_net.c
1610
sq = &vi->sq[qid];
drivers/net/virtio_net.c
4390
int num, int qid, const struct virtnet_stat_desc *desc)
drivers/net/virtio_net.c
4394
if (qid < 0) {
drivers/net/virtio_net.c
4399
ethtool_sprintf(p, fmt, qid, desc[i].desc);
drivers/net/virtio_net.c
4404
static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
drivers/net/virtio_net.c
4411
if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
drivers/net/virtio_net.c
4429
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4438
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4445
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4452
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4463
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4472
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4479
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4486
virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
drivers/net/virtio_net.c
4673
static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
drivers/net/virtio_net.c
4684
queue_type = vq_type(vi, qid);
drivers/net/virtio_net.c
4780
static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
drivers/net/virtio_net.c
4792
return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
drivers/net/virtio_net.c
4798
queue_type = vq_type(vi, qid);
drivers/net/virtio_net.c
4805
offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
drivers/net/virtio_net.c
4816
offset += num_cq + num_rx * (qid / 2);
drivers/net/virtio_net.c
4915
u32 qid;
drivers/net/virtio_net.c
4930
qid = le16_to_cpu(hdr->vq_index);
drivers/net/virtio_net.c
4931
virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
drivers/net/virtio_net.c
4940
int qid, int *idx)
drivers/net/virtio_net.c
4942
int qtype = vq_type(vi, qid);
drivers/net/virtio_net.c
4948
req->stats[*idx].vq_index = cpu_to_le16(qid);
drivers/net/virtio_net.c
4957
struct virtnet_stats_ctx *ctx, int qid)
drivers/net/virtio_net.c
4968
if (qid == -1) {
drivers/net/virtio_net.c
4973
last_vq = qid;
drivers/net/virtio_net.c
4974
first_vq = qid;
drivers/net/virtio_net.c
5848
u16 qid)
drivers/net/virtio_net.c
5866
if (qid >= vi->curr_queue_pairs)
drivers/net/virtio_net.c
5869
sq = &vi->sq[qid];
drivers/net/virtio_net.c
5870
rq = &vi->rq[qid];
drivers/net/virtio_net.c
5930
static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
drivers/net/virtio_net.c
5938
if (qid >= vi->curr_queue_pairs)
drivers/net/virtio_net.c
5941
sq = &vi->sq[qid];
drivers/net/virtio_net.c
5942
rq = &vi->rq[qid];
drivers/net/virtio_net.c
663
static int vq_type(struct virtnet_info *vi, int qid)
drivers/net/virtio_net.c
665
if (qid == vi->max_queue_pairs * 2)
drivers/net/virtio_net.c
668
if (qid % 2)
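The vq_type() fragments above suggest the usual virtio-net layout, where receive and transmit virtqueues alternate and a single control queue sits at index 2 * max_queue_pairs. A hedged sketch of that classification, with illustrative enum names rather than the driver's:

    /* Sketch of the rx/tx/ctrl classification implied by vq_type() above. */
    #include <stdio.h>

    enum q_type { Q_RX, Q_TX, Q_CQ };

    static enum q_type classify_vq(int qid, int max_queue_pairs)
    {
        if (qid == max_queue_pairs * 2)
            return Q_CQ;   /* single control virtqueue at the end */
        if (qid % 2)
            return Q_TX;   /* odd indices: tx0 is vq 1, tx1 is vq 3, ... */
        return Q_RX;       /* even indices: rx0 is vq 0, rx1 is vq 2, ... */
    }

    int main(void)
    {
        for (int qid = 0; qid <= 8; qid++)
            printf("vq %d -> type %d\n", qid, classify_vq(qid, 4));
        return 0;
    }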
drivers/net/vmxnet3/vmxnet3_drv.c
1362
adapter->tx_prod_offset + tq->qid * 8,
drivers/net/vmxnet3/vmxnet3_drv.c
1398
err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
drivers/net/vmxnet3/vmxnet3_drv.c
1642
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
drivers/net/vmxnet3/vmxnet3_drv.c
1689
(rcd->rqID != rq->qid &&
drivers/net/vmxnet3/vmxnet3_drv.c
1794
skb_record_rx_queue(ctx->skb, rq->qid);
drivers/net/vmxnet3/vmxnet3_drv.c
2002
rxprod_reg[ring_idx] + rq->qid * 8,
drivers/net/vmxnet3/vmxnet3_drv.c
2648
rq->qid = i;
drivers/net/vmxnet3/vmxnet3_drv.c
3447
tq->qid = i;
drivers/net/vmxnet3/vmxnet3_int.h
270
int qid;
drivers/net/vmxnet3/vmxnet3_int.h
335
u32 qid; /* rqID in RCD for buffer from 1st ring */
drivers/net/vmxnet3/vmxnet3_xdp.c
198
VMXNET3_REG_TXPROD + tq->qid * 8,
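The vmxnet3 lines above all compute a producer-register address as a base plus qid * 8, i.e. one doorbell per queue at a fixed 8-byte stride. A small sketch of that addressing; the base offset below is a placeholder, not the real register map:

    /* Sketch of per-queue doorbell addressing: base + qid * stride. */
    #include <stdio.h>

    #define REG_TXPROD_BASE  0x600   /* hypothetical BAR offset */
    #define QUEUE_REG_STRIDE 8       /* per-queue spacing seen in the fragments */

    static unsigned int txprod_reg(unsigned int qid)
    {
        return REG_TXPROD_BASE + qid * QUEUE_REG_STRIDE;
    }

    int main(void)
    {
        for (unsigned int qid = 0; qid < 4; qid++)
            printf("TX queue %u doorbell at offset 0x%x\n", qid, txprod_reg(qid));
        return 0;
    }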
drivers/net/vmxnet3/vmxnet3_xdp.c
217
nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
drivers/net/vmxnet3/vmxnet3_xdp.c
245
nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
drivers/net/wireless/ath/ar5523/ar5523.c
454
__be32 qid = cpu_to_be32(0);
drivers/net/wireless/ath/ar5523/ar5523.c
458
&qid, sizeof(qid), 0);
drivers/net/wireless/ath/ar5523/ar5523.c
486
qinfo.qid = cpu_to_be32(0);
drivers/net/wireless/ath/ar5523/ar5523_hw.h
284
__be32 qid;
drivers/net/wireless/ath/ath9k/ar9003_mac.c
381
ts->qid = MS(ads->ds_info, AR_TxQcuNum);
drivers/net/wireless/ath/ath9k/mac.h
115
u8 qid;
drivers/net/wireless/ath/ath9k/xmit.c
2736
if (ts.qid == sc->beacon.beaconq) {
drivers/net/wireless/ath/ath9k/xmit.c
2749
txq = &sc->tx.txq[ts.qid];
drivers/net/wireless/ath/wil6210/wmi.c
1427
d_len, data->info.qid, data->info.mid, data->info.cid);
drivers/net/wireless/ath/wil6210/wmi.c
860
data->info.qid, data->info.mid, data->info.cid);
drivers/net/wireless/ath/wil6210/wmi.h
2434
u8 qid;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
1000
txq->id = qid;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
1001
trans_pcie->txqs.txq[qid] = txq;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
1008
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
1011
return qid;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
969
int ret, qid;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
979
qid = le16_to_cpu(rsp->queue_number);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
982
if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
983
WARN_ONCE(1, "queue index %d unsupported", qid);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
988
if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) {
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
989
WARN_ONCE(1, "queue %d already used", qid);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
994
if (WARN_ONCE(trans_pcie->txqs.txq[qid],
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
995
"queue %d already allocated\n", qid)) {
drivers/net/wireless/mediatek/mt76/dma.c
1055
int qid, done = 0, cur;
drivers/net/wireless/mediatek/mt76/dma.c
1058
qid = napi - dev->napi;
drivers/net/wireless/mediatek/mt76/dma.c
1063
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
drivers/net/wireless/mediatek/mt76/dma.c
1064
mt76_rx_poll_complete(dev, qid, napi);
drivers/net/wireless/mediatek/mt76/dma.c
1071
dev->drv->rx_poll_complete(dev, qid);
drivers/net/wireless/mediatek/mt76/dma.c
1078
mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
drivers/net/wireless/mediatek/mt76/dma.c
1081
netif_napi_add(dev->napi_dev, &dev->napi[qid], poll);
drivers/net/wireless/mediatek/mt76/dma.c
1082
mt76_dma_rx_fill_buf(dev, &dev->q_rx[qid], false);
drivers/net/wireless/mediatek/mt76/dma.c
1083
napi_enable(&dev->napi[qid]);
drivers/net/wireless/mediatek/mt76/dma.c
273
txwi->qid = q - dev->q_rx;
drivers/net/wireless/mediatek/mt76/dma.c
465
q = &dev->q_rx[t->qid];
drivers/net/wireless/mediatek/mt76/dma.c
638
enum mt76_txq_id qid, struct sk_buff *skb,
drivers/net/wireless/mediatek/mt76/dma.c
703
ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
drivers/net/wireless/mediatek/mt76/dma.c
891
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
drivers/net/wireless/mediatek/mt76/dma.c
893
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/mac80211.c
1965
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
drivers/net/wireless/mediatek/mt76/mt76.h
1337
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
drivers/net/wireless/mediatek/mt76/mt76.h
1339
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
drivers/net/wireless/mediatek/mt76/mt76.h
1345
q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
drivers/net/wireless/mediatek/mt76/mt76.h
1349
phy->q_tx[qid] = q;
drivers/net/wireless/mediatek/mt76/mt76.h
1354
static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
drivers/net/wireless/mediatek/mt76/mt76.h
1359
q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
drivers/net/wireless/mediatek/mt76/mt76.h
1363
dev->q_mcu[qid] = q;
drivers/net/wireless/mediatek/mt76/mt76.h
1519
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
drivers/net/wireless/mediatek/mt76/mt76.h
1844
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
drivers/net/wireless/mediatek/mt76/mt76.h
299
enum mt76_txq_id qid, struct sk_buff *skb,
drivers/net/wireless/mediatek/mt76/mt76.h
308
void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
drivers/net/wireless/mediatek/mt76/mt76.h
313
void (*rx_queue_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
drivers/net/wireless/mediatek/mt76/mt76.h
450
u8 qid;
drivers/net/wireless/mediatek/mt76/mt76.h
551
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
32
u8 qid, tid = 0, hwq = 0;
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
60
qid = tid_to_ac[tid];
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
61
hwq = wmm_queue_map[qid];
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
62
skb_set_queue_mapping(skb, qid);
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1014
if (qid >= MT_TXQ_BEACON)
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1045
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1079
mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
916
struct sk_buff *skb, enum mt76_txq_id qid,
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
925
struct mt76_queue *q = dev->mphy.q_tx[qid];
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
940
if (vif_idx && qid >= MT_TXQ_BEACON)
drivers/net/wireless/mediatek/mt76/mt7603/main.c
399
int qid = skb_get_queue_mapping(skb);
drivers/net/wireless/mediatek/mt76/mt7603/main.c
401
mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[qid], skb, 0);
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
237
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
713
enum mt76_txq_id qid, bool beacon)
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
751
} else if (qid >= MT_TXQ_PSD) {
drivers/net/wireless/mediatek/mt76/mt7615/main.c
757
int qid;
drivers/net/wireless/mediatek/mt76/mt7615/main.c
778
qid = skb_get_queue_mapping(skb);
drivers/net/wireless/mediatek/mt76/mt7615/main.c
779
if (qid >= MT_TXQ_PSD) {
drivers/net/wireless/mediatek/mt76/mt7615/main.c
780
qid = IEEE80211_AC_BE;
drivers/net/wireless/mediatek/mt76/mt7615/main.c
781
skb_set_queue_mapping(skb, qid);
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
188
enum mt76_mcuq_id qid;
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
192
qid = MT_MCUQ_WM;
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
194
qid = MT_MCUQ_FWDL;
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
196
return mt76_tx_queue_skb_raw(dev, dev->mt76.q_mcu[qid], skb, 0);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
477
enum mt76_txq_id qid, bool beacon);
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
500
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
545
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
100
pid, key, qid, false);
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
61
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
182
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
207
mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
45
enum mt76_txq_id qid, struct ieee80211_sta *sta,
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
52
mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, qid, false);
drivers/net/wireless/mediatek/mt76/mt76_connac.h
433
enum mt76_txq_id qid, u32 changed);
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
107
int qid = skb_get_queue_mapping(skb);
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
111
if (!pm->tx_q[qid].skb) {
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
113
pm->tx_q[qid].wcid = wcid;
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
114
pm->tx_q[qid].skb = skb;
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
498
enum mt76_txq_id qid, u32 changed)
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
530
} else if (qid >= MT_TXQ_PSD) {
drivers/net/wireless/mediatek/mt76/mt76x02.h
197
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
138
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
148
if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
19
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
65
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
70
int pid, len = tx_info->skb->len, ep = dev->mphy.q_tx[qid]->ep;
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
492
u8 cw_min = 5, cw_max = 10, qid;
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
495
qid = dev->mphy.q_tx[queue]->hw_idx;
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
506
mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
508
val = mt76_rr(dev, MT_WMM_TXOP(qid));
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
509
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
510
val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
511
mt76_wr(dev, MT_WMM_TXOP(qid), val);
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
514
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
515
val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
519
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
520
val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
524
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
525
val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
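The mt76x02 WMM updates above follow the usual clear-then-set pattern: read the register, mask out the field whose position depends on qid, OR in the new value, and write it back. A self-contained sketch with an assumed 8-bit-per-queue field layout; the mask and shift helpers are illustrative, not the driver's:

    /* Sketch of a masked read-modify-write of a per-queue register field. */
    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_MASK        0xffu
    #define FIELD_SHIFT(qid)  ((qid) * 8)   /* assumed: one byte-wide field per queue */

    static uint32_t update_field(uint32_t reg, unsigned int qid, uint32_t new_val)
    {
        reg &= ~((uint32_t)FIELD_MASK << FIELD_SHIFT(qid));  /* clear old field */
        reg |= (new_val & FIELD_MASK) << FIELD_SHIFT(qid);   /* insert new value */
        return reg;
    }

    int main(void)
    {
        uint32_t reg = 0x44332211;

        reg = update_field(reg, 2, 0xaa);                    /* rewrite queue 2's field */
        printf("register now 0x%08x\n", (unsigned int)reg);  /* -> 0x44aa2211 */
        return 0;
    }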
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
18
u8 qid;
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
828
ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
726
enum mt76_txq_id qid, u32 changed)
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
735
mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
742
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
783
qid, 0);
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
226
enum mt76_mcuq_id qid;
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
229
qid = MT_MCUQ_FWDL;
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
231
qid = MT_MCUQ_WA;
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
233
qid = MT_MCUQ_WM;
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
237
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
576
enum mt76_txq_id qid, u32 changed);
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
596
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
171
mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
178
e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
752
enum mt76_txq_id qid, struct ieee80211_sta *sta,
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
759
mt76_connac2_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
764
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
794
mt7921_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
258
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
314
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
45
pid, qid, 0);
drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
9
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1387
enum mt76_txq_id qid, struct ieee80211_sta *sta,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1394
mt7925_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1399
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1425
mt7925_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
728
enum mt76_txq_id qid, u32 changed)
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
758
} else if (qid >= MT_TXQ_PSD) {
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
276
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
311
enum mt76_txq_id qid, u32 changed);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
335
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
45
pid, qid, 0);
drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
9
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt792x_core.c
132
qid = skb_get_queue_mapping(skb);
drivers/net/wireless/mediatek/mt76/mt792x_core.c
133
if (qid >= MT_TXQ_PSD) {
drivers/net/wireless/mediatek/mt76/mt792x_core.c
134
qid = IEEE80211_AC_BE;
drivers/net/wireless/mediatek/mt76/mt792x_core.c
135
skb_set_queue_mapping(skb, qid);
drivers/net/wireless/mediatek/mt76/mt792x_core.c
89
int qid;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
20
u8 qid;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
608
ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1040
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1136
pid, qid, 0);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1912
int j, len, qid, data_len;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1964
qid = t->qid;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1966
q = &mdev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2026
mt7996_queue_rx_skb(mdev, qid, skb, &info);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
896
enum mt76_txq_id qid, u32 changed)
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
940
} else if (qid >= MT_TXQ_PSD) {
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
277
enum mt76_mcuq_id qid;
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
289
qid = MT_MCUQ_FWDL;
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
296
qid = MT_MCUQ_WA;
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
298
qid = MT_MCUQ_WM;
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
362
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
814
enum mt76_txq_id qid, u32 changed);
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
832
enum mt76_txq_id qid, struct mt76_wcid *wcid,
drivers/net/wireless/mediatek/mt76/npu.c
147
enum mt76_rxq_id qid = napi - dev->napi;
drivers/net/wireless/mediatek/mt76/npu.c
161
skb = mt76_npu_dequeue(dev, &dev->q_rx[qid], &info);
drivers/net/wireless/mediatek/mt76/npu.c
165
dev->drv->rx_skb(dev, qid, skb, &info);
drivers/net/wireless/mediatek/mt76/npu.c
166
mt76_rx_poll_complete(dev, qid, napi);
drivers/net/wireless/mediatek/mt76/npu.c
170
mt76_npu_fill_rx_queue(dev, &dev->q_rx[qid]);
drivers/net/wireless/mediatek/mt76/npu.c
173
dev->drv->rx_poll_complete(dev, qid);
drivers/net/wireless/mediatek/mt76/npu.c
184
int qid = q - &dev->q_rx[0];
drivers/net/wireless/mediatek/mt76/npu.c
185
int index = qid - MT_RXQ_NPU0;
drivers/net/wireless/mediatek/mt76/npu.c
199
napi_schedule(&dev->napi[qid]);
drivers/net/wireless/mediatek/mt76/npu.c
246
int qid = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
drivers/net/wireless/mediatek/mt76/npu.c
255
q->wed_regs = airoha_npu_wlan_get_queue_addr(npu, qid, xmit);
drivers/net/wireless/mediatek/mt76/npu.c
260
int err, irq, qid = q - &dev->q_rx[0];
drivers/net/wireless/mediatek/mt76/npu.c
261
int size, index = qid - MT_RXQ_NPU0;
drivers/net/wireless/mediatek/mt76/npu.c
276
size = qid == MT_RXQ_NPU1 ? NPU_RX1_DESC_NUM : NPU_RX0_DESC_NUM;
drivers/net/wireless/mediatek/mt76/npu.c
293
netif_napi_add(dev->napi_dev, &dev->napi[qid], mt76_npu_rx_poll);
drivers/net/wireless/mediatek/mt76/npu.c
295
napi_enable(&dev->napi[qid]);
drivers/net/wireless/mediatek/mt76/npu.c
405
int qid = i - MT_RXQ_NPU0;
drivers/net/wireless/mediatek/mt76/npu.c
408
status = airoha_npu_wlan_get_irq_status(npu, qid);
drivers/net/wireless/mediatek/mt76/npu.c
410
airoha_npu_wlan_disable_irq(npu, qid);
drivers/net/wireless/mediatek/mt76/sdio.c
304
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
drivers/net/wireless/mediatek/mt76/sdio.c
306
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/sdio.c
385
int qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/sdio.c
402
if (qid == MT_RXQ_MAIN)
drivers/net/wireless/mediatek/mt76/sdio.c
519
enum mt76_txq_id qid, struct sk_buff *skb,
drivers/net/wireless/mediatek/mt76/sdio.c
533
err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
110
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
123
while (i < intr->rx.num[qid] && buf < end) {
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
85
mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
88
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
94
for (i = 0; i < intr->rx.num[qid]; i++)
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
95
len += round_up(intr->rx.len[qid][i] + 4, 4);
drivers/net/wireless/mediatek/mt76/testmode.c
39
int qid;
drivers/net/wireless/mediatek/mt76/testmode.c
44
qid = skb_get_queue_mapping(skb);
drivers/net/wireless/mediatek/mt76/testmode.c
45
q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/testmode.c
56
ret = dev->queue_ops->tx_queue_skb(phy, q, qid, skb_get(skb),
drivers/net/wireless/mediatek/mt76/tx.c
298
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
drivers/net/wireless/mediatek/mt76/tx.c
303
struct mt76_queue *q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/tx.c
310
idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
drivers/net/wireless/mediatek/mt76/tx.c
463
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
drivers/net/wireless/mediatek/mt76/tx.c
486
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
drivers/net/wireless/mediatek/mt76/tx.c
508
idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
drivers/net/wireless/mediatek/mt76/tx.c
524
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
drivers/net/wireless/mediatek/mt76/tx.c
536
txq = ieee80211_next_txq(phy->hw, qid);
drivers/net/wireless/mediatek/mt76/tx.c
549
q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/tx.c
580
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
drivers/net/wireless/mediatek/mt76/tx.c
584
if (qid >= 4)
drivers/net/wireless/mediatek/mt76/tx.c
591
ieee80211_txq_schedule_start(phy->hw, qid);
drivers/net/wireless/mediatek/mt76/tx.c
592
len = mt76_txq_schedule_list(phy, qid);
drivers/net/wireless/mediatek/mt76/tx.c
593
ieee80211_txq_schedule_end(phy->hw, qid);
drivers/net/wireless/mediatek/mt76/tx.c
615
int qid = skb_get_queue_mapping(skb);
drivers/net/wireless/mediatek/mt76/tx.c
623
qid = MT_TXQ_PSD;
drivers/net/wireless/mediatek/mt76/tx.c
625
q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/tx.c
636
__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
drivers/net/wireless/mediatek/mt76/usb.c
356
enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/usb.c
359
if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
drivers/net/wireless/mediatek/mt76/usb.c
393
enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/usb.c
396
sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
drivers/net/wireless/mediatek/mt76/usb.c
589
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
drivers/net/wireless/mediatek/mt76/usb.c
592
int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
drivers/net/wireless/mediatek/mt76/usb.c
595
mt76u_complete_rx, &dev->q_rx[qid]);
drivers/net/wireless/mediatek/mt76/usb.c
604
int qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/usb.c
619
mt76u_submit_rx_buf(dev, qid, urb);
drivers/net/wireless/mediatek/mt76/usb.c
621
if (qid == MT_RXQ_MAIN) {
drivers/net/wireless/mediatek/mt76/usb.c
641
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
drivers/net/wireless/mediatek/mt76/usb.c
643
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/usb.c
649
err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
drivers/net/wireless/mediatek/mt76/usb.c
661
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
drivers/net/wireless/mediatek/mt76/usb.c
663
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/usb.c
686
return mt76u_submit_rx_buffers(dev, qid);
drivers/net/wireless/mediatek/mt76/usb.c
854
enum mt76_txq_id qid, struct sk_buff *skb,
drivers/net/wireless/mediatek/mt76/usb.c
868
err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
drivers/net/wireless/mediatek/mt76/usb.c
910
mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
drivers/net/wireless/mediatek/mt76/usb.c
912
u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;
drivers/net/wireless/mediatek/mt76/usb.c
931
q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
drivers/net/wireless/mediatek/mt7601u/dma.c
352
static u8 q2ep(u8 qid)
drivers/net/wireless/mediatek/mt7601u/dma.c
355
return qid + 1;
drivers/net/wireless/mediatek/mt7601u/tx.c
29
int qid = skb_get_queue_mapping(skb);
drivers/net/wireless/mediatek/mt7601u/tx.c
31
if (WARN_ON(qid >= MT_TXQ_PSD)) {
drivers/net/wireless/mediatek/mt7601u/tx.c
32
qid = MT_TXQ_BE;
drivers/net/wireless/mediatek/mt7601u/tx.c
33
skb_set_queue_mapping(skb, qid);
drivers/net/wireless/mediatek/mt7601u/tx.c
36
return q2hwq(qid);
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
1766
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
632
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
655
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
681
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
719
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
737
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
2064
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
721
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
744
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
770
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
808
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
826
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
1853
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
722
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
745
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1012
entry->queue->qid, entry->entry_idx);
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1126
u8 qid;
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1134
qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1135
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1139
qid);
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1148
entry->entry_idx, qid);
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1176
entry->entry_idx, entry->queue->qid);
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
1275
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
838
rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
31
int idx, qid;
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
33
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
38
qid = queue->qid;
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
39
idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(qid));
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
429
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
460
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
467
rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
489
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
532
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
571
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
615
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
634
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
395
if (entry->queue->qid == QID_BEACON)
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
51
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
725
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
74
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
171
dump_hdr->queue_index = entry->queue->qid;
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
324
queue->qid, (unsigned int)queue->flags,
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
112
qid = QID_ATIM;
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
114
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
118
"Please file bug report to %s\n", qid, DRV_PROJECT);
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
95
enum data_queue_qid qid = skb_get_queue_mapping(skb);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1019
queue->qid);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1213
struct data_queue *queue, enum data_queue_qid qid)
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1220
queue->qid = qid;
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1234
enum data_queue_qid qid;
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1270
qid = QID_AC_VO;
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
1272
rt2x00queue_init(rt2x00dev, queue, qid++);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
500
entry->queue->qid, DRV_PROJECT);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
665
queue->qid);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
677
queue->qid, DRV_PROJECT);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
892
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
901
ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
925
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
934
ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
989
(queue->qid == QID_AC_VO) ||
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
990
(queue->qid == QID_AC_VI) ||
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
991
(queue->qid == QID_AC_BE) ||
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
992
(queue->qid == QID_AC_BK);
drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
455
enum data_queue_qid qid;
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
429
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
469
if ((entry->queue->qid == QID_BEACON) &&
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
488
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
529
queue->qid);
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
574
if (entry->queue->qid == QID_RX)
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
587
if (queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
668
if (queue->qid != QID_BEACON ||
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
703
if (queue->qid != QID_BEACON ||
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1046
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1069
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1100
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1292
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1310
if (entry->queue->qid == QID_RX) {
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1787
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1812
rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1819
if (entry->queue->qid != QID_BEACON) {
drivers/net/wireless/ralink/rt2x00/rt61pci.c
1865
skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE :
drivers/net/wireless/ralink/rt2x00/rt61pci.c
2940
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt73usb.c
1018
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt73usb.c
1041
switch (queue->qid) {
drivers/net/wireless/ralink/rt2x00/rt73usb.c
1481
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
drivers/net/wireless/ralink/rt2x00/rt73usb.c
2356
switch (queue->qid) {
drivers/net/wireless/realtek/rtw89/mac_be.c
2204
u32 qid;
drivers/net/wireless/realtek/rtw89/mac_be.c
2211
qid = WDRLS_DEST_QID_POH;
drivers/net/wireless/realtek/rtw89/mac_be.c
2215
qid = WDRLS_DEST_QID_STF;
drivers/net/wireless/realtek/rtw89/mac_be.c
2227
rtw89_write32_mask(rtwdev, R_BE_RLSRPT0_CFG0, B_BE_RLSRPT0_QID_MASK, qid);
drivers/net/wireless/ti/wl1251/acx.h
304
u8 qid;
drivers/net/wireless/ti/wl1251/init.c
216
static int wl1251_hw_init_txq_fill(u8 qid,
drivers/net/wireless/ti/wl1251/init.c
220
config->qid = qid;
drivers/net/wireless/ti/wl1251/init.c
222
switch (qid) {
drivers/net/wireless/ti/wl1251/init.c
248
wl1251_error("Invalid TX queue id: %d", qid);
drivers/nvme/host/apple.c
732
c.delete_queue.qid = cpu_to_le16(1);
drivers/nvme/host/apple.c
760
c.delete_queue.qid = cpu_to_le16(1);
drivers/nvme/host/auth.c
101
__func__, qid, data->auth_type, data->auth_id);
drivers/nvme/host/auth.c
1094
chap->qid = i;
drivers/nvme/host/auth.c
111
qid, data->auth_type, data->auth_id);
drivers/nvme/host/auth.c
117
qid, le16_to_cpu(data->t_id));
drivers/nvme/host/auth.c
137
if (ctrl->opts->concat && chap->qid == 0) {
drivers/nvme/host/auth.c
181
chap->qid, data->hashid);
drivers/nvme/host/auth.c
191
chap->qid, hmac_name);
drivers/nvme/host/auth.c
206
chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
drivers/nvme/host/auth.c
215
chap->qid, data->hl);
drivers/nvme/host/auth.c
225
chap->qid, hmac_name);
drivers/nvme/host/auth.c
232
chap->qid, data->dhgid);
drivers/nvme/host/auth.c
242
chap->qid, gid_name);
drivers/nvme/host/auth.c
256
chap->qid);
drivers/nvme/host/auth.c
267
chap->qid, ret, gid_name);
drivers/nvme/host/auth.c
273
chap->qid, gid_name);
drivers/nvme/host/auth.c
277
chap->qid);
drivers/nvme/host/auth.c
29
int qid;
drivers/nvme/host/auth.c
333
__func__, chap->qid, (int)chap->hash_len, chap->c2);
drivers/nvme/host/auth.c
345
__func__, chap->qid,
drivers/nvme/host/auth.c
368
chap->qid, data->hl);
drivers/nvme/host/auth.c
374
if (chap->qid == 0)
drivers/nvme/host/auth.c
386
__func__, chap->qid, (int)chap->hash_len, data->rval);
drivers/nvme/host/auth.c
388
__func__, chap->qid, (int)chap->hash_len,
drivers/nvme/host/auth.c
392
chap->qid);
drivers/nvme/host/auth.c
398
if (chap->qid == 0)
drivers/nvme/host/auth.c
442
__func__, chap->qid, chap->s1, chap->transaction);
drivers/nvme/host/auth.c
454
__func__, chap->qid);
drivers/nvme/host/auth.c
461
chap->qid, ret);
drivers/nvme/host/auth.c
540
chap->qid, ret);
drivers/nvme/host/auth.c
559
__func__, chap->qid, chap->s2, chap->transaction);
drivers/nvme/host/auth.c
561
__func__, chap->qid, (int)chap->hash_len, challenge);
drivers/nvme/host/auth.c
563
__func__, chap->qid, ctrl->opts->subsysnqn);
drivers/nvme/host/auth.c
565
__func__, chap->qid, ctrl->opts->host->nqn);
drivers/nvme/host/auth.c
60
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
drivers/nvme/host/auth.c
614
"qid %d: reusing host key\n", chap->qid);
drivers/nvme/host/auth.c
68
if (qid != 0) {
drivers/nvme/host/auth.c
719
__func__, chap->qid);
drivers/nvme/host/auth.c
723
if (chap->qid) {
drivers/nvme/host/auth.c
726
chap->qid);
drivers/nvme/host/auth.c
736
__func__, chap->qid, ret);
drivers/nvme/host/auth.c
748
__func__, chap->qid, ret);
drivers/nvme/host/auth.c
758
__func__, chap->qid, ret);
drivers/nvme/host/auth.c
770
__func__, chap->qid, ret);
drivers/nvme/host/auth.c
806
__func__, chap->qid);
drivers/nvme/host/auth.c
813
ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
drivers/nvme/host/auth.c
821
__func__, chap->qid);
drivers/nvme/host/auth.c
824
ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
drivers/nvme/host/auth.c
829
chap->qid, ret < 0 ? "error" : "nvme status", ret);
drivers/nvme/host/auth.c
833
ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
drivers/nvme/host/auth.c
851
__func__, chap->qid);
drivers/nvme/host/auth.c
86
qid == 0 ? NVME_QID_ANY : qid, flags);
drivers/nvme/host/auth.c
860
__func__, chap->qid);
drivers/nvme/host/auth.c
871
__func__, chap->qid);
drivers/nvme/host/auth.c
879
ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
drivers/nvme/host/auth.c
887
__func__, chap->qid);
drivers/nvme/host/auth.c
89
"qid %d auth_send failed with status %d\n", qid, ret);
drivers/nvme/host/auth.c
890
ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
drivers/nvme/host/auth.c
895
chap->qid, ret < 0 ? "error" : "nvme status", ret);
drivers/nvme/host/auth.c
899
ret = nvme_auth_receive_validate(ctrl, chap->qid,
drivers/nvme/host/auth.c
912
__func__, chap->qid);
drivers/nvme/host/auth.c
92
"qid %d auth_send failed with error %d\n", qid, ret);
drivers/nvme/host/auth.c
932
__func__, chap->qid);
drivers/nvme/host/auth.c
934
ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
drivers/nvme/host/auth.c
945
__func__, chap->qid);
drivers/nvme/host/auth.c
956
__func__, chap->qid, chap->status);
drivers/nvme/host/auth.c
958
ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
drivers/nvme/host/auth.c
96
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
drivers/nvme/host/auth.c
967
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
drivers/nvme/host/auth.c
972
dev_warn(ctrl->device, "qid %d: no key\n", qid);
drivers/nvme/host/auth.c
977
dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
drivers/nvme/host/auth.c
981
chap = &ctrl->dhchap_ctxs[qid];
drivers/nvme/host/auth.c
988
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
drivers/nvme/host/auth.c
993
chap = &ctrl->dhchap_ctxs[qid];
drivers/nvme/host/core.c
1161
int qid, nvme_submit_flags_t flags)
drivers/nvme/host/core.c
1171
if (qid == NVME_QID_ANY)
drivers/nvme/host/core.c
1175
qid - 1);
drivers/nvme/host/fabrics.c
352
case (offsetof(struct nvmf_connect_command, qid)):
drivers/nvme/host/fabrics.c
355
inv_sqe, cmd->connect.qid);
drivers/nvme/host/fabrics.c
411
static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
drivers/nvme/host/fabrics.c
416
cmd->connect.qid = cpu_to_le16(qid);
drivers/nvme/host/fabrics.c
418
if (qid) {
drivers/nvme/host/fabrics.c
525
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
drivers/nvme/host/fabrics.c
533
nvmf_connect_cmd_prep(ctrl, qid, &cmd);
drivers/nvme/host/fabrics.c
540
data, sizeof(*data), qid,
drivers/nvme/host/fabrics.c
554
"qid %d: secure concatenation is not supported\n", qid);
drivers/nvme/host/fabrics.c
559
ret = nvme_auth_negotiate(ctrl, qid);
drivers/nvme/host/fabrics.c
562
"qid %d: authentication setup failed\n", qid);
drivers/nvme/host/fabrics.c
565
ret = nvme_auth_wait(ctrl, qid);
drivers/nvme/host/fabrics.c
569
qid, ret);
drivers/nvme/host/fabrics.h
231
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
drivers/nvme/host/fc.c
1291
conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
drivers/nvme/host/nvme.h
1238
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
drivers/nvme/host/nvme.h
1239
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
drivers/nvme/host/nvme.h
1255
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
drivers/nvme/host/nvme.h
1259
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
drivers/nvme/host/nvme.h
817
static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
drivers/nvme/host/nvme.h
819
return !qid &&
drivers/nvme/host/nvme.h
964
int qid, nvme_submit_flags_t flags);
drivers/nvme/host/pci.c
1526
if (!nvmeq->qid)
drivers/nvme/host/pci.c
1528
return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
drivers/nvme/host/pci.c
1544
if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
drivers/nvme/host/pci.c
1714
c.delete_queue.qid = cpu_to_le16(id);
drivers/nvme/host/pci.c
1719
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
drivers/nvme/host/pci.c
1734
c.create_cq.cqid = cpu_to_le16(qid);
drivers/nvme/host/pci.c
1742
static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
drivers/nvme/host/pci.c
1763
c.create_sq.sqid = cpu_to_le16(qid);
drivers/nvme/host/pci.c
1766
c.create_sq.cqid = cpu_to_le16(qid);
drivers/nvme/host/pci.c
1894
req->tag, nvme_cid(req), nvmeq->qid);
drivers/nvme/host/pci.c
1911
req->tag, nvme_cid(req), nvmeq->qid);
drivers/nvme/host/pci.c
1927
if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) {
drivers/nvme/host/pci.c
1931
nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
drivers/nvme/host/pci.c
1944
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
drivers/nvme/host/pci.c
1949
nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
drivers/nvme/host/pci.c
2010
static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
drivers/nvme/host/pci.c
2012
struct nvme_queue *nvmeq = &dev->queues[qid];
drivers/nvme/host/pci.c
2021
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
drivers/nvme/host/pci.c
2078
int qid)
drivers/nvme/host/pci.c
2082
if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
drivers/nvme/host/pci.c
2103
static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
drivers/nvme/host/pci.c
2105
struct nvme_queue *nvmeq = &dev->queues[qid];
drivers/nvme/host/pci.c
2107
if (dev->ctrl.queue_count > qid)
drivers/nvme/host/pci.c
2110
nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
drivers/nvme/host/pci.c
2117
if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
drivers/nvme/host/pci.c
2125
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
drivers/nvme/host/pci.c
2126
nvmeq->qid = qid;
drivers/nvme/host/pci.c
2145
nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
drivers/nvme/host/pci.c
2148
NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
drivers/nvme/host/pci.c
2152
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
drivers/nvme/host/pci.c
2160
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
drivers/nvme/host/pci.c
2162
nvme_dbbuf_init(dev, nvmeq, qid);
drivers/nvme/host/pci.c
2189
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
drivers/nvme/host/pci.c
2202
vector = dev->num_vecs == 1 ? 0 : qid;
drivers/nvme/host/pci.c
2206
result = adapter_alloc_cq(dev, qid, nvmeq, vector);
drivers/nvme/host/pci.c
2210
result = adapter_alloc_sq(dev, qid, nvmeq);
drivers/nvme/host/pci.c
2221
nvme_init_queue(nvmeq, qid);
drivers/nvme/host/pci.c
2235
adapter_delete_sq(dev, qid);
drivers/nvme/host/pci.c
2237
adapter_delete_cq(dev, qid);
drivers/nvme/host/pci.c
3072
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
drivers/nvme/host/pci.c
346
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
drivers/nvme/host/pci.c
348
return qid * 2 * stride;
drivers/nvme/host/pci.c
351
static inline unsigned int cq_idx(unsigned int qid, u32 stride)
drivers/nvme/host/pci.c
353
return (qid * 2 + 1) * stride;
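The sq_idx()/cq_idx() helpers above encode the NVMe doorbell layout: each queue pair owns two consecutive slots, submission tail at 2 * qid and completion head at 2 * qid + 1, both scaled by the controller's doorbell stride. A directly runnable restatement of those two formulas:

    /* Sketch: queue pair `qid` owns slots 2*qid (SQ) and 2*qid+1 (CQ). */
    #include <stdio.h>

    static unsigned int sq_idx(unsigned int qid, unsigned int stride)
    {
        return qid * 2 * stride;
    }

    static unsigned int cq_idx(unsigned int qid, unsigned int stride)
    {
        return (qid * 2 + 1) * stride;
    }

    int main(void)
    {
        unsigned int stride = 1;   /* CAP.DSTRD == 0 gives a stride of one 32-bit slot */

        for (unsigned int qid = 0; qid < 3; qid++)
            printf("qid %u: sq slot %u, cq slot %u\n",
                   qid, sq_idx(qid, stride), cq_idx(qid, stride));
        return 0;
    }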
drivers/nvme/host/pci.c
381
u16 qid;
drivers/nvme/host/pci.c
508
struct nvme_queue *nvmeq, int qid)
drivers/nvme/host/pci.c
510
if (!dev->dbbuf_dbs || !qid)
drivers/nvme/host/pci.c
513
nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
drivers/nvme/host/pci.c
514
nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
drivers/nvme/host/pci.c
515
nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
drivers/nvme/host/pci.c
516
nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
drivers/nvme/host/pci.c
521
if (!nvmeq->qid)
drivers/nvme/host/pci.c
630
unsigned qid)
drivers/nvme/host/pci.c
633
struct nvme_queue *nvmeq = &dev->queues[qid];
drivers/nvme/host/pci.c
637
tags = qid ? dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0];
drivers/nvme/host/pci.c
771
if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
drivers/nvme/host/rdma.c
1856
priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
drivers/nvme/host/rdma.c
1861
if (priv.qid == 0) {
drivers/nvme/host/rdma.c
1959
int qid = nvme_rdma_queue_idx(queue);
drivers/nvme/host/rdma.c
1964
nvme_fabrics_opcode_str(qid, cmd), qid);
drivers/nvme/host/tcp.c
1437
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
drivers/nvme/host/tcp.c
1440
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
drivers/nvme/host/tcp.c
1598
int qid = nvme_tcp_queue_id(queue);
drivers/nvme/host/tcp.c
1601
qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
drivers/nvme/host/tcp.c
1607
int qid = nvme_tcp_queue_id(queue);
drivers/nvme/host/tcp.c
1611
qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
drivers/nvme/host/tcp.c
1618
int qid = nvme_tcp_queue_id(queue);
drivers/nvme/host/tcp.c
1623
qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
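The nvme-tcp comparisons above carve the queue-id space into consecutive bands: qid 0 is the admin queue, then the default, read, and poll I/O queues follow in order, so membership is a cumulative range check. A sketch of that classification with made-up queue counts; the actual driver also guards on the per-band counts being nonzero:

    /* Sketch: classify a queue id into admin/[default][read][poll] bands. */
    #include <stdio.h>

    enum band { ADMIN, DEFAULT_Q, READ_Q, POLL_Q };

    static enum band classify(int qid, int n_default, int n_read, int n_poll)
    {
        if (qid == 0)
            return ADMIN;
        if (qid < 1 + n_default)
            return DEFAULT_Q;
        if (qid < 1 + n_default + n_read)
            return READ_Q;
        return POLL_Q;
    }

    int main(void)
    {
        for (int qid = 0; qid <= 6; qid++)
            printf("qid %d -> band %d\n", qid, classify(qid, 2, 2, 2));
        return 0;
    }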
drivers/nvme/host/tcp.c
1641
int qid = nvme_tcp_queue_id(queue) - 1;
drivers/nvme/host/tcp.c
1663
if (mq_map[cpu] != qid)
drivers/nvme/host/tcp.c
1677
qid, queue->io_cpu);
drivers/nvme/host/tcp.c
1684
int qid = nvme_tcp_queue_id(queue);
drivers/nvme/host/tcp.c
1688
qid, pskid, status);
drivers/nvme/host/tcp.c
1698
qid, pskid);
drivers/nvme/host/tcp.c
1702
if (qid == 0)
drivers/nvme/host/tcp.c
1716
int qid = nvme_tcp_queue_id(queue);
drivers/nvme/host/tcp.c
1723
qid, pskid);
drivers/nvme/host/tcp.c
1739
qid, ret);
drivers/nvme/host/tcp.c
1749
qid, ret);
drivers/nvme/host/tcp.c
1755
qid, queue->tls_err);
drivers/nvme/host/tcp.c
1758
"queue %d: TLS handshake complete\n", qid);
drivers/nvme/host/tcp.c
1765
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
drivers/nvme/host/tcp.c
1769
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
drivers/nvme/host/tcp.c
1780
if (qid > 0)
drivers/nvme/host/tcp.c
1843
qid, ret);
drivers/nvme/host/tcp.c
1857
iface, qid, ret);
drivers/nvme/host/tcp.c
1932
static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
drivers/nvme/host/tcp.c
1935
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
drivers/nvme/host/tcp.c
1951
static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
drivers/nvme/host/tcp.c
1954
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
drivers/nvme/host/tcp.c
1966
qid);
drivers/nvme/host/tcp.c
1969
static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
drivers/nvme/host/tcp.c
1971
nvme_tcp_stop_queue_nowait(nctrl, qid);
drivers/nvme/host/tcp.c
1972
nvme_tcp_wait_queue(nctrl, qid);
drivers/nvme/host/tcp.c
2646
int qid = nvme_tcp_queue_id(req->queue);
drivers/nvme/host/tcp.c
2651
nvme_fabrics_opcode_str(qid, cmd), qid);
drivers/nvme/host/trace.c
411
u16 qid = get_unaligned_le16(spc + 2);
drivers/nvme/host/trace.c
417
recfmt, qid, sqsize, cattr, kato);
drivers/nvme/host/trace.h
100
__entry->qid = nvme_req_qid(req);
drivers/nvme/host/trace.h
110
__entry->qid, __entry->cid, __entry->result,
drivers/nvme/host/trace.h
147
__field(int, qid)
drivers/nvme/host/trace.h
154
__entry->qid = nvme_req_qid(req);
drivers/nvme/host/trace.h
160
__entry->qid, __entry->sq_head, __entry->sq_tail
drivers/nvme/host/trace.h
26
#define parse_nvme_cmd(qid, opcode, fctype, cdw10) \
drivers/nvme/host/trace.h
29
((qid) ? \
drivers/nvme/host/trace.h
53
__field(int, qid)
drivers/nvme/host/trace.h
64
__entry->qid = nvme_req_qid(req);
drivers/nvme/host/trace.h
77
__entry->qid, __entry->cid, __entry->nsid,
drivers/nvme/host/trace.h
79
show_opcode_name(__entry->qid, __entry->opcode,
drivers/nvme/host/trace.h
81
parse_nvme_cmd(__entry->qid, __entry->opcode,
drivers/nvme/host/trace.h
91
__field(int, qid)
drivers/nvme/target/admin-cmd.c
1158
req->cmd->identify.cns, req->sq->qid);
drivers/nvme/target/admin-cmd.c
1647
if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
drivers/nvme/target/admin-cmd.c
18
u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
drivers/nvme/target/admin-cmd.c
643
req->cmd->get_log_page.lid, req->sq->qid);
drivers/nvme/target/admin-cmd.c
86
u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
drivers/nvme/target/auth.c
284
if (req->sq->qid > 0)
drivers/nvme/target/auth.c
352
ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
drivers/nvme/target/auth.c
574
__func__, sq->ctrl->cntlid, sq->qid, ret);
drivers/nvme/target/auth.c
582
__func__, sq->ctrl->cntlid, sq->qid, ret);
drivers/nvme/target/auth.c
589
__func__, sq->ctrl->cntlid, sq->qid, ret);
drivers/nvme/target/auth.c
599
__func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
drivers/nvme/target/core.c
1199
else if (likely(req->sq->qid != 0))
drivers/nvme/target/core.c
1237
if (likely(req->sq->qid != 0))
drivers/nvme/target/core.c
1312
!req->sq->ctrl || !req->sq->qid || !req->ns)
drivers/nvme/target/core.c
1507
req->cmd->common.opcode, req->sq->qid);
drivers/nvme/target/core.c
1513
req->cmd->common.opcode, req->sq->qid);
drivers/nvme/target/core.c
1518
pr_warn("qid %d not authenticated\n", req->sq->qid);
drivers/nvme/target/core.c
765
new_error_slot->sqid = cpu_to_le16(req->sq->qid);
drivers/nvme/target/core.c
784
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
drivers/nvme/target/core.c
829
u16 qid, u16 size)
drivers/nvme/target/core.c
83
req->sq->qid);
drivers/nvme/target/core.c
831
cq->qid = qid;
drivers/nvme/target/core.c
834
ctrl->cqs[qid] = cq;
drivers/nvme/target/core.c
842
ctrl->cqs[cq->qid] = NULL;
drivers/nvme/target/core.c
849
u16 qid, u16 size)
drivers/nvme/target/core.c
852
sq->qid = qid;
drivers/nvme/target/core.c
855
ctrl->sqs[qid] = sq;
drivers/nvme/target/core.c
893
u16 qid, u16 size)
drivers/nvme/target/core.c
897
status = nvmet_check_cqid(ctrl, qid, true);
drivers/nvme/target/core.c
906
nvmet_cq_setup(ctrl, cq, qid, size);
drivers/nvme/target/core.c
992
sq->ctrl->sqs[sq->qid] = NULL;
drivers/nvme/target/fabrics-cmd-auth.c
117
__func__, ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
121
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
128
ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
132
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
145
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
162
ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
168
ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
175
ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
177
ctrl->cntlid, req->sq->qid, data->hl, data->rval);
drivers/nvme/target/fabrics-cmd-auth.c
179
ctrl->cntlid, req->sq->qid, data->hl, response);
drivers/nvme/target/fabrics-cmd-auth.c
185
__func__, ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
188
__func__, ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
199
__func__, ctrl->cntlid, req->sq->qid, data->hl,
drivers/nvme/target/fabrics-cmd-auth.c
21
__func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
drivers/nvme/target/fabrics-cmd-auth.c
285
ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
drivers/nvme/target/fabrics-cmd-auth.c
294
__func__, ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
295
if (!req->sq->qid) {
drivers/nvme/target/fabrics-cmd-auth.c
324
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
330
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
357
__func__, ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
363
ctrl->cntlid, req->sq->qid, dhchap_status);
drivers/nvme/target/fabrics-cmd-auth.c
384
ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
388
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
41
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
442
__func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
drivers/nvme/target/fabrics-cmd-auth.c
462
ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
469
ctrl->cntlid, req->sq->qid, data->hl, data->rval);
drivers/nvme/target/fabrics-cmd-auth.c
51
if (req->sq->qid)
drivers/nvme/target/fabrics-cmd-auth.c
534
ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
drivers/nvme/target/fabrics-cmd-auth.c
539
ctrl->cntlid, req->sq->qid, status);
drivers/nvme/target/fabrics-cmd-auth.c
552
ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd-auth.c
562
ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
drivers/nvme/target/fabrics-cmd-auth.c
566
ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
drivers/nvme/target/fabrics-cmd-auth.c
89
__func__, ctrl->cntlid, req->sq->qid);
drivers/nvme/target/fabrics-cmd-auth.c
93
__func__, ctrl->cntlid, req->sq->qid,
drivers/nvme/target/fabrics-cmd.c
175
u16 qid = le16_to_cpu(c->qid);
drivers/nvme/target/fabrics-cmd.c
189
if (ctrl->sqs[qid] != NULL) {
drivers/nvme/target/fabrics-cmd.c
190
pr_warn("qid %u has already been created\n", qid);
drivers/nvme/target/fabrics-cmd.c
191
req->error_loc = offsetof(struct nvmf_connect_command, qid);
drivers/nvme/target/fabrics-cmd.c
196
if (qid && sqsize > mqes) {
drivers/nvme/target/fabrics-cmd.c
220
nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
drivers/nvme/target/fabrics-cmd.c
221
nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
drivers/nvme/target/fabrics-cmd.c
232
qid, ctrl->cntlid, ret);
drivers/nvme/target/fabrics-cmd.c
233
ctrl->sqs[qid] = NULL;
drivers/nvme/target/fabrics-cmd.c
251
if (sq->qid)
drivers/nvme/target/fabrics-cmd.c
256
__func__, ctrl->cntlid, sq->qid,
drivers/nvme/target/fabrics-cmd.c
260
__func__, ctrl->cntlid, sq->qid,
drivers/nvme/target/fabrics-cmd.c
341
u16 qid = le16_to_cpu(c->qid);
drivers/nvme/target/fabrics-cmd.c
373
if (unlikely(qid > ctrl->subsys->max_qid)) {
drivers/nvme/target/fabrics-cmd.c
374
pr_warn("invalid queue id (%d)\n", qid);
drivers/nvme/target/fabrics-cmd.c
376
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
drivers/nvme/target/fabrics-cmd.c
384
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
drivers/nvme/target/fabrics-cmd.c
425
if (cmd->connect.qid == 0)
drivers/nvme/target/fc.c
133
u16 qid;
drivers/nvme/target/fc.c
1795
be16_to_cpu(rqst->connect_cmd.qid),
drivers/nvme/target/fc.c
1835
be16_to_cpu(rqst->connect_cmd.qid)));
drivers/nvme/target/fc.c
190
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
drivers/nvme/target/fc.c
192
return (assoc->association_id | qid);
drivers/nvme/target/fc.c
2481
cqe->sq_id = cpu_to_le16(fod->queue->qid);
drivers/nvme/target/fc.c
699
fcpreq->hwqid = queue->qid ?
drivers/nvme/target/fc.c
700
((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
drivers/nvme/target/fc.c
783
u16 qid, u16 sqsize)
drivers/nvme/target/fc.c
788
if (qid > NVMET_NR_QUEUES)
drivers/nvme/target/fc.c
797
assoc->a_id, qid);
drivers/nvme/target/fc.c
801
queue->qid = qid;
drivers/nvme/target/fc.c
821
WARN_ON(assoc->queues[qid]);
drivers/nvme/target/fc.c
822
assoc->queues[qid] = queue;
drivers/nvme/target/fc.c
948
u16 qid = nvmet_fc_getqueueid(connection_id);
drivers/nvme/target/fc.c
950
if (qid > NVMET_NR_QUEUES)
drivers/nvme/target/fc.c
956
queue = assoc->queues[qid];
drivers/nvme/target/nvmet.h
145
u16 qid;
drivers/nvme/target/nvmet.h
154
u16 qid;
drivers/nvme/target/nvmet.h
582
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
drivers/nvme/target/nvmet.h
584
u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
drivers/nvme/target/nvmet.h
591
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
drivers/nvme/target/nvmet.h
594
struct nvmet_cq *cq, u16 qid, u16 size);
drivers/nvme/target/passthru.c
308
if (likely(req->sq->qid != 0)) {
drivers/nvme/target/pci-epf.c
1179
iod->sq->qid, nvmet_pci_epf_iod_name(iod),
drivers/nvme/target/pci-epf.c
1281
cq->qid = cqid;
drivers/nvme/target/pci-epf.c
1315
cq->qid, ret);
drivers/nvme/target/pci-epf.c
1321
cq->qid);
drivers/nvme/target/pci-epf.c
1381
sq->qid = sqid;
drivers/nvme/target/pci-epf.c
1545
unsigned int qid, bool sq)
drivers/nvme/target/pci-epf.c
1550
queue = &ctrl->sq[qid];
drivers/nvme/target/pci-epf.c
1552
queue = &ctrl->cq[qid];
drivers/nvme/target/pci-epf.c
1556
queue->qid = qid;
drivers/nvme/target/pci-epf.c
1563
unsigned int qid;
drivers/nvme/target/pci-epf.c
1576
for (qid = 0; qid < ctrl->nr_queues; qid++) {
drivers/nvme/target/pci-epf.c
1577
nvmet_pci_epf_init_queue(ctrl, qid, true);
drivers/nvme/target/pci-epf.c
1578
nvmet_pci_epf_init_queue(ctrl, qid, false);
drivers/nvme/target/pci-epf.c
1687
sq->qid, head, sq->tail,
drivers/nvme/target/pci-epf.c
1792
cqe->sq_id = cpu_to_le16(iod->sq->qid);
drivers/nvme/target/pci-epf.c
1798
cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
drivers/nvme/target/pci-epf.c
1918
int qid;
drivers/nvme/target/pci-epf.c
1930
for (qid = 1; qid < ctrl->nr_queues; qid++)
drivers/nvme/target/pci-epf.c
1931
nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);
drivers/nvme/target/pci-epf.c
1933
for (qid = 1; qid < ctrl->nr_queues; qid++)
drivers/nvme/target/pci-epf.c
1934
nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);
drivers/nvme/target/pci-epf.c
600
if (!cq->qid)
drivers/nvme/target/pci-epf.c
661
cq->qid, ret);
drivers/nvme/target/pci-epf.c
669
return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
drivers/nvme/target/pci-epf.c
690
iod->cq = &ctrl->cq[sq->qid];
drivers/nvme/target/pci-epf.c
91
u16 qid;
drivers/nvme/target/pr.c
902
if (req->sq->qid) {
drivers/nvme/target/pr.c
922
if (req->sq->qid) {
drivers/nvme/target/rdma.c
1391
queue->host_qid = le16_to_cpu(req->qid);
drivers/nvme/target/tcp.c
2157
if (sq->qid == 0) {
drivers/nvme/target/trace.c
330
u16 qid = get_unaligned_le16(spc + 2);
drivers/nvme/target/trace.c
336
recfmt, qid, sqsize, cattr, kato);
drivers/nvme/target/trace.h
100
__entry->qid, __entry->cid, __entry->nsid,
drivers/nvme/target/trace.h
102
show_opcode_name(__entry->qid, __entry->opcode,
drivers/nvme/target/trace.h
104
parse_nvme_cmd(__entry->qid, __entry->opcode,
drivers/nvme/target/trace.h
114
__field(int, qid)
drivers/nvme/target/trace.h
121
__entry->qid = req->cq->qid;
drivers/nvme/target/trace.h
130
__entry->qid, __entry->cid, __entry->result, __entry->status)
drivers/nvme/target/trace.h
28
#define parse_nvme_cmd(qid, opcode, fctype, cdw10) \
drivers/nvme/target/trace.h
31
(qid ? \
drivers/nvme/target/trace.h
73
__field(int, qid)
drivers/nvme/target/trace.h
86
__entry->qid = req->sq->qid;
drivers/pinctrl/pinctrl-zynqmp.c
521
qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_GROUPS;
drivers/pinctrl/pinctrl-zynqmp.c
540
qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS;
drivers/pinctrl/pinctrl-zynqmp.c
643
qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_NAME;
drivers/pinctrl/pinctrl-zynqmp.c
661
qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTIONS;
drivers/pinctrl/pinctrl-zynqmp.c
678
qdata.qid = PM_QID_PINCTRL_GET_PIN_GROUPS;
drivers/pinctrl/pinctrl-zynqmp.c
834
qdata.qid = PM_QID_PINCTRL_GET_NUM_PINS;
drivers/pinctrl/pinctrl-zynqmp.c
893
qdata.qid = PM_QID_PINCTRL_GET_ATTRIBUTES;
drivers/s390/crypto/ap_bus.c
1019
to_ap_queue(dev)->qid);
drivers/s390/crypto/ap_bus.c
1072
struct ap_queue *ap_get_qdev(ap_qid_t qid)
drivers/s390/crypto/ap_bus.c
1079
if (aq->qid == qid) {
drivers/s390/crypto/ap_bus.c
1852
static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
drivers/s390/crypto/ap_bus.c
1859
__func__, AP_QID_CARD(qid),
drivers/s390/crypto/ap_bus.c
1860
AP_QID_QUEUE(qid), rawtype);
drivers/s390/crypto/ap_bus.c
1877
status = ap_qact(qid, 0, &apinfo);
drivers/s390/crypto/ap_bus.c
1885
__func__, AP_QID_CARD(qid),
drivers/s390/crypto/ap_bus.c
1886
AP_QID_QUEUE(qid), rawtype);
drivers/s390/crypto/ap_bus.c
1889
__func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
drivers/s390/crypto/ap_bus.c
1909
return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
drivers/s390/crypto/ap_bus.c
1919
AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
drivers/s390/crypto/ap_bus.c
1988
ap_qid_t qid;
drivers/s390/crypto/ap_bus.c
1998
qid = AP_MKQID(ac->id, dom);
drivers/s390/crypto/ap_bus.c
2000
(void *)(long)qid,
drivers/s390/crypto/ap_bus.c
2012
rc = ap_queue_info(qid, &hwinfo, &decfg, &chkstop);
drivers/s390/crypto/ap_bus.c
2028
aq = ap_queue_create(qid, ac);
drivers/s390/crypto/ap_bus.c
2146
ap_qid_t qid;
drivers/s390/crypto/ap_bus.c
2174
qid = AP_MKQID(ap, dom);
drivers/s390/crypto/ap_bus.c
2175
if (ap_queue_info(qid, &hwinfo, &decfg, &chkstop) > 0)
drivers/s390/crypto/ap_bus.c
2248
comp_type = ap_get_compatible_type(qid, hwinfo.at, hwinfo.fac);
drivers/s390/crypto/ap_bus.c
361
static int ap_queue_info(ap_qid_t qid, struct ap_tapq_hwinfo *hwinfo,
drivers/s390/crypto/ap_bus.c
369
if (AP_QID_CARD(qid) > ap_max_adapter_id ||
drivers/s390/crypto/ap_bus.c
370
AP_QID_QUEUE(qid) > ap_max_domain_id)
drivers/s390/crypto/ap_bus.c
374
status = ap_test_queue(qid, ap_apft_available(), hwinfo);
drivers/s390/crypto/ap_bus.c
855
AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
drivers/s390/crypto/ap_bus.c
869
card = AP_QID_CARD(aq->qid);
drivers/s390/crypto/ap_bus.c
870
queue = AP_QID_QUEUE(aq->qid);
drivers/s390/crypto/ap_bus.c
992
card = AP_QID_CARD(to_ap_queue(dev)->qid);
drivers/s390/crypto/ap_bus.c
993
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
drivers/s390/crypto/ap_bus.h
197
ap_qid_t qid; /* AP queue id. */
drivers/s390/crypto/ap_queue.c
1006
status = ap_bapq(aq->qid);
drivers/s390/crypto/ap_queue.c
1010
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
1017
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
1021
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
103
__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
drivers/s390/crypto/ap_queue.c
1030
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
1037
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
1057
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
1061
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
109
qid |= 0x400000UL;
drivers/s390/crypto/ap_queue.c
1107
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
111
status = ap_nqap(qid, psmid, msg, msglen);
drivers/s390/crypto/ap_queue.c
1111
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
1119
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
113
trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid),
drivers/s390/crypto/ap_queue.c
1131
status = ap_aapq(aq->qid, value);
drivers/s390/crypto/ap_queue.c
1142
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
1182
struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
drivers/s390/crypto/ap_queue.c
1196
aq->qid = qid;
drivers/s390/crypto/ap_queue.c
1374
ap_zapq(aq->qid, 0);
drivers/s390/crypto/ap_queue.c
150
status = ap_dqap(aq->qid, &aq->reply->psmid,
drivers/s390/crypto/ap_queue.c
156
trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
drivers/s390/crypto/ap_queue.c
186
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
197
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
drivers/s390/crypto/ap_queue.c
241
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
266
ap_qid_t qid = aq->qid;
drivers/s390/crypto/ap_queue.c
275
status = __ap_send(qid, ap_msg->psmid,
drivers/s390/crypto/ap_queue.c
315
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
341
status = ap_rapq(aq->qid, aq->rapq_fbit);
drivers/s390/crypto/ap_queue.c
355
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
373
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
396
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
416
status = ap_tapq(aq->qid, NULL);
drivers/s390/crypto/ap_queue.c
436
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
451
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
458
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
466
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
479
AP_QID_CARD(aq->qid),
drivers/s390/crypto/ap_queue.c
480
AP_QID_QUEUE(aq->qid), aq->assoc_idx);
drivers/s390/crypto/ap_queue.c
492
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
654
__func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
67
status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
drivers/s390/crypto/ap_queue.c
672
status = ap_tapq(aq->qid, NULL);
drivers/s390/crypto/ap_queue.c
720
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
724
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
79
AP_QID_CARD(aq->qid),
drivers/s390/crypto/ap_queue.c
80
AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
929
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
933
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
980
status = ap_test_queue(aq->qid, 1, &hwinfo);
drivers/s390/crypto/ap_queue.c
984
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/ap_queue.c
994
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
drivers/s390/crypto/vfio_ap_ops.c
2433
q->apqn = to_ap_queue(&apdev->device)->qid;
drivers/s390/crypto/zcrypt_api.c
1052
int cpen, qpen, qid = 0, rc;
drivers/s390/crypto/zcrypt_api.c
1130
!is_desired_ep11_queue(zq->queue->qid,
drivers/s390/crypto/zcrypt_api.c
1135
AP_QID_QUEUE(zq->queue->qid)))
drivers/s390/crypto/zcrypt_api.c
1139
tr->last_qid == zq->queue->qid) ?
drivers/s390/crypto/zcrypt_api.c
1166
qid = pref_zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
1183
tr->last_qid = qid;
drivers/s390/crypto/zcrypt_api.c
1186
AP_QID_CARD(qid), AP_QID_QUEUE(qid),
drivers/s390/crypto/zcrypt_api.c
1224
int qid = 0, rc = -ENODEV;
drivers/s390/crypto/zcrypt_api.c
1269
qid = pref_zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
1279
AP_QID_CARD(qid), AP_QID_QUEUE(qid),
drivers/s390/crypto/zcrypt_api.c
1297
card = AP_QID_CARD(zq->queue->qid);
drivers/s390/crypto/zcrypt_api.c
1300
queue = AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_api.c
1304
stat->qid = zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
1325
card = AP_QID_CARD(zq->queue->qid);
drivers/s390/crypto/zcrypt_api.c
1326
queue = AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_api.c
1332
stat->qid = zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
1351
if (card == AP_QID_CARD(zq->queue->qid) &&
drivers/s390/crypto/zcrypt_api.c
1352
queue == AP_QID_QUEUE(zq->queue->qid)) {
drivers/s390/crypto/zcrypt_api.c
1355
devstat->qid = zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
1378
card = AP_QID_CARD(zq->queue->qid);
drivers/s390/crypto/zcrypt_api.c
1379
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
drivers/s390/crypto/zcrypt_api.c
1399
card = AP_QID_CARD(zq->queue->qid);
drivers/s390/crypto/zcrypt_api.c
1400
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
drivers/s390/crypto/zcrypt_api.c
1426
card = AP_QID_CARD(zq->queue->qid);
drivers/s390/crypto/zcrypt_api.c
1427
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
drivers/s390/crypto/zcrypt_api.c
1451
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
drivers/s390/crypto/zcrypt_api.c
1474
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
drivers/s390/crypto/zcrypt_api.c
649
int cpen, qpen, qid = 0, rc;
drivers/s390/crypto/zcrypt_api.c
704
AP_QID_QUEUE(zq->queue->qid)))
drivers/s390/crypto/zcrypt_api.c
708
tr->last_qid == zq->queue->qid) ?
drivers/s390/crypto/zcrypt_api.c
727
qid = pref_zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
738
tr->last_qid = qid;
drivers/s390/crypto/zcrypt_api.c
741
AP_QID_CARD(qid), AP_QID_QUEUE(qid),
drivers/s390/crypto/zcrypt_api.c
755
int cpen, qpen, qid = 0, rc;
drivers/s390/crypto/zcrypt_api.c
810
AP_QID_QUEUE(zq->queue->qid)))
drivers/s390/crypto/zcrypt_api.c
814
tr->last_qid == zq->queue->qid) ?
drivers/s390/crypto/zcrypt_api.c
833
qid = pref_zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
844
tr->last_qid = qid;
drivers/s390/crypto/zcrypt_api.c
847
AP_QID_CARD(qid), AP_QID_QUEUE(qid),
drivers/s390/crypto/zcrypt_api.c
863
int cpen, qpen, qid = 0, rc;
drivers/s390/crypto/zcrypt_api.c
933
tdom != AP_QID_QUEUE(zq->queue->qid)))
drivers/s390/crypto/zcrypt_api.c
937
AP_QID_QUEUE(zq->queue->qid)))
drivers/s390/crypto/zcrypt_api.c
941
tr->last_qid == zq->queue->qid) ?
drivers/s390/crypto/zcrypt_api.c
962
qid = pref_zq->queue->qid;
drivers/s390/crypto/zcrypt_api.c
964
*domain = AP_QID_QUEUE(qid);
drivers/s390/crypto/zcrypt_api.c
980
tr->last_qid = qid;
drivers/s390/crypto/zcrypt_api.c
983
AP_QID_CARD(qid), AP_QID_QUEUE(qid),
drivers/s390/crypto/zcrypt_ccamisc.c
1733
card = AP_QID_CARD(device_status[i].qid);
drivers/s390/crypto/zcrypt_ccamisc.c
1734
dom = AP_QID_QUEUE(device_status[i].qid);
drivers/s390/crypto/zcrypt_cex4.c
120
cca_get_info(AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_cex4.c
121
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_cex4.c
344
ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_cex4.c
345
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_cex4.c
391
ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_cex4.c
392
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_ep11misc.c
1566
card = AP_QID_CARD(device_status[i].qid);
drivers/s390/crypto/zcrypt_ep11misc.c
1567
dom = AP_QID_QUEUE(device_status[i].qid);
drivers/s390/crypto/zcrypt_error.h
82
int card = AP_QID_CARD(zq->queue->qid);
drivers/s390/crypto/zcrypt_error.h
83
int queue = AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_msgtype50.c
357
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype50.c
358
AP_QID_QUEUE(zq->queue->qid), t80h->code);
drivers/s390/crypto/zcrypt_msgtype50.c
360
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype50.c
361
AP_QID_QUEUE(zq->queue->qid), t80h->code);
drivers/s390/crypto/zcrypt_msgtype50.c
390
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype50.c
391
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype50.c
395
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype50.c
396
AP_QID_QUEUE(zq->queue->qid), (int)rtype);
drivers/s390/crypto/zcrypt_msgtype50.c
489
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype50.c
490
AP_QID_QUEUE(zq->queue->qid), rc);
drivers/s390/crypto/zcrypt_msgtype50.c
538
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype50.c
539
AP_QID_QUEUE(zq->queue->qid), rc);
drivers/s390/crypto/zcrypt_msgtype6.c
1128
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
1129
AP_QID_QUEUE(zq->queue->qid), rc);
drivers/s390/crypto/zcrypt_msgtype6.c
1196
AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_msgtype6.c
1214
AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_msgtype6.c
1244
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
1245
AP_QID_QUEUE(zq->queue->qid), rc);
drivers/s390/crypto/zcrypt_msgtype6.c
1297
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_msgtype6.c
234
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_msgtype6.c
304
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
drivers/s390/crypto/zcrypt_msgtype6.c
580
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
581
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
587
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
588
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
591
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
592
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
729
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
730
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
734
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
735
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
drivers/s390/crypto/zcrypt_msgtype6.c
764
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
765
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
769
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
770
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
drivers/s390/crypto/zcrypt_msgtype6.c
794
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
795
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
799
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
800
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
drivers/s390/crypto/zcrypt_msgtype6.c
825
AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
826
AP_QID_QUEUE(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
830
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_msgtype6.c
831
AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
drivers/s390/crypto/zcrypt_queue.c
179
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_queue.c
180
AP_QID_QUEUE(zq->queue->qid));
drivers/s390/crypto/zcrypt_queue.c
220
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_queue.c
221
AP_QID_QUEUE(zq->queue->qid));
drivers/s390/crypto/zcrypt_queue.c
70
__func__, AP_QID_CARD(zq->queue->qid),
drivers/s390/crypto/zcrypt_queue.c
71
AP_QID_QUEUE(zq->queue->qid), online);
drivers/scsi/aacraid/aacraid.h
2730
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
drivers/scsi/aacraid/commsup.c
351
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
drivers/scsi/aacraid/commsup.c
363
q = &dev->queues->queue[qid];
drivers/scsi/aacraid/commsup.c
369
if (qid == AdapNormCmdQueue)
drivers/scsi/aacraid/commsup.c
378
if (qid == AdapNormCmdQueue) {
drivers/scsi/aacraid/commsup.c
389
qid, atomic_read(&q->numpending));
drivers/scsi/aacraid/commsup.c
413
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
drivers/scsi/aacraid/commsup.c
418
if (qid == AdapNormCmdQueue) {
drivers/scsi/aacraid/commsup.c
420
while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
drivers/scsi/aacraid/commsup.c
429
while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
drivers/scsi/aacraid/commsup.c
824
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
drivers/scsi/aacraid/commsup.c
838
switch (qid) {
drivers/scsi/be2iscsi/be_main.h
683
u8 qid[10];
drivers/scsi/bfa/bfa.h
50
(__mh).mtag.h2i.qid = (__bfa)->iocfc.hw_qid[__reqq];\
drivers/scsi/bfa/bfa_core.c
692
bfa_reqq_resume(struct bfa_s *bfa, int qid)
drivers/scsi/bfa/bfa_core.c
697
waitq = bfa_reqq(bfa, qid);
drivers/scsi/bfa/bfa_core.c
702
if (bfa_reqq_full(bfa, qid))
drivers/scsi/bfa/bfa_core.c
712
bfa_isr_rspq(struct bfa_s *bfa, int qid)
drivers/scsi/bfa/bfa_core.c
719
ci = bfa_rspq_ci(bfa, qid);
drivers/scsi/bfa/bfa_core.c
720
pi = bfa_rspq_pi(bfa, qid);
drivers/scsi/bfa/bfa_core.c
725
m = bfa_rspq_elem(bfa, qid, ci);
drivers/scsi/bfa/bfa_core.c
735
bfa_isr_rspq_ack(bfa, qid, ci);
drivers/scsi/bfa/bfa_core.c
740
waitq = bfa_reqq(bfa, qid);
drivers/scsi/bfa/bfa_core.c
742
bfa_reqq_resume(bfa, qid);
drivers/scsi/bfa/bfa_core.c
748
bfa_isr_reqq(struct bfa_s *bfa, int qid)
drivers/scsi/bfa/bfa_core.c
752
bfa_isr_reqq_ack(bfa, qid);
drivers/scsi/bfa/bfa_core.c
757
waitq = bfa_reqq(bfa, qid);
drivers/scsi/bfa/bfa_core.c
759
bfa_reqq_resume(bfa, qid);
drivers/scsi/bfa/bfi.h
52
u8 qid;
drivers/scsi/csiostor/csio_wr.c
1136
uint32_t wr_type, fw_qid, qid;
drivers/scsi/csiostor/csio_wr.c
1169
qid = fw_qid - wrm->fw_iq_start;
drivers/scsi/csiostor/csio_wr.c
1170
q_completed = hw->wrm.intr_map[qid];
drivers/scsi/csiostor/csio_wr.c
1172
if (unlikely(qid ==
drivers/scsi/csiostor/csio_wr.h
274
__be32 qid;
drivers/scsi/hisi_sas/hisi_sas_main.c
599
dq = &hisi_hba->dq[task->abort_task.qid];
drivers/scsi/libsas/sas_scsi_host.c
891
unsigned int qid, void *data)
drivers/scsi/libsas/sas_scsi_host.c
912
task->abort_task.qid = qid;
drivers/scsi/libsas/sas_scsi_host.c
963
unsigned int qid, void *data)
drivers/scsi/libsas/sas_scsi_host.c
966
tag, qid, data);
drivers/scsi/libsas/sas_scsi_host.c
971
unsigned int qid, void *data)
drivers/scsi/libsas/sas_scsi_host.c
974
SCSI_NO_TAG, qid, data);
drivers/scsi/lpfc/lpfc_debugfs.h
584
lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
drivers/scsi/lpfc/lpfc_debugfs.h
589
if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
drivers/scsi/lpfc/lpfc_debugfs.h
592
pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
drivers/scsi/lpfc/lpfc_debugfs.h
597
if (phba->sli4_hba.els_wq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
598
pr_err("ELS WQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
603
if (phba->sli4_hba.nvmels_wq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
604
pr_err("NVME LS WQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
618
lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid)
drivers/scsi/lpfc/lpfc_debugfs.h
620
if (phba->sli4_hba.mbx_wq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
621
printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
635
lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
drivers/scsi/lpfc/lpfc_debugfs.h
637
if (phba->sli4_hba.hdr_rq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
638
printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
642
if (phba->sli4_hba.dat_rq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
643
printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
657
lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
drivers/scsi/lpfc/lpfc_debugfs.h
662
if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
drivers/scsi/lpfc/lpfc_debugfs.h
666
pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
drivers/scsi/lpfc/lpfc_debugfs.h
671
if (phba->sli4_hba.els_cq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
672
pr_err("ELS CQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
677
if (phba->sli4_hba.nvmels_cq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
678
pr_err("NVME LS CQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
683
if (phba->sli4_hba.mbx_cq->queue_id == qid) {
drivers/scsi/lpfc/lpfc_debugfs.h
684
pr_err("MBX CQ[Qid:%d]\n", qid);
drivers/scsi/lpfc/lpfc_debugfs.h
698
lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
drivers/scsi/lpfc/lpfc_debugfs.h
703
if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid)
drivers/scsi/lpfc/lpfc_debugfs.h
707
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
drivers/scsi/mpi3mr/mpi3mr.h
436
u16 qid;
drivers/scsi/mpi3mr/mpi3mr.h
467
u16 qid;
drivers/scsi/mpi3mr/mpi3mr_fw.c
1957
mrioc->req_qinfo[q_idx].qid = 0;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2002
mrioc->op_reply_qinfo[q_idx].qid = 0;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2022
reply_qid = op_reply_q->qid;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2218
reply_qid = op_reply_q->qid;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2325
op_reply_q->qid = reply_qid;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2359
req_qid = op_req_q->qid;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2433
op_req_q->qid = req_qid;
drivers/scsi/mpi3mr/mpi3mr_fw.c
2500
mrioc->op_reply_qinfo[i].qid)) {
drivers/scsi/mpi3mr/mpi3mr_fw.c
4821
mrioc->op_reply_qinfo[i].qid = 0;
drivers/scsi/mpi3mr/mpi3mr_fw.c
4834
mrioc->req_qinfo[i].qid = 0;
drivers/scsi/mpi3mr/mpi3mr_fw.c
556
reply_qidx = op_reply_q->qid - 1;
drivers/scsi/mpi3mr/mpi3mr_os.c
4004
cpu_to_le16(op_req_q->qid);
drivers/scsi/mpt3sas/mpt3sas_base.c
1603
int qid;
drivers/scsi/mpt3sas/mpt3sas_base.c
1605
for (qid = 0; qid < iopoll_q_count; qid++)
drivers/scsi/mpt3sas/mpt3sas_base.c
1606
atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
drivers/scsi/mpt3sas/mpt3sas_base.c
1611
for (qid = 0; qid < iopoll_q_count; qid++) {
drivers/scsi/mpt3sas/mpt3sas_base.c
1612
while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
drivers/scsi/mpt3sas/mpt3sas_base.c
1630
int qid;
drivers/scsi/mpt3sas/mpt3sas_base.c
1632
for (qid = 0; qid < iopoll_q_count; qid++)
drivers/scsi/mpt3sas/mpt3sas_base.c
1633
atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
drivers/scsi/mpt3sas/mpt3sas_base.c
1881
int qid = queue_num - ioc->iopoll_q_start_index;
drivers/scsi/mpt3sas/mpt3sas_base.c
1883
if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
drivers/scsi/mpt3sas/mpt3sas_base.c
1884
!atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
drivers/scsi/mpt3sas/mpt3sas_base.c
1887
reply_q = ioc->io_uring_poll_queues[qid].reply_q;
drivers/scsi/mpt3sas/mpt3sas_base.c
1890
atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
drivers/scsi/mpt3sas/mpt3sas_base.c
3162
int r, qid;
drivers/scsi/mpt3sas/mpt3sas_base.c
3176
qid = index - ioc->iopoll_q_start_index;
drivers/scsi/mpt3sas/mpt3sas_base.c
3178
ioc->driver_name, ioc->id, qid);
drivers/scsi/mpt3sas/mpt3sas_base.c
3180
ioc->io_uring_poll_queues[qid].reply_q = reply_q;
drivers/scsi/qla2xxx/qla_nx.c
3909
uint32_t r_stride, r_value, r_cnt, qid = 0;
drivers/scsi/qla2xxx/qla_nx.c
3921
qla82xx_md_rw_32(ha, s_addr, qid, 1);
drivers/scsi/qla2xxx/qla_nx.c
3928
qid += q_hdr->q_strd.queue_id_stride;
drivers/scsi/qla2xxx/qla_nx2.c
2615
uint32_t r_stride, r_value, r_cnt, qid = 0;
drivers/scsi/qla2xxx/qla_nx2.c
2628
qla8044_wr_reg_indirect(vha, s_addr, qid);
drivers/scsi/qla2xxx/qla_nx2.c
2635
qid += q_hdr->q_strd.queue_id_stride;
drivers/scsi/qla2xxx/qla_target.c
4010
int qid = GET_QID(handle);
drivers/scsi/qla2xxx/qla_target.c
4016
if (qid == rsp->req->id) {
drivers/scsi/qla2xxx/qla_target.c
4018
} else if (vha->hw->req_q_map[qid]) {
drivers/scsi/qla2xxx/qla_target.c
4022
req = vha->hw->req_q_map[qid];
drivers/scsi/qla4xxx/ql4_nx.c
2396
uint32_t r_stride, r_value, r_cnt, qid = 0;
drivers/scsi/qla4xxx/ql4_nx.c
2409
ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
drivers/scsi/qla4xxx/ql4_nx.c
2416
qid += q_hdr->q_strd.queue_id_stride;
drivers/staging/media/ipu3/ipu3-css.c
1904
if (b->queue >= IPU3_CSS_QUEUES || !imgu_css_queues[b->queue].qid)
drivers/staging/media/ipu3/ipu3-css.c
1907
b->queue_pos = imgu_css_queue_pos(css, imgu_css_queues[b->queue].qid,
drivers/staging/media/ipu3/ipu3-css.c
1937
r = imgu_css_queue_data(css, imgu_css_queues[b->queue].qid,
drivers/staging/media/ipu3/ipu3-css.c
1943
imgu_css_queues[b->queue].qid);
drivers/staging/media/ipu3/ipu3-css.c
1976
int evtype, pipe, pipeid, queue, qid, r;
drivers/staging/media/ipu3/ipu3-css.c
1999
qid = imgu_css_queues[queue].qid;
drivers/staging/media/ipu3/ipu3-css.c
2006
if (qid >= IMGU_ABI_QUEUE_NUM) {
drivers/staging/media/ipu3/ipu3-css.c
2007
dev_err(css->dev, "Invalid qid: %i\n", qid);
drivers/staging/media/ipu3/ipu3-css.c
2015
r = imgu_css_dequeue_data(css, qid, &daddr);
drivers/staging/media/ipu3/ipu3-css.c
2023
IMGU_ABI_EVENT_BUFFER_DEQUEUED(qid));
drivers/staging/media/ipu3/ipu3-css.c
98
enum imgu_abi_queue_id qid;
drivers/ufs/host/ufs-mediatek.c
2226
int qid = mcq_intr_info->qid;
drivers/ufs/host/ufs-mediatek.c
2228
hwq = &hba->uhq[qid];
drivers/ufs/host/ufs-mediatek.c
2230
events = ufshcd_mcq_read_cqis(hba, qid);
drivers/ufs/host/ufs-mediatek.c
2232
ufshcd_mcq_write_cqis(hba, events, qid);
drivers/ufs/host/ufs-mediatek.c
2253
host->mcq_intr_info[i].qid = i;
drivers/ufs/host/ufs-mediatek.h
172
u8 qid;
drivers/vdpa/alibaba/eni_vdpa.c
257
static u16 eni_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
drivers/vdpa/alibaba/eni_vdpa.c
261
return vp_legacy_get_queue_size(ldev, qid);
drivers/vdpa/alibaba/eni_vdpa.c
264
static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/alibaba/eni_vdpa.c
270
static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/alibaba/eni_vdpa.c
280
if (!vp_legacy_get_queue_enable(ldev, qid)
drivers/vdpa/alibaba/eni_vdpa.c
288
static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/alibaba/eni_vdpa.c
293
eni_vdpa->vring[qid].cb = *cb;
drivers/vdpa/alibaba/eni_vdpa.c
296
static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/alibaba/eni_vdpa.c
306
vp_legacy_set_queue_address(ldev, qid, 0);
drivers/vdpa/alibaba/eni_vdpa.c
309
static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
drivers/vdpa/alibaba/eni_vdpa.c
313
return vp_legacy_get_queue_enable(ldev, qid);
drivers/vdpa/alibaba/eni_vdpa.c
316
static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/alibaba/eni_vdpa.c
321
u16 n = vp_legacy_get_queue_size(ldev, qid);
drivers/vdpa/alibaba/eni_vdpa.c
330
qid, n, num);
drivers/vdpa/alibaba/eni_vdpa.c
333
static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/alibaba/eni_vdpa.c
340
vp_legacy_set_queue_address(ldev, qid, pfn);
drivers/vdpa/alibaba/eni_vdpa.c
345
static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
drivers/vdpa/alibaba/eni_vdpa.c
349
iowrite16(qid, eni_vdpa->vring[qid].notify);
drivers/vdpa/ifcvf/ifcvf_base.c
13
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
drivers/vdpa/ifcvf/ifcvf_base.c
17
vp_iowrite16(qid, &cfg->queue_select);
drivers/vdpa/ifcvf/ifcvf_base.c
328
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
drivers/vdpa/ifcvf/ifcvf_base.c
333
last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);
drivers/vdpa/ifcvf/ifcvf_base.c
338
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
drivers/vdpa/ifcvf/ifcvf_base.c
342
vp_iowrite16(num, &lm_cfg->vq_state_region + qid * 2);
drivers/vdpa/ifcvf/ifcvf_base.c
347
void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
drivers/vdpa/ifcvf/ifcvf_base.c
351
vp_iowrite16(qid, &cfg->queue_select);
drivers/vdpa/ifcvf/ifcvf_base.c
355
int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
drivers/vdpa/ifcvf/ifcvf_base.c
360
vp_iowrite16(qid, &cfg->queue_select);
drivers/vdpa/ifcvf/ifcvf_base.c
371
bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
drivers/vdpa/ifcvf/ifcvf_base.c
376
vp_iowrite16(qid, &cfg->queue_select);
drivers/vdpa/ifcvf/ifcvf_base.c
382
void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
drivers/vdpa/ifcvf/ifcvf_base.c
386
vp_iowrite16(qid, &cfg->queue_select);
drivers/vdpa/ifcvf/ifcvf_base.c
392
u16 qid;
drivers/vdpa/ifcvf/ifcvf_base.c
394
for (qid = 0; qid < hw->nr_vring; qid++) {
drivers/vdpa/ifcvf/ifcvf_base.c
395
hw->vring[qid].cb.callback = NULL;
drivers/vdpa/ifcvf/ifcvf_base.c
396
hw->vring[qid].cb.private = NULL;
drivers/vdpa/ifcvf/ifcvf_base.c
397
ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
drivers/vdpa/ifcvf/ifcvf_base.c
428
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
drivers/vdpa/ifcvf/ifcvf_base.c
430
vp_iowrite16(qid, hw->vring[qid].notify_addr);
drivers/vdpa/ifcvf/ifcvf_base.c
72
u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
drivers/vdpa/ifcvf/ifcvf_base.c
76
if (qid >= hw->nr_vring)
drivers/vdpa/ifcvf/ifcvf_base.c
79
vp_iowrite16(qid, &hw->common_cfg->queue_select);
drivers/vdpa/ifcvf/ifcvf_base.c
87
u16 queue_size, max_size, qid;
drivers/vdpa/ifcvf/ifcvf_base.c
90
for (qid = 1; qid < hw->nr_vring; qid++) {
drivers/vdpa/ifcvf/ifcvf_base.c
91
queue_size = ifcvf_get_vq_size(hw, qid);
drivers/vdpa/ifcvf/ifcvf_base.h
108
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
drivers/vdpa/ifcvf/ifcvf_base.h
119
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
drivers/vdpa/ifcvf/ifcvf_base.h
120
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
drivers/vdpa/ifcvf/ifcvf_base.h
122
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
drivers/vdpa/ifcvf/ifcvf_base.h
124
void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
drivers/vdpa/ifcvf/ifcvf_base.h
125
int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
drivers/vdpa/ifcvf/ifcvf_base.h
127
bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
drivers/vdpa/ifcvf/ifcvf_base.h
128
void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
drivers/vdpa/ifcvf/ifcvf_base.h
132
u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid);
drivers/vdpa/ifcvf/ifcvf_main.c
464
static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/ifcvf/ifcvf_main.c
469
state->split.avail_index = ifcvf_get_vq_state(vf, qid);
drivers/vdpa/ifcvf/ifcvf_main.c
473
static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/ifcvf/ifcvf_main.c
478
return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
drivers/vdpa/ifcvf/ifcvf_main.c
481
static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/ifcvf/ifcvf_main.c
486
vf->vring[qid].cb = *cb;
drivers/vdpa/ifcvf/ifcvf_main.c
490
u16 qid, bool ready)
drivers/vdpa/ifcvf/ifcvf_main.c
494
ifcvf_set_vq_ready(vf, qid, ready);
drivers/vdpa/ifcvf/ifcvf_main.c
497
static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/ifcvf/ifcvf_main.c
501
return ifcvf_get_vq_ready(vf, qid);
drivers/vdpa/ifcvf/ifcvf_main.c
504
static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/ifcvf/ifcvf_main.c
509
ifcvf_set_vq_num(vf, qid, num);
drivers/vdpa/ifcvf/ifcvf_main.c
512
static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/ifcvf/ifcvf_main.c
518
return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
drivers/vdpa/ifcvf/ifcvf_main.c
521
static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/ifcvf/ifcvf_main.c
525
ifcvf_notify_queue(vf, qid);
drivers/vdpa/ifcvf/ifcvf_main.c
595
u16 qid)
drivers/vdpa/ifcvf/ifcvf_main.c
600
return vf->vring[qid].irq;
drivers/vdpa/ifcvf/ifcvf_main.c
606
u16 qid)
drivers/vdpa/ifcvf/ifcvf_main.c
610
return ifcvf_get_vq_size(vf, qid);
drivers/vdpa/octeon_ep/octep_vdpa.h
100
int octep_set_vq_address(struct octep_hw *oct_hw, u16 qid, u64 desc_area, u64 driver_area,
drivers/vdpa/octeon_ep/octep_vdpa.h
102
void octep_set_vq_num(struct octep_hw *oct_hw, u16 qid, u32 num);
drivers/vdpa/octeon_ep/octep_vdpa.h
103
void octep_set_vq_ready(struct octep_hw *oct_hw, u16 qid, bool ready);
drivers/vdpa/octeon_ep/octep_vdpa.h
104
bool octep_get_vq_ready(struct octep_hw *oct_hw, u16 qid);
drivers/vdpa/octeon_ep/octep_vdpa.h
105
int octep_set_vq_state(struct octep_hw *oct_hw, u16 qid, const struct vdpa_vq_state *state);
drivers/vdpa/octeon_ep/octep_vdpa.h
106
int octep_get_vq_state(struct octep_hw *oct_hw, u16 qid, struct vdpa_vq_state *state);
drivers/vdpa/octeon_ep/octep_vdpa.h
98
void octep_notify_queue(struct octep_hw *oct_hw, u16 qid);
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
103
static int octep_process_mbox(struct octep_hw *oct_hw, u16 id, u16 qid, void *buffer,
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
130
octep_write32_word(mbox, 0, (u32)qid);
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
276
void octep_notify_queue(struct octep_hw *oct_hw, u16 qid)
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
278
iowrite16(qid, oct_hw->vqs[qid].notify_addr);
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
299
int octep_set_vq_address(struct octep_hw *oct_hw, u16 qid, u64 desc_area, u64 driver_area,
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
304
octep_write_queue_select(oct_hw, qid);
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
315
int octep_get_vq_state(struct octep_hw *oct_hw, u16 qid, struct vdpa_vq_state *state)
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
317
return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_GET_VQ_STATE, qid, state,
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
321
int octep_set_vq_state(struct octep_hw *oct_hw, u16 qid, const struct vdpa_vq_state *state)
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
326
return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_SET_VQ_STATE, qid, &q_state,
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
330
void octep_set_vq_num(struct octep_hw *oct_hw, u16 qid, u32 num)
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
334
octep_write_queue_select(oct_hw, qid);
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
338
void octep_set_vq_ready(struct octep_hw *oct_hw, u16 qid, bool ready)
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
342
octep_write_queue_select(oct_hw, qid);
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
346
bool octep_get_vq_ready(struct octep_hw *oct_hw, u16 qid)
drivers/vdpa/octeon_ep/octep_vdpa_hw.c
350
octep_write_queue_select(oct_hw, qid);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
201
u16 qid;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
206
for (qid = 0; qid < oct_hw->nr_vring; qid++) {
drivers/vdpa/octeon_ep/octep_vdpa_main.c
207
oct_hw->vqs[qid].cb.callback = NULL;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
208
oct_hw->vqs[qid].cb.private = NULL;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
227
static int octep_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/octeon_ep/octep_vdpa_main.c
232
return octep_get_vq_state(oct_hw, qid, state);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
235
static int octep_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/octeon_ep/octep_vdpa_main.c
240
return octep_set_vq_state(oct_hw, qid, state);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
243
static void octep_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, struct vdpa_callback *cb)
drivers/vdpa/octeon_ep/octep_vdpa_main.c
247
oct_hw->vqs[qid].cb = *cb;
drivers/vdpa/octeon_ep/octep_vdpa_main.c
250
static void octep_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
drivers/vdpa/octeon_ep/octep_vdpa_main.c
254
octep_set_vq_ready(oct_hw, qid, ready);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
257
static bool octep_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/octeon_ep/octep_vdpa_main.c
261
return octep_get_vq_ready(oct_hw, qid);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
264
static void octep_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
drivers/vdpa/octeon_ep/octep_vdpa_main.c
268
octep_set_vq_num(oct_hw, qid, num);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
271
static int octep_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, u64 desc_area,
drivers/vdpa/octeon_ep/octep_vdpa_main.c
276
pr_debug("qid[%d]: desc_area: %llx\n", qid, desc_area);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
277
pr_debug("qid[%d]: driver_area: %llx\n", qid, driver_area);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
278
pr_debug("qid[%d]: device_area: %llx\n\n", qid, device_area);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
280
return octep_set_vq_address(oct_hw, qid, desc_area, driver_area, device_area);
drivers/vdpa/octeon_ep/octep_vdpa_main.c
283
static void octep_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/pds/cmds.c
125
int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
drivers/vdpa/pds/cmds.c
134
.vdpa_vq_init.qid = cpu_to_le16(qid),
drivers/vdpa/pds/cmds.c
139
.vdpa_vq_init.intr_index = cpu_to_le16(qid),
drivers/vdpa/pds/cmds.c
147
__func__, qid, ilog2(vq_info->q_len),
drivers/vdpa/pds/cmds.c
154
qid, comp.status, ERR_PTR(err));
drivers/vdpa/pds/cmds.c
159
int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
drivers/vdpa/pds/cmds.c
168
.vdpa_vq_reset.qid = cpu_to_le16(qid),
drivers/vdpa/pds/cmds.c
177
qid, comp.status, ERR_PTR(err));
drivers/vdpa/pds/cmds.h
13
int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
drivers/vdpa/pds/cmds.h
15
int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx,
drivers/vdpa/pds/debugfs.c
245
seq_printf(seq, "qid: %d\n", vq->qid);
drivers/vdpa/pds/vdpa_dev.c
103
pdsv->vqs[qid].event_cb = *cb;
drivers/vdpa/pds/vdpa_dev.c
117
static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
drivers/vdpa/pds/vdpa_dev.c
119
if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
drivers/vdpa/pds/vdpa_dev.c
122
free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
drivers/vdpa/pds/vdpa_dev.c
123
pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
drivers/vdpa/pds/vdpa_dev.c
126
static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
drivers/vdpa/pds/vdpa_dev.c
135
__func__, qid, pdsv->vqs[qid].ready, ready);
drivers/vdpa/pds/vdpa_dev.c
136
if (ready == pdsv->vqs[qid].ready)
drivers/vdpa/pds/vdpa_dev.c
148
err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
drivers/vdpa/pds/vdpa_dev.c
151
qid, ERR_PTR(err));
drivers/vdpa/pds/vdpa_dev.c
155
err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
drivers/vdpa/pds/vdpa_dev.c
158
__func__, qid, ERR_PTR(err));
drivers/vdpa/pds/vdpa_dev.c
161
pdsv->vqs[qid].ready = ready;
drivers/vdpa/pds/vdpa_dev.c
164
static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/pds/vdpa_dev.c
168
return pdsv->vqs[qid].ready;
drivers/vdpa/pds/vdpa_dev.c
171
static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/pds/vdpa_dev.c
181
if (pdsv->vqs[qid].ready) {
drivers/vdpa/pds/vdpa_dev.c
215
pdsv->vqs[qid].avail_idx = avail;
drivers/vdpa/pds/vdpa_dev.c
216
pdsv->vqs[qid].used_idx = used;
drivers/vdpa/pds/vdpa_dev.c
221
static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/pds/vdpa_dev.c
231
if (pdsv->vqs[qid].ready) {
drivers/vdpa/pds/vdpa_dev.c
236
avail = pdsv->vqs[qid].avail_idx;
drivers/vdpa/pds/vdpa_dev.c
237
used = pdsv->vqs[qid].used_idx;
drivers/vdpa/pds/vdpa_dev.c
257
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/pds/vdpa_dev.c
263
area.addr = pdsv->vqs[qid].notify_pa;
drivers/vdpa/pds/vdpa_dev.c
274
static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/pds/vdpa_dev.c
278
return pdsv->vqs[qid].irq;
drivers/vdpa/pds/vdpa_dev.c
385
int max_vq, nintrs, qid, err;
drivers/vdpa/pds/vdpa_dev.c
396
for (qid = 0; qid < pdsv->num_vqs; ++qid) {
drivers/vdpa/pds/vdpa_dev.c
397
int irq = pci_irq_vector(pdev, qid);
drivers/vdpa/pds/vdpa_dev.c
399
snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
drivers/vdpa/pds/vdpa_dev.c
400
"vdpa-%s-%d", dev_name(dev), qid);
drivers/vdpa/pds/vdpa_dev.c
403
pdsv->vqs[qid].irq_name,
drivers/vdpa/pds/vdpa_dev.c
404
&pdsv->vqs[qid]);
drivers/vdpa/pds/vdpa_dev.c
407
__func__, qid, ERR_PTR(err));
drivers/vdpa/pds/vdpa_dev.c
411
pdsv->vqs[qid].irq = irq;
drivers/vdpa/pds/vdpa_dev.c
419
while (qid--)
drivers/vdpa/pds/vdpa_dev.c
420
pds_vdpa_release_irq(pdsv, qid);
drivers/vdpa/pds/vdpa_dev.c
433
int qid;
drivers/vdpa/pds/vdpa_dev.c
444
for (qid = 0; qid < pdsv->num_vqs; qid++)
drivers/vdpa/pds/vdpa_dev.c
445
pds_vdpa_release_irq(pdsv, qid);
drivers/vdpa/pds/vdpa_dev.c
495
static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
drivers/vdpa/pds/vdpa_dev.c
498
memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
drivers/vdpa/pds/vdpa_dev.c
499
pdsv->vqs[qid].qid = qid;
drivers/vdpa/pds/vdpa_dev.c
500
pdsv->vqs[qid].pdsv = pdsv;
drivers/vdpa/pds/vdpa_dev.c
501
pdsv->vqs[qid].ready = false;
drivers/vdpa/pds/vdpa_dev.c
502
pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
drivers/vdpa/pds/vdpa_dev.c
503
pdsv->vqs[qid].notify = notify;
drivers/vdpa/pds/vdpa_dev.c
72
static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/pds/vdpa_dev.c
77
pdsv->vqs[qid].desc_addr = desc_addr;
drivers/vdpa/pds/vdpa_dev.c
78
pdsv->vqs[qid].avail_addr = driver_addr;
drivers/vdpa/pds/vdpa_dev.c
79
pdsv->vqs[qid].used_addr = device_addr;
drivers/vdpa/pds/vdpa_dev.c
84
static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
drivers/vdpa/pds/vdpa_dev.c
88
pdsv->vqs[qid].q_len = num;
drivers/vdpa/pds/vdpa_dev.c
91
static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
drivers/vdpa/pds/vdpa_dev.c
95
iowrite16(qid, pdsv->vqs[qid].notify);
drivers/vdpa/pds/vdpa_dev.c
98
static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
drivers/vdpa/pds/vdpa_dev.h
16
u16 qid;
drivers/vdpa/virtio_pci/vp_vdpa.c
257
static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/virtio_pci/vp_vdpa.c
292
static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/virtio_pci/vp_vdpa.c
302
!vp_modern_get_queue_enable(mdev, qid)) {
drivers/vdpa/virtio_pci/vp_vdpa.c
313
static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/virtio_pci/vp_vdpa.c
318
vp_vdpa->vring[qid].cb = *cb;
drivers/vdpa/virtio_pci/vp_vdpa.c
322
u16 qid, bool ready)
drivers/vdpa/virtio_pci/vp_vdpa.c
326
vp_modern_set_queue_enable(mdev, qid, ready);
drivers/vdpa/virtio_pci/vp_vdpa.c
329
static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
drivers/vdpa/virtio_pci/vp_vdpa.c
333
return vp_modern_get_queue_enable(mdev, qid);
drivers/vdpa/virtio_pci/vp_vdpa.c
336
static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/virtio_pci/vp_vdpa.c
341
vp_modern_set_queue_size(mdev, qid, num);
drivers/vdpa/virtio_pci/vp_vdpa.c
344
static u16 vp_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
drivers/vdpa/virtio_pci/vp_vdpa.c
348
return vp_modern_get_queue_size(mdev, qid);
drivers/vdpa/virtio_pci/vp_vdpa.c
351
static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
drivers/vdpa/virtio_pci/vp_vdpa.c
357
vp_modern_queue_address(mdev, qid, desc_area,
drivers/vdpa/virtio_pci/vp_vdpa.c
363
static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
drivers/vdpa/virtio_pci/vp_vdpa.c
367
vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
drivers/vdpa/virtio_pci/vp_vdpa.c
373
u16 qid = data & 0xFFFF;
drivers/vdpa/virtio_pci/vp_vdpa.c
375
vp_iowrite32(data, vp_vdpa->vring[qid].notify);
drivers/vdpa/virtio_pci/vp_vdpa.c
453
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
drivers/vdpa/virtio_pci/vp_vdpa.c
459
notify.addr = vp_vdpa->vring[qid].notify_pa;
drivers/vhost/vdpa.c
198
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
drivers/vhost/vdpa.c
200
struct vhost_virtqueue *vq = &v->vqs[qid];
drivers/vhost/vdpa.c
208
irq = ops->get_vq_irq(vdpa, qid);
drivers/vhost/vdpa.c
219
qid, vq->call_ctx.ctx, ret);
drivers/vhost/vdpa.c
222
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
drivers/vhost/vdpa.c
224
struct vhost_virtqueue *vq = &v->vqs[qid];
fs/9p/cache.c
62
version = cpu_to_le32(v9inode->qid.version);
fs/9p/cache.c
63
path = cpu_to_le64(v9inode->qid.path);
fs/9p/fid.h
53
((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) ||
fs/9p/v9fs.c
663
memset(&v9inode->qid, 0, sizeof(v9inode->qid));
fs/9p/v9fs.h
141
struct p9_qid qid;
fs/9p/vfs_dir.c
130
QID2INO(&st.qid), dt_type(&st));
fs/9p/vfs_dir.c
187
QID2INO(&curdirent.qid),
fs/9p/vfs_dir.c
229
version = cpu_to_le32(v9inode->qid.version);
fs/9p/vfs_inode.c
206
wstat->qid.type = ~0;
fs/9p/vfs_inode.c
207
wstat->qid.version = ~0;
fs/9p/vfs_inode.c
208
*((long long *)&wstat->qid.path) = ~0;
fs/9p/vfs_inode.c
351
version = cpu_to_le32(v9inode->qid.version);
fs/9p/vfs_inode.c
379
if (memcmp(&v9inode->qid.version,
fs/9p/vfs_inode.c
380
&st->qid.version, sizeof(v9inode->qid.version)))
fs/9p/vfs_inode.c
383
if (v9inode->qid.type != st->qid.type)
fs/9p/vfs_inode.c
386
if (v9inode->qid.path != st->qid.path)
fs/9p/vfs_inode.c
401
memcpy(&v9inode->qid, &st->qid, sizeof(st->qid));
fs/9p/vfs_inode.c
406
struct p9_qid *qid,
fs/9p/vfs_inode.c
422
inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode, st);
fs/9p/vfs_inode.c
432
inode->i_ino = QID2INO(qid);
fs/9p/vfs_inode.c
460
inode = v9fs_qid_iget(sb, &st->qid, st, new);
fs/9p/vfs_inode_dotl.c
112
inode = iget5_locked(sb, QID2INO(qid), test, v9fs_set_inode_dotl, st);
fs/9p/vfs_inode_dotl.c
122
inode->i_ino = QID2INO(qid);
fs/9p/vfs_inode_dotl.c
154
inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new);
fs/9p/vfs_inode_dotl.c
235
struct p9_qid qid;
fs/9p/vfs_inode_dotl.c
289
err = p9_client_create_dotl(ofid, name, p9_omode, mode, gid, &qid);
fs/9p/vfs_inode_dotl.c
359
struct p9_qid qid;
fs/9p/vfs_inode_dotl.c
386
err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
fs/9p/vfs_inode_dotl.c
68
if (memcmp(&v9inode->qid.version,
fs/9p/vfs_inode_dotl.c
689
struct p9_qid qid;
fs/9p/vfs_inode_dotl.c
69
&st->qid.version, sizeof(v9inode->qid.version)))
fs/9p/vfs_inode_dotl.c
706
err = p9_client_symlink(dfid, name, symname, gid, &qid);
fs/9p/vfs_inode_dotl.c
72
if (v9inode->qid.type != st->qid.type)
fs/9p/vfs_inode_dotl.c
75
if (v9inode->qid.path != st->qid.path)
fs/9p/vfs_inode_dotl.c
798
struct p9_qid qid;
fs/9p/vfs_inode_dotl.c
824
err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
fs/9p/vfs_inode_dotl.c
91
memcpy(&v9inode->qid, &st->qid, sizeof(st->qid));
fs/9p/vfs_inode_dotl.c
97
struct p9_qid *qid,
fs/ext4/super.c
6897
struct kqid qid;
fs/ext4/super.c
6902
qid = make_kqid_projid(projid);
fs/ext4/super.c
6903
dquot = dqget(sb, qid);
fs/f2fs/super.c
2165
struct kqid qid;
fs/f2fs/super.c
2170
qid = make_kqid_projid(projid);
fs/f2fs/super.c
2171
dquot = dqget(sb, qid);
fs/fuse/dev_uring.c
1093
unsigned int qid = READ_ONCE(cmd_req->qid);
fs/fuse/dev_uring.c
1102
if (qid >= ring->nr_queues) {
fs/fuse/dev_uring.c
1103
pr_info_ratelimited("fuse: Invalid ring qid %u\n", qid);
fs/fuse/dev_uring.c
1107
queue = ring->queues[qid];
fs/fuse/dev_uring.c
1109
queue = fuse_uring_create_queue(ring, qid);
fs/fuse/dev_uring.c
124
int qid;
fs/fuse/dev_uring.c
1241
unsigned int qid;
fs/fuse/dev_uring.c
1244
qid = task_cpu(current);
fs/fuse/dev_uring.c
1246
if (WARN_ONCE(qid >= ring->nr_queues,
fs/fuse/dev_uring.c
1247
"Core number (%u) exceeds nr queues (%zu)\n", qid,
fs/fuse/dev_uring.c
1249
qid = 0;
fs/fuse/dev_uring.c
1251
queue = ring->queues[qid];
fs/fuse/dev_uring.c
1252
WARN_ONCE(!queue, "Missing queue for qid %d\n", qid);
fs/fuse/dev_uring.c
128
for (qid = 0; qid < ring->nr_queues; qid++) {
fs/fuse/dev_uring.c
129
queue = READ_ONCE(ring->queues[qid]);
fs/fuse/dev_uring.c
164
int qid;
fs/fuse/dev_uring.c
169
for (qid = 0; qid < ring->nr_queues; qid++) {
fs/fuse/dev_uring.c
170
queue = READ_ONCE(ring->queues[qid]);
fs/fuse/dev_uring.c
191
int qid;
fs/fuse/dev_uring.c
196
for (qid = 0; qid < ring->nr_queues; qid++) {
fs/fuse/dev_uring.c
197
struct fuse_ring_queue *queue = ring->queues[qid];
fs/fuse/dev_uring.c
216
ring->queues[qid] = NULL;
fs/fuse/dev_uring.c
271
int qid)
fs/fuse/dev_uring.c
286
queue->qid = qid;
fs/fuse/dev_uring.c
302
if (ring->queues[qid]) {
fs/fuse/dev_uring.c
306
return ring->queues[qid];
fs/fuse/dev_uring.c
312
WRITE_ONCE(ring->queues[qid], queue);
fs/fuse/dev_uring.c
375
queue->qid, ent->state, exp_state);
fs/fuse/dev_uring.c
405
int qid;
fs/fuse/dev_uring.c
408
for (qid = 0; qid < ring->nr_queues; qid++) {
fs/fuse/dev_uring.c
409
struct fuse_ring_queue *queue = ring->queues[qid];
fs/fuse/dev_uring.c
421
ring, qid, ent, ent->state);
fs/fuse/dev_uring.c
425
ring, qid, ent, ent->state);
fs/fuse/dev_uring.c
434
int qid;
fs/fuse/dev_uring.c
439
for (qid = 0; qid < ring->nr_queues; qid++) {
fs/fuse/dev_uring.c
440
struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
fs/fuse/dev_uring.c
472
int qid;
fs/fuse/dev_uring.c
474
for (qid = 0; qid < ring->nr_queues; qid++) {
fs/fuse/dev_uring.c
475
struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
fs/fuse/dev_uring.c
677
queue->qid, ent, ent->state);
fs/fuse/dev_uring.c
781
pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid,
fs/fuse/dev_uring.c
889
unsigned int qid = READ_ONCE(cmd_req->qid);
fs/fuse/dev_uring.c
897
if (qid >= ring->nr_queues)
fs/fuse/dev_uring.c
900
queue = ring->queues[qid];
fs/fuse/dev_uring.c
917
pr_info("qid=%d commit_id %llu not found\n", queue->qid,
fs/fuse/dev_uring.c
929
queue->qid, commit_id, ent->state);
fs/fuse/dev_uring.c
956
int qid;
fs/fuse/dev_uring.c
960
for (qid = 0; qid < ring->nr_queues && ready; qid++) {
fs/fuse/dev_uring.c
961
if (current_qid == qid)
fs/fuse/dev_uring.c
964
queue = ring->queues[qid];
fs/fuse/dev_uring.c
999
bool ready = is_ring_ready(ring, queue->qid);
fs/fuse/dev_uring_i.h
67
unsigned int qid;
fs/fuse/virtio_fs.c
236
unsigned int cpu, qid;
fs/fuse/virtio_fs.c
244
qid = fsvq->vq->index;
fs/fuse/virtio_fs.c
246
if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) {
fs/gfs2/quota.c
1371
int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
fs/gfs2/quota.c
1377
error = qd_get(sdp, qid, &qd);
fs/gfs2/quota.c
1662
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
fs/gfs2/quota.c
1676
if ((qid.type != USRQUOTA) &&
fs/gfs2/quota.c
1677
(qid.type != GRPQUOTA))
fs/gfs2/quota.c
1680
error = qd_get(sdp, qid, &qd);
fs/gfs2/quota.c
1701
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
fs/gfs2/quota.c
1717
if ((qid.type != USRQUOTA) &&
fs/gfs2/quota.c
1718
(qid.type != GRPQUOTA))
fs/gfs2/quota.c
1724
error = qd_get(sdp, qid, &qd);
fs/gfs2/quota.c
219
struct kqid qid = qd->qd_id;
fs/gfs2/quota.c
220
return (2 * (u64)from_kqid(&init_user_ns, qid)) +
fs/gfs2/quota.c
221
((qid.type == USRQUOTA) ? 0 : 1);
fs/gfs2/quota.c
229
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
fs/gfs2/quota.c
240
qd->qd_id = qid;
fs/gfs2/quota.c
259
struct kqid qid)
fs/gfs2/quota.c
265
if (!qid_eq(qd->qd_id, qid))
fs/gfs2/quota.c
279
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
fs/gfs2/quota.c
283
unsigned int hash = gfs2_qd_hash(sdp, qid);
fs/gfs2/quota.c
286
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
fs/gfs2/quota.c
292
new_qd = qd_alloc(hash, sdp, qid);
fs/gfs2/quota.c
298
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
fs/gfs2/quota.c
513
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
fs/gfs2/quota.c
518
error = qd_get(sdp, qid, qdp);
fs/gfs2/quota.c
86
const struct kqid qid)
fs/gfs2/quota.c
91
h = jhash(&qid, sizeof(struct kqid), h);
fs/gfs2/quota.h
32
int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
fs/gfs2/sys.c
253
struct kqid qid;
fs/gfs2/sys.c
264
qid = make_kqid(current_user_ns(), USRQUOTA, id);
fs/gfs2/sys.c
265
if (!qid_valid(qid))
fs/gfs2/sys.c
268
error = gfs2_quota_refresh(sdp, qid);
fs/gfs2/sys.c
275
struct kqid qid;
fs/gfs2/sys.c
286
qid = make_kqid(current_user_ns(), GRPQUOTA, id);
fs/gfs2/sys.c
287
if (!qid_valid(qid))
fs/gfs2/sys.c
290
error = gfs2_quota_refresh(sdp, qid);
fs/ocfs2/quota_global.c
894
static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
fs/ocfs2/quota_global.c
896
int type = qid->type;
fs/ocfs2/quota_global.c
900
trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
fs/ocfs2/quota_global.c
911
status = qtree_get_next_id(&info->dqi_gi, qid);
fs/quota/dquot.c
1474
struct kqid qid;
fs/quota/dquot.c
1496
qid = make_kqid_uid(inode->i_uid);
fs/quota/dquot.c
1499
qid = make_kqid_gid(inode->i_gid);
fs/quota/dquot.c
1505
qid = make_kqid_projid(projid);
fs/quota/dquot.c
1508
dquot = dqget(sb, qid);
fs/quota/dquot.c
2185
int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
fs/quota/dquot.c
2189
if (!sb_has_quota_active(sb, qid->type))
fs/quota/dquot.c
2191
if (!dqopt->ops[qid->type]->get_next_id)
fs/quota/dquot.c
2193
return dqopt->ops[qid->type]->get_next_id(sb, qid);
fs/quota/dquot.c
2682
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
fs/quota/dquot.c
2687
dquot = dqget(sb, qid);
fs/quota/dquot.c
2697
int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
fs/quota/dquot.c
2705
err = sb->dq_op->get_next_id(sb, qid);
fs/quota/dquot.c
2708
dquot = dqget(sb, *qid);
fs/quota/dquot.c
277
hashfn(const struct super_block *sb, struct kqid qid)
fs/quota/dquot.c
279
unsigned int id = from_kqid(&init_user_ns, qid);
fs/quota/dquot.c
280
int type = qid.type;
fs/quota/dquot.c
2817
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
fs/quota/dquot.c
2823
dquot = dqget(sb, qid);
fs/quota/dquot.c
303
struct kqid qid)
fs/quota/dquot.c
308
if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
fs/quota/dquot.c
926
struct dquot *dqget(struct super_block *sb, struct kqid qid)
fs/quota/dquot.c
928
unsigned int hashent = hashfn(sb, qid);
fs/quota/dquot.c
931
if (!qid_has_mapping(sb->s_user_ns, qid))
fs/quota/dquot.c
934
if (!sb_has_quota_active(sb, qid.type))
fs/quota/dquot.c
939
if (!sb_has_quota_active(sb, qid.type)) {
fs/quota/dquot.c
947
dquot = find_dquot(hashent, sb, qid);
fs/quota/dquot.c
951
empty = get_empty_dquot(sb, qid.type);
fs/quota/dquot.c
958
dquot->dq_id = qid;
fs/quota/kqid.c
120
bool qid_valid(struct kqid qid)
fs/quota/kqid.c
122
switch (qid.type) {
fs/quota/kqid.c
124
return uid_valid(qid.uid);
fs/quota/kqid.c
126
return gid_valid(qid.gid);
fs/quota/kqid.c
128
return projid_valid(qid.projid);
fs/quota/netlink.c
37
void quota_send_warning(struct kqid qid, dev_t dev,
fs/quota/netlink.c
63
ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
fs/quota/netlink.c
67
from_kqid_munged(&init_user_ns, qid),
fs/quota/quota.c
204
struct kqid qid;
fs/quota/quota.c
211
qid = make_kqid(current_user_ns(), type, id);
fs/quota/quota.c
212
if (!qid_has_mapping(sb->s_user_ns, qid))
fs/quota/quota.c
214
ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
fs/quota/quota.c
240
struct kqid qid;
fs/quota/quota.c
247
qid = make_kqid(current_user_ns(), type, id);
fs/quota/quota.c
248
if (!qid_has_mapping(sb->s_user_ns, qid))
fs/quota/quota.c
250
ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
fs/quota/quota.c
255
idq.dqb_id = from_kqid(current_user_ns(), qid);
fs/quota/quota.c
292
struct kqid qid;
fs/quota/quota.c
306
qid = make_kqid(current_user_ns(), type, id);
fs/quota/quota.c
307
if (!qid_has_mapping(sb->s_user_ns, qid))
fs/quota/quota.c
310
return sb->s_qcop->set_dqblk(sb, qid, &fdq);
fs/quota/quota.c
629
struct kqid qid;
fs/quota/quota.c
635
qid = make_kqid(current_user_ns(), type, id);
fs/quota/quota.c
636
if (!qid_has_mapping(sb->s_user_ns, qid))
fs/quota/quota.c
639
if (from_kqid(sb->s_user_ns, qid) == 0 &&
fs/quota/quota.c
654
return sb->s_qcop->set_dqblk(sb, qid, &qdq);
fs/quota/quota.c
710
struct kqid qid;
fs/quota/quota.c
715
qid = make_kqid(current_user_ns(), type, id);
fs/quota/quota.c
716
if (!qid_has_mapping(sb->s_user_ns, qid))
fs/quota/quota.c
718
ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
fs/quota/quota.c
736
struct kqid qid;
fs/quota/quota.c
742
qid = make_kqid(current_user_ns(), type, id);
fs/quota/quota.c
743
if (!qid_has_mapping(sb->s_user_ns, qid))
fs/quota/quota.c
745
ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
fs/quota/quota.c
748
id_out = from_kqid(current_user_ns(), qid);
fs/quota/quota_tree.c
42
static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
fs/quota/quota_tree.c
44
qid_t id = from_kqid(&init_user_ns, qid);
fs/quota/quota_tree.c
842
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
fs/quota/quota_tree.c
844
qid_t id = from_kqid(&init_user_ns, *qid);
fs/quota/quota_tree.c
850
*qid = make_kqid(&init_user_ns, qid->type, id);
fs/quota/quota_v2.c
404
static int v2_get_next_id(struct super_block *sb, struct kqid *qid)
fs/quota/quota_v2.c
412
ret = qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid);
fs/xfs/xfs_quotaops.c
225
struct kqid qid,
fs/xfs/xfs_quotaops.c
234
id = from_kqid(&init_user_ns, qid);
fs/xfs/xfs_quotaops.c
235
return xfs_qm_scall_getquota(mp, id, xfs_quota_type(qid.type), qdq);
fs/xfs/xfs_quotaops.c
242
struct kqid *qid,
fs/xfs/xfs_quotaops.c
252
id = from_kqid(&init_user_ns, *qid);
fs/xfs/xfs_quotaops.c
253
ret = xfs_qm_scall_getquota_next(mp, &id, xfs_quota_type(qid->type),
fs/xfs/xfs_quotaops.c
259
*qid = make_kqid(current_user_ns(), qid->type, id);
fs/xfs/xfs_quotaops.c
266
struct kqid qid,
fs/xfs/xfs_quotaops.c
276
return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
fs/xfs/xfs_quotaops.c
277
xfs_quota_type(qid.type), qdq);
include/linux/dqblk_qtree.h
57
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);
include/linux/firmware/xlnx-zynqmp.h
553
u32 qid;
include/linux/netdevice.h
3947
u32 qid)
include/linux/netdevice.h
3949
netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
include/linux/nvme-fc.h
287
__be16 qid;
include/linux/nvme-rdma.h
67
__le16 qid;
include/linux/nvme.h
1515
__le16 qid;
include/linux/nvme.h
1620
#define show_opcode_name(qid, opcode, fctype) \
include/linux/nvme.h
1623
((qid) ? \
include/linux/nvme.h
1706
__le16 qid;
include/linux/nvme.h
2036
static inline const char *nvme_opcode_str(int qid, u8 opcode)
include/linux/nvme.h
2038
return qid ? nvme_get_opcode_str(opcode) :
include/linux/nvme.h
2043
int qid, const struct nvme_command *cmd)
include/linux/nvme.h
2048
return nvme_opcode_str(qid, cmd->common.opcode);
include/linux/pds/pds_adminq.h
746
__le16 qid;
include/linux/pds/pds_adminq.h
783
__le16 qid;
include/linux/qed/qed_eth_if.h
63
u16 qid;
include/linux/qed/qed_if.h
1208
u16 qid, struct qed_sb_info_dbg *sb_dbg);
include/linux/quota.h
105
kqid.uid = make_kuid(from, qid);
include/linux/quota.h
108
kqid.gid = make_kgid(from, qid);
include/linux/quota.h
111
kqid.projid = make_kprojid(from, qid);
include/linux/quota.h
187
static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid)
include/linux/quota.h
189
return from_kqid(ns, qid) != (qid_t) -1;
include/linux/quota.h
320
int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */
include/linux/quota.h
339
int (*get_next_id) (struct super_block *sb, struct kqid *qid);
include/linux/quota.h
511
extern void quota_send_warning(struct kqid qid, dev_t dev,
include/linux/quota.h
514
static inline void quota_send_warning(struct kqid qid, dev_t dev,
include/linux/quota.h
79
extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
include/linux/quota.h
80
extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
include/linux/quota.h
81
extern bool qid_valid(struct kqid qid);
include/linux/quota.h
98
enum quota_type type, qid_t qid)
include/linux/quotaops.h
46
struct dquot *dqget(struct super_block *sb, struct kqid qid);
include/linux/quotaops.h
93
int dquot_get_next_id(struct super_block *sb, struct kqid *qid);
include/linux/soc/airoha/airoha_offload.h
201
u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
include/linux/soc/airoha/airoha_offload.h
236
int qid, bool xmit)
include/linux/soc/airoha/airoha_offload.h
238
return npu->ops.wlan_get_queue_addr(npu, qid, xmit);
include/linux/soc/airoha/airoha_offload.h
292
int qid, bool xmit)
include/net/9p/9p.h
395
struct p9_qid qid;
include/net/9p/9p.h
412
struct p9_qid qid;
include/net/9p/client.h
249
struct p9_qid qid;
include/net/9p/client.h
268
struct p9_qid qid;
include/net/9p/client.h
295
kgid_t gid, struct p9_qid *qid);
include/net/9p/client.h
297
kgid_t gid, struct p9_qid *qid);
include/net/9p/client.h
319
dev_t rdev, kgid_t gid, struct p9_qid *qid);
include/net/9p/client.h
321
kgid_t gid, struct p9_qid *qid);
include/net/libeth/xsk.h
682
void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid);
include/net/libeth/xsk.h
686
int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable);
include/net/pkt_cls.h
878
u16 qid;
include/scsi/libsas.h
552
unsigned int qid;
include/scsi/libsas.h
700
u16 tag, unsigned int qid,
include/scsi/libsas.h
703
unsigned int qid, void *data);
include/uapi/linux/fuse.h
1307
uint16_t qid;
mm/shmem_quota.c
114
static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
mm/shmem_quota.c
116
struct mem_dqinfo *info = sb_dqinfo(sb, qid->type);
mm/shmem_quota.c
118
qid_t id = from_kqid(&init_user_ns, *qid);
mm/shmem_quota.c
123
if (!sb_has_quota_active(sb, qid->type))
mm/shmem_quota.c
154
*qid = make_kqid(&init_user_ns, qid->type, entry->id);
net/9p/client.c
1009
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", &qid);
net/9p/client.c
1017
qid.type, qid.path, qid.version);
net/9p/client.c
1019
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
net/9p/client.c
1086
memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid));
net/9p/client.c
1088
memmove(&fid->qid, &oldfid->qid, sizeof(struct p9_qid));
net/9p/client.c
1111
struct p9_qid qid;
net/9p/client.c
1130
err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit);
net/9p/client.c
1137
p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type,
net/9p/client.c
1138
qid.path, qid.version, iounit);
net/9p/client.c
1140
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
net/9p/client.c
1152
u32 mode, kgid_t gid, struct p9_qid *qid)
net/9p/client.c
1175
err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", qid, &iounit);
net/9p/client.c
1182
qid->type, qid->path, qid->version, iounit);
net/9p/client.c
1184
memmove(&ofid->qid, qid, sizeof(struct p9_qid));
net/9p/client.c
1201
struct p9_qid qid;
net/9p/client.c
1218
err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit);
net/9p/client.c
1225
qid.type, qid.path, qid.version, iounit);
net/9p/client.c
1227
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
net/9p/client.c
1239
const char *symtgt, kgid_t gid, struct p9_qid *qid)
net/9p/client.c
1256
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
net/9p/client.c
1263
qid->type, qid->path, qid->version);
net/9p/client.c
1642
ret->size, ret->type, ret->dev, ret->qid.type, ret->qid.path,
net/9p/client.c
1643
ret->qid.version, ret->mode,
net/9p/client.c
1700
ret->qid.type, ret->qid.path, ret->qid.version,
net/9p/client.c
1765
wst->size, wst->type, wst->dev, wst->qid.type,
net/9p/client.c
1766
wst->qid.path, wst->qid.version,
net/9p/client.c
2050
dev_t rdev, kgid_t gid, struct p9_qid *qid)
net/9p/client.c
2065
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
net/9p/client.c
2071
qid->type, qid->path, qid->version);
net/9p/client.c
2080
kgid_t gid, struct p9_qid *qid)
net/9p/client.c
2094
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
net/9p/client.c
2099
p9_debug(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
net/9p/client.c
2100
qid->path, qid->version);
net/9p/client.c
991
struct p9_qid qid;
net/9p/protocol.c
347
struct p9_qid *qid =
net/9p/protocol.c
351
&qid->type, &qid->version,
net/9p/protocol.c
352
&qid->path);
net/9p/protocol.c
367
&stbuf->dev, &stbuf->qid,
net/9p/protocol.c
489
&stbuf->qid,
net/9p/protocol.c
585
const struct p9_qid *qid =
net/9p/protocol.c
589
qid->type, qid->version,
net/9p/protocol.c
590
qid->path);
net/9p/protocol.c
599
stbuf->dev, &stbuf->qid,
net/9p/protocol.c
780
ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
net/sched/sch_api.c
2173
u32 qid;
net/sched/sch_api.c
2193
qid = TC_H_MAJ(clid);
net/sched/sch_api.c
2198
if (qid && qid1) {
net/sched/sch_api.c
2200
if (qid != qid1)
net/sched/sch_api.c
2203
qid = qid1;
net/sched/sch_api.c
2204
} else if (qid == 0)
net/sched/sch_api.c
2205
qid = rtnl_dereference(dev->qdisc)->handle;
net/sched/sch_api.c
2213
portid = TC_H_MAKE(qid, portid);
net/sched/sch_api.c
2215
if (qid == 0)
net/sched/sch_api.c
2216
qid = rtnl_dereference(dev->qdisc)->handle;
net/sched/sch_api.c
2220
q = qdisc_lookup(dev, qid);
net/sched/sch_api.c
2232
clid = qid;
net/sched/sch_api.c
2234
clid = TC_H_MAKE(qid, clid);
net/sched/sch_htb.c
1378
if (err || offload_opt.qid >= dev->num_tx_queues)
net/sched/sch_htb.c
1380
return netdev_get_tx_queue(dev, offload_opt.qid);
net/sched/sch_htb.c
1908
dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
net/xdp/xsk.c
1288
u32 flags, qid;
net/xdp/xsk.c
1325
qid = sxdp->sxdp_queue_id;
net/xdp/xsk.c
1357
if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
net/xdp/xsk.c
1377
qid);
net/xdp/xsk.c
1425
err = xp_assign_dev(xs->pool, dev, qid, flags);
net/xdp/xsk.c
1440
xs->queue_id = qid;
net/xdp/xsk.c
1443
if (qid < dev->real_num_rx_queues) {
net/xdp/xsk.c
1446
rxq = __netif_get_rx_queue(dev, qid);