cq_idx — completion-queue index usage across kernel drivers (grep-style fragments, grouped roughly by driver; a short note and standalone sketch follow each group).
hdev->completion_queue[i].cq_idx = i;
u32 cq_idx;
u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
return gaudi_cq_assignment[cq_idx];
static u32 gaudi2_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
return cq_idx;
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
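The habanalabs fragments above (hl_device, gaudi/gaudi2/goya) queue each completion queue's finished work on a per-CQ workqueue and resolve the hardware queue behind a CQ through a per-ASIC get_queue_id_for_cq() op: Gaudi indirects through a static assignment table while Goya returns the index unchanged. A minimal userspace sketch of that dispatch; the struct names and table contents here are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

struct dev;                                    /* opaque device handle */

/* Per-ASIC function table, modeled on the driver's op-table pattern. */
struct asic_ops {
	uint32_t (*get_queue_id_for_cq)(struct dev *d, uint32_t cq_idx);
};

/* Gaudi-style: a fixed CQ -> queue assignment table (values made up). */
static const uint32_t cq_assignment[] = { 0, 1, 5, 6, 2, 3 };

static uint32_t table_queue_id(struct dev *d, uint32_t cq_idx)
{
	return cq_assignment[cq_idx];
}

/* Goya-style: CQ index and queue ID coincide. */
static uint32_t identity_queue_id(struct dev *d, uint32_t cq_idx)
{
	return cq_idx;
}

int main(void)
{
	struct asic_ops gaudi_like = { .get_queue_id_for_cq = table_queue_id };
	struct asic_ops goya_like  = { .get_queue_id_for_cq = identity_queue_id };

	printf("cq 2 -> queue %u (table)\n", gaudi_like.get_queue_id_for_cq(NULL, 2));
	printf("cq 2 -> queue %u (identity)\n", goya_like.get_queue_id_for_cq(NULL, 2));
	return 0;
}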
u16 cq_idx;
u16 cq_idx;
u16 cq_idx;
result->cq_idx = cmd_completion.cq_idx;
destroy_cmd.cq_idx = params->cq_idx;
params->cq_idx, err);
u16 cq_idx;
u16 cq_idx;
static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
efa_destroy_cq_idx(dev, cq->cq_idx);
xa_erase(&dev->cqs_xa, cq->cq_idx);
resp.cq_idx = result.cq_idx;
cq->cq_idx = result.cq_idx;
cq->cq_idx);
err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
cq->cq_idx);
cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
xa_erase(&dev->cqs_xa, cq->cq_idx);
efa_destroy_cq_idx(dev, cq->cq_idx);
create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
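The EFA fragments show the device, not the driver, assigning cq_idx: the create-CQ admin command returns the index, the driver stores the CQ in an xarray keyed by it (and later passes it when wiring send/recv CQs into a QP), and teardown erases the entry before issuing the destroy command. A userspace sketch of that bookkeeping, with a plain pointer array standing in for the xarray and a fake firmware layer:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CQS 64

struct cq { uint16_t cq_idx; /* plus ring memory, DMA address, ... */ };

static struct cq *cq_table[MAX_CQS];          /* stands in for the xarray */

/* Fake "admin queue": the device hands back the index it assigned. */
static int fw_create_cq(struct cq *cq)
{
	static uint16_t next_idx;

	cq->cq_idx = next_idx++;
	return 0;
}

static void fw_destroy_cq(uint16_t cq_idx) { (void)cq_idx; }

static int create_cq(struct cq *cq)
{
	int err = fw_create_cq(cq);

	if (err)
		return err;
	if (cq->cq_idx >= MAX_CQS || cq_table[cq->cq_idx]) {
		err = -EINVAL;
		goto err_destroy;             /* unwind in reverse order */
	}
	cq_table[cq->cq_idx] = cq;            /* lookup keyed by cq_idx */
	return 0;

err_destroy:
	fw_destroy_cq(cq->cq_idx);
	return err;
}

static void destroy_cq(struct cq *cq)
{
	cq_table[cq->cq_idx] = NULL;          /* xa_erase() in the driver */
	fw_destroy_cq(cq->cq_idx);
}

int main(void)
{
	struct cq a, b;

	create_cq(&a);
	create_cq(&b);
	printf("a=%u b=%u\n", a.cq_idx, b.cq_idx);
	destroy_cq(&a);
	destroy_cq(&b);
	return 0;
}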
u32 *cq_idx)
*cq_idx = temp;
if (*cq_idx >= dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt)
*cq_idx = IRDMA_INVALID_CQ_IDX;
} while (*cq_idx == IRDMA_INVALID_CQ_IDX);
u32 cq_idx;
cq_idx = temp;
if (cq_idx == cq->cq_uk.cq_id)
struct irdma_sc_dev *dev, u32 cq_idx)
struct irdma_cq *icq = READ_ONCE(rf->cq_table[cq_idx]);
cq_idx);
struct irdma_sc_dev *dev, u32 cq_idx)
if (cq_idx == IRDMA_RSVD_CQ_ID_CQP) {
} else if (cq_idx == IRDMA_RSVD_CQ_ID_ILQ ||
cq_idx == IRDMA_RSVD_CQ_ID_IEQ) {
cq = (cq_idx == IRDMA_RSVD_CQ_ID_ILQ) ?
u32 cq_idx;
if (!irdma_sc_process_ceq(dev, sc_ceq, &cq_idx)) {
if (likely(cq_idx > IRDMA_RSVD_CQ_ID_IEQ)) {
irdma_process_normal_ceqe(rf, dev, cq_idx);
irdma_process_reserved_ceqe(rf, dev, cq_idx);
u32 *cq_idx);
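The irdma fragments walk the completion-event queue (CEQ) and dispatch on the CQ index each event carries: indices at or below the reserved IDs (CQP, ILQ, IEQ) go to dedicated handlers, while anything above is a normal user/kernel CQ looked up in rf->cq_table under READ_ONCE(). A compact sketch of that split; the reserved ID values here are illustrative, not irdma's:

#include <stdint.h>
#include <stdio.h>

/* Reserved CQ IDs; the values are illustrative, not irdma's. */
enum { RSVD_CQ_ID_CQP = 0, RSVD_CQ_ID_ILQ = 1, RSVD_CQ_ID_IEQ = 2 };

static void process_reserved_ceqe(uint32_t cq_idx)
{
	if (cq_idx == RSVD_CQ_ID_CQP)
		puts("control QP completion");
	else if (cq_idx == RSVD_CQ_ID_ILQ || cq_idx == RSVD_CQ_ID_IEQ)
		puts("puda (ILQ/IEQ) completion");
}

static void process_normal_ceqe(uint32_t cq_idx)
{
	/* The driver looks up rf->cq_table[cq_idx] under READ_ONCE()
	 * here and kicks that CQ's completion handler. */
	printf("user/kernel CQ %u completion\n", cq_idx);
}

static void dispatch_ceqe(uint32_t cq_idx)
{
	if (cq_idx > RSVD_CQ_ID_IEQ)    /* likely(): the common case */
		process_normal_ceqe(cq_idx);
	else
		process_reserved_ceqe(cq_idx);
}

int main(void)
{
	dispatch_ceqe(RSVD_CQ_ID_CQP);
	dispatch_ceqe(7);
	return 0;
}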
static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
if (cq_idx >= OCRDMA_MAX_CQ)
cq = dev->cq_tbl[cq_idx];
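The ocrdma fragment is the defensive half of the same table pattern: the CQ index arrives from a hardware event, so it is bounds-checked against OCRDMA_MAX_CQ before indexing dev->cq_tbl. A tiny sketch, with a made-up table size:

#include <stddef.h>

#define MAX_CQ 64                       /* stand-in for OCRDMA_MAX_CQ */

struct cq;
static struct cq *cq_tbl[MAX_CQ];

/* The index comes from a device event, so validate before use; an
 * out-of-range value means a spurious or corrupt event. */
static struct cq *cq_lookup(unsigned int cq_idx)
{
	if (cq_idx >= MAX_CQ)
		return NULL;
	return cq_tbl[cq_idx];
}

int main(void)
{
	return cq_lookup(3) == NULL ? 0 : 1;
}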
u32 cq_idx[USNIC_QP_GRP_MAX_CQS];
resp.cq_idx[i] = chunk->res[i]->vnic_idx;
u32 cq_idx;
u32 cq_idx;
u16 cq_idx;
u16 cq_idx;
u16 cq_idx;
u16 cq_idx;
struct ena_com_io_sq *io_sq, u16 cq_idx)
create_cmd.cq_idx = cq_idx;
rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
io_cq->idx = cmd_completion.cq_idx;
destroy_cmd.cq_idx = io_cq->idx;
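The ENA fragments tie the pieces together at setup time: an IO CQ is created against its submission queue, the completion returns the device-assigned cq_idx (kept for the later destroy command), and the RSS indirection table is filled with queue indices so the device can hash flows onto queues. A userspace sketch of the indirection-table half; the sizes are illustrative:

#include <stdint.h>
#include <stdio.h>

#define IND_TBL_SIZE 128                /* power-of-two table, made up */
#define NUM_QUEUES 8

/* Each entry names the queue (and thus CQ) a hash bucket maps to. */
static uint16_t rss_ind_tbl[IND_TBL_SIZE];

static void fill_ind_tbl(void)
{
	for (int i = 0; i < IND_TBL_SIZE; i++)
		rss_ind_tbl[i] = i % NUM_QUEUES;  /* round-robin default */
}

int main(void)
{
	uint32_t flow_hash = 0xdeadbeef;          /* stand-in flow hash */

	fill_ind_tbl();
	printf("flow -> queue %u\n",
	       rss_ind_tbl[flow_hash & (IND_TBL_SIZE - 1)]);
	return 0;
}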
u8 cq_idx; /* Completion queue index */
int qidx = cq_poll->cq_idx;
cq_poll->cq_idx = qidx;
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cmp_queue *cq = &qs->cq[cq_idx];
struct snd_queue *sq = &qs->sq[cq_idx];
struct rcv_queue *rq = &qs->rq[cq_idx];
cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
cq_idx, processed_cqe);
txq_idx = nicvf_netdev_qidx(nic, cq_idx);
nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
cq->cq_idx);
nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
cq->cq_idx, cq_head);
nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
rq->cq_idx = qidx;
(rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
sq->cq_idx = qidx;
mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
u8 cq_idx; /* CQ index (0 to 7) in the QS */
u8 cq_idx; /* CQ index (0 to 7) in the above QS */
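The ThunderX nicvf fragments work inside a "queue set" (QS) of up to 8 CQs: the interrupt handler reads the CQE count and head from per-CQ status registers, the same index selects the matching SQ/RQ and netdev TX queue, and RQ/SQ configuration packs the CQ binding into a register word, e.g. (rq->cq_idx << 16) | ... and (sq->cq_qs << 3) | sq->cq_idx. A sketch of that field packing; the shift positions echo the fragments, but the real layout belongs to the hardware:

#include <stdint.h>
#include <stdio.h>

/* Pack an RQ's CQ binding the way the fragments above do: CQ index
 * at bit 16, the RBDR fields below it. Positions are illustrative. */
static uint64_t rq_cfg(uint8_t cq_qs, uint8_t cq_idx,
		       uint8_t rbdr_qs, uint8_t rbdr_idx)
{
	return ((uint64_t)cq_qs << 19) | ((uint64_t)cq_idx << 16) |
	       ((uint64_t)rbdr_qs << 9) | ((uint64_t)rbdr_idx << 1);
}

/* SQ side: queue-set number above bit 3, CQ index in the low bits. */
static uint64_t sq_cfg(uint8_t cq_qs, uint8_t cq_idx)
{
	return ((uint64_t)cq_qs << 3) | cq_idx;
}

int main(void)
{
	printf("rq cfg 0x%llx, sq cfg 0x%llx\n",
	       (unsigned long long)rq_cfg(0, 3, 0, 0),
	       (unsigned long long)sq_cfg(0, 3));
	return 0;
}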
__be16 cq_idx;
pool = &pfvf->qset.pool[cq->cq_idx];
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
cq->cq_idx = qidx;
cq->cq_idx, cq->pool_ptrs - 1)))
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
((u64)cq->cq_idx << 32) | processed_cqe);
qidx = cq->cq_idx - pfvf->hw.rx_queues;
((u64)cq->cq_idx << 32) | processed_cqe);
int qidx = cq->cq_idx;
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) {
seg_size[seg], parse, cq->cq_idx))
skb_record_rx_queue(skb, cq->cq_idx);
((u64)cq->cq_idx << 32) | processed_cqe);
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
qidx = cq->cq_idx - pfvf->hw.rx_queues;
((u64)cq->cq_idx << 32) | processed_cqe);
qidx = cq->cq_idx - pfvf->hw.rx_queues;
int workdone = 0, cq_idx, i;
cq_idx = cq_poll->cq_ids[i];
if (unlikely(cq_idx == CINT_INVALID_CQ))
cq = &qset->cq[cq_idx];
u64 incr = (u64)(cq->cq_idx) << 32;
pool = &pfvf->qset.pool[cq->cq_idx];
work = &pfvf->refill_wrk[cq->cq_idx];
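The Marvell OcteonTX2/CN10K fragments reuse cq_idx for three jobs: selecting the buffer pool and refill work for the CQ, deriving the TX queue as cq_idx minus the RX queue count (RX CQs come first in the numbering), and acknowledging processed CQEs with a single doorbell write that packs the CQ index into the high 32 bits and the CQE count into the low 32. Both index tricks, sketched:

#include <stdint.h>
#include <stdio.h>

/* One doorbell write both acks and frees processed CQEs: CQ index in
 * the high word, number of processed CQEs in the low word. */
static uint64_t cq_doorbell(uint32_t cq_idx, uint32_t processed_cqe)
{
	return ((uint64_t)cq_idx << 32) | processed_cqe;
}

/* TX queues start where RX queues end, so a TX CQ's queue number is
 * its CQ index minus the RX queue count. */
static unsigned int tx_qidx(unsigned int cq_idx, unsigned int rx_queues)
{
	return cq_idx - rx_queues;
}

int main(void)
{
	printf("doorbell 0x%016llx, tx qidx %u\n",
	       (unsigned long long)cq_doorbell(9, 17), tx_qidx(9, 8));
	return 0;
}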
u8 cq_idx;
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->cq_idx = cq_idx;
netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi);
cq_idx);
netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL);
int cq_idx)
int cq_idx;
int cq_idx);
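These mlx4_en-style fragments (an attribution inferred from the priv->rx_cq/rx_ring_num names) do two things with the index: wrap it modulo the RX ring count so an out-of-range consumer still lands on a valid RX CQ, and hand it to netif_queue_set_napi() so the stack knows which NAPI instance serves each TX/RX queue (passing NULL detaches it again). The modulo mapping, sketched:

#include <stdio.h>

#define RX_RING_NUM 8

/* Clamp by wrapping: any CQ index maps onto one of the RX rings. */
static unsigned int pick_rx_cq(unsigned int cq_idx)
{
	return cq_idx % RX_RING_NUM;
}

int main(void)
{
	for (unsigned int cq_idx = 6; cq_idx < 10; cq_idx++)
		printf("cq %u -> rx cq %u\n", cq_idx, pick_rx_cq(cq_idx));
	return 0;
}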
nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
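The NVMe fragments index the shadow-doorbell (dbbuf) arrays with cq_idx(qid, db_stride). NVMe lays doorbells out in SQ/CQ pairs scaled by the controller's doorbell stride: the SQ doorbell for queue qid sits at slot 2*qid*stride and the CQ doorbell right after it at (2*qid + 1)*stride, and the dbbuf arrays mirror that layout. Sketched:

#include <stdint.h>
#include <stdio.h>

/* Doorbell layout: SQ/CQ pairs, spaced by the controller's stride.
 * The dbbuf shadow arrays mirror this, which is what the driver's
 * sq_idx()/cq_idx() helpers compute. */
static unsigned int sq_slot(unsigned int qid, uint32_t stride)
{
	return qid * 2 * stride;
}

static unsigned int cq_slot(unsigned int qid, uint32_t stride)
{
	return (qid * 2 + 1) * stride;
}

int main(void)
{
	for (unsigned int qid = 0; qid < 3; qid++)
		printf("qid %u: sq slot %u, cq slot %u\n",
		       qid, sq_slot(qid, 1), cq_slot(qid, 1));
	return 0;
}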
int cq_idx;
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
if (cq_idx < phba->cfg_hdw_queue) {
pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
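The lpfc fragments recover a CQ index from a hardware queue ID the slow way: scan the per-hardware-queue array until io_cq->queue_id matches, and only trust cq_idx if the loop ended early. A sketch with made-up queue IDs:

#include <stdint.h>
#include <stdio.h>

#define NUM_HDW_QUEUES 4

struct io_cq { uint16_t queue_id; };

static struct io_cq hdwq[NUM_HDW_QUEUES] = {
	{ .queue_id = 10 }, { .queue_id = 11 },
	{ .queue_id = 12 }, { .queue_id = 13 },
};

/* Walk the hardware queues; on a hit the loop index is the CQ index. */
static int find_cq_idx(uint16_t qid)
{
	int cq_idx;

	for (cq_idx = 0; cq_idx < NUM_HDW_QUEUES; cq_idx++)
		if (hdwq[cq_idx].queue_id == qid)
			return cq_idx;
	return -1;                      /* no CQ owns this qid */
}

int main(void)
{
	printf("qid 12 -> cq_idx %d\n", find_cq_idx(12));
	printf("qid 99 -> cq_idx %d\n", find_cq_idx(99));
	return 0;
}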
uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
io_req->task_params->cq_rss_number = cq_idx;
u16 cq_idx;
cq_idx = smp_processor_id() % qedi->num_queues;
task_params.cq_rss_number = cq_idx;
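Both the qedf and qedi fragments pick a completion queue the same way: current CPU modulo the number of queues, so completions for work submitted on a CPU tend to come back near it. A userspace sketch using sched_getcpu() in place of smp_processor_id():

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NUM_QUEUES 8

/* Spread submissions across CQs by the submitting CPU, wrapped to the
 * queue count; fall back to CQ 0 if the CPU can't be determined. */
static unsigned int pick_cq(void)
{
	int cpu = sched_getcpu();

	return (cpu < 0 ? 0 : (unsigned int)cpu) % NUM_QUEUES;
}

int main(void)
{
	printf("this CPU submits on CQ %u\n", pick_cq());
	return 0;
}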
unsigned int cq_idx,
unsigned int cq_idx;
for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {
nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
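The snic fragment services firmware CQs by index range: the first wq_count CQs back the work queues, so the firmware-event service loop runs from wq_count up to cq_count. Sketched with made-up counts:

#include <stdio.h>

#define WQ_COUNT 2                      /* CQs 0..WQ_COUNT-1 back WQs */
#define CQ_COUNT 4                      /* the rest carry fw events   */

/* Pretend to drain one CQ; returns entries serviced. */
static int service_fw_cq(unsigned int cq_idx)
{
	printf("draining firmware CQ %u\n", cq_idx);
	return 1;
}

int main(void)
{
	int nent = 0;

	/* Firmware CQs are numbered after the WQ-backed ones, so the
	 * service loop starts at WQ_COUNT instead of 0. */
	for (unsigned int cq_idx = WQ_COUNT; cq_idx < CQ_COUNT; cq_idx++)
		nent += service_fw_cq(cq_idx);

	printf("serviced %d entries\n", nent);
	return 0;
}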
__u16 cq_idx;