rq_depth — cross-reference of the request-queue/receive-queue depth field as used by the block layer (blk-rq-qos/blk-wbt), the efa and irdma RDMA drivers, the fungible and Huawei hinic/hinic3 NIC drivers, the 9p RDMA transport, and svcrdma.
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
bool rq_depth_scale_up(struct rq_depth *rqd)
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
bool rq_depth_scale_up(struct rq_depth *rqd);
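The six lines above are the blk-rq-qos depth-scaling API. A minimal userspace model of the scale-step idea, assuming the shift-based shrink and the meaning of scale_step; this is a simplification of the concept, not the kernel's exact bookkeeping (the boost path for negative steps is omitted):

struct rq_depth_model {
	unsigned int default_depth;	/* configured ceiling */
	unsigned int queue_depth;	/* device queue depth */
	unsigned int max_depth;		/* currently permitted depth */
	int scale_step;			/* > 0 throttled, < 0 boosted */
};

static void calc_max_depth(struct rq_depth_model *rqd)
{
	unsigned int depth = rqd->default_depth < rqd->queue_depth ?
			     rqd->default_depth : rqd->queue_depth;

	/* throttle by halving per step; cap the shift to stay defined */
	if (rqd->scale_step > 0)
		depth = 1 + ((depth - 1) >>
			     (rqd->scale_step > 31 ? 31 : rqd->scale_step));
	rqd->max_depth = depth;
}

static void scale_down(struct rq_depth_model *rqd)	/* throttle harder */
{
	rqd->scale_step++;
	calc_max_depth(rqd);
}

static void scale_up(struct rq_depth_model *rqd)	/* relax throttling */
{
	rqd->scale_step--;
	calc_max_depth(rqd);
}
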
struct rq_depth *rqd = &rwb->rq_depth;
struct rq_depth *rqd = &rwb->rq_depth;
} else if (rwb->rq_depth.max_depth <= 2) {
rwb->wb_normal = rwb->rq_depth.max_depth;
rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
if (!rq_depth_scale_up(&rwb->rq_depth))
if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
struct rq_depth *rqd = &rwb->rq_depth;
struct rq_depth *rqd = &rwb->rq_depth;
struct rq_depth *rqd = &rwb->rq_depth;
limit = rwb->rq_depth.max_depth;
RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
struct rq_depth rq_depth;
rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
rwb->rq_depth.queue_depth = blk_queue_depth(q);
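These blk-wbt lines derive the writeback limits from the shared rq_depth state: wb_normal is roughly half of max_depth and wb_background roughly a quarter, with a special case for very shallow queues. A compilable sketch of that derivation, assuming wb_background falls back to 1 in the shallow case:

#include <stdio.h>

struct rq_depth_state { unsigned int max_depth; };

struct rq_wb_model {
	struct rq_depth_state rq_depth;
	unsigned int wb_normal;		/* depth for normal writeback */
	unsigned int wb_background;	/* depth for background writeback */
};

static void calc_wb_limits(struct rq_wb_model *rwb)
{
	if (rwb->rq_depth.max_depth <= 2) {
		/* too shallow to split into tiers */
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

int main(void)
{
	struct rq_wb_model rwb = { .rq_depth = { .max_depth = 32 } };

	calc_wb_limits(&rwb);
	printf("normal=%u background=%u\n", rwb.wb_normal, rwb.wb_background);
	return 0;
}
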
params->rq_depth;
u32 rq_depth;
create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
__le16 rq_depth;
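The efa lines forward the verbs-level init_attr->cap.max_recv_wr straight into the create-QP device command, and the __le16 field suggests the depth also crosses a little-endian hardware ABI somewhere in this listing. A hypothetical sketch of that hand-off, using htole16() as a userspace stand-in for cpu_to_le16(); the struct and function names are invented:

#include <endian.h>
#include <stdint.h>

struct qp_cap { uint32_t max_recv_wr; };	/* verbs-side request */

struct create_qp_cmd {				/* hypothetical device ABI */
	uint16_t rq_depth;			/* models a __le16 rq_depth field */
};

static void fill_create_qp(struct create_qp_cmd *cmd,
			   const struct qp_cap *cap)
{
	/* the ULP's requested depth is forwarded to the device verbatim */
	cmd->rq_depth = htole16((uint16_t)cap->max_recv_wr);
}
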
u32 *rq_depth, u8 *rq_shift)
*rq_shift, rq_depth);
u32 *rq_depth, u8 *rq_shift);
u32 rq_depth;
ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
(ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
kzalloc_objs(*iwqp->kqp.rq_wrid_mem, ukinfo->rq_depth);
size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
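The irdma lines keep the RQ depth in hardware quanta: irdma_uk_calc_depth_shift_rq() picks rq_depth and rq_shift, the WQE-level ring size is rq_depth >> rq_shift, and IRDMA_RQ_RSVD slots are held back before computing max_recv_wr. A sketch of that bookkeeping, assuming a power-of-two round-up and a reserved count of 1 purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RQ_RSVD 1	/* assumed reserved-slot count for illustration */

static uint32_t roundup_pow_of_two(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t wanted_wrs = 100;	/* caller's max_recv_wr */
	uint8_t rq_shift = 1;		/* each WQE occupies 2^1 quanta here */

	/* depth in quanta, padded for reserved slots, power-of-two sized */
	uint32_t rq_depth = roundup_pow_of_two((wanted_wrs + RQ_RSVD) << rq_shift);

	uint32_t rq_size = rq_depth >> rq_shift;		/* WQE slots */
	uint32_t max_recv_wr = (rq_depth - RQ_RSVD) >> rq_shift; /* usable WRs */

	printf("depth=%u size=%u max_recv_wr=%u\n", rq_depth, rq_size, max_recv_wr);
	return 0;
}
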
.rq_depth = areq->rq_depth,
if (areq->rq_depth) {
if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
u16 rq_depth;
for (i = 0; i < funq->rq_depth; i++) {
for (i = 0; i < funq->rq_depth; i++) {
funq->rq_tail = funq->rq_depth - 1;
if (++funq->rq_buf_idx == funq->rq_depth)
funq->rq_depth;
funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
if (req->rq_depth) {
if (req->rq_depth) {
funq->rq_depth = req->rq_depth;
funq->rq_depth, funq->rq_dma_addr, 0, 0,
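The fun_queue lines show plain ring arithmetic: the RQ holds rq_depth fixed-size entries, the buffer index wraps by comparison rather than masking, and the tail starts at depth - 1 so the full ring is posted. A sketch with invented names around the two lines of wrap logic:

#include <stdint.h>

struct fake_rq {
	uint16_t rq_depth;
	uint16_t rq_buf_idx;	/* next buffer slot to hand to the device */
	uint16_t rq_tail;	/* last posted slot */
};

static void rq_init(struct fake_rq *q, uint16_t depth)
{
	q->rq_depth = depth;
	q->rq_buf_idx = 0;
	q->rq_tail = depth - 1;	/* all depth buffers start out posted */
}

static void rq_advance(struct fake_rq *q)
{
	if (++q->rq_buf_idx == q->rq_depth)	/* wrap by compare, so the
						 * depth need not be a
						 * power of two */
		q->rq_buf_idx = 0;
}
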
u32 rq_depth;
u32 rq_depth;
unsigned int rq_depth;
unsigned int rq_depth;
ring->rx_pending = fp->rq_depth;
fp->rq_depth == ring->rx_pending)
.rq_depth = ring->rx_pending,
fp->rq_depth = ring->rx_pending;
fp->cq_depth = 2 * fp->rq_depth;
.rq_depth = fp->rq_depth,
fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
.rq_depth = ADMIN_RQ_DEPTH,
qset->rq_depth, qset->rxq_start, qset->state);
.rq_depth = fp->rq_depth,
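The funeth ethtool lines map rq_depth to and from rx_pending and resize the CQ to twice the RQ whenever the depth changes. A sketch of that round-trip; the arithmetic is from the listing, while the rationale for the doubling (headroom for completion events) is an assumption:

struct ringparam_model { unsigned int rx_pending; };

struct fp_model {
	unsigned int rq_depth;
	unsigned int cq_depth;
};

static void get_ringparam(const struct fp_model *fp,
			  struct ringparam_model *ring)
{
	ring->rx_pending = fp->rq_depth;
}

static int set_ringparam(struct fp_model *fp,
			 const struct ringparam_model *ring)
{
	if (fp->rq_depth == ring->rx_pending)
		return 0;			/* nothing to do */
	fp->rq_depth = ring->rx_pending;
	fp->cq_depth = 2 * fp->rq_depth;	/* CQ sized at twice the RQ */
	return 1;				/* caller rebuilds the queues */
}
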
u16 rq_depth;
ring->rx_pending = nic_dev->rq_depth;
new_rq_depth == nic_dev->rq_depth)
nic_dev->sq_depth, nic_dev->rq_depth,
nic_dev->rq_depth = new_rq_depth;
unsigned int rq_depth)
hw_ioctxt.rq_depth = ilog2(rq_depth);
int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
func_to_io->rq_depth = rq_depth;
err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
u16 rq_depth;
int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
u16 rq_depth;
((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
(hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&
nic_dev->rq_depth = HINIC_RQ_DEPTH;
nic_dev->rq_depth);
rq_num.rq_depth = ilog2(nic_dev->rq_depth);
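The hinic lines send depths to firmware log2-encoded (ilog2(rq_depth)) and validate contexts against HINIC_QUEUE_MIN_DEPTH/MAX_DEPTH, which implies power-of-two depths. A sketch of that encode-and-validate step; the bounds and names below are stand-ins:

#include <stdint.h>

#define QUEUE_MIN_DEPTH 64	/* stand-in bounds for illustration */
#define QUEUE_MAX_DEPTH 4096

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

struct hw_ioctxt_model { uint16_t rq_depth; };	/* log2-encoded on the wire */

static int encode_ctxt(struct hw_ioctxt_model *ctxt, uint32_t rq_depth)
{
	if (rq_depth < QUEUE_MIN_DEPTH || rq_depth > QUEUE_MAX_DEPTH ||
	    (rq_depth & (rq_depth - 1)))	/* must be a power of two */
		return -1;
	ctxt->rq_depth = (uint16_t)ilog2_u32(rq_depth);
	return 0;
}
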
u32 rq_depth;
int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
root_ctxt.rq_depth = ilog2(rq_depth);
int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
u16 rq_depth;
nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
q_params->rq_depth, q_params->rxqs_res);
hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
q_params->rq_depth, q_params->rxqs_res);
qp_params->rq_depth = trxq_params->rq_depth;
u32 rq_depth;
u16 q_id, u32 rq_depth, u16 rq_msix_idx)
err = hinic3_wq_create(hwdev, &rq->wq, rq_depth,
u32 rq_depth, u16 qp_msix_idx)
err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx);
qp_params->sq_depth, qp_params->rq_depth,
u32 rq_depth;
rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;
err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth,
u32 rq_depth;
u32 rq_depth, u16 buf_len)
u32 free_wqebbs = rq_depth - 1;
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
rqres->rx_info = kzalloc_objs(*rqres->rx_info, rq_depth);
pp_params.pool_size = rq_depth * nic_dev->rx_buf_len /
pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth,
hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res);
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
hinic3_free_rx_buffers(rqres, rq_depth);
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
rxq->q_depth = rq_depth;
rxq->q_depth = nic_dev->q_params.rq_depth;
rxq->q_mask = nic_dev->q_params.rq_depth - 1;
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
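The hinic3 RX-resource lines size per-queue CQE memory linearly in rq_depth and keep q_mask = rq_depth - 1 so ring indices wrap with a single AND. A sketch of that allocation and mask-indexing pattern with placeholder types:

#include <stdint.h>
#include <stdlib.h>

struct rq_cqe_model { uint32_t status; uint32_t len; };	/* stand-in CQE */

struct rxq_model {
	struct rq_cqe_model *cqes;
	uint32_t q_depth;
	uint32_t q_mask;
};

static int rxq_alloc(struct rxq_model *rxq, uint32_t rq_depth)
{
	size_t cqe_mem_size = sizeof(struct rq_cqe_model) * rq_depth;

	rxq->cqes = calloc(1, cqe_mem_size);
	if (!rxq->cqes)
		return -1;
	rxq->q_depth = rq_depth;
	rxq->q_mask = rq_depth - 1;	/* valid only for power-of-two depths */
	return 0;
}

static struct rq_cqe_model *rxq_cqe(struct rxq_model *rxq, uint32_t ci)
{
	return &rxq->cqes[ci & rxq->q_mask];	/* cheap wraparound */
}
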
rdma_opts->rq_depth = result.uint_32;
ctx->rdma_opts.rq_depth = P9_RDMA_RQ_DEPTH;
int rq_depth;
if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
seq_printf(m, ",rq=%u", rdma->rq_depth);
rdma->rq_depth = opts->rq_depth;
sema_init(&rdma->rq_sem, rdma->rq_depth);
opts.sq_depth + opts.rq_depth + 1,
qp_attr.cap.max_recv_wr = opts.rq_depth;
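The 9p transport lines use rq_depth as a credit count: rq_sem starts at rq_depth, the CQ is sized sq_depth + rq_depth + 1, and max_recv_wr is set to rq_depth. A sketch of the credit pattern, with POSIX semaphores standing in for the kernel's sema_init()/down():

#include <semaphore.h>

#define RQ_DEPTH 32	/* stand-in for P9_RDMA_RQ_DEPTH */

static sem_t rq_sem;

static void transport_init(void)
{
	sem_init(&rq_sem, 0, RQ_DEPTH);	/* one credit per RQ slot */
}

static void post_recv_buffer(void)
{
	sem_wait(&rq_sem);	/* take a credit; blocks once rq_depth
				 * receives are outstanding */
	/* ... hand a buffer to the device here ... */
}

static void recv_completed(void)
{
	sem_post(&rq_sem);	/* completion returns the credit */
}
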
int rq_depth;
unsigned int ctxts, rq_depth, maxpayload;
rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +
if (rq_depth > dev->attrs.max_qp_wr) {
rq_depth = dev->attrs.max_qp_wr;
newxprt->sc_max_requests = rq_depth - 2;
newxprt->sc_sq_depth = rq_depth +
ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
qp_attr.cap.max_recv_wr = rq_depth;
newxprt->sc_sq_depth, rq_depth);
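The svcrdma lines compute the wanted RQ depth as forward plus backchannel credits plus slack, clamp it to the device's max_qp_wr, and re-derive the credit limit as rq_depth - 2 when clamped. A sketch of the clamp; the slack term is abbreviated here and the numeric values are made up:

#include <stdio.h>

struct dev_caps { unsigned int max_qp_wr; };

int main(void)
{
	struct dev_caps dev = { .max_qp_wr = 1024 };
	unsigned int max_requests = 1000, max_bc_requests = 32;

	unsigned int rq_depth = max_requests + max_bc_requests + 2;

	if (rq_depth > dev.max_qp_wr) {
		/* device cannot grant that many WRs: shrink, re-derive */
		rq_depth = dev.max_qp_wr;
		max_requests = rq_depth - 2;	/* leave 2 WRs of headroom */
	}
	printf("rq_depth=%u max_requests=%u\n", rq_depth, max_requests);
	return 0;
}
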