icq
return bic->icq.q->elevator->elevator_data;
/*
 * Convert the generic io_cq embedded in a bfq_io_cq back to the containing
 * bfq_io_cq.  Relies on the io_cq being a member of struct bfq_io_cq, so
 * container_of() can recover the outer structure from the inner pointer.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
return container_of(icq, struct bfq_io_cq, icq);
icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
static void bfq_exit_icq(struct io_cq *icq)
struct bfq_io_cq *bic = icq_to_bic(icq);
int ioprio = bic->icq.ioc->ioprio;
if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
if (!rq->elv.icq || !bfqq)
if (rq->elv.icq) {
put_io_context(rq->elv.icq->ioc);
rq->elv.icq = NULL;
rq->elv.icq = ioc_find_get_icq(rq->q);
if (unlikely(!rq->elv.icq))
bic = icq_to_bic(rq->elv.icq);
struct io_cq icq; /* must be the first member */
struct io_cq *icq = hlist_entry(ioc->icq_list.first,
struct request_queue *q = icq->q;
ioc_destroy_icq(icq);
ioc_destroy_icq(icq);
struct io_cq *icq =
spin_lock(&icq->ioc->lock);
ioc_destroy_icq(icq);
spin_unlock(&icq->ioc->lock);
struct io_cq *icq;
icq = rcu_dereference(ioc->icq_hint);
if (icq && icq->q == q)
icq = radix_tree_lookup(&ioc->icq_tree, q->id);
if (icq && icq->q == q)
rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
icq = NULL;
return icq;
struct io_cq *icq;
icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
if (!icq)
kmem_cache_free(et->icq_cache, icq);
icq->ioc = ioc;
icq->q = q;
INIT_LIST_HEAD(&icq->q_node);
INIT_HLIST_NODE(&icq->ioc_node);
if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
hlist_add_head(&icq->ioc_node, &ioc->icq_list);
list_add(&icq->q_node, &q->icq_list);
et->ops.init_icq(icq);
kmem_cache_free(et->icq_cache, icq);
icq = ioc_lookup_icq(q);
if (!icq)
static void ioc_exit_icq(struct io_cq *icq)
return icq;
struct io_cq *icq = NULL;
struct elevator_type *et = icq->q->elevator->type;
icq = ioc_lookup_icq(q);
if (!icq) {
icq = ioc_create_icq(q);
if (!icq) {
if (icq->flags & ICQ_EXITED)
return icq;
et->ops.exit_icq(icq);
icq->flags |= ICQ_EXITED;
struct io_cq *icq;
hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
ioc_exit_icq(icq);
static void ioc_destroy_icq(struct io_cq *icq)
struct io_context *ioc = icq->ioc;
struct request_queue *q = icq->q;
if (icq->flags & ICQ_DESTROYED)
radix_tree_delete(&ioc->icq_tree, icq->q->id);
hlist_del_init(&icq->ioc_node);
list_del_init(&icq->q_node);
if (rcu_access_pointer(ioc->icq_hint) == icq)
ioc_exit_icq(icq);
icq->__rcu_icq_cache = et->icq_cache;
icq->flags |= ICQ_DESTROYED;
kfree_rcu(icq, __rcu_head);
struct irdma_cq *icq = READ_ONCE(rf->cq_table[cq_idx]);
if (unlikely(!icq)) {
cq = &icq->sc_cq;