mq_ctx — references to rq->mq_ctx (struct blk_mq_ctx, the per-CPU software queue a request is staged on), across the blk-mq core, the flush machinery, and the kyber I/O scheduler
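For orientation: rq->mq_ctx records which per-CPU software queue the request was allocated on; the hardware context reached through it decides where the request is dispatched and completed. An abridged sketch of the structure (field subset and comments are mine, from memory of block/blk-mq.h; the remaining members are elided):

        struct blk_mq_ctx {
                unsigned int            cpu;                            /* CPU owning this software queue */
                unsigned short          index_hw[HCTX_MAX_TYPES];       /* this ctx's slot in each hctx->ctxs[] */
                struct request_queue    *queue;
                /* per-type rq_lists, ctxs backpointer, kobject elided */
        } ____cacheline_aligned_in_smp;

Flush machinery (blk-flush.c) — the preallocated flush request borrows the software queue of the first request waiting on it: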
struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx);
flush_rq->mq_ctx = first_rq->mq_ctx;
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
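The lookup helper used above is a one-liner; a sketch of its probable shape in recent kernels (older kernels passed the request_queue as well): it maps (REQ_OP_FLUSH, ctx) to the owning hardware context and returns that hctx's flush queue.

        static struct blk_flush_queue *blk_get_flush_queue(struct blk_mq_ctx *ctx)
        {
                /* resolve the hctx that services flushes for this software queue */
                return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
        }

Scheduler dispatch (blk-mq-sched.c) — round-robin across the software queues mapped to one hctx: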
ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
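blk_mq_next_ctx is the wrap-around step behind that round-robin; a sketch as I recall it from block/blk-mq.h:

        static inline struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                                         struct blk_mq_ctx *ctx)
        {
                unsigned short idx = ctx->index_hw[hctx->type];

                /* advance to the next ctx mapped to this hctx, wrapping at
                 * the end, so every software queue gets a dispatch turn */
                if (++idx == hctx->nr_ctx)
                        idx = 0;
                return hctx->ctxs[idx];
        }

Completion path (blk-mq.c) — decide between completing on the current CPU and sending an IPI back to the submitting CPU, rq->mq_ctx->cpu: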
if (cpu == rq->mq_ctx->cpu ||
    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
     cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
     cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
        return false;   /* same CPU, or shared cache and equal capacity: complete locally */
/* don't try to IPI an offline CPU */
return cpu_online(rq->mq_ctx->cpu);
cpu = rq->mq_ctx->cpu;
if ((rq->mq_hctx->nr_ctx == 1 &&
     rq->mq_ctx->cpu == raw_smp_processor_id()) || rq->cmd_flags & REQ_POLLED)
        return false;   /* single-mapped hctx already local, or polled I/O */
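When an IPI is warranted, the request goes onto the submitting CPU's completion llist; a sketch of that path for recent kernels (the blk_cpu_done/blk_cpu_csd per-CPU names are from memory; older kernels used rq->csd directly):

        static void blk_mq_complete_send_ipi(struct request *rq)
        {
                unsigned int cpu = rq->mq_ctx->cpu;

                /* only the first request onto an empty list kicks the CSD IPI */
                if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
                        smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
        }

Request setup, plug batching, and insertion (blk-mq.c):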
BUG_ON(rq->mq_ctx != ctx);
struct blk_mq_ctx *ctx = rq->mq_ctx;
this_ctx = rq->mq_ctx;
} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
rq->mq_ctx = ctx;
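That assignment is made once, at allocation time; a minimal sketch, assuming the blk_mq_rq_ctx_init() shape where blk_mq_alloc_data carries the chosen ctx/hctx pair (the exact parameter list varies across kernel versions):

        static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                                                  struct blk_mq_tags *tags,
                                                  unsigned int tag)
        {
                struct request *rq = tags->static_rqs[tag];

                rq->q = data->q;
                rq->mq_ctx = data->ctx;         /* software queue picked at alloc */
                rq->mq_hctx = data->hctx;       /* hardware queue it maps to */
                /* tag, flags, timestamps and the rest elided */
                return rq;
        }

The exported accessor blk_mq_rq_cpu() simply hands this CPU back to drivers; teardown follows.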
return rq->mq_ctx->cpu;
struct blk_mq_ctx *ctx = rq->mq_ctx;
blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
rq->mq_ctx->cpu);
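On free, driver and scheduler tags return to pools keyed by the request's software queue; a sketch of the relevant slice of __blk_mq_free_request() (abridged; surrounding teardown elided):

        static void __blk_mq_free_request(struct request *rq)
        {
                struct blk_mq_ctx *ctx = rq->mq_ctx;
                struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

                if (rq->tag != BLK_MQ_NO_TAG)
                        blk_mq_put_tag(hctx->tags, ctx, rq->tag);
                if (rq->internal_tag != BLK_MQ_NO_TAG)
                        blk_mq_put_tag(hctx->sched_tags, ctx, rq->internal_tag);
                /* queue exit and request-reference drop elided */
        }

Kyber I/O scheduler (kyber-iosched.c) — per-ctx queues are indexed by the request's software-queue slot within the hctx: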
struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
sbitmap_set_bit(&khd->kcq_map[sched_domain],
                rq->mq_ctx->index_hw[hctx->type]);
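Both kyber hits come from the insert path; a sketch of how they fit together, assuming the kyber_insert_requests() loop of recent kernels (tracing, head insertion, and merge handling elided):

        list_for_each_entry_safe(rq, next, rq_list, queuelist) {
                unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
                struct kyber_ctx_queue *kcq =
                        &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];

                spin_lock(&kcq->lock);
                list_move_tail(&rq->queuelist, &kcq->rq_list[sched_domain]);
                /* mark this kcq non-empty so dispatch will visit it */
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
                spin_unlock(&kcq->lock);
        }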