qmap

The identifier qmap names several unrelated things across the Linux kernel tree. Grouped by subsystem, the matches are:
PowerPC KVM XIVE interrupt controller: u8 qmap is a bitmap recording which
event-queue priorities are provisioned for the guest.

    u8 qmap;

    xive->qmap |= (1 << prio);      /* mark priority as provisioned */
    if (xive->qmap & (1 << i)) {    /* test a priority bit */
    if (xive->qmap & (1 << prio))
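A minimal sketch of the provision-once pattern these fragments imply; the
structure and helper names are illustrative, only the qmap bit logic comes
from the lines above:

    /* Provision the event queue for a priority the first time it is used. */
    static int xive_provision_prio(struct kvmppc_xive *xive, u8 prio)
    {
            if (xive->qmap & (1 << prio))   /* already provisioned */
                    return 0;

            /* ... allocate and configure the queue for this priority ... */

            xive->qmap |= (1 << prio);      /* record that it is in use */
            return 0;
    }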
blk-mq core: struct blk_mq_queue_map describes one CPU-to-hardware-queue map
inside a tag set (the second line of the wrapped definition is completed from
the call sites further down):

    void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
                              struct device *dev, unsigned int offset)
    for (queue = 0; queue < qmap->nr_queues; queue++) {
    qmap->mq_map[cpu] = qmap->queue_offset + queue;
    blk_mq_map_queues(qmap);    /* fallback when no IRQ affinity is available */

    void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
    masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
    qmap->mq_map[cpu] = qmap->queue_offset;    /* fallback: every CPU -> first queue */
    for (queue = 0; queue < qmap->nr_queues; queue++) {
    qmap->mq_map[cpu] = qmap->queue_offset + queue;

    int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
    if (index == qmap->mq_map[i])

    static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
    qmap->mq_map[cpu] = 0;

    extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
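A minimal sketch of how a driver typically uses this from its blk_mq_ops
->map_queues() hook, assuming a single default-type map and no special queue
types; the driver name is invented:

    static void mydrv_map_queues(struct blk_mq_tag_set *set)
    {
            struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];

            qmap->nr_queues = set->nr_hw_queues;
            qmap->queue_offset = 0;
            blk_mq_map_queues(qmap);        /* spread CPUs evenly over the queues */
    }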
An on-stack free-slot bitmap, an unsigned long used directly with the bitops
API:

    unsigned long qmap = 0;
    __set_bit(j, &qmap);                    /* mark slot j as taken */
    j = find_next_zero_bit(&qmap, max, j);  /* next free slot at or after j */
    __set_bit(j, &qmap);                    /* claim it */
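A self-contained sketch of the same pattern, assuming sixteen slots; the
values are illustrative:

    #include <linux/bitops.h>

    unsigned long qmap = 0;
    unsigned int max = 16, j;

    __set_bit(3, &qmap);                    /* slot 3 pre-allocated */
    j = find_next_zero_bit(&qmap, max, 0);  /* j == 0, the first free slot */
    if (j < max)
            __set_bit(j, &qmap);            /* claim slot j */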
Intel i40e: a 16-bit qmap packs a traffic class's first-queue offset and
queue count into the VSI context's tc_mapping[] array (the right-hand sides
of the wrapped assignments are cut off by the extraction):

    u16 qcount = 0, max_qcount, qmap, sections = 0;
    qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
    ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

    u16 qmap;
    qmap =
    qmap = 0;
    ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);

    u16 qcount, qmap, sections = 0;
    qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
    ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

    u16 sections, qmap, num_qps;
    qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
    ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
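A sketch of the full encoding those wrapped lines build, assuming the i40e
admin-queue layout (queue offset in the low field, log2 of the queue count in
the QUE_NUMBER field); the second shift is reconstructed from the driver, not
visible in the truncated lines above:

    u16 offset = 0, qmap;
    u16 pow = order_base_2(qcount);     /* the count is encoded as a power of 2 */

    qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
           (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
    ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);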
Intel ice: the same TC queue-map encoding, built with FIELD_PREP() instead of
open-coded shifts:

    u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
    qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
    qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
    ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);

    u16 qcount, qmap;
    qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
    qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
    ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

    u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
    qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
    qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
    ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
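FIELD_PREP() (from <linux/bitfield.h>) shifts a value into the position given
by a mask, so each pair above is the masked equivalent of the open-coded i40e
shifts. A tiny illustration with an invented 3-bit field:

    #include <linux/bitfield.h>

    #define DEMO_Q_NUM_M    GENMASK(5, 3)

    u16 v = FIELD_PREP(DEMO_Q_NUM_M, 4);    /* v == 4 << 3 == 0x20 */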
Intel ice, VF (virtchnl) message handling: the VF's RX/TX queue masks are
copied into an on-stack unsigned long so for_each_set_bit() can walk them:

    unsigned long qmap;

    qmap = map->rxq_map;
    for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {

    qmap = map->txq_map;
    for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
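The copy into a local is needed because for_each_set_bit() takes an unsigned
long pointer, while the map fields here appear to be narrower integers. A
self-contained sketch; the per-queue helper is hypothetical:

    u16 rxq_map = 0x5;              /* queues 0 and 2 enabled */
    unsigned long qmap = rxq_map;
    unsigned int q;

    for_each_set_bit(q, &qmap, 16)
            enable_vf_rxq(q);       /* hypothetical per-queue enable */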
Marvell mvpp2: a one-bit-per-queue enable mask, assembled and then written to
the TX scheduler command register:

    u32 qmap;
    qmap = 0;
    qmap |= (1 << queue);
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
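A sketch of how the mask is assembled, assuming the bit is set in a loop over
the port's TX queues before the single register write; the loop bound is
illustrative:

    u32 qmap = 0;
    int queue;

    for (queue = 0; queue < txq_count; queue++)
            qmap |= (1 << queue);
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);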
Qualcomm IPA, per-SoC endpoint data tables: the flag is set in each table
entry that describes a QMAP-speaking endpoint (eighteen identical matches):

    .qmap = true,
Qualcomm IPA, endpoint code: bool qmap in the endpoint config selects the
QMAP multiplexing/aggregation protocol; the flag gates QMAP-specific setup
and, on the TX path, the accepted skb protocol:

    bool qmap;

    if (endpoint->config.qmap) {
    if (endpoint->config.qmap) {
    if (endpoint->config.qmap && !endpoint->toward_ipa) {
    if (endpoint->config.qmap)
    if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
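A sketch of where the .qmap = true, matches above live, assuming the
endpoint-config layout implied by endpoint->config.qmap; the surrounding
field names and values are illustrative, not taken from any real SoC table:

    .endpoint = {
            .config = {
                    .qmap           = true,
                    .aggregation    = true,         /* illustrative */
            },
    },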
SCSI host drivers: each driver points qmap at one map in its tag set, fills
in nr_queues/queue_offset, and maps CPUs either from hardware IRQ affinity
(blk_mq_map_hw_queues()) or with the generic spread (blk_mq_map_queues()).

fnic:

    struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];
    "qmap->nr_queues: %d\n", qmap->nr_queues);
    blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);

Per-CPU queue lookup, and a manual per-CPU round-robin map:

    struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
    queue = qmap->mq_map[raw_smp_processor_id()];

    struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
    for (queue = 0; queue < qmap->nr_queues; queue++) {
    qmap->mq_map[cpu] = qmap->queue_offset + queue;

hisi_sas: splits the tag set into default and poll maps, offsetting each:

    struct blk_mq_queue_map *qmap;
    qmap = &shost->tag_set.map[i];
    qmap->nr_queues = hisi_hba->cq_nvecs;
    qmap->nr_queues = hisi_hba->iopoll_q_cnt;
    qmap->nr_queues = 0;
    if (!qmap->nr_queues)
    qmap->queue_offset = qoff;
    blk_mq_map_queues(qmap);
    blk_mq_map_hw_queues(qmap, hisi_hba->dev,
    qoff += qmap->nr_queues;

pm8001:

    struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
    blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
    blk_mq_map_queues(qmap);

qla2xxx:

    struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
    blk_mq_map_queues(qmap);
    blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
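A minimal sketch of the fallback pattern shared by these drivers, assuming a
PCI host whose interrupt affinity may or may not be managed; only the blk-mq
calls are real API, the rest is invented:

    static void myhost_map_queues(struct Scsi_Host *shost)
    {
            struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];

            if (my_host_uses_managed_irqs)          /* invented condition */
                    blk_mq_map_hw_queues(qmap, &my_pdev->dev, 0);
            else
                    blk_mq_map_queues(qmap);        /* generic even spread */
    }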
blk-mq public declarations (the wrapped second prototype is completed from
the call sites above):

    void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
    void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
                              struct device *dev, unsigned int offset);
BPF, libbpf BTF map syntax: struct qmap is the inner map type of a
map-in-map declaration:

    __array(values, struct qmap);
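A sketch of the map-in-map declaration this line belongs to, assuming
libbpf's BTF-defined map conventions; the outer map name and the sizes are
illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct qmap {
            __uint(type, BPF_MAP_TYPE_QUEUE);
            __uint(max_entries, 4096);
            __type(value, __u32);
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
            __uint(max_entries, 4);
            __type(key, int);
            __array(values, struct qmap);   /* inner maps are queues */
    } queues SEC(".maps");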