/* "hw_q" across drivers/net: Cadence macb/GEM, Marvell octeon_ep (control
 * mailbox), Meta fbnic (hardware queue stats), and MediaTek mt7601u.
 */
/* Cadence macb/GEM: banked per-queue registers. Each bank is an array of
 * u32 slots, one per queue, so a queue's copy sits at base + (index << 2).
 */
#define GEM_ISR(hw_q)			(0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q)			(0x0440 + ((hw_q) << 2))
#define GEM_RBQP(hw_q)			(0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q)			(0x04A0 + ((hw_q) << 2))
#define GEM_IER(hw_q)			(0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q)			(0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q)			(0x0640 + ((hw_q) << 2))
#define GEM_ENST_START_TIME(hw_q)	(0x0800 + ((hw_q) << 2))
#define GEM_ENST_ON_TIME(hw_q)		(0x0820 + ((hw_q) << 2))
#define GEM_ENST_OFF_TIME(hw_q)		(0x0840 + ((hw_q) << 2))
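
/* Worked example: GEM_ISR(1) = 0x0400 + (1 << 2) = 0x0404, i.e. the second
 * banked ISR slot. A minimal read helper, assuming macb's usual bp->regs
 * MMIO base (sketch only, not a driver function):
 */
static inline u32 gem_queue_isr(struct macb *bp, unsigned int hw_q)
{
	/* valid for hw_q >= 1; queue 0 uses the legacy MACB_ISR register */
	return readl_relaxed(bp->regs + GEM_ISR(hw_q - 1));
}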

unsigned int hw_q, q;

/* Cache per-queue register offsets (the real driver also skips queues
 * absent from bp->queue_mask before using slot q).
 */
for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) {
	struct macb_queue *queue = &bp->queues[q++];

	if (hw_q) {
		/* queues 1..N: banked GEM registers, indexed from 0 */
		queue->ISR  = GEM_ISR(hw_q - 1);
		queue->IER  = GEM_IER(hw_q - 1);
		queue->IDR  = GEM_IDR(hw_q - 1);
		queue->IMR  = GEM_IMR(hw_q - 1);
		queue->TBQP = GEM_TBQP(hw_q - 1);
		queue->RBQP = GEM_RBQP(hw_q - 1);
		queue->RBQS = GEM_RBQS(hw_q - 1);
	} else {
		/* queue 0 uses the legacy MACB_* register block (elided) */
	}

	/* ENST (802.1Qbv scheduled traffic) registers exist for queue 0
	 * as well, hence hw_q rather than hw_q - 1.
	 */
	queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
	queue->ENST_ON_TIME    = GEM_ENST_ON_TIME(hw_q);
	queue->ENST_OFF_TIME   = GEM_ENST_OFF_TIME(hw_q);
}
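
/* With the offsets cached, status reads become uniform across queues; this
 * mirrors how macb's IRQ handler uses its queue_readl() accessor (usage
 * sketch, not the full handler):
 */
u32 status = queue_readl(queue, ISR);

if (status & MACB_BIT(RCOMP))
	; /* receive-complete work would be scheduled here */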

/* Marvell octeon_ep control mailbox: the h2fq (host-to-firmware) and f2hq
 * (firmware-to-host) rings sit back to back in BAR memory after the info
 * block.
 */
mbox->h2fq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ;
mbox->f2hq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ +
		  mbox->h2fq.sz; /* assumed continuation: f2hq follows h2fq */
qbuf = (q->hw_q + *pi);	/* write path: element at the producer index */
qbuf = (q->hw_q + *ci);	/* read path: element at the consumer index */

/* In the mailbox queue descriptor, hw_q is the ioremapped base of the ring
 * itself, which is why every access above carries __iomem.
 */
u8 __iomem *hw_q;
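
/* Minimal sketch of the access pattern above: copy a message into an
 * ioremapped ring element, then advance the producer index. The struct
 * and field names (elem_sz, sz) are illustrative, not the driver's.
 */
struct mbox_ring {
	u32 sz;			/* ring size in bytes */
	u32 elem_sz;		/* element size in bytes */
	u8 __iomem *hw_q;	/* ioremapped ring base */
};

static void mbox_ring_write(struct mbox_ring *q, u32 *pi,
			    const void *msg, u32 len)
{
	u8 __iomem *qbuf = q->hw_q + *pi;

	memcpy_toio(qbuf, msg, len);		/* MMIO-safe copy */
	*pi = (*pi + q->elem_sz) % q->sz;	/* advance, wrap at end */
}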

/* Meta fbnic: per-queue hardware stats. The ethtool strings/values path
 * walks the hw_q array one queue at a time; the trailing arguments of the
 * report call are assumed.
 */
const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];

fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
		      FBNIC_HW_Q_STATS_LEN, &data);

/* 32-bit per-queue RX counter reads; helper/register names are assumed */
static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd, struct fbnic_hw_q_stats *hw_q)
{
	int i;

	for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
		fbnic_hw_stat_rd32(fbd, FBNIC_RDE_PKT_ERR_CNT(i), &hw_q->rde_pkt_err);
		fbnic_hw_stat_rd32(fbd, FBNIC_RDE_PKT_CQ_DROP_CNT(i), &hw_q->rde_pkt_cq_drop);
		fbnic_hw_stat_rd32(fbd, FBNIC_RDE_PKT_BDQ_DROP_CNT(i), &hw_q->rde_pkt_bdq_drop);
	}
}

/* Matching reset path: latch current register values as new baselines */
static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd, struct fbnic_hw_q_stats *hw_q)
{
	int i;

	for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
		fbnic_hw_stat_rst32(fbd, FBNIC_RDE_PKT_ERR_CNT(i), &hw_q->rde_pkt_err);
		fbnic_hw_stat_rst32(fbd, FBNIC_RDE_PKT_CQ_DROP_CNT(i), &hw_q->rde_pkt_cq_drop);
		fbnic_hw_stat_rst32(fbd, FBNIC_RDE_PKT_BDQ_DROP_CNT(i), &hw_q->rde_pkt_bdq_drop);
	}
}

/* Entry point used by the stats paths below (any locking elided) */
void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
			  struct fbnic_hw_q_stats *hw_q)
{
	fbnic_get_hw_rxq_stats32(fbd, hw_q);
}

fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);	/* re-baseline counters */
fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);	/* refresh running totals */

/* Backing storage (in struct fbnic_hw_stats) and the public prototype */
struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];

void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
			  struct fbnic_hw_q_stats *hw_q);

/* stats64-style aggregation: fold every queue's drops and errors together */
for (i = 0; i < fbd->max_num_queues; i++) {
	rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
	rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
	rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
}

/* Per-queue qstats callback: refresh, then report queue idx; the
 * continuation of the final sum is assumed.
 */
fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);

rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
		       fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
	       rx->hw_drop_overruns;
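
/* Sketch of the bookkeeping the fragments above imply: each stat carries
 * an accumulated .value plus the last latched register reading, so 32-bit
 * hardware counters can wrap without losing the running total. Names here
 * are illustrative, not fbnic's.
 */
struct hw_stat32 {
	u64 value;	/* total accumulated since load */
	u32 last;	/* register value at the last read/reset */
};

static void hw_stat32_update(struct hw_stat32 *s, u32 reg_val)
{
	s->value += (u32)(reg_val - s->last);	/* wrap-safe 32-bit delta */
	s->last = reg_val;
}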

/* MediaTek mt7601u TX: the skb's mapped queue becomes a hardware queue
 * index, which in turn selects the USB endpoint for the DMA transfer.
 */
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q);

u8 ep = q2ep(hw_q);	/* inside the enqueue path: hw queue -> endpoint */
int hw_q = skb2q(skb);	/* in the caller: skb -> hw queue */

if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
	return;		/* enqueue failed; error handling elided */
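
/* The mapping helpers referenced above are tiny; sketches of the shapes
 * involved (the driver's actual bodies may differ): mac80211 numbers its
 * queues VO=0..BK=3 while this hardware counts the other way, and USB TX
 * endpoints start at 1 because EP0 is the control endpoint.
 */
static inline u8 my_q2hwq(u8 q)
{
	return q ^ 0x3;		/* reverse mac80211 AC order for the hw */
}

static inline u8 my_q2ep(u8 hw_q)
{
	return hw_q + 1;	/* skip EP0 */
}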

/* mt7601u conf_tx: WMM/EDCA setup for one access category; the packed
 * per-AC fields are updated read-modify-write under per-queue shifts.
 */
u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);

if (!hw_q)	/* AC0 gets special TXOP treatment; val setup elided */
	val |= 0x60;
mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);

val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
val = mt76_rr(dev, MT_WMM_AIFSN);
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_AIFSN, val);
val = mt76_rr(dev, MT_WMM_CWMIN);
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_CWMIN, val);
val = mt76_rr(dev, MT_WMM_CWMAX);
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
mt76_wr(dev, MT_WMM_CWMAX, val);
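
/* The four read-modify-write blocks above share one shape; a hypothetical
 * helper (not in the driver) makes the pattern explicit:
 */
static void mt76_rmw_per_q(struct mt7601u_dev *dev, u32 reg, u32 mask,
			   u8 shift, u32 field)
{
	u32 val = mt76_rr(dev, reg);

	val &= ~(mask << shift);
	val |= field << shift;
	mt76_wr(dev, reg, val);
}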

/* Elsewhere hw_q names a type rather than an index: a hardware-queue
 * object plus the next-pointer needed for deletion-safe iteration.
 */
struct hw_q *q;
struct hw_q *q_next;
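
/* The q/q_next pair is the classic shape for walking a list while freeing
 * entries; a minimal sketch under that assumption (struct layout, list
 * head name and teardown are hypothetical):
 */
struct hw_q {
	struct list_head node;	/* linkage on the device's queue list */
};

list_for_each_entry_safe(q, q_next, &dev->hw_q_list, node) {
	list_del(&q->node);	/* safe: the iterator already holds q_next */
	kfree(q);
}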