/* tx_queue: collected uses */
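/* genet: per-queue TX state embedded in struct gen_softc, serviced by
   gen_txintr() */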
struct tx_queue *q;
q = &sc->tx_queue[DEF_TXQUEUE];
gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
struct tx_queue tx_queue[NTXQUEUE];
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
struct tx_queue *q;
q = &sc->tx_queue[queue];
sc->tx_queue[i].queue = i;
sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
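/*
 * A minimal sketch of the pattern above, with stub types standing in
 * for the real driver's: the softc embeds one tx_queue per ring, each
 * tagged with its software index and the hardware DMA ring it maps to,
 * and the interrupt path hands a single queue to gen_txintr(). The
 * gen_intr() wrapper and the stub fields are assumptions.
 */
#define NTXQUEUE	2
#define DEF_TXQUEUE	0

struct tx_queue {
	int	queue;		/* software queue index */
	int	hwindex;	/* hardware DMA ring backing this queue */
};

struct gen_softc {
	struct tx_queue	tx_queue[NTXQUEUE];
};

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	/* reclaim completed descriptors on ring q->hwindex ... */
	(void)sc; (void)q;
}

static void
gen_intr(struct gen_softc *sc)
{
	/* only the default queue is serviced here */
	gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
}

/*
 * The scalar declarations below are unrelated fields that happen to
 * share the name; the enetc/iflib lines after them register one TX
 * softirq per queue.
 */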
uint8_t tx_queue;
uint16_t tx_queue;

struct enetc_tx_queue *tx_queue;

tx_queue = &sc->tx_queues[i];
iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
    IFLIB_INTR_TX, tx_queue, i, irq_name);
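/*
 * A sketch of the registration loop implied above, assuming an iflib
 * driver context: sc and ctx are the usual softc and iflib context,
 * and tx_num_queues is assumed to be the softc's queue count. One
 * software interrupt is allocated per TX queue, named "txqN".
 */
char irq_name[8];
int i;

for (i = 0; i < sc->tx_num_queues; i++) {
	struct enetc_tx_queue *tx_queue = &sc->tx_queues[i];

	snprintf(irq_name, sizeof(irq_name), "txq%d", i);
	iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
	    IFLIB_INTR_TX, tx_queue, i, irq_name);
}

/* qcom_ess_edma: the TX ISR acks and masks the queue's interrupt,
   then defers descriptor reclaim to a per-queue taskqueue */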
int tx_queue = intr->irq_rid;
(void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, tx_queue,
    false);	/* assumed flag: mask TX interrupts until the completion task runs */
taskqueue_enqueue(sc->sc_tx_state[tx_queue].completion_tq,
&sc->sc_tx_state[tx_queue].completion_task);
qcom_ess_edma_hw_intr_tx_ack(struct qcom_ess_edma_softc *sc, int tx_queue)
EDMA_RING_LOCK_ASSERT(&sc->sc_tx_ring[tx_queue]);
EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, (1U << tx_queue));
extern int	qcom_ess_edma_hw_intr_tx_ack(struct qcom_ess_edma_softc *sc,
		    int tx_queue);	/* return type assumed */
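/*
 * A sketch of the ISR pattern above, assuming a filter-style handler;
 * the handler name and the intr->sc field are assumptions. The ISR
 * acks the queue's interrupt bit, masks further TX interrupts for that
 * queue, and defers reclaim to the per-queue completion task.
 */
static int
qcom_ess_edma_tx_intr(void *arg)
{
	struct qcom_ess_edma_intr *intr = arg;
	struct qcom_ess_edma_softc *sc = intr->sc;	/* assumed field */
	int tx_queue = intr->irq_rid;

	(void) qcom_ess_edma_hw_intr_tx_ack(sc, tx_queue);
	(void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, tx_queue,
	    false);	/* assumed flag */
	taskqueue_enqueue(sc->sc_tx_state[tx_queue].completion_tq,
	    &sc->sc_tx_state[tx_queue].completion_task);

	return (FILTER_HANDLED);
}

/* ecore_ll2 (qlnx): LL2 connection TX queue (allocation, send path,
   completion, teardown) */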
struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
&p_ll2_info->tx_queue.txq_chain, OSAL_NULL);
capacity = ecore_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
p_ll2_info->tx_queue.descq_array = p_descq;
p_pkt = &p_ll2_conn->tx_queue.cur_completing_packet;
if (!p_ll2_conn->tx_queue.b_completing_packet || !p_addr)
&p_ll2_info->tx_queue.tx_sb_index,
&p_ll2_info->tx_queue.p_fw_cons);
p_ll2_info->tx_queue.b_cb_registred = true;
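/*
 * Setup wires a TX completion handler to the connection's status-block
 * index and records the firmware consumer pointer; b_cb_registred (the
 * source's own spelling) is the flag that ECORE_LL2_TX_REGISTERED()
 * tests further down.
 */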
if (p_ll2_conn->tx_queue.cur_completing_bd_idx == p_pkt->bd_used)
cur_frag_idx = p_ll2_conn->tx_queue.cur_completing_bd_idx++;
p_tx = &p_ll2_conn->tx_queue;
p_ll2_conn->tx_queue.cur_completing_bd_idx;
p_tx = &p_ll2_conn->tx_queue;
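/*
 * A sketch of the completion walk suggested above: the BD index of the
 * packet being completed advances one fragment per step, and the
 * packet is finished once every BD it used has been consumed. The loop
 * framing and the u16 type are assumptions; only the index and flag
 * arithmetic comes from the lines above. p_tx and p_pkt are as set up
 * there.
 */
while (p_tx->b_completing_packet &&
    p_pkt->bd_used > p_tx->cur_completing_bd_idx) {
	u16 cur_frag_idx = p_tx->cur_completing_bd_idx++;
	/* ... release the buffer behind fragment cur_frag_idx ... */
}
if (p_tx->cur_completing_bd_idx == p_pkt->bd_used)
	p_tx->b_completing_packet = false;

/* send path: one BD produced from txq_chain per fragment */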
struct ecore_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
if (p_ll2_conn->tx_queue.cur_send_frag_num !=
p_ll2_conn->tx_queue.cur_send_packet->bd_used)
OSAL_LIST_PUSH_TAIL(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
&p_ll2_conn->tx_queue.sending_descq);
p_ll2_conn->tx_queue.cur_send_packet = OSAL_NULL;
p_ll2_conn->tx_queue.cur_send_frag_num = 0;
bd_prod = ecore_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
p_tx = &p_ll2_conn->tx_queue;
if (!p_ll2_conn->tx_queue.cur_send_packet)
p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
p_ll2_conn->tx_queue.cur_send_frag_num++;
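/*
 * A sketch of the fragment-at-a-time send path suggested above: each
 * call produces one BD from txq_chain and bumps cur_send_frag_num;
 * once the packet's last fragment has been queued, the packet moves to
 * sending_descq and the current-packet slot is cleared. Control flow
 * is an assumption assembled from the lines above.
 */
p_ll2_conn->tx_queue.cur_send_frag_num++;
if (p_ll2_conn->tx_queue.cur_send_frag_num ==
    p_ll2_conn->tx_queue.cur_send_packet->bd_used) {
	OSAL_LIST_PUSH_TAIL(
	    &p_ll2_conn->tx_queue.cur_send_packet->list_entry,
	    &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = OSAL_NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;
}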
OSAL_SPIN_LOCK_IRQSAVE(&p_ll2_conn->tx_queue.lock, flags);
OSAL_SPIN_UNLOCK_IRQSAVE(&p_ll2_conn->tx_queue.lock, flags);
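/*
 * The send-path state above is guarded by the queue's IRQ-save
 * spinlock; the lock is allocated per connection and freed with it
 * (see the OSAL_SPIN_LOCK_ALLOC/DEALLOC pair below).
 */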
p_ll2_conn->tx_queue.b_cb_registred = false;
p_ll2_conn->tx_queue.tx_sb_index);
OSAL_FREE(p_hwfn->p_dev, p_ll2_conn->tx_queue.descq_array);
ecore_chain_free(p_hwfn->p_dev, &p_ll2_conn->tx_queue.txq_chain);
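/*
 * Teardown mirrors setup in reverse: clear the callback registration,
 * release the status-block index, then free the descriptor array and
 * the BD chain.
 */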
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_ll2_info[i].tx_queue.lock))
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ll2_info[i].tx_queue.lock);
struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
#define ECORE_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct ecore_ll2_tx_queue tx_queue;
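/*
 * Pulling the fields exercised above together, the TX-queue state
 * looks roughly like this. Member types are assumptions made for the
 * sketch; "b_cb_registred" is the source's own spelling.
 */
struct ecore_ll2_tx_queue {
	osal_spinlock_t			lock;		/* IRQ-save send-path lock */
	struct ecore_chain		txq_chain;	/* BD producer/consumer chain */
	struct ecore_ll2_tx_packet	*descq_array;	/* per-descriptor bookkeeping */
	osal_list_t			sending_descq;	/* packets handed to firmware */
	u8				tx_sb_index;	/* status-block index */
	__le16				*p_fw_cons;	/* firmware consumer pointer */
	bool				b_cb_registred;	/* completion callback installed */
	struct ecore_ll2_tx_packet	*cur_send_packet;
	u16				cur_send_frag_num;
	struct ecore_ll2_tx_packet	cur_completing_packet;
	u16				cur_completing_bd_idx;
	bool				b_completing_packet;
};

/* sdp: TX "queue" reported as bytes outstanding on the ring */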
__u32 rx_queue, tx_queue;
tx_queue = sdp_sk(sk)->write_seq - sdp_sk(sk)->tx_ring.una_seq;
rx_queue, tx_queue, sk->sk_state);
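/*
 * Here tx_queue is a byte count rather than a ring: the gap between
 * the last sequence written (write_seq) and the last one acknowledged
 * (tx_ring.una_seq), i.e. the bytes still in flight, reported together
 * with the socket state.
 */

/* bhyve virtio-console: selecting a port's RX or TX virtqueue */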
pci_vtcon_port_to_vq(struct pci_vtcon_port *port, bool tx_queue)
qnum = tx_queue ? port->vsp_txq : port->vsp_rxq;
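/*
 * A sketch of the surrounding function, assuming the usual bhyve
 * shape: the port records its RX and TX virtqueue numbers and the
 * bool picks between them. The return type and the vsp_sc/vsc_queues
 * fields are assumptions.
 */
static struct vqueue_info *
pci_vtcon_port_to_vq(struct pci_vtcon_port *port, bool tx_queue)
{
	int qnum;

	qnum = tx_queue ? port->vsp_txq : port->vsp_rxq;
	return (&port->vsp_sc->vsc_queues[qnum]);
}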