tx_bd
struct greth_bd *rx_bd, *tx_bd;
tx_bd = greth->tx_bd_base;
greth_write_bd(&tx_bd[i].addr, dma_addr);
greth_write_bd(&tx_bd[i].stat, 0);
/* Number of TX buffer descriptors that fit in one BNGE hardware page.
 * NOTE(review): truncating integer division — assumes BNGE_PAGE_SIZE is an
 * exact multiple of sizeof(struct tx_bd); confirm against the descriptor
 * layout, otherwise trailing page bytes are simply unused.
 */
#define TX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_bd))
/* Byte size of one page-worth of TX descriptors (TX_DESC_CNT entries);
 * by construction this is <= BNGE_PAGE_SIZE.
 */
#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
struct tx_bd *txbd, *txbd0;
struct eth_tx_start_bd **tx_bd, u16 hlen,
struct eth_tx_start_bd *h_tx_bd = *tx_bd;
*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
i, cos, j, tx_bd[0], tx_bd[1],
tx_bd[2], tx_bd[3]);
struct tx_bd *txbd, *txbd0;
/* Number of TX buffer descriptors that fit in one BNXT hardware page.
 * NOTE(review): truncating integer division — assumes BNXT_PAGE_SIZE is an
 * exact multiple of sizeof(struct tx_bd); confirm against the descriptor
 * layout, otherwise trailing page bytes are simply unused.
 */
#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
/* Byte size of one page-worth of TX descriptors (TX_DESC_CNT entries);
 * by construction this is <= BNXT_PAGE_SIZE.
 */
#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
struct tx_bd *txbd;
struct rtw89_pci_tx_bd_32 *tx_bd, *head;
tx_bd = head + bd_ring->wp;
return tx_bd;
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
mspi->tx_bd = cpm_muram_addr(bds_ofs);
mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
iowrite16be(cpm_muram_offset(mspi->tx_bd), &mspi->pram->tbase);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
iowrite32be(mspi->tx_dma, &tx_bd->cbd_bufaddr);
iowrite32be(mspi->tx_dma + xfer_ofs, &tx_bd->cbd_bufaddr);
iowrite16be(xfer_len, &tx_bd->cbd_datlen);
&tx_bd->cbd_sc);
struct cpm_buf_desc __iomem *tx_bd;
qe_port->tx_bd_base = uccs->tx_bd;
struct qe_bd __iomem *tx_bd; /* next BD for new Tx request */