tx_desc — cross-reference of the tx_desc symbol across NIC driver sources (bge, dmfe, e1000g, nge, rge, dnet, and several others)
areap = &bgep->tx_desc;
dma_area_t tx_desc; /* transmit descriptors */
(knp++)->value.ui64 = bgep->tx_desc.cookie.dmac_laddress;
DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
area = bgep->tx_desc;
bge_free_dma_mem(&bgep->tx_desc);
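/*
 * A minimal sketch of the alloc/bind/free pattern behind the
 * bge_alloc_dma_mem()/bge_free_dma_mem() calls above, which carve the
 * TX descriptor rings out of one DDI_DMA_CONSISTENT area whose
 * cookie.dmac_laddress is later exported through a kstat.  The
 * dma_area_t layout here is an assumption, not bge's definition, and
 * error unwinding is omitted for brevity.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct {
	ddi_dma_handle_t dma_hdl;
	ddi_acc_handle_t acc_hdl;
	caddr_t mem_va;			/* kernel VA of the area */
	size_t alength;			/* length actually allocated */
	ddi_dma_cookie_t cookie;	/* device-visible DMA address */
} sk_dma_area_t;

static int
sk_alloc_dma_mem(dev_info_t *dip, size_t len, ddi_dma_attr_t *attr,
    ddi_device_acc_attr_t *accattr, uint_t flags, sk_dma_area_t *dmap)
{
	uint_t ncookies;

	if (ddi_dma_alloc_handle(dip, attr, DDI_DMA_SLEEP, NULL,
	    &dmap->dma_hdl) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (ddi_dma_mem_alloc(dmap->dma_hdl, len, accattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmap->mem_va,
	    &dmap->alength, &dmap->acc_hdl) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (ddi_dma_addr_bind_handle(dmap->dma_hdl, NULL, dmap->mem_va,
	    dmap->alength, flags, DDI_DMA_SLEEP, NULL, &dmap->cookie,
	    &ncookies) != DDI_DMA_MAPPED)
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}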
struct tx_desc *desc; /* KVA of descriptor ring */
offset * sizeof (struct tx_desc), 0,
0, eq->pidx * sizeof (struct tx_desc),
(eq->pidx - eq->pending) * sizeof (struct tx_desc),
eq->pending * sizeof (struct tx_desc),
sizeof (struct tx_desc) / sizeof (uint64_t);
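/*
 * A minimal sketch of the wrap-aware descriptor sync implied by the
 * eq->pidx/eq->pending arithmetic above: flush the 'pending'
 * descriptors that end at the producer index to the device, in two
 * ddi_dma_sync() calls when the dirty range wraps past the end of the
 * ring.  The struct layout is an assumption; the 8-flit (64-byte)
 * descriptor size is suggested, not proven, by the listing's
 * sizeof (struct tx_desc) / sizeof (uint64_t) expression.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>

struct tx_desc { uint64_t flit[8]; };	/* assumed 64-byte descriptor */

struct sk_eq {
	ddi_dma_handle_t desc_hdl;	/* handle for the descriptor ring */
	uint16_t cap;			/* ring capacity, in descriptors */
	uint16_t pidx;			/* producer index */
	uint16_t pending;		/* descriptors not yet synced */
};

static void
sk_sync_pending(struct sk_eq *eq)
{
	if (eq->pending > eq->pidx) {
		/* Dirty range wraps: sync the ring tail, then the head. */
		uint16_t tail = eq->pending - eq->pidx;

		(void) ddi_dma_sync(eq->desc_hdl,
		    (eq->cap - tail) * sizeof (struct tx_desc),
		    tail * sizeof (struct tx_desc), DDI_DMA_SYNC_FORDEV);
		(void) ddi_dma_sync(eq->desc_hdl, 0,
		    eq->pidx * sizeof (struct tx_desc), DDI_DMA_SYNC_FORDEV);
	} else {
		(void) ddi_dma_sync(eq->desc_hdl,
		    (eq->pidx - eq->pending) * sizeof (struct tx_desc),
		    eq->pending * sizeof (struct tx_desc),
		    DDI_DMA_SYNC_FORDEV);
	}
	eq->pending = 0;
}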
dma_area_t tx_desc; /* transmit descriptors */
descp = &dmfep->tx_desc;
descp = &dmfep->tx_desc;
dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS + index/2,
&dmfep->tx_desc);
dmfe_free_dma_mem(&dmfep->tx_desc);
bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
descp = &dmfep->tx_desc;
descp = &dmfep->tx_desc;
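/*
 * A minimal sketch of the Tulip-style setup frame the dmfe lines
 * manipulate: the frame lives inside the tx_desc DMA area (setup_va),
 * is SETUPBUF_SIZE bytes of 32-bit words, and each word carries only
 * 16 bits of filter data in its low half, which is presumably why the
 * listing scales the index by /2.  The slot layout below (three words
 * of one little-endian halfword each per filter address) is an
 * assumption.
 */
#include <sys/types.h>

static void
sk_setup_put_addr(uint32_t *setup_va, uint_t slot, const uint8_t *macaddr)
{
	uint_t i;

	/* Three words per slot, 16 address bits (one LE halfword) each. */
	for (i = 0; i < 3; i++) {
		setup_va[slot * 3 + i] =
		    macaddr[i * 2] | ((uint32_t)macaddr[i * 2 + 1] << 8);
	}
}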
struct e1000_tx_desc *tx_desc;
tx_desc = &(tx_ring->tbd_first[hw_tdt]);
length += tx_desc->lower.flags.length;
eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
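/*
 * A minimal sketch of the end-of-packet walk the e1000g lines imply:
 * starting at the hardware tail index (hw_tdt), sum the buffer lengths
 * of a multi-descriptor packet until the descriptor whose command bits
 * carry E1000_TXD_CMD_EOP.  The descriptor mirrors the Intel legacy
 * layout; the ring bookkeeping and the little-endian bitfield overlay
 * are assumptions.
 */
#include <sys/types.h>

#define	SK_TXD_CMD_EOP	0x01000000	/* end of packet, bit 24 of lower */

struct sk_tx_desc {
	uint64_t buffer_addr;
	union {
		uint32_t data;
		struct {
			uint16_t length;	/* data buffer length */
			uint8_t cso;		/* checksum offset */
			uint8_t cmd;		/* command bits (incl. EOP) */
		} flags;
	} lower;
	uint32_t upper;
};

static uint32_t
sk_packet_length(struct sk_tx_desc *ring, uint32_t nslots, uint32_t tdt)
{
	uint32_t length = 0;
	uint32_t eop;

	do {
		struct sk_tx_desc *txd = &ring[tdt];

		length += txd->lower.flags.length;
		eop = txd->lower.data & SK_TXD_CMD_EOP;
		tdt = (tdt + 1) % nslots;	/* wrap at the ring end */
	} while (!eop);

	return (length);
}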
tx_desc.value = 0;
hpi_desc_handle, &tx_desc,
tx_desc.value = 0;
save_desc_p = &tx_desc;
tmp_desc_p = &tx_desc;
tx_desc_t tx_desc, *tmp_desc_p;
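/*
 * A minimal sketch of the "descriptor as one 64-bit union" idiom
 * behind the tx_desc_t lines above: clear .value, fill the bitfields,
 * then publish the whole descriptor to its ring slot with a single
 * 64-bit store (the save_desc_p/tmp_desc_p pointers above juggle such
 * staged copies).  This bit layout is an assumption, not the
 * hxge/nxge hardware format; the same idiom recurs in the second
 * tx_desc_t cluster further down.
 */
#include <sys/types.h>

typedef union {
	uint64_t value;
	struct {
		uint64_t sad:44;	/* buffer DMA address */
		uint64_t tr_len:13;	/* transfer length, <= 8191 */
		uint64_t num_ptr:4;	/* gather pointers in packet */
		uint64_t mark:1;	/* interrupt-on-completion mark */
		uint64_t sop:1;		/* start of packet */
		uint64_t pad:1;
	} bits;
} sk_txdesc_t;

static void
sk_build_desc(volatile uint64_t *slot, uint64_t busaddr, uint16_t len,
    boolean_t sop)
{
	sk_txdesc_t d;

	d.value = 0;			/* like tx_desc.value = 0 above */
	d.bits.sad = busaddr;
	d.bits.tr_len = len;		/* caller keeps len in 13 bits */
	d.bits.sop = (sop != B_FALSE);
	*slot = d.value;		/* one 64-bit store to the ring */
}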
uint32_t tx_desc;
ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
ngep->tx_desc = dev_param_p->tx_desc_num;
ngep->tx_desc = NGE_SEND_JB2500_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_JB4500_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_JB9000_SLOTS_DESC;
ngep->tx_desc = NGE_SEND_LOWMEM_SLOTS_DESC;
ngep->tx_desc =
txbuffsize = ngep->tx_desc * ngep->buf_size;
txdescsize = ngep->tx_desc;
srp->desc.nslots = ngep->tx_desc;
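/*
 * A minimal sketch of the sizing policy in the nge lines above: the TX
 * slot count is chosen from the buffer size (smaller rings for jumbo
 * buffers, a minimal ring when memory is tight, or a tunable override
 * like dev_param_p->tx_desc_num), and the ring allocation is then
 * derived from it, as in txbuffsize = ngep->tx_desc * ngep->buf_size.
 * The constants are assumptions in the spirit of the
 * NGE_SEND_JB*_SLOTS_DESC defines.
 */
#include <sys/types.h>

#define	SK_SLOTS_JB2500	3072	/* ~2.5KB buffers */
#define	SK_SLOTS_JB4500	2048	/* ~4.5KB buffers */
#define	SK_SLOTS_JB9000	1024	/* ~9KB buffers */
#define	SK_SLOTS_LOWMEM	256	/* memory-constrained systems */

static uint32_t
sk_tx_slots(size_t buf_size, boolean_t lowmem, uint32_t tunable)
{
	if (tunable != 0)
		return (tunable);	/* explicit driver.conf override */
	if (lowmem)
		return (SK_SLOTS_LOWMEM);
	if (buf_size <= 2500)
		return (SK_SLOTS_JB2500);
	if (buf_size <= 4500)
		return (SK_SLOTS_JB4500);
	return (SK_SLOTS_JB9000);
}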
int i, ring, tx_desc, rx_desc, rx_jdesc, maxrx;
tx_desc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
if (tx_desc >= 256 && tx_desc <= MAX_CMD_DESCRIPTORS && ISP2(tx_desc)) {
adapter->MaxTxDescCount = tx_desc;
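/*
 * A minimal sketch of the tunable validation above: read an integer
 * property (the property name is truncated in the listing; "tx_desc"
 * is an assumption based on the variable name) and accept it only if
 * it is at least 256, at most the hardware ceiling, and a power of
 * two per ISP2().  The ceiling and default values are assumptions.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>	/* ISP2() */

#define	SK_MAX_CMD_DESCRIPTORS	4096	/* assumed hardware ceiling */
#define	SK_DEF_TX_DESC		1024	/* assumed default */

static uint32_t
sk_get_tx_desc_count(dev_info_t *dip)
{
	int tx_desc;

	tx_desc = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "tx_desc", SK_DEF_TX_DESC);
	if (tx_desc >= 256 && tx_desc <= SK_MAX_CMD_DESCRIPTORS &&
	    ISP2(tx_desc))
		return ((uint32_t)tx_desc);
	return (SK_DEF_TX_DESC);	/* reject out-of-range values */
}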
tx_desc_t tx_desc, *tmp_desc_p;
tx_desc.value = 0;
&tx_desc,
tx_desc.value = 0;
save_desc_p = &tx_desc;
tmp_desc_p = &tx_desc;
dma_area_t tx_desc;
val32 = rgep->tx_desc.cookie.dmac_laddress;		/* low 32 bits of the TX ring base */
val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;	/* high 32 bits */
DMA_ZERO(rgep->tx_desc);
DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
rgep->tx_desc = rgep->dma_area_txdesc;
DMA_ZERO(rgep->tx_desc);
rgep->tx_ring = rgep->tx_desc.mem_va;
desc = rgep->tx_desc;
DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
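/*
 * A minimal sketch of the register programming implied by the rge
 * lines: after the ring is zeroed and synced FORDEV, the 64-bit
 * cookie.dmac_laddress is split into 32-bit halves and written to a
 * low/high pair of descriptor base-address registers.  The register
 * offsets are assumptions.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>

#define	SK_TXBASE_LO	0x20	/* assumed low-half register offset */
#define	SK_TXBASE_HI	0x24	/* assumed high-half register offset */

static void
sk_set_tx_base(ddi_acc_handle_t regs, caddr_t io_base, uint64_t dmac_laddress)
{
	ddi_put32(regs, (uint32_t *)(io_base + SK_TXBASE_LO),
	    (uint32_t)dmac_laddress);		/* low 32 bits */
	ddi_put32(regs, (uint32_t *)(io_base + SK_TXBASE_HI),
	    (uint32_t)(dmac_laddress >> 32));	/* high 32 bits */
}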
struct tx_desc *TxDescQRead; /* The next ring entry to be freed */
struct tx_desc *TxDescQWrite; /* The next free ring entry */
struct tx_desc *TxDescQStart; /* The start of the ring entries */
struct tx_desc *TxDescQEnd; /* The end of the ring entries */
struct tx_desc *Tx_desc;
struct tx_desc *pTx_desc = adapter->pMil->pNonphysical->TxDescQStart;
length = sizeof (struct tx_desc) * TX_RING_SIZE + ALIGNMENT;
pMil->Tx_desc = (struct tx_desc *)
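/*
 * A minimal sketch of the over-allocate-and-align pattern in the
 * TxDescQ lines above: the ring is allocated ALIGNMENT bytes larger
 * than sizeof (struct tx_desc) * TX_RING_SIZE, and the start pointer
 * is rounded up so the descriptors sit on the boundary the hardware
 * requires; Read/Write/Start/End then delimit the usable entries.
 * The constants are assumptions.
 */
#include <sys/types.h>

#define	SK_TX_RING_SIZE	256
#define	SK_ALIGNMENT	4096	/* must be a power of two */

struct tx_desc;			/* hardware-defined descriptor */

static struct tx_desc *
sk_align_ring(void *raw)
{
	uintptr_t p = (uintptr_t)raw;

	/* Round up to the next SK_ALIGNMENT boundary. */
	p = (p + SK_ALIGNMENT - 1) & ~(uintptr_t)(SK_ALIGNMENT - 1);
	return ((struct tx_desc *)p);
}

/*
 * Typical use: TxDescQStart = sk_align_ring(raw), TxDescQEnd =
 * TxDescQStart + SK_TX_RING_SIZE, and Read/Write both begin at Start.
 */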
desc = &dnetp->tx_desc[current_desc];
struct tx_desc_type *ring = dnetp->tx_desc;
struct tx_desc_type *descp = &(dnetp->tx_desc[index]);
if ((dnetp->tx_desc != NULL) &&
if (dnetp->tx_desc == NULL) {
(caddr_t *)&dnetp->tx_desc, &len,
NULL, (caddr_t)dnetp->tx_desc,
bzero(dnetp->tx_desc, len);
if (dnetp->tx_desc != NULL) {
dnetp->tx_desc = NULL;
*(uint32_t *)&dnetp->tx_desc[i].desc0 = 0;
*(uint32_t *)&dnetp->tx_desc[i].desc1 = 0;
dnetp->tx_desc[i].buffer1 = 0;
dnetp->tx_desc[i].buffer2 = 0;
dnetp->tx_desc[i - 1].desc1.end_of_ring = 1;
struct tx_desc_type *ring = dnetp->tx_desc;
struct tx_desc_type *desc = dnetp->tx_desc;
desc = dnetp->tx_desc;
struct tx_desc_type *tx_desc; /* virtual addr of xmit desc */
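/*
 * A minimal sketch of the dnet-style ring reset above: clear each
 * descriptor's two control words through a 32-bit overlay (as the
 * *(uint32_t *)&...desc0 = 0 casts do), clear both buffer pointers,
 * and set end_of_ring on the last entry so the chip wraps back to the
 * start.  The bitfield positions are assumptions standing in for
 * struct tx_desc_type.
 */
#include <sys/types.h>

struct sk_desc {
	union {
		uint32_t word;
		struct { uint32_t stat:31; uint32_t own:1; } b;
	} desc0;
	union {
		uint32_t word;
		struct { uint32_t ctl:31; uint32_t end_of_ring:1; } b;
	} desc1;
	uint32_t buffer1;	/* physical address of first buffer */
	uint32_t buffer2;	/* second buffer or chain pointer */
};

static void
sk_reset_ring(struct sk_desc *ring, int nslots)
{
	int i;

	for (i = 0; i < nslots; i++) {
		ring[i].desc0.word = 0;
		ring[i].desc1.word = 0;
		ring[i].buffer1 = 0;
		ring[i].buffer2 = 0;
	}
	ring[nslots - 1].desc1.b.end_of_ring = 1;	/* wrap marker */
}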