tbi: hits for the identifier "tbi" across the Linux tree, grouped by subsystem. The same three letters name at least five unrelated things: an alpha TLB-invalidate PALcall, the arm64 Top Byte Ignore feature, a Ten-Bit Interface PHY, vmxnet3's per-descriptor tx buffer info, and a buffer-descriptor table index in the bdc UDC driver.
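arch/alpha: here tbi() is the PALcode "translation buffer invalidate" wrapper. In the alpha TLB-flush helpers, type 2 invalidates a single data-TLB entry and type 3 invalidates both the I- and D-TLB entries for an address; callers can also pass the type through a variable or a structure describing a cross-CPU flush request, as the last two hits do.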
tbi(2, addr);                   /* invalidate one data-TLB entry for addr */
tbi(tbi_type, addr);            /* invalidate type chosen by the caller */
tbi(d->tbi_type, d->addr);      /* type and address carried in a flush-request struct */
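Over to arm64, where TBI is the architectural Top Byte Ignore feature. This first cluster matches KVM's stage-1 walk setup (arch/arm64/kvm/at.c): the effective TBI bit comes from TCR_EL2.TBI for the EL2 regime, otherwise from TCR.TBI1 or TCR.TBI0 according to VA bit 55, and with TBI clear the VA must sign-extend cleanly from bit 55. The truncated ternary is completed from that layout: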
bool va55, tbi, lva;
tbi = (wi->regime == TR_EL2 ?
       FIELD_GET(TCR_EL2_TBI, tcr) :
       (va55 ? FIELD_GET(TCR_TBI1, tcr) : FIELD_GET(TCR_TBI0, tcr)));
if (!tbi && (u64)sign_extend64(va, 55) != va)
        goto addrsz;    /* with TBI off, a non-canonical VA is an address size fault */
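A second arm64 KVM hit, by the shape of it the effective_tbi() helper in the pointer-auth emulation: the top byte only counts as tag material when the relevant TCR TBI bit is set and the matching TBID bit is clear. A sketch of the three TCR formats involved; the TBID reads, missing from the raw hits, are added back as an assumption: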
bool tbi, tbid;
if (!vcpu_el2_e2h_is_set(vcpu)) {
        tbi = tcr & BIT(20);    /* nVHE TCR_EL2.TBI */
        tbid = tcr & BIT(29);   /* nVHE TCR_EL2.TBID */
} else if (bit55) {
        tbi = tcr & TCR_TBI1;   tbid = tcr & TCR_TBID1;
} else {
        tbi = tcr & TCR_TBI0;   tbid = tcr & TCR_TBID0;
}
return tbi && !tbid;    /* top byte is tag material only if TBI=1 and TBID=0 */
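To make the arm64 checks concrete, a small user-space model of the kernel's sign_extend64(va, 55); the address and the 0x5a tag are arbitrary example values:

#include <assert.h>
#include <stdint.h>

/* models sign_extend64(va, 55): replicate bit 55 into bits 63:56 */
static uint64_t sext55(uint64_t va)
{
        return (uint64_t)((int64_t)(va << 8) >> 8);
}

int main(void)
{
        uint64_t va = 0x0000ffff12345678ULL;    /* canonical user VA */
        uint64_t tagged = va | (0x5aULL << 56); /* top-byte tag */

        assert(sext55(va) == va);               /* passes the !tbi check */
        assert(sext55(tagged) != tagged);       /* would fault unless TBI=1 */
        return 0;
}

An entirely different tbi next: in the Freescale PQ MDIO driver, TBI is the Ten-Bit Interface PHY used for SGMII. Probe scans the bus node for a child of type "tbi-phy" and programs its address from the "reg" property; the orphaned "tbi);" lines in the raw hits are the tails of dev_dbg()/dev_err() calls printing that node. Breaking out of for_each_child_of_node() keeps a reference on the match, hence the of_node_put():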
struct device_node *tbi;

/* find the first child node of type "tbi-phy" */
for_each_child_of_node(np, tbi) {
        if (of_node_is_type(tbi, "tbi-phy"))
                break;  /* dev_dbg() logs the node (%pOFP) here */
}
if (tbi) {
        const u32 *prop = of_get_property(tbi, "reg", NULL);
        /* a missing "reg" is dev_err()'d with the node and treated as fatal */
}
of_node_put(tbi);       /* both paths drop the ref held by the early break */
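On to vmxnet3, which accounts for most of the remaining hits: tbi is a struct vmxnet3_tx_buf_info, the driver's per-descriptor bookkeeping for the tx ring. Each entry records how its buffer was mapped so teardown can undo exactly that mapping; the truncated dma_unmap_*() calls complete as: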
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, struct pci_dev *pdev)
{
        u32 map_type = tbi->map_type;

        if (map_type & VMXNET3_MAP_SINGLE)
                dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len, DMA_TO_DEVICE);
        else if (map_type & VMXNET3_MAP_PAGE)
                dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len, DMA_TO_DEVICE);
        tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
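Completion processing (this is the shape of vmxnet3_unmap_pkt()): the entry at the end-of-packet index owns the packet, which is either an XDP frame or an skb, freed through different paths: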
struct vmxnet3_tx_buf_info *tbi;

tbi = &tq->buf_info[eop_idx];
BUG_ON(!tbi->skb);
map_type = tbi->map_type;
if (map_type & VMXNET3_MAP_XDP)
        xdp_return_frame_bulk(tbi->xdpf, bq);
else
        dev_kfree_skb_any(tbi->skb);
tbi->skb = NULL;
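Queue cleanup walks every entry still outstanding between next2comp and next2fill and does the same unmap-then-free dance; a sketch of the loop body the hits belong to: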
struct vmxnet3_tx_buf_info *tbi;

tbi = tq->buf_info + tq->tx_ring.next2comp;
map_type = tbi->map_type;
vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
if (tbi->skb) {
        if (map_type & VMXNET3_MAP_XDP)
                xdp_return_frame_bulk(tbi->xdpf, &bq);
        else
                dev_kfree_skb_any(tbi->skb);
        tbi->skb = NULL;
}
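The transmit mapping side, vmxnet3_map_pkt(): headers staged through the driver's bounce buffer get MAP_NONE, the linear part of the skb is mapped with dma_map_single() in ring-buffer-sized chunks, and page frags go through skb_frag_dma_map(). The truncated calls are completed on that pattern (buf_offset, buf_size and frag are loop state assumed from context, and the final timestamp-ring line is completed on the assumption that it computes ctx->ts_txd):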
struct vmxnet3_tx_buf_info *tbi = NULL;

/* headers staged in the bounce buffer need no mapping of their own */
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;

/* linear part of the skb, one ring-sized chunk at a time */
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_SINGLE;
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
                               skb->data + buf_offset, buf_size,
                               DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
        return -EFAULT;
tbi->len = buf_size;
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);

/* page fragments */
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_PAGE;
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
                                 buf_offset, buf_size, DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
        return -EFAULT;
tbi->len = buf_size;
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);

/* the last entry owns the skb and records the start-of-packet index */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
/* newer devices keep a parallel timestamp ring, indexed by sop_idx */
ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base +
                                          tbi->sop_idx * tq->tx_ts_desc_size);
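The XDP transmit path tags the entry VMXNET3_MAP_XDP up front, then either maps the frame itself (the ndo_xdp_xmit case, ORing in MAP_SINGLE so the shared unmap path undoes it) or reuses the page pool's existing mapping with a DMA sync. The headroom offset added to the page-pool address is an assumption: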
struct vmxnet3_tx_buf_info *tbi = NULL;

tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_XDP;
if (dma_map) {  /* ndo_xdp_xmit: map the frame data ourselves */
        tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
                                       xdpf->data, buf_size, DMA_TO_DEVICE);
        if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
                /* error accounting elided in the original hits */
                return -EFAULT;
        }
        tbi->map_type |= VMXNET3_MAP_SINGLE;
} else {        /* frame lives in a page-pool page that is already mapped */
        tbi->dma_addr = page_pool_get_dma_addr(page) +
                        VMXNET3_XDP_HEADROOM;   /* offset is an assumption */
        dma_sync_single_for_device(&adapter->pdev->dev,
                                   tbi->dma_addr, buf_size, DMA_TO_DEVICE);
}
tbi->xdpf = xdpf;
tbi->len = buf_size;
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;
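The last group is USB, not networking: in the Broadcom BDC device-controller driver, tbi is a table index into bd_table_array, the array of buffer-descriptor tables that makes up one endpoint's bd list, and bdi is a bd index. First the debug dump, which walks every table and prints each bd with its global index: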
int tbi, bdi, gbdi;     /* table idx, per-table bd idx, global bd idx */

for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
        bd_table = bd_list->bd_table_array[tbi];
        for (bdi = 0; bdi < bd_list->num_bds_table; bdi++)
                dev_vdbg(bdc->dev, "tbi:%2d bdi:%2d gbdi:%2d bd:%p dma:%llx\n",
                         tbi, bdi, gbdi++, bd, (unsigned long long)dma);
}
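bd_add_to_bdi() maps a bd's DMA address back to a global index: find the table whose DMA range contains the address, then flatten (table, offset) into one number: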
int tbi, bdi;

for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
        bd_table = bd_list->bd_table_array[tbi];
        /* ... break once bd_dma_addr falls inside this table's range ... */
}
/* bdi is then the bd's offset within the matching table */
return (bdi + (tbi * bd_list->num_bds_table));
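Going the other way is pure arithmetic; bdi_to_tbi() divides a global bd index by the per-table capacity: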
int tbi;

tbi = bdi / ep->bd_list.num_bds_table;
dev_vdbg(ep->bdc->dev, "bdi:%d num_bds_table:%d tbi:%d\n",
         bdi, ep->bd_list.num_bds_table, tbi);

return tbi;
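and bdi_to_bd() recovers the descriptor pointer from the remainder: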
int tbi = bdi_to_tbi(ep, bdi);
int local_bdi;

local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
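A standalone check of that round-trip (the table capacity is a made-up example, not a hardware value):

#include <assert.h>

#define NUM_BDS_TABLE 32        /* hypothetical bds per table */

int main(void)
{
        int bdi = 70;                              /* global bd index */
        int tbi = bdi / NUM_BDS_TABLE;             /* lands in table 2 */
        int local_bdi = bdi - tbi * NUM_BDS_TABLE; /* at offset 6 */

        assert(tbi == 2 && local_bdi == 6);
        assert(local_bdi + tbi * NUM_BDS_TABLE == bdi); /* matches bd_add_to_bdi */
        return 0;
}

Finally, a dequeue fragment: the hardware dequeue pointer is converted back to a global bd index by locating its table and applying the same flattening. The deq_ptr_64 variable and the division completing the truncated last line are assumptions: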
int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;

tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
table = ep->bd_list.bd_table_array[tbi];
curr_hw_dqpi = ((deq_ptr_64 - table->dma_base) / sizeof(struct bdc_bd)) +
               (tbi * ep->bd_list.num_bds_table);