__xa_erase
__xa_erase(&dist->lpi_xa, irq->intid);
__xa_erase(&mte_pages, xa_state.xa_index);
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
queue = __xa_erase(&fpriv->userq_mgr.userq_xa, args->in.queue_id);
__xa_erase(&userq_mgr->userq_xa, queue_id);
__xa_erase(xa, index);
__xa_erase(xa, index);
__xa_erase(&userq->fence_drv_xa, index);
__xa_erase(&guc->context_lookup, id);
__xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
__xa_erase(&ucontext->mmap_xa, i - 1);
__xa_erase(&queries, id);
__xa_erase(&queries, query->id);
__xa_erase(&multicast_table, mc->id);
__xa_erase(&multicast_table, mc->id);
__xa_erase(&multicast_table, mc->id);
__xa_erase(&ep->com.dev->hwtids, ep->hwtid);
__xa_erase(&rhp->qps, qhp->wq.sq.qid);
__xa_erase(&hfi1_dev_table, dd->unit);
__xa_erase(&hr_dev->qp_table.dip_xa, hr_dip->dip_idx);
__xa_erase(xa, hr_qp->qpn);
__xa_erase(&imr->implicit_children, idx);
__xa_erase(&vfio_device_set_xa,
__xa_erase(xa, index);
__xa_erase(&cache->reqs, index);
DBG_BUGON(__xa_erase(&sbi->managed_pslots, pcl->pos) != pcl);
__xa_erase(&btnc->i_pages, oldkey);
f = __xa_erase(&smap->i_pages, index);
/*
 * Erase the entry at @index from @xa and return the entry that was stored
 * there (call sites use the returned pointer, e.g. `page = __xa_erase(...)`).
 * NOTE(review): the double-underscore prefix suggests the caller is expected
 * to hold the xarray's lock — confirm against the definition in xarray.c.
 */
void *__xa_erase(struct xarray *xa, unsigned long index);
entry = __xa_erase(xa, index);
entry = __xa_erase(xa, index);
EXPORT_SYMBOL(__xa_erase);
entry = __xa_erase(xa, index);
netmem_ref netmem = (__force netmem_ref)__xa_erase(
__xa_erase(&devlink->snapshot_ids, id);
__xa_erase(&sk->sk_user_frags, p->tokens[i]);
__xa_erase(&hierarchy->shapers, index);
__xa_erase(&hierarchy->shapers, index);
__xa_erase(&aa_secids, secid);