xa_alloc
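xa_alloc() stores an entry in an XArray that was initialised with XA_FLAGS_ALLOC (for example via DEFINE_XARRAY_ALLOC() or xa_init_flags()) and hands back the allocated index through its u32 pointer argument, constrained by a struct xa_limit such as xa_limit_32b, xa_limit_31b or XA_LIMIT(min, max). It returns 0 on success, -EBUSY if no index in the range is free, and -ENOMEM if memory allocation fails. The call sites listed below all follow that pattern. A minimal sketch of typical usage is given here; the xarray name, structure and helper functions are hypothetical and are not taken from any of the files in the listing:

#include <linux/xarray.h>

/* Allocation-aware XArray: free indices are handed out by xa_alloc(). */
static DEFINE_XARRAY_ALLOC(example_devices);

struct example_dev {
	u32 id;		/* index assigned by xa_alloc() */
	/* ... */
};

static int example_register(struct example_dev *dev, gfp_t gfp)
{
	int err;

	/* Store @dev and receive a free index in [0, UINT_MAX] via dev->id. */
	err = xa_alloc(&example_devices, &dev->id, dev, xa_limit_32b, gfp);
	if (err)	/* -EBUSY: range exhausted, -ENOMEM: allocation failed */
		return err;
	return 0;
}

static void example_unregister(struct example_dev *dev)
{
	/* Release the index so a later xa_alloc() can reuse it. */
	xa_erase(&example_devices, dev->id);
}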
ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
error = xa_alloc(&srv_sess->index_idr, &sess_dev->device_id, sess_dev,
ret = xa_alloc(&dma_heap_minors, &minor, heap,
ret = xa_alloc(&client->resource_xa, &index, resource, xa_limit_32b,
ret = xa_alloc(&fw_device_xa, &minor, device, XA_LIMIT(0, MINORMASK), GFP_KERNEL);
ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
r = xa_alloc(drm_minor_get_xa(type), &minor->index,
r = xa_alloc(&drm_minors_xa, &minor->index,
ret = xa_alloc(&file_private->syncobj_xa, handle, syncobj, xa_limit_32b,
ret = xa_alloc(&file_private->syncobj_xa, handle, syncobj, xa_limit_32b,
err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&fpriv->context_xa, &id, NULL, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&pvr_file->free_list_handles,
err = xa_alloc(&pvr_file->hwrt_handles,
err = xa_alloc(&pvr_file->vm_ctx_handles,
err = xa_alloc(&pvr_file->pvr_dev->free_list_ids,
err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(array, &id, sig_sync, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL);
ret = xa_alloc(&priv->jm_ctxs, &args->handle, jm_ctx,
ret = xa_alloc(&pool->xa, &id, heap,
ret = xa_alloc(&pool->xa, &id, vm,
ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg,
return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx,
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&group->xa, &pos, xe_lrc_get(q->lrc[0]),
ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
err = xa_alloc(&op->prefetch_range.range,
ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) {
err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
rc = xa_alloc(&fault->response, &group->cookie, group,
rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY, xa_limit_31b,
ret = xa_alloc(&uacce_xa, &uacce->dev_id, uacce, xa_limit_32b,
return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b,
ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX),
err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b,
err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
err = xa_alloc(&post_act->ids, &handle->id, post_attr,
err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
ret = xa_alloc(&st->idx_xa, &xa_id, idx_data, st->index_limit, GFP_KERNEL);
err = xa_alloc(&pr_core->prr_xa, &prr->index, prr, pr_core->prr_ids,
err = xa_alloc(&nn->xa_ipsec, &saidx, x,
ret = xa_alloc(&pse_pw_d_map, &index, pw_d, XA_LIMIT(1, PSE_PW_D_LIMIT),
ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
ret = xa_alloc(&tpg_xa, &val, se_tpg,
if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
rc = xa_alloc(&ctxdata->sess_list, &sess_id, sess, xa_limit_32b,
ret = xa_alloc(&liteuart_array, &dev_id, uart, limit, GFP_KERNEL);
ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
rv = xa_alloc(&ls->ls_lkbxa, &lkb->lkb_id, lkb, limit, GFP_ATOMIC);
rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC);
struct xa_node *xa_alloc;
.xa_alloc = NULL, \
ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
ret = xa_alloc(xa, &index, desc, limit, GFP_KERNEL);
XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
RCU_INIT_POINTER(node->parent, xas->xa_alloc);
xas->xa_alloc = node;
struct xa_node *child = xas->xa_alloc;
xas->xa_alloc = rcu_dereference_raw(child->parent);
struct xa_node *child = xas->xa_alloc;
RCU_INIT_POINTER(child->parent, xas->xa_alloc);
xas->xa_alloc = rcu_dereference_raw(child->parent);
struct xa_node *next, *node = xas->xa_alloc;
xas->xa_alloc = node = next;
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!xas->xa_alloc)
xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!xas->xa_alloc)
xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
struct xa_node *node = xas->xa_alloc;
xas->xa_alloc = NULL;
error = xa_alloc(&mem_cgroup_private_ids, &memcg->id.id, NULL,
err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx,
ret = xa_alloc(&dev->ethtool->rss_ctx, &req.rss_context, ctx,
rc = xa_alloc(&qrtr_ports, port, ipc, QRTR_EPH_PORT_RANGE,
ret = xa_alloc(&hierarchy->shapers, &index, NULL,
if (xa_alloc(&rd->rd_xa, &rn->rn_index, rn, xa_limit_32b, GFP_KERNEL) < 0)
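The hits above from include/linux/xarray.h and lib/xarray.c that reference xas->xa_alloc are not calls to xa_alloc(); they use the struct xa_node *xa_alloc member of struct xa_state, the preallocation slot that xas_nomem() fills so an operation that failed with -ENOMEM under a lock can be retried without dropping into the allocator while locked. A sketch of that documented retry pattern follows; the wrapper function and its arguments are hypothetical:

/* Retry loop that exercises xas->xa_alloc: xas_nomem() preallocates a
 * node outside the lock when the previous attempt hit -ENOMEM. */
static int example_store(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}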