xa_store

The lines below are xa_store() call sites collected from across the kernel tree, along with the function's declaration and export. Three idioms recur throughout: wrapping the call in xa_err() or xa_is_err() to turn an allocation failure into a negative errno, capturing the return value to examine the entry the store displaced, and storing NULL to clear a slot.
ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
prev = xa_store(&sr_forward_xa, enc,
prev = xa_store(&sr_forward_xa, enc,
ret = xa_store(&sr_forward_xa, encoding,
old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
ret = xa_store(&mte_pages, page_swap_entry(page).val, tag_storage,
err = xa_err(xa_store(&rtas_token_to_function_xarray,
ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL));
if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
xa_store(&hetero_cpu, hetero_id,
xa_store(&auxdev->sysfs.irqs, irq, no_free_ptr(info), GFP_KERNEL);
ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
ptr = xa_store(usp_xa, index, c, GFP_KERNEL);
ptr = xa_store(res_xa, us_index, n, GFP_KERNEL);
ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL);
ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL);
old_rec = xa_store(&array_rec->rec_gen_media,
old_rec = xa_store(&array_rec->rec_dram,
xa_store(&numa_info_xa, numa_info->pxm, numa_info, GFP_KERNEL);
xa_store(&xa, key, range, GFP_KERNEL);
entry = xa_store(drm_minor_get_xa(type), minor->index, minor, GFP_KERNEL);
xa_store(drm_minor_get_xa(type), minor->index, NULL, GFP_KERNEL);
old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
xa_store(&job->dependencies, index, fence, GFP_KERNEL);
return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL));
ret = xa_err(xa_store(&ct->fence_lookup,
ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
ptr = xa_store(&job->dep.drm.dependencies, 0, fence,
xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
rc = xa_store(&device->client_data, client->client_id, data,
ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
ret = xa_err(xa_store(&device->compat_devs, rnet->id,
old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
xa_store(&multicast_table, mc->id, mc, 0);
xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
ret = xa_err(xa_store(&roce_bond_xa, bus_num, die_info, GFP_KERNEL));
ret = xa_err(xa_store(dip_xa, qpn, hr_dip, GFP_KERNEL));
err = xa_err(xa_store(qpn_opfc_xa, qp_num, per_qp_opfc, GFP_KERNEL));
return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL);
res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
WARN_ON(xa_is_err(xa_store(&group->pasid_array,
WARN_ON(xa_is_err(xa_store(&group->pasid_array,
WARN_ON(xa_is_err(xa_store(&group->pasid_array,
curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
WARN_ON(xa_is_err(xa_store(&igroup->pasid_attach, pasid, attach,
WARN_ON(xa_is_err(xa_store(&attach->device_array, idev->obj.id,
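The WARN_ON()-wrapped stores above treat failure as a programming error rather than a condition to handle. One pattern that can justify that, sketched here with hypothetical names: overwriting a slot that already exists in the tree needs no node allocation, and xa_reserve() can pre-allocate the slot so a later store cannot fail. This is only an illustrative sketch, not a claim about what these particular call sites do.

static DEFINE_XARRAY(example_xa);	/* hypothetical table */

static int example_prepare(unsigned long index)
{
	/* Allocate the internal node now, while we may still sleep. */
	return xa_reserve(&example_xa, index, GFP_KERNEL);
}

static void example_commit(unsigned long index, void *object)
{
	/* The slot was reserved above, so this store cannot fail;
	 * a failure here would be a bug, hence WARN rather than
	 * error handling. */
	WARN_ON(xa_is_err(xa_store(&example_xa, index, object, GFP_ATOMIC)));
}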
xa_store(&iopt->domains, iopt->next_domain_id, domain, GFP_KERNEL);
xa_store(&iopt->domains, index, iter_domain, GFP_KERNEL);
rc = xa_err(xa_store(ioas_list, index, ioas, GFP_KERNEL));
entry = xa_store(&its->its_devices, dev_id, its_dev, GFP_KERNEL);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
err = xa_err(xa_store(&fc_stats->counters, id, counter, GFP_KERNEL));
err = xa_err(xa_store(fs_id_xa, fs_id, fs_id_iter, GFP_KERNEL));
err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL)));
ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
err = xa_err(xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
xa_store(&p2pdma->map_types, map_types_idx(client),
if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport,
rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
return WARN_ON(xa_is_err(xa_store(&tee_dma_heap, id, h, GFP_KERNEL)));
existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0);
if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL)))
if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) {
!xa_is_err(xa_store(&session->se_slots, s, slot,
ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
return xa_err(xa_store(&conn->sessions, sess->id, sess, KSMBD_DEFAULT_GFP));
old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann,
old = xa_store(&sess->ksmbd_chann_list, (long)conn,
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
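The declaration above is the one from include/linux/xarray.h. A minimal sketch of the calling convention, using a hypothetical array and payload: on success xa_store() returns the entry previously stored at the index (NULL if the slot was empty); on failure it returns an xa_err()-encoded pointer, which the xa_is_err()/xa_err() wrappers seen throughout this list test and convert.

#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);	/* hypothetical index -> object map */

static int example_insert(unsigned long index, void *object)
{
	void *old;

	/* Pointers stored directly must be at least 4-byte aligned;
	 * small integers go in via xa_mk_value(), as the test-suite
	 * lines in this listing do. */
	old = xa_store(&example_xa, index, object, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM as a negative errno */

	/* @old is whatever the slot held before the store. */
	return 0;
}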
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
ret = xa_store(&syscalls_metadata_sparse, i, meta,
entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
entry = xa_store(&dmirror->pt, pfn + i, entry, GFP_ATOMIC);
XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
return xa_store(xa, index, xa_mk_index(index), gfp);
xa_store(xa, 3, xa, GFP_KERNEL);
XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
EXPORT_SYMBOL(xa_store);
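xa_store() is exported for modular code, and per the kernel's XArray documentation storing NULL is how an entry is removed: a handful of call sites above pass NULL explicitly, and xa_erase() is shorthand for the same operation. A sketch with a hypothetical array:

static DEFINE_XARRAY(example_xa);

static void example_remove(unsigned long index)
{
	/* Storing NULL clears the slot; for a plain xarray there is no
	 * distinction between "never stored" and "erased", and
	 * xa_erase(&example_xa, index) is the equivalent shorthand that
	 * needs no gfp argument. */
	xa_store(&example_xa, index, NULL, GFP_KERNEL);
}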
xa_store(&mem_cgroup_private_ids, memcg->id.id, memcg, GFP_KERNEL);
old = xa_store(swap_zswap_tree(page_swpentry),
WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
res = xa_store(&node->seq_blocks, block_idx, block, GFP_ATOMIC);
xa_store(&psp_devs, psd->id, NULL, GFP_KERNEL);
if (xa_err(xa_store(&node->qrtr_tx_flow, key, flow,
old = xa_store(&node->servers, port, srv, GFP_KERNEL);
if (xa_store(&nodes, node_id, node, GFP_KERNEL)) {
xa_store(xa, i, xa_mk_value(i), GFP_KERNEL);
xa_store(&array, 100, xa_mk_value(100), GFP_KERNEL);
r = xa_err(xa_store(&kvm->mem_attr_array, i, entry,
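Across the listing, the gfp argument tracks the caller's context: most sites pass GFP_KERNEL, stores made under a spinlock or in other atomic contexts pass GFP_ATOMIC, and one I/O-path site above uses GFP_NOIO. The flags govern how xa_store() may allocate its internal nodes, as in this hedged sketch with a hypothetical lookup table:

static DEFINE_XARRAY(example_lookup);	/* hypothetical id -> node map */

/* Called in atomic context, so internal node allocation must not sleep;
 * xa_err() collapses the returned pointer into 0 or a negative errno. */
static int example_cache_atomic(unsigned long id, void *node)
{
	return xa_err(xa_store(&example_lookup, id, node, GFP_ATOMIC));
}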