alloc_percpu
twd_evt = alloc_percpu(struct clock_event_device);
mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
vmcs = alloc_percpu(struct kvm_context);
cpu_cf_root.cfptr = alloc_percpu(struct cpu_cf_ptr);
pai_root[idx].mapptr = alloc_percpu(struct pai_mapptr);
pcpu = alloc_percpu(struct cpu_perf_ibs);
pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);
uncore->info = alloc_percpu(union amd_uncore_info);
pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);
pmu->ctx = alloc_percpu(struct amd_uncore_ctx *);
bts_ctx = alloc_percpu(struct bts_ctx);
hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
cpu_cstate_entry = alloc_percpu(struct cstate_entry);
savic_page = alloc_percpu(struct secure_avic_page);
arch_cpu_scale = alloc_percpu(struct arch_hybrid_cpu_scale);
msrs = alloc_percpu(struct msr);
mcdb = alloc_percpu(struct mc_debug_data);
bdev->bd_stats = alloc_percpu(struct disk_stats);
bs->cache = alloc_percpu(struct bio_alloc_cache);
ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
streams = alloc_percpu(struct crypto_acomp_stream);
queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
hdev->hldio.inflight_ios = alloc_percpu(s64);
sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
sess->cpu_rr = alloc_percpu(int);
comp->stream = alloc_percpu(struct zcomp_strm);
arch_timer_evt = alloc_percpu(struct clock_event_device);
gt_evt = alloc_percpu(struct clock_event_device);
hv_clock_event = alloc_percpu(struct clock_event_device);
jcore_pit_percpu = alloc_percpu(struct jcore_pit);
armada_370_xp_evt = alloc_percpu(struct clock_event_device);
msm_evt = alloc_percpu(struct clock_event_device);
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
pcc_cpu_info = alloc_percpu(struct pcc_cpu);
haltpoll_cpuidle_devices = alloc_percpu(struct cpuidle_device);
priv->ppriv = alloc_percpu(*priv->ppriv);
wq_table = alloc_percpu(struct wq_table_entry);
chan->local = alloc_percpu(typeof(*chan->local));
channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
regs = alloc_percpu(struct sdei_registered_event);
hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
hyperv_pcpu_input_arg = alloc_percpu(void *);
hyperv_pcpu_output_arg = alloc_percpu(void *);
hv_synic_eventring_tail = alloc_percpu(u8 *);
root_scheduler_input = alloc_percpu(void *);
root_scheduler_output = alloc_percpu(void *);
synic_pages = alloc_percpu(struct hv_synic_pages);
works = alloc_percpu(struct work_struct);
csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t);
event_data->path = alloc_percpu(struct coresight_path *);
drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
drvdata->handle = alloc_percpu(struct perf_output_handle *);
intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
entry->comp_vect_affinity = alloc_percpu(u16);
ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
dd->int_counter = alloc_percpu(u64);
dd->rcv_limit = alloc_percpu(u64);
dd->send_schedule = alloc_percpu(u64);
dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
sc->buffers_allocated = alloc_percpu(u32);
stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
global->local = alloc_percpu(typeof(*global->local));
cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
stats->last = alloc_percpu(struct dm_stats_last_position);
md->pending_io = alloc_percpu(unsigned long);
conf->percpu = alloc_percpu(struct raid5_percpu);
bond->rr_tx_counter = alloc_percpu(u32);
sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
priv->fd = alloc_percpu(*priv->fd);
fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
pp->ports = alloc_percpu(struct mvneta_pcpu_port);
txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
ppp->xmit_recursion = alloc_percpu(struct ppp_xmit_recursion);
lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
nd_region->lane = alloc_percpu(struct nd_percpu_lane);
spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
msc->error_dev_id = alloc_percpu(struct mpam_msc *);
h->lockup_detected = alloc_percpu(u32);
phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
qh->stats = alloc_percpu(struct knav_queue_stats);
new->stats = alloc_percpu(struct se_dev_entry_io_stats);
dev->stats = alloc_percpu(struct se_dev_io_stats);
lun->lun_stats = alloc_percpu(struct scsi_port_stats);
optee_pcpu = alloc_percpu(struct optee_pcpu);
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
ctx->cpu = alloc_percpu(struct kioctx_cpu);
sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
/*
 * Allocate zeroed per-CPU NFS I/O statistics counters (one struct
 * nfs_iostats per possible CPU).  Returns a __percpu pointer, or NULL
 * on allocation failure — callers must check before use and release
 * with free_percpu() (presumably via a matching nfs_free_iostats();
 * verify against the enclosing file).
 */
#define nfs_alloc_iostats() alloc_percpu(struct nfs_iostats)
percpu = alloc_percpu(struct squashfs_stream);
cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
xfsstats.xs_stats = alloc_percpu(struct xfsstats);
lport->stats = alloc_percpu(struct fc_stats);
lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
clru->local_list = alloc_percpu(struct bpf_lru_locallist);
netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
s->freelist = alloc_percpu(struct pcpu_freelist_head);
cgrp->rstat_base_cpu = alloc_percpu(struct cgroup_rstat_base_cpu);
css->rstat_cpu = alloc_percpu(struct css_rstat_cpu);
ss->lhead = alloc_percpu(struct llist_head);
pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context *);
jc->info = alloc_percpu(struct perf_cgroup_info);
cpu_events = alloc_percpu(typeof(*cpu_events));
mm->futex_ref = alloc_percpu(unsigned int);
ipi_mux_pcpu = alloc_percpu(typeof(*ipi_mux_pcpu));
desc->kstat_irqs = alloc_percpu(struct irqstat);
sem->read_count = alloc_percpu(int);
pd->reorder_list = alloc_percpu(struct padata_list);
pd->squeue = alloc_percpu(struct padata_serial_queue);
ssp->sda = alloc_percpu(struct srcu_data);
ssp->sda = alloc_percpu(struct srcu_data);
chan->buf = alloc_percpu(struct rchan_buf *);
tick_work_cpu = alloc_percpu(struct tick_work);
ca->cpuusage = alloc_percpu(u64);
ca->cpustat = alloc_percpu(struct kernel_cpustat);
sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
d->sd = alloc_percpu(struct sched_domain *);
sdd->sd = alloc_percpu(struct sched_domain *);
sdd->sds = alloc_percpu(struct sched_domain_shared *);
sdd->sg = alloc_percpu(struct sched_group *);
sdd->sgc = alloc_percpu(struct sched_group_capacity *);
cfd->csd = alloc_percpu(call_single_data_t);
alloc_percpu(struct work_struct);
bt->sequence = alloc_percpu(unsigned long);
tinfo->tbuf = alloc_percpu(struct trace_user_buf);
buf->data = alloc_percpu(struct trace_array_cpu);
list = alloc_percpu(struct hlist_head);
buf = (char __percpu *)alloc_percpu(perf_trace_t);
ustring_per_cpu = alloc_percpu(struct ustring_buffer);
tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
tk->nhit = alloc_percpu(unsigned long);
buffers = alloc_percpu(struct trace_buffer_struct);
tu->nhits = alloc_percpu(unsigned long);
uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
works = alloc_percpu(struct work_struct);
wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
tag->counters = alloc_percpu(struct alloc_tag_counters);
pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
alloc_percpu(struct per_cpu_nodestat);
sbinfo->ino_batch = alloc_percpu(ino_t);
s->cpu_stats = alloc_percpu(struct kmem_cache_stats);
s->cpu_sheaves = alloc_percpu(struct slub_percpu_sheaves);
pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
caifd->pcpu_refcnt = alloc_percpu(int);
this->pcpu_refcnt = alloc_percpu(int);
ro->uniq = alloc_percpu(struct uniqframe);
dev->pcpu_refcnt = alloc_percpu(int);
gcells->cells = alloc_percpu(struct gro_cell);
tbl->stats = alloc_percpu(struct neigh_statistics);
pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
net->core.prot_inuse = alloc_percpu(struct prot_inuse);
net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
net->mib.net_statistics = alloc_percpu(struct linux_mib);
net->mib.udp_statistics = alloc_percpu(struct udp_mib);
net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
t->stats = alloc_percpu(struct trie_use_stats);
net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib);
net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib);
net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib);
net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib);
mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
struct mptcp_mib __percpu *mib = alloc_percpu(struct mptcp_mib);
s->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
snet->stats = alloc_percpu(struct synproxy_stats);
new->scratch = alloc_percpu(*new->scratch);
m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
ovs_pcpu_storage = alloc_percpu(*ovs_pcpu_storage);
po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
fnew->pf = alloc_percpu(struct tc_basic_pcnt);
new->pf = alloc_percpu(struct tc_matchall_pcnt);
n->pcpu_success = alloc_percpu(u32);
sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
net->smc.smc_stats = alloc_percpu(struct smc_stats);
tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);