Symbol: per_cpu
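
For orientation before the raw cross-references: per_cpu(var, cpu) is the kernel accessor that resolves a DEFINE_PER_CPU variable to the instance owned by a given CPU. Below is a minimal sketch of the read-and-sum pattern that recurs throughout this index; demo_counter and demo_sum_counters are hypothetical names, not taken from any file listed here.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* One instance of this counter exists for every possible CPU. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_sum_counters(void)
{
	unsigned long sum = 0;
	int cpu;

	/*
	 * per_cpu() may name another CPU's instance; the caller is
	 * responsible for whatever synchronization that requires.
	 */
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_counter, cpu);

	pr_info("demo_counter total: %lu\n", sum);
}

Local-CPU updates usually go through accessors such as this_cpu_inc() rather than per_cpu(); the entries below cover only the explicit per_cpu() form (plus a few incidental matches on struct members named per_cpu).
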
arch/alpha/kernel/irq.c
82
seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
arch/alpha/kernel/time.c
119
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/alpha/kernel/time.c
176
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/alpha/kernel/time.c
186
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/alpha/kernel/time.c
94
struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
arch/arc/include/asm/mmu_context.h
53
#define asid_cpu(cpu) per_cpu(asid_cache, cpu)
arch/arc/kernel/setup.c
649
register_cpu(&per_cpu(cpu_topology, cpu), cpu);
arch/arm/include/asm/smp_plat.h
38
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu);
arch/arm/include/asm/system_misc.h
23
harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
arch/arm/kernel/irq.c
70
per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
arch/arm/kernel/machine_kexec.c
115
csd = &per_cpu(cpu_stop_csd, cpu);
arch/arm/kernel/setup.c
1271
cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
arch/arm/kernel/setup.c
1277
per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
arch/arm/kernel/setup.c
1278
(per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
arch/arm/kernel/smp.c
391
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
arch/arm/kernel/smp.c
487
bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
arch/arm/kernel/smp.c
622
per_cpu(cpu_completion, cpu) = completion;
arch/arm/kernel/smp.c
628
complete(per_cpu(cpu_completion, cpu));
arch/arm/kernel/smp.c
805
if (!per_cpu(l_p_j_ref, first)) {
arch/arm/kernel/smp.c
807
per_cpu(l_p_j_ref, cpu) =
arch/arm/kernel/smp.c
808
per_cpu(cpu_data, cpu).loops_per_jiffy;
arch/arm/kernel/smp.c
809
per_cpu(l_p_j_ref_freq, cpu) = freq->old;
arch/arm/kernel/smp.c
824
lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
arch/arm/kernel/smp.c
825
per_cpu(l_p_j_ref_freq, first), freq->new);
arch/arm/kernel/smp.c
827
per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
arch/arm/kernel/smp_twd.c
227
if (per_cpu(percpu_setup_called, cpu)) {
arch/arm/kernel/smp_twd.c
233
per_cpu(percpu_setup_called, cpu) = true;
arch/arm/kernel/traps.c
915
per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE];
arch/arm/mach-alpine/alpine_cpu_pm.c
36
&al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr);
arch/arm/mach-alpine/alpine_cpu_resume.h
22
struct al_cpu_resume_regs_per_cpu per_cpu[];
arch/arm/mach-bcm/platsmp-brcmstb.c
62
return per_cpu(per_cpu_sw_state, cpu);
arch/arm/mach-bcm/platsmp-brcmstb.c
68
per_cpu(per_cpu_sw_state, cpu) = val;
arch/arm/mach-omap2/omap-mpuss-lowpower.c
120
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
131
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
185
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
230
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
317
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
375
pm_info = &per_cpu(omap4_pm_info, 0x0);
arch/arm/mach-omap2/omap-mpuss-lowpower.c
399
pm_info = &per_cpu(omap4_pm_info, 0x1);
arch/arm/mach-omap2/omap-wakeupgen.c
195
per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
arch/arm/mach-omap2/omap-wakeupgen.c
203
wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
arch/arm/mach-qcom/platsmp.c
320
if (!per_cpu(cold_boot_done, cpu)) {
arch/arm/mach-qcom/platsmp.c
323
per_cpu(cold_boot_done, cpu) = true;
arch/arm/mm/context.c
144
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
arch/arm/mm/context.c
153
asid = per_cpu(reserved_asids, i);
arch/arm/mm/context.c
155
per_cpu(reserved_asids, i) = asid;
arch/arm/mm/context.c
180
if (per_cpu(reserved_asids, cpu) == asid) {
arch/arm/mm/context.c
182
per_cpu(reserved_asids, cpu) = newasid;
arch/arm/mm/context.c
254
&& atomic64_xchg(&per_cpu(active_asids, cpu), asid))
arch/arm/mm/context.c
270
atomic64_set(&per_cpu(active_asids, cpu), asid);
arch/arm/mm/context.c
67
asid = per_cpu(active_asids, cpu).counter;
arch/arm/mm/context.c
69
asid = per_cpu(reserved_asids, cpu);
arch/arm/mm/proc-v7-bugs.c
71
if (per_cpu(harden_branch_predictor_fn, cpu))
arch/arm/mm/proc-v7-bugs.c
76
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/mm/proc-v7-bugs.c
82
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/mm/proc-v7-bugs.c
88
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/mm/proc-v7-bugs.c
95
per_cpu(harden_branch_predictor_fn, cpu) =
arch/arm/xen/enlighten.c
153
if (per_cpu(xen_vcpu, cpu) != NULL)
arch/arm/xen/enlighten.c
165
per_cpu(xen_vcpu, cpu) = vcpup;
arch/arm/xen/enlighten.c
494
per_cpu(xen_vcpu_id, cpu) = cpu;
arch/arm64/kernel/cpufeature.c
4017
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpuinfo.c
282
return *pos < nr_cpu_ids ? &per_cpu(cpu_data, *pos) : NULL;
arch/arm64/kernel/cpuinfo.c
363
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpuinfo.c
385
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpuinfo.c
403
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
arch/arm64/kernel/cpuinfo.c
531
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
arch/arm64/kernel/fpsimd.c
2012
per_cpu(fpsimd_last_state.st, cpu) = NULL;
arch/arm64/kernel/irq.c
50
per_cpu(irq_shadow_call_stack_ptr, cpu) =
arch/arm64/kernel/irq.c
61
per_cpu(irq_stack_ptr, cpu) = p;
arch/arm64/kernel/mte.c
578
switch (per_cpu(mte_tcf_preferred, dev->id)) {
arch/arm64/kernel/mte.c
606
per_cpu(mte_tcf_preferred, dev->id) = tcf;
arch/arm64/kernel/mte.c
621
per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
arch/arm64/kernel/sdei.c
108
s = per_cpu(*ptr, cpu);
arch/arm64/kernel/sdei.c
110
per_cpu(*ptr, cpu) = NULL;
arch/arm64/kernel/sdei.c
132
per_cpu(*ptr, cpu) = s;
arch/arm64/kernel/sdei.c
55
p = per_cpu(*ptr, cpu);
arch/arm64/kernel/sdei.c
57
per_cpu(*ptr, cpu) = NULL;
arch/arm64/kernel/sdei.c
79
per_cpu(*ptr, cpu) = p;
arch/arm64/kernel/smp.c
510
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/arm64/kernel/smp.c
537
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/arm64/kernel/topology.c
114
WRITE_ONCE(per_cpu(arch_max_freq_scale, cpu), (unsigned long)ratio);
arch/arm64/kernel/topology.c
178
time_is_before_jiffies(per_cpu(cpu_amu_samples.last_scale_update, cpu)))
arch/arm64/kvm/arm.c
2440
if (per_cpu(kvm_hyp_initialized, cpu))
arch/arm64/kvm/arm.c
2443
free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
arch/arm64/kvm/arm.c
2656
per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
arch/arm64/kvm/arm.c
2732
char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
arch/arm64/kvm/vmid.c
53
vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
arch/arm64/kvm/vmid.c
57
vmid = per_cpu(reserved_vmids, cpu);
arch/arm64/kvm/vmid.c
59
per_cpu(reserved_vmids, cpu) = vmid;
arch/arm64/kvm/vmid.c
83
if (per_cpu(reserved_vmids, cpu) == vmid) {
arch/arm64/kvm/vmid.c
85
per_cpu(reserved_vmids, cpu) = newvmid;
arch/arm64/mm/context.c
113
asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
arch/arm64/mm/context.c
122
asid = per_cpu(reserved_asids, i);
arch/arm64/mm/context.c
124
per_cpu(reserved_asids, i) = asid;
arch/arm64/mm/context.c
149
if (per_cpu(reserved_asids, cpu) == asid) {
arch/arm64/mm/context.c
151
per_cpu(reserved_asids, cpu) = newasid;
arch/hexagon/kernel/smp.c
102
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
arch/hexagon/kernel/smp.c
85
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
arch/hexagon/kernel/time.c
125
&per_cpu(clock_events, cpu);
arch/hexagon/kernel/time.c
140
struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
arch/loongarch/include/asm/irq.h
32
unsigned long low = per_cpu(irq_stack, cpu);
arch/loongarch/kernel/irq.c
120
per_cpu(irq_stack, i) = (unsigned long)page_address(page);
arch/loongarch/kernel/irq.c
122
per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
arch/loongarch/kernel/paravirt.c
220
st = &per_cpu(steal_time, cpu);
arch/loongarch/kernel/paravirt.c
271
src = &per_cpu(steal_time, cpu);
arch/loongarch/kernel/paravirt.c
32
src = &per_cpu(steal_time, cpu);
arch/loongarch/kernel/paravirt.c
51
irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
arch/loongarch/kernel/paravirt.c
84
info = &per_cpu(irq_stat, i);
arch/loongarch/kernel/process.c
374
csd = &per_cpu(backtrace_csd, cpu);
arch/loongarch/kernel/smp.c
275
per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
arch/loongarch/kernel/smp.c
280
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
arch/loongarch/kernel/smp.c
285
per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
arch/loongarch/kernel/smp.c
290
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
arch/loongarch/kernel/smp.c
389
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
arch/loongarch/kernel/smp.c
427
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/loongarch/kernel/smp.c
470
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
arch/loongarch/kernel/smp.c
93
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
arch/loongarch/kernel/time.c
162
cd = &per_cpu(constant_clockevent_device, cpu);
arch/loongarch/kernel/time.c
42
cd = &per_cpu(constant_clockevent_device, cpu);
arch/loongarch/kernel/traps.c
1110
stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
arch/microblaze/kernel/setup.c
172
per_cpu(KM, 0) = 0x1; /* We start in kernel mode */
arch/microblaze/kernel/setup.c
173
per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
arch/mips/cavium-octeon/octeon-irq.c
1082
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
1085
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
1105
set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
2425
isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2542
isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2706
ciu3_info = per_cpu(octeon_ciu3_info, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2725
ciu3_info = per_cpu(octeon_ciu3_info, cpu);
arch/mips/cavium-octeon/octeon-irq.c
2735
unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
arch/mips/cavium-octeon/octeon-irq.c
310
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
316
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
325
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
413
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
415
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
417
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
446
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
448
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
450
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
486
set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
490
set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
634
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
641
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
664
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
671
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
arch/mips/cavium-octeon/octeon-irq.c
818
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
arch/mips/cavium-octeon/octeon-irq.c
822
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
824
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
869
unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
arch/mips/cavium-octeon/octeon-irq.c
882
unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
arch/mips/cavium-octeon/smp.c
311
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
arch/mips/cavium-octeon/smp.c
350
per_cpu(cpu_state, cpu) = CPU_DEAD;
arch/mips/kernel/cevt-bcm1480.c
100
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
arch/mips/kernel/cevt-bcm1480.c
101
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
arch/mips/kernel/cevt-r4k.c
153
cd = &per_cpu(mips_clockevent_device, cpu);
arch/mips/kernel/cevt-r4k.c
259
cd = &per_cpu(mips_clockevent_device, cpu);
arch/mips/kernel/cevt-r4k.c
295
cd = &per_cpu(mips_clockevent_device, cpu);
arch/mips/kernel/cevt-sb1250.c
100
unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
arch/mips/kernel/cevt-sb1250.c
99
struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
arch/mips/kernel/mips-cm.c
312
spin_lock_init(&per_cpu(cm_core_lock, cpu));
arch/mips/kernel/mips-cm.c
362
spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
arch/mips/kernel/mips-cm.c
363
per_cpu(cm_core_lock_flags, curr_core));
arch/mips/kernel/mips-cm.c
383
spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
arch/mips/kernel/mips-cm.c
384
per_cpu(cm_core_lock_flags, curr_core));
arch/mips/kernel/mips-cpc.c
100
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
arch/mips/kernel/mips-cpc.c
101
per_cpu(cpc_core_lock_flags, curr_core));
arch/mips/kernel/mips-cpc.c
120
spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
arch/mips/kernel/mips-cpc.c
121
per_cpu(cpc_core_lock_flags, curr_core));
arch/mips/kernel/mips-cpc.c
77
spin_lock_init(&per_cpu(cpc_core_lock, cpu));
arch/mips/kernel/pm-cps.c
117
entry = per_cpu(nc_asm_enter, cpu)[state];
arch/mips/kernel/pm-cps.c
154
core_ready_count = per_cpu(ready_count, cpu);
arch/mips/kernel/pm-cps.c
162
barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
arch/mips/kernel/pm-cps.c
641
if (per_cpu(nc_asm_enter, cpu)[state])
arch/mips/kernel/pm-cps.c
654
per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
arch/mips/kernel/pm-cps.c
657
if (!per_cpu(ready_count, cpu)) {
arch/mips/kernel/pm-cps.c
665
per_cpu(ready_count, sibling) = core_rc;
arch/mips/kernel/process.c
751
csd = &per_cpu(backtrace_csd, cpu);
arch/mips/kernel/smp-bmips.c
336
per_cpu(ipi_action_mask, cpu) |= action;
arch/mips/kernel/smp-bmips.c
348
per_cpu(ipi_action_mask, cpu) = 0;
arch/mips/kernel/smp.c
754
csd = &per_cpu(tick_broadcast_csd, cpu);
arch/mips/kernel/time.c
57
per_cpu(pcp_lpj_ref, cpu) =
arch/mips/kernel/time.c
59
per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
arch/mips/kernel/time.c
74
lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
arch/mips/kernel/time.c
75
per_cpu(pcp_lpj_ref_freq, cpu),
arch/mips/kernel/topology.c
16
struct cpu *c = &per_cpu(cpu_devices, i);
arch/mips/loongson64/hpet.c
183
cd = &per_cpu(hpet_clockevent_device, cpu);
arch/mips/loongson64/hpet.c
227
cd = &per_cpu(hpet_clockevent_device, cpu);
arch/mips/loongson64/smp.c
416
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/mips/loongson64/smp.c
497
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
arch/mips/loongson64/smp.c
532
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
arch/mips/loongson64/smp.c
792
state_addr = &per_cpu(cpu_state, cpu);
arch/mips/math-emu/me-debugfs.c
23
ps = &per_cpu(fpuemustats, cpu);
arch/mips/mm/context.c
115
if (per_cpu(reserved_mmids, cpu) == mmid) {
arch/mips/mm/context.c
117
per_cpu(reserved_mmids, cpu) = newmmid;
arch/mips/mm/context.c
86
mmid = per_cpu(reserved_mmids, cpu);
arch/mips/mm/context.c
89
per_cpu(reserved_mmids, cpu) = mmid;
arch/mips/sgi-ip27/ip27-irq.c
192
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip27/ip27-irq.c
232
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip27/ip27-irq.c
255
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip27/ip27-irq.c
55
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip27/ip27-irq.c
65
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip27/ip27-timer.c
49
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
arch/mips/sgi-ip27/ip27-timer.c
75
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
arch/mips/sgi-ip27/ip27-timer.c
76
unsigned char *name = per_cpu(hub_rt_name, cpu);
arch/mips/sgi-ip30/ip30-irq.c
147
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip30/ip30-irq.c
156
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip30/ip30-irq.c
166
unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);
arch/mips/sgi-ip30/ip30-irq.c
250
unsigned long *mask = &per_cpu(irq_enable_mask, cpu);
arch/mips/sgi-ip30/ip30-irq.c
281
mask = &per_cpu(irq_enable_mask, 0);
arch/mips/sgi-ip30/ip30-irq.c
284
mask = &per_cpu(irq_enable_mask, 1);
arch/openrisc/kernel/time.c
118
&per_cpu(clockevent_openrisc_timer, cpu);
arch/openrisc/kernel/time.c
73
&per_cpu(clockevent_openrisc_timer, cpu);
arch/parisc/kernel/irq.c
126
#define irq_stats(x) (&per_cpu(irq_stat, x))
arch/parisc/kernel/irq.c
318
return per_cpu(cpu_data, cpu).txn_addr;
arch/parisc/kernel/irq.c
330
(!per_cpu(cpu_data, next_cpu).txn_addr ||
arch/parisc/kernel/irq.c
403
stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
arch/parisc/kernel/irq.c
406
last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
arch/parisc/kernel/irq.c
422
last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
arch/parisc/kernel/irq.c
454
union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
arch/parisc/kernel/irq.c
503
eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
arch/parisc/kernel/irq.c
523
per_cpu(cpu_data, cpu).hpa);
arch/parisc/kernel/irq.c
541
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
arch/parisc/kernel/irq.c
75
per_cpu(local_ack_eiem, cpu) &= ~mask;
arch/parisc/kernel/irq.c
78
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
arch/parisc/kernel/irq.c
90
per_cpu(local_ack_eiem, cpu) |= mask;
arch/parisc/kernel/irq.c
93
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
arch/parisc/kernel/perf.c
523
cpu_device = per_cpu(cpu_data, 0).dev;
arch/parisc/kernel/perf.c
525
per_cpu(cpu_data, 0).dev->name);
arch/parisc/kernel/processor.c
161
p = &per_cpu(cpu_data, cpuid);
arch/parisc/kernel/processor.c
341
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
arch/parisc/kernel/processor.c
342
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
arch/parisc/kernel/processor.c
385
strscpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
arch/parisc/kernel/processor.c
392
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
arch/parisc/kernel/setup.c
299
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
arch/parisc/kernel/setup.c
300
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
arch/parisc/kernel/smp.c
123
struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
arch/parisc/kernel/smp.c
128
spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
arch/parisc/kernel/smp.c
199
struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
arch/parisc/kernel/smp.c
200
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
arch/parisc/kernel/smp.c
335
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
arch/parisc/kernel/smp.c
342
memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
arch/parisc/kernel/smp.c
421
spin_lock_init(&per_cpu(ipi_lock, cpu));
arch/parisc/kernel/time.c
47
cd = &per_cpu(parisc_clockevent_device, cpu);
arch/parisc/kernel/time.c
84
cd = &per_cpu(parisc_clockevent_device, cpu);
arch/parisc/kernel/toc.c
70
BUG_ON(regs != (struct pt_regs *)&per_cpu(toc_stack, raw_smp_processor_id()));
arch/parisc/kernel/topology.c
40
per_cpu(cpu_devices, cpuid).hotpluggable = 1;
arch/parisc/kernel/topology.c
42
if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
arch/parisc/kernel/topology.c
49
p = &per_cpu(cpu_data, cpuid);
arch/parisc/kernel/topology.c
51
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
arch/powerpc/include/asm/smp.h
116
return per_cpu(cpu_sibling_map, cpu);
arch/powerpc/include/asm/smp.h
121
return per_cpu(cpu_core_map, cpu);
arch/powerpc/include/asm/smp.h
126
return per_cpu(cpu_l2_cache_map, cpu);
arch/powerpc/include/asm/smp.h
131
return per_cpu(cpu_smallcore_map, cpu);
arch/powerpc/include/asm/smp.h
145
return per_cpu(cpu_smallcore_map, cpu);
arch/powerpc/include/asm/smp.h
147
return per_cpu(cpu_sibling_map, cpu);
arch/powerpc/include/asm/topology.h
142
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
arch/powerpc/include/asm/topology.h
143
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
arch/powerpc/kernel/cacheinfo.c
466
return cpumask_first(per_cpu(thread_group_l1_cache_map,
arch/powerpc/kernel/cacheinfo.c
469
return cpumask_first(per_cpu(thread_group_l2_cache_map,
arch/powerpc/kernel/cacheinfo.c
472
return cpumask_first(per_cpu(thread_group_l3_cache_map,
arch/powerpc/kernel/cacheinfo.c
549
WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);
arch/powerpc/kernel/cacheinfo.c
551
per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;
arch/powerpc/kernel/cacheinfo.c
918
cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
arch/powerpc/kernel/cacheinfo.c
924
per_cpu(cache_dir_pcpu, cpu_id) = NULL;
arch/powerpc/kernel/iommu.c
111
per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
arch/powerpc/kernel/irq.c
101
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).timer_irqs_event, 10);
arch/powerpc/kernel/irq.c
106
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).broadcast_irqs_event, 10);
arch/powerpc/kernel/irq.c
111
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).timer_irqs_others, 10);
arch/powerpc/kernel/irq.c
116
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).spurious_irqs, 10);
arch/powerpc/kernel/irq.c
121
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).pmu_irqs, 10);
arch/powerpc/kernel/irq.c
126
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).mce_exceptions, 10);
arch/powerpc/kernel/irq.c
140
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).sreset_irqs, 10);
arch/powerpc/kernel/irq.c
146
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).soft_nmi_irqs, 10);
arch/powerpc/kernel/irq.c
154
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).doorbell_irqs, 10);
arch/powerpc/kernel/irq.c
167
u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
arch/powerpc/kernel/irq.c
169
sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
arch/powerpc/kernel/irq.c
170
sum += per_cpu(irq_stat, cpu).pmu_irqs;
arch/powerpc/kernel/irq.c
171
sum += per_cpu(irq_stat, cpu).mce_exceptions;
arch/powerpc/kernel/irq.c
172
sum += per_cpu(irq_stat, cpu).spurious_irqs;
arch/powerpc/kernel/irq.c
173
sum += per_cpu(irq_stat, cpu).timer_irqs_others;
arch/powerpc/kernel/irq.c
177
sum += per_cpu(irq_stat, cpu).sreset_irqs;
arch/powerpc/kernel/irq.c
179
sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
arch/powerpc/kernel/irq.c
182
sum += per_cpu(irq_stat, cpu).doorbell_irqs;
arch/powerpc/kernel/setup-common.c
230
pvr = per_cpu(cpu_pvr, cpu_id);
arch/powerpc/kernel/smp.c
1035
return per_cpu(cpu_l2_cache_map, cpu);
arch/powerpc/kernel/smp.c
1047
return per_cpu(cpu_coregroup_map, cpu);
arch/powerpc/kernel/smp.c
1069
zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
arch/powerpc/kernel/smp.c
1098
return per_cpu(cpu_coregroup_map, cpu);
arch/powerpc/kernel/smp.c
1130
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
arch/powerpc/kernel/smp.c
1132
zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
arch/powerpc/kernel/smp.c
1134
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
arch/powerpc/kernel/smp.c
1137
zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
arch/powerpc/kernel/smp.c
1246
per_cpu(cpu_state, cpu) = CPU_DEAD;
arch/powerpc/kernel/smp.c
1256
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
arch/powerpc/kernel/smp.c
1261
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
arch/powerpc/kernel/smp.c
1266
return per_cpu(cpu_state, cpu) == CPU_DEAD;
arch/powerpc/kernel/smp.c
1447
for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
arch/powerpc/kernel/smp.c
1538
for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
arch/powerpc/kernel/smp.c
280
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
arch/powerpc/kernel/smp.c
705
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
arch/powerpc/kernel/smp.c
707
per_cpu(next_tlbcam_idx, id)
arch/powerpc/kernel/smp.c
970
mask = &per_cpu(thread_group_l1_cache_map, cpu);
arch/powerpc/kernel/smp.c
974
mask = &per_cpu(thread_group_l2_cache_map, cpu);
arch/powerpc/kernel/smp.c
976
mask = &per_cpu(thread_group_l3_cache_map, cpu);
arch/powerpc/kernel/sysfs.c
1155
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/powerpc/kernel/sysfs.c
838
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/powerpc/kernel/sysfs.c
937
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/powerpc/kernel/time.c
832
struct clock_event_device *dec = &per_cpu(decrementers, cpu);
arch/powerpc/kernel/watchdog.c
154
cpu, tb, per_cpu(wd_timer_tb, cpu),
arch/powerpc/kernel/watchdog.c
155
tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
arch/powerpc/kernel/watchdog.c
353
per_cpu(wd_timer_tb, cpu) = tb;
arch/powerpc/kernel/watchdog.c
387
if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
arch/powerpc/kernel/watchdog.c
413
cpu, tb, per_cpu(wd_timer_tb, cpu),
arch/powerpc/kernel/watchdog.c
414
tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000);
arch/powerpc/kernel/watchdog.c
469
if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
arch/powerpc/kernel/watchdog.c
470
per_cpu(wd_timer_tb, cpu) = tb;
arch/powerpc/kvm/book3s_hv.c
3438
struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
arch/powerpc/kvm/book3s_hv.c
3470
lp = &per_cpu(preempted_vcores, vc->pcpu);
arch/powerpc/mm/mem.c
310
per_cpu(next_tlbcam_idx, smp_processor_id()) =
arch/powerpc/perf/core-book3s.c
2512
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/powerpc/perf/core-fsl-emb.c
675
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/powerpc/perf/imc-pmu.c
1039
addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
arch/powerpc/perf/imc-pmu.c
1127
u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());
arch/powerpc/perf/imc-pmu.c
1207
u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
arch/powerpc/perf/imc-pmu.c
1220
per_cpu(trace_imc_mem, cpu_id) = local_mem;
arch/powerpc/perf/imc-pmu.c
1270
return (u64)per_cpu(trace_imc_mem, smp_processor_id());
arch/powerpc/perf/imc-pmu.c
1560
per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
arch/powerpc/perf/imc-pmu.c
1603
if (per_cpu(thread_imc_mem, i))
arch/powerpc/perf/imc-pmu.c
1604
free_pages((u64)per_cpu(thread_imc_mem, i), order);
arch/powerpc/perf/imc-pmu.c
1614
if (per_cpu(trace_imc_mem, i))
arch/powerpc/perf/imc-pmu.c
1615
free_pages((u64)per_cpu(trace_imc_mem, i), order);
arch/powerpc/perf/imc-pmu.c
341
return per_cpu(local_nest_imc_refc, cpu);
arch/powerpc/perf/imc-pmu.c
922
u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
arch/powerpc/perf/imc-pmu.c
938
per_cpu(thread_imc_mem, cpu_id) = local_mem;
arch/powerpc/perf/vpa-dtl.c
203
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);
arch/powerpc/perf/vpa-dtl.c
334
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, cpu);
arch/powerpc/perf/vpa-dtl.c
436
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);
arch/powerpc/perf/vpa-dtl.c
477
struct vpa_dtl *dtl = &per_cpu(vpa_dtl_cpu, event->cpu);
arch/powerpc/platforms/powernv/rng.c
110
if (per_cpu(pnv_rng, cpu) == NULL ||
arch/powerpc/platforms/powernv/rng.c
112
per_cpu(pnv_rng, cpu) = rng;
arch/powerpc/platforms/powernv/subcore.c
155
while(per_cpu(split_state, i).step < step)
arch/powerpc/platforms/powernv/subcore.c
196
per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
arch/powerpc/platforms/powernv/subcore.c
230
split_core_secondary_loop(&per_cpu(split_state, cpu).step);
arch/powerpc/platforms/powernv/subcore.c
262
per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED;
arch/powerpc/platforms/powernv/subcore.c
320
while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED)
arch/powerpc/platforms/powernv/subcore.c
355
state = &per_cpu(split_state, cpu);
arch/powerpc/platforms/powernv/vas.c
139
per_cpu(cpu_vas_id, cpu) = vasid;
arch/powerpc/platforms/powernv/vas.c
187
vasid = per_cpu(cpu_vas_id, smp_processor_id());
arch/powerpc/platforms/powernv/vas.c
208
return per_cpu(cpu_vas_id, cpu);
arch/powerpc/platforms/ps3/interrupt.c
180
pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/interrupt.c
684
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/interrupt.c
694
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/interrupt.c
719
dump_bmp(&per_cpu(ps3_private, 0));
arch/powerpc/platforms/ps3/interrupt.c
720
dump_bmp(&per_cpu(ps3_private, 1));
arch/powerpc/platforms/ps3/interrupt.c
726
dump_bmp(&per_cpu(ps3_private, 0));
arch/powerpc/platforms/ps3/interrupt.c
727
dump_bmp(&per_cpu(ps3_private, 1));
arch/powerpc/platforms/ps3/interrupt.c
750
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
arch/powerpc/platforms/ps3/smp.c
40
virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
arch/powerpc/platforms/ps3/smp.c
54
unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
arch/powerpc/platforms/ps3/smp.c
95
unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
arch/powerpc/platforms/pseries/dtl.c
113
struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
arch/powerpc/platforms/pseries/dtl.c
129
return per_cpu(dtl_rings, dtl->cpu).write_index;
arch/powerpc/platforms/pseries/dtl.c
357
struct dtl *dtl = &per_cpu(cpu_dtl, i);
arch/powerpc/platforms/pseries/dtl.c
93
struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
arch/powerpc/platforms/pseries/hvCall_inst.c
134
per_cpu(hcall_stats, cpu),
arch/powerpc/platforms/pseries/lpar.c
411
struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
arch/powerpc/platforms/pseries/lpar.c
418
per_cpu(dtl_entry_ridx, cpu) = 0;
arch/powerpc/platforms/pseries/lpar.c
421
per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
arch/powerpc/platforms/pseries/lpar.c
430
struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
arch/powerpc/sysdev/xive/common.c
1059
xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1205
xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1494
xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1505
per_cpu(xive_cpu, cpu) = xc;
arch/powerpc/sysdev/xive/common.c
1731
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
1819
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
272
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
505
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
529
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/powerpc/sysdev/xive/common.c
610
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
arch/riscv/kernel/irq.c
100
per_cpu(irq_stack_ptr, cpu) = p;
arch/riscv/kernel/irq.c
112
per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
arch/riscv/kernel/irq.c
86
per_cpu(irq_shadow_call_stack_ptr, cpu) =
arch/riscv/kernel/sys_hwprobe.c
229
int this_perf = per_cpu(misaligned_access_speed, cpu);
arch/riscv/kernel/sys_hwprobe.c
266
int this_perf = per_cpu(vector_misaligned_access, cpu);
arch/riscv/kernel/traps_misaligned.c
493
if (per_cpu(vector_misaligned_access, cpu)
arch/riscv/kernel/traps_misaligned.c
511
if (per_cpu(misaligned_access_speed, cpu) !=
arch/riscv/kernel/unaligned_access_speed.c
114
per_cpu(misaligned_access_speed, cpu) = speed;
arch/riscv/kernel/unaligned_access_speed.c
245
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
arch/riscv/kernel/unaligned_access_speed.c
248
per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
arch/riscv/kernel/unaligned_access_speed.c
294
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
arch/riscv/kernel/unaligned_access_speed.c
372
per_cpu(vector_misaligned_access, cpu) = speed;
arch/riscv/kernel/unaligned_access_speed.c
396
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
arch/riscv/kernel/unaligned_access_speed.c
401
if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
arch/riscv/kernel/unaligned_access_speed.c
453
per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
arch/riscv/kernel/unaligned_access_speed.c
46
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
arch/riscv/kernel/unaligned_access_speed.c
474
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
arch/riscv/mm/context.c
168
old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
arch/riscv/mm/context.c
171
atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
arch/riscv/mm/context.c
187
atomic_long_set(&per_cpu(active_context, cpu), cntx);
arch/riscv/mm/context.c
51
if (per_cpu(reserved_context, cpu) == cntx) {
arch/riscv/mm/context.c
53
per_cpu(reserved_context, cpu) = newcntx;
arch/riscv/mm/context.c
73
cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
arch/riscv/mm/context.c
81
cntx = per_cpu(reserved_context, i);
arch/riscv/mm/context.c
84
per_cpu(reserved_context, i) = cntx;
arch/s390/include/asm/processor.h
96
return test_bit(flag, &per_cpu(pcpu_devices, cpu).flags);
arch/s390/kernel/diag/diag.c
97
stat = &per_cpu(diag_stat, cpu);
arch/s390/kernel/idle.c
58
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
arch/s390/kernel/idle.c
67
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
arch/s390/kernel/irq.c
277
per_cpu(irq_stat, cpu).irqs[irq]);
arch/s390/kernel/perf_cpum_sf.c
1709
struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
arch/s390/kernel/perf_cpum_sf.c
766
cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
arch/s390/kernel/perf_cpum_sf.c
820
cpuhw = &per_cpu(cpu_hw_sf, cpu);
arch/s390/kernel/processor.c
171
struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);
arch/s390/kernel/processor.c
321
struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);
arch/s390/kernel/smp.c
1002
count = sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state);
arch/s390/kernel/smp.c
1040
per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY;
arch/s390/kernel/smp.c
1055
per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED;
arch/s390/kernel/smp.c
1074
return sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address);
arch/s390/kernel/smp.c
170
if (per_cpu(pcpu_devices, cpu).address == address)
arch/s390/kernel/smp.c
171
return &per_cpu(pcpu_devices, cpu);
arch/s390/kernel/smp.c
367
per_cpu(pcpu_devices, 0).address = stap();
arch/s390/kernel/smp.c
390
if (per_cpu(pcpu_devices, cpu).address == address)
arch/s390/kernel/smp.c
416
: : "d" (per_cpu(pcpu_devices, cpu).address));
arch/s390/kernel/smp.c
655
per_cpu(pcpu_devices, cpu).polarization = val;
arch/s390/kernel/smp.c
660
return per_cpu(pcpu_devices, cpu).polarization;
arch/s390/kernel/smp.c
665
per_cpu(pcpu_devices, cpu).capacity = val;
arch/s390/kernel/smp.c
670
return per_cpu(pcpu_devices, cpu).capacity;
arch/s390/kernel/smp.c
684
return per_cpu(pcpu_devices, cpu).address;
arch/s390/kernel/smp.c
760
core_id = per_cpu(pcpu_devices, 0).address >> smp_cpu_mt_shift;
arch/s390/kernel/smp.c
980
per_cpu(pcpu_devices, 0).address = stap();
arch/s390/kernel/time.c
152
cd = &per_cpu(comparators, cpu);
arch/s390/pci/pci_irq.c
233
cpu_data = &per_cpu(irq_data, cpu);
arch/sh/kernel/cpu/sh2/smp-j2.c
116
pmsg = &per_cpu(j2_ipi_messages, cpu);
arch/sh/kernel/cpu/sh2/smp-j2.c
24
volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
arch/sh/kernel/hw_breakpoint.c
311
bp = per_cpu(bp_per_reg[i], cpu);
arch/sh/kernel/irq.c
48
seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat.__nmi_count, j), 10);
arch/sh/kernel/perf_event.c
336
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/sh/kernel/smp.c
200
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/sh/kernel/smp.c
218
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
arch/sh/kernel/smp.c
81
per_cpu(cpu_state, cpu) = CPU_ONLINE;
arch/sh/kernel/smp.c
91
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
arch/sh/kernel/topology.c
50
struct cpu *c = &per_cpu(cpu_devices, i);
arch/sparc/include/asm/cpudata_32.h
29
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
arch/sparc/include/asm/cpudata_64.h
35
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
arch/sparc/include/asm/mmu_context_64.h
85
per_cpu(per_cpu_secondary_mm, cpu) = mm;
arch/sparc/include/asm/topology_64.h
50
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
arch/sparc/kernel/iommu-common.c
43
per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
arch/sparc/kernel/leon_kernel.c
290
ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/leon_smp.c
298
work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
312
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
323
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/leon_smp.c
334
struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
arch/sparc/kernel/nmi.c
154
per_cpu(wd_enabled, cpu) = 0;
arch/sparc/kernel/nmi.c
192
if (!per_cpu(wd_enabled, cpu))
arch/sparc/kernel/nmi.c
61
if (per_cpu(nmi_touch, cpu) != 1)
arch/sparc/kernel/nmi.c
62
per_cpu(nmi_touch, cpu) = 1;
arch/sparc/kernel/pci_sun4v.c
1292
per_cpu(iommu_batch, i).pglist = (u64 *) page;
arch/sparc/kernel/smp_64.c
1252
cpumask_clear(&per_cpu(cpu_sibling_map, i));
arch/sparc/kernel/smp_64.c
1254
cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
arch/sparc/kernel/smp_64.c
1261
cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
arch/sparc/kernel/smp_64.c
1333
for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
arch/sparc/kernel/smp_64.c
1334
cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
arch/sparc/kernel/smp_64.c
1335
cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
arch/sparc/kernel/smp_64.c
1417
per_cpu(poke, cpu) = true;
arch/sparc/kernel/smp_64.c
1420
per_cpu(poke, cpu) = false;
arch/sparc/kernel/sun4d_smp.c
201
work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
239
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
250
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
261
struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
arch/sparc/kernel/sun4d_smp.c
386
ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/sun4m_smp.c
251
ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/sysfs.c
116
ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
arch/sparc/kernel/sysfs.c
210
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/sparc/kernel/sysfs.c
224
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/sparc/kernel/sysfs.c
23
struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
arch/sparc/kernel/sysfs.c
254
struct cpu *c = &per_cpu(cpu_devices, cpu);
arch/sparc/kernel/time_32.c
213
struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
arch/sparc/kernel/time_64.c
644
struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
arch/sparc/kernel/time_64.c
662
ft = &per_cpu(sparc64_freq_table, cpu);
arch/sparc/kernel/time_64.c
723
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
arch/sparc/mm/init_64.c
793
mm = per_cpu(per_cpu_secondary_mm, cpu);
arch/um/kernel/irq.c
27
#define irq_stats(x) (&per_cpu(irq_stat, x))
arch/x86/coco/sev/core.c
1198
per_cpu(runtime_data, cpu) = data;
arch/x86/coco/sev/core.c
1207
per_cpu(svsm_caa, cpu) = caa;
arch/x86/coco/sev/core.c
1208
per_cpu(svsm_caa_pa, cpu) = __pa(caa);
arch/x86/coco/sev/core.c
1217
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
578
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
649
vmsa = per_cpu(sev_vmsa, cpu);
arch/x86/coco/sev/core.c
719
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
787
cur_vmsa = per_cpu(sev_vmsa, cpu);
arch/x86/coco/sev/core.c
801
caa = per_cpu(svsm_caa, cpu);
arch/x86/coco/sev/core.c
883
per_cpu(sev_vmsa, cpu) = vmsa;
arch/x86/coco/sev/core.c
957
data = per_cpu(runtime_data, cpu);
arch/x86/coco/sev/core.c
966
address = per_cpu(svsm_caa_pa, cpu);
arch/x86/events/amd/core.c
580
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/amd/core.c
604
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/amd/core.c
619
nb = per_cpu(cpu_hw_events, i).amd_nb;
arch/x86/events/amd/core.c
636
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/core.c
1569
cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/core.c
1603
prev_left = per_cpu(pmc_prev_left[idx], cpu);
arch/x86/events/core.c
1828
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/core.c
1847
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/core.c
795
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/bts.c
148
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
arch/x86/events/intel/bts.c
187
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
arch/x86/events/intel/core.c
5649
ret = intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
arch/x86/events/intel/core.c
6029
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/core.c
6064
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/core.c
6120
pc = per_cpu(cpu_hw_events, i).shared_regs;
arch/x86/events/intel/core.c
6139
sibling = &per_cpu(cpu_hw_events, i);
arch/x86/events/intel/core.c
6191
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
arch/x86/events/intel/core.c
8566
free_excl_cntrs(&per_cpu(cpu_hw_events, c));
arch/x86/events/intel/ds.c
778
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
arch/x86/events/intel/ds.c
790
if (!per_cpu(cpu_hw_events, cpu).ds)
arch/x86/events/intel/ds.c
877
per_cpu(insn_buffer, cpu) = insn_buff;
arch/x86/events/intel/ds.c
899
kfree(per_cpu(insn_buffer, cpu));
arch/x86/events/intel/ds.c
900
per_cpu(insn_buffer, cpu) = NULL;
arch/x86/events/intel/ds.c
960
per_cpu(cpu_hw_events, cpu).ds = ds;
arch/x86/events/intel/ds.c
966
per_cpu(cpu_hw_events, cpu).ds = NULL;
arch/x86/events/intel/p4.c
1050
if (__test_and_clear_bit(idx, per_cpu(p4_running, smp_processor_id())))
arch/x86/events/intel/p4.c
994
__set_bit(idx, per_cpu(p4_running, smp_processor_id()));
arch/x86/hyperv/ivm.c
378
cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
arch/x86/hyperv/ivm.c
384
per_cpu(hv_sev_vmsa, cpu) = vmsa;
arch/x86/hyperv/mmu.c
56
return per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
arch/x86/include/asm/desc.h
53
return per_cpu(gdt_page, cpu).gdt;
arch/x86/include/asm/kmsan.h
46
return &per_cpu(metadata_array[off], cpu);
arch/x86/include/asm/percpu.h
644
&per_cpu(_name, _cpu))
arch/x86/include/asm/percpu.h
662
#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
arch/x86/include/asm/preempt.h
47
per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
arch/x86/include/asm/processor.h
216
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
arch/x86/include/asm/processor.h
694
return per_cpu(cpu_info.topo.llc_id, cpu);
arch/x86/include/asm/processor.h
699
return per_cpu(cpu_info.topo.l2c_id, cpu);
arch/x86/include/asm/resctrl.h
142
WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid);
arch/x86/include/asm/resctrl.h
143
WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid);
arch/x86/include/asm/smp.h
132
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
arch/x86/include/asm/smp.h
133
#define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
arch/x86/include/asm/smp.h
144
return per_cpu(cpu_llc_shared_map, cpu);
arch/x86/include/asm/smp.h
149
return per_cpu(cpu_l2c_shared_map, cpu);
arch/x86/include/asm/stackprotector.h
46
per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
arch/x86/include/asm/topology.h
199
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
arch/x86/include/asm/topology.h
201
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
arch/x86/include/asm/topology.h
202
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
arch/x86/include/asm/topology.h
307
return per_cpu(arch_freq_scale, cpu);
arch/x86/include/asm/uv/uv_hub.h
192
#define uv_cpu_info_per(cpu) (&per_cpu(__uv_cpu_info, cpu))
arch/x86/include/asm/uv/uv_hub.h
770
#define uv_cpu_nmi_per(cpu) (per_cpu(uv_cpu_nmi, cpu))
arch/x86/kernel/acpi/boot.c
805
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
arch/x86/kernel/acpi/madt_wakeup.c
40
u32 apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/apic_common.c
14
return per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/apic_common.c
25
return (int)per_cpu(x86_cpu_to_apicid, mps_cpu);
arch/x86/kernel/apic/apic_numachip.c
71
int local_apicid, apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/ipi.c
194
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
arch/x86/kernel/apic/ipi.c
206
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
arch/x86/kernel/apic/ipi.c
222
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
arch/x86/kernel/apic/vector.c
190
BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
arch/x86/kernel/apic/vector.c
191
per_cpu(vector_irq, newcpu)[newvec] = desc;
arch/x86/kernel/apic/vector.c
365
per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
arch/x86/kernel/apic/vector.c
374
per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
arch/x86/kernel/apic/vector.c
915
per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
arch/x86/kernel/apic/x2apic_cluster.c
110
struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i);
arch/x86/kernel/apic/x2apic_cluster.c
135
if (per_cpu(cluster_masks, cpu))
arch/x86/kernel/apic/x2apic_cluster.c
150
cmsk = per_cpu(cluster_masks, cpu_i);
arch/x86/kernel/apic/x2apic_cluster.c
156
per_cpu(cluster_masks, cpu) = cmsk;
arch/x86/kernel/apic/x2apic_cluster.c
170
per_cpu(cluster_masks, cpu) = cmsk;
arch/x86/kernel/apic/x2apic_cluster.c
188
if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
arch/x86/kernel/apic/x2apic_cluster.c
196
struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu);
arch/x86/kernel/apic/x2apic_cluster.c
200
free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
arch/x86/kernel/apic/x2apic_cluster.c
58
struct cpumask *cmsk = per_cpu(cluster_masks, cpu);
arch/x86/kernel/apic/x2apic_phys.c
46
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/x2apic_phys.c
69
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
arch/x86/kernel/apic/x2apic_savic.c
254
u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/x2apic_savic.c
270
send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
arch/x86/kernel/apic/x2apic_uv_x.c
1771
int apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/apic/x2apic_uv_x.c
696
unsigned long apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/callthunks.c
347
per_cpu(__x86_call_count, cpu),
arch/x86/kernel/callthunks.c
348
per_cpu(__x86_ret_count, cpu),
arch/x86/kernel/callthunks.c
349
per_cpu(__x86_stuffs_count, cpu),
arch/x86/kernel/callthunks.c
350
per_cpu(__x86_ctxsw_count, cpu));
arch/x86/kernel/cpu/amd.c
1280
if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
arch/x86/kernel/cpu/amd.c
1284
per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
arch/x86/kernel/cpu/amd.c
1295
return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
arch/x86/kernel/cpu/aperfmperf.c
348
per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
arch/x86/kernel/cpu/common.c
2137
tss = &per_cpu(cpu_tss_rw, cpu);
arch/x86/kernel/cpu/mce/amd.c
1062
if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)
arch/x86/kernel/cpu/mce/amd.c
1067
per_cpu(smca_banks, cpu)[bank].sysfs_id);
arch/x86/kernel/cpu/mce/amd.c
148
b = &per_cpu(smca_banks, cpu)[bank];
arch/x86/kernel/cpu/mce/amd.c
536
if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
arch/x86/kernel/cpu/mce/amd.c
573
per_cpu(bank_map, cpu) |= BIT_ULL(bank);
arch/x86/kernel/cpu/mce/core.c
1114
struct mce_hw_err *etmp = &per_cpu(hw_errs_seen, cpu);
arch/x86/kernel/cpu/mce/core.c
1119
err = &per_cpu(hw_errs_seen, cpu);
arch/x86/kernel/cpu/mce/core.c
1153
memset(&per_cpu(hw_errs_seen, cpu), 0, sizeof(struct mce_hw_err));
arch/x86/kernel/cpu/mce/core.c
1843
timer_delete_sync(&per_cpu(mce_timer, cpu));
arch/x86/kernel/cpu/mce/core.c
2538
if (bank >= per_cpu(mce_num_banks, s->id))
arch/x86/kernel/cpu/mce/core.c
2541
b = &per_cpu(mce_banks_array, s->id)[bank];
arch/x86/kernel/cpu/mce/core.c
2559
if (bank >= per_cpu(mce_num_banks, s->id))
arch/x86/kernel/cpu/mce/core.c
2562
b = &per_cpu(mce_banks_array, s->id)[bank];
arch/x86/kernel/cpu/mce/core.c
2691
dev = per_cpu(mce_device, cpu);
arch/x86/kernel/cpu/mce/core.c
2713
for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
arch/x86/kernel/cpu/mce/core.c
2719
per_cpu(mce_device, cpu) = dev;
arch/x86/kernel/cpu/mce/core.c
2736
struct device *dev = per_cpu(mce_device, cpu);
arch/x86/kernel/cpu/mce/core.c
2745
for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
arch/x86/kernel/cpu/mce/core.c
2750
per_cpu(mce_device, cpu) = NULL;
arch/x86/kernel/cpu/mce/inject.c
129
struct mce *i = &per_cpu(injectm, m->extcpu);
arch/x86/kernel/cpu/mce/inject.c
257
struct mce *mcpu = &per_cpu(injectm, cpu);
arch/x86/kernel/cpu/microcode/core.c
432
ret = per_cpu(ucode_ctrl.result, ctrl_cpu);
arch/x86/kernel/cpu/microcode/core.c
470
per_cpu(ucode_ctrl.ctrl, sibling) = ctrl;
arch/x86/kernel/cpu/microcode/core.c
480
per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
arch/x86/kernel/cpu/microcode/core.c
499
per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE;
arch/x86/kernel/cpu/microcode/core.c
631
switch (per_cpu(ucode_ctrl.result, cpu)) {
arch/x86/kernel/cpu/microcode/core.c
730
per_cpu(ucode_ctrl, cpu) = ctrl;
arch/x86/kernel/cpu/microcode/core.c
739
per_cpu(ucode_ctrl, cpu) = ctrl;
arch/x86/kernel/cpu/topology.c
408
per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
arch/x86/kernel/cpu/vmware.c
230
struct vmware_steal_time *steal = &per_cpu(vmw_steal_time, cpu);
arch/x86/kernel/cpu/vmware.c
258
struct vmware_steal_time *st = &per_cpu(vmw_steal_time, cpu);
arch/x86/kernel/espfix_64.c
141
if (likely(per_cpu(espfix_stack, cpu)))
arch/x86/kernel/espfix_64.c
202
per_cpu(espfix_stack, cpu) = addr;
arch/x86/kernel/espfix_64.c
203
per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
arch/x86/kernel/hpet.c
652
per_cpu(cpu_hpet_channel, cpu) = hc;
arch/x86/kernel/hpet.c
688
struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);
arch/x86/kernel/hpet.c
694
per_cpu(cpu_hpet_channel, cpu) = NULL;
arch/x86/kernel/hw_breakpoint.c
298
(unsigned long)&per_cpu(cpu_tss_rw, cpu),
arch/x86/kernel/hw_breakpoint.c
308
(unsigned long)&per_cpu(cpu_tlbstate, cpu),
arch/x86/kernel/hw_breakpoint.c
316
if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
arch/x86/kernel/irq.c
141
seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
arch/x86/kernel/irq.c
145
seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
arch/x86/kernel/irq.c
246
sum += per_cpu(mce_exception_count, cpu);
arch/x86/kernel/irq.c
247
sum += per_cpu(mce_poll_count, cpu);
arch/x86/kernel/irq.c
65
#define irq_stats(x) (&per_cpu(irq_stat, x))
arch/x86/kernel/irq_32.c
112
if (per_cpu(hardirq_stack_ptr, cpu))
arch/x86/kernel/irq_32.c
124
per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
arch/x86/kernel/irq_32.c
125
per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
arch/x86/kernel/irq_64.c
54
per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
arch/x86/kernel/irq_64.c
67
per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
arch/x86/kernel/irq_64.c
74
if (per_cpu(hardirq_stack_ptr, cpu))
arch/x86/kernel/irqinit.c
88
per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
arch/x86/kernel/itmt.c
170
return per_cpu(sched_core_priority, cpu);
arch/x86/kernel/itmt.c
189
per_cpu(sched_core_priority, cpu) = prio;
arch/x86/kernel/kvm.c
1077
apicid = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/kvm.c
340
struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvm.c
426
src = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvm.c
459
__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
arch/x86/kernel/kvm.c
460
__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
arch/x86/kernel/kvm.c
461
__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
arch/x86/kernel/kvm.c
545
apic_id = per_cpu(x86_cpu_to_apicid, cpu);
arch/x86/kernel/kvm.c
659
kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
arch/x86/kernel/kvm.c
684
src = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvm.c
807
struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
arch/x86/kernel/kvmclock.c
250
if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
arch/x86/kernel/kvmclock.c
267
struct pvclock_vsyscall_time_info *p = per_cpu(hv_clock_per_cpu, cpu);
arch/x86/kernel/kvmclock.c
274
if (!cpu || (p && p != per_cpu(hv_clock_per_cpu, 0)))
arch/x86/kernel/kvmclock.c
285
per_cpu(hv_clock_per_cpu, cpu) = p;
arch/x86/kernel/process.c
552
if (!per_cpu(ssb_state, cpu).shared_state)
arch/x86/kernel/process.c
556
st->shared_state = per_cpu(ssb_state, cpu).shared_state;
arch/x86/kernel/setup_percpu.c
166
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
arch/x86/kernel/setup_percpu.c
167
per_cpu(cpu_number, cpu) = cpu;
arch/x86/kernel/setup_percpu.c
177
per_cpu(x86_cpu_to_apicid, cpu) =
arch/x86/kernel/setup_percpu.c
179
per_cpu(x86_cpu_to_acpiid, cpu) =
arch/x86/kernel/setup_percpu.c
183
per_cpu(x86_cpu_to_node_map, cpu) =
arch/x86/kernel/smpboot.c
1090
per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
arch/x86/kernel/smpboot.c
1162
per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids;
arch/x86/kernel/smpboot.c
1168
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1169
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1170
zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1171
zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
1172
zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);
arch/x86/kernel/smpboot.c
977
per_cpu(current_task, cpu) = idle;
arch/x86/kernel/smpboot.c
987
per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
arch/x86/kernel/tsc.c
1009
per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
arch/x86/kernel/tsc.c
1010
per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
arch/x86/kvm/vmx/posted_intr.c
182
raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu),
arch/x86/kvm/vmx/posted_intr.c
185
&per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
arch/x86/kvm/vmx/posted_intr.c
186
raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
arch/x86/kvm/vmx/posted_intr.c
256
struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);
arch/x86/kvm/vmx/posted_intr.c
257
raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu);
arch/x86/kvm/vmx/posted_intr.c
271
INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu));
arch/x86/kvm/vmx/posted_intr.c
272
raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
arch/x86/kvm/vmx/posted_intr.c
93
raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu);
arch/x86/kvm/vmx/tdx.c
3504
INIT_LIST_HEAD(&per_cpu(associated_tdvcpus, i));
arch/x86/kvm/vmx/tdx.c
435
struct list_head *tdvcpus = &per_cpu(associated_tdvcpus, cpu);
arch/x86/kvm/vmx/tdx.c
741
list_add(&tdx->cpu_list, &per_cpu(associated_tdvcpus, cpu));
arch/x86/kvm/vmx/vmx.c
1545
&per_cpu(loaded_vmcss_on_cpu, cpu));
arch/x86/kvm/vmx/vmx.c
1549
prev = per_cpu(current_vmcs, cpu);
arch/x86/kvm/vmx/vmx.c
1551
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
arch/x86/kvm/vmx/vmx.c
3010
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
arch/x86/kvm/vmx/vmx.c
3039
list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
arch/x86/kvm/vmx/vmx.c
3136
free_vmcs(per_cpu(vmxarea, cpu));
arch/x86/kvm/vmx/vmx.c
3137
per_cpu(vmxarea, cpu) = NULL;
arch/x86/kvm/vmx/vmx.c
3167
per_cpu(vmxarea, cpu) = vmcs;
arch/x86/kvm/vmx/vmx.c
828
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
arch/x86/kvm/vmx/vmx.c
845
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
arch/x86/kvm/vmx/vmx.c
846
per_cpu(current_vmcs, cpu) = NULL;
arch/x86/kvm/vmx/vmx.c
8954
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
arch/x86/kvm/x86.c
5184
if (vcpu != per_cpu(last_vcpu, cpu)) {
arch/x86/kvm/x86.c
5194
per_cpu(last_vcpu, cpu) = vcpu;
arch/x86/kvm/x86.c
586
WARN_ON_ONCE(per_cpu(user_return_msrs, cpu).registered);
arch/x86/kvm/x86.c
9784
per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
arch/x86/mm/cpu_entry_area.c
117
cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
arch/x86/mm/cpu_entry_area.c
147
per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
arch/x86/mm/cpu_entry_area.c
172
&per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
arch/x86/mm/cpu_entry_area.c
233
cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
arch/x86/mm/cpu_entry_area.c
237
per_cpu(cpu_entry_area, cpu) = cea;
arch/x86/mm/cpu_entry_area.c
25
return per_cpu(_cea_offset, cpu);
arch/x86/mm/cpu_entry_area.c
35
per_cpu(_cea_offset, i) = i;
arch/x86/mm/cpu_entry_area.c
56
per_cpu(_cea_offset, i) = cea;
arch/x86/mm/numa.c
349
return per_cpu(x86_cpu_to_node_map, cpu);
arch/x86/mm/numa.c
368
return per_cpu(x86_cpu_to_node_map, cpu);
arch/x86/mm/numa.c
93
per_cpu(x86_cpu_to_node_map, cpu) = node;
arch/x86/mm/pti.c
463
unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
arch/x86/mm/tlb.c
1299
struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
arch/x86/mm/tlb.c
1310
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
arch/x86/mm/tlb.c
360
if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
arch/x86/mm/tlb.c
363
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
arch/x86/mm/tlb.c
478
while (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) == LOADED_MM_SWITCHING)
arch/x86/mm/tlb.c
481
if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
arch/x86/mm/tlb.c
492
if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
arch/x86/platform/uv/uv_time.c
313
struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
arch/x86/xen/enlighten.c
209
per_cpu(xen_vcpu, cpu) =
arch/x86/xen/enlighten.c
213
per_cpu(xen_vcpu, cpu) = NULL;
arch/x86/xen/enlighten.c
238
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
arch/x86/xen/enlighten.c
242
vcpup = &per_cpu(xen_vcpu_info, cpu);
arch/x86/xen/enlighten.c
258
per_cpu(xen_vcpu, cpu) = vcpup;
arch/x86/xen/enlighten_hvm.c
165
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
arch/x86/xen/enlighten_hvm.c
167
per_cpu(xen_vcpu_id, cpu) = cpu;
arch/x86/xen/enlighten_pv.c
1199
per_cpu(xen_vcpu_id, cpu) = cpu;
arch/x86/xen/enlighten_pv.c
1449
per_cpu(xen_vcpu_id, 0) = 0;
arch/x86/xen/enlighten_pv.c
1467
xen_initial_gdt = &per_cpu(gdt_page, 0);
arch/x86/xen/enlighten_pv.c
1577
if (per_cpu(xen_vcpu, cpu) == NULL)
arch/x86/xen/enlighten_pv.c
619
struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
arch/x86/xen/mmu_pv.c
1012
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
arch/x86/xen/mmu_pv.c
996
if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
arch/x86/xen/pmu.c
517
per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
arch/x86/xen/pmu.c
518
per_cpu(xenpmu_shared, cpu).flags = 0;
arch/x86/xen/pmu.c
550
free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
arch/x86/xen/pmu.c
551
per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
arch/x86/xen/smp.c
106
per_cpu(xen_debug_irq, cpu).irq = rc;
arch/x86/xen/smp.c
113
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
arch/x86/xen/smp.c
122
per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
arch/x86/xen/smp.c
34
kfree(per_cpu(xen_resched_irq, cpu).name);
arch/x86/xen/smp.c
35
per_cpu(xen_resched_irq, cpu).name = NULL;
arch/x86/xen/smp.c
36
if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
37
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
arch/x86/xen/smp.c
38
per_cpu(xen_resched_irq, cpu).irq = -1;
arch/x86/xen/smp.c
40
kfree(per_cpu(xen_callfunc_irq, cpu).name);
arch/x86/xen/smp.c
41
per_cpu(xen_callfunc_irq, cpu).name = NULL;
arch/x86/xen/smp.c
42
if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
43
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
arch/x86/xen/smp.c
44
per_cpu(xen_callfunc_irq, cpu).irq = -1;
arch/x86/xen/smp.c
46
kfree(per_cpu(xen_debug_irq, cpu).name);
arch/x86/xen/smp.c
47
per_cpu(xen_debug_irq, cpu).name = NULL;
arch/x86/xen/smp.c
48
if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
49
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
arch/x86/xen/smp.c
50
per_cpu(xen_debug_irq, cpu).irq = -1;
arch/x86/xen/smp.c
52
kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
arch/x86/xen/smp.c
53
per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
arch/x86/xen/smp.c
54
if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
arch/x86/xen/smp.c
55
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
arch/x86/xen/smp.c
57
per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
arch/x86/xen/smp.c
69
per_cpu(xen_resched_irq, cpu).name = resched_name;
arch/x86/xen/smp.c
78
per_cpu(xen_resched_irq, cpu).irq = rc;
arch/x86/xen/smp.c
83
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
arch/x86/xen/smp.c
92
per_cpu(xen_callfunc_irq, cpu).irq = rc;
arch/x86/xen/smp.c
99
per_cpu(xen_debug_irq, cpu).name = debug_name;
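The xen/smp.c entries above keep a small per-CPU record for each IRQ binding and reset it on teardown so that -1 marks "unbound". A sketch under that assumption, with hypothetical names (struct pcpu_irq, demo_irq):

#include <linux/percpu-defs.h>
#include <linux/slab.h>

struct pcpu_irq {
	int irq;	/* -1 while unbound */
	char *name;	/* kmalloc'd; freed on teardown */
};

static DEFINE_PER_CPU(struct pcpu_irq, demo_irq);

static void demo_irq_teardown(int cpu)
{
	kfree(per_cpu(demo_irq, cpu).name);
	per_cpu(demo_irq, cpu).name = NULL;
	if (per_cpu(demo_irq, cpu).irq >= 0) {
		/* the real code unbinds the IRQ handler here first */
		per_cpu(demo_irq, cpu).irq = -1;
	}
}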
arch/x86/xen/smp_hvm.c
51
per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
arch/x86/xen/smp_pv.c
100
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
arch/x86/xen/smp_pv.c
101
per_cpu(xen_irq_work, cpu).irq = -1;
arch/x86/xen/smp_pv.c
104
kfree(per_cpu(xen_pmu_irq, cpu).name);
arch/x86/xen/smp_pv.c
105
per_cpu(xen_pmu_irq, cpu).name = NULL;
arch/x86/xen/smp_pv.c
106
if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
arch/x86/xen/smp_pv.c
107
unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
arch/x86/xen/smp_pv.c
108
per_cpu(xen_pmu_irq, cpu).irq = -1;
arch/x86/xen/smp_pv.c
118
per_cpu(xen_irq_work, cpu).name = callfunc_name;
arch/x86/xen/smp_pv.c
127
per_cpu(xen_irq_work, cpu).irq = rc;
arch/x86/xen/smp_pv.c
131
per_cpu(xen_pmu_irq, cpu).name = pmu_name;
arch/x86/xen/smp_pv.c
138
per_cpu(xen_pmu_irq, cpu).irq = rc;
arch/x86/xen/smp_pv.c
279
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
arch/x86/xen/smp_pv.c
300
per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
arch/x86/xen/smp_pv.c
97
kfree(per_cpu(xen_irq_work, cpu).name);
arch/x86/xen/smp_pv.c
98
per_cpu(xen_irq_work, cpu).name = NULL;
arch/x86/xen/smp_pv.c
99
if (per_cpu(xen_irq_work, cpu).irq >= 0) {
arch/x86/xen/spinlock.c
100
per_cpu(irq_name, cpu) = NULL;
arch/x86/xen/spinlock.c
105
irq = per_cpu(lock_kicker_irq, cpu);
arch/x86/xen/spinlock.c
110
per_cpu(lock_kicker_irq, cpu) = -1;
arch/x86/xen/spinlock.c
23
int irq = per_cpu(lock_kicker_irq, cpu);
arch/x86/xen/spinlock.c
72
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
arch/x86/xen/spinlock.c
73
cpu, per_cpu(lock_kicker_irq, cpu));
arch/x86/xen/spinlock.c
76
per_cpu(irq_name, cpu) = name;
arch/x86/xen/spinlock.c
86
per_cpu(lock_kicker_irq, cpu) = irq;
arch/x86/xen/spinlock.c
99
kfree(per_cpu(irq_name, cpu));
arch/x86/xen/time.c
338
evt = &per_cpu(xen_clock_events, cpu).evt;
arch/x86/xen/time.c
348
struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
arch/xtensa/include/asm/mmu_context.h
35
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
arch/xtensa/kernel/irq.c
61
seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
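The nmi_count entry above prints one per-CPU counter column per CPU from a seq_file show routine. A minimal sketch, hypothetical counter name:

#include <linux/seq_file.h>
#include <linux/percpu-defs.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo_show(struct seq_file *p)
{
	int cpu;

	for_each_online_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(demo_count, cpu));
	seq_putc(p, '\n');
}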
arch/xtensa/kernel/setup.c
381
struct cpu *cpu = &per_cpu(cpu_data, i);
arch/xtensa/kernel/smp.c
418
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
arch/xtensa/kernel/smp.c
457
per_cpu(ipi_data, cpu).ipi_count[i]);
arch/xtensa/kernel/time.c
129
struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
arch/xtensa/kernel/traps.c
443
per_cpu(exc_table, cpu).type[cause] = (handler);\
arch/xtensa/kernel/traps.c
451
void *previous = per_cpu(exc_table, 0).default_handler[cause];
block/blk-mq.c
1263
blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
block/blk-mq.c
1304
if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
block/blk-mq.c
1305
smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
block/blk-mq.c
5293
init_llist_head(&per_cpu(blk_cpu_done, i));
block/blk-mq.c
5295
INIT_CSD(&per_cpu(blk_cpu_csd, i),
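The blk-mq entries above pair a per-CPU lockless list with a per-CPU call_single_data: a completion is pushed remotely, and the owning CPU is IPI'd only when its list goes from empty to non-empty. A hedged sketch with hypothetical names:

#include <linux/llist.h>
#include <linux/smp.h>
#include <linux/percpu-defs.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(struct llist_head, done_list);
static DEFINE_PER_CPU(call_single_data_t, done_csd);

static void done_func(void *info)
{
	/* runs on the target CPU; drain its done_list here */
}

static void done_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		init_llist_head(&per_cpu(done_list, cpu));
		INIT_CSD(&per_cpu(done_csd, cpu), done_func, NULL);
	}
}

static void queue_done(struct llist_node *node, int cpu)
{
	/* llist_add() returns true iff the list was previously empty */
	if (llist_add(node, &per_cpu(done_list, cpu)))
		smp_call_function_single_async(cpu, &per_cpu(done_csd, cpu));
}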
drivers/accel/habanalabs/common/hldio.c
144
sum += per_cpu(*hdev->hldio.inflight_ios, i);
drivers/acpi/acpi_processor.c
212
if (per_cpu(processor_device_array, pr->id) != NULL &&
drivers/acpi/acpi_processor.c
213
per_cpu(processor_device_array, pr->id) != device) {
drivers/acpi/acpi_processor.c
223
per_cpu(processor_device_array, pr->id) = device;
drivers/acpi/acpi_processor.c
224
per_cpu(processors, pr->id) = pr;
drivers/acpi/acpi_processor.c
254
per_cpu(processors, pr->id) = NULL;
drivers/acpi/acpi_processor.c
42
pr = per_cpu(processors, cpu);
drivers/acpi/acpi_processor.c
470
per_cpu(processors, pr->id) = NULL;
drivers/acpi/acpi_processor.c
511
per_cpu(processor_device_array, pr->id) = NULL;
drivers/acpi/acpi_processor.c
512
per_cpu(processors, pr->id) = NULL;
drivers/acpi/arm64/cpuidle.c
21
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/cppc_acpi.c
1006
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1085
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1123
cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1183
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1208
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1236
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1261
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1343
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
drivers/acpi/cppc_acpi.c
1348
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
drivers/acpi/cppc_acpi.c
1433
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1482
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
drivers/acpi/cppc_acpi.c
1485
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
drivers/acpi/cppc_acpi.c
1559
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1562
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1750
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
1752
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
drivers/acpi/cppc_acpi.c
1900
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
drivers/acpi/cppc_acpi.c
1904
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
drivers/acpi/cppc_acpi.c
366
struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
drivers/acpi/cppc_acpi.c
464
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
480
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
509
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
drivers/acpi/cppc_acpi.c
531
match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
drivers/acpi/cppc_acpi.c
854
per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
drivers/acpi/cppc_acpi.c
897
per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
drivers/acpi/cppc_acpi.c
902
per_cpu(cpc_desc_ptr, pr->id) = NULL;
drivers/acpi/cppc_acpi.c
937
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
drivers/acpi/cppc_acpi.c
950
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
drivers/acpi/processor_driver.c
102
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_driver.c
133
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
1285
dev = per_cpu(acpi_cpuidle_device, pr->id);
drivers/acpi/processor_idle.c
1324
_pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
1327
dev = per_cpu(acpi_cpuidle_device, cpu);
drivers/acpi/processor_idle.c
1337
_pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
1342
dev = per_cpu(acpi_cpuidle_device, cpu);
drivers/acpi/processor_idle.c
1367
pr = per_cpu(processors, cpu);
drivers/acpi/processor_idle.c
1424
per_cpu(acpi_cpuidle_device, pr->id) = dev;
drivers/acpi/processor_idle.c
1433
per_cpu(acpi_cpuidle_device, pr->id) = NULL;
drivers/acpi/processor_idle.c
1440
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
drivers/acpi/processor_idle.c
583
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
680
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
694
cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
709
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
drivers/acpi/processor_idle.c
750
per_cpu(acpi_cstate[count], dev->cpu) = cx;
drivers/acpi/processor_perflib.c
156
pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
180
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
215
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
613
pr = per_cpu(processors, i);
drivers/acpi/processor_perflib.c
632
pr = per_cpu(processors, i);
drivers/acpi/processor_perflib.c
651
pr = per_cpu(processors, i);
drivers/acpi/processor_perflib.c
677
match_pr = per_cpu(processors, j);
drivers/acpi/processor_perflib.c
705
match_pr = per_cpu(processors, j);
drivers/acpi/processor_perflib.c
722
pr = per_cpu(processors, i);
drivers/acpi/processor_perflib.c
752
pr = per_cpu(processors, cpu);
drivers/acpi/processor_perflib.c
784
pr = per_cpu(processors, cpu);
drivers/acpi/processor_thermal.c
132
pr = per_cpu(processors, i);
drivers/acpi/processor_thermal.c
167
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_thermal.c
191
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/acpi/processor_thermal.c
45
per_cpu(cpufreq_thermal_reduction_step, phys_package_first_cpu(cpu))
drivers/acpi/processor_throttling.c
1112
match_pr = per_cpu(processors, i);
drivers/acpi/processor_throttling.c
118
match_pr = per_cpu(processors, j);
drivers/acpi/processor_throttling.c
150
match_pr = per_cpu(processors, j);
drivers/acpi/processor_throttling.c
172
pr = per_cpu(processors, i);
drivers/acpi/processor_throttling.c
211
pr = per_cpu(processors, cpu);
drivers/acpi/processor_throttling.c
72
pr = per_cpu(processors, i);
drivers/acpi/processor_throttling.c
93
pr = per_cpu(processors, i);
drivers/acpi/riscv/cpuidle.c
26
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/base/arch_topology.c
113
rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
drivers/base/arch_topology.c
161
per_cpu(arch_freq_scale, i) = scale;
drivers/base/arch_topology.c
205
WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
drivers/base/arch_topology.c
253
(per_cpu(capacity_freq_ref, cpu) ?: 1);
drivers/base/arch_topology.c
260
(per_cpu(capacity_freq_ref, cpu) ?: 1);
drivers/base/arch_topology.c
303
per_cpu(capacity_freq_ref, cpu) =
drivers/base/arch_topology.c
348
per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
drivers/base/arch_topology.c
362
per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
drivers/base/arch_topology.c
407
per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
drivers/base/arch_topology.c
409
per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
drivers/base/arch_topology.c
90
rcu_assign_pointer(per_cpu(sft_data, cpu), data);
drivers/base/cacheinfo.c
26
#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
drivers/base/cacheinfo.c
639
#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
drivers/base/cacheinfo.c
645
#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
drivers/base/cpu.c
102
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
drivers/base/cpu.c
437
per_cpu(cpu_sys_devices, num) = &cpu->dev;
drivers/base/cpu.c
449
return per_cpu(cpu_sys_devices, cpu);
drivers/base/cpu.c
565
struct cpu *c = &per_cpu(cpu_devices, cpu);
drivers/base/cpu.c
575
unregister_cpu(&per_cpu(cpu_devices, num));
drivers/base/topology.c
217
per_cpu(cpu_scale, cpu) = capacity;
drivers/clocksource/arm_arch_timer.c
488
per_cpu(timer_unstable_counter_workaround, i) = wa;
drivers/clocksource/ingenic-timer.c
120
csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
drivers/clocksource/timer-econet-en751221.c
147
struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
drivers/clocksource/timer-econet-en751221.c
98
struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
drivers/clocksource/timer-nxp-pit.c
231
per_cpu(pit_timers, cpu) = pit;
drivers/clocksource/timer-nxp-pit.c
240
per_cpu(pit_timers, cpu) = NULL;
drivers/clocksource/timer-nxp-pit.c
245
struct pit_timer *pit = per_cpu(pit_timers, cpu);
drivers/clocksource/timer-nxp-stm.c
318
per_cpu(stm_timers, cpu) = stm_timer;
drivers/clocksource/timer-nxp-stm.c
329
struct stm_timer *stm_timer = per_cpu(stm_timers, cpu);
drivers/cpufreq/amd_freq_sensitivity.c
46
struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
drivers/cpufreq/cppc_cpufreq.c
100
__cppc_scale_freq_tick(&per_cpu(cppc_freq_inv, smp_processor_id()));
drivers/cpufreq/cppc_cpufreq.c
133
struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());
drivers/cpufreq/cppc_cpufreq.c
157
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
drivers/cpufreq/cppc_cpufreq.c
205
cppc_fi = &per_cpu(cppc_freq_inv, cpu);
drivers/cpufreq/cppc_cpufreq.c
418
return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
drivers/cpufreq/cppc_cpufreq.c
560
per_cpu(efficiency_class, cpu) = index;
drivers/cpufreq/cppc_cpufreq.c
95
per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
drivers/cpufreq/cpufreq.c
1357
per_cpu(cpufreq_cpu_data, cpu) = NULL;
drivers/cpufreq/cpufreq.c
1446
per_cpu(cpufreq_cpu_data, j) = policy;
drivers/cpufreq/cpufreq.c
1602
policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
1673
policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
1754
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
195
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
203
return per_cpu(cpufreq_cpu_data, cpu);
drivers/cpufreq/cpufreq.c
2606
WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
drivers/cpufreq/cpufreq_governor.c
103
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
drivers/cpufreq/cpufreq_governor.c
138
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
drivers/cpufreq/cpufreq_governor.c
333
struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
drivers/cpufreq/cpufreq_governor.c
369
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
drivers/cpufreq/cpufreq_governor.c
384
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
drivers/cpufreq/cpufreq_governor.c
533
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
drivers/cpufreq/intel_pstate.c
3548
struct acpi_processor *pr = per_cpu(processors, i);
drivers/cpufreq/intel_pstate.c
3592
struct acpi_processor *pr = per_cpu(processors, i);
drivers/cpufreq/loongson3_cpufreq.c
245
if (per_cpu(freq_data, cpu))
drivers/cpufreq/loongson3_cpufreq.c
279
per_cpu(freq_data, cpu) = data;
drivers/cpufreq/loongson3_cpufreq.c
293
policy->freq_table = per_cpu(freq_data, cpu)->table;
drivers/cpufreq/loongson3_cpufreq.c
294
policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency;
drivers/cpufreq/loongson3_cpufreq.c
299
per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
drivers/cpufreq/loongson3_cpufreq.c
309
loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level);
drivers/cpufreq/pcc-cpufreq.c
258
pr = per_cpu(processors, cpu);
drivers/cpufreq/powernow-k8.c
1080
per_cpu(powernow_data, cpu) = data;
drivers/cpufreq/powernow-k8.c
1094
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
drivers/cpufreq/powernow-k8.c
1106
per_cpu(powernow_data, cpu) = NULL;
drivers/cpufreq/powernow-k8.c
1119
struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
drivers/cpufreq/powernow-k8.c
932
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
drivers/cpufreq/powernv-cpufreq.c
1083
per_cpu(chip_info, cpu) = &chips[i];
drivers/cpufreq/powernv-cpufreq.c
399
struct chip *chip = per_cpu(chip_info, policy->cpu); \
drivers/cpufreq/sh-cpufreq.c
108
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
drivers/cpufreq/sh-cpufreq.c
139
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
drivers/cpufreq/sh-cpufreq.c
39
return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
drivers/cpufreq/sh-cpufreq.c
47
struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
drivers/cpufreq/sh-cpufreq.c
91
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
drivers/cpufreq/speedstep-centrino.c
261
per_cpu(centrino_model, policy->cpu) = model;
drivers/cpufreq/speedstep-centrino.c
296
if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
drivers/cpufreq/speedstep-centrino.c
297
(per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
drivers/cpufreq/speedstep-centrino.c
298
(per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
drivers/cpufreq/speedstep-centrino.c
303
if ((!per_cpu(centrino_model, cpu)) ||
drivers/cpufreq/speedstep-centrino.c
304
(!per_cpu(centrino_model, cpu)->op_points))
drivers/cpufreq/speedstep-centrino.c
309
per_cpu(centrino_model, cpu)->op_points[i].frequency
drivers/cpufreq/speedstep-centrino.c
312
if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
drivers/cpufreq/speedstep-centrino.c
313
return per_cpu(centrino_model, cpu)->
drivers/cpufreq/speedstep-centrino.c
317
return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
drivers/cpufreq/speedstep-centrino.c
367
per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
drivers/cpufreq/speedstep-centrino.c
369
if (!per_cpu(centrino_cpu, policy->cpu)) {
drivers/cpufreq/speedstep-centrino.c
398
policy->freq_table = per_cpu(centrino_model, policy->cpu)->op_points;
drivers/cpufreq/speedstep-centrino.c
407
if (per_cpu(centrino_model, cpu))
drivers/cpufreq/speedstep-centrino.c
408
per_cpu(centrino_model, cpu) = NULL;
drivers/cpufreq/speedstep-centrino.c
429
if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
drivers/cpufreq/speedstep-centrino.c
435
op_points = &per_cpu(centrino_model, cpu)->op_points[index];
drivers/cpufreq/vexpress-spc-cpufreq.c
102
return per_cpu(cpu_last_req_freq, cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
117
prev_rate = per_cpu(cpu_last_req_freq, cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
118
per_cpu(cpu_last_req_freq, cpu) = rate;
drivers/cpufreq/vexpress-spc-cpufreq.c
119
per_cpu(physical_cluster, cpu) = new_cluster;
drivers/cpufreq/vexpress-spc-cpufreq.c
143
per_cpu(cpu_last_req_freq, cpu) = prev_rate;
drivers/cpufreq/vexpress-spc-cpufreq.c
144
per_cpu(physical_cluster, cpu) = old_cluster;
drivers/cpufreq/vexpress-spc-cpufreq.c
184
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
drivers/cpufreq/vexpress-spc-cpufreq.c
429
per_cpu(physical_cluster, cpu) = cur_cluster;
drivers/cpufreq/vexpress-spc-cpufreq.c
432
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
drivers/cpufreq/vexpress-spc-cpufreq.c
443
per_cpu(cpu_last_req_freq, policy->cpu) =
drivers/cpufreq/vexpress-spc-cpufreq.c
77
cpu_freq = per_cpu(cpu_last_req_freq, j);
drivers/cpufreq/vexpress-spc-cpufreq.c
79
if (cluster == per_cpu(physical_cluster, j) &&
drivers/cpufreq/vexpress-spc-cpufreq.c
89
u32 cur_cluster = per_cpu(physical_cluster, cpu);
drivers/cpufreq/virtual-cpufreq.c
161
num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu);
drivers/cpufreq/virtual-cpufreq.c
286
per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries;
drivers/cpuidle/coupled.c
334
call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
drivers/cpuidle/coupled.c
646
other_dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/coupled.c
669
csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
drivers/cpuidle/coupled.c
742
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/coupled.c
758
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-arm.c
160
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-cps.c
108
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/cpuidle-cps.c
159
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/cpuidle-powernv.c
166
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-powernv.c
178
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-psci.c
449
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-pseries.c
305
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-pseries.c
317
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle-riscv-sbi.c
542
dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/cpuidle.c
617
per_cpu(cpuidle_devices, dev->cpu) = NULL;
drivers/cpuidle/cpuidle.c
643
if (per_cpu(cpuidle_devices, cpu)) {
drivers/cpuidle/cpuidle.c
659
per_cpu(cpuidle_devices, cpu) = dev;
drivers/cpuidle/cpuidle.c
754
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/cpuidle.c
786
device = &per_cpu(cpuidle_dev, cpu);
drivers/cpuidle/driver.c
387
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/cpuidle/driver.c
40
return per_cpu(cpuidle_drivers, cpu);
drivers/cpuidle/driver.c
60
per_cpu(cpuidle_drivers, cpu) = NULL;
drivers/cpuidle/driver.c
84
per_cpu(cpuidle_drivers, cpu) = drv;
drivers/cpuidle/governors/ladder.c
138
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
drivers/cpuidle/governors/menu.c
510
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
drivers/crypto/caam/qi.c
452
drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
drivers/crypto/caam/qi.c
528
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
drivers/crypto/caam/qi.c
530
free_netdev(per_cpu(pcpu_qipriv.net_dev, i));
drivers/crypto/caam/qi.c
655
per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
drivers/crypto/caam/qi.c
716
kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
drivers/crypto/nx/nx-common-powernv.c
1000
txwin = per_cpu(cpu_txwin, i);
drivers/crypto/nx/nx-common-powernv.c
1004
per_cpu(cpu_txwin, i) = NULL;
drivers/crypto/nx/nx-common-powernv.c
736
per_cpu(cpu_txwin, i) = txwin;
drivers/crypto/nx/nx-common-powernv.c
741
if (!per_cpu(cpu_txwin, i)) {
drivers/crypto/padlock-aes.c
155
if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
drivers/crypto/padlock-aes.c
156
&ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
drivers/crypto/padlock-aes.c
157
per_cpu(paes_last_cword, cpu) = NULL;
drivers/crypto/padlock-aes.c
175
if (cword != per_cpu(paes_last_cword, cpu))
drivers/crypto/padlock-aes.c
185
per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
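The padlock-aes entries above cache, per CPU, a pointer to the context the hardware last loaded, so a matching context can skip the reload. A sketch of that idea with hypothetical names:

#include <linux/percpu-defs.h>
#include <linux/smp.h>

struct hw_ctx;	/* opaque device context */

static DEFINE_PER_CPU(const struct hw_ctx *, last_ctx);

static bool needs_reload(const struct hw_ctx *ctx, int cpu)
{
	return per_cpu(last_ctx, cpu) != ctx;
}

static void mark_loaded(const struct hw_ctx *ctx)
{
	/* assumes preemption is already disabled, as in the driver */
	per_cpu(last_ctx, raw_smp_processor_id()) = ctx;
}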
drivers/firmware/psci/psci_checker.c
388
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
drivers/hv/mshv_vtl_main.c
206
static void mshv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu)
drivers/hv/mshv_vtl_main.c
230
per_cpu->reg_page = reg_page;
drivers/hv/mshv_vtl_main.c
296
struct hv_per_cpu_context *per_cpu;
drivers/hv/mshv_vtl_main.c
303
per_cpu = this_cpu_ptr(hv_context.cpu_context);
drivers/hv/mshv_vtl_main.c
305
msg = (struct hv_message *)per_cpu->hyp_synic_message_page + VTL2_VMBUS_SINT_INDEX;
drivers/hv/mshv_vtl_main.c
311
event_flags = (union hv_synic_event_flags *)per_cpu->hyp_synic_event_page +
drivers/hv/mshv_vtl_main.c
328
struct mshv_vtl_per_cpu *per_cpu = this_cpu_ptr(&mshv_vtl_per_cpu);
drivers/hv/mshv_vtl_main.c
330
per_cpu->run = (struct mshv_vtl_run *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
drivers/hv/mshv_vtl_main.c
331
if (!per_cpu->run)
drivers/hv/mshv_vtl_main.c
335
mshv_vtl_configure_reg_page(per_cpu);
drivers/hwtracing/coresight/coresight-core.c
1014
csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev));
drivers/hwtracing/coresight/coresight-core.c
72
per_cpu(csdev_sink, cpu) = csdev;
drivers/hwtracing/coresight/coresight-core.c
78
return per_cpu(csdev_sink, cpu);
drivers/hwtracing/coresight/coresight-core.c
787
sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
drivers/hwtracing/coresight/coresight-cpu-debug.c
396
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
428
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
447
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
465
drvdata = per_cpu(debug_drvdata, cpu);
drivers/hwtracing/coresight/coresight-cpu-debug.c
579
if (per_cpu(debug_drvdata, drvdata->cpu)) {
drivers/hwtracing/coresight/coresight-cpu-debug.c
593
per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
drivers/hwtracing/coresight/coresight-cpu-debug.c
628
per_cpu(debug_drvdata, drvdata->cpu) = NULL;
drivers/hwtracing/coresight/coresight-cpu-debug.c
641
per_cpu(debug_drvdata, drvdata->cpu) = NULL;
drivers/hwtracing/coresight/coresight-etm-perf.c
359
csdev = per_cpu(csdev_src, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
482
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
633
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
drivers/hwtracing/coresight/coresight-etm-perf.c
840
per_cpu(csdev_src, cpu) = csdev;
drivers/hwtracing/coresight/coresight-etm-perf.c
843
per_cpu(csdev_src, cpu) = NULL;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2344
per_cpu(delayed_probe, drvdata->cpu) = delayed;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2453
per_cpu(delayed_probe, cpu) = NULL;
drivers/hwtracing/coresight/coresight-etm4x-core.c
2465
had_delayed_probe = per_cpu(delayed_probe, drvdata->cpu);
drivers/hwtracing/coresight/coresight-platform.c
748
pr = per_cpu(processors, i);
drivers/hwtracing/coresight/coresight-sysfs.c
236
per_cpu(tracer_path, cpu) = path;
drivers/hwtracing/coresight/coresight-sysfs.c
286
path = per_cpu(tracer_path, cpu);
drivers/hwtracing/coresight/coresight-sysfs.c
287
per_cpu(tracer_path, cpu) = NULL;
drivers/idle/intel_idle.c
1769
struct acpi_processor *pr = per_cpu(processors, cpu);
drivers/infiniband/sw/siw/siw_main.c
189
usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
drivers/infiniband/sw/siw/siw_main.c
200
atomic_inc(&per_cpu(siw_use_cnt, tx_cpu));
drivers/infiniband/sw/siw/siw_main.c
209
atomic_dec(&per_cpu(siw_use_cnt, cpu));
drivers/infiniband/sw/siw/siw_qp_tx.c
1214
tx_task = &per_cpu(siw_tx_task_g, cpu);
drivers/infiniband/sw/siw/siw_qp_tx.c
1238
wake_up(&per_cpu(siw_tx_task_g, cpu).waiting);
drivers/infiniband/sw/siw/siw_qp_tx.c
1249
struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
drivers/infiniband/sw/siw/siw_qp_tx.c
1301
llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);
drivers/infiniband/sw/siw/siw_qp_tx.c
1303
wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);
drivers/irqchip/irq-aclint-sswi.c
24
writel(0x1, per_cpu(sswi_cpu_regs, cpu));
drivers/irqchip/irq-aclint-sswi.c
95
per_cpu(sswi_cpu_regs, cpu) = reg + hart_index * 4;
drivers/irqchip/irq-armada-370-xp.c
172
void __iomem *per_cpu;
drivers/irqchip/irq-armada-370-xp.c
220
writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
drivers/irqchip/irq-armada-370-xp.c
231
writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
314
reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
316
writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
319
writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
369
writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
397
writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
drivers/irqchip/irq-armada-370-xp.c
406
reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
408
writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
416
reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
418
writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
445
writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
drivers/irqchip/irq-armada-370-xp.c
532
writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);
drivers/irqchip/irq-armada-370-xp.c
538
writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
541
writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
drivers/irqchip/irq-armada-370-xp.c
544
writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
616
writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
643
cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
drivers/irqchip/irq-armada-370-xp.c
645
writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
drivers/irqchip/irq-armada-370-xp.c
660
cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
drivers/irqchip/irq-armada-370-xp.c
680
cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
drivers/irqchip/irq-armada-370-xp.c
710
irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
drivers/irqchip/irq-armada-370-xp.c
733
mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
755
writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
772
writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
drivers/irqchip/irq-armada-370-xp.c
783
writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
785
writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
drivers/irqchip/irq-armada-370-xp.c
848
err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
drivers/irqchip/irq-csky-mpintc.c
265
per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);
drivers/irqchip/irq-csky-mpintc.c
266
writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
drivers/irqchip/irq-gic-v3.c
1235
per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
drivers/irqchip/irq-gic-v3.c
1239
bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
drivers/irqchip/irq-gic-v5-irs.c
202
irs_data = per_cpu(per_cpu_irs_data, smp_processor_id());
drivers/irqchip/irq-gic-v5-irs.c
377
if (!per_cpu(cpu_iaffid, cpuid).valid) {
drivers/irqchip/irq-gic-v5-irs.c
382
*iaffid = per_cpu(cpu_iaffid, cpuid).iaffid;
drivers/irqchip/irq-gic-v5-irs.c
520
irs_data = per_cpu(per_cpu_irs_data, cpuid);
drivers/irqchip/irq-gic-v5-irs.c
644
per_cpu(cpu_iaffid, cpu).iaffid = iaffids[i];
drivers/irqchip/irq-gic-v5-irs.c
645
per_cpu(cpu_iaffid, cpu).valid = true;
drivers/irqchip/irq-gic-v5-irs.c
648
per_cpu(per_cpu_irs_data, cpu) = irs_data;
drivers/irqchip/irq-gic-v5-irs.c
870
per_cpu(cpu_iaffid, cpu).iaffid = gicc->iaffid;
drivers/irqchip/irq-gic-v5-irs.c
871
per_cpu(cpu_iaffid, cpu).valid = true;
drivers/irqchip/irq-gic-v5-irs.c
872
pr_debug("Processed IAFFID %u for CPU%d", per_cpu(cpu_iaffid, cpu).iaffid, cpu);
drivers/irqchip/irq-gic-v5-irs.c
875
per_cpu(per_cpu_irs_data, cpu) = current_irs_data;
drivers/irqchip/irq-loongarch-avec.c
304
per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
drivers/irqchip/irq-loongarch-avec.c
311
per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
drivers/irqchip/irq-ompic.c
106
set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
drivers/irqchip/irq-ompic.c
124
unsigned long *pending_ops = &per_cpu(ops, cpu);
drivers/leds/trigger/ledtrig-cpu.c
154
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
drivers/mailbox/zynqmp-ipi-mailbox.c
853
per_cpu(per_cpu_pdata, cpu) = pdata;
drivers/perf/arm_pmu.c
592
if (per_cpu(cpu_irq, cpu) == irq)
drivers/perf/arm_pmu.c
606
if (per_cpu(cpu_irq, cpu) != irq)
drivers/perf/arm_pmu.c
609
ops = per_cpu(cpu_irq_ops, cpu);
drivers/perf/arm_pmu.c
619
if (per_cpu(cpu_irq, cpu) == 0)
drivers/perf/arm_pmu.c
621
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
drivers/perf/arm_pmu.c
624
per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
drivers/perf/arm_pmu.c
626
per_cpu(cpu_irq, cpu) = 0;
drivers/perf/arm_pmu.c
627
per_cpu(cpu_irq_ops, cpu) = NULL;
drivers/perf/arm_pmu.c
691
per_cpu(cpu_irq, cpu) = irq;
drivers/perf/arm_pmu.c
692
per_cpu(cpu_irq_ops, cpu) = irq_ops;
drivers/perf/arm_pmu.c
703
return per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu.c
729
per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
drivers/perf/arm_pmu.c
744
per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
drivers/perf/arm_pmu_acpi.c
220
per_cpu(pmu_irqs, cpu) = irq;
drivers/perf/arm_pmu_acpi.c
230
irq = per_cpu(pmu_irqs, cpu);
drivers/perf/arm_pmu_acpi.c
241
if (per_cpu(pmu_irqs, irq_cpu) == irq)
drivers/perf/arm_pmu_acpi.c
242
per_cpu(pmu_irqs, irq_cpu) = 0;
drivers/perf/arm_pmu_acpi.c
256
pmu = per_cpu(probed_pmus, cpu);
drivers/perf/arm_pmu_acpi.c
279
int other_irq = per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu_acpi.c
298
int irq = per_cpu(pmu_irqs, cpu);
drivers/perf/arm_pmu_acpi.c
300
per_cpu(probed_pmus, cpu) = pmu;
drivers/perf/arm_pmu_acpi.c
305
per_cpu(hw_events->irq, cpu) = irq;
drivers/perf/arm_pmu_acpi.c
325
if (per_cpu(probed_pmus, cpu))
drivers/perf/arm_pmu_acpi.c
345
unsigned long cpu_cpuid = per_cpu(cpu_data, cpu).reg_midr;
drivers/perf/arm_pmu_acpi.c
382
struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
drivers/perf/arm_pmu_acpi.c
397
cpuid = per_cpu(cpu_data, cpu).reg_midr;
drivers/perf/arm_pmu_platform.c
146
if (per_cpu(hw_events->irq, cpu)) {
drivers/perf/arm_pmu_platform.c
151
per_cpu(hw_events->irq, cpu) = irq;
drivers/perf/arm_pmu_platform.c
164
int irq = per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu_platform.c
182
int irq = per_cpu(hw_events->irq, cpu);
drivers/perf/arm_pmu_platform.c
54
per_cpu(hw_events->irq, cpu) = irq;
drivers/platform/x86/amd/hfi/hfi.c
312
struct amd_hfi_cpuinfo *hfi_info = &per_cpu(amd_hfi_cpuinfo, cpu);
drivers/platform/x86/intel/pmc/cnp.c
232
per_cpu(pkg_cst_config, cpunum) = val;
drivers/platform/x86/intel/pmc/cnp.c
243
wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, per_cpu(pkg_cst_config, cpunum));
drivers/platform/x86/intel/pmc/cnp.c
246
per_cpu(pkg_cst_config, cpunum));
drivers/platform/x86/intel/tpmi_power_domains.c
123
return per_cpu(tpmi_cpu_info, cpu_no).punit_core_id;
drivers/platform/x86/intel/tpmi_power_domains.c
132
return per_cpu(tpmi_cpu_info, cpu_no).punit_domain_id;
drivers/platform/x86/intel/tpmi_power_domains.c
145
info = &per_cpu(tpmi_cpu_info, cpu_no);
drivers/platform/x86/intel/tpmi_power_domains.c
189
struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu);
drivers/pmdomain/governor.c
377
dev = per_cpu(cpuidle_devices, cpu);
drivers/powercap/dtpm_cpu.c
155
per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
drivers/powercap/dtpm_cpu.c
174
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
drivers/powercap/dtpm_cpu.c
185
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
drivers/powercap/dtpm_cpu.c
201
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
drivers/powercap/dtpm_cpu.c
225
per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;
drivers/powercap/dtpm_cpu.c
251
per_cpu(dtpm_per_cpu, cpu) = NULL;
drivers/powercap/idle_inject.c
153
ii_dev = per_cpu(idle_inject_device, cpu);
drivers/powercap/idle_inject.c
347
if (per_cpu(idle_inject_device, cpu)) {
drivers/powercap/idle_inject.c
352
per_cpu(idle_inject_device, cpu) = ii_dev;
drivers/powercap/idle_inject.c
361
per_cpu(idle_inject_device, cpu_rb) = NULL;
drivers/powercap/idle_inject.c
402
per_cpu(idle_inject_device, cpu) = NULL;
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2610
p = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2631
p = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
2717
p = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1045
fps = &per_cpu(bnx2fc_percpu, cpu);
drivers/scsi/bnx2i/bnx2i_hwi.c
1921
p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc)));
drivers/scsi/bnx2i/bnx2i_init.c
416
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/bnx2i/bnx2i_init.c
435
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/bnx2i/bnx2i_init.c
490
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/bnx2i/bnx2i_iscsi.c
1498
p = &per_cpu(bnx2i_percpu, cpu);
drivers/scsi/fcoe/fcoe.c
1396
fps = &per_cpu(fcoe_percpu, cpu);
drivers/scsi/fcoe/fcoe.c
2308
pp = &per_cpu(fcoe_percpu, cpu);
drivers/scsi/qedi/qedi_main.c
1286
p = &per_cpu(qedi_percpu, cpu);
drivers/scsi/qedi/qedi_main.c
2906
p = &per_cpu(qedi_percpu, cpu);
drivers/soc/fsl/qbman/bman.c
599
portal = &per_cpu(bman_affine_portal, c->cpu);
drivers/soc/fsl/qbman/qman.c
1361
portal = &per_cpu(qman_affine_portal, c->cpu);
drivers/thermal/intel/intel_hfi.c
182
index = per_cpu(hfi_cpu_info, cpu).index;
drivers/thermal/intel/intel_hfi.c
263
info = &per_cpu(hfi_cpu_info, cpu);
drivers/thermal/intel/intel_hfi.c
427
info = &per_cpu(hfi_cpu_info, cpu);
drivers/thermal/intel/intel_hfi.c
511
struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu);
drivers/thermal/intel/intel_hfi.c
598
struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);
drivers/thermal/intel/therm_throt.c
149
per_cpu(thermal_state, cpu).event.name); \
drivers/thermal/intel/therm_throt.c
380
struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
drivers/thermal/intel/therm_throt.c
440
struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
drivers/thermal/intel/therm_throt.c
530
struct thermal_state *state = &per_cpu(thermal_state, cpu);
drivers/thermal/intel/therm_throt.c
556
struct thermal_state *state = &per_cpu(thermal_state, cpu);
drivers/xen/events/events_2l.c
152
per_cpu(cpu_evtchn_mask, cpu)[idx] &
drivers/xen/events/events_2l.c
268
xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
drivers/xen/events/events_2l.c
280
v = per_cpu(xen_vcpu, i);
drivers/xen/events/events_2l.c
289
v = per_cpu(xen_vcpu, cpu);
drivers/xen/events/events_2l.c
353
memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
drivers/xen/events/events_2l.c
359
memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
drivers/xen/events/events_2l.c
52
clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
drivers/xen/events/events_2l.c
58
clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
drivers/xen/events/events_2l.c
59
set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
drivers/xen/events/events_base.c
1254
ret = per_cpu(ipi_to_irq, cpu)[ipi];
drivers/xen/events/events_base.c
1369
ret = per_cpu(virq_to_irq, cpu)[virq];
drivers/xen/events/events_base.c
1645
evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
drivers/xen/events/events_base.c
1807
int irq = per_cpu(virq_to_irq, old_cpu)[virq];
drivers/xen/events/events_base.c
1809
per_cpu(virq_to_irq, old_cpu)[virq] = -1;
drivers/xen/events/events_base.c
1810
per_cpu(virq_to_irq, tcpu)[virq] = irq;
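The virq_to_irq and ipi_to_irq entries above show that DEFINE_PER_CPU can carry an array type; per_cpu() then yields that CPU's whole array, indexable as usual. A sketch with hypothetical names:

#include <linux/percpu-defs.h>

#define NR_SLOTS 16

static DEFINE_PER_CPU(int [NR_SLOTS], slot_to_irq) = {
	[0 ... NR_SLOTS - 1] = -1
};

static void move_slot(unsigned int slot, unsigned int from, unsigned int to)
{
	int irq = per_cpu(slot_to_irq, from)[slot];

	per_cpu(slot_to_irq, from)[slot] = -1;
	per_cpu(slot_to_irq, to)[slot] = irq;
}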
drivers/xen/events/events_base.c
1983
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
drivers/xen/events/events_base.c
2012
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
drivers/xen/events/events_base.c
2222
.vcpu = per_cpu(xen_vcpu_id, cpu),
drivers/xen/events/events_base.c
371
per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
drivers/xen/events/events_base.c
372
per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
drivers/xen/events/events_base.c
382
per_cpu(virq_to_irq, cpu)[virq] = info->irq;
drivers/xen/events/events_base.c
433
int irq = per_cpu(virq_to_irq, cpu)[virq];
drivers/xen/events/events_base.c
557
struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
drivers/xen/events/events_base.c
567
struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
drivers/xen/events/events_base.c
636
(info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
drivers/xen/events/events_base.c
691
struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
drivers/xen/events/events_base.c
977
per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
drivers/xen/events/events_base.c
980
per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
drivers/xen/events/events_base.c
981
per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
drivers/xen/events/events_fifo.c
105
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
drivers/xen/events/events_fifo.c
277
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
drivers/xen/events/events_fifo.c
324
control_block = per_cpu(cpu_control_block, cpu);
drivers/xen/events/events_fifo.c
346
void *control_block = per_cpu(cpu_control_block, cpu);
drivers/xen/events/events_fifo.c
359
per_cpu(cpu_control_block, cpu) = NULL;
drivers/xen/events/events_fifo.c
388
per_cpu(cpu_control_block, cpu) = control_block;
drivers/xen/events/events_fifo.c
399
if (!per_cpu(cpu_control_block, cpu))
drivers/xen/time.c
122
per_cpu(old_runstate_time, cpu)[i] +=
drivers/xen/time.c
141
return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
drivers/xen/time.c
156
area.addr.v = &per_cpu(xen_runstate, cpu);
drivers/xen/time.c
82
res->time[i] += per_cpu(old_runstate_time, cpu)[i];
drivers/xen/xen-acpi-processor.c
467
_pr = per_cpu(processors, i /* APIC ID */);
drivers/xen/xen-acpi-processor.c
550
pr = per_cpu(processors, i);
fs/buffer.c
3020
tot += per_cpu(bh_accounting, i).nr;
fs/buffer.c
3053
struct bh_lru *b = &per_cpu(bh_lrus, cpu);
fs/buffer.c
3059
this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
fs/buffer.c
3060
per_cpu(bh_accounting, cpu).nr = 0;
fs/dcache.c
170
sum += per_cpu(nr_dentry, i);
fs/dcache.c
179
sum += per_cpu(nr_dentry_unused, i);
fs/dcache.c
189
sum += per_cpu(nr_dentry_negative, i);
fs/ext4/mballoc.c
460
__seq += per_cpu(discard_pa_seq, __cpu);
fs/inode.c
118
sum += data_race(per_cpu(mg_ctime_updates, i));
fs/inode.c
128
sum += data_race(per_cpu(mg_fine_stamps, i));
fs/inode.c
138
sum += data_race(per_cpu(mg_ctime_swaps, i));
fs/inode.c
87
sum += per_cpu(nr_inodes, i);
fs/inode.c
96
sum += per_cpu(nr_unused, i);
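The fs/dcache.c and fs/inode.c entries above use the classic approximate-counter idiom: each CPU bumps its own slot locklessly, and readers sum across all possible CPUs. A minimal sketch, hypothetical counter:

#include <linux/percpu-defs.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(long, nr_widgets);

static long get_nr_widgets(void)
{
	long sum = 0;
	int i;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_widgets, i);

	/* per-CPU deltas can make the sum transiently negative */
	return sum < 0 ? 0 : sum;
}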
fs/nfsd/filecache.c
1409
hits += per_cpu(nfsd_file_cache_hits, i);
fs/nfsd/filecache.c
1410
acquisitions += per_cpu(nfsd_file_acquisitions, i);
fs/nfsd/filecache.c
1411
allocations += per_cpu(nfsd_file_allocations, i);
fs/nfsd/filecache.c
1412
releases += per_cpu(nfsd_file_releases, i);
fs/nfsd/filecache.c
1413
total_age += per_cpu(nfsd_file_total_age, i);
fs/nfsd/filecache.c
1414
evictions += per_cpu(nfsd_file_evictions, i);
fs/nfsd/filecache.c
984
per_cpu(nfsd_file_cache_hits, i) = 0;
fs/nfsd/filecache.c
985
per_cpu(nfsd_file_acquisitions, i) = 0;
fs/nfsd/filecache.c
986
per_cpu(nfsd_file_allocations, i) = 0;
fs/nfsd/filecache.c
987
per_cpu(nfsd_file_releases, i) = 0;
fs/nfsd/filecache.c
988
per_cpu(nfsd_file_total_age, i) = 0;
fs/nfsd/filecache.c
989
per_cpu(nfsd_file_evictions, i) = 0;
include/linux/arch_topology.h
22
return per_cpu(capacity_freq_ref, cpu);
include/linux/arch_topology.h
29
return per_cpu(arch_freq_scale, cpu);
include/linux/arch_topology.h
56
return per_cpu(hw_pressure, cpu);
include/linux/context_tracking_state.h
143
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
include/linux/cpufreq.h
260
return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
include/linux/irqdesc.h
147
return desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
include/linux/kernel_stat.h
51
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
include/linux/kernel_stat.h
52
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
include/linux/topology.h
107
per_cpu(numa_node, cpu) = node;
include/linux/topology.h
150
return per_cpu(_numa_mem_, cpu);
include/linux/topology.h
157
per_cpu(_numa_mem_, cpu) = node;
include/linux/topology.h
336
return per_cpu(cpu_scale, cpu);
include/linux/topology.h
93
return per_cpu(numa_node, cpu);
include/xen/xen-ops.h
19
return per_cpu(xen_vcpu_id, cpu);
init/calibrate.c
284
if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
init/calibrate.c
285
lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
init/calibrate.c
309
per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
kernel/cgroup/cgroup.c
7135
per_cpu(cgrp_dead_tasks_iwork, cpu) =
kernel/context_tracking.c
681
if (!per_cpu(context_tracking.active, cpu)) {
kernel/context_tracking.c
682
per_cpu(context_tracking.active, cpu) = true;
kernel/debug/debug_core.c
253
csd = &per_cpu(kgdb_roundup_csd, cpu);
kernel/events/core.c
11114
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
kernel/events/core.c
11134
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
kernel/events/core.c
15041
swhash = &per_cpu(swevent_htable, cpu);
kernel/events/core.c
15044
INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
kernel/events/core.c
15045
raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
kernel/events/core.c
15047
INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
kernel/events/core.c
15061
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
kernel/events/hw_breakpoint.c
864
per_cpu(*cpu_events, cpu) = bp;
kernel/events/hw_breakpoint.c
885
unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));
kernel/fork.c
172
total += per_cpu(process_counts, cpu);
kernel/irq/irqdesc.c
1013
sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu));
kernel/irq/irqdesc.c
999
return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
kernel/irq/proc.c
489
unsigned int cnt = desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0;
kernel/irq_work.c
165
if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
kernel/irq_work.c
168
work = &per_cpu(irq_work_wakeup, cpu);
kernel/kcsan/core.c
801
per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
kernel/locking/lock_events.c
77
sum += per_cpu(lockevents[id], cpu);
kernel/locking/lockdep.c
307
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
kernel/locking/lockdep.c
332
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
kernel/locking/lockdep.c
4608
if (unlikely(per_cpu(hardirqs_enabled, cpu))) {
kernel/locking/lockdep.c
4613
per_cpu(hardirqs_enabled, cpu) = 0;
kernel/locking/lockdep_internals.h
234
__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu); \
kernel/locking/lockdep_internals.h
255
ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
kernel/locking/percpu-rwsem.c
194
__sum += per_cpu(var, cpu); \
kernel/locking/qspinlock_stat.h
112
per_cpu(pv_kick_time, cpu) = start;
kernel/locking/qspinlock_stat.h
52
sum += per_cpu(lockevents[id], cpu);
kernel/locking/qspinlock_stat.h
60
kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
kernel/locking/qspinlock_stat.h
64
kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
kernel/rcu/rcutorture.c
2737
pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
kernel/rcu/rcutorture.c
2738
batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
kernel/rcu/rcutorture.c
2980
t = per_cpu(ksoftirqd, cpu);
kernel/rcu/rcutorture.c
2986
t = per_cpu(ktimerd, cpu);
kernel/rcu/rcutorture.c
4540
per_cpu(rcu_torture_count, cpu)[i] = 0;
kernel/rcu/rcutorture.c
4541
per_cpu(rcu_torture_batch, cpu)[i] = 0;
kernel/rcu/tree.c
2927
per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
kernel/rcu/tree.c
2990
per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
kernel/rseq.c
136
stats.exit += data_race(per_cpu(rseq_stats.exit, cpu));
kernel/rseq.c
137
stats.signal += data_race(per_cpu(rseq_stats.signal, cpu));
kernel/rseq.c
138
stats.slowpath += data_race(per_cpu(rseq_stats.slowpath, cpu));
kernel/rseq.c
139
stats.fastpath += data_race(per_cpu(rseq_stats.fastpath, cpu));
kernel/rseq.c
140
stats.ids += data_race(per_cpu(rseq_stats.ids, cpu));
kernel/rseq.c
141
stats.cs += data_race(per_cpu(rseq_stats.cs, cpu));
kernel/rseq.c
142
stats.clear += data_race(per_cpu(rseq_stats.clear, cpu));
kernel/rseq.c
143
stats.fixup += data_race(per_cpu(rseq_stats.fixup, cpu));
kernel/rseq.c
145
stats.s_granted += data_race(per_cpu(rseq_stats.s_granted, cpu));
kernel/rseq.c
146
stats.s_expired += data_race(per_cpu(rseq_stats.s_expired, cpu));
kernel/rseq.c
147
stats.s_revoked += data_race(per_cpu(rseq_stats.s_revoked, cpu));
kernel/rseq.c
148
stats.s_yielded += data_race(per_cpu(rseq_stats.s_yielded, cpu));
kernel/rseq.c
149
stats.s_aborted += data_race(per_cpu(rseq_stats.s_aborted, cpu));
kernel/scftorture.c
161
pool = &per_cpu(scf_free_pool, cpu);
kernel/scftorture.c
171
pool = &per_cpu(scf_free_pool, cpu);
kernel/scftorture.c
190
invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
kernel/sched/clock.c
106
return &per_cpu(sched_clock_data, cpu);
kernel/sched/clock.c
169
per_cpu(sched_clock_data, cpu) = *scd;
kernel/sched/core.c
3833
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
kernel/sched/core.c
3845
return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
kernel/sched/core.c
6352
queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
kernel/sched/cpufreq.c
37
if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
kernel/sched/cpufreq.c
41
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
kernel/sched/cpufreq.c
57
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
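The kernel/sched/cpufreq.c entries above manage a per-CPU RCU-protected pointer: writers publish with rcu_assign_pointer() into a per_cpu slot, and readers fetch it with rcu_dereference() inside an RCU read-side section. A hedged sketch, hypothetical names:

#include <linux/percpu-defs.h>
#include <linux/rcupdate.h>

struct hook {
	void (*fn)(void);
};

static DEFINE_PER_CPU(struct hook __rcu *, demo_hook);

static void demo_set_hook(int cpu, struct hook *h)
{
	rcu_assign_pointer(per_cpu(demo_hook, cpu), h);
}

static void demo_call_hook(int cpu)
{
	struct hook *h;

	rcu_read_lock();
	h = rcu_dereference(per_cpu(demo_hook, cpu));
	if (h)
		h->fn();
	rcu_read_unlock();
}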
kernel/sched/cpufreq_schedutil.c
501
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
kernel/sched/cpufreq_schedutil.c
866
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
kernel/sched/deadline.c
3151
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
kernel/sched/deadline.c
635
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
kernel/sched/deadline.c
640
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
kernel/sched/ext_idle.c
235
sd = rcu_dereference(per_cpu(sd_llc, cpu));
kernel/sched/ext_idle.c
250
sd = rcu_dereference(per_cpu(sd_llc, cpu));
kernel/sched/ext_idle.c
266
sd = rcu_dereference(per_cpu(sd_numa, cpu));
kernel/sched/ext_idle.c
285
sd = rcu_dereference(per_cpu(sd_numa, cpu));
kernel/sched/ext_idle.c
681
BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_idle_cpumask, i),
kernel/sched/ext_idle.c
683
BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_llc_idle_cpumask, i),
kernel/sched/ext_idle.c
685
BUG_ON(!alloc_cpumask_var_node(&per_cpu(local_numa_idle_cpumask, i),
kernel/sched/fair.c
11079
llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
kernel/sched/fair.c
11083
sd_share = rcu_dereference_all(per_cpu(sd_llc_shared, env->dst_cpu));
kernel/sched/fair.c
12543
sd = rcu_dereference_all(per_cpu(sd_asym_packing, cpu));
kernel/sched/fair.c
12561
sd = rcu_dereference_all(per_cpu(sd_asym_cpucapacity, cpu));
kernel/sched/fair.c
12582
sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
kernel/sched/fair.c
12614
sd = rcu_dereference_all(per_cpu(sd_llc, cpu));
kernel/sched/fair.c
12643
sd = rcu_dereference_all(per_cpu(sd_llc, cpu));
kernel/sched/fair.c
14038
zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
kernel/sched/fair.c
14039
zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));
kernel/sched/fair.c
14040
zalloc_cpumask_var_node(&per_cpu(should_we_balance_tmpmask, i),
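The sched/fair.c entries above allocate per-CPU cpumasks at boot; with CONFIG_CPUMASK_OFFSTACK the mask storage is real and is placed on the CPU's own NUMA node. A minimal sketch, hypothetical mask name:

#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/percpu-defs.h>
#include <linux/topology.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

static void __init demo_mask_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		zalloc_cpumask_var_node(&per_cpu(demo_mask, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
}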
kernel/sched/fair.c
2592
sd = rcu_dereference_all(per_cpu(sd_numa, env.src_cpu));
kernel/sched/fair.c
7572
sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
kernel/sched/fair.c
7581
sds = rcu_dereference_all(per_cpu(sd_llc_shared, cpu));
kernel/sched/fair.c
7710
sd_share = rcu_dereference_all(per_cpu(sd_llc_shared, target));
kernel/sched/fair.c
7916
sd = rcu_dereference_all(per_cpu(sd_asym_cpucapacity, target));
kernel/sched/fair.c
7931
sd = rcu_dereference_all(per_cpu(sd_llc, target));
kernel/sched/rt.c
2412
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
kernel/sched/rt.c
389
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
kernel/sched/rt.c
394
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
kernel/sched/sched.h
1390
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
kernel/sched/sched.h
3423
struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
kernel/sched/topology.c
221
if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, i))) {
kernel/sched/topology.c
2790
if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
kernel/sched/topology.c
690
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
kernel/sched/topology.c
691
per_cpu(sd_llc_size, cpu) = size;
kernel/sched/topology.c
692
per_cpu(sd_llc_id, cpu) = id;
kernel/sched/topology.c
693
rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
kernel/sched/topology.c
704
per_cpu(sd_share_id, cpu) = id;
kernel/sched/topology.c
707
rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
kernel/sched/topology.c
710
rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
kernel/sched/topology.c
713
rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
kernel/smp.c
109
init_llist_head(&per_cpu(call_single_queue, i));
kernel/smp.c
275
cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
kernel/smp.c
292
*bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
kernel/smp.c
293
READ_ONCE(per_cpu(cur_csd_info, cpux)));
kernel/smp.c
299
if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
kernel/smp.c
412
if (llist_add(node, &per_cpu(call_single_queue, cpu)))
kernel/smp.c
56
struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
kernel/smp.c
78
struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
kernel/smp.c
844
if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
kernel/smpboot.c
30
struct task_struct *tsk = per_cpu(idle_threads, cpu);
kernel/smpboot.c
39
per_cpu(idle_threads, smp_processor_id()) = current;
kernel/smpboot.c
50
struct task_struct *tsk = per_cpu(idle_threads, cpu);
kernel/smpboot.c
57
per_cpu(idle_threads, cpu) = tsk;
kernel/softirq.c
1040
per_cpu(tasklet_vec, cpu).tail =
kernel/softirq.c
1041
&per_cpu(tasklet_vec, cpu).head;
kernel/softirq.c
1042
per_cpu(tasklet_hi_vec, cpu).tail =
kernel/softirq.c
1043
&per_cpu(tasklet_hi_vec, cpu).head;
kernel/softirq.c
1080
if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
kernel/softirq.c
1081
*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
kernel/softirq.c
1082
__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
kernel/softirq.c
1083
per_cpu(tasklet_vec, cpu).head = NULL;
kernel/softirq.c
1084
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
kernel/softirq.c
1088
if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
kernel/softirq.c
1089
*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
kernel/softirq.c
1090
__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
kernel/softirq.c
1091
per_cpu(tasklet_hi_vec, cpu).head = NULL;
kernel/softirq.c
1092
per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
kernel/stop_machine.c
409
work = &per_cpu(cpu_stopper.stop_work, cpu);
kernel/stop_machine.c
477
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
489
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
529
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
541
sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
kernel/stop_machine.c
546
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
553
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
574
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/stop_machine.c
93
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
kernel/taskstats.c
322
listeners = &per_cpu(listener_array, cpu);
kernel/taskstats.c
340
listeners = &per_cpu(listener_array, cpu);
kernel/taskstats.c
695
INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
kernel/taskstats.c
696
init_rwsem(&(per_cpu(listener_array, i).sem));
kernel/time/clockevents.c
402
return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
kernel/time/clockevents.c
729
&per_cpu(tick_cpu_device, dev->id);
kernel/time/clockevents.c
743
return &per_cpu(tick_cpu_device, dev->id);
kernel/time/clockevents.c
753
struct device *dev = &per_cpu(tick_percpu_dev, cpu);
kernel/time/hrtimer.c
2238
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
kernel/time/hrtimer.c
228
return &per_cpu(hrtimer_bases, cpu);
kernel/time/hrtimer.c
2308
new_base = &per_cpu(hrtimer_bases, ncpu);
kernel/time/hrtimer.c
233
return &per_cpu(hrtimer_bases, get_nohz_timer_target());
kernel/time/hrtimer.c
967
cpu_base = &per_cpu(hrtimer_bases, cpu);
kernel/time/tick-broadcast.c
104
return per_cpu(tick_oneshot_wakeup_device, cpu);
kernel/time/tick-broadcast.c
144
per_cpu(tick_oneshot_wakeup_device, cpu) = newdev;
kernel/time/tick-broadcast.c
381
td = &per_cpu(tick_cpu_device, cpumask_first(mask));
kernel/time/tick-broadcast.c
712
td = &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-broadcast.c
999
td = &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-common.c
332
td = &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-common.c
66
return &per_cpu(tick_cpu_device, cpu);
kernel/time/tick-sched.c
1636
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
kernel/time/tick-sched.c
1663
set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
kernel/time/tick-sched.c
436
irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
kernel/time/tick-sched.c
44
return &per_cpu(tick_cpu_sched, cpu);
kernel/time/tick-sched.c
834
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
kernel/time/tick-sched.c
860
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
kernel/time/timekeeping_debug.c
65
sum += data_race(per_cpu(timekeeping_mg_floor_swaps, cpu));
kernel/time/timer_list.c
115
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
kernel/trace/fgraph.c
1140
WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
kernel/trace/fgraph.c
1148
ret_stack = per_cpu(idle_ret_stack, cpu);
kernel/trace/fgraph.c
1153
per_cpu(idle_ret_stack, cpu) = ret_stack;
kernel/trace/ftrace.c
1024
stat = &per_cpu(ftrace_profile_stats, cpu);
kernel/trace/ftrace.c
691
stat = &per_cpu(ftrace_profile_stats, cpu);
kernel/trace/trace.c
2257
per_cpu(trace_buffered_event, cpu) = event;
kernel/trace/trace.c
2262
per_cpu(trace_buffered_event, cpu))
kernel/trace/trace.c
2306
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
kernel/trace/trace.c
2307
per_cpu(trace_buffered_event, cpu) = NULL;
kernel/trace/trace.c
738
mutex_lock(&per_cpu(cpu_access_lock, cpu));
kernel/trace/trace.c
747
mutex_unlock(&per_cpu(cpu_access_lock, cpu));
kernel/trace/trace.c
757
mutex_init(&per_cpu(cpu_access_lock, cpu));
kernel/trace/trace_events.c
5023
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
kernel/trace/trace_events.c
5040
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
kernel/trace/trace_hwlat.c
460
kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
kernel/trace/trace_hwlat.c
463
per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
kernel/trace/trace_hwlat.c
490
if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
kernel/trace/trace_hwlat.c
499
per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;
kernel/trace/trace_irqsoff.c
113
if (likely(!per_cpu(tracing_cpu, cpu)))
kernel/trace/trace_irqsoff.c
170
per_cpu(tracing_cpu, cpu) = 0;
kernel/trace/trace_irqsoff.c
404
if (per_cpu(tracing_cpu, cpu))
kernel/trace/trace_irqsoff.c
421
per_cpu(tracing_cpu, cpu) = 1;
kernel/trace/trace_irqsoff.c
438
if (unlikely(per_cpu(tracing_cpu, cpu)))
kernel/trace/trace_irqsoff.c
439
per_cpu(tracing_cpu, cpu) = 0;
kernel/trace/trace_osnoise.c
1936
kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
kernel/trace/trace_osnoise.c
1956
per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
kernel/trace/trace_osnoise.c
1990
if (per_cpu(per_cpu_osnoise_var, cpu).kthread)
kernel/trace/trace_osnoise.c
1999
per_cpu(per_cpu_osnoise_var, cpu).sampling = true;
kernel/trace/trace_osnoise.c
2013
per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
kernel/trace/trace_osnoise.c
2046
kthread = xchg_relaxed(&(per_cpu(per_cpu_osnoise_var, cpu).kthread), NULL);
kernel/trace/trace_osnoise.c
2699
struct dentry *per_cpu;
kernel/trace/trace_osnoise.c
2710
per_cpu = tracefs_create_dir("per_cpu", top_dir);
kernel/trace/trace_osnoise.c
2711
if (!per_cpu)
kernel/trace/trace_osnoise.c
2716
cpu_dir = tracefs_create_dir(cpu_str, per_cpu);
kernel/trace/trace_osnoise.c
2732
tracefs_remove(per_cpu);
kernel/trace/trace_osnoise.c
3032
per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
kernel/trace/trace_uprobe.c
836
nhits += per_cpu(*tu->nhits, cpu);
kernel/watchdog.c
159
per_cpu(watchdog_hardlockup_touched, cpu) = true;
kernel/watchdog.c
164
int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));
kernel/watchdog.c
166
if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
kernel/watchdog.c
174
per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
kernel/watchdog.c
191
if (per_cpu(watchdog_hardlockup_touched, cpu)) {
kernel/watchdog.c
192
per_cpu(watchdog_hardlockup_touched, cpu) = false;
kernel/watchdog.c
222
if (per_cpu(watchdog_hardlockup_warned, cpu))
kernel/watchdog.c
268
per_cpu(watchdog_hardlockup_warned, cpu) = true;
kernel/watchdog.c
270
per_cpu(watchdog_hardlockup_warned, cpu) = false;
kernel/watchdog.c
708
per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
kernel/watchdog_perf.c
222
struct perf_event *event = per_cpu(watchdog_ev, cpu);
kernel/watchdog_perf.c
244
struct perf_event *event = per_cpu(watchdog_ev, cpu);
kernel/workqueue.c
1226
return &per_cpu(bh_pool_irq_works, pool->cpu)[high];
kernel/workqueue.c
3688
&per_cpu(bh_worker_pools, smp_processor_id())[highpri];
kernel/workqueue.c
3752
struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i];
kernel/workqueue.c
548
for ((pool) = &per_cpu(bh_worker_pools, cpu)[0]; \
kernel/workqueue.c
549
(pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
kernel/workqueue.c
553
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
kernel/workqueue.c
554
(pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
kernel/workqueue.c
7152
static DEVICE_ATTR_RO(per_cpu);
kernel/workqueue.c
7660
per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
kernel/workqueue.c
7692
touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
kernel/workqueue.c
7758
per_cpu(wq_watchdog_touched_cpu, cpu) = now;
lib/debugobjects.c
1163
pcp_free += per_cpu(pool_pcpu.cnt, cpu);
lib/irq_poll.c
198
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
lib/irq_poll.c
212
INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
lib/radix-tree.c
1584
rtp = &per_cpu(radix_tree_preloads, cpu);
mm/huge_memory.c
606
struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
mm/kasan/sw_tags.c
44
per_cpu(prng_state, cpu) = (u32)get_cycles();
mm/memcontrol.c
2052
struct memcg_stock_pcp *memcg_st = &per_cpu(memcg_stock, cpu);
mm/memcontrol.c
2053
struct obj_stock_pcp *obj_st = &per_cpu(obj_stock, cpu);
mm/memcontrol.c
2082
drain_obj_stock(&per_cpu(obj_stock, cpu));
mm/memcontrol.c
2083
drain_stock_fully(&per_cpu(memcg_stock, cpu));
mm/memory-failure.c
2645
mf_cpu = &per_cpu(memory_failure_cpu, cpu);
mm/mlock.c
228
fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
mm/mlock.c
235
return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
mm/page_alloc.c
5877
per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
mm/page_alloc.c
6198
struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
mm/slub.c
3984
sfw = &per_cpu(slub_flush, cpu);
mm/slub.c
3996
sfw = &per_cpu(slub_flush, cpu);
mm/slub.c
4044
sfw = &per_cpu(slub_flush, cpu);
mm/slub.c
4059
sfw = &per_cpu(slub_flush, cpu);
mm/swap.c
323
struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
mm/swap.c
644
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
mm/swap.c
777
struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
mm/swap.c
875
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
mm/swap.c
885
flush_work(&per_cpu(lru_add_drain_work, cpu));
mm/vmalloc.c
2669
return &per_cpu(vmap_block_queue, index).vmap_blocks;
mm/vmalloc.c
2787
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
mm/vmalloc.c
2824
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
mm/vmalloc.c
2960
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
mm/vmalloc.c
5442
vbq = &per_cpu(vmap_block_queue, i);
mm/vmalloc.c
5445
p = &per_cpu(vfree_deferred, i);
mm/vmstat.c
121
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
mm/vmstat.c
149
struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
mm/vmstat.c
2124
struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
mm/vmstat.c
2168
disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
mm/vmstat.c
2193
enable_delayed_work(&per_cpu(vmstat_work, cpu));
mm/vmstat.c
2200
disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
net/core/dev.c
12709
sd = &per_cpu(softnet_data, cpu);
net/core/dev.c
12710
oldsd = &per_cpu(softnet_data, oldcpu);
net/core/dev.c
13180
per_cpu(system_page_pool.pool, cpuid) = pp_ptr;
net/core/dev.c
13250
struct softnet_data *sd = &per_cpu(softnet_data, i);
net/core/dev.c
13314
pp_ptr = per_cpu(system_page_pool.pool, i);
net/core/dev.c
13320
per_cpu(system_page_pool.pool, i) = NULL;
net/core/dev.c
5012
sd_input_head = READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head);
net/core/dev.c
5076
head = READ_ONCE(per_cpu(softnet_data, next_cpu).input_queue_head);
net/core/dev.c
5159
((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
net/core/dev.c
5283
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
net/core/dev.c
5359
sd = &per_cpu(softnet_data, cpu);
net/core/dev.c
6509
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
net/core/drop_monitor.c
1066
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1087
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1121
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1151
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1182
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1210
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1446
struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1490
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1692
data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1700
data = &per_cpu(dm_cpu_data, cpu);
net/core/drop_monitor.c
1712
hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/drop_monitor.c
1720
hw_data = &per_cpu(dm_hw_cpu_data, cpu);
net/core/net-procfs.c
101
sd = &per_cpu(softnet_data, *pos);
net/core/sysctl_net_core.c
229
sd = &per_cpu(softnet_data, i);
net/core/sysctl_net_core.c
253
sd = &per_cpu(softnet_data, i);
net/ipv4/icmp.c
1761
per_cpu(ipv4_icmp_sk, i) = sk;
net/ipv4/netfilter/arp_tables.c
608
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/ipv4/netfilter/ip_tables.c
747
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/ipv4/route.c
1561
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
net/ipv4/route.c
245
return &per_cpu(rt_cache_stat, cpu);
net/ipv4/route.c
258
return &per_cpu(rt_cache_stat, cpu);
net/ipv4/route.c
3745
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
net/ipv4/tcp.c
3132
total += per_cpu(tcp_orphan_count, i);
net/ipv4/tcp_ipv4.c
3738
per_cpu(ipv4_tcp_sk.sock, cpu) = sk;
net/ipv4/tcp_output.c
1368
struct tsq_work *tsq = &per_cpu(tsq_work, i);
net/ipv4/tcp_sigpool.c
103
kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
net/ipv4/tcp_sigpool.c
82
old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu),
net/ipv6/icmp.c
1282
per_cpu(ipv6_icmp_sk, i) = sk;
net/ipv6/netfilter/ip6_tables.c
764
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/netfilter/nft_ct.c
352
ct = per_cpu(nft_ct_pcpu_template, cpu);
net/netfilter/nft_ct.c
356
per_cpu(nft_ct_pcpu_template, cpu) = NULL;
net/netfilter/nft_ct.c
377
per_cpu(nft_ct_pcpu_template, cpu) = tmp;
net/netfilter/x_tables.c
1456
seqcount_t *s = &per_cpu(xt_recseq, cpu);
net/netfilter/x_tables.c
2015
seqcount_init(&per_cpu(xt_recseq, i));
net/rds/ib_stats.c
97
src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
net/rds/page.c
163
rem = &per_cpu(rds_page_remainders, cpu);
net/rds/rds.h
1002
per_cpu(which, get_cpu()).member += count; \
net/rds/rds.h
997
per_cpu(which, get_cpu()).member++; \
net/rds/stats.c
130
src = (uint64_t *)&(per_cpu(rds_stats, cpu));
net/rds/tcp_stats.c
64
src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
net/sunrpc/stats.c
111
count += per_cpu(vers->vs_count[j], k);
net/xfrm/xfrm_input.c
853
trans = &per_cpu(xfrm_trans_tasklet, i);
samples/kmemleak/kmemleak-test.c
77
per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
samples/kmemleak/kmemleak-test.c
79
per_cpu(kmemleak_test_pointer, i));
security/apparmor/lsm.c
2288
per_cpu(aa_local_buffers, i).hold = 0;
security/apparmor/lsm.c
2289
per_cpu(aa_local_buffers, i).count = 0;
security/apparmor/lsm.c
2290
INIT_LIST_HEAD(&per_cpu(aa_local_buffers, i).head);
security/selinux/selinuxfs.c
1542
return &per_cpu(avc_cache_stats, cpu);
tools/perf/util/auxtrace.c
194
bool per_cpu = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus);
tools/perf/util/auxtrace.c
203
if (per_cpu) {
tools/perf/util/evsel.c
1499
bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
tools/perf/util/evsel.c
1601
(!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
virt/kvm/kvm_main.c
6506
if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
virt/kvm/kvm_main.c
6568
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
virt/kvm/kvm_main.c
6589
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
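
Note: every hit above follows the same accessor pattern — a static variable declared once with DEFINE_PER_CPU() and then indexed by CPU with per_cpu(var, cpu), either for a local fast-path update or for a walk over all CPUs' instances. The sketch below is a minimal illustration of that pattern and is not taken from any file listed here: the counter demo_event_count and both functions are hypothetical names invented for this example, while DEFINE_PER_CPU(), this_cpu_inc(), per_cpu(), and for_each_possible_cpu() are the real <linux/percpu.h> and <linux/cpumask.h> interfaces these call sites use.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* One instance of this counter exists for every possible CPU
 * (hypothetical name, for illustration only). */
static DEFINE_PER_CPU(unsigned long, demo_event_count);

/* Fast path: bump the current CPU's instance with a
 * preemption-safe accessor; no lock is needed. */
static void demo_count_event(void)
{
	this_cpu_inc(demo_event_count);
}

/* Slow path: read every CPU's instance remotely, the way the
 * net/ipv4/tcp.c hit above sums tcp_orphan_count. Unsynchronized
 * remote reads like this are only suitable for approximate
 * statistics. */
static unsigned long demo_sum_events(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_event_count, cpu);

	return sum;
}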